From 334ef7a7ab8f80b689a2be95d5e62d2167900865 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Mon, 12 May 2008 21:21:13 +0200 Subject: x86: use performance variant for_each_cpu_mask_nr Change references from for_each_cpu_mask to for_each_cpu_mask_nr where appropriate Reviewed-by: Paul Jackson Reviewed-by: Christoph Lameter Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 6 +++--- arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 6 +++--- arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 8 ++++---- arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 10 +++++----- arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | 4 ++-- arch/x86/kernel/cpu/intel_cacheinfo.c | 2 +- arch/x86/kernel/cpu/mcheck/mce_amd_64.c | 4 ++-- arch/x86/kernel/io_apic_64.c | 8 ++++---- arch/x86/kernel/smpboot.c | 8 ++++---- arch/x86/xen/smp.c | 4 ++-- 10 files changed, 30 insertions(+), 30 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index b0c8208df9f..dd097b83583 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -202,7 +202,7 @@ static void drv_write(struct drv_cmd *cmd) cpumask_t saved_mask = current->cpus_allowed; unsigned int i; - for_each_cpu_mask(i, cmd->mask) { + for_each_cpu_mask_nr(i, cmd->mask) { set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); do_drv_write(cmd); } @@ -451,7 +451,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, freqs.old = perf->states[perf->state].core_frequency * 1000; freqs.new = data->freq_table[next_state].frequency; - for_each_cpu_mask(i, cmd.mask) { + for_each_cpu_mask_nr(i, cmd.mask) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } @@ -466,7 +466,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, } } - for_each_cpu_mask(i, cmd.mask) { + for_each_cpu_mask_nr(i, cmd.mask) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index 199e4e05e5d..f1685fb91fb 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c @@ -122,7 +122,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, return 0; /* notifiers */ - for_each_cpu_mask(i, policy->cpus) { + for_each_cpu_mask_nr(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } @@ -130,11 +130,11 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software * Developer's Manual, Volume 3 */ - for_each_cpu_mask(i, policy->cpus) + for_each_cpu_mask_nr(i, policy->cpus) cpufreq_p4_setdc(i, p4clockmod_table[newstate].index); /* notifiers */ - for_each_cpu_mask(i, policy->cpus) { + for_each_cpu_mask_nr(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 46d4034d9f3..06d6eea5e07 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -966,7 +966,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i freqs.old = find_khz_freq_from_fid(data->currfid); freqs.new =
find_khz_freq_from_fid(fid); - for_each_cpu_mask(i, *(data->available_cores)) { + for_each_cpu_mask_nr(i, *(data->available_cores)) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } @@ -974,7 +974,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i res = transition_fid_vid(data, fid, vid); freqs.new = find_khz_freq_from_fid(data->currfid); - for_each_cpu_mask(i, *(data->available_cores)) { + for_each_cpu_mask_nr(i, *(data->available_cores)) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } @@ -997,7 +997,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate); freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); - for_each_cpu_mask(i, *(data->available_cores)) { + for_each_cpu_mask_nr(i, *(data->available_cores)) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } @@ -1005,7 +1005,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i res = transition_pstate(data, pstate); freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); - for_each_cpu_mask(i, *(data->available_cores)) { + for_each_cpu_mask_nr(i, *(data->available_cores)) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index 908dd347c67..8b0dd6f2a1a 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c @@ -476,7 +476,7 @@ static int centrino_target (struct cpufreq_policy *policy, saved_mask = current->cpus_allowed; first_cpu = 1; cpus_clear(covered_cpus); - for_each_cpu_mask(j, online_policy_cpus) { + for_each_cpu_mask_nr(j, online_policy_cpus) { /* * Support for SMP systems. 
* Make sure we are running on CPU that wants to change freq @@ -517,7 +517,7 @@ static int centrino_target (struct cpufreq_policy *policy, dprintk("target=%dkHz old=%d new=%d msr=%04x\n", target_freq, freqs.old, freqs.new, msr); - for_each_cpu_mask(k, online_policy_cpus) { + for_each_cpu_mask_nr(k, online_policy_cpus) { freqs.cpu = k; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); @@ -540,7 +540,7 @@ static int centrino_target (struct cpufreq_policy *policy, preempt_enable(); } - for_each_cpu_mask(k, online_policy_cpus) { + for_each_cpu_mask_nr(k, online_policy_cpus) { freqs.cpu = k; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } @@ -554,7 +554,7 @@ static int centrino_target (struct cpufreq_policy *policy, */ if (!cpus_empty(covered_cpus)) { - for_each_cpu_mask(j, covered_cpus) { + for_each_cpu_mask_nr(j, covered_cpus) { set_cpus_allowed_ptr(current, &cpumask_of_cpu(j)); wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); @@ -564,7 +564,7 @@ static int centrino_target (struct cpufreq_policy *policy, tmp = freqs.new; freqs.new = freqs.old; freqs.old = tmp; - for_each_cpu_mask(j, online_policy_cpus) { + for_each_cpu_mask_nr(j, online_policy_cpus) { freqs.cpu = j; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c index 1b50244b1fd..191f7263c61 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c @@ -279,7 +279,7 @@ static int speedstep_target (struct cpufreq_policy *policy, cpus_allowed = current->cpus_allowed; - for_each_cpu_mask(i, policy->cpus) { + for_each_cpu_mask_nr(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } @@ -292,7 +292,7 @@ static int speedstep_target (struct cpufreq_policy *policy, /* allow to be run on all CPUs */ set_cpus_allowed_ptr(current, &cpus_allowed); - for_each_cpu_mask(i, policy->cpus) { + for_each_cpu_mask_nr(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 26d615dcb14..bfade3301c3 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -488,7 +488,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) int sibling; this_leaf = CPUID4_INFO_IDX(cpu, index); - for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) { + for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) { sibling_leaf = CPUID4_INFO_IDX(sibling, index); cpu_clear(cpu, sibling_leaf->shared_cpu_map); } diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c index 7c9a813e119..88736cadbaa 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c @@ -527,7 +527,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) if (err) goto out_free; - for_each_cpu_mask(i, b->cpus) { + for_each_cpu_mask_nr(i, b->cpus) { if (i == cpu) continue; @@ -617,7 +617,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank) #endif /* remove all sibling symlinks before unregistering */ - for_each_cpu_mask(i, b->cpus) { + for_each_cpu_mask_nr(i, b->cpus) { if (i == cpu) continue; diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index ef1a8dfcc52..e2838cbd2ff 100644 --- a/arch/x86/kernel/io_apic_64.c +++ 
b/arch/x86/kernel/io_apic_64.c @@ -718,7 +718,7 @@ static int __assign_irq_vector(int irq, cpumask_t mask) return 0; } - for_each_cpu_mask(cpu, mask) { + for_each_cpu_mask_nr(cpu, mask) { cpumask_t domain, new_mask; int new_cpu; int vector, offset; @@ -739,7 +739,7 @@ next: continue; if (vector == IA32_SYSCALL_VECTOR) goto next; - for_each_cpu_mask(new_cpu, new_mask) + for_each_cpu_mask_nr(new_cpu, new_mask) if (per_cpu(vector_irq, new_cpu)[vector] != -1) goto next; /* Found one! */ @@ -749,7 +749,7 @@ next: cfg->move_in_progress = 1; cfg->old_domain = cfg->domain; } - for_each_cpu_mask(new_cpu, new_mask) + for_each_cpu_mask_nr(new_cpu, new_mask) per_cpu(vector_irq, new_cpu)[vector] = irq; cfg->vector = vector; cfg->domain = domain; @@ -781,7 +781,7 @@ static void __clear_irq_vector(int irq) vector = cfg->vector; cpus_and(mask, cfg->domain, cpu_online_map); - for_each_cpu_mask(cpu, mask) + for_each_cpu_mask_nr(cpu, mask) per_cpu(vector_irq, cpu)[vector] = -1; cfg->vector = 0; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 38988491c62..fff8ebaa554 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -487,7 +487,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) cpu_set(cpu, cpu_sibling_setup_map); if (smp_num_siblings > 1) { - for_each_cpu_mask(i, cpu_sibling_setup_map) { + for_each_cpu_mask_nr(i, cpu_sibling_setup_map) { if (c->phys_proc_id == cpu_data(i).phys_proc_id && c->cpu_core_id == cpu_data(i).cpu_core_id) { cpu_set(i, per_cpu(cpu_sibling_map, cpu)); @@ -510,7 +510,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) return; } - for_each_cpu_mask(i, cpu_sibling_setup_map) { + for_each_cpu_mask_nr(i, cpu_sibling_setup_map) { if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { cpu_set(i, c->llc_shared_map); @@ -1298,7 +1298,7 @@ static void remove_siblinginfo(int cpu) int sibling; struct cpuinfo_x86 *c = &cpu_data(cpu); - for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) { + for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) { cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); /*/ * last thread sibling in this cpu core going down @@ -1307,7 +1307,7 @@ static void remove_siblinginfo(int cpu) cpu_data(sibling).booted_cores--; } - for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) + for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu)) cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); cpus_clear(per_cpu(cpu_sibling_map, cpu)); cpus_clear(per_cpu(cpu_core_map, cpu)); diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 94e69000f98..7a70638797e 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -345,7 +345,7 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector) cpus_and(mask, mask, cpu_online_map); - for_each_cpu_mask(cpu, mask) + for_each_cpu_mask_nr(cpu, mask) xen_send_IPI_one(cpu, vector); } @@ -413,7 +413,7 @@ int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *), /* Make sure other vcpus get a chance to run if they need to. 
*/ yield = false; - for_each_cpu_mask(cpu, mask) + for_each_cpu_mask_nr(cpu, mask) if (xen_vcpu_stolen(cpu)) yield = true; -- cgit v1.2.3-70-g09d2 From 323ec001c6bb98eeabb5abbdbb8c8055d9496554 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Sun, 29 Jun 2008 14:19:31 +0400 Subject: x86: use generic per-device dma coherent allocator Signed-off-by: Dmitry Baryshkov Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 + arch/x86/kernel/pci-dma.c | 122 +----------------------------------------- include/asm-x86/dma-mapping.h | 22 +------- 3 files changed, 4 insertions(+), 141 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index e0edaaa6920..9d4714e0020 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -25,6 +25,7 @@ config X86 select HAVE_KRETPROBES select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) select HAVE_ARCH_KGDB if !X86_VOYAGER + select HAVE_GENERIC_DMA_COHERENT if X86_32 config ARCH_DEFCONFIG string diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index dc00a1331ac..b75c81a8d3e 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -197,124 +197,6 @@ static __init int iommu_setup(char *p) } early_param("iommu", iommu_setup); -#ifdef CONFIG_X86_32 -int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, - dma_addr_t device_addr, size_t size, int flags) -{ - void __iomem *mem_base = NULL; - int pages = size >> PAGE_SHIFT; - int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); - - if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0) - goto out; - if (!size) - goto out; - if (dev->dma_mem) - goto out; - - /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ - - mem_base = ioremap(bus_addr, size); - if (!mem_base) - goto out; - - dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); - if (!dev->dma_mem) - goto out; - dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); - if (!dev->dma_mem->bitmap) - goto free1_out; - - dev->dma_mem->virt_base = mem_base; - dev->dma_mem->device_base = device_addr; - dev->dma_mem->size = pages; - dev->dma_mem->flags = flags; - - if (flags & DMA_MEMORY_MAP) - return DMA_MEMORY_MAP; - - return DMA_MEMORY_IO; - - free1_out: - kfree(dev->dma_mem); - out: - if (mem_base) - iounmap(mem_base); - return 0; -} -EXPORT_SYMBOL(dma_declare_coherent_memory); - -void dma_release_declared_memory(struct device *dev) -{ - struct dma_coherent_mem *mem = dev->dma_mem; - - if (!mem) - return; - dev->dma_mem = NULL; - iounmap(mem->virt_base); - kfree(mem->bitmap); - kfree(mem); -} -EXPORT_SYMBOL(dma_release_declared_memory); - -void *dma_mark_declared_memory_occupied(struct device *dev, - dma_addr_t device_addr, size_t size) -{ - struct dma_coherent_mem *mem = dev->dma_mem; - int pos, err; - int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1); - - pages >>= PAGE_SHIFT; - - if (!mem) - return ERR_PTR(-EINVAL); - - pos = (device_addr - mem->device_base) >> PAGE_SHIFT; - err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages)); - if (err != 0) - return ERR_PTR(err); - return mem->virt_base + (pos << PAGE_SHIFT); -} -EXPORT_SYMBOL(dma_mark_declared_memory_occupied); - -static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size, - dma_addr_t *dma_handle, void **ret) -{ - struct dma_coherent_mem *mem = dev ? 
dev->dma_mem : NULL; - int order = get_order(size); - - if (mem) { - int page = bitmap_find_free_region(mem->bitmap, mem->size, - order); - if (page >= 0) { - *dma_handle = mem->device_base + (page << PAGE_SHIFT); - *ret = mem->virt_base + (page << PAGE_SHIFT); - memset(*ret, 0, size); - } - if (mem->flags & DMA_MEMORY_EXCLUSIVE) - *ret = NULL; - } - return (mem != NULL); -} - -static int dma_release_coherent(struct device *dev, int order, void *vaddr) -{ - struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; - - if (mem && vaddr >= mem->virt_base && vaddr < - (mem->virt_base + (mem->size << PAGE_SHIFT))) { - int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; - - bitmap_release_region(mem->bitmap, page, order); - return 1; - } - return 0; -} -#else -#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0) -#define dma_release_coherent(dev, order, vaddr) (0) -#endif /* CONFIG_X86_32 */ - int dma_supported(struct device *dev, u64 mask) { #ifdef CONFIG_PCI @@ -383,7 +265,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, /* ignore region specifiers */ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); - if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory)) + if (dma_alloc_from_coherent(dev, size, dma_handle, &memory)) return memory; if (!dev) { @@ -486,7 +368,7 @@ void dma_free_coherent(struct device *dev, size_t size, { int order = get_order(size); WARN_ON(irqs_disabled()); /* for portability */ - if (dma_release_coherent(dev, order, vaddr)) + if (dma_release_from_coherent(dev, order, vaddr)) return; if (dma_ops->unmap_single) dma_ops->unmap_single(dev, bus, size, 0); diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h index a1a4dc7fe6e..c68f360ef56 100644 --- a/include/asm-x86/dma-mapping.h +++ b/include/asm-x86/dma-mapping.h @@ -213,25 +213,5 @@ static inline int dma_get_cache_alignment(void) #define dma_is_consistent(d, h) (1) -#ifdef CONFIG_X86_32 -# define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY -struct dma_coherent_mem { - void *virt_base; - u32 device_base; - int size; - int flags; - unsigned long *bitmap; -}; - -extern int -dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, - dma_addr_t device_addr, size_t size, int flags); - -extern void -dma_release_declared_memory(struct device *dev); - -extern void * -dma_mark_declared_memory_occupied(struct device *dev, - dma_addr_t device_addr, size_t size); -#endif /* CONFIG_X86_32 */ +#include #endif -- cgit v1.2.3-70-g09d2 From b8f8c3cf0a4ac0632ec3f0e15e9dc0c29de917af Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 18 Jul 2008 17:27:28 +0200 Subject: nohz: prevent tick stop outside of the idle loop Jack Ren and Eric Miao tracked down the following long standing problem in the NOHZ code: scheduler switch to idle task enable interrupts Window starts here ----> interrupt happens (does not set NEED_RESCHED) irq_exit() stops the tick ----> interrupt happens (does set NEED_RESCHED) return from schedule() cpu_idle(): preempt_disable(); Window ends here The interrupts can happen at any point inside the race window. The first interrupt stops the tick, the second one causes the scheduler to rerun and switch away from idle again and we end up with the tick disabled. The fact that it needs two interrupts where the first one does not set NEED_RESCHED and the second one does made the bug obscure and extremely hard to reproduce and analyse. Kudos to Jack and Eric.
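For reference, the interrupt-exit path that stops the tick looks roughly like this (simplified from the kernel/softirq.c hunk further down; unrelated accounting and error paths omitted):

        void irq_exit(void)
        {
                ...
        #ifdef CONFIG_NO_HZ
                /* Make sure that timer wheel updates are propagated */
                if (!in_interrupt() && idle_cpu(smp_processor_id()) &&
                    !need_resched())
                        tick_nohz_stop_sched_tick();  /* first interrupt in the window stops the tick */
                rcu_irq_exit();
        #endif
                preempt_enable_no_resched();
        }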
Solution: Limit the NOHZ functionality to the idle loop to make sure that we can not run into such a situation ever again. cpu_idle() { preempt_disable(); while(1) { tick_nohz_stop_sched_tick(1); <- tell NOHZ code that we are in the idle loop while (!need_resched()) halt(); tick_nohz_restart_sched_tick(); <- disables NOHZ mode preempt_enable_no_resched(); schedule(); preempt_disable(); } } In hindsight we should have done this forever, but ... /me grabs a large brown paperbag. Debugged-by: Jack Ren , Debugged-by: eric miao Signed-off-by: Thomas Gleixner --- arch/arm/kernel/process.c | 2 +- arch/avr32/kernel/process.c | 2 +- arch/blackfin/kernel/process.c | 2 +- arch/mips/kernel/process.c | 2 +- arch/powerpc/kernel/idle.c | 2 +- arch/powerpc/platforms/iseries/setup.c | 4 ++-- arch/sh/kernel/process_32.c | 2 +- arch/sparc64/kernel/process.c | 2 +- arch/um/kernel/process.c | 2 +- arch/x86/kernel/process_32.c | 2 +- arch/x86/kernel/process_64.c | 2 +- include/linux/tick.h | 5 +++-- kernel/softirq.c | 2 +- kernel/time/tick-sched.c | 12 ++++++++++-- 14 files changed, 26 insertions(+), 17 deletions(-) (limited to 'arch/x86') diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 46bf2ede612..84f5a4c778f 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -164,7 +164,7 @@ void cpu_idle(void) if (!idle) idle = default_idle; leds_event(led_idle_start); - tick_nohz_stop_sched_tick(); + tick_nohz_stop_sched_tick(1); while (!need_resched()) idle(); leds_event(led_idle_end); diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c index 6cf9df17627..ff820a9e743 100644 --- a/arch/avr32/kernel/process.c +++ b/arch/avr32/kernel/process.c @@ -31,7 +31,7 @@ void cpu_idle(void) { /* endless idle loop with no priority at all */ while (1) { - tick_nohz_stop_sched_tick(); + tick_nohz_stop_sched_tick(1); while (!need_resched()) cpu_idle_sleep(); tick_nohz_restart_sched_tick(); diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c index 53c2cd25544..77800dd83e5 100644 --- a/arch/blackfin/kernel/process.c +++ b/arch/blackfin/kernel/process.c @@ -105,7 +105,7 @@ void cpu_idle(void) #endif if (!idle) idle = default_idle; - tick_nohz_stop_sched_tick(); + tick_nohz_stop_sched_tick(1); while (!need_resched()) idle(); tick_nohz_restart_sched_tick(); diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 2c09a442e5e..bdead3aad25 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -53,7 +53,7 @@ void __noreturn cpu_idle(void) { /* endless idle loop with no priority at all */ while (1) { - tick_nohz_stop_sched_tick(); + tick_nohz_stop_sched_tick(1); while (!need_resched()) { #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG extern void smtc_idle_loop_hook(void); diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index c3cf0e8f3ac..d308a9f70f1 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -60,7 +60,7 @@ void cpu_idle(void) set_thread_flag(TIF_POLLING_NRFLAG); while (1) { - tick_nohz_stop_sched_tick(); + tick_nohz_stop_sched_tick(1); while (!need_resched() && !cpu_should_die()) { ppc64_runlatch_off(); diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c index b72120751bb..70b688c1aef 100644 --- a/arch/powerpc/platforms/iseries/setup.c +++ b/arch/powerpc/platforms/iseries/setup.c @@ -561,7 +561,7 @@ static void yield_shared_processor(void) static void iseries_shared_idle(void) { while (1) { - tick_nohz_stop_sched_tick(); + 
tick_nohz_stop_sched_tick(1); while (!need_resched() && !hvlpevent_is_pending()) { local_irq_disable(); ppc64_runlatch_off(); @@ -591,7 +591,7 @@ static void iseries_dedicated_idle(void) set_thread_flag(TIF_POLLING_NRFLAG); while (1) { - tick_nohz_stop_sched_tick(); + tick_nohz_stop_sched_tick(1); if (!need_resched()) { while (!need_resched()) { ppc64_runlatch_off(); diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index b98e37a1f54..921892c351d 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -86,7 +86,7 @@ void cpu_idle(void) if (!idle) idle = default_idle; - tick_nohz_stop_sched_tick(); + tick_nohz_stop_sched_tick(1); while (!need_resched()) idle(); tick_nohz_restart_sched_tick(); diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c index 2084f81a76e..0798928ba36 100644 --- a/arch/sparc64/kernel/process.c +++ b/arch/sparc64/kernel/process.c @@ -97,7 +97,7 @@ void cpu_idle(void) set_thread_flag(TIF_POLLING_NRFLAG); while(1) { - tick_nohz_stop_sched_tick(); + tick_nohz_stop_sched_tick(1); while (!need_resched() && !cpu_is_offline(cpu)) sparc64_yield(cpu); diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index 83603cfbde8..a1c6d07cac3 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -243,7 +243,7 @@ void default_idle(void) if (need_resched()) schedule(); - tick_nohz_stop_sched_tick(); + tick_nohz_stop_sched_tick(1); nsecs = disable_timer(); idle_sleep(nsecs); tick_nohz_restart_sched_tick(); diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index f8476dfbb60..1f5fa1cf16d 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -166,7 +166,7 @@ void cpu_idle(void) /* endless idle loop with no priority at all */ while (1) { - tick_nohz_stop_sched_tick(); + tick_nohz_stop_sched_tick(1); while (!need_resched()) { void (*idle)(void); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index e2319f39988..c0a5c2a687e 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -148,7 +148,7 @@ void cpu_idle(void) current_thread_info()->status |= TS_POLLING; /* endless idle loop with no priority at all */ while (1) { - tick_nohz_stop_sched_tick(); + tick_nohz_stop_sched_tick(1); while (!need_resched()) { void (*idle)(void); diff --git a/include/linux/tick.h b/include/linux/tick.h index a881c652f7e..d3c02695dc5 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -49,6 +49,7 @@ struct tick_sched { unsigned long check_clocks; enum tick_nohz_mode nohz_mode; ktime_t idle_tick; + int inidle; int tick_stopped; unsigned long idle_jiffies; unsigned long idle_calls; @@ -105,14 +106,14 @@ static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ # ifdef CONFIG_NO_HZ -extern void tick_nohz_stop_sched_tick(void); +extern void tick_nohz_stop_sched_tick(int inidle); extern void tick_nohz_restart_sched_tick(void); extern void tick_nohz_update_jiffies(void); extern ktime_t tick_nohz_get_sleep_length(void); extern void tick_nohz_stop_idle(int cpu); extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); # else -static inline void tick_nohz_stop_sched_tick(void) { } +static inline void tick_nohz_stop_sched_tick(int inidle) { } static inline void tick_nohz_restart_sched_tick(void) { } static inline void tick_nohz_update_jiffies(void) { } static inline ktime_t tick_nohz_get_sleep_length(void) diff --git a/kernel/softirq.c b/kernel/softirq.c index 
36e06174004..05f248039d7 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -312,7 +312,7 @@ void irq_exit(void) #ifdef CONFIG_NO_HZ /* Make sure that timer wheel updates are propagated */ if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched()) - tick_nohz_stop_sched_tick(); + tick_nohz_stop_sched_tick(0); rcu_irq_exit(); #endif preempt_enable_no_resched(); diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 86baa4f0dfe..ee962d11107 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -195,7 +195,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time) * Called either from the idle loop or from irq_exit() when an idle period was * just interrupted by an interrupt which did not cause a reschedule. */ -void tick_nohz_stop_sched_tick(void) +void tick_nohz_stop_sched_tick(int inidle) { unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags; struct tick_sched *ts; @@ -224,6 +224,11 @@ void tick_nohz_stop_sched_tick(void) if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) goto end; + if (!inidle && !ts->inidle) + goto end; + + ts->inidle = 1; + if (need_resched()) goto end; @@ -372,11 +377,14 @@ void tick_nohz_restart_sched_tick(void) local_irq_disable(); tick_nohz_stop_idle(cpu); - if (!ts->tick_stopped) { + if (!ts->inidle || !ts->tick_stopped) { + ts->inidle = 0; local_irq_enable(); return; } + ts->inidle = 0; + rcu_exit_nohz(); /* Update jiffies first */ -- cgit v1.2.3-70-g09d2 From 65c011845316d3c1381f478ca0d8265c43b3b039 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 15 Jul 2008 14:14:30 -0700 Subject: cpumask: Replace cpumask_of_cpu with cpumask_of_cpu_ptr * This patch replaces the dangerous lvalue version of cpumask_of_cpu with new cpumask_of_cpu_ptr macros. These are patterned after the node_to_cpumask_ptr macros. In general terms, if there is a cpumask_of_cpu_map[] then a pointer to the cpumask_of_cpu_map[cpu] entry is used. The cpumask_of_cpu_map is provided when there is a large NR_CPUS count, reducing greatly the amount of code generated and stack space used for cpumask_of_cpu(). The pointer to the cpumask_t value is needed for calling set_cpus_allowed_ptr() to reduce the amount of stack space needed to pass the cpumask_t value. If there isn't a cpumask_of_cpu_map[], then a temporary variable is declared and filled in with value from cpumask_of_cpu(cpu) as well as a pointer variable pointing to this temporary variable. Afterwards, the pointer is used to reference the cpumask value. The compiler will optimize out the extra dereference through the pointer as well as the stack space used for the pointer, resulting in identical code. 
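In sketch form, the two usage patterns look like this (both lifted from the arch/x86 hunks below; drv_write() is the acpi-cpufreq helper being converted):

        /* cpu known at declaration time: declare and initialize in one step */
        cpumask_of_cpu_ptr(new_mask, cpu);
        set_cpus_allowed_ptr(current, new_mask);

        /* cpu changes per iteration: declare once, retarget the pointer */
        cpumask_of_cpu_ptr_declare(cpu_mask);
        for_each_cpu_mask_nr(i, cmd->mask) {
                cpumask_of_cpu_ptr_next(cpu_mask, i);
                set_cpus_allowed_ptr(current, cpu_mask);
                do_drv_write(cmd);
        }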
A good example of the orthogonal usages is in net/sunrpc/svc.c: case SVC_POOL_PERCPU: { unsigned int cpu = m->pool_to[pidx]; cpumask_of_cpu_ptr(cpumask, cpu); *oldmask = current->cpus_allowed; set_cpus_allowed_ptr(current, cpumask); return 1; } case SVC_POOL_PERNODE: { unsigned int node = m->pool_to[pidx]; node_to_cpumask_ptr(nodecpumask, node); *oldmask = current->cpus_allowed; set_cpus_allowed_ptr(current, nodecpumask); return 1; } Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- arch/x86/kernel/acpi/cstate.c | 3 ++- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 10 +++++--- arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 15 +++++++---- arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 9 ++++--- arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | 3 ++- arch/x86/kernel/cpu/intel_cacheinfo.c | 3 ++- arch/x86/kernel/microcode.c | 13 +++++++--- arch/x86/kernel/reboot.c | 14 +++++++---- drivers/acpi/processor_throttling.c | 11 +++++--- drivers/firmware/dcdbas.c | 3 ++- include/linux/cpumask.h | 32 ++++++++++++++++++++---- kernel/stop_machine.c | 3 ++- kernel/trace/trace_sysprof.c | 4 ++- net/sunrpc/svc.c | 3 ++- 14 files changed, 91 insertions(+), 35 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index c2502eb9aa8..9220cf46aa1 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -73,6 +73,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, struct cpuinfo_x86 *c = &cpu_data(cpu); cpumask_t saved_mask; + cpumask_of_cpu_ptr(new_mask, cpu); int retval; unsigned int eax, ebx, ecx, edx; unsigned int edx_part; @@ -91,7 +92,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, /* Make sure we are running on right CPU */ saved_mask = current->cpus_allowed; - retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); + retval = set_cpus_allowed_ptr(current, new_mask); if (retval) return -1; diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index dd097b83583..ff2fff56f0a 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -200,10 +200,12 @@ static void drv_read(struct drv_cmd *cmd) static void drv_write(struct drv_cmd *cmd) { cpumask_t saved_mask = current->cpus_allowed; + cpumask_of_cpu_ptr_declare(cpu_mask); unsigned int i; for_each_cpu_mask_nr(i, cmd->mask) { - set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); + cpumask_of_cpu_ptr_next(cpu_mask, i); + set_cpus_allowed_ptr(current, cpu_mask); do_drv_write(cmd); } @@ -267,11 +269,12 @@ static unsigned int get_measured_perf(unsigned int cpu) } aperf_cur, mperf_cur; cpumask_t saved_mask; + cpumask_of_cpu_ptr(cpu_mask, cpu); unsigned int perf_percent; unsigned int retval; saved_mask = current->cpus_allowed; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); + set_cpus_allowed_ptr(current, cpu_mask); if (get_cpu() != cpu) { /* We were not able to run on requested processor */ put_cpu(); @@ -337,6 +340,7 @@ static unsigned int get_measured_perf(unsigned int cpu) static unsigned int get_cur_freq_on_cpu(unsigned int cpu) { + cpumask_of_cpu_ptr(cpu_mask, cpu); struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu); unsigned int freq; unsigned int cached_freq; @@ -349,7 +353,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) } cached_freq = data->freq_table[data->acpi_data->state].frequency; - freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data); + freq = extract_freq(get_cur_val(cpu_mask), data); if (freq != cached_freq) { 
/* * The dreaded BIOS frequency change behind our back. diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index c45ca6d4dce..53c7b693697 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -479,11 +479,12 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi static int check_supported_cpu(unsigned int cpu) { cpumask_t oldmask; + cpumask_of_cpu_ptr(cpu_mask, cpu); u32 eax, ebx, ecx, edx; unsigned int rc = 0; oldmask = current->cpus_allowed; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); + set_cpus_allowed_ptr(current, cpu_mask); if (smp_processor_id() != cpu) { printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu); @@ -1016,6 +1017,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation) { cpumask_t oldmask; + cpumask_of_cpu_ptr(cpu_mask, pol->cpu); struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); u32 checkfid; u32 checkvid; @@ -1030,7 +1032,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi /* only run on specific CPU from here on */ oldmask = current->cpus_allowed; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu)); + set_cpus_allowed_ptr(current, cpu_mask); if (smp_processor_id() != pol->cpu) { printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); @@ -1105,6 +1107,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) { struct powernow_k8_data *data; cpumask_t oldmask; + cpumask_of_cpu_ptr_declare(newmask); int rc; if (!cpu_online(pol->cpu)) @@ -1156,7 +1159,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) /* only run on specific CPU from here on */ oldmask = current->cpus_allowed; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu)); + cpumask_of_cpu_ptr_next(newmask, pol->cpu); + set_cpus_allowed_ptr(current, newmask); if (smp_processor_id() != pol->cpu) { printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); @@ -1178,7 +1182,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) set_cpus_allowed_ptr(current, &oldmask); if (cpu_family == CPU_HW_PSTATE) - pol->cpus = cpumask_of_cpu(pol->cpu); + pol->cpus = *newmask; else pol->cpus = per_cpu(cpu_core_map, pol->cpu); data->available_cores = &(pol->cpus); @@ -1244,6 +1248,7 @@ static unsigned int powernowk8_get (unsigned int cpu) { struct powernow_k8_data *data; cpumask_t oldmask = current->cpus_allowed; + cpumask_of_cpu_ptr(newmask, cpu); unsigned int khz = 0; unsigned int first; @@ -1253,7 +1258,7 @@ static unsigned int powernowk8_get (unsigned int cpu) if (!data) return -EINVAL; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); + set_cpus_allowed_ptr(current, newmask); if (smp_processor_id() != cpu) { printk(KERN_ERR PFX "limiting to CPU %d failed in powernowk8_get\n", cpu); diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index 8b0dd6f2a1a..fd561bb26c6 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c @@ -313,9 +313,10 @@ static unsigned int get_cur_freq(unsigned int cpu) unsigned l, h; unsigned clock_freq; cpumask_t saved_mask; + cpumask_of_cpu_ptr(new_mask, cpu); saved_mask = current->cpus_allowed; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); + set_cpus_allowed_ptr(current, new_mask); 
if (smp_processor_id() != cpu) return 0; @@ -554,9 +555,11 @@ static int centrino_target (struct cpufreq_policy *policy, */ if (!cpus_empty(covered_cpus)) { + cpumask_of_cpu_ptr_declare(new_mask); + for_each_cpu_mask_nr(j, covered_cpus) { - set_cpus_allowed_ptr(current, - &cpumask_of_cpu(j)); + cpumask_of_cpu_ptr_next(new_mask, j); + set_cpus_allowed_ptr(current, new_mask); wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); } } diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c index 191f7263c61..2f3728dc24f 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c @@ -244,7 +244,8 @@ static unsigned int _speedstep_get(const cpumask_t *cpus) static unsigned int speedstep_get(unsigned int cpu) { - return _speedstep_get(&cpumask_of_cpu(cpu)); + cpumask_of_cpu_ptr(newmask, cpu); + return _speedstep_get(newmask); } /** diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index a7b0f8f1736..e4b8d189d7e 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -516,6 +516,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) unsigned long j; int retval; cpumask_t oldmask; + cpumask_of_cpu_ptr(newmask, cpu); if (num_cache_leaves == 0) return -ENOENT; @@ -526,7 +527,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) return -ENOMEM; oldmask = current->cpus_allowed; - retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); + retval = set_cpus_allowed_ptr(current, newmask); if (retval) goto out; diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c index 56b933119a0..58520169e35 100644 --- a/arch/x86/kernel/microcode.c +++ b/arch/x86/kernel/microcode.c @@ -388,6 +388,7 @@ static int do_microcode_update (void) void *new_mc = NULL; int cpu; cpumask_t old; + cpumask_of_cpu_ptr_declare(newmask); old = current->cpus_allowed; @@ -404,7 +405,8 @@ static int do_microcode_update (void) if (!uci->valid) continue; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); + cpumask_of_cpu_ptr_next(newmask, cpu); + set_cpus_allowed_ptr(current, newmask); error = get_maching_microcode(new_mc, cpu); if (error < 0) goto out; @@ -574,6 +576,7 @@ static int apply_microcode_check_cpu(int cpu) struct cpuinfo_x86 *c = &cpu_data(cpu); struct ucode_cpu_info *uci = ucode_cpu_info + cpu; cpumask_t old; + cpumask_of_cpu_ptr(newmask, cpu); unsigned int val[2]; int err = 0; @@ -582,7 +585,7 @@ static int apply_microcode_check_cpu(int cpu) return 0; old = current->cpus_allowed; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); + set_cpus_allowed_ptr(current, newmask); /* Check if the microcode we have in memory matches the CPU */ if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || @@ -620,11 +623,12 @@ static int apply_microcode_check_cpu(int cpu) static void microcode_init_cpu(int cpu, int resume) { cpumask_t old; + cpumask_of_cpu_ptr(newmask, cpu); struct ucode_cpu_info *uci = ucode_cpu_info + cpu; old = current->cpus_allowed; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); + set_cpus_allowed_ptr(current, newmask); mutex_lock(&microcode_mutex); collect_cpu_info(cpu); if (uci->valid && system_state == SYSTEM_RUNNING && !resume) @@ -656,11 +660,12 @@ static ssize_t reload_store(struct sys_device *dev, const char *buf, size_t sz) return -EINVAL; if (val == 1) { cpumask_t old; + cpumask_of_cpu_ptr(newmask, cpu); old = current->cpus_allowed; get_online_cpus(); - set_cpus_allowed_ptr(current,
&cpumask_of_cpu(cpu)); + set_cpus_allowed_ptr(current, newmask); mutex_lock(&microcode_mutex); if (uci->valid) diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index f8a62160e15..214bbdfc851 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -403,24 +403,28 @@ void native_machine_shutdown(void) { /* Stop the cpus and apics */ #ifdef CONFIG_SMP - int reboot_cpu_id; /* The boot cpu is always logical cpu 0 */ - reboot_cpu_id = 0; + int reboot_cpu_id = 0; + cpumask_of_cpu_ptr(newmask, reboot_cpu_id); #ifdef CONFIG_X86_32 /* See if there has been given a command line override */ if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) && - cpu_online(reboot_cpu)) + cpu_online(reboot_cpu)) { reboot_cpu_id = reboot_cpu; + cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id); + } #endif /* Make certain the cpu I'm about to reboot on is online */ - if (!cpu_online(reboot_cpu_id)) + if (!cpu_online(reboot_cpu_id)) { reboot_cpu_id = smp_processor_id(); + cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id); + } /* Make certain I only run on the appropriate processor */ - set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id)); + set_cpus_allowed_ptr(current, newmask); /* O.K Now that I'm on the appropriate processor, * stop all of the others. diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index a56fc6c4394..a2c3f9cfa54 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -827,6 +827,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) static int acpi_processor_get_throttling(struct acpi_processor *pr) { cpumask_t saved_mask; + cpumask_of_cpu_ptr_declare(new_mask); int ret; if (!pr) @@ -838,7 +839,8 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr) * Migrate task to the cpu pointed by pr. */ saved_mask = current->cpus_allowed; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); + cpumask_of_cpu_ptr_next(new_mask, pr->id); + set_cpus_allowed_ptr(current, new_mask); ret = pr->throttling.acpi_processor_get_throttling(pr); /* restore the previous state */ set_cpus_allowed_ptr(current, &saved_mask); @@ -987,6 +989,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr, int acpi_processor_set_throttling(struct acpi_processor *pr, int state) { cpumask_t saved_mask; + cpumask_of_cpu_ptr_declare(new_mask); int ret = 0; unsigned int i; struct acpi_processor *match_pr; @@ -1025,7 +1028,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) * it can be called only for the cpu pointed by pr. */ if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { - set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); + cpumask_of_cpu_ptr_next(new_mask, pr->id); + set_cpus_allowed_ptr(current, new_mask); ret = p_throttling->acpi_processor_set_throttling(pr, t_state.target_state); } else { @@ -1056,7 +1060,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) continue; } t_state.cpu = i; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); + cpumask_of_cpu_ptr_next(new_mask, i); + set_cpus_allowed_ptr(current, new_mask); ret = match_pr->throttling.
acpi_processor_set_throttling( match_pr, t_state.target_state); diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c index 25918f7dfd0..0b624e927a6 100644 --- a/drivers/firmware/dcdbas.c +++ b/drivers/firmware/dcdbas.c @@ -254,6 +254,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev, static int smi_request(struct smi_cmd *smi_cmd) { cpumask_t old_mask; + cpumask_of_cpu_ptr(new_mask, 0); int ret = 0; if (smi_cmd->magic != SMI_CMD_MAGIC) { @@ -264,7 +265,7 @@ static int smi_request(struct smi_cmd *smi_cmd) /* SMI requires CPU 0 */ old_mask = current->cpus_allowed; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(0)); + set_cpus_allowed_ptr(current, new_mask); if (smp_processor_id() != 0) { dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n", __func__); diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 80226e77614..2dbd9a287e7 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -62,6 +62,15 @@ * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids * * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set + *ifdef CONFIG_HAS_CPUMASK_OF_CPU + * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t *v + * cpumask_of_cpu_ptr_next(v, cpu) Sets v = &cpumask_of_cpu_map[cpu] + * cpumask_of_cpu_ptr(v, cpu) Combines above two operations + *else + * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t _v and *v = &_v + * cpumask_of_cpu_ptr_next(v, cpu) Sets _v = cpumask_of_cpu(cpu) + * cpumask_of_cpu_ptr(v, cpu) Combines above two operations + *endif * CPU_MASK_ALL Initializer - all bits set * CPU_MASK_NONE Initializer - no bits set * unsigned long *cpus_addr(mask) Array of unsigned long's in mask @@ -236,11 +245,16 @@ static inline void __cpus_shift_left(cpumask_t *dstp, #ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP extern cpumask_t *cpumask_of_cpu_map; -#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu]) - +#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu]) +#define cpumask_of_cpu_ptr(v, cpu) \ + const cpumask_t *v = &cpumask_of_cpu(cpu) +#define cpumask_of_cpu_ptr_declare(v) \ + const cpumask_t *v +#define cpumask_of_cpu_ptr_next(v, cpu) \ + v = &cpumask_of_cpu(cpu) #else #define cpumask_of_cpu(cpu) \ -(*({ \ +({ \ typeof(_unused_cpumask_arg_) m; \ if (sizeof(m) == sizeof(unsigned long)) { \ m.bits[0] = 1UL<<(cpu); \ @@ -248,8 +262,16 @@ extern cpumask_t *cpumask_of_cpu_map; cpus_clear(m); \ cpu_set((cpu), m); \ } \ - &m; \ -})) + m; \ +}) +#define cpumask_of_cpu_ptr(v, cpu) \ + cpumask_t _##v = cpumask_of_cpu(cpu); \ + const cpumask_t *v = &_##v +#define cpumask_of_cpu_ptr_declare(v) \ + cpumask_t _##v; \ + const cpumask_t *v = &_##v +#define cpumask_of_cpu_ptr_next(v, cpu) \ + _##v = cpumask_of_cpu(cpu) #endif #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index ba9b2054ecb..738b411ff2d 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -33,8 +33,9 @@ static int stopmachine(void *cpu) { int irqs_disabled = 0; int prepared = 0; + cpumask_of_cpu_ptr(cpumask, (int)(long)cpu); - set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu)); + set_cpus_allowed_ptr(current, cpumask); /* Ack: we are alive */ smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. 
*/ diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c index 2301e1e7c60..63528086337 100644 --- a/kernel/trace/trace_sysprof.c +++ b/kernel/trace/trace_sysprof.c @@ -213,7 +213,9 @@ static void start_stack_timers(void) int cpu; for_each_online_cpu(cpu) { - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); + cpumask_of_cpu_ptr(new_mask, cpu); + + set_cpus_allowed_ptr(current, new_mask); start_stack_timer(cpu); } set_cpus_allowed_ptr(current, &saved_mask); diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index d43cf8ddff6..083d1268813 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -314,9 +314,10 @@ svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask) case SVC_POOL_PERCPU: { unsigned int cpu = m->pool_to[pidx]; + cpumask_of_cpu_ptr(cpumask, cpu); *oldmask = current->cpus_allowed; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); + set_cpus_allowed_ptr(current, cpumask); return 1; } case SVC_POOL_PERNODE: -- cgit v1.2.3-70-g09d2 From cb6d2be60dc3ec9ac788f45d8e24b82a9faacdd9 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 15 Jul 2008 14:14:31 -0700 Subject: cpumask: Optimize cpumask_of_cpu in arch/x86/kernel/io_apic_64.c * Optimize various places where a pointer to the cpumask_of_cpu value will result in reducing stack pressure. Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic_64.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index bf27114773d..a85db790754 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c @@ -1372,12 +1372,10 @@ static unsigned int startup_ioapic_irq(unsigned int irq) static int ioapic_retrigger_irq(unsigned int irq) { struct irq_cfg *cfg = &irq_cfg[irq]; - cpumask_t mask; unsigned long flags; spin_lock_irqsave(&vector_lock, flags); - mask = cpumask_of_cpu(first_cpu(cfg->domain)); - send_IPI_mask(mask, cfg->vector); + send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector); spin_unlock_irqrestore(&vector_lock, flags); return 1; -- cgit v1.2.3-70-g09d2 From c42f4f4c6dab3b2b7768c36173ee7c7ecf79eddb Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 15 Jul 2008 14:14:32 -0700 Subject: cpumask: Optimize cpumask_of_cpu in arch/x86/kernel/ldt.c * Optimize various places where a pointer to the cpumask_of_cpu value will result in reducing stack pressure. Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- arch/x86/kernel/ldt.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index a8449571858..3fee2aa50f3 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -62,12 +62,12 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload) if (reload) { #ifdef CONFIG_SMP - cpumask_t mask; + cpumask_of_cpu_ptr_declare(mask); preempt_disable(); load_LDT(pc); - mask = cpumask_of_cpu(smp_processor_id()); - if (!cpus_equal(current->mm->cpu_vm_mask, mask)) + cpumask_of_cpu_ptr_next(mask, smp_processor_id()); + if (!cpus_equal(current->mm->cpu_vm_mask, *mask)) smp_call_function(flush_ldt, current->mm, 1); preempt_enable(); #else -- cgit v1.2.3-70-g09d2 From eb53fac5cafc4b2f8443ff064938b4494a28c54e Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 15 Jul 2008 14:14:37 -0700 Subject: cpumask: Use optimized CPUMASK_ALLOC macros in the centrino_target * Use the CPUMASK_ALLOC macros in the centrino_target() function. 
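The resulting shape, taken from the diff below: the four cpumask_t locals are bundled into one struct, which CPUMASK_ALLOC can place on the heap when NR_CPUS is large, so the allocation can now fail and must be checked:

        struct allmasks {
                cpumask_t       online_policy_cpus;
                cpumask_t       saved_mask;
                cpumask_t       set_mask;
                cpumask_t       covered_cpus;
        };

        CPUMASK_ALLOC(allmasks);                   /* kmalloc'ed when NR_CPUS is large */
        CPUMASK_VAR(online_policy_cpus, allmasks); /* cpumask_t *online_policy_cpus */
        ...
        if (unlikely(allmasks == NULL))
                return -ENOMEM;
        ...
        CPUMASK_FREE(allmasks);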
Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 73 +++++++++++++++--------- 1 file changed, 45 insertions(+), 28 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index fd561bb26c6..470c016cb25 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c @@ -442,6 +442,13 @@ static int centrino_verify (struct cpufreq_policy *policy) * * Sets a new CPUFreq policy. */ +struct allmasks { + cpumask_t online_policy_cpus; + cpumask_t saved_mask; + cpumask_t set_mask; + cpumask_t covered_cpus; +}; + static int centrino_target (struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) @@ -449,48 +456,55 @@ static int centrino_target (struct cpufreq_policy *policy, unsigned int newstate = 0; unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu; struct cpufreq_freqs freqs; - cpumask_t online_policy_cpus; - cpumask_t saved_mask; - cpumask_t set_mask; - cpumask_t covered_cpus; int retval = 0; unsigned int j, k, first_cpu, tmp; - - if (unlikely(centrino_model[cpu] == NULL)) - return -ENODEV; + CPUMASK_ALLOC(allmasks); + CPUMASK_VAR(online_policy_cpus, allmasks); + CPUMASK_VAR(saved_mask, allmasks); + CPUMASK_VAR(set_mask, allmasks); + CPUMASK_VAR(covered_cpus, allmasks); + + if (unlikely(allmasks == NULL)) + return -ENOMEM; + + if (unlikely(centrino_model[cpu] == NULL)) { + retval = -ENODEV; + goto out; + } if (unlikely(cpufreq_frequency_table_target(policy, centrino_model[cpu]->op_points, target_freq, relation, &newstate))) { - return -EINVAL; + retval = -EINVAL; + goto out; } #ifdef CONFIG_HOTPLUG_CPU /* cpufreq holds the hotplug lock, so we are safe from here on */ - cpus_and(online_policy_cpus, cpu_online_map, policy->cpus); + cpus_and(*online_policy_cpus, cpu_online_map, policy->cpus); #else - online_policy_cpus = policy->cpus; + *online_policy_cpus = policy->cpus; #endif - saved_mask = current->cpus_allowed; + *saved_mask = current->cpus_allowed; first_cpu = 1; - cpus_clear(covered_cpus); - for_each_cpu_mask_nr(j, online_policy_cpus) { + cpus_clear(*covered_cpus); + for_each_cpu_mask_nr(j, *online_policy_cpus) { /* * Support for SMP systems. 
* Make sure we are running on CPU that wants to change freq */ - cpus_clear(set_mask); + cpus_clear(*set_mask); if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) - cpus_or(set_mask, set_mask, online_policy_cpus); + cpus_or(*set_mask, *set_mask, *online_policy_cpus); else - cpu_set(j, set_mask); + cpu_set(j, *set_mask); - set_cpus_allowed_ptr(current, &set_mask); + set_cpus_allowed_ptr(current, set_mask); preempt_disable(); - if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) { + if (unlikely(!cpu_isset(smp_processor_id(), *set_mask))) { dprintk("couldn't limit to CPUs in this domain\n"); retval = -EAGAIN; if (first_cpu) { @@ -518,7 +532,7 @@ static int centrino_target (struct cpufreq_policy *policy, dprintk("target=%dkHz old=%d new=%d msr=%04x\n", target_freq, freqs.old, freqs.new, msr); - for_each_cpu_mask_nr(k, online_policy_cpus) { + for_each_cpu_mask_nr(k, *online_policy_cpus) { freqs.cpu = k; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); @@ -537,11 +551,11 @@ static int centrino_target (struct cpufreq_policy *policy, break; } - cpu_set(j, covered_cpus); + cpu_set(j, *covered_cpus); preempt_enable(); } - for_each_cpu_mask_nr(k, online_policy_cpus) { + for_each_cpu_mask_nr(k, *online_policy_cpus) { freqs.cpu = k; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } @@ -554,10 +568,10 @@ static int centrino_target (struct cpufreq_policy *policy, * Best effort undo.. */ - if (!cpus_empty(covered_cpus)) { + if (!cpus_empty(*covered_cpus)) { cpumask_of_cpu_ptr_declare(new_mask); - for_each_cpu_mask_nr(j, covered_cpus) { + for_each_cpu_mask_nr(j, *covered_cpus) { cpumask_of_cpu_ptr_next(new_mask, j); set_cpus_allowed_ptr(current, new_mask); wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); @@ -567,19 +581,22 @@ static int centrino_target (struct cpufreq_policy *policy, tmp = freqs.new; freqs.new = freqs.old; freqs.old = tmp; - for_each_cpu_mask_nr(j, online_policy_cpus) { + for_each_cpu_mask_nr(j, *online_policy_cpus) { freqs.cpu = j; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } } - set_cpus_allowed_ptr(current, &saved_mask); - return 0; + set_cpus_allowed_ptr(current, saved_mask); + retval = 0; + goto out; migrate_end: preempt_enable(); - set_cpus_allowed_ptr(current, &saved_mask); - return 0; + set_cpus_allowed_ptr(current, saved_mask); +out: + CPUMASK_FREE(allmasks); + return retval; } static struct freq_attr* centrino_attr[] = { -- cgit v1.2.3-70-g09d2 From 91467bdf6e53058af13fd255375d6634ba0c70e0 Mon Sep 17 00:00:00 2001 From: Bernhard Walle Date: Fri, 18 Jul 2008 19:07:53 +0200 Subject: x86: move dma32_reserve_bootmem() after reserve_crashkernel() On an x86-64 machine (nothing special I could encounter) I had the problem that crashkernel reservation with the usual "64M@16M" failed. While debugging that, I encountered that dma32_reserve_bootmem() reserves a memory region which is in that area. Because dma32_reserve_bootmem() does not rely on a specific offset but crashkernel does, it makes sense to move the dma32_reserve_bootmem() reservation down a bit. I tested that patch and it works without problems. I don't see any negative effects of that move, but maybe I overlooked something ... While we strictly don't need that patch in 2.6.27 because we have the automatic, dynamic offset detection, it makes sense to also include it here because: - it's easier to get it in -stable then, - many people are still used to the 'crashkernel=...@16M' syntax, - not everybody may be using a relocatable kernel.
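Schematically, with the traditional "crashkernel=64M@16M" request, the old ordering could fail like this (layout simplified; the dma32 block lands wherever bootmem places it):

        0M        16M                              80M
        |---------|===== crashkernel window ======|------
                   dma32_reserve_bootmem() ran first and could hand out
                   pages inside [16M, 80M), so the fixed-offset
                   crashkernel reservation failed.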
Signed-off-by: Bernhard Walle Cc: kexec@lists.infradead.org Cc: vgoyal@redhat.com Cc: akpm@linux-foundation.org Cc: Bernhard Walle Cc: yhlu.kernel@gmail.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 531b55b8e81..74d110ef269 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -792,10 +792,6 @@ void __init setup_arch(char **cmdline_p) initmem_init(0, max_pfn); -#ifdef CONFIG_X86_64 - dma32_reserve_bootmem(); -#endif - #ifdef CONFIG_ACPI_SLEEP /* * Reserve low memory region for sleep support. @@ -810,6 +806,15 @@ void __init setup_arch(char **cmdline_p) #endif reserve_crashkernel(); +#ifdef CONFIG_X86_64 + /* + * dma32_reserve_bootmem() allocates bootmem which may conflict + * with the crashkernel command line, so do that after + * reserve_crashkernel() + */ + dma32_reserve_bootmem(); +#endif + reserve_ibft_region(); #ifdef CONFIG_KVM_CLOCK -- cgit v1.2.3-70-g09d2 From 6bca67f951f80b9e61078f8cdf5fb7b3d9e51aa9 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Fri, 18 Jul 2008 18:11:27 -0700 Subject: NR_CPUS: Replace NR_CPUS in arch/x86/kernel/cpu/mcheck/mce_64.c * nr_cpu_ids should be used to allocate arrays based on the number of cpu's present. Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/mce_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c index c4a7ec31394..2fe06ab5c54 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c @@ -580,7 +580,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, char __user *buf = ubuf; int i, err; - cpu_tsc = kmalloc(NR_CPUS * sizeof(long), GFP_KERNEL); + cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL); if (!cpu_tsc) return -ENOMEM; -- cgit v1.2.3-70-g09d2 From f2ad47ffeb1d292b7c7d1e2f6aedb37646c391db Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Fri, 18 Jul 2008 18:11:28 -0700 Subject: NR_CPUS: Replace NR_CPUS in arch/x86/kernel/cpu/proc.c * Use nr_cpu_ids instead of NR_CPUS to limit traversal of cpu online map. Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/proc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 0d0d9057e7c..a26c480b949 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -160,7 +160,7 @@ static void *c_start(struct seq_file *m, loff_t *pos) { if (*pos == 0) /* just in case, cpu 0 is not the first */ *pos = first_cpu(cpu_online_map); - if ((*pos) < NR_CPUS && cpu_online(*pos)) + if ((*pos) < nr_cpu_ids && cpu_online(*pos)) return &cpu_data(*pos); return NULL; } -- cgit v1.2.3-70-g09d2 From 247bc6ca0f691e4617e7bdb70cbaccc939f754ec Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Fri, 18 Jul 2008 18:11:29 -0700 Subject: NR_CPUS: Replace NR_CPUS in arch/x86/kernel/genx2apic_uv_x.c * Replace NR_CPUS loop with for_each_possible_cpu(). * nr_cpu_ids should be used to determine if a percpu area is available for a given cpu. 
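In sketch form (the full hunk follows), the fixed-bound scan becomes the possible-cpu iterator, and the percpu lookup is bounded by nr_cpu_ids:

        /* before: walks all NR_CPUS slots, even on small machines */
        for (cpu = 0; cpu < NR_CPUS; ++cpu)
                if (cpu_isset(cpu, mask))
                        uv_send_IPI_one(cpu, vector);

        /* after: walks only cpus that can ever be present */
        for_each_possible_cpu(cpu)
                if (cpu_isset(cpu, mask))
                        uv_send_IPI_one(cpu, vector);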
Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- arch/x86/kernel/genx2apic_uv_x.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 711f11c30b0..3e5d7b8698f 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -94,7 +94,7 @@ static void uv_send_IPI_mask(cpumask_t mask, int vector) { unsigned int cpu; - for (cpu = 0; cpu < NR_CPUS; ++cpu) + for_each_possible_cpu(cpu) if (cpu_isset(cpu, mask)) uv_send_IPI_one(cpu, vector); } @@ -128,7 +128,7 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) * May as well be the first. */ cpu = first_cpu(cpumask); - if ((unsigned)cpu < NR_CPUS) + if ((unsigned)cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_apicid, cpu); else return BAD_APICID; -- cgit v1.2.3-70-g09d2 From 1bd9d6b64e1474f1a03f8660e8721d746cffae57 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Fri, 18 Jul 2008 18:11:30 -0700 Subject: NR_CPUS: Replace NR_CPUS in arch/x86/kernel/genapic_flat_64.c * nr_cpu_ids should be used to determine if a percpu area is available for a given cpu. Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- arch/x86/kernel/genapic_flat_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 1a9c68845ee..786548a62d3 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -168,7 +168,7 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask) * May as well be the first. */ cpu = first_cpu(cpumask); - if ((unsigned)cpu < NR_CPUS) + if ((unsigned)cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_apicid, cpu); else return BAD_APICID; -- cgit v1.2.3-70-g09d2 From c4762aba0b1f72659aae9ce37b772ca8bd8f06f4 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Fri, 18 Jul 2008 18:11:34 -0700 Subject: NR_CPUS: Replace NR_CPUS in speedstep-centrino.c Some cleanups in speedstep-centrino.c for NR_CPUS=4096. * Use new CPUMASK_PTR (instead of old CPUMASK_VAR). * Replace arrays sized by NR_CPUS with percpu variables. * Clean up some formatting problems (>80 chars per line) and other checkpatch complaints. Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 85 ++++++++++++++---------- 1 file changed, 51 insertions(+), 34 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index 470c016cb25..ca2ac13b7af 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c @@ -28,7 +28,8 @@ #define PFX "speedstep-centrino: " #define MAINTAINER "cpufreq@lists.linux.org.uk" -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg) +#define dprintk(msg...)
\ + cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg) #define INTEL_MSR_RANGE (0xffff) @@ -66,11 +67,12 @@ struct cpu_model struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */ }; -static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x); +static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, + const struct cpu_id *x); /* Operating points for current CPU */ -static struct cpu_model *centrino_model[NR_CPUS]; -static const struct cpu_id *centrino_cpu[NR_CPUS]; +static DEFINE_PER_CPU(struct cpu_model *, centrino_model); +static DEFINE_PER_CPU(const struct cpu_id *, centrino_cpu); static struct cpufreq_driver centrino_driver; @@ -255,7 +257,7 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy) return -ENOENT; } - centrino_model[policy->cpu] = model; + per_cpu(centrino_model, policy->cpu) = model; dprintk("found \"%s\": max frequency: %dkHz\n", model->model_name, model->max_freq); @@ -264,10 +266,14 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy) } #else -static inline int centrino_cpu_init_table(struct cpufreq_policy *policy) { return -ENODEV; } +static inline int centrino_cpu_init_table(struct cpufreq_policy *policy) +{ + return -ENODEV; +} #endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */ -static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x) +static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, + const struct cpu_id *x) { if ((c->x86 == x->x86) && (c->x86_model == x->x86_model) && @@ -286,23 +292,28 @@ static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe) * for centrino, as some DSDTs are buggy. * Ideally, this can be done using the acpi_data structure. */ - if ((centrino_cpu[cpu] == &cpu_ids[CPU_BANIAS]) || - (centrino_cpu[cpu] == &cpu_ids[CPU_DOTHAN_A1]) || - (centrino_cpu[cpu] == &cpu_ids[CPU_DOTHAN_B0])) { + if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) || + (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) || + (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) { msr = (msr >> 8) & 0xff; return msr * 100000; } - if ((!centrino_model[cpu]) || (!centrino_model[cpu]->op_points)) + if ((!per_cpu(centrino_model, cpu)) || + (!per_cpu(centrino_model, cpu)->op_points)) return 0; msr &= 0xffff; - for (i=0;centrino_model[cpu]->op_points[i].frequency != CPUFREQ_TABLE_END; i++) { - if (msr == centrino_model[cpu]->op_points[i].index) - return centrino_model[cpu]->op_points[i].frequency; + for (i = 0; + per_cpu(centrino_model, cpu)->op_points[i].frequency + != CPUFREQ_TABLE_END; + i++) { + if (msr == per_cpu(centrino_model, cpu)->op_points[i].index) + return per_cpu(centrino_model, cpu)-> + op_points[i].frequency; } if (failsafe) - return centrino_model[cpu]->op_points[i-1].frequency; + return per_cpu(centrino_model, cpu)->op_points[i-1].frequency; else return 0; } @@ -348,7 +359,8 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) int i; /* Only Intel makes Enhanced Speedstep-capable CPUs */ - if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST)) + if (cpu->x86_vendor != X86_VENDOR_INTEL || + !cpu_has(cpu, X86_FEATURE_EST)) return -ENODEV; if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) @@ -362,9 +374,9 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) break; if (i != N_IDS) - centrino_cpu[policy->cpu] = &cpu_ids[i]; + per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i]; - if (!centrino_cpu[policy->cpu]) { + if (!per_cpu(centrino_cpu, policy->cpu)) { 
dprintk("found unsupported CPU with " "Enhanced SpeedStep: send /proc/cpuinfo to " MAINTAINER "\n"); @@ -387,23 +399,26 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) /* check to see if it stuck */ rdmsr(MSR_IA32_MISC_ENABLE, l, h); if (!(l & (1<<16))) { - printk(KERN_INFO PFX "couldn't enable Enhanced SpeedStep\n"); + printk(KERN_INFO PFX + "couldn't enable Enhanced SpeedStep\n"); return -ENODEV; } } freq = get_cur_freq(policy->cpu); - - policy->cpuinfo.transition_latency = 10000; /* 10uS transition latency */ + policy->cpuinfo.transition_latency = 10000; + /* 10uS transition latency */ policy->cur = freq; dprintk("centrino_cpu_init: cur=%dkHz\n", policy->cur); - ret = cpufreq_frequency_table_cpuinfo(policy, centrino_model[policy->cpu]->op_points); + ret = cpufreq_frequency_table_cpuinfo(policy, + per_cpu(centrino_model, policy->cpu)->op_points); if (ret) return (ret); - cpufreq_frequency_table_get_attr(centrino_model[policy->cpu]->op_points, policy->cpu); + cpufreq_frequency_table_get_attr( + per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu); return 0; } @@ -412,12 +427,12 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy) { unsigned int cpu = policy->cpu; - if (!centrino_model[cpu]) + if (!per_cpu(centrino_model, cpu)) return -ENODEV; cpufreq_frequency_table_put_attr(cpu); - centrino_model[cpu] = NULL; + per_cpu(centrino_model, cpu) = NULL; return 0; } @@ -431,14 +446,16 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy) */ static int centrino_verify (struct cpufreq_policy *policy) { - return cpufreq_frequency_table_verify(policy, centrino_model[policy->cpu]->op_points); + return cpufreq_frequency_table_verify(policy, + per_cpu(centrino_model, policy->cpu)->op_points); } /** * centrino_setpolicy - set a new CPUFreq policy * @policy: new policy * @target_freq: the target frequency - * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) + * @relation: how that frequency relates to achieved frequency + * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) * * Sets a new CPUFreq policy. 
*/ @@ -459,21 +476,21 @@ static int centrino_target (struct cpufreq_policy *policy, int retval = 0; unsigned int j, k, first_cpu, tmp; CPUMASK_ALLOC(allmasks); - CPUMASK_VAR(online_policy_cpus, allmasks); - CPUMASK_VAR(saved_mask, allmasks); - CPUMASK_VAR(set_mask, allmasks); - CPUMASK_VAR(covered_cpus, allmasks); + CPUMASK_PTR(online_policy_cpus, allmasks); + CPUMASK_PTR(saved_mask, allmasks); + CPUMASK_PTR(set_mask, allmasks); + CPUMASK_PTR(covered_cpus, allmasks); if (unlikely(allmasks == NULL)) return -ENOMEM; - if (unlikely(centrino_model[cpu] == NULL)) { + if (unlikely(per_cpu(centrino_model, cpu) == NULL)) { retval = -ENODEV; goto out; } if (unlikely(cpufreq_frequency_table_target(policy, - centrino_model[cpu]->op_points, + per_cpu(centrino_model, cpu)->op_points, target_freq, relation, &newstate))) { @@ -515,7 +532,7 @@ static int centrino_target (struct cpufreq_policy *policy, break; } - msr = centrino_model[cpu]->op_points[newstate].index; + msr = per_cpu(centrino_model, cpu)->op_points[newstate].index; if (first_cpu) { rdmsr(MSR_IA32_PERF_CTL, oldmsr, h); -- cgit v1.2.3-70-g09d2 From 31656519e132f6612584815f128c83976a9aaaef Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 18 Jul 2008 18:01:23 +0200 Subject: sched, x86: clean up hrtick implementation random uvesafb failures were reported against Gentoo: http://bugs.gentoo.org/show_bug.cgi?id=222799 and Mihai Moldovan bisected it back to: > 8f4d37ec073c17e2d4aa8851df5837d798606d6f is first bad commit > commit 8f4d37ec073c17e2d4aa8851df5837d798606d6f > Author: Peter Zijlstra > Date: Fri Jan 25 21:08:29 2008 +0100 > > sched: high-res preemption tick Linus suspected it to be hrtick + vm86 interaction and observed: > Btw, Peter, Ingo: I think that commit is doing bad things. They aren't > _incorrect_ per se, but they are definitely bad. > > Why? > > Using random _TIF_WORK_MASK flags is really impolite for doing > "scheduling" work. There's a reason that arch/x86/kernel/entry_32.S > special-cases the _TIF_NEED_RESCHED flag: we don't want to exit out of > vm86 mode unnecessarily. > > See the "work_notifysig_v86" label, and how it does that > "save_v86_state()" thing etc etc. Right, I never liked having to fiddle with those TIF flags. Initially I needed it because the hrtimer base lock could not nest in the rq lock. That however is fixed these days. Currently the only reason left to fiddle with the TIF flags is remote wakeups. We cannot program a remote cpu's hrtimer. I've been thinking about using the new and improved IPI function call stuff to implement hrtimer_start_on(). However that does require that smp_call_function_single(.wait=0) works from interrupt context - /me looks at the latest series from Jens - Yes that does seem to be supported, good. Here's a stab at cleaning this stuff up ... Mihai reported test success as well. 
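
[Editor's note: for reference, here is the remote-arming pattern the patch settles on, condensed from the diff that follows -- the field and function names are the diff's own, but this is only a sketch, with the init and locking context abbreviated. One CPU cannot program another CPU's clock event device directly, so a per-runqueue call_single_data carries the request over a non-waiting IPI and the target CPU arms its own hrtimer.]

	/* One call_single_data per runqueue, set up once in init_rq_hrtick(): */
	rq->hrtick_csd_pending = 0;
	rq->hrtick_csd.flags = 0;		/* no wait flag: sender never spins */
	rq->hrtick_csd.func = __hrtick_start;	/* runs on cpu_of(rq), IPI context */
	rq->hrtick_csd.info = rq;

	/* In hrtick_start(), called with rq->lock held and irqs disabled: */
	if (rq == this_rq()) {
		hrtimer_restart(&rq->hrtick_timer);	/* local CPU: arm directly */
	} else if (!rq->hrtick_csd_pending) {
		/* remote CPU: at most one non-waiting IPI in flight per rq */
		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd);
		rq->hrtick_csd_pending = 1;	/* cleared by __hrtick_start() */
	}

[Leaving the flags at zero is the smp_call_function_single(.wait=0) behaviour the message above refers to: because the sender does not wait for completion, the call is safe from the contexts where hrtick_start() runs.]
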
Signed-off-by: Peter Zijlstra Tested-by: Mihai Moldovan Cc: Michal Januszewski Cc: Antonino Daplas Signed-off-by: Ingo Molnar --- arch/x86/kernel/signal_32.c | 3 - arch/x86/kernel/signal_64.c | 3 - include/asm-x86/thread_info.h | 4 +- kernel/Kconfig.hz | 2 +- kernel/sched.c | 202 +++++++++++++----------------------------- kernel/sched_fair.c | 5 +- 6 files changed, 64 insertions(+), 155 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index d9237363096..e1fc7bd57bf 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c @@ -667,8 +667,5 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs); - if (thread_info_flags & _TIF_HRTICK_RESCHED) - hrtick_resched(); - clear_thread_flag(TIF_IRET); } diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c index e53b267662e..88023fcc704 100644 --- a/arch/x86/kernel/signal_64.c +++ b/arch/x86/kernel/signal_64.c @@ -502,9 +502,6 @@ void do_notify_resume(struct pt_regs *regs, void *unused, /* deal with pending signal delivery */ if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs); - - if (thread_info_flags & _TIF_HRTICK_RESCHED) - hrtick_resched(); } void signal_fault(struct pt_regs *regs, void __user *frame, char *where) diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h index 895339d2bc0..d7012634ace 100644 --- a/include/asm-x86/thread_info.h +++ b/include/asm-x86/thread_info.h @@ -81,7 +81,6 @@ struct thread_info { #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ -#define TIF_HRTICK_RESCHED 11 /* reprogram hrtick timer */ #define TIF_NOTSC 16 /* TSC is not accessible in userland */ #define TIF_IA32 17 /* 32bit process */ #define TIF_FORK 18 /* ret_from_fork */ @@ -108,7 +107,6 @@ struct thread_info { #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) -#define _TIF_HRTICK_RESCHED (1 << TIF_HRTICK_RESCHED) #define _TIF_NOTSC (1 << TIF_NOTSC) #define _TIF_IA32 (1 << TIF_IA32) #define _TIF_FORK (1 << TIF_FORK) @@ -132,7 +130,7 @@ struct thread_info { /* Only used for 64 bit */ #define _TIF_DO_NOTIFY_MASK \ - (_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED) + (_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY) /* flags to check in __switch_to() */ #define _TIF_WORK_CTXSW \ diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz index 526128a2e62..2a202a84675 100644 --- a/kernel/Kconfig.hz +++ b/kernel/Kconfig.hz @@ -55,4 +55,4 @@ config HZ default 1000 if HZ_1000 config SCHED_HRTICK - def_bool HIGH_RES_TIMERS && X86 + def_bool HIGH_RES_TIMERS diff --git a/kernel/sched.c b/kernel/sched.c index 1ee18dbb451..c13c75e9f9f 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -571,8 +571,10 @@ struct rq { #endif #ifdef CONFIG_SCHED_HRTICK - unsigned long hrtick_flags; - ktime_t hrtick_expire; +#ifdef CONFIG_SMP + int hrtick_csd_pending; + struct call_single_data hrtick_csd; +#endif struct hrtimer hrtick_timer; #endif @@ -983,13 +985,6 @@ static struct rq *this_rq_lock(void) return rq; } -static void __resched_task(struct task_struct *p, int tif_bit); - -static inline void resched_task(struct task_struct *p) -{ - __resched_task(p, TIF_NEED_RESCHED); -} - #ifdef CONFIG_SCHED_HRTICK /* * Use HR-timers to deliver accurate preemption points. 
@@ -1001,25 +996,6 @@ static inline void resched_task(struct task_struct *p) * When we get rescheduled we reprogram the hrtick_timer outside of the * rq->lock. */ -static inline void resched_hrt(struct task_struct *p) -{ - __resched_task(p, TIF_HRTICK_RESCHED); -} - -static inline void resched_rq(struct rq *rq) -{ - unsigned long flags; - - spin_lock_irqsave(&rq->lock, flags); - resched_task(rq->curr); - spin_unlock_irqrestore(&rq->lock, flags); -} - -enum { - HRTICK_SET, /* re-programm hrtick_timer */ - HRTICK_RESET, /* not a new slice */ - HRTICK_BLOCK, /* stop hrtick operations */ -}; /* * Use hrtick when: @@ -1030,72 +1006,17 @@ static inline int hrtick_enabled(struct rq *rq) { if (!sched_feat(HRTICK)) return 0; - if (unlikely(test_bit(HRTICK_BLOCK, &rq->hrtick_flags))) + if (!cpu_online(cpu_of(rq))) return 0; return hrtimer_is_hres_active(&rq->hrtick_timer); } -/* - * Called to set the hrtick timer state. - * - * called with rq->lock held and irqs disabled - */ -static void hrtick_start(struct rq *rq, u64 delay, int reset) -{ - assert_spin_locked(&rq->lock); - - /* - * preempt at: now + delay - */ - rq->hrtick_expire = - ktime_add_ns(rq->hrtick_timer.base->get_time(), delay); - /* - * indicate we need to program the timer - */ - __set_bit(HRTICK_SET, &rq->hrtick_flags); - if (reset) - __set_bit(HRTICK_RESET, &rq->hrtick_flags); - - /* - * New slices are called from the schedule path and don't need a - * forced reschedule. - */ - if (reset) - resched_hrt(rq->curr); -} - static void hrtick_clear(struct rq *rq) { if (hrtimer_active(&rq->hrtick_timer)) hrtimer_cancel(&rq->hrtick_timer); } -/* - * Update the timer from the possible pending state. - */ -static void hrtick_set(struct rq *rq) -{ - ktime_t time; - int set, reset; - unsigned long flags; - - WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); - - spin_lock_irqsave(&rq->lock, flags); - set = __test_and_clear_bit(HRTICK_SET, &rq->hrtick_flags); - reset = __test_and_clear_bit(HRTICK_RESET, &rq->hrtick_flags); - time = rq->hrtick_expire; - clear_thread_flag(TIF_HRTICK_RESCHED); - spin_unlock_irqrestore(&rq->lock, flags); - - if (set) { - hrtimer_start(&rq->hrtick_timer, time, HRTIMER_MODE_ABS); - if (reset && !hrtimer_active(&rq->hrtick_timer)) - resched_rq(rq); - } else - hrtick_clear(rq); -} - /* * High-resolution timer tick. * Runs from hardirq context with interrupts disabled. @@ -1115,27 +1036,37 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer) } #ifdef CONFIG_SMP -static void hotplug_hrtick_disable(int cpu) +/* + * called from hardirq (IPI) context + */ +static void __hrtick_start(void *arg) { - struct rq *rq = cpu_rq(cpu); - unsigned long flags; - - spin_lock_irqsave(&rq->lock, flags); - rq->hrtick_flags = 0; - __set_bit(HRTICK_BLOCK, &rq->hrtick_flags); - spin_unlock_irqrestore(&rq->lock, flags); + struct rq *rq = arg; - hrtick_clear(rq); + spin_lock(&rq->lock); + hrtimer_restart(&rq->hrtick_timer); + rq->hrtick_csd_pending = 0; + spin_unlock(&rq->lock); } -static void hotplug_hrtick_enable(int cpu) +/* + * Called to set the hrtick timer state. 
+ * + * called with rq->lock held and irqs disabled + */ +static void hrtick_start(struct rq *rq, u64 delay) { - struct rq *rq = cpu_rq(cpu); - unsigned long flags; + struct hrtimer *timer = &rq->hrtick_timer; + ktime_t time = ktime_add_ns(timer->base->get_time(), delay); - spin_lock_irqsave(&rq->lock, flags); - __clear_bit(HRTICK_BLOCK, &rq->hrtick_flags); - spin_unlock_irqrestore(&rq->lock, flags); + timer->expires = time; + + if (rq == this_rq()) { + hrtimer_restart(timer); + } else if (!rq->hrtick_csd_pending) { + __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd); + rq->hrtick_csd_pending = 1; + } } static int @@ -1150,16 +1081,7 @@ hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu) case CPU_DOWN_PREPARE_FROZEN: case CPU_DEAD: case CPU_DEAD_FROZEN: - hotplug_hrtick_disable(cpu); - return NOTIFY_OK; - - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - case CPU_DOWN_FAILED: - case CPU_DOWN_FAILED_FROZEN: - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - hotplug_hrtick_enable(cpu); + hrtick_clear(cpu_rq(cpu)); return NOTIFY_OK; } @@ -1170,46 +1092,45 @@ static void init_hrtick(void) { hotcpu_notifier(hotplug_hrtick, 0); } -#endif /* CONFIG_SMP */ +#else +/* + * Called to set the hrtick timer state. + * + * called with rq->lock held and irqs disabled + */ +static void hrtick_start(struct rq *rq, u64 delay) +{ + hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL); +} -static void init_rq_hrtick(struct rq *rq) +static void init_hrtick(void) { - rq->hrtick_flags = 0; - hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - rq->hrtick_timer.function = hrtick; - rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; } +#endif /* CONFIG_SMP */ -void hrtick_resched(void) +static void init_rq_hrtick(struct rq *rq) { - struct rq *rq; - unsigned long flags; +#ifdef CONFIG_SMP + rq->hrtick_csd_pending = 0; - if (!test_thread_flag(TIF_HRTICK_RESCHED)) - return; + rq->hrtick_csd.flags = 0; + rq->hrtick_csd.func = __hrtick_start; + rq->hrtick_csd.info = rq; +#endif - local_irq_save(flags); - rq = cpu_rq(smp_processor_id()); - hrtick_set(rq); - local_irq_restore(flags); + hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + rq->hrtick_timer.function = hrtick; + rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; } #else static inline void hrtick_clear(struct rq *rq) { } -static inline void hrtick_set(struct rq *rq) -{ -} - static inline void init_rq_hrtick(struct rq *rq) { } -void hrtick_resched(void) -{ -} - static inline void init_hrtick(void) { } @@ -1228,16 +1149,16 @@ static inline void init_hrtick(void) #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) #endif -static void __resched_task(struct task_struct *p, int tif_bit) +static void resched_task(struct task_struct *p) { int cpu; assert_spin_locked(&task_rq(p)->lock); - if (unlikely(test_tsk_thread_flag(p, tif_bit))) + if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED))) return; - set_tsk_thread_flag(p, tif_bit); + set_tsk_thread_flag(p, TIF_NEED_RESCHED); cpu = task_cpu(p); if (cpu == smp_processor_id()) @@ -1303,10 +1224,10 @@ void wake_up_idle_cpu(int cpu) #endif /* CONFIG_NO_HZ */ #else /* !CONFIG_SMP */ -static void __resched_task(struct task_struct *p, int tif_bit) +static void resched_task(struct task_struct *p) { assert_spin_locked(&task_rq(p)->lock); - set_tsk_thread_flag(p, tif_bit); + set_tsk_need_resched(p); } #endif /* CONFIG_SMP */ @@ -4395,7 +4316,7 @@ asmlinkage void __sched schedule(void) struct task_struct *prev, 
*next; unsigned long *switch_count; struct rq *rq; - int cpu, hrtick = sched_feat(HRTICK); + int cpu; need_resched: preempt_disable(); @@ -4410,7 +4331,7 @@ need_resched_nonpreemptible: schedule_debug(prev); - if (hrtick) + if (sched_feat(HRTICK)) hrtick_clear(rq); /* @@ -4457,9 +4378,6 @@ need_resched_nonpreemptible: } else spin_unlock_irq(&rq->lock); - if (hrtick) - hrtick_set(rq); - if (unlikely(reacquire_kernel_lock(current) < 0)) goto need_resched_nonpreemptible; diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index f2aa987027d..6893b3ed65f 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -878,7 +878,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) #ifdef CONFIG_SCHED_HRTICK static void hrtick_start_fair(struct rq *rq, struct task_struct *p) { - int requeue = rq->curr == p; struct sched_entity *se = &p->se; struct cfs_rq *cfs_rq = cfs_rq_of(se); @@ -899,10 +898,10 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) * Don't schedule slices shorter than 10000ns, that just * doesn't make sense. Rely on vruntime for fairness. */ - if (!requeue) + if (rq->curr != p) delta = max(10000LL, delta); - hrtick_start(rq, delta, requeue); + hrtick_start(rq, delta); } } #else /* !CONFIG_SCHED_HRTICK */ -- cgit v1.2.3-70-g09d2 From 5171c3047df9d5b5183b2b179aa797a5aed8369b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 21 Jul 2008 21:58:34 +0200 Subject: x86: move the last Dprintk instance to pr_debug() Signed-off-by: Thomas Gleixner --- arch/x86/mach-es7000/es7000plat.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mach-es7000/es7000plat.c b/arch/x86/mach-es7000/es7000plat.c index 4354ce80488..50189af14b8 100644 --- a/arch/x86/mach-es7000/es7000plat.c +++ b/arch/x86/mach-es7000/es7000plat.c @@ -130,10 +130,10 @@ parse_unisys_oem (char *oemptr) mip_addr = val; mip = (struct mip_reg *)val; mip_reg = __va(mip); - Dprintk("es7000_mipcfg: host_reg = 0x%lx \n", - (unsigned long)host_reg); - Dprintk("es7000_mipcfg: mip_reg = 0x%lx \n", - (unsigned long)mip_reg); + pr_debug("es7000_mipcfg: host_reg = 0x%lx \n", + (unsigned long)host_reg); + pr_debug("es7000_mipcfg: mip_reg = 0x%lx \n", + (unsigned long)mip_reg); success++; break; case MIP_PSAI_REG: -- cgit v1.2.3-70-g09d2 From 3bfd49c8ab1859ae0f5fa1df2b3781c99115f442 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 21 May 2008 12:52:33 -0700 Subject: device create: x86: convert device_create to device_create_drvdata device_create() is race-prone, so use the race-free device_create_drvdata() instead, as device_create() is going away. Cc: Thomas Gleixner Cc: Ingo Molnar Cc: H. Peter Anvin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpuid.c | 4 ++-- arch/x86/kernel/msr.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 2de5fa2bbf7..14b11b3be31 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c @@ -141,8 +141,8 @@ static __cpuinit int cpuid_device_create(int cpu) { struct device *dev; - dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu), - "cpu%d", cpu); + dev = device_create_drvdata(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu), + NULL, "cpu%d", cpu); return IS_ERR(dev) ?
PTR_ERR(dev) : 0; } diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index a153b3905f6..9fd80955244 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -149,8 +149,8 @@ static int __cpuinit msr_device_create(int cpu) { struct device *dev; - dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, cpu), - "msr%d", cpu); + dev = device_create_drvdata(msr_class, NULL, MKDEV(MSR_MAJOR, cpu), + NULL, "msr%d", cpu); return IS_ERR(dev) ? PTR_ERR(dev) : 0; } -- cgit v1.2.3-70-g09d2 From fc3a8828b139c24aade3f9d608775e36c248f8f5 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 2 May 2008 06:02:41 +0200 Subject: driver core: fix a lot of printk usages of bus_id We have the dev_printk() variants for this kind of thing, use them instead of directly trying to access the bus_id field of struct device. This is done in order to remove bus_id entirely. Cc: Kay Sievers Signed-off-by: Greg Kroah-Hartman --- arch/arm/common/dmabounce.c | 22 +++++++++------------- arch/arm/common/sa1111.c | 3 ++- arch/arm/kernel/ecard.c | 3 +-- arch/arm/mach-integrator/impd1.c | 3 +-- arch/powerpc/platforms/chrp/pci.c | 2 +- arch/x86/kernel/pci-dma.c | 6 ++---- arch/x86/kernel/pci-gart_64.c | 4 +--- drivers/acpi/fan.c | 10 +++++----- drivers/acpi/glue.c | 6 ++---- drivers/acpi/processor_core.c | 5 ++--- drivers/acpi/scan.c | 2 +- drivers/acpi/thermal.c | 4 ++-- drivers/acpi/video.c | 5 ++--- drivers/base/power/trace.c | 2 +- 14 files changed, 32 insertions(+), 45 deletions(-) (limited to 'arch/x86') diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 2744673314b..dd294734260 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -554,9 +554,8 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC); if (!device_info) { - printk(KERN_ERR - "Could not allocated dmabounce_device_info for %s", - dev->bus_id); + dev_err(dev, + "Could not allocated dmabounce_device_info\n"); return -ENOMEM; } @@ -594,8 +593,7 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, dev->archdata.dmabounce = device_info; - printk(KERN_INFO "dmabounce: registered device %s on %s bus\n", - dev->bus_id, dev->bus->name); + dev_info(dev, "dmabounce: registered device\n"); return 0; @@ -614,16 +612,15 @@ dmabounce_unregister_dev(struct device *dev) dev->archdata.dmabounce = NULL; if (!device_info) { - printk(KERN_WARNING - "%s: Never registered with dmabounce but attempting" \ - "to unregister!\n", dev->bus_id); + dev_warn(dev, + "Never registered with dmabounce but attempting" + "to unregister!\n"); return; } if (!list_empty(&device_info->safe_buffers)) { - printk(KERN_ERR - "%s: Removing from dmabounce with pending buffers!\n", - dev->bus_id); + dev_err(dev, + "Removing from dmabounce with pending buffers!\n"); BUG(); } @@ -639,8 +636,7 @@ dmabounce_unregister_dev(struct device *dev) kfree(device_info); - printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n", - dev->bus_id, dev->bus->name); + dev_info(dev, "dmabounce: device unregistered\n"); } diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index eb06d0b2cb7..2048ae0b155 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c @@ -593,7 +593,8 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent, if (dev->dma_mask != 0xffffffffUL) { ret = dmabounce_register_dev(&dev->dev, 1024, 4096); if (ret) { - printk("SA1111: Failed to register %s with dmabounce", 
dev->dev.bus_id); + dev_err(&dev->dev, "SA1111: Failed to register" + " with dmabounce\n"); device_unregister(&dev->dev); } } diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c index 8bfd299bfe7..2de425f62c2 100644 --- a/arch/arm/kernel/ecard.c +++ b/arch/arm/kernel/ecard.c @@ -853,8 +853,7 @@ static struct expansion_card *__init ecard_alloc_card(int type, int slot) for (i = 0; i < ECARD_NUM_RESOURCES; i++) { if (ec->resource[i].flags && request_resource(&iomem_resource, &ec->resource[i])) { - printk(KERN_ERR "%s: resource(s) not available\n", - ec->dev.bus_id); + dev_err(&ec->dev, "resource(s) not available\n"); ec->resource[i].end -= ec->resource[i].start; ec->resource[i].start = 0; ec->resource[i].flags = 0; diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c index 62e653a3ea1..619d05e6cf7 100644 --- a/arch/arm/mach-integrator/impd1.c +++ b/arch/arm/mach-integrator/impd1.c @@ -407,8 +407,7 @@ static int impd1_probe(struct lm_device *dev) ret = amba_device_register(d, &dev->resource); if (ret) { - printk("unable to register device %s: %d\n", - d->dev.bus_id, ret); + dev_err(&d->dev, "unable to register device: %d\n", ret); kfree(d); } } diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c index 609c46db4a1..768c262b936 100644 --- a/arch/powerpc/platforms/chrp/pci.c +++ b/arch/powerpc/platforms/chrp/pci.c @@ -367,7 +367,7 @@ static void chrp_pci_fixup_vt8231_ata(struct pci_dev *viaide) viaisa = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL); if (!viaisa) return; - printk("Fixing VIA IDE, force legacy mode on '%s'\n", viaide->dev.bus_id); + dev_info(&viaide->dev, "Fixing VIA IDE, force legacy mode on\n"); pci_read_config_byte(viaide, PCI_CLASS_PROG, &progif); pci_write_config_byte(viaide, PCI_CLASS_PROG, progif & ~0x5); diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index a4213c00dff..cbecb05551b 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -314,8 +314,7 @@ int dma_supported(struct device *dev, u64 mask) { #ifdef CONFIG_PCI if (mask > 0xffffffff && forbid_dac > 0) { - printk(KERN_INFO "PCI: Disallowing DAC for device %s\n", - dev->bus_id); + dev_info(dev, "PCI: Disallowing DAC for device\n"); return 0; } #endif @@ -342,8 +341,7 @@ int dma_supported(struct device *dev, u64 mask) type. Normally this doesn't make any difference, but gives more gentle handling of IOMMU overflow. */ if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) { - printk(KERN_INFO "%s: Force SAC with mask %Lx\n", - dev->bus_id, mask); + dev_info(dev, "Force SAC with mask %Lx\n", mask); return 0; } diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index be60961f869..df5f142657d 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c @@ -198,9 +198,7 @@ static void iommu_full(struct device *dev, size_t size, int dir) * out. Hopefully no network devices use single mappings that big.
*/ - printk(KERN_ERR - "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n", - size, dev->bus_id); + dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size); if (size > PAGE_SIZE*EMERGENCY_PAGES) { if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL) diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 55c17afbe66..2655bc1b4ee 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -263,22 +263,22 @@ static int acpi_fan_add(struct acpi_device *device) goto end; } - printk(KERN_INFO PREFIX - "%s is registered as cooling_device%d\n", - device->dev.bus_id, cdev->id); + dev_info(&device->dev, "registered as cooling_device%d\n", cdev->id); acpi_driver_data(device) = cdev; result = sysfs_create_link(&device->dev.kobj, &cdev->device.kobj, "thermal_cooling"); if (result) - printk(KERN_ERR PREFIX "Create sysfs link\n"); + dev_err(&device->dev, "Failed to create sysfs link " + "'thermal_cooling'\n"); result = sysfs_create_link(&cdev->device.kobj, &device->dev.kobj, "device"); if (result) - printk(KERN_ERR PREFIX "Create sysfs link\n"); + dev_err(&device->dev, "Failed to create sysfs link " + "'device'\n"); result = acpi_fan_add_fs(device); if (result) diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 2f173e83f8a..084109507c9 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -146,8 +146,7 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle) acpi_status status; if (dev->archdata.acpi_handle) { - printk(KERN_WARNING PREFIX - "Drivers changed 'acpi_handle' for %s\n", dev->bus_id); + dev_warn(dev, "Drivers changed 'acpi_handle'\n"); return -EINVAL; } get_device(dev); @@ -195,8 +194,7 @@ static int acpi_unbind_one(struct device *dev) /* acpi_bind_one increase refcnt by one */ put_device(dev); } else { - printk(KERN_ERR PREFIX - "Oops, 'acpi_handle' corrupt for %s\n", dev->bus_id); + dev_err(dev, "Oops, 'acpi_handle' corrupt\n"); } return 0; } diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index ec0f2d581ec..e36422a7122 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -714,9 +714,8 @@ static int __cpuinit acpi_processor_start(struct acpi_device *device) goto end; } - printk(KERN_INFO PREFIX - "%s is registered as cooling_device%d\n", - device->dev.bus_id, pr->cdev->id); + dev_info(&device->dev, "registered as cooling_device%d\n", + pr->cdev->id); result = sysfs_create_link(&device->dev.kobj, &pr->cdev->device.kobj, diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index f3132aa47a6..f6f52c1a2ab 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -471,7 +471,7 @@ static int acpi_device_register(struct acpi_device *device, device->dev.release = &acpi_device_release; result = device_add(&device->dev); if(result) { - printk(KERN_ERR PREFIX "Error adding device %s", device->dev.bus_id); + dev_err(&device->dev, "Error adding device\n"); goto end; } diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 84c795fb9b1..30a34133793 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -1179,8 +1179,8 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz) tz->tz_enabled = 1; - printk(KERN_INFO PREFIX "%s is registered as thermal_zone%d\n", - tz->device->dev.bus_id, tz->thermal_zone->id); + dev_info(&tz->device->dev, "registered as thermal_zone%d\n", + tz->thermal_zone->id); return 0; } diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 64c889331f3..37b9e16710d 100644 --- a/drivers/acpi/video.c +++ 
b/drivers/acpi/video.c @@ -762,9 +762,8 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) if (IS_ERR(device->cdev)) return; - printk(KERN_INFO PREFIX - "%s is registered as cooling_device%d\n", - device->dev->dev.bus_id, device->cdev->id); + dev_info(&device->dev->dev, "registered as cooling_device%d\n", + device->cdev->id); result = sysfs_create_link(&device->dev->dev.kobj, &device->cdev->device.kobj, "thermal_cooling"); diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index 9b1b20b59e0..2aa6e8fc4de 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c @@ -194,7 +194,7 @@ static int show_dev_hash(unsigned int value) struct device * dev = to_device(entry); unsigned int hash = hash_string(DEVSEED, dev->bus_id, DEVHASH); if (hash == value) { - printk(" hash matches device %s\n", dev->bus_id); + dev_info(dev, "hash matches\n"); match++; } entry = entry->prev; -- cgit v1.2.3-70-g09d2 From 4a0b2b4dbe1335b8b9886ba3dc85a145d5d938ed Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Tue, 1 Jul 2008 18:48:41 +0200 Subject: sysdev: Pass the attribute to the low level sysdev show/store function This allows one to dynamically generate attributes and to share show/store functions between attributes. Right now most attributes are generated by special macros and lots of duplicated code. With the attribute passed, it's instead possible to attach some data to the attribute and then use that in shared low-level functions to do different things. I need this for the dynamically generated bank attributes in the x86 machine check code, but it'll allow some further cleanups. I converted all users in tree to the new show/store prototype. It's a single huge patch to avoid unbisectable sections. Runtime tested: x86-32, x86-64 Compiled only: ia64, powerpc Not compile tested/only grep converted: sh, arm, avr32 Signed-off-by: Andi Kleen Signed-off-by: Greg Kroah-Hartman --- arch/arm/kernel/time.c | 4 ++- arch/avr32/kernel/cpu.c | 38 +++++++++++++++++--------- arch/ia64/kernel/err_inject.c | 22 ++++++++++----- arch/powerpc/kernel/sysfs.c | 15 ++++++++--- arch/powerpc/platforms/cell/cbe_thermal.c | 45 ++++++++++++++++++----------- arch/powerpc/platforms/cell/spu_base.c | 3 ++- arch/s390/kernel/smp.c | 36 ++++++++++++++---------- arch/s390/kernel/time.c | 35 ++++++++++++++++-------- arch/sh/drivers/dma/dma-sysfs.c | 15 ++++++++--- arch/sparc64/kernel/sysfs.c | 16 +++++++---- arch/x86/kernel/cpu/mcheck/mce_64.c | 14 +++++++--- arch/x86/kernel/cpu/mcheck/therm_throt.c | 1 + arch/x86/kernel/microcode.c | 10 ++++--- drivers/base/cpu.c | 10 ++++--- drivers/base/memory.c | 12 ++++++--- drivers/base/node.c | 15 +++++++---- drivers/base/sys.c | 4 +-- drivers/base/topology.c | 17 ++++++++---- drivers/cpuidle/sysfs.c | 10 ++++--- drivers/xen/balloon.c | 1 + include/linux/sysdev.h | 5 ++-- kernel/rtmutex-tester.c | 7 ++--- kernel/sched.c | 8 ++++-- kernel/time/clocksource.c | 8 ++++-- 24 files changed, 239 insertions(+), 112 deletions(-) (limited to 'arch/x86') diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index cc5145b28e7..368d171754c 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c @@ -130,7 +130,9 @@ static const struct leds_evt_name evt_names[] = { { "red", led_red_on, led_red_off }, }; -static ssize_t leds_store(struct sys_device *dev, const char *buf, size_t size) +static ssize_t leds_store(struct sys_device *dev, + struct sysdev_attribute *attr, + const char *buf, size_t size) { int ret = -EINVAL, len = strcspn(buf, " "); diff --git
a/arch/avr32/kernel/cpu.c b/arch/avr32/kernel/cpu.c index b8409caeb23..e84faffbbec 100644 --- a/arch/avr32/kernel/cpu.c +++ b/arch/avr32/kernel/cpu.c @@ -26,14 +26,16 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); * XXX: If/when a SMP-capable implementation of AVR32 will ever be * made, we must make sure that the code executes on the correct CPU. */ -static ssize_t show_pc0event(struct sys_device *dev, char *buf) +static ssize_t show_pc0event(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { unsigned long pccr; pccr = sysreg_read(PCCR); return sprintf(buf, "0x%lx\n", (pccr >> 12) & 0x3f); } -static ssize_t store_pc0event(struct sys_device *dev, const char *buf, +static ssize_t store_pc0event(struct sys_device *dev, + struct sysdev_attribute *attr, const char *buf, size_t count) { unsigned long val; @@ -46,15 +48,17 @@ static ssize_t store_pc0event(struct sys_device *dev, const char *buf, sysreg_write(PCCR, val); return count; } -static ssize_t show_pc0count(struct sys_device *dev, char *buf) +static ssize_t show_pc0count(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { unsigned long pcnt0; pcnt0 = sysreg_read(PCNT0); return sprintf(buf, "%lu\n", pcnt0); } -static ssize_t store_pc0count(struct sys_device *dev, const char *buf, - size_t count) +static ssize_t store_pc0count(struct sys_device *dev, + struct sysdev_attribute *attr, + const char *buf, size_t count) { unsigned long val; char *endp; @@ -67,14 +71,16 @@ static ssize_t store_pc0count(struct sys_device *dev, const char *buf, return count; } -static ssize_t show_pc1event(struct sys_device *dev, char *buf) +static ssize_t show_pc1event(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { unsigned long pccr; pccr = sysreg_read(PCCR); return sprintf(buf, "0x%lx\n", (pccr >> 18) & 0x3f); } -static ssize_t store_pc1event(struct sys_device *dev, const char *buf, +static ssize_t store_pc1event(struct sys_device *dev, + struct sysdev_attribute *attr, const char *buf, size_t count) { unsigned long val; @@ -87,14 +93,16 @@ static ssize_t store_pc1event(struct sys_device *dev, const char *buf, sysreg_write(PCCR, val); return count; } -static ssize_t show_pc1count(struct sys_device *dev, char *buf) +static ssize_t show_pc1count(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { unsigned long pcnt1; pcnt1 = sysreg_read(PCNT1); return sprintf(buf, "%lu\n", pcnt1); } -static ssize_t store_pc1count(struct sys_device *dev, const char *buf, +static ssize_t store_pc1count(struct sys_device *dev, + struct sysdev_attribute *attr, const char *buf, size_t count) { unsigned long val; @@ -108,14 +116,16 @@ static ssize_t store_pc1count(struct sys_device *dev, const char *buf, return count; } -static ssize_t show_pccycles(struct sys_device *dev, char *buf) +static ssize_t show_pccycles(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { unsigned long pccnt; pccnt = sysreg_read(PCCNT); return sprintf(buf, "%lu\n", pccnt); } -static ssize_t store_pccycles(struct sys_device *dev, const char *buf, +static ssize_t store_pccycles(struct sys_device *dev, + struct sysdev_attribute *attr, const char *buf, size_t count) { unsigned long val; @@ -129,14 +139,16 @@ static ssize_t store_pccycles(struct sys_device *dev, const char *buf, return count; } -static ssize_t show_pcenable(struct sys_device *dev, char *buf) +static ssize_t show_pcenable(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { unsigned long pccr; pccr = sysreg_read(PCCR); return 
sprintf(buf, "%c\n", (pccr & 1)?'1':'0'); } -static ssize_t store_pcenable(struct sys_device *dev, const char *buf, +static ssize_t store_pcenable(struct sys_device *dev, + struct sysdev_attribute *attr, const char *buf, size_t count) { unsigned long pccr, val; diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c index b642648cc2a..c539c689493 100644 --- a/arch/ia64/kernel/err_inject.c +++ b/arch/ia64/kernel/err_inject.c @@ -55,7 +55,8 @@ static u64 resources[NR_CPUS]; #define show(name) \ static ssize_t \ -show_##name(struct sys_device *dev, char *buf) \ +show_##name(struct sys_device *dev, struct sysdev_attribute *attr, \ + char *buf) \ { \ u32 cpu=dev->id; \ return sprintf(buf, "%lx\n", name[cpu]); \ @@ -63,7 +64,8 @@ show_##name(struct sys_device *dev, char *buf) \ #define store(name) \ static ssize_t \ -store_##name(struct sys_device *dev, const char *buf, size_t size) \ +store_##name(struct sys_device *dev, struct sysdev_attribute *attr, \ + const char *buf, size_t size) \ { \ unsigned int cpu=dev->id; \ name[cpu] = simple_strtoull(buf, NULL, 16); \ @@ -76,7 +78,8 @@ show(call_start) * processor. The cpu number in driver is only used for storing data. */ static ssize_t -store_call_start(struct sys_device *dev, const char *buf, size_t size) +store_call_start(struct sys_device *dev, struct sysdev_attribute *attr, + const char *buf, size_t size) { unsigned int cpu=dev->id; unsigned long call_start = simple_strtoull(buf, NULL, 16); @@ -124,14 +127,16 @@ show(err_type_info) store(err_type_info) static ssize_t -show_virtual_to_phys(struct sys_device *dev, char *buf) +show_virtual_to_phys(struct sys_device *dev, struct sysdev_attribute *attr, + char *buf) { unsigned int cpu=dev->id; return sprintf(buf, "%lx\n", phys_addr[cpu]); } static ssize_t -store_virtual_to_phys(struct sys_device *dev, const char *buf, size_t size) +store_virtual_to_phys(struct sys_device *dev, struct sysdev_attribute *attr, + const char *buf, size_t size) { unsigned int cpu=dev->id; u64 virt_addr=simple_strtoull(buf, NULL, 16); @@ -154,7 +159,8 @@ show(err_struct_info) store(err_struct_info) static ssize_t -show_err_data_buffer(struct sys_device *dev, char *buf) +show_err_data_buffer(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { unsigned int cpu=dev->id; @@ -165,7 +171,9 @@ show_err_data_buffer(struct sys_device *dev, char *buf) } static ssize_t -store_err_data_buffer(struct sys_device *dev, const char *buf, size_t size) +store_err_data_buffer(struct sys_device *dev, + struct sysdev_attribute *attr, + const char *buf, size_t size) { unsigned int cpu=dev->id; int ret; diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index c8127f832df..aba0ba95f06 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -28,7 +28,9 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); /* Time in microseconds we delay before sleeping in the idle loop */ DEFINE_PER_CPU(unsigned long, smt_snooze_delay) = { 100 }; -static ssize_t store_smt_snooze_delay(struct sys_device *dev, const char *buf, +static ssize_t store_smt_snooze_delay(struct sys_device *dev, + struct sysdev_attribute *attr, + const char *buf, size_t count) { struct cpu *cpu = container_of(dev, struct cpu, sysdev); @@ -44,7 +46,9 @@ static ssize_t store_smt_snooze_delay(struct sys_device *dev, const char *buf, return count; } -static ssize_t show_smt_snooze_delay(struct sys_device *dev, char *buf) +static ssize_t show_smt_snooze_delay(struct sys_device *dev, + struct sysdev_attribute *attr, + 
char *buf) { struct cpu *cpu = container_of(dev, struct cpu, sysdev); @@ -152,14 +156,17 @@ static unsigned long write_##NAME(unsigned long val) \ mtspr(ADDRESS, val); \ return 0; \ } \ -static ssize_t show_##NAME(struct sys_device *dev, char *buf) \ +static ssize_t show_##NAME(struct sys_device *dev, \ + struct sysdev_attribute *attr, \ + char *buf) \ { \ struct cpu *cpu = container_of(dev, struct cpu, sysdev); \ unsigned long val = run_on_cpu(cpu->sysdev.id, read_##NAME, 0); \ return sprintf(buf, "%lx\n", val); \ } \ static ssize_t __used \ - store_##NAME(struct sys_device *dev, const char *buf, size_t count) \ + store_##NAME(struct sys_device *dev, struct sysdev_attribute *attr, \ + const char *buf, size_t count) \ { \ struct cpu *cpu = container_of(dev, struct cpu, sysdev); \ unsigned long val; \ diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c index 4852bf312d8..4d4c8c16912 100644 --- a/arch/powerpc/platforms/cell/cbe_thermal.c +++ b/arch/powerpc/platforms/cell/cbe_thermal.c @@ -97,7 +97,8 @@ static u8 spu_read_register_value(struct sys_device *sysdev, union spe_reg __iom return value.spe[spu->spe_id]; } -static ssize_t spu_show_temp(struct sys_device *sysdev, char *buf) +static ssize_t spu_show_temp(struct sys_device *sysdev, struct sysdev_attribute *attr, + char *buf) { u8 value; struct cbe_pmd_regs __iomem *pmd_regs; @@ -146,32 +147,38 @@ static ssize_t store_throttle(struct cbe_pmd_regs __iomem *pmd_regs, const char return size; } -static ssize_t spu_show_throttle_end(struct sys_device *sysdev, char *buf) +static ssize_t spu_show_throttle_end(struct sys_device *sysdev, + struct sysdev_attribute *attr, char *buf) { return show_throttle(get_pmd_regs(sysdev), buf, 0); } -static ssize_t spu_show_throttle_begin(struct sys_device *sysdev, char *buf) +static ssize_t spu_show_throttle_begin(struct sys_device *sysdev, + struct sysdev_attribute *attr, char *buf) { return show_throttle(get_pmd_regs(sysdev), buf, 8); } -static ssize_t spu_show_throttle_full_stop(struct sys_device *sysdev, char *buf) +static ssize_t spu_show_throttle_full_stop(struct sys_device *sysdev, + struct sysdev_attribute *attr, char *buf) { return show_throttle(get_pmd_regs(sysdev), buf, 16); } -static ssize_t spu_store_throttle_end(struct sys_device *sysdev, const char *buf, size_t size) +static ssize_t spu_store_throttle_end(struct sys_device *sysdev, + struct sysdev_attribute *attr, const char *buf, size_t size) { return store_throttle(get_pmd_regs(sysdev), buf, size, 0); } -static ssize_t spu_store_throttle_begin(struct sys_device *sysdev, const char *buf, size_t size) +static ssize_t spu_store_throttle_begin(struct sys_device *sysdev, + struct sysdev_attribute *attr, const char *buf, size_t size) { return store_throttle(get_pmd_regs(sysdev), buf, size, 8); } -static ssize_t spu_store_throttle_full_stop(struct sys_device *sysdev, const char *buf, size_t size) +static ssize_t spu_store_throttle_full_stop(struct sys_device *sysdev, + struct sysdev_attribute *attr, const char *buf, size_t size) { return store_throttle(get_pmd_regs(sysdev), buf, size, 16); } @@ -192,43 +199,51 @@ static ssize_t ppe_show_temp(struct sys_device *sysdev, char *buf, int pos) /* shows the temperature of the DTS on the PPE, * located near the linear thermal sensor */ -static ssize_t ppe_show_temp0(struct sys_device *sysdev, char *buf) +static ssize_t ppe_show_temp0(struct sys_device *sysdev, + struct sysdev_attribute *attr, char *buf) { return ppe_show_temp(sysdev, buf, 32); } /* shows the 
temperature of the second DTS on the PPE */ -static ssize_t ppe_show_temp1(struct sys_device *sysdev, char *buf) +static ssize_t ppe_show_temp1(struct sys_device *sysdev, + struct sysdev_attribute *attr, char *buf) { return ppe_show_temp(sysdev, buf, 0); } -static ssize_t ppe_show_throttle_end(struct sys_device *sysdev, char *buf) +static ssize_t ppe_show_throttle_end(struct sys_device *sysdev, + struct sysdev_attribute *attr, char *buf) { return show_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, 32); } -static ssize_t ppe_show_throttle_begin(struct sys_device *sysdev, char *buf) +static ssize_t ppe_show_throttle_begin(struct sys_device *sysdev, + struct sysdev_attribute *attr, char *buf) { return show_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, 40); } -static ssize_t ppe_show_throttle_full_stop(struct sys_device *sysdev, char *buf) +static ssize_t ppe_show_throttle_full_stop(struct sys_device *sysdev, + struct sysdev_attribute *attr, char *buf) { return show_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, 48); } -static ssize_t ppe_store_throttle_end(struct sys_device *sysdev, const char *buf, size_t size) +static ssize_t ppe_store_throttle_end(struct sys_device *sysdev, + struct sysdev_attribute *attr, const char *buf, size_t size) { return store_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, size, 32); } -static ssize_t ppe_store_throttle_begin(struct sys_device *sysdev, const char *buf, size_t size) +static ssize_t ppe_store_throttle_begin(struct sys_device *sysdev, + struct sysdev_attribute *attr, const char *buf, size_t size) { return store_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, size, 40); } -static ssize_t ppe_store_throttle_full_stop(struct sys_device *sysdev, const char *buf, size_t size) +static ssize_t ppe_store_throttle_full_stop(struct sys_device *sysdev, + struct sysdev_attribute *attr, const char *buf, size_t size) { return store_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, size, 48); } diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index 78f905bc6a4..a5bdb89a17c 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c @@ -703,7 +703,8 @@ static unsigned long long spu_acct_time(struct spu *spu, } -static ssize_t spu_stat_show(struct sys_device *sysdev, char *buf) +static ssize_t spu_stat_show(struct sys_device *sysdev, + struct sysdev_attribute *attr, char *buf) { struct spu *spu = container_of(sysdev, struct spu, sysdev); diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index b6781030cfb..b795b3e24af 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -864,7 +864,8 @@ int setup_profiling_timer(unsigned int multiplier) } #ifdef CONFIG_HOTPLUG_CPU -static ssize_t cpu_configure_show(struct sys_device *dev, char *buf) +static ssize_t cpu_configure_show(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { ssize_t count; @@ -874,8 +875,9 @@ static ssize_t cpu_configure_show(struct sys_device *dev, char *buf) return count; } -static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf, - size_t count) +static ssize_t cpu_configure_store(struct sys_device *dev, + struct sysdev_attribute *attr, + const char *buf, size_t count) { int cpu = dev->id; int val, rc; @@ -922,7 +924,8 @@ out: static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); #endif /* CONFIG_HOTPLUG_CPU */ -static ssize_t cpu_polarization_show(struct sys_device *dev, char *buf) +static ssize_t cpu_polarization_show(struct sys_device 
*dev, + struct sysdev_attribute *attr, char *buf) { int cpu = dev->id; ssize_t count; @@ -950,7 +953,8 @@ static ssize_t cpu_polarization_show(struct sys_device *dev, char *buf) } static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL); -static ssize_t show_cpu_address(struct sys_device *dev, char *buf) +static ssize_t show_cpu_address(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]); } @@ -970,7 +974,8 @@ static struct attribute_group cpu_common_attr_group = { .attrs = cpu_common_attrs, }; -static ssize_t show_capability(struct sys_device *dev, char *buf) +static ssize_t show_capability(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { unsigned int capability; int rc; @@ -982,7 +987,8 @@ static ssize_t show_capability(struct sys_device *dev, char *buf) } static SYSDEV_ATTR(capability, 0444, show_capability, NULL); -static ssize_t show_idle_count(struct sys_device *dev, char *buf) +static ssize_t show_idle_count(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { struct s390_idle_data *idle; unsigned long long idle_count; @@ -995,7 +1001,8 @@ static ssize_t show_idle_count(struct sys_device *dev, char *buf) } static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL); -static ssize_t show_idle_time(struct sys_device *dev, char *buf) +static ssize_t show_idle_time(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { struct s390_idle_data *idle; unsigned long long new_time; @@ -1112,7 +1119,9 @@ out: return rc; } -static ssize_t __ref rescan_store(struct sys_device *dev, const char *buf, +static ssize_t __ref rescan_store(struct sys_device *dev, + struct sysdev_attribute *attr, + const char *buf, size_t count) { int rc; @@ -1123,7 +1132,9 @@ static ssize_t __ref rescan_store(struct sys_device *dev, const char *buf, static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store); #endif /* CONFIG_HOTPLUG_CPU */ -static ssize_t dispatching_show(struct sys_device *dev, char *buf) +static ssize_t dispatching_show(struct sys_device *dev, + struct sysdev_attribute *attr, + char *buf) { ssize_t count; @@ -1133,8 +1144,9 @@ static ssize_t dispatching_show(struct sys_device *dev, char *buf) return count; } -static ssize_t dispatching_store(struct sys_device *dev, const char *buf, - size_t count) +static ssize_t dispatching_store(struct sys_device *dev, + struct sysdev_attribute *attr, + const char *buf, size_t count) { int val, rc; char delim; diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index f2cede3947b..ab70d9bd926 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -1100,7 +1100,9 @@ static inline struct etr_aib *etr_aib_from_dev(struct sys_device *dev) return etr_port1_online ? 
&etr_port1 : NULL; } -static ssize_t etr_online_show(struct sys_device *dev, char *buf) +static ssize_t etr_online_show(struct sys_device *dev, + struct sysdev_attribute *attr, + char *buf) { unsigned int online; @@ -1109,7 +1111,8 @@ static ssize_t etr_online_show(struct sys_device *dev, char *buf) } static ssize_t etr_online_store(struct sys_device *dev, - const char *buf, size_t count) + struct sysdev_attribute *attr, + const char *buf, size_t count) { unsigned int value; @@ -1136,7 +1139,9 @@ static ssize_t etr_online_store(struct sys_device *dev, static SYSDEV_ATTR(online, 0600, etr_online_show, etr_online_store); -static ssize_t etr_stepping_control_show(struct sys_device *dev, char *buf) +static ssize_t etr_stepping_control_show(struct sys_device *dev, + struct sysdev_attribute *attr, + char *buf) { return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ? etr_eacr.e0 : etr_eacr.e1); @@ -1144,7 +1149,8 @@ static ssize_t etr_stepping_control_show(struct sys_device *dev, char *buf) static SYSDEV_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL); -static ssize_t etr_mode_code_show(struct sys_device *dev, char *buf) +static ssize_t etr_mode_code_show(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { if (!etr_port0_online && !etr_port1_online) /* Status word is not uptodate if both ports are offline. */ @@ -1155,7 +1161,8 @@ static ssize_t etr_mode_code_show(struct sys_device *dev, char *buf) static SYSDEV_ATTR(state_code, 0400, etr_mode_code_show, NULL); -static ssize_t etr_untuned_show(struct sys_device *dev, char *buf) +static ssize_t etr_untuned_show(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { struct etr_aib *aib = etr_aib_from_dev(dev); @@ -1166,7 +1173,8 @@ static ssize_t etr_untuned_show(struct sys_device *dev, char *buf) static SYSDEV_ATTR(untuned, 0400, etr_untuned_show, NULL); -static ssize_t etr_network_id_show(struct sys_device *dev, char *buf) +static ssize_t etr_network_id_show(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { struct etr_aib *aib = etr_aib_from_dev(dev); @@ -1177,7 +1185,8 @@ static ssize_t etr_network_id_show(struct sys_device *dev, char *buf) static SYSDEV_ATTR(network, 0400, etr_network_id_show, NULL); -static ssize_t etr_id_show(struct sys_device *dev, char *buf) +static ssize_t etr_id_show(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { struct etr_aib *aib = etr_aib_from_dev(dev); @@ -1188,7 +1197,8 @@ static ssize_t etr_id_show(struct sys_device *dev, char *buf) static SYSDEV_ATTR(id, 0400, etr_id_show, NULL); -static ssize_t etr_port_number_show(struct sys_device *dev, char *buf) +static ssize_t etr_port_number_show(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { struct etr_aib *aib = etr_aib_from_dev(dev); @@ -1199,7 +1209,8 @@ static ssize_t etr_port_number_show(struct sys_device *dev, char *buf) static SYSDEV_ATTR(port, 0400, etr_port_number_show, NULL); -static ssize_t etr_coupled_show(struct sys_device *dev, char *buf) +static ssize_t etr_coupled_show(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { struct etr_aib *aib = etr_aib_from_dev(dev); @@ -1210,7 +1221,8 @@ static ssize_t etr_coupled_show(struct sys_device *dev, char *buf) static SYSDEV_ATTR(coupled, 0400, etr_coupled_show, NULL); -static ssize_t etr_local_time_show(struct sys_device *dev, char *buf) +static ssize_t etr_local_time_show(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { struct etr_aib *aib = etr_aib_from_dev(dev); @@ 
-1221,7 +1233,8 @@ static ssize_t etr_local_time_show(struct sys_device *dev, char *buf) static SYSDEV_ATTR(local_time, 0400, etr_local_time_show, NULL); -static ssize_t etr_utc_offset_show(struct sys_device *dev, char *buf) +static ssize_t etr_utc_offset_show(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { struct etr_aib *aib = etr_aib_from_dev(dev); diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c index 51b57c0d1a3..347ee11351e 100644 --- a/arch/sh/drivers/dma/dma-sysfs.c +++ b/arch/sh/drivers/dma/dma-sysfs.c @@ -23,7 +23,8 @@ static struct sysdev_class dma_sysclass = { }; EXPORT_SYMBOL(dma_sysclass); -static ssize_t dma_show_devices(struct sys_device *dev, char *buf) +static ssize_t dma_show_devices(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { ssize_t len = 0; int i; @@ -57,13 +58,15 @@ static int __init dma_sysclass_init(void) } postcore_initcall(dma_sysclass_init); -static ssize_t dma_show_dev_id(struct sys_device *dev, char *buf) +static ssize_t dma_show_dev_id(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { struct dma_channel *channel = to_dma_channel(dev); return sprintf(buf, "%s\n", channel->dev_id); } static ssize_t dma_store_dev_id(struct sys_device *dev, + struct sysdev_attribute *attr, const char *buf, size_t count) { struct dma_channel *channel = to_dma_channel(dev); @@ -74,6 +77,7 @@ static ssize_t dma_store_dev_id(struct sys_device *dev, static SYSDEV_ATTR(dev_id, S_IRUGO | S_IWUSR, dma_show_dev_id, dma_store_dev_id); static ssize_t dma_store_config(struct sys_device *dev, + struct sysdev_attribute *attr, const char *buf, size_t count) { struct dma_channel *channel = to_dma_channel(dev); @@ -87,13 +91,15 @@ static ssize_t dma_store_config(struct sys_device *dev, static SYSDEV_ATTR(config, S_IWUSR, NULL, dma_store_config); -static ssize_t dma_show_mode(struct sys_device *dev, char *buf) +static ssize_t dma_show_mode(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { struct dma_channel *channel = to_dma_channel(dev); return sprintf(buf, "0x%08x\n", channel->mode); } static ssize_t dma_store_mode(struct sys_device *dev, + struct sysdev_attribute *attr, const char *buf, size_t count) { struct dma_channel *channel = to_dma_channel(dev); @@ -104,7 +110,8 @@ static ssize_t dma_store_mode(struct sys_device *dev, static SYSDEV_ATTR(mode, S_IRUGO | S_IWUSR, dma_show_mode, dma_store_mode); #define dma_ro_attr(field, fmt) \ -static ssize_t dma_show_##field(struct sys_device *dev, char *buf) \ +static ssize_t dma_show_##field(struct sys_device *dev, \ + struct sysdev_attribute *attr, char *buf)\ { \ struct dma_channel *channel = to_dma_channel(dev); \ return sprintf(buf, fmt, channel->field); \ diff --git a/arch/sparc64/kernel/sysfs.c b/arch/sparc64/kernel/sysfs.c index e885034a6b7..84e5ce14671 100644 --- a/arch/sparc64/kernel/sysfs.c +++ b/arch/sparc64/kernel/sysfs.c @@ -14,7 +14,8 @@ static DEFINE_PER_CPU(struct hv_mmu_statistics, mmu_stats) __attribute__((aligned(64))); #define SHOW_MMUSTAT_ULONG(NAME) \ -static ssize_t show_##NAME(struct sys_device *dev, char *buf) \ +static ssize_t show_##NAME(struct sys_device *dev, \ + struct sysdev_attribute *attr, char *buf) \ { \ struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \ return sprintf(buf, "%lu\n", p->NAME); \ @@ -135,13 +136,16 @@ static unsigned long write_mmustat_enable(unsigned long val) return sun4v_mmustat_conf(ra, &orig_ra); } -static ssize_t show_mmustat_enable(struct sys_device *s, char *buf) 
+static ssize_t show_mmustat_enable(struct sys_device *s, + struct sysdev_attribute *attr, char *buf) { unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0); return sprintf(buf, "%lx\n", val); } -static ssize_t store_mmustat_enable(struct sys_device *s, const char *buf, size_t count) +static ssize_t store_mmustat_enable(struct sys_device *s, + struct sysdev_attribute *attr, const char *buf, + size_t count) { unsigned long val, err; int ret = sscanf(buf, "%ld", &val); @@ -179,14 +183,16 @@ static void unregister_mmu_stats(struct sys_device *s) #endif #define SHOW_CPUDATA_ULONG_NAME(NAME, MEMBER) \ -static ssize_t show_##NAME(struct sys_device *dev, char *buf) \ +static ssize_t show_##NAME(struct sys_device *dev, \ + struct sysdev_attribute *attr, char *buf) \ { \ cpuinfo_sparc *c = &cpu_data(dev->id); \ return sprintf(buf, "%lu\n", c->MEMBER); \ } #define SHOW_CPUDATA_UINT_NAME(NAME, MEMBER) \ -static ssize_t show_##NAME(struct sys_device *dev, char *buf) \ +static ssize_t show_##NAME(struct sys_device *dev, \ + struct sysdev_attribute *attr, char *buf) \ { \ cpuinfo_sparc *c = &cpu_data(dev->id); \ return sprintf(buf, "%u\n", c->MEMBER); \ } diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c index c4a7ec31394..e6a4d5f6764 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c @@ -762,10 +762,14 @@ DEFINE_PER_CPU(struct sys_device, device_mce); /* Why are there no generic functions for this? */ #define ACCESSOR(name, var, start) \ - static ssize_t show_ ## name(struct sys_device *s, char *buf) { \ + static ssize_t show_ ## name(struct sys_device *s, \ + struct sysdev_attribute *attr, \ + char *buf) { \ return sprintf(buf, "%lx\n", (unsigned long)var); \ } \ - static ssize_t set_ ## name(struct sys_device *s,const char *buf,size_t siz) { \ + static ssize_t set_ ## name(struct sys_device *s, \ + struct sysdev_attribute *attr, \ + const char *buf, size_t siz) { \ char *end; \ unsigned long new = simple_strtoul(buf, &end, 0); \ if (end == buf) return -EINVAL; \ @@ -786,14 +790,16 @@ ACCESSOR(bank3ctl,bank[3],mce_restart()) ACCESSOR(bank4ctl,bank[4],mce_restart()) ACCESSOR(bank5ctl,bank[5],mce_restart()) -static ssize_t show_trigger(struct sys_device *s, char *buf) +static ssize_t show_trigger(struct sys_device *s, struct sysdev_attribute *attr, + char *buf) { strcpy(buf, trigger); strcat(buf, "\n"); return strlen(trigger) + 1; } -static ssize_t set_trigger(struct sys_device *s,const char *buf,size_t siz) +static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr, + const char *buf,size_t siz) { char *p; int len; diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 1f4cc48c14c..d5ae2243f0b 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -35,6 +35,7 @@ atomic_t therm_throt_en = ATOMIC_INIT(0); #define define_therm_throt_sysdev_show_func(name) \ static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev, \ + struct sysdev_attribute *attr, \ char *buf) \ { \ unsigned int cpu = dev->id; \ diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c index 56b933119a0..fc4790638b6 100644 --- a/arch/x86/kernel/microcode.c +++ b/arch/x86/kernel/microcode.c @@ -644,7 +644,9 @@ static void microcode_fini_cpu(int cpu) mutex_unlock(&microcode_mutex); } -static ssize_t reload_store(struct sys_device *dev, const char *buf, size_t sz) +static ssize_t reload_store(struct sys_device *dev, + 
struct sysdev_attribute *attr, + const char *buf, size_t sz) { struct ucode_cpu_info *uci = ucode_cpu_info + dev->id; char *end; @@ -674,14 +676,16 @@ static ssize_t reload_store(struct sys_device *dev, const char *buf, size_t sz) return sz; } -static ssize_t version_show(struct sys_device *dev, char *buf) +static ssize_t version_show(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { struct ucode_cpu_info *uci = ucode_cpu_info + dev->id; return sprintf(buf, "0x%x\n", uci->rev); } -static ssize_t pf_show(struct sys_device *dev, char *buf) +static ssize_t pf_show(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { struct ucode_cpu_info *uci = ucode_cpu_info + dev->id; diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index e38dfed41d8..20537d50790 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -21,15 +21,16 @@ EXPORT_SYMBOL(cpu_sysdev_class); static DEFINE_PER_CPU(struct sys_device *, cpu_sys_devices); #ifdef CONFIG_HOTPLUG_CPU -static ssize_t show_online(struct sys_device *dev, char *buf) +static ssize_t show_online(struct sys_device *dev, struct sysdev_attribute *attr, + char *buf) { struct cpu *cpu = container_of(dev, struct cpu, sysdev); return sprintf(buf, "%u\n", !!cpu_online(cpu->sysdev.id)); } -static ssize_t __ref store_online(struct sys_device *dev, const char *buf, - size_t count) +static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribute *attr, + const char *buf, size_t count) { struct cpu *cpu = container_of(dev, struct cpu, sysdev); ssize_t ret; @@ -80,7 +81,8 @@ static inline void register_cpu_control(struct cpu *cpu) #ifdef CONFIG_KEXEC #include -static ssize_t show_crash_notes(struct sys_device *dev, char *buf) +static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute *attr, + char *buf) { struct cpu *cpu = container_of(dev, struct cpu, sysdev); ssize_t rc; diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 937e8258981..4d4e0e7b6e9 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -92,7 +92,8 @@ unregister_memory(struct memory_block *memory, struct mem_section *section) * uses. */ -static ssize_t show_mem_phys_index(struct sys_device *dev, char *buf) +static ssize_t show_mem_phys_index(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { struct memory_block *mem = container_of(dev, struct memory_block, sysdev); @@ -102,7 +103,8 @@ static ssize_t show_mem_phys_index(struct sys_device *dev, char *buf) /* * online, offline, going offline, etc. */ -static ssize_t show_mem_state(struct sys_device *dev, char *buf) +static ssize_t show_mem_state(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { struct memory_block *mem = container_of(dev, struct memory_block, sysdev); @@ -217,7 +219,8 @@ out: } static ssize_t -store_mem_state(struct sys_device *dev, const char *buf, size_t count) +store_mem_state(struct sys_device *dev, + struct sysdev_attribute *attr, const char *buf, size_t count) { struct memory_block *mem; unsigned int phys_section_nr; @@ -248,7 +251,8 @@ out: * s.t. if I offline all of these sections I can then * remove the physical device? 
*/ -static ssize_t show_phys_device(struct sys_device *dev, char *buf) +static ssize_t show_phys_device(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { struct memory_block *mem = container_of(dev, struct memory_block, sysdev); diff --git a/drivers/base/node.c b/drivers/base/node.c index 0f867a08333..5116b78c632 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -36,11 +36,13 @@ static ssize_t node_read_cpumap(struct sys_device *dev, int type, char *buf) return len; } -static inline ssize_t node_read_cpumask(struct sys_device *dev, char *buf) +static inline ssize_t node_read_cpumask(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { return node_read_cpumap(dev, 0, buf); } -static inline ssize_t node_read_cpulist(struct sys_device *dev, char *buf) +static inline ssize_t node_read_cpulist(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { return node_read_cpumap(dev, 1, buf); } @@ -49,7 +51,8 @@ static SYSDEV_ATTR(cpumap, S_IRUGO, node_read_cpumask, NULL); static SYSDEV_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL); #define K(x) ((x) << (PAGE_SHIFT - 10)) -static ssize_t node_read_meminfo(struct sys_device * dev, char * buf) +static ssize_t node_read_meminfo(struct sys_device * dev, + struct sysdev_attribute *attr, char * buf) { int n; int nid = dev->id; @@ -112,7 +115,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf) #undef K static SYSDEV_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL); -static ssize_t node_read_numastat(struct sys_device * dev, char * buf) +static ssize_t node_read_numastat(struct sys_device * dev, + struct sysdev_attribute *attr, char * buf) { return sprintf(buf, "numa_hit %lu\n" @@ -130,7 +134,8 @@ static ssize_t node_read_numastat(struct sys_device * dev, char * buf) } static SYSDEV_ATTR(numastat, S_IRUGO, node_read_numastat, NULL); -static ssize_t node_read_distance(struct sys_device * dev, char * buf) +static ssize_t node_read_distance(struct sys_device * dev, + struct sysdev_attribute *attr, char * buf) { int nid = dev->id; int len = 0; diff --git a/drivers/base/sys.c b/drivers/base/sys.c index 50690d9df24..dc7dace14e1 100644 --- a/drivers/base/sys.c +++ b/drivers/base/sys.c @@ -36,7 +36,7 @@ sysdev_show(struct kobject * kobj, struct attribute * attr, char * buffer) struct sysdev_attribute * sysdev_attr = to_sysdev_attr(attr); if (sysdev_attr->show) - return sysdev_attr->show(sysdev, buffer); + return sysdev_attr->show(sysdev, sysdev_attr, buffer); return -EIO; } @@ -49,7 +49,7 @@ sysdev_store(struct kobject * kobj, struct attribute * attr, struct sysdev_attribute * sysdev_attr = to_sysdev_attr(attr); if (sysdev_attr->store) - return sysdev_attr->store(sysdev, buffer, count); + return sysdev_attr->store(sysdev, sysdev_attr, buffer, count); return -EIO; } diff --git a/drivers/base/topology.c b/drivers/base/topology.c index 3f6d9b0a6ab..199cd97e32e 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -34,7 +34,8 @@ static SYSDEV_ATTR(_name, 0444, show_##_name, NULL) #define define_id_show_func(name) \ -static ssize_t show_##name(struct sys_device *dev, char *buf) \ +static ssize_t show_##name(struct sys_device *dev, \ + struct sysdev_attribute *attr, char *buf) \ { \ unsigned int cpu = dev->id; \ return sprintf(buf, "%d\n", topology_##name(cpu)); \ @@ -59,14 +60,17 @@ static ssize_t show_cpumap(int type, cpumask_t *mask, char *buf) #ifdef arch_provides_topology_pointers #define define_siblings_show_map(name) \ -static ssize_t show_##name(struct sys_device 
*dev, char *buf) \ +static ssize_t show_##name(struct sys_device *dev, \ + struct sysdev_attribute *attr, char *buf) \ { \ unsigned int cpu = dev->id; \ return show_cpumap(0, &(topology_##name(cpu)), buf); \ } #define define_siblings_show_list(name) \ -static ssize_t show_##name##_list(struct sys_device *dev, char *buf) \ +static ssize_t show_##name##_list(struct sys_device *dev, \ + struct sysdev_attribute *attr, \ + char *buf) \ { \ unsigned int cpu = dev->id; \ return show_cpumap(1, &(topology_##name(cpu)), buf); \ @@ -74,7 +78,8 @@ static ssize_t show_##name##_list(struct sys_device *dev, char *buf) \ #else #define define_siblings_show_map(name) \ -static ssize_t show_##name(struct sys_device *dev, char *buf) \ +static ssize_t show_##name(struct sys_device *dev, \ + struct sysdev_attribute *attr, char *buf) \ { \ unsigned int cpu = dev->id; \ cpumask_t mask = topology_##name(cpu); \ @@ -82,7 +87,9 @@ static ssize_t show_##name(struct sys_device *dev, char *buf) \ } #define define_siblings_show_list(name) \ -static ssize_t show_##name##_list(struct sys_device *dev, char *buf) \ +static ssize_t show_##name##_list(struct sys_device *dev, \ + struct sysdev_attribute *attr, \ + char *buf) \ { \ unsigned int cpu = dev->id; \ cpumask_t mask = topology_##name(cpu); \ diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index e949618b9be..31a0e0b455b 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -21,7 +21,8 @@ static int __init cpuidle_sysfs_setup(char *unused) } __setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup); -static ssize_t show_available_governors(struct sys_device *dev, char *buf) +static ssize_t show_available_governors(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { ssize_t i = 0; struct cpuidle_governor *tmp; @@ -39,7 +40,8 @@ out: return i; } -static ssize_t show_current_driver(struct sys_device *dev, char *buf) +static ssize_t show_current_driver(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { ssize_t ret; @@ -53,7 +55,8 @@ static ssize_t show_current_driver(struct sys_device *dev, char *buf) return ret; } -static ssize_t show_current_governor(struct sys_device *dev, char *buf) +static ssize_t show_current_governor(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { ssize_t ret; @@ -68,6 +71,7 @@ static ssize_t show_current_governor(struct sys_device *dev, char *buf) } static ssize_t store_current_governor(struct sys_device *dev, + struct sysdev_attribute *attr, const char *buf, size_t count) { char gov_name[CPUIDLE_NAME_LEN]; diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 591bc29b55f..d4427cb8697 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -610,6 +610,7 @@ static ssize_t show_target_kb(struct sys_device *dev, char *buf) } static ssize_t store_target_kb(struct sys_device *dev, + struct sysdev_attribute *attr, const char *buf, size_t count) { diff --git a/include/linux/sysdev.h b/include/linux/sysdev.h index f2767bc6b73..8dcf3162b21 100644 --- a/include/linux/sysdev.h +++ b/include/linux/sysdev.h @@ -99,8 +99,9 @@ extern void sysdev_unregister(struct sys_device *); struct sysdev_attribute { struct attribute attr; - ssize_t (*show)(struct sys_device *, char *); - ssize_t (*store)(struct sys_device *, const char *, size_t); + ssize_t (*show)(struct sys_device *, struct sysdev_attribute *, char *); + ssize_t (*store)(struct sys_device *, struct sysdev_attribute *, + const char *, size_t); }; diff --git a/kernel/rtmutex-tester.c 
b/kernel/rtmutex-tester.c index 092e4c620af..a56f629b057 100644 --- a/kernel/rtmutex-tester.c +++ b/kernel/rtmutex-tester.c @@ -297,8 +297,8 @@ static int test_func(void *data) * * opcode:data */ -static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf, - size_t count) +static ssize_t sysfs_test_command(struct sys_device *dev, struct sysdev_attribute *attr, + const char *buf, size_t count) { struct sched_param schedpar; struct test_thread_data *td; @@ -360,7 +360,8 @@ static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf, * @dev: thread to query * @buf: char buffer to be filled with thread status info */ -static ssize_t sysfs_test_status(struct sys_device *dev, char *buf) +static ssize_t sysfs_test_status(struct sys_device *dev, struct sysdev_attribute *attr, + char *buf) { struct test_thread_data *td; struct task_struct *tsk; diff --git a/kernel/sched.c b/kernel/sched.c index 99e6d850eca..b1104ea5d25 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -7737,11 +7737,13 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) } #ifdef CONFIG_SCHED_MC -static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page) +static ssize_t sched_mc_power_savings_show(struct sys_device *dev, + struct sysdev_attribute *attr, char *page) { return sprintf(page, "%u\n", sched_mc_power_savings); } static ssize_t sched_mc_power_savings_store(struct sys_device *dev, + struct sysdev_attribute *attr, const char *buf, size_t count) { return sched_power_savings_store(buf, count, 0); @@ -7751,11 +7753,13 @@ static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show, #endif #ifdef CONFIG_SCHED_SMT -static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page) +static ssize_t sched_smt_power_savings_show(struct sys_device *dev, + struct sysdev_attribute *attr, char *page) { return sprintf(page, "%u\n", sched_smt_power_savings); } static ssize_t sched_smt_power_savings_store(struct sys_device *dev, + struct sysdev_attribute *attr, const char *buf, size_t count) { return sched_power_savings_store(buf, count, 1); diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index dadde5361f3..b1c2da81b05 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -376,7 +376,8 @@ void clocksource_unregister(struct clocksource *cs) * Provides sysfs interface for listing current clocksource. */ static ssize_t -sysfs_show_current_clocksources(struct sys_device *dev, char *buf) +sysfs_show_current_clocksources(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) { ssize_t count = 0; @@ -397,6 +398,7 @@ sysfs_show_current_clocksources(struct sys_device *dev, char *buf) * clocksource selction. 
*/ static ssize_t sysfs_override_clocksource(struct sys_device *dev, + struct sysdev_attribute *attr, const char *buf, size_t count) { struct clocksource *ovr = NULL; @@ -449,7 +451,9 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev, * Provides sysfs interface for listing registered clocksources */ static ssize_t -sysfs_show_available_clocksources(struct sys_device *dev, char *buf) +sysfs_show_available_clocksources(struct sys_device *dev, + struct sysdev_attribute *attr, + char *buf) { struct clocksource *src; ssize_t count = 0; -- cgit v1.2.3-70-g09d2 From d95d62c018209355c0dc998682ff792432aa870c Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Tue, 1 Jul 2008 18:48:43 +0200 Subject: sysdev: Convert the x86 mce tolerant sysdev attribute to generic attribute Use the new generic int attribute accessors for the x86 mce tolerant attribute. Simple example to illustrate the new macros. There are much more places all over the tree that could be converted like this. Signed-off-by: Andi Kleen Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/mcheck/mce_64.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c index e6a4d5f6764..9ab65be8242 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c @@ -812,12 +812,12 @@ static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr, } static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger); -ACCESSOR(tolerant,tolerant,) +static SYSDEV_INT_ATTR(tolerant, 0644, tolerant); ACCESSOR(check_interval,check_interval,mce_restart()) static struct sysdev_attribute *mce_attributes[] = { &attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl, &attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl, - &attr_tolerant, &attr_check_interval, &attr_trigger, + &attr_tolerant.attr, &attr_check_interval, &attr_trigger, NULL }; -- cgit v1.2.3-70-g09d2 From 988781dc3e1d9209192b04458d279815923f5e76 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 21 Jul 2008 11:21:43 -0700 Subject: x86: use setup_clear_cpu_cap with disable_apic, fix beauty fix: /proc/cpuinfo will still show apic feature even if we booted up with it disabled. Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index ec952aa5394..b4aacb9f52e 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -680,7 +680,7 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_X86_LOCAL_APIC disable_apic = 1; #endif - clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC); + setup_clear_cpu_cap(X86_FEATURE_APIC); } #ifdef CONFIG_PCI -- cgit v1.2.3-70-g09d2 From c2e3277f875b83e5adc34e96989d6d87ec5f80f7 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 22 Jul 2008 15:40:46 +1000 Subject: x86: fix pte_flags() to only return flags, fix lguest (updated) (Jeremy said: rusty: use PTE_MASK rusty: use PTE_MASK rusty: use PTE_MASK When I asked: jsgf: does that include the NX flag? He responded eloquently: rusty: use PTE_MASK rusty: use PTE_MASK yes, it's the official constant of masking flags out of ptes ) Change a15af1c9ea2750a9ff01e51615c45950bad8221b 'x86/paravirt: add pte_flags to just get pte flags' removed lguest's private pte_flags() in favor of a generic one. 
Unfortunately, the generic one doesn't filter out the non-flags bits: this results in lguest creating corrupt shadow page tables and blowing up host memory. Since no one is supposed to use the pfn part of pte_flags(), it seems safest to always do the filtering. Signed-off-by: Rusty Russell Acked-by: Jeremy Fitzhardinge Signed-off-and-morning-tea-spilled-by: Ingo Molnar --- arch/x86/kernel/paravirt.c | 2 +- include/asm-x86/page.h | 7 ++++++- include/asm-x86/paravirt.h | 3 +++ 3 files changed, 10 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 097d8a6797f..94da4d52d79 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -443,7 +443,7 @@ struct pv_mmu_ops pv_mmu_ops = { #endif /* PAGETABLE_LEVELS >= 3 */ .pte_val = native_pte_val, - .pte_flags = native_pte_val, + .pte_flags = native_pte_flags, .pgd_val = native_pgd_val, .make_pte = native_make_pte, diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h index 28d7b4533b1..05d9bea2bfd 100644 --- a/include/asm-x86/page.h +++ b/include/asm-x86/page.h @@ -144,6 +144,11 @@ static inline pteval_t native_pte_val(pte_t pte) return pte.pte; } +static inline pteval_t native_pte_flags(pte_t pte) +{ + return native_pte_val(pte) & ~PTE_MASK; +} + #define pgprot_val(x) ((x).pgprot) #define __pgprot(x) ((pgprot_t) { (x) } ) @@ -165,7 +170,7 @@ static inline pteval_t native_pte_val(pte_t pte) #endif #define pte_val(x) native_pte_val(x) -#define pte_flags(x) native_pte_val(x) +#define pte_flags(x) native_pte_flags(x) #define __pte(x) native_make_pte(x) #endif /* CONFIG_PARAVIRT */ diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h index aec9767836b..5ca4639dc7d 100644 --- a/include/asm-x86/paravirt.h +++ b/include/asm-x86/paravirt.h @@ -1088,6 +1088,9 @@ static inline pteval_t pte_flags(pte_t pte) ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags, pte.pte); +#ifdef CONFIG_PARAVIRT_DEBUG + BUG_ON(ret & PTE_MASK); +#endif return ret; } -- cgit v1.2.3-70-g09d2 From 59438c9fc4f7a92c808c9049bc6b396f98bf954c Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Mon, 21 Jul 2008 22:59:42 -0700 Subject: x86: rename PTE_MASK to PTE_PFN_MASK Rusty, in his peevish way, complained that macros defining constants should have a name which somewhat accurately reflects the actual purpose of the constant. Aside from the fact that PTE_MASK gives no clue as to what's actually being masked, and is misleadingly similar to the functionally entirely different PMD_MASK, PUD_MASK and PGD_MASK, I don't really see what the problem is. But if this patch silences the incessant noise, then it will have achieved its goal (TODO: write test-case). Signed-off-by: Jeremy Fitzhardinge Cc: Rusty Russell Signed-off-by: Ingo Molnar --- arch/x86/mm/dump_pagetables.c | 10 +++++----- arch/x86/xen/enlighten.c | 2 +- arch/x86/xen/mmu.c | 8 ++++---- include/asm-x86/page.h | 6 +++--- include/asm-x86/paravirt.h | 2 +- include/asm-x86/pgtable-3level.h | 8 ++++---- include/asm-x86/pgtable.h | 4 ++-- include/asm-x86/pgtable_32.h | 4 ++-- include/asm-x86/pgtable_64.h | 10 +++++----- include/asm-x86/xen/page.h | 2 +- 10 files changed, 28 insertions(+), 28 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 0bb0caed897..cc174fc412b 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -148,8 +148,8 @@ static void note_page(struct seq_file *m, struct pg_state *st, * we have now. 
"break" is either changing perms, levels or * address space marker. */ - prot = pgprot_val(new_prot) & ~(PTE_MASK); - cur = pgprot_val(st->current_prot) & ~(PTE_MASK); + prot = pgprot_val(new_prot) & ~(PTE_PFN_MASK); + cur = pgprot_val(st->current_prot) & ~(PTE_PFN_MASK); if (!st->level) { /* First entry */ @@ -221,7 +221,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr, for (i = 0; i < PTRS_PER_PMD; i++) { st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT); if (!pmd_none(*start)) { - pgprotval_t prot = pmd_val(*start) & ~PTE_MASK; + pgprotval_t prot = pmd_val(*start) & ~PTE_PFN_MASK; if (pmd_large(*start) || !pmd_present(*start)) note_page(m, st, __pgprot(prot), 3); @@ -253,7 +253,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr, for (i = 0; i < PTRS_PER_PUD; i++) { st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT); if (!pud_none(*start)) { - pgprotval_t prot = pud_val(*start) & ~PTE_MASK; + pgprotval_t prot = pud_val(*start) & ~PTE_PFN_MASK; if (pud_large(*start) || !pud_present(*start)) note_page(m, st, __pgprot(prot), 2); @@ -288,7 +288,7 @@ static void walk_pgd_level(struct seq_file *m) for (i = 0; i < PTRS_PER_PGD; i++) { st.current_address = normalize_addr(i * PGD_LEVEL_MULT); if (!pgd_none(*start)) { - pgprotval_t prot = pgd_val(*start) & ~PTE_MASK; + pgprotval_t prot = pgd_val(*start) & ~PTE_PFN_MASK; if (pgd_large(*start) || !pgd_present(*start)) note_page(m, &st, __pgprot(prot), 1); diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 194bbd6e324..9ff6e3cbf08 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1435,7 +1435,7 @@ static unsigned long m2p(phys_addr_t maddr) { phys_addr_t paddr; - maddr &= PTE_MASK; + maddr &= PTE_PFN_MASK; paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT; return paddr; diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index a44d56e38bd..0db6912395e 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -343,8 +343,8 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, static pteval_t pte_mfn_to_pfn(pteval_t val) { if (val & _PAGE_PRESENT) { - unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT; - pteval_t flags = val & ~PTE_MASK; + unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; + pteval_t flags = val & ~PTE_PFN_MASK; val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags; } @@ -354,8 +354,8 @@ static pteval_t pte_mfn_to_pfn(pteval_t val) static pteval_t pte_pfn_to_mfn(pteval_t val) { if (val & _PAGE_PRESENT) { - unsigned long pfn = (val & PTE_MASK) >> PAGE_SHIFT; - pteval_t flags = val & ~PTE_MASK; + unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; + pteval_t flags = val & ~PTE_PFN_MASK; val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags; } diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h index 05d9bea2bfd..e99fb9fe6f8 100644 --- a/include/asm-x86/page.h +++ b/include/asm-x86/page.h @@ -18,8 +18,8 @@ (ie, 32-bit PAE). 
*/ #define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK) -/* PTE_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */ -#define PTE_MASK ((pteval_t)PHYSICAL_PAGE_MASK) +/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */ +#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK) #define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) #define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) @@ -146,7 +146,7 @@ static inline pteval_t native_pte_val(pte_t pte) static inline pteval_t native_pte_flags(pte_t pte) { - return native_pte_val(pte) & ~PTE_MASK; + return native_pte_val(pte) & ~PTE_PFN_MASK; } #define pgprot_val(x) ((x).pgprot) diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h index 5ca4639dc7d..fbbde93f12d 100644 --- a/include/asm-x86/paravirt.h +++ b/include/asm-x86/paravirt.h @@ -1089,7 +1089,7 @@ static inline pteval_t pte_flags(pte_t pte) pte.pte); #ifdef CONFIG_PARAVIRT_DEBUG - BUG_ON(ret & PTE_MASK); + BUG_ON(ret & PTE_PFN_MASK); #endif return ret; } diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h index c93dbb6c262..105057f3403 100644 --- a/include/asm-x86/pgtable-3level.h +++ b/include/asm-x86/pgtable-3level.h @@ -25,7 +25,7 @@ static inline int pud_none(pud_t pud) static inline int pud_bad(pud_t pud) { - return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0; + return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0; } static inline int pud_present(pud_t pud) @@ -120,9 +120,9 @@ static inline void pud_clear(pud_t *pudp) write_cr3(pgd); } -#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PTE_MASK)) +#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PTE_PFN_MASK)) -#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_MASK)) +#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK)) /* Find an entry in the second-level page table.. 
*/ @@ -160,7 +160,7 @@ static inline int pte_none(pte_t pte) static inline unsigned long pte_pfn(pte_t pte) { - return (pte_val(pte) & PTE_MASK) >> PAGE_SHIFT; + return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT; } /* diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h index 96aa76e691d..2b1746c9237 100644 --- a/include/asm-x86/pgtable.h +++ b/include/asm-x86/pgtable.h @@ -53,7 +53,7 @@ _PAGE_DIRTY) /* Set of bits not changed in pte_modify */ -#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_PCD | _PAGE_PWT | \ +#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ _PAGE_ACCESSED | _PAGE_DIRTY) #define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT) @@ -286,7 +286,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) return __pgprot(preservebits | addbits); } -#define pte_pgprot(x) __pgprot(pte_flags(x) & ~PTE_MASK) +#define pte_pgprot(x) __pgprot(pte_flags(x) & ~PTE_PFN_MASK) #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask) diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h index 0611abf96a5..525b53e65b4 100644 --- a/include/asm-x86/pgtable_32.h +++ b/include/asm-x86/pgtable_32.h @@ -88,7 +88,7 @@ extern unsigned long pg0[]; /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ #define pmd_none(x) (!(unsigned long)pmd_val((x))) #define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT) -#define pmd_bad(x) ((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) +#define pmd_bad(x) ((pmd_val(x) & (~PTE_PFN_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) @@ -139,7 +139,7 @@ static inline int pud_large(pud_t pud) { return 0; } #define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT)) #define pmd_page_vaddr(pmd) \ - ((unsigned long)__va(pmd_val((pmd)) & PTE_MASK)) + ((unsigned long)__va(pmd_val((pmd)) & PTE_PFN_MASK)) #if defined(CONFIG_HIGHPTE) #define pte_offset_map(dir, address) \ diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h index 805d3128bfc..ac5fff4cc58 100644 --- a/include/asm-x86/pgtable_64.h +++ b/include/asm-x86/pgtable_64.h @@ -158,17 +158,17 @@ static inline void native_pgd_clear(pgd_t *pgd) static inline int pgd_bad(pgd_t pgd) { - return (pgd_val(pgd) & ~(PTE_MASK | _PAGE_USER)) != _KERNPG_TABLE; + return (pgd_val(pgd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE; } static inline int pud_bad(pud_t pud) { - return (pud_val(pud) & ~(PTE_MASK | _PAGE_USER)) != _KERNPG_TABLE; + return (pud_val(pud) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE; } static inline int pmd_bad(pmd_t pmd) { - return (pmd_val(pmd) & ~(PTE_MASK | _PAGE_USER)) != _KERNPG_TABLE; + return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE; } #define pte_none(x) (!pte_val((x))) @@ -193,7 +193,7 @@ static inline int pmd_bad(pmd_t pmd) * Level 4 access. 
*/ #define pgd_page_vaddr(pgd) \ - ((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_MASK)) + ((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_PFN_MASK)) #define pgd_page(pgd) (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT)) #define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT) static inline int pgd_large(pgd_t pgd) { return 0; } @@ -216,7 +216,7 @@ static inline int pud_large(pud_t pte) } /* PMD - Level 2 access */ -#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_MASK)) +#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_PFN_MASK)) #define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT)) #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) diff --git a/include/asm-x86/xen/page.h b/include/asm-x86/xen/page.h index 05e678a8662..7b3835d3b77 100644 --- a/include/asm-x86/xen/page.h +++ b/include/asm-x86/xen/page.h @@ -124,7 +124,7 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn) static inline unsigned long pte_mfn(pte_t pte) { - return (pte.pte & PTE_MASK) >> PAGE_SHIFT; + return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT; } static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot) -- cgit v1.2.3-70-g09d2 From 77be1fabd024b37423d12f832b1fbdb95dbdf494 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Mon, 21 Jul 2008 22:59:56 -0700 Subject: x86: add PTE_FLAGS_MASK PTE_PFN_MASK was getting lonely, so I made it a friend. Signed-off-by: Jeremy Fitzhardinge Cc: Rusty Russell Signed-off-by: Ingo Molnar --- arch/x86/mm/dump_pagetables.c | 6 +++--- arch/x86/xen/mmu.c | 4 ++-- include/asm-x86/page.h | 5 ++++- include/asm-x86/pgtable.h | 2 +- include/asm-x86/pgtable_32.h | 2 +- 5 files changed, 11 insertions(+), 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index cc174fc412b..a20d1fa64b4 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -221,7 +221,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr, for (i = 0; i < PTRS_PER_PMD; i++) { st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT); if (!pmd_none(*start)) { - pgprotval_t prot = pmd_val(*start) & ~PTE_PFN_MASK; + pgprotval_t prot = pmd_val(*start) & PTE_FLAGS_MASK; if (pmd_large(*start) || !pmd_present(*start)) note_page(m, st, __pgprot(prot), 3); @@ -253,7 +253,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr, for (i = 0; i < PTRS_PER_PUD; i++) { st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT); if (!pud_none(*start)) { - pgprotval_t prot = pud_val(*start) & ~PTE_PFN_MASK; + pgprotval_t prot = pud_val(*start) & PTE_FLAGS_MASK; if (pud_large(*start) || !pud_present(*start)) note_page(m, st, __pgprot(prot), 2); @@ -288,7 +288,7 @@ static void walk_pgd_level(struct seq_file *m) for (i = 0; i < PTRS_PER_PGD; i++) { st.current_address = normalize_addr(i * PGD_LEVEL_MULT); if (!pgd_none(*start)) { - pgprotval_t prot = pgd_val(*start) & ~PTE_PFN_MASK; + pgprotval_t prot = pgd_val(*start) & PTE_FLAGS_MASK; if (pgd_large(*start) || !pgd_present(*start)) note_page(m, &st, __pgprot(prot), 1); diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 0db6912395e..aa37469da69 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -344,7 +344,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val) { if (val & _PAGE_PRESENT) { unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; - pteval_t flags = val & ~PTE_PFN_MASK; + pteval_t flags = val & PTE_FLAGS_MASK; 
val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags; } @@ -355,7 +355,7 @@ static pteval_t pte_pfn_to_mfn(pteval_t val) { if (val & _PAGE_PRESENT) { unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; - pteval_t flags = val & ~PTE_PFN_MASK; + pteval_t flags = val & PTE_FLAGS_MASK; val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags; } diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h index e99fb9fe6f8..6c846228948 100644 --- a/include/asm-x86/page.h +++ b/include/asm-x86/page.h @@ -21,6 +21,9 @@ /* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */ #define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK) +/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */ +#define PTE_FLAGS_MASK (~PTE_PFN_MASK) + #define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) #define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) @@ -146,7 +149,7 @@ static inline pteval_t native_pte_val(pte_t pte) static inline pteval_t native_pte_flags(pte_t pte) { - return native_pte_val(pte) & ~PTE_PFN_MASK; + return native_pte_val(pte) & PTE_FLAGS_MASK; } #define pgprot_val(x) ((x).pgprot) diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h index 2b1746c9237..3e5dbc4195f 100644 --- a/include/asm-x86/pgtable.h +++ b/include/asm-x86/pgtable.h @@ -286,7 +286,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) return __pgprot(preservebits | addbits); } -#define pte_pgprot(x) __pgprot(pte_flags(x) & ~PTE_PFN_MASK) +#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK) #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask) diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h index 525b53e65b4..5c3b26567a9 100644 --- a/include/asm-x86/pgtable_32.h +++ b/include/asm-x86/pgtable_32.h @@ -88,7 +88,7 @@ extern unsigned long pg0[]; /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ #define pmd_none(x) (!(unsigned long)pmd_val((x))) #define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT) -#define pmd_bad(x) ((pmd_val(x) & (~PTE_PFN_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) +#define pmd_bad(x) ((pmd_val(x) & (PTE_FLAGS_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) -- cgit v1.2.3-70-g09d2 From d536b1f86591fb081c7a56eab04e711eb4dab951 Mon Sep 17 00:00:00 2001 From: Jan Kratochvil Date: Tue, 22 Jul 2008 14:00:47 +0200 Subject: x86: fix crash due to missing debugctlmsr on AMD K6-3 Currently, using PTRACE_SINGLEBLOCK on an AMD K6-3 (i586) crashes the kernel, because it wrongly assumes that the DEBUGCTLMSR MSR register exists there. The assumption is also removed for some other non-K6 CPUs I am not sure about; if it is wrong for those, the only cost is a small inefficiency. Based on info from Roland McGrath, Chuck Ebbert and Mikulas Patocka. 
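The fix itself is a one-line Kconfig dependency (see the diff below), but the pattern it gates is easy to sketch in C. The helper name here is hypothetical, purely for illustration; wrmsrl() and MSR_IA32_DEBUGCTLMSR are the real kernel interfaces such code would use:

#include <asm/msr.h>

/*
 * Hypothetical sketch: with CONFIG_X86_DEBUGCTLMSR disabled on CPU
 * families that may lack the MSR (K6, WinChip, Cyrix III, ...),
 * writes to MSR_IA32_DEBUGCTLMSR compile away instead of faulting
 * at run time.
 */
static inline void set_debugctlmsr_safe(unsigned long val)
{
#ifdef CONFIG_X86_DEBUGCTLMSR
	wrmsrl(MSR_IA32_DEBUGCTLMSR, val);
#endif
}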
More info at: https://bugzilla.redhat.com/show_bug.cgi?id=456175 Signed-off-by: Jan Kratochvil Cc: Signed-off-by: Ingo Molnar --- arch/x86/Kconfig.cpu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 54b8c02c71e..2c518fbc52e 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -414,4 +414,4 @@ config X86_MINIMUM_CPU_FAMILY config X86_DEBUGCTLMSR def_bool y - depends on !(M586MMX || M586TSC || M586 || M486 || M386) + depends on !(MK6 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386) -- cgit v1.2.3-70-g09d2 From 15e8f348db372dec21229fda5d52ae6ee7e64666 Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Mon, 23 Jun 2008 20:41:12 -0700 Subject: x86_64: remove bogus optimization in sysret_signal This short-circuit path in sysret_signal looks wrong to me. AFAICT, in practice the branch is never taken--and if it were, it would go wrong. To wit, try loading a module whose init function does set_thread_flag(TIF_IRET), and see insmod crash (presumably with a wrong user stack pointer). This is because the FIXUP_TOP_OF_STACK work hasn't been done yet when we jump around the call to ptregscall_common and get to int_with_check--where it expects the user RSP,SS,CS and EFLAGS to have been stored by FIXUP_TOP_OF_STACK. I don't think it's normally possible to get to sysret_signal with no _TIF_DO_NOTIFY_MASK bits set anyway, so these two instructions are already superfluous. If it ever did happen, it is harmless to call do_notify_resume with nothing for it to do. Signed-off-by: Roland McGrath --- arch/x86/kernel/entry_64.S | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 8410e26f418..a169225869c 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -402,16 +402,12 @@ sysret_careful: sysret_signal: TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) - testl $_TIF_DO_NOTIFY_MASK,%edx - jz 1f - - /* Really a signal */ /* edx: work flags (arg3) */ leaq do_notify_resume(%rip),%rax leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1 xorl %esi,%esi # oldset -> arg2 call ptregscall_common -1: movl $_TIF_WORK_MASK,%edi + movl $_TIF_WORK_MASK,%edi /* Use IRET because user could have changed frame. This works because ptregscall_common has called FIXUP_TOP_OF_STACK. */ DISABLE_INTERRUPTS(CLBR_NONE) -- cgit v1.2.3-70-g09d2 From 86a1c34a929f30fde8ad01ea8245df61ddcf58b7 Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Mon, 23 Jun 2008 15:37:04 -0700 Subject: x86_64 syscall audit fast-path This adds a fast path for 64-bit syscall entry and exit when TIF_SYSCALL_AUDIT is set, but no other kind of syscall tracing. This path does not need to save and restore all registers as the general case of tracing does. Avoiding the iret return path when syscall audit is enabled helps performance a lot. Signed-off-by: Roland McGrath --- arch/x86/kernel/entry_64.S | 48 ++++++++++++++++++++++++++++++++++++++++++++++ kernel/auditsc.c | 3 ++- 2 files changed, 50 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index a169225869c..db7d34a89d2 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -53,6 +53,12 @@ #include #include +/* Avoid __ASSEMBLER__'ifying just for this. 
*/ +#include +#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) +#define __AUDIT_ARCH_64BIT 0x80000000 +#define __AUDIT_ARCH_LE 0x40000000 + .code64 #ifdef CONFIG_FTRACE @@ -351,6 +357,7 @@ ENTRY(system_call_after_swapgs) GET_THREAD_INFO(%rcx) testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx) jnz tracesys +system_call_fastpath: cmpq $__NR_syscall_max,%rax ja badsys movq %r10,%rcx @@ -402,6 +409,10 @@ sysret_careful: sysret_signal: TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) +#ifdef CONFIG_AUDITSYSCALL + bt $TIF_SYSCALL_AUDIT,%edx + jc sysret_audit +#endif /* edx: work flags (arg3) */ leaq do_notify_resume(%rip),%rax leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1 @@ -418,8 +429,45 @@ badsys: movq $-ENOSYS,RAX-ARGOFFSET(%rsp) jmp ret_from_sys_call +#ifdef CONFIG_AUDITSYSCALL + /* + * Fast path for syscall audit without full syscall trace. + * We just call audit_syscall_entry() directly, and then + * jump back to the normal fast path. + */ +auditsys: + movq %r10,%r9 /* 6th arg: 4th syscall arg */ + movq %rdx,%r8 /* 5th arg: 3rd syscall arg */ + movq %rsi,%rcx /* 4th arg: 2nd syscall arg */ + movq %rdi,%rdx /* 3rd arg: 1st syscall arg */ + movq %rax,%rsi /* 2nd arg: syscall number */ + movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */ + call audit_syscall_entry + LOAD_ARGS 0 /* reload call-clobbered registers */ + jmp system_call_fastpath + + /* + * Return fast path for syscall audit. Call audit_syscall_exit() + * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT + * masked off. + */ +sysret_audit: + movq %rax,%rsi /* second arg, syscall return value */ + cmpq $0,%rax /* is it < 0? */ + setl %al /* 1 if so, 0 if not */ + movzbl %al,%edi /* zero-extend that into %edi */ + inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */ + call audit_syscall_exit + movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi + jmp sysret_check +#endif /* CONFIG_AUDITSYSCALL */ + /* Do syscall tracing */ tracesys: +#ifdef CONFIG_AUDITSYSCALL + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx) + jz auditsys +#endif SAVE_REST movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ FIXUP_TOP_OF_STACK %rdi diff --git a/kernel/auditsc.c b/kernel/auditsc.c index c10e7aae04d..4699950e65b 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1476,7 +1476,8 @@ void audit_syscall_entry(int arch, int major, struct audit_context *context = tsk->audit_context; enum audit_state state; - BUG_ON(!context); + if (unlikely(!context)) + return; /* * This happens only on certain architectures that make system -- cgit v1.2.3-70-g09d2 From 5cbf1565f29eb57a86a305b08836613508e294d7 Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Tue, 24 Jun 2008 01:13:31 -0700 Subject: x86_64 ia32 syscall audit fast-path This adds fast paths for 32-bit syscall entry and exit when TIF_SYSCALL_AUDIT is set, but no other kind of syscall tracing. These paths do not need to save and restore all registers as the general case of tracing does. Avoiding the iret return path when syscall audit is enabled helps performance a lot. 
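Stripped of the register bookkeeping, the audited fast path in the assembly above amounts to the following C-level sketch (an editor's illustration, not part of the patch; the function name is hypothetical, while audit_syscall_entry(), audit_syscall_exit() and the AUDITSC_* result codes match the calls visible in the assembly):

static long audited_fastpath(int nr, unsigned long a1, unsigned long a2,
			     unsigned long a3, unsigned long a4,
			     unsigned long a5, unsigned long a6)
{
	long ret;

	/* entry side: audit arch, syscall number, first four args */
	audit_syscall_entry(AUDIT_ARCH_X86_64, nr, a1, a2, a3, a4);
	ret = sys_call_table[nr](a1, a2, a3, a4, a5, a6);
	/* exit side: success/failure code plus the return value */
	audit_syscall_exit(ret < 0 ? AUDITSC_FAILURE : AUDITSC_SUCCESS, ret);
	return ret;	/* delivered via sysret, skipping the iret slow path */
}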
Signed-off-by: Roland McGrath --- arch/x86/ia32/ia32entry.S | 91 ++++++++++++++++++++++++++++++++++++++++++++-- arch/x86/kernel/entry_64.S | 1 + 2 files changed, 88 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 23d146ce676..021d71bc69b 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -15,6 +15,16 @@ #include #include +/* Avoid __ASSEMBLER__'ifying just for this. */ +#include +#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) +#define __AUDIT_ARCH_LE 0x40000000 + +#ifndef CONFIG_AUDITSYSCALL +#define sysexit_audit int_ret_from_sys_call +#define sysretl_audit int_ret_from_sys_call +#endif + #define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8) .macro IA32_ARG_FIXUP noebp=0 @@ -148,13 +158,15 @@ ENTRY(ia32_sysenter_target) ja ia32_badsys sysenter_do_call: IA32_ARG_FIXUP 1 +sysenter_dispatch: call *ia32_sys_call_table(,%rax,8) movq %rax,RAX-ARGOFFSET(%rsp) GET_THREAD_INFO(%r10) DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF testl $_TIF_ALLWORK_MASK,TI_flags(%r10) - jnz int_ret_from_sys_call + jnz sysexit_audit +sysexit_from_sys_call: andl $~TS_COMPAT,TI_status(%r10) /* clear IF, that popfq doesn't enable interrupts early */ andl $~0x200,EFLAGS-R11(%rsp) @@ -170,9 +182,63 @@ sysenter_do_call: TRACE_IRQS_ON ENABLE_INTERRUPTS_SYSEXIT32 -sysenter_tracesys: +#ifdef CONFIG_AUDITSYSCALL + .macro auditsys_entry_common + movl %esi,%r9d /* 6th arg: 4th syscall arg */ + movl %edx,%r8d /* 5th arg: 3rd syscall arg */ + /* (already in %ecx) 4th arg: 2nd syscall arg */ + movl %ebx,%edx /* 3rd arg: 1st syscall arg */ + movl %eax,%esi /* 2nd arg: syscall number */ + movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */ + call audit_syscall_entry + movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */ + cmpl $(IA32_NR_syscalls-1),%eax + ja ia32_badsys + movl %ebx,%edi /* reload 1st syscall arg */ + movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */ + movl RDX-ARGOFFSET(%rsp),%edx /* reload 3rd syscall arg */ + movl RSI-ARGOFFSET(%rsp),%ecx /* reload 4th syscall arg */ + movl RDI-ARGOFFSET(%rsp),%r8d /* reload 5th syscall arg */ + .endm + + .macro auditsys_exit exit + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) + jnz int_ret_from_sys_call + TRACE_IRQS_ON + sti + movl %eax,%esi /* second arg, syscall return value */ + cmpl $0,%eax /* is it < 0? 
*/ + setl %al /* 1 if so, 0 if not */ + movzbl %al,%edi /* zero-extend that into %edi */ + inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */ + call audit_syscall_exit + GET_THREAD_INFO(%r10) + movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */ + movl RBP-ARGOFFSET(%rsp),%ebp /* reload user register value */ + movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi + cli + TRACE_IRQS_OFF + testl %edi,TI_flags(%r10) + jnz int_with_check + jmp \exit + .endm + +sysenter_auditsys: CFI_RESTORE_STATE + auditsys_entry_common + movl %ebp,%r9d /* reload 6th syscall arg */ + jmp sysenter_dispatch + +sysexit_audit: + auditsys_exit sysexit_from_sys_call +#endif + +sysenter_tracesys: xchgl %r9d,%ebp +#ifdef CONFIG_AUDITSYSCALL + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) + jz sysenter_auditsys +#endif SAVE_REST CLEAR_RREGS movq %r9,R9(%rsp) @@ -252,13 +318,15 @@ cstar_do_call: cmpl $IA32_NR_syscalls-1,%eax ja ia32_badsys IA32_ARG_FIXUP 1 +cstar_dispatch: call *ia32_sys_call_table(,%rax,8) movq %rax,RAX-ARGOFFSET(%rsp) GET_THREAD_INFO(%r10) DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF testl $_TIF_ALLWORK_MASK,TI_flags(%r10) - jnz int_ret_from_sys_call + jnz sysretl_audit +sysretl_from_sys_call: andl $~TS_COMPAT,TI_status(%r10) RESTORE_ARGS 1,-ARG_SKIP,1,1,1 movl RIP-ARGOFFSET(%rsp),%ecx @@ -270,8 +338,23 @@ cstar_do_call: CFI_RESTORE rsp USERGS_SYSRET32 -cstar_tracesys: +#ifdef CONFIG_AUDITSYSCALL +cstar_auditsys: CFI_RESTORE_STATE + movl %r9d,R9-ARGOFFSET(%rsp) /* register to be clobbered by call */ + auditsys_entry_common + movl R9-ARGOFFSET(%rsp),%r9d /* reload 6th syscall arg */ + jmp cstar_dispatch + +sysretl_audit: + auditsys_exit sysretl_from_sys_call +#endif + +cstar_tracesys: +#ifdef CONFIG_AUDITSYSCALL + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) + jz cstar_auditsys +#endif xchgl %r9d,%ebp SAVE_REST CLEAR_RREGS diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index db7d34a89d2..89434d43960 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -492,6 +492,7 @@ tracesys: * Has correct top of stack, but partial stack frame. */ .globl int_ret_from_sys_call + .globl int_with_check int_ret_from_sys_call: DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF -- cgit v1.2.3-70-g09d2 From af0575bba0f46dd9054d46e0a88c57afad3bf4d2 Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Tue, 24 Jun 2008 04:16:52 -0700 Subject: i386 syscall audit fast-path This adds fast paths for 32-bit syscall entry and exit when TIF_SYSCALL_AUDIT is set, but no other kind of syscall tracing. These paths do not need to save and restore all registers as the general case of tracing does. Avoiding the iret return path when syscall audit is enabled helps performance a lot. Signed-off-by: Roland McGrath --- arch/x86/kernel/entry_32.S | 55 ++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 53 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index cdfd94cc6b1..109792bc7cf 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -54,6 +54,16 @@ #include #include +/* Avoid __ASSEMBLER__'ifying just for this. 
*/ +#include +#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) +#define __AUDIT_ARCH_LE 0x40000000 + +#ifndef CONFIG_AUDITSYSCALL +#define sysenter_audit syscall_trace_entry +#define sysexit_audit syscall_exit_work +#endif + /* * We use macros for low-level operations which need to be overridden * for paravirtualization. The following will never clobber any registers: @@ -333,7 +343,8 @@ sysenter_past_esp: /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp) - jnz syscall_trace_entry + jnz sysenter_audit +sysenter_do_call: cmpl $(nr_syscalls), %eax jae syscall_badsys call *sys_call_table(,%eax,4) @@ -343,7 +354,8 @@ sysenter_past_esp: TRACE_IRQS_OFF movl TI_flags(%ebp), %ecx testw $_TIF_ALLWORK_MASK, %cx - jne syscall_exit_work + jne sysexit_audit +sysenter_exit: /* if something modifies registers it must also disable sysexit */ movl PT_EIP(%esp), %edx movl PT_OLDESP(%esp), %ecx @@ -351,6 +363,45 @@ sysenter_past_esp: TRACE_IRQS_ON 1: mov PT_FS(%esp), %fs ENABLE_INTERRUPTS_SYSEXIT + +#ifdef CONFIG_AUDITSYSCALL +sysenter_audit: + testw $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp) + jnz syscall_trace_entry + addl $4,%esp + CFI_ADJUST_CFA_OFFSET -4 + /* %esi already in 8(%esp) 6th arg: 4th syscall arg */ + /* %edx already in 4(%esp) 5th arg: 3rd syscall arg */ + /* %ecx already in 0(%esp) 4th arg: 2nd syscall arg */ + movl %ebx,%ecx /* 3rd arg: 1st syscall arg */ + movl %eax,%edx /* 2nd arg: syscall number */ + movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */ + call audit_syscall_entry + pushl %ebx + CFI_ADJUST_CFA_OFFSET 4 + movl PT_EAX(%esp),%eax /* reload syscall number */ + jmp sysenter_do_call + +sysexit_audit: + testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx + jne syscall_exit_work + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_ANY) + movl %eax,%edx /* second arg, syscall return value */ + cmpl $0,%eax /* is it < 0? */ + setl %al /* 1 if so, 0 if not */ + movzbl %al,%eax /* zero-extend that */ + inc %eax /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */ + call audit_syscall_exit + DISABLE_INTERRUPTS(CLBR_ANY) + TRACE_IRQS_OFF + movl TI_flags(%ebp), %ecx + testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx + jne syscall_exit_work + movl PT_EAX(%esp),%eax /* reload syscall return value */ + jmp sysenter_exit +#endif + CFI_ENDPROC .pushsection .fixup,"ax" 2: movl $0,PT_FS(%esp) -- cgit v1.2.3-70-g09d2 From 9e882c9282512cc622752f29ec7c29ce338fc1eb Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Mon, 21 Jul 2008 16:49:54 -0700 Subject: x86: call early_cpu_init at the same point Call early_cpu_init() at the same (early) point in setup_arch(). The x86_64 code was calling it relatively late, after the point where other arch code needs to do cpu-related setup that depends on it. 
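Condensed, the ordering change in setup_arch() looks like this (a sketch of the resulting flow under the patch, following the diff below, not the full function):

void __init setup_arch(char **cmdline_p)
{
	/* ... banner printing and early per-arch hooks ... */
	early_cpu_init();	/* now runs here on both 32- and 64-bit */
	early_ioremap_init();
	/* ... the later, 64-bit-only early_cpu_init() call is removed ... */
}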
Signed-off-by: Jeremy Fitzhardinge Cc: Mark McLoughlin Cc: Eduardo Habkost Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index b4aacb9f52e..b520dae02bf 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -597,11 +597,11 @@ void __init setup_arch(char **cmdline_p) memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); visws_early_detect(); pre_setup_arch_hook(); - early_cpu_init(); #else printk(KERN_INFO "Command line: %s\n", boot_command_line); #endif + early_cpu_init(); early_ioremap_init(); ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev); @@ -665,9 +665,6 @@ void __init setup_arch(char **cmdline_p) bss_resource.start = virt_to_phys(&__bss_start); bss_resource.end = virt_to_phys(&__bss_stop)-1; -#ifdef CONFIG_X86_64 - early_cpu_init(); -#endif strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); *cmdline_p = command_line; -- cgit v1.2.3-70-g09d2 From 2dc1697eb355c34f9f7bcbbb83f490de248c360a Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Mon, 21 Jul 2008 16:49:58 -0700 Subject: xen: don't use sysret for sysexit32 When implementing sysexit32, don't let Xen use sysret to return to userspace. That results in usermode register state being trashed. Signed-off-by: Jeremy Fitzhardinge Cc: Mark McLoughlin Cc: Eduardo Habkost Signed-off-by: Ingo Molnar --- arch/x86/xen/xen-asm_64.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S index 4038cbfe333..7f58304fafb 100644 --- a/arch/x86/xen/xen-asm_64.S +++ b/arch/x86/xen/xen-asm_64.S @@ -173,7 +173,7 @@ ENTRY(xen_sysexit) pushq $__USER32_CS pushq %rdx - pushq $VGCF_in_syscall + pushq $0 1: jmp hypercall_iret ENDPATCH(xen_sysexit) RELOC(xen_sysexit, 1b+1) -- cgit v1.2.3-70-g09d2 From 9d25d4db81833029d30b7b03cc1000cbbe09e192 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 21 Jul 2008 18:41:26 +0100 Subject: x86: BUILD_IRQ say .text to avoid .data.percpu When I edit the x86_64 Makefile to -fno-unit-at-a-time, bootup panics on 0xCCs in IRQ0x3e_interrupt(): IRQ0x20_interrupt etc. have got linked into .data.percpu. Perhaps there are other ways of triggering that: specify ".text" in the BUILD_IRQ() macro for safety. I've been using -fno-unit-at-a-time (to lessen inlining, for easier debugging) for a long time. Signed-off-by: Hugh Dickins Cc: Mike Travis Signed-off-by: Ingo Molnar --- arch/x86/kernel/irqinit_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c index 0373e88de95..9414125f19c 100644 --- a/arch/x86/kernel/irqinit_64.c +++ b/arch/x86/kernel/irqinit_64.c @@ -43,7 +43,7 @@ #define BUILD_IRQ(nr) \ asmlinkage void IRQ_NAME(nr); \ - asm("\n.p2align\n" \ + asm("\n.text\n.p2align\n" \ "IRQ" #nr "_interrupt:\n\t" \ "push $~(" #nr ") ; " \ "jmp common_interrupt"); -- cgit v1.2.3-70-g09d2 From b61bfa3c462671c48a51fb5c31af337c5a996a04 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 23 Jul 2008 21:26:55 -0700 Subject: mm: move bootmem descriptors definition to a single place There are a lot of places that define either a single bootmem descriptor or an array of them. Use only one central array with MAX_NUMNODES items instead. 
Signed-off-by: Johannes Weiner Acked-by: Ralf Baechle Cc: Ingo Molnar Cc: Richard Henderson Cc: Russell King Cc: Tony Luck Cc: Hirokazu Takata Cc: Geert Uytterhoeven Cc: Kyle McMartin Cc: Paul Mackerras Cc: Paul Mundt Cc: David S. Miller Cc: Yinghai Lu Cc: Christoph Lameter Cc: Mel Gorman Cc: Andy Whitcroft Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/mm/numa.c | 8 ++++---- arch/arm/mm/discontig.c | 34 ++++++++++++++++------------------ arch/ia64/mm/discontig.c | 11 +++++------ arch/m32r/mm/discontig.c | 4 +--- arch/m68k/mm/init.c | 4 +--- arch/mips/sgi-ip27/ip27-memory.c | 4 +--- arch/parisc/mm/init.c | 3 +-- arch/powerpc/mm/numa.c | 3 +-- arch/sh/mm/numa.c | 5 ++--- arch/sparc64/mm/init.c | 3 +-- arch/x86/mm/discontig_32.c | 3 +-- arch/x86/mm/numa_64.c | 4 +--- include/linux/bootmem.h | 2 ++ mm/bootmem.c | 2 ++ mm/page_alloc.c | 4 +--- 15 files changed, 40 insertions(+), 54 deletions(-) (limited to 'arch/x86') diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c index 10ab7833e83..a53fda0481c 100644 --- a/arch/alpha/mm/numa.c +++ b/arch/alpha/mm/numa.c @@ -19,7 +19,6 @@ #include pg_data_t node_data[MAX_NUMNODES]; -bootmem_data_t node_bdata[MAX_NUMNODES]; EXPORT_SYMBOL(node_data); #undef DEBUG_DISCONTIG @@ -141,7 +140,7 @@ setup_memory_node(int nid, void *kernel_end) printk(" not enough mem to reserve NODE_DATA"); return; } - NODE_DATA(nid)->bdata = &node_bdata[nid]; + NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; printk(" Detected node memory: start %8lu, end %8lu\n", node_min_pfn, node_max_pfn); @@ -304,8 +303,9 @@ void __init paging_init(void) dma_local_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; for_each_online_node(nid) { - unsigned long start_pfn = node_bdata[nid].node_boot_start >> PAGE_SHIFT; - unsigned long end_pfn = node_bdata[nid].node_low_pfn; + bootmem_data_t *bdata = &bootmem_node_data[nid]; + unsigned long start_pfn = bdata->node_boot_start >> PAGE_SHIFT; + unsigned long end_pfn = bdata->node_low_pfn; if (dma_local_pfn >= end_pfn - start_pfn) zones_size[ZONE_DMA] = end_pfn - start_pfn; diff --git a/arch/arm/mm/discontig.c b/arch/arm/mm/discontig.c index 1e560218950..c8c0c4b0f0a 100644 --- a/arch/arm/mm/discontig.c +++ b/arch/arm/mm/discontig.c @@ -21,26 +21,24 @@ * Our node_data structure for discontiguous memory. 
*/ -static bootmem_data_t node_bootmem_data[MAX_NUMNODES]; - pg_data_t discontig_node_data[MAX_NUMNODES] = { - { .bdata = &node_bootmem_data[0] }, - { .bdata = &node_bootmem_data[1] }, - { .bdata = &node_bootmem_data[2] }, - { .bdata = &node_bootmem_data[3] }, + { .bdata = &bootmem_node_data[0] }, + { .bdata = &bootmem_node_data[1] }, + { .bdata = &bootmem_node_data[2] }, + { .bdata = &bootmem_node_data[3] }, #if MAX_NUMNODES == 16 - { .bdata = &node_bootmem_data[4] }, - { .bdata = &node_bootmem_data[5] }, - { .bdata = &node_bootmem_data[6] }, - { .bdata = &node_bootmem_data[7] }, - { .bdata = &node_bootmem_data[8] }, - { .bdata = &node_bootmem_data[9] }, - { .bdata = &node_bootmem_data[10] }, - { .bdata = &node_bootmem_data[11] }, - { .bdata = &node_bootmem_data[12] }, - { .bdata = &node_bootmem_data[13] }, - { .bdata = &node_bootmem_data[14] }, - { .bdata = &node_bootmem_data[15] }, + { .bdata = &bootmem_node_data[4] }, + { .bdata = &bootmem_node_data[5] }, + { .bdata = &bootmem_node_data[6] }, + { .bdata = &bootmem_node_data[7] }, + { .bdata = &bootmem_node_data[8] }, + { .bdata = &bootmem_node_data[9] }, + { .bdata = &bootmem_node_data[10] }, + { .bdata = &bootmem_node_data[11] }, + { .bdata = &bootmem_node_data[12] }, + { .bdata = &bootmem_node_data[13] }, + { .bdata = &bootmem_node_data[14] }, + { .bdata = &bootmem_node_data[15] }, #endif }; diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 544dc420c65..2fcf8464331 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -36,7 +36,6 @@ struct early_node_data { struct ia64_node_data *node_data; unsigned long pernode_addr; unsigned long pernode_size; - struct bootmem_data bootmem_data; unsigned long num_physpages; #ifdef CONFIG_ZONE_DMA unsigned long num_dma_physpages; @@ -76,7 +75,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len, int node) { unsigned long cstart, epfn, end = start + len; - struct bootmem_data *bdp = &mem_data[node].bootmem_data; + struct bootmem_data *bdp = &bootmem_node_data[node]; epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT; cstart = GRANULEROUNDDOWN(start); @@ -167,7 +166,7 @@ static void __init fill_pernode(int node, unsigned long pernode, { void *cpu_data; int cpus = early_nr_cpus_node(node); - struct bootmem_data *bdp = &mem_data[node].bootmem_data; + struct bootmem_data *bdp = &bootmem_node_data[node]; mem_data[node].pernode_addr = pernode; mem_data[node].pernode_size = pernodesize; @@ -224,7 +223,7 @@ static int __init find_pernode_space(unsigned long start, unsigned long len, { unsigned long epfn; unsigned long pernodesize = 0, pernode, pages, mapsize; - struct bootmem_data *bdp = &mem_data[node].bootmem_data; + struct bootmem_data *bdp = &bootmem_node_data[node]; epfn = (start + len) >> PAGE_SHIFT; @@ -440,7 +439,7 @@ void __init find_memory(void) efi_memmap_walk(find_max_min_low_pfn, NULL); for_each_online_node(node) - if (mem_data[node].bootmem_data.node_low_pfn) { + if (bootmem_node_data[node].node_low_pfn) { node_clear(node, memory_less_mask); mem_data[node].min_pfn = ~0UL; } @@ -460,7 +459,7 @@ void __init find_memory(void) else if (node_isset(node, memory_less_mask)) continue; - bdp = &mem_data[node].bootmem_data; + bdp = &bootmem_node_data[node]; pernode = mem_data[node].pernode_addr; pernodesize = mem_data[node].pernode_size; map = pernode + pernodesize; diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c index 07c1af7dc0e..aa9145ef6cc 100644 --- a/arch/m32r/mm/discontig.c +++ b/arch/m32r/mm/discontig.c @@ -20,7 +20,6 @@ 
extern char _end[]; struct pglist_data *node_data[MAX_NUMNODES]; EXPORT_SYMBOL(node_data); -static bootmem_data_t node_bdata[MAX_NUMNODES] __initdata; pg_data_t m32r_node_data[MAX_NUMNODES]; @@ -81,7 +80,7 @@ unsigned long __init setup_memory(void) for_each_online_node(nid) { mp = &mem_prof[nid]; NODE_DATA(nid)=(pg_data_t *)&m32r_node_data[nid]; - NODE_DATA(nid)->bdata = &node_bdata[nid]; + NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; min_pfn = mp->start_pfn; max_pfn = mp->start_pfn + mp->pages; bootmap_size = init_bootmem_node(NODE_DATA(nid), mp->free_pfn, @@ -163,4 +162,3 @@ unsigned long __init zone_sizes_init(void) return holes; } - diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index d8fb9c5303c..79f5f94d480 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -32,8 +32,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); -static bootmem_data_t __initdata bootmem_data[MAX_NUMNODES]; - pg_data_t pg_data_map[MAX_NUMNODES]; EXPORT_SYMBOL(pg_data_map); @@ -58,7 +56,7 @@ void __init m68k_setup_node(int node) pg_data_table[i] = pg_data_map + node; } #endif - pg_data_map[node].bdata = bootmem_data + node; + pg_data_map[node].bdata = bootmem_node_data + node; node_set_online(node); } diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index 42cd1095630..060d853d7b3 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -33,8 +33,6 @@ #define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT) #define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT) -static struct bootmem_data __initdata plat_node_bdata[MAX_COMPACT_NODES]; - struct node_data *__node_data[MAX_COMPACT_NODES]; EXPORT_SYMBOL(__node_data); @@ -403,7 +401,7 @@ static void __init node_mem_init(cnodeid_t node) */ __node_data[node] = __va(slot_freepfn << PAGE_SHIFT); - NODE_DATA(node)->bdata = &plat_node_bdata[node]; + NODE_DATA(node)->bdata = &bootmem_node_data[node]; NODE_DATA(node)->node_start_pfn = start_pfn; NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn; diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index b4d6c8777ed..0ddf4904640 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -36,7 +36,6 @@ extern int data_start; #ifdef CONFIG_DISCONTIGMEM struct node_map_data node_data[MAX_NUMNODES] __read_mostly; -bootmem_data_t bmem_data[MAX_NUMNODES] __read_mostly; unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; #endif @@ -262,7 +261,7 @@ static void __init setup_bootmem(void) #ifdef CONFIG_DISCONTIGMEM for (i = 0; i < MAX_PHYSMEM_RANGES; i++) { memset(NODE_DATA(i), 0, sizeof(pg_data_t)); - NODE_DATA(i)->bdata = &bmem_data[i]; + NODE_DATA(i)->bdata = &bootmem_node_data[i]; } memset(pfnnid_map, 0xff, sizeof(pfnnid_map)); diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index cf4bffba6f7..d9a18135133 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -39,7 +39,6 @@ EXPORT_SYMBOL(numa_cpu_lookup_table); EXPORT_SYMBOL(numa_cpumask_lookup_table); EXPORT_SYMBOL(node_data); -static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES]; static int min_common_depth; static int n_mem_addr_cells, n_mem_size_cells; @@ -816,7 +815,7 @@ void __init do_init_bootmem(void) dbg("node %d\n", nid); dbg("NODE_DATA() = %p\n", NODE_DATA(nid)); - NODE_DATA(nid)->bdata = &plat_node_bdata[nid]; + NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; NODE_DATA(nid)->node_start_pfn = start_pfn; NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c index 1663199ce88..095d93bec7c 
100644 --- a/arch/sh/mm/numa.c +++ b/arch/sh/mm/numa.c @@ -14,7 +14,6 @@ #include #include -static bootmem_data_t plat_node_bdata[MAX_NUMNODES]; struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; EXPORT_SYMBOL_GPL(node_data); @@ -35,7 +34,7 @@ void __init setup_memory(void) NODE_DATA(0) = pfn_to_kaddr(free_pfn); memset(NODE_DATA(0), 0, sizeof(struct pglist_data)); free_pfn += PFN_UP(sizeof(struct pglist_data)); - NODE_DATA(0)->bdata = &plat_node_bdata[0]; + NODE_DATA(0)->bdata = &bootmem_node_data[0]; /* Set up node 0 */ setup_bootmem_allocator(free_pfn); @@ -66,7 +65,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) free_pfn += PFN_UP(sizeof(struct pglist_data)); memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); - NODE_DATA(nid)->bdata = &plat_node_bdata[nid]; + NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; NODE_DATA(nid)->node_start_pfn = start_pfn; NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 84898c44dd4..71329747395 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -788,7 +788,6 @@ int numa_cpu_lookup_table[NR_CPUS]; cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES]; #ifdef CONFIG_NEED_MULTIPLE_NODES -static bootmem_data_t plat_node_bdata[MAX_NUMNODES]; struct mdesc_mblock { u64 base; @@ -871,7 +870,7 @@ static void __init allocate_node_data(int nid) NODE_DATA(nid) = __va(paddr); memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); - NODE_DATA(nid)->bdata = &plat_node_bdata[nid]; + NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; #endif p = NODE_DATA(nid); diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c index 5dfef9fa061..62fa440678d 100644 --- a/arch/x86/mm/discontig_32.c +++ b/arch/x86/mm/discontig_32.c @@ -42,7 +42,6 @@ struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; EXPORT_SYMBOL(node_data); -static bootmem_data_t node0_bdata; /* * numa interface - we expect the numa architecture specific code to have @@ -385,7 +384,7 @@ void __init initmem_init(unsigned long start_pfn, for_each_online_node(nid) memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); - NODE_DATA(0)->bdata = &node0_bdata; + NODE_DATA(0)->bdata = &bootmem_node_data[0]; setup_bootmem_allocator(); } diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 9782f42dd31..a4dd793d600 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -23,8 +23,6 @@ struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; EXPORT_SYMBOL(node_data); -static bootmem_data_t plat_node_bdata[MAX_NUMNODES]; - struct memnode memnode; s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = { @@ -198,7 +196,7 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, nodedata_phys + pgdat_size - 1); memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t)); - NODE_DATA(nodeid)->bdata = &plat_node_bdata[nodeid]; + NODE_DATA(nodeid)->bdata = &bootmem_node_data[nodeid]; NODE_DATA(nodeid)->node_start_pfn = start_pfn; NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn; diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index a1d9b79078e..2599c741405 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -38,6 +38,8 @@ typedef struct bootmem_data { struct list_head list; } bootmem_data_t; +extern bootmem_data_t bootmem_node_data[]; + extern unsigned long bootmem_bootmap_pages(unsigned long); extern unsigned long init_bootmem(unsigned long addr, unsigned long memend); extern void free_bootmem(unsigned long addr, unsigned 
long size); diff --git a/mm/bootmem.c b/mm/bootmem.c index 9f4bbc5da73..35b3cb66703 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -36,6 +36,8 @@ static LIST_HEAD(bdata_list); unsigned long saved_max_pfn; #endif +bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata; + /* return the number of _pages_ that will be allocated for the boot bitmap */ unsigned long __init bootmem_bootmap_pages(unsigned long pages) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9ece07ce65b..e089b92cdff 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4040,9 +4040,7 @@ void __init set_dma_reserve(unsigned long new_dma_reserve) } #ifndef CONFIG_NEED_MULTIPLE_NODES -static bootmem_data_t contig_bootmem_data; -struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data }; - +struct pglist_data contig_page_data = { .bdata = &bootmem_node_data[0] }; EXPORT_SYMBOL(contig_page_data); #endif -- cgit v1.2.3-70-g09d2 From 28b2ee20c7cba812b6f2ccf6d722cf86d00a84dc Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Wed, 23 Jul 2008 21:27:05 -0700 Subject: access_process_vm device memory infrastructure In order to be able to debug things like the X server and programs using the PPC Cell SPUs, the debugger needs to be able to access device memory through ptrace and /proc/pid/mem. This patch: Add the generic_access_phys access function and put the hooks in place to allow access_process_vm to access device or PPC Cell SPU memory. [riel@redhat.com: Add documentation for the vm_ops->access function] Signed-off-by: Rik van Riel Signed-off-by: Benjamin Herrensmidt Cc: Dave Airlie Cc: Hugh Dickins Cc: Paul Mackerras Cc: Arnd Bergmann Acked-by: Peter Zijlstra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/Locking | 7 ++ arch/Kconfig | 3 + arch/x86/Kconfig | 1 + arch/x86/mm/ioremap.c | 8 +++ include/asm-x86/io_32.h | 2 + include/asm-x86/io_64.h | 2 + include/linux/mm.h | 8 +++ mm/memory.c | 131 ++++++++++++++++++++++++++++++++------ 8 files changed, 144 insertions(+), 18 deletions(-) (limited to 'arch/x86') diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index 8b22d7d8b99..680fb566b92 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -510,6 +510,7 @@ prototypes: void (*close)(struct vm_area_struct*); int (*fault)(struct vm_area_struct*, struct vm_fault *); int (*page_mkwrite)(struct vm_area_struct *, struct page *); + int (*access)(struct vm_area_struct *, unsigned long, void*, int, int); locking rules: BKL mmap_sem PageLocked(page) @@ -517,6 +518,7 @@ open: no yes close: no yes fault: no yes page_mkwrite: no yes no +access: no yes ->page_mkwrite() is called when a previously read-only page is about to become writeable. The file system is responsible for @@ -525,6 +527,11 @@ taking to lock out truncate, the page range should be verified to be within i_size. The page mapping should also be checked that it is not NULL. + ->access() is called when get_user_pages() fails in +acces_process_vm(), typically used to debug a process through +/proc/pid/mem or ptrace. This function is needed only for +VM_IO | VM_PFNMAP VMAs. 
+ ================================================================================ Dubious stuff diff --git a/arch/Kconfig b/arch/Kconfig index 4d5ebbc1e72..6093c0be58b 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -31,6 +31,9 @@ config KRETPROBES def_bool y depends on KPROBES && HAVE_KRETPROBES +config HAVE_IOREMAP_PROT + def_bool n + config HAVE_KPROBES def_bool n diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 03980cb0429..b2ddfcf0172 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -21,6 +21,7 @@ config X86 select HAVE_UNSTABLE_SCHED_CLOCK select HAVE_IDE select HAVE_OPROFILE + select HAVE_IOREMAP_PROT select HAVE_KPROBES select HAVE_KRETPROBES select HAVE_DYNAMIC_FTRACE diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 24c1d3c3018..016f335bbee 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -330,6 +330,14 @@ static void __iomem *ioremap_default(resource_size_t phys_addr, return (void __iomem *)ret; } +void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, + unsigned long prot_val) +{ + return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK), + __builtin_return_address(0)); +} +EXPORT_SYMBOL(ioremap_prot); + /** * iounmap - Free a IO remapping * @addr: virtual address from ioremap_* diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h index 4df44ed5407..e876d89ac15 100644 --- a/include/asm-x86/io_32.h +++ b/include/asm-x86/io_32.h @@ -110,6 +110,8 @@ static inline void *phys_to_virt(unsigned long address) */ extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); +extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, + unsigned long prot_val); /* * The default ioremap() behavior is non-cached: diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h index ddd8058a502..22995c5c5ad 100644 --- a/include/asm-x86/io_64.h +++ b/include/asm-x86/io_64.h @@ -175,6 +175,8 @@ extern void early_iounmap(void *addr, unsigned long size); */ extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); +extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, + unsigned long prot_val); /* * The default ioremap() behavior is non-cached: diff --git a/include/linux/mm.h b/include/linux/mm.h index eb815cfc1b3..5c7f8f64f70 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -170,6 +170,12 @@ struct vm_operations_struct { /* notification that a previously read-only page is about to become * writable, if an error is returned it will cause a SIGBUS */ int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page); + + /* called by access_process_vm when get_user_pages() fails, typically + * for use by special VMAs that can switch between memory and hardware + */ + int (*access)(struct vm_area_struct *vma, unsigned long addr, + void *buf, int len, int write); #ifdef CONFIG_NUMA /* * set_policy() op must add a reference to any non-NULL @new mempolicy @@ -771,6 +777,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma); void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows); +int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, + void *buf, int len, int write); static inline void unmap_shared_mapping_range(struct address_space 
*mapping, loff_t const holebegin, loff_t const holelen) diff --git a/mm/memory.c b/mm/memory.c index 46dbed4b744..87350321e66 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2751,6 +2751,86 @@ int in_gate_area_no_task(unsigned long addr) #endif /* __HAVE_ARCH_GATE_AREA */ +#ifdef CONFIG_HAVE_IOREMAP_PROT +static resource_size_t follow_phys(struct vm_area_struct *vma, + unsigned long address, unsigned int flags, + unsigned long *prot) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *ptep, pte; + spinlock_t *ptl; + resource_size_t phys_addr = 0; + struct mm_struct *mm = vma->vm_mm; + + VM_BUG_ON(!(vma->vm_flags & (VM_IO | VM_PFNMAP))); + + pgd = pgd_offset(mm, address); + if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) + goto no_page_table; + + pud = pud_offset(pgd, address); + if (pud_none(*pud) || unlikely(pud_bad(*pud))) + goto no_page_table; + + pmd = pmd_offset(pud, address); + if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) + goto no_page_table; + + /* We cannot handle huge page PFN maps. Luckily they don't exist. */ + if (pmd_huge(*pmd)) + goto no_page_table; + + ptep = pte_offset_map_lock(mm, pmd, address, &ptl); + if (!ptep) + goto out; + + pte = *ptep; + if (!pte_present(pte)) + goto unlock; + if ((flags & FOLL_WRITE) && !pte_write(pte)) + goto unlock; + phys_addr = pte_pfn(pte); + phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */ + + *prot = pgprot_val(pte_pgprot(pte)); + +unlock: + pte_unmap_unlock(ptep, ptl); +out: + return phys_addr; +no_page_table: + return 0; +} + +int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, + void *buf, int len, int write) +{ + resource_size_t phys_addr; + unsigned long prot = 0; + void *maddr; + int offset = addr & (PAGE_SIZE-1); + + if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) + return -EINVAL; + + phys_addr = follow_phys(vma, addr, write, &prot); + + if (!phys_addr) + return -EINVAL; + + maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot); + if (write) + memcpy_toio(maddr + offset, buf, len); + else + memcpy_fromio(buf, maddr + offset, len); + iounmap(maddr); + + return len; +} +#endif + /* * Access another process' address space. * Source/target buffer must be kernel space, @@ -2760,7 +2840,6 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in { struct mm_struct *mm; struct vm_area_struct *vma; - struct page *page; void *old_buf = buf; mm = get_task_mm(tsk); @@ -2772,28 +2851,44 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in while (len) { int bytes, ret, offset; void *maddr; + struct page *page = NULL; ret = get_user_pages(tsk, mm, addr, 1, write, 1, &page, &vma); - if (ret <= 0) - break; - - bytes = len; - offset = addr & (PAGE_SIZE-1); - if (bytes > PAGE_SIZE-offset) - bytes = PAGE_SIZE-offset; - - maddr = kmap(page); - if (write) { - copy_to_user_page(vma, page, addr, - maddr + offset, buf, bytes); - set_page_dirty_lock(page); + if (ret <= 0) { + /* + * Check if this is a VM_IO | VM_PFNMAP VMA, which + * we can access using slightly different code. 
+ */ +#ifdef CONFIG_HAVE_IOREMAP_PROT + vma = find_vma(mm, addr); + if (!vma) + break; + if (vma->vm_ops && vma->vm_ops->access) + ret = vma->vm_ops->access(vma, addr, buf, + len, write); + if (ret <= 0) +#endif + break; + bytes = ret; } else { - copy_from_user_page(vma, page, addr, - buf, maddr + offset, bytes); + bytes = len; + offset = addr & (PAGE_SIZE-1); + if (bytes > PAGE_SIZE-offset) + bytes = PAGE_SIZE-offset; + + maddr = kmap(page); + if (write) { + copy_to_user_page(vma, page, addr, + maddr + offset, buf, bytes); + set_page_dirty_lock(page); + } else { + copy_from_user_page(vma, page, addr, + buf, maddr + offset, bytes); + } + kunmap(page); + page_cache_release(page); } - kunmap(page); - page_cache_release(page); len -= bytes; buf += bytes; addr += bytes; -- cgit v1.2.3-70-g09d2 From 7ae8ed5053a39082d224a3f48409e016baca9c16 Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Wed, 23 Jul 2008 21:27:07 -0700 Subject: use generic_access_phys for /dev/mem mappings Use generic_access_phys as the access_process_vm access function for /dev/mem mappings. This makes it possible to debug the X server. [akpm@linux-foundation.org: repair all the architectures which broke] Signed-off-by: Rik van Riel Cc: Benjamin Herrensmidt Cc: Dave Airlie Cc: Hugh Dickins Cc: Paul Mackerras Cc: Arnd Bergmann Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/pci/i386.c | 1 + drivers/char/mem.c | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 2aafb67dc5f..a09505806b8 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c @@ -280,6 +280,7 @@ static void pci_track_mmap_page_range(struct vm_area_struct *vma) static struct vm_operations_struct pci_mmap_ops = { .open = pci_track_mmap_page_range, .close = pci_unmap_page_range, + .access = generic_access_phys, }; int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, diff --git a/drivers/char/mem.c b/drivers/char/mem.c index c2dba82eb5f..672b08e694d 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -327,7 +327,10 @@ static void mmap_mem_close(struct vm_area_struct *vma) static struct vm_operations_struct mmap_mem_ops = { .open = mmap_mem_open, - .close = mmap_mem_close + .close = mmap_mem_close, +#ifdef CONFIG_HAVE_IOREMAP_PROT + .access = generic_access_phys +#endif }; static int mmap_mem(struct file * file, struct vm_area_struct * vma) -- cgit v1.2.3-70-g09d2 From a5516438959d90b071ff0a484ce4f3f523dc3152 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 23 Jul 2008 21:27:41 -0700 Subject: hugetlb: modular state for hugetlb page size The goal of this patchset is to support multiple hugetlb page sizes. This is achieved by introducing a new struct hstate structure, which encapsulates the important hugetlb state and constants (eg. huge page size, number of huge pages currently allocated, etc). The hstate structure is then passed around the code which requires these fields, they will do the right thing regardless of the exact hstate they are operating on. This patch adds the hstate structure, with a single global instance of it (default_hstate), and does the basic work of converting hugetlb to use the hstate. Future patches will add more hstate structures to allow for different hugetlbfs mounts to have different page sizes. 
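As a rough illustration of the conversion pattern, the following stand-alone sketch mirrors the accessors this patch introduces (the struct fields and accessor names follow the linux/hugetlb.h hunks below; PAGE_SHIFT, the order value and main() are scaffolding for the example, not kernel code):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE (1UL << PAGE_SHIFT)

    /* Pared-down hstate: one hugetlb page size description. */
    struct hstate {
            unsigned int order;     /* huge page = 2^order base pages */
            unsigned long mask;
    };

    /* A single global instance for now; later patches add more sizes. */
    static struct hstate default_hstate = { .order = 9 };  /* 2 MB with 4 KB pages */

    static unsigned long huge_page_size(struct hstate *h)
    {
            return PAGE_SIZE << h->order;
    }

    static unsigned huge_page_shift(struct hstate *h)
    {
            return h->order + PAGE_SHIFT;
    }

    int main(void)
    {
            struct hstate *h = &default_hstate;     /* via hstate_file()/hstate_vma() in the patch */
            unsigned long len = 10UL << 20;         /* a hypothetical 10 MB mapping */

            /* old: len >> HPAGE_SHIFT; new: ask the hstate */
            printf("huge page size: %lu bytes\n", huge_page_size(h));
            printf("pages needed:   %lu\n", len >> huge_page_shift(h));
            return 0;
    }

Call sites that used to hard-code HPAGE_SHIFT/HPAGE_MASK/HPAGE_SIZE now go through an hstate pointer obtained from hstate_file(), hstate_vma() or hstate_inode(), all of which return &default_hstate until multiple page sizes exist.
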
[akpm@linux-foundation.org: coding-style fixes] Acked-by: Adam Litke Acked-by: Nishanth Aravamudan Signed-off-by: Andi Kleen Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/mm/hugetlbpage.c | 7 +- arch/powerpc/mm/hugetlbpage.c | 3 +- arch/s390/mm/hugetlbpage.c | 3 +- arch/sh/mm/hugetlbpage.c | 3 +- arch/sparc64/mm/hugetlbpage.c | 5 +- arch/x86/mm/hugetlbpage.c | 5 +- fs/hugetlbfs/inode.c | 52 +++--- include/asm-ia64/hugetlb.h | 3 +- include/asm-powerpc/hugetlb.h | 3 +- include/asm-s390/hugetlb.h | 3 +- include/asm-sh/hugetlb.h | 3 +- include/asm-sparc/hugetlb.h | 3 +- include/asm-x86/hugetlb.h | 8 +- include/linux/hugetlb.h | 88 +++++++++- ipc/shm.c | 3 +- mm/hugetlb.c | 368 +++++++++++++++++++++++------------------- mm/memory.c | 2 +- mm/mempolicy.c | 9 +- mm/mmap.c | 3 +- 19 files changed, 356 insertions(+), 218 deletions(-) (limited to 'arch/x86') diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index cd49e2860ee..6170f097d25 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -24,7 +24,7 @@ unsigned int hpage_shift=HPAGE_SHIFT_DEFAULT; pte_t * -huge_pte_alloc (struct mm_struct *mm, unsigned long addr) +huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) { unsigned long taddr = htlbpage_to_page(addr); pgd_t *pgd; @@ -75,7 +75,8 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) * Don't actually need to do any preparation, but need to make sure * the address is in the right region. */ -int prepare_hugepage_range(unsigned long addr, unsigned long len) +int prepare_hugepage_range(struct file *file, + unsigned long addr, unsigned long len) { if (len & ~HPAGE_MASK) return -EINVAL; @@ -149,7 +150,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u /* Handle MAP_FIXED */ if (flags & MAP_FIXED) { - if (prepare_hugepage_range(addr, len)) + if (prepare_hugepage_range(file, addr, len)) return -EINVAL; return addr; } diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 1a96cc891cf..c94dc71af98 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -128,7 +128,8 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) return NULL; } -pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) +pte_t *huge_pte_alloc(struct mm_struct *mm, + unsigned long addr, unsigned long sz) { pgd_t *pg; pud_t *pu; diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index f4b6124fdb7..9162dc84f77 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -72,7 +72,8 @@ void arch_release_hugepage(struct page *page) page[1].index = 0; } -pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) +pte_t *huge_pte_alloc(struct mm_struct *mm, + unsigned long addr, unsigned long sz) { pgd_t *pgdp; pud_t *pudp; diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c index ae8c321d6e2..2f9dbe0ef4a 100644 --- a/arch/sh/mm/hugetlbpage.c +++ b/arch/sh/mm/hugetlbpage.c @@ -22,7 +22,8 @@ #include #include -pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) +pte_t *huge_pte_alloc(struct mm_struct *mm, + unsigned long addr, unsigned long sz) { pgd_t *pgd; pud_t *pud; diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c index ebefd2a1437..1307b23f6a7 100644 --- a/arch/sparc64/mm/hugetlbpage.c +++ b/arch/sparc64/mm/hugetlbpage.c @@ -175,7 +175,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned 
long addr, return -ENOMEM; if (flags & MAP_FIXED) { - if (prepare_hugepage_range(addr, len)) + if (prepare_hugepage_range(file, addr, len)) return -EINVAL; return addr; } @@ -195,7 +195,8 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, pgoff, flags); } -pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) +pte_t *huge_pte_alloc(struct mm_struct *mm, + unsigned long addr, unsigned long sz) { pgd_t *pgd; pud_t *pud; diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 0b3d567e686..52476fde899 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -124,7 +124,8 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) return 1; } -pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) +pte_t *huge_pte_alloc(struct mm_struct *mm, + unsigned long addr, unsigned long sz) { pgd_t *pgd; pud_t *pud; @@ -368,7 +369,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, return -ENOMEM; if (flags & MAP_FIXED) { - if (prepare_hugepage_range(addr, len)) + if (prepare_hugepage_range(file, addr, len)) return -EINVAL; return addr; } diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 428eff5b73f..516c581b537 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -80,6 +80,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) struct inode *inode = file->f_path.dentry->d_inode; loff_t len, vma_len; int ret; + struct hstate *h = hstate_file(file); /* * vma address alignment (but not the pgoff alignment) has @@ -92,7 +93,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) vma->vm_flags |= VM_HUGETLB | VM_RESERVED; vma->vm_ops = &hugetlb_vm_ops; - if (vma->vm_pgoff & ~(HPAGE_MASK >> PAGE_SHIFT)) + if (vma->vm_pgoff & ~(huge_page_mask(h) >> PAGE_SHIFT)) return -EINVAL; vma_len = (loff_t)(vma->vm_end - vma->vm_start); @@ -104,8 +105,8 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); if (hugetlb_reserve_pages(inode, - vma->vm_pgoff >> (HPAGE_SHIFT-PAGE_SHIFT), - len >> HPAGE_SHIFT, vma)) + vma->vm_pgoff >> huge_page_order(h), + len >> huge_page_shift(h), vma)) goto out; ret = 0; @@ -130,20 +131,21 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long start_addr; + struct hstate *h = hstate_file(file); - if (len & ~HPAGE_MASK) + if (len & ~huge_page_mask(h)) return -EINVAL; if (len > TASK_SIZE) return -ENOMEM; if (flags & MAP_FIXED) { - if (prepare_hugepage_range(addr, len)) + if (prepare_hugepage_range(file, addr, len)) return -EINVAL; return addr; } if (addr) { - addr = ALIGN(addr, HPAGE_SIZE); + addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && (!vma || addr + len <= vma->vm_start)) @@ -156,7 +158,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, start_addr = TASK_UNMAPPED_BASE; full_search: - addr = ALIGN(start_addr, HPAGE_SIZE); + addr = ALIGN(start_addr, huge_page_size(h)); for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). 
*/ @@ -174,7 +176,7 @@ full_search: if (!vma || addr + len <= vma->vm_start) return addr; - addr = ALIGN(vma->vm_end, HPAGE_SIZE); + addr = ALIGN(vma->vm_end, huge_page_size(h)); } } #endif @@ -225,10 +227,11 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset, static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos) { + struct hstate *h = hstate_file(filp); struct address_space *mapping = filp->f_mapping; struct inode *inode = mapping->host; - unsigned long index = *ppos >> HPAGE_SHIFT; - unsigned long offset = *ppos & ~HPAGE_MASK; + unsigned long index = *ppos >> huge_page_shift(h); + unsigned long offset = *ppos & ~huge_page_mask(h); unsigned long end_index; loff_t isize; ssize_t retval = 0; @@ -243,17 +246,17 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, if (!isize) goto out; - end_index = (isize - 1) >> HPAGE_SHIFT; + end_index = (isize - 1) >> huge_page_shift(h); for (;;) { struct page *page; - int nr, ret; + unsigned long nr, ret; /* nr is the maximum number of bytes to copy from this page */ - nr = HPAGE_SIZE; + nr = huge_page_size(h); if (index >= end_index) { if (index > end_index) goto out; - nr = ((isize - 1) & ~HPAGE_MASK) + 1; + nr = ((isize - 1) & ~huge_page_mask(h)) + 1; if (nr <= offset) { goto out; } @@ -287,8 +290,8 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, offset += ret; retval += ret; len -= ret; - index += offset >> HPAGE_SHIFT; - offset &= ~HPAGE_MASK; + index += offset >> huge_page_shift(h); + offset &= ~huge_page_mask(h); if (page) page_cache_release(page); @@ -298,7 +301,7 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, break; } out: - *ppos = ((loff_t)index << HPAGE_SHIFT) + offset; + *ppos = ((loff_t)index << huge_page_shift(h)) + offset; mutex_unlock(&inode->i_mutex); return retval; } @@ -339,8 +342,9 @@ static void truncate_huge_page(struct page *page) static void truncate_hugepages(struct inode *inode, loff_t lstart) { + struct hstate *h = hstate_inode(inode); struct address_space *mapping = &inode->i_data; - const pgoff_t start = lstart >> HPAGE_SHIFT; + const pgoff_t start = lstart >> huge_page_shift(h); struct pagevec pvec; pgoff_t next; int i, freed = 0; @@ -449,8 +453,9 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset) { pgoff_t pgoff; struct address_space *mapping = inode->i_mapping; + struct hstate *h = hstate_inode(inode); - BUG_ON(offset & ~HPAGE_MASK); + BUG_ON(offset & ~huge_page_mask(h)); pgoff = offset >> PAGE_SHIFT; i_size_write(inode, offset); @@ -465,6 +470,7 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset) static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; + struct hstate *h = hstate_inode(inode); int error; unsigned int ia_valid = attr->ia_valid; @@ -476,7 +482,7 @@ static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr) if (ia_valid & ATTR_SIZE) { error = -EINVAL; - if (!(attr->ia_size & ~HPAGE_MASK)) + if (!(attr->ia_size & ~huge_page_mask(h))) error = hugetlb_vmtruncate(inode, attr->ia_size); if (error) goto out; @@ -610,9 +616,10 @@ static int hugetlbfs_set_page_dirty(struct page *page) static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb); + struct hstate *h = hstate_inode(dentry->d_inode); buf->f_type = HUGETLBFS_MAGIC; - buf->f_bsize = HPAGE_SIZE; + buf->f_bsize = huge_page_size(h); if (sbinfo) { 
spin_lock(&sbinfo->stat_lock); /* If no limits set, just report 0 for max/free/used @@ -942,7 +949,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size) goto out_dentry; error = -ENOMEM; - if (hugetlb_reserve_pages(inode, 0, size >> HPAGE_SHIFT, NULL)) + if (hugetlb_reserve_pages(inode, 0, + size >> huge_page_shift(hstate_inode(inode)), NULL)) goto out_inode; d_instantiate(dentry, inode); diff --git a/include/asm-ia64/hugetlb.h b/include/asm-ia64/hugetlb.h index e9d1e5e2382..da55c63728e 100644 --- a/include/asm-ia64/hugetlb.h +++ b/include/asm-ia64/hugetlb.h @@ -8,7 +8,8 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); -int prepare_hugepage_range(unsigned long addr, unsigned long len); +int prepare_hugepage_range(struct file *file, + unsigned long addr, unsigned long len); static inline int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, diff --git a/include/asm-powerpc/hugetlb.h b/include/asm-powerpc/hugetlb.h index 0a37aa5ecaa..ca37c4af27b 100644 --- a/include/asm-powerpc/hugetlb.h +++ b/include/asm-powerpc/hugetlb.h @@ -21,7 +21,8 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, * If the arch doesn't supply something else, assume that hugepage * size aligned regions are ok without further preparation. */ -static inline int prepare_hugepage_range(unsigned long addr, unsigned long len) +static inline int prepare_hugepage_range(struct file *file, + unsigned long addr, unsigned long len) { if (len & ~HPAGE_MASK) return -EINVAL; diff --git a/include/asm-s390/hugetlb.h b/include/asm-s390/hugetlb.h index 600a776f8f7..670a1d1745d 100644 --- a/include/asm-s390/hugetlb.h +++ b/include/asm-s390/hugetlb.h @@ -22,7 +22,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, * If the arch doesn't supply something else, assume that hugepage * size aligned regions are ok without further preparation. */ -static inline int prepare_hugepage_range(unsigned long addr, unsigned long len) +static inline int prepare_hugepage_range(struct file *file, + unsigned long addr, unsigned long len) { if (len & ~HPAGE_MASK) return -EINVAL; diff --git a/include/asm-sh/hugetlb.h b/include/asm-sh/hugetlb.h index fb30018938c..967068fb79a 100644 --- a/include/asm-sh/hugetlb.h +++ b/include/asm-sh/hugetlb.h @@ -14,7 +14,8 @@ static inline int is_hugepage_only_range(struct mm_struct *mm, * If the arch doesn't supply something else, assume that hugepage * size aligned regions are ok without further preparation. */ -static inline int prepare_hugepage_range(unsigned long addr, unsigned long len) +static inline int prepare_hugepage_range(struct file *file, + unsigned long addr, unsigned long len) { if (len & ~HPAGE_MASK) return -EINVAL; diff --git a/include/asm-sparc/hugetlb.h b/include/asm-sparc/hugetlb.h index aeb92374ca3..177061064ee 100644 --- a/include/asm-sparc/hugetlb.h +++ b/include/asm-sparc/hugetlb.h @@ -22,7 +22,8 @@ static inline int is_hugepage_only_range(struct mm_struct *mm, * If the arch doesn't supply something else, assume that hugepage * size aligned regions are ok without further preparation. 
*/ -static inline int prepare_hugepage_range(unsigned long addr, unsigned long len) +static inline int prepare_hugepage_range(struct file *file, + unsigned long addr, unsigned long len) { if (len & ~HPAGE_MASK) return -EINVAL; diff --git a/include/asm-x86/hugetlb.h b/include/asm-x86/hugetlb.h index 7eed6e0883b..439a9acc132 100644 --- a/include/asm-x86/hugetlb.h +++ b/include/asm-x86/hugetlb.h @@ -14,11 +14,13 @@ static inline int is_hugepage_only_range(struct mm_struct *mm, * If the arch doesn't supply something else, assume that hugepage * size aligned regions are ok without further preparation. */ -static inline int prepare_hugepage_range(unsigned long addr, unsigned long len) +static inline int prepare_hugepage_range(struct file *file, + unsigned long addr, unsigned long len) { - if (len & ~HPAGE_MASK) + struct hstate *h = hstate_file(file); + if (len & ~huge_page_mask(h)) return -EINVAL; - if (addr & ~HPAGE_MASK) + if (addr & ~huge_page_mask(h)) return -EINVAL; return 0; } diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index abbc187193a..ad2271e11f9 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -8,7 +8,6 @@ #include #include #include -#include struct ctl_table; @@ -45,7 +44,8 @@ extern int sysctl_hugetlb_shm_group; /* arch callbacks */ -pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr); +pte_t *huge_pte_alloc(struct mm_struct *mm, + unsigned long addr, unsigned long sz); pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr); int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, @@ -80,7 +80,7 @@ static inline unsigned long hugetlb_total_pages(void) #define hugetlb_report_meminfo(buf) 0 #define hugetlb_report_node_meminfo(n, buf) 0 #define follow_huge_pmd(mm, addr, pmd, write) NULL -#define prepare_hugepage_range(addr,len) (-EINVAL) +#define prepare_hugepage_range(file, addr, len) (-EINVAL) #define pmd_huge(x) 0 #define is_hugepage_only_range(mm, addr, len) 0 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) @@ -134,8 +134,6 @@ struct file *hugetlb_file_setup(const char *name, size_t); int hugetlb_get_quota(struct address_space *mapping, long delta); void hugetlb_put_quota(struct address_space *mapping, long delta); -#define BLOCKS_PER_HUGEPAGE (HPAGE_SIZE / 512) - static inline int is_file_hugepages(struct file *file) { if (file->f_op == &hugetlbfs_file_operations) @@ -164,4 +162,84 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long flags); #endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */ +#ifdef CONFIG_HUGETLB_PAGE + +/* Defines one hugetlb page size */ +struct hstate { + int hugetlb_next_nid; + unsigned int order; + unsigned long mask; + unsigned long max_huge_pages; + unsigned long nr_huge_pages; + unsigned long free_huge_pages; + unsigned long resv_huge_pages; + unsigned long surplus_huge_pages; + unsigned long nr_overcommit_huge_pages; + struct list_head hugepage_freelists[MAX_NUMNODES]; + unsigned int nr_huge_pages_node[MAX_NUMNODES]; + unsigned int free_huge_pages_node[MAX_NUMNODES]; + unsigned int surplus_huge_pages_node[MAX_NUMNODES]; +}; + +extern struct hstate default_hstate; + +static inline struct hstate *hstate_vma(struct vm_area_struct *vma) +{ + return &default_hstate; +} + +static inline struct hstate *hstate_file(struct file *f) +{ + return &default_hstate; +} + +static inline struct hstate *hstate_inode(struct inode *i) +{ + return 
&default_hstate; +} + +static inline unsigned long huge_page_size(struct hstate *h) +{ + return (unsigned long)PAGE_SIZE << h->order; +} + +static inline unsigned long huge_page_mask(struct hstate *h) +{ + return h->mask; +} + +static inline unsigned int huge_page_order(struct hstate *h) +{ + return h->order; +} + +static inline unsigned huge_page_shift(struct hstate *h) +{ + return h->order + PAGE_SHIFT; +} + +static inline unsigned int pages_per_huge_page(struct hstate *h) +{ + return 1 << h->order; +} + +static inline unsigned int blocks_per_huge_page(struct hstate *h) +{ + return huge_page_size(h) / 512; +} + +#include + +#else +struct hstate {}; +#define hstate_file(f) NULL +#define hstate_vma(v) NULL +#define hstate_inode(i) NULL +#define huge_page_size(h) PAGE_SIZE +#define huge_page_mask(h) PAGE_MASK +#define huge_page_order(h) 0 +#define huge_page_shift(h) PAGE_SHIFT +#define pages_per_huge_page(h) 1 +#endif + #endif /* _LINUX_HUGETLB_H */ diff --git a/ipc/shm.c b/ipc/shm.c index 790240cd067..a726aebce7d 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -577,7 +577,8 @@ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss, if (is_file_hugepages(shp->shm_file)) { struct address_space *mapping = inode->i_mapping; - *rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages; + struct hstate *h = hstate_file(shp->shm_file); + *rss += pages_per_huge_page(h) * mapping->nrpages; } else { struct shmem_inode_info *info = SHMEM_I(inode); spin_lock(&info->lock); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 32dff4290c6..0d8153e25f0 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -22,18 +22,12 @@ #include "internal.h" const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL; -static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages; -static unsigned long surplus_huge_pages; -static unsigned long nr_overcommit_huge_pages; unsigned long max_huge_pages; unsigned long sysctl_overcommit_huge_pages; -static struct list_head hugepage_freelists[MAX_NUMNODES]; -static unsigned int nr_huge_pages_node[MAX_NUMNODES]; -static unsigned int free_huge_pages_node[MAX_NUMNODES]; -static unsigned int surplus_huge_pages_node[MAX_NUMNODES]; static gfp_t htlb_alloc_mask = GFP_HIGHUSER; unsigned long hugepages_treat_as_movable; -static int hugetlb_next_nid; + +struct hstate default_hstate; /* * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages @@ -203,11 +197,11 @@ static long region_count(struct list_head *head, long f, long t) * Convert the address within this vma to the page offset within * the mapping, in pagecache page units; huge pages here. 
*/ -static pgoff_t vma_hugecache_offset(struct vm_area_struct *vma, - unsigned long address) +static pgoff_t vma_hugecache_offset(struct hstate *h, + struct vm_area_struct *vma, unsigned long address) { - return ((address - vma->vm_start) >> HPAGE_SHIFT) + - (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT)); + return ((address - vma->vm_start) >> huge_page_shift(h)) + + (vma->vm_pgoff >> huge_page_order(h)); } /* @@ -309,20 +303,21 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) } /* Decrement the reserved pages in the hugepage pool by one */ -static void decrement_hugepage_resv_vma(struct vm_area_struct *vma) +static void decrement_hugepage_resv_vma(struct hstate *h, + struct vm_area_struct *vma) { if (vma->vm_flags & VM_NORESERVE) return; if (vma->vm_flags & VM_SHARED) { /* Shared mappings always use reserves */ - resv_huge_pages--; + h->resv_huge_pages--; } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { /* * Only the process that called mmap() has reserves for * private mappings. */ - resv_huge_pages--; + h->resv_huge_pages--; } } @@ -344,12 +339,13 @@ static int vma_has_private_reserves(struct vm_area_struct *vma) return 1; } -static void clear_huge_page(struct page *page, unsigned long addr) +static void clear_huge_page(struct page *page, + unsigned long addr, unsigned long sz) { int i; might_sleep(); - for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) { + for (i = 0; i < sz/PAGE_SIZE; i++) { cond_resched(); clear_user_highpage(page + i, addr + i * PAGE_SIZE); } @@ -359,41 +355,43 @@ static void copy_huge_page(struct page *dst, struct page *src, unsigned long addr, struct vm_area_struct *vma) { int i; + struct hstate *h = hstate_vma(vma); might_sleep(); - for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) { + for (i = 0; i < pages_per_huge_page(h); i++) { cond_resched(); copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma); } } -static void enqueue_huge_page(struct page *page) +static void enqueue_huge_page(struct hstate *h, struct page *page) { int nid = page_to_nid(page); - list_add(&page->lru, &hugepage_freelists[nid]); - free_huge_pages++; - free_huge_pages_node[nid]++; + list_add(&page->lru, &h->hugepage_freelists[nid]); + h->free_huge_pages++; + h->free_huge_pages_node[nid]++; } -static struct page *dequeue_huge_page(void) +static struct page *dequeue_huge_page(struct hstate *h) { int nid; struct page *page = NULL; for (nid = 0; nid < MAX_NUMNODES; ++nid) { - if (!list_empty(&hugepage_freelists[nid])) { - page = list_entry(hugepage_freelists[nid].next, + if (!list_empty(&h->hugepage_freelists[nid])) { + page = list_entry(h->hugepage_freelists[nid].next, struct page, lru); list_del(&page->lru); - free_huge_pages--; - free_huge_pages_node[nid]--; + h->free_huge_pages--; + h->free_huge_pages_node[nid]--; break; } } return page; } -static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma, +static struct page *dequeue_huge_page_vma(struct hstate *h, + struct vm_area_struct *vma, unsigned long address, int avoid_reserve) { int nid; @@ -411,26 +409,26 @@ static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma, * not "stolen". 
The child may still get SIGKILLed */ if (!vma_has_private_reserves(vma) && - free_huge_pages - resv_huge_pages == 0) + h->free_huge_pages - h->resv_huge_pages == 0) return NULL; /* If reserves cannot be used, ensure enough pages are in the pool */ - if (avoid_reserve && free_huge_pages - resv_huge_pages == 0) + if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0) return NULL; for_each_zone_zonelist_nodemask(zone, z, zonelist, MAX_NR_ZONES - 1, nodemask) { nid = zone_to_nid(zone); if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) && - !list_empty(&hugepage_freelists[nid])) { - page = list_entry(hugepage_freelists[nid].next, + !list_empty(&h->hugepage_freelists[nid])) { + page = list_entry(h->hugepage_freelists[nid].next, struct page, lru); list_del(&page->lru); - free_huge_pages--; - free_huge_pages_node[nid]--; + h->free_huge_pages--; + h->free_huge_pages_node[nid]--; if (!avoid_reserve) - decrement_hugepage_resv_vma(vma); + decrement_hugepage_resv_vma(h, vma); break; } @@ -439,12 +437,13 @@ static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma, return page; } -static void update_and_free_page(struct page *page) +static void update_and_free_page(struct hstate *h, struct page *page) { int i; - nr_huge_pages--; - nr_huge_pages_node[page_to_nid(page)]--; - for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) { + + h->nr_huge_pages--; + h->nr_huge_pages_node[page_to_nid(page)]--; + for (i = 0; i < pages_per_huge_page(h); i++) { page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced | 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved | 1 << PG_private | 1<< PG_writeback); @@ -452,11 +451,16 @@ static void update_and_free_page(struct page *page) set_compound_page_dtor(page, NULL); set_page_refcounted(page); arch_release_hugepage(page); - __free_pages(page, HUGETLB_PAGE_ORDER); + __free_pages(page, huge_page_order(h)); } static void free_huge_page(struct page *page) { + /* + * Can't pass hstate in here because it is called from the + * compound page destructor. + */ + struct hstate *h = &default_hstate; int nid = page_to_nid(page); struct address_space *mapping; @@ -466,12 +470,12 @@ static void free_huge_page(struct page *page) INIT_LIST_HEAD(&page->lru); spin_lock(&hugetlb_lock); - if (surplus_huge_pages_node[nid]) { - update_and_free_page(page); - surplus_huge_pages--; - surplus_huge_pages_node[nid]--; + if (h->surplus_huge_pages_node[nid]) { + update_and_free_page(h, page); + h->surplus_huge_pages--; + h->surplus_huge_pages_node[nid]--; } else { - enqueue_huge_page(page); + enqueue_huge_page(h, page); } spin_unlock(&hugetlb_lock); if (mapping) @@ -483,7 +487,7 @@ static void free_huge_page(struct page *page) * balanced by operating on them in a round-robin fashion. * Returns 1 if an adjustment was made. 
*/ -static int adjust_pool_surplus(int delta) +static int adjust_pool_surplus(struct hstate *h, int delta) { static int prev_nid; int nid = prev_nid; @@ -496,15 +500,15 @@ static int adjust_pool_surplus(int delta) nid = first_node(node_online_map); /* To shrink on this node, there must be a surplus page */ - if (delta < 0 && !surplus_huge_pages_node[nid]) + if (delta < 0 && !h->surplus_huge_pages_node[nid]) continue; /* Surplus cannot exceed the total number of pages */ - if (delta > 0 && surplus_huge_pages_node[nid] >= - nr_huge_pages_node[nid]) + if (delta > 0 && h->surplus_huge_pages_node[nid] >= + h->nr_huge_pages_node[nid]) continue; - surplus_huge_pages += delta; - surplus_huge_pages_node[nid] += delta; + h->surplus_huge_pages += delta; + h->surplus_huge_pages_node[nid] += delta; ret = 1; break; } while (nid != prev_nid); @@ -513,46 +517,46 @@ static int adjust_pool_surplus(int delta) return ret; } -static void prep_new_huge_page(struct page *page, int nid) +static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) { set_compound_page_dtor(page, free_huge_page); spin_lock(&hugetlb_lock); - nr_huge_pages++; - nr_huge_pages_node[nid]++; + h->nr_huge_pages++; + h->nr_huge_pages_node[nid]++; spin_unlock(&hugetlb_lock); put_page(page); /* free it into the hugepage allocator */ } -static struct page *alloc_fresh_huge_page_node(int nid) +static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) { struct page *page; page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE| __GFP_REPEAT|__GFP_NOWARN, - HUGETLB_PAGE_ORDER); + huge_page_order(h)); if (page) { if (arch_prepare_hugepage(page)) { __free_pages(page, HUGETLB_PAGE_ORDER); return NULL; } - prep_new_huge_page(page, nid); + prep_new_huge_page(h, page, nid); } return page; } -static int alloc_fresh_huge_page(void) +static int alloc_fresh_huge_page(struct hstate *h) { struct page *page; int start_nid; int next_nid; int ret = 0; - start_nid = hugetlb_next_nid; + start_nid = h->hugetlb_next_nid; do { - page = alloc_fresh_huge_page_node(hugetlb_next_nid); + page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid); if (page) ret = 1; /* @@ -566,11 +570,11 @@ static int alloc_fresh_huge_page(void) * if we just successfully allocated a hugepage so that * the next caller gets hugepages on the next node. */ - next_nid = next_node(hugetlb_next_nid, node_online_map); + next_nid = next_node(h->hugetlb_next_nid, node_online_map); if (next_nid == MAX_NUMNODES) next_nid = first_node(node_online_map); - hugetlb_next_nid = next_nid; - } while (!page && hugetlb_next_nid != start_nid); + h->hugetlb_next_nid = next_nid; + } while (!page && h->hugetlb_next_nid != start_nid); if (ret) count_vm_event(HTLB_BUDDY_PGALLOC); @@ -580,8 +584,8 @@ static int alloc_fresh_huge_page(void) return ret; } -static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma, - unsigned long address) +static struct page *alloc_buddy_huge_page(struct hstate *h, + struct vm_area_struct *vma, unsigned long address) { struct page *page; unsigned int nid; @@ -610,18 +614,18 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma, * per-node value is checked there. 
*/ spin_lock(&hugetlb_lock); - if (surplus_huge_pages >= nr_overcommit_huge_pages) { + if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { spin_unlock(&hugetlb_lock); return NULL; } else { - nr_huge_pages++; - surplus_huge_pages++; + h->nr_huge_pages++; + h->surplus_huge_pages++; } spin_unlock(&hugetlb_lock); page = alloc_pages(htlb_alloc_mask|__GFP_COMP| __GFP_REPEAT|__GFP_NOWARN, - HUGETLB_PAGE_ORDER); + huge_page_order(h)); spin_lock(&hugetlb_lock); if (page) { @@ -636,12 +640,12 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma, /* * We incremented the global counters already */ - nr_huge_pages_node[nid]++; - surplus_huge_pages_node[nid]++; + h->nr_huge_pages_node[nid]++; + h->surplus_huge_pages_node[nid]++; __count_vm_event(HTLB_BUDDY_PGALLOC); } else { - nr_huge_pages--; - surplus_huge_pages--; + h->nr_huge_pages--; + h->surplus_huge_pages--; __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); } spin_unlock(&hugetlb_lock); @@ -653,16 +657,16 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma, * Increase the hugetlb pool such that it can accomodate a reservation * of size 'delta'. */ -static int gather_surplus_pages(int delta) +static int gather_surplus_pages(struct hstate *h, int delta) { struct list_head surplus_list; struct page *page, *tmp; int ret, i; int needed, allocated; - needed = (resv_huge_pages + delta) - free_huge_pages; + needed = (h->resv_huge_pages + delta) - h->free_huge_pages; if (needed <= 0) { - resv_huge_pages += delta; + h->resv_huge_pages += delta; return 0; } @@ -673,7 +677,7 @@ static int gather_surplus_pages(int delta) retry: spin_unlock(&hugetlb_lock); for (i = 0; i < needed; i++) { - page = alloc_buddy_huge_page(NULL, 0); + page = alloc_buddy_huge_page(h, NULL, 0); if (!page) { /* * We were not able to allocate enough pages to @@ -694,7 +698,8 @@ retry: * because either resv_huge_pages or free_huge_pages may have changed. */ spin_lock(&hugetlb_lock); - needed = (resv_huge_pages + delta) - (free_huge_pages + allocated); + needed = (h->resv_huge_pages + delta) - + (h->free_huge_pages + allocated); if (needed > 0) goto retry; @@ -707,7 +712,7 @@ retry: * before they are reserved. */ needed += allocated; - resv_huge_pages += delta; + h->resv_huge_pages += delta; ret = 0; free: /* Free the needed pages to the hugetlb pool */ @@ -715,7 +720,7 @@ free: if ((--needed) < 0) break; list_del(&page->lru); - enqueue_huge_page(page); + enqueue_huge_page(h, page); } /* Free unnecessary surplus pages to the buddy allocator */ @@ -743,7 +748,8 @@ free: * allocated to satisfy the reservation must be explicitly freed if they were * never used. 
*/ -static void return_unused_surplus_pages(unsigned long unused_resv_pages) +static void return_unused_surplus_pages(struct hstate *h, + unsigned long unused_resv_pages) { static int nid = -1; struct page *page; @@ -758,27 +764,27 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages) unsigned long remaining_iterations = num_online_nodes(); /* Uncommit the reservation */ - resv_huge_pages -= unused_resv_pages; + h->resv_huge_pages -= unused_resv_pages; - nr_pages = min(unused_resv_pages, surplus_huge_pages); + nr_pages = min(unused_resv_pages, h->surplus_huge_pages); while (remaining_iterations-- && nr_pages) { nid = next_node(nid, node_online_map); if (nid == MAX_NUMNODES) nid = first_node(node_online_map); - if (!surplus_huge_pages_node[nid]) + if (!h->surplus_huge_pages_node[nid]) continue; - if (!list_empty(&hugepage_freelists[nid])) { - page = list_entry(hugepage_freelists[nid].next, + if (!list_empty(&h->hugepage_freelists[nid])) { + page = list_entry(h->hugepage_freelists[nid].next, struct page, lru); list_del(&page->lru); - update_and_free_page(page); - free_huge_pages--; - free_huge_pages_node[nid]--; - surplus_huge_pages--; - surplus_huge_pages_node[nid]--; + update_and_free_page(h, page); + h->free_huge_pages--; + h->free_huge_pages_node[nid]--; + h->surplus_huge_pages--; + h->surplus_huge_pages_node[nid]--; nr_pages--; remaining_iterations = num_online_nodes(); } @@ -794,13 +800,14 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages) * an instantiated the change should be committed via vma_commit_reservation. * No action is required on failure. */ -static int vma_needs_reservation(struct vm_area_struct *vma, unsigned long addr) +static int vma_needs_reservation(struct hstate *h, + struct vm_area_struct *vma, unsigned long addr) { struct address_space *mapping = vma->vm_file->f_mapping; struct inode *inode = mapping->host; if (vma->vm_flags & VM_SHARED) { - pgoff_t idx = vma_hugecache_offset(vma, addr); + pgoff_t idx = vma_hugecache_offset(h, vma, addr); return region_chg(&inode->i_mapping->private_list, idx, idx + 1); @@ -809,7 +816,7 @@ static int vma_needs_reservation(struct vm_area_struct *vma, unsigned long addr) } else { int err; - pgoff_t idx = vma_hugecache_offset(vma, addr); + pgoff_t idx = vma_hugecache_offset(h, vma, addr); struct resv_map *reservations = vma_resv_map(vma); err = region_chg(&reservations->regions, idx, idx + 1); @@ -818,18 +825,18 @@ static int vma_needs_reservation(struct vm_area_struct *vma, unsigned long addr) return 0; } } -static void vma_commit_reservation(struct vm_area_struct *vma, - unsigned long addr) +static void vma_commit_reservation(struct hstate *h, + struct vm_area_struct *vma, unsigned long addr) { struct address_space *mapping = vma->vm_file->f_mapping; struct inode *inode = mapping->host; if (vma->vm_flags & VM_SHARED) { - pgoff_t idx = vma_hugecache_offset(vma, addr); + pgoff_t idx = vma_hugecache_offset(h, vma, addr); region_add(&inode->i_mapping->private_list, idx, idx + 1); } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { - pgoff_t idx = vma_hugecache_offset(vma, addr); + pgoff_t idx = vma_hugecache_offset(h, vma, addr); struct resv_map *reservations = vma_resv_map(vma); /* Mark this page used in the map. 
*/ @@ -840,6 +847,7 @@ static void vma_commit_reservation(struct vm_area_struct *vma, static struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) { + struct hstate *h = hstate_vma(vma); struct page *page; struct address_space *mapping = vma->vm_file->f_mapping; struct inode *inode = mapping->host; @@ -852,7 +860,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, * MAP_NORESERVE mappings may also need pages and quota allocated * if no reserve mapping overlaps. */ - chg = vma_needs_reservation(vma, addr); + chg = vma_needs_reservation(h, vma, addr); if (chg < 0) return ERR_PTR(chg); if (chg) @@ -860,11 +868,11 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, return ERR_PTR(-ENOSPC); spin_lock(&hugetlb_lock); - page = dequeue_huge_page_vma(vma, addr, avoid_reserve); + page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve); spin_unlock(&hugetlb_lock); if (!page) { - page = alloc_buddy_huge_page(vma, addr); + page = alloc_buddy_huge_page(h, vma, addr); if (!page) { hugetlb_put_quota(inode->i_mapping, chg); return ERR_PTR(-VM_FAULT_OOM); @@ -874,7 +882,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, set_page_refcounted(page); set_page_private(page, (unsigned long) mapping); - vma_commit_reservation(vma, addr); + vma_commit_reservation(h, vma, addr); return page; } @@ -882,21 +890,28 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, static int __init hugetlb_init(void) { unsigned long i; + struct hstate *h = &default_hstate; if (HPAGE_SHIFT == 0) return 0; + if (!h->order) { + h->order = HPAGE_SHIFT - PAGE_SHIFT; + h->mask = HPAGE_MASK; + } + for (i = 0; i < MAX_NUMNODES; ++i) - INIT_LIST_HEAD(&hugepage_freelists[i]); + INIT_LIST_HEAD(&h->hugepage_freelists[i]); - hugetlb_next_nid = first_node(node_online_map); + h->hugetlb_next_nid = first_node(node_online_map); for (i = 0; i < max_huge_pages; ++i) { - if (!alloc_fresh_huge_page()) + if (!alloc_fresh_huge_page(h)) break; } - max_huge_pages = free_huge_pages = nr_huge_pages = i; - printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages); + max_huge_pages = h->free_huge_pages = h->nr_huge_pages = i; + printk(KERN_INFO "Total HugeTLB memory allocated, %ld\n", + h->free_huge_pages); return 0; } module_init(hugetlb_init); @@ -922,34 +937,36 @@ static unsigned int cpuset_mems_nr(unsigned int *array) #ifdef CONFIG_SYSCTL #ifdef CONFIG_HIGHMEM -static void try_to_free_low(unsigned long count) +static void try_to_free_low(struct hstate *h, unsigned long count) { int i; for (i = 0; i < MAX_NUMNODES; ++i) { struct page *page, *next; - list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) { - if (count >= nr_huge_pages) + struct list_head *freel = &h->hugepage_freelists[i]; + list_for_each_entry_safe(page, next, freel, lru) { + if (count >= h->nr_huge_pages) return; if (PageHighMem(page)) continue; list_del(&page->lru); update_and_free_page(page); - free_huge_pages--; - free_huge_pages_node[page_to_nid(page)]--; + h->free_huge_pages--; + h->free_huge_pages_node[page_to_nid(page)]--; } } } #else -static inline void try_to_free_low(unsigned long count) +static inline void try_to_free_low(struct hstate *h, unsigned long count) { } #endif -#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages) +#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) static unsigned long set_max_huge_pages(unsigned long count) { unsigned long min_count, ret; + struct hstate *h = &default_hstate; /* * Increase 
the pool size @@ -963,19 +980,19 @@ static unsigned long set_max_huge_pages(unsigned long count) * within all the constraints specified by the sysctls. */ spin_lock(&hugetlb_lock); - while (surplus_huge_pages && count > persistent_huge_pages) { - if (!adjust_pool_surplus(-1)) + while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { + if (!adjust_pool_surplus(h, -1)) break; } - while (count > persistent_huge_pages) { + while (count > persistent_huge_pages(h)) { /* * If this allocation races such that we no longer need the * page, free_huge_page will handle it by freeing the page * and reducing the surplus. */ spin_unlock(&hugetlb_lock); - ret = alloc_fresh_huge_page(); + ret = alloc_fresh_huge_page(h); spin_lock(&hugetlb_lock); if (!ret) goto out; @@ -997,21 +1014,21 @@ static unsigned long set_max_huge_pages(unsigned long count) * and won't grow the pool anywhere else. Not until one of the * sysctls are changed, or the surplus pages go out of use. */ - min_count = resv_huge_pages + nr_huge_pages - free_huge_pages; + min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; min_count = max(count, min_count); - try_to_free_low(min_count); - while (min_count < persistent_huge_pages) { - struct page *page = dequeue_huge_page(); + try_to_free_low(h, min_count); + while (min_count < persistent_huge_pages(h)) { + struct page *page = dequeue_huge_page(h); if (!page) break; - update_and_free_page(page); + update_and_free_page(h, page); } - while (count < persistent_huge_pages) { - if (!adjust_pool_surplus(1)) + while (count < persistent_huge_pages(h)) { + if (!adjust_pool_surplus(h, 1)) break; } out: - ret = persistent_huge_pages; + ret = persistent_huge_pages(h); spin_unlock(&hugetlb_lock); return ret; } @@ -1041,9 +1058,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write, struct file *file, void __user *buffer, size_t *length, loff_t *ppos) { + struct hstate *h = &default_hstate; proc_doulongvec_minmax(table, write, file, buffer, length, ppos); spin_lock(&hugetlb_lock); - nr_overcommit_huge_pages = sysctl_overcommit_huge_pages; + h->nr_overcommit_huge_pages = sysctl_overcommit_huge_pages; spin_unlock(&hugetlb_lock); return 0; } @@ -1052,37 +1070,40 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write, int hugetlb_report_meminfo(char *buf) { + struct hstate *h = &default_hstate; return sprintf(buf, "HugePages_Total: %5lu\n" "HugePages_Free: %5lu\n" "HugePages_Rsvd: %5lu\n" "HugePages_Surp: %5lu\n" "Hugepagesize: %5lu kB\n", - nr_huge_pages, - free_huge_pages, - resv_huge_pages, - surplus_huge_pages, - HPAGE_SIZE/1024); + h->nr_huge_pages, + h->free_huge_pages, + h->resv_huge_pages, + h->surplus_huge_pages, + 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); } int hugetlb_report_node_meminfo(int nid, char *buf) { + struct hstate *h = &default_hstate; return sprintf(buf, "Node %d HugePages_Total: %5u\n" "Node %d HugePages_Free: %5u\n" "Node %d HugePages_Surp: %5u\n", - nid, nr_huge_pages_node[nid], - nid, free_huge_pages_node[nid], - nid, surplus_huge_pages_node[nid]); + nid, h->nr_huge_pages_node[nid], + nid, h->free_huge_pages_node[nid], + nid, h->surplus_huge_pages_node[nid]); } /* Return the number pages of memory we physically have, in PAGE_SIZE units. 
*/ unsigned long hugetlb_total_pages(void) { - return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE); + struct hstate *h = &default_hstate; + return h->nr_huge_pages * pages_per_huge_page(h); } -static int hugetlb_acct_memory(long delta) +static int hugetlb_acct_memory(struct hstate *h, long delta) { int ret = -ENOMEM; @@ -1105,18 +1126,18 @@ static int hugetlb_acct_memory(long delta) * semantics that cpuset has. */ if (delta > 0) { - if (gather_surplus_pages(delta) < 0) + if (gather_surplus_pages(h, delta) < 0) goto out; - if (delta > cpuset_mems_nr(free_huge_pages_node)) { - return_unused_surplus_pages(delta); + if (delta > cpuset_mems_nr(h->free_huge_pages_node)) { + return_unused_surplus_pages(h, delta); goto out; } } ret = 0; if (delta < 0) - return_unused_surplus_pages((unsigned long) -delta); + return_unused_surplus_pages(h, (unsigned long) -delta); out: spin_unlock(&hugetlb_lock); @@ -1141,14 +1162,15 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma) static void hugetlb_vm_op_close(struct vm_area_struct *vma) { + struct hstate *h = hstate_vma(vma); struct resv_map *reservations = vma_resv_map(vma); unsigned long reserve; unsigned long start; unsigned long end; if (reservations) { - start = vma_hugecache_offset(vma, vma->vm_start); - end = vma_hugecache_offset(vma, vma->vm_end); + start = vma_hugecache_offset(h, vma, vma->vm_start); + end = vma_hugecache_offset(h, vma, vma->vm_end); reserve = (end - start) - region_count(&reservations->regions, start, end); @@ -1156,7 +1178,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma) kref_put(&reservations->refs, resv_map_release); if (reserve) - hugetlb_acct_memory(-reserve); + hugetlb_acct_memory(h, -reserve); } } @@ -1214,14 +1236,16 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct page *ptepage; unsigned long addr; int cow; + struct hstate *h = hstate_vma(vma); + unsigned long sz = huge_page_size(h); cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; - for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) { + for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { src_pte = huge_pte_offset(src, addr); if (!src_pte) continue; - dst_pte = huge_pte_alloc(dst, addr); + dst_pte = huge_pte_alloc(dst, addr, sz); if (!dst_pte) goto nomem; @@ -1257,6 +1281,9 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, pte_t pte; struct page *page; struct page *tmp; + struct hstate *h = hstate_vma(vma); + unsigned long sz = huge_page_size(h); + /* * A page gathering list, protected by per file i_mmap_lock. 
The * lock is used to avoid list corruption from multiple unmapping @@ -1265,11 +1292,11 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, LIST_HEAD(page_list); WARN_ON(!is_vm_hugetlb_page(vma)); - BUG_ON(start & ~HPAGE_MASK); - BUG_ON(end & ~HPAGE_MASK); + BUG_ON(start & ~huge_page_mask(h)); + BUG_ON(end & ~huge_page_mask(h)); spin_lock(&mm->page_table_lock); - for (address = start; address < end; address += HPAGE_SIZE) { + for (address = start; address < end; address += sz) { ptep = huge_pte_offset(mm, address); if (!ptep) continue; @@ -1383,6 +1410,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t pte, struct page *pagecache_page) { + struct hstate *h = hstate_vma(vma); struct page *old_page, *new_page; int avoidcopy; int outside_reserve = 0; @@ -1443,7 +1471,7 @@ retry_avoidcopy: __SetPageUptodate(new_page); spin_lock(&mm->page_table_lock); - ptep = huge_pte_offset(mm, address & HPAGE_MASK); + ptep = huge_pte_offset(mm, address & huge_page_mask(h)); if (likely(pte_same(huge_ptep_get(ptep), pte))) { /* Break COW */ huge_ptep_clear_flush(vma, address, ptep); @@ -1458,14 +1486,14 @@ retry_avoidcopy: } /* Return the pagecache page at a given address within a VMA */ -static struct page *hugetlbfs_pagecache_page(struct vm_area_struct *vma, - unsigned long address) +static struct page *hugetlbfs_pagecache_page(struct hstate *h, + struct vm_area_struct *vma, unsigned long address) { struct address_space *mapping; pgoff_t idx; mapping = vma->vm_file->f_mapping; - idx = vma_hugecache_offset(vma, address); + idx = vma_hugecache_offset(h, vma, address); return find_lock_page(mapping, idx); } @@ -1473,6 +1501,7 @@ static struct page *hugetlbfs_pagecache_page(struct vm_area_struct *vma, static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, int write_access) { + struct hstate *h = hstate_vma(vma); int ret = VM_FAULT_SIGBUS; pgoff_t idx; unsigned long size; @@ -1493,7 +1522,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, } mapping = vma->vm_file->f_mapping; - idx = vma_hugecache_offset(vma, address); + idx = vma_hugecache_offset(h, vma, address); /* * Use page lock to guard against racing truncation @@ -1502,7 +1531,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, retry: page = find_lock_page(mapping, idx); if (!page) { - size = i_size_read(mapping->host) >> HPAGE_SHIFT; + size = i_size_read(mapping->host) >> huge_page_shift(h); if (idx >= size) goto out; page = alloc_huge_page(vma, address, 0); @@ -1510,7 +1539,7 @@ retry: ret = -PTR_ERR(page); goto out; } - clear_huge_page(page, address); + clear_huge_page(page, address, huge_page_size(h)); __SetPageUptodate(page); if (vma->vm_flags & VM_SHARED) { @@ -1526,14 +1555,14 @@ retry: } spin_lock(&inode->i_lock); - inode->i_blocks += BLOCKS_PER_HUGEPAGE; + inode->i_blocks += blocks_per_huge_page(h); spin_unlock(&inode->i_lock); } else lock_page(page); } spin_lock(&mm->page_table_lock); - size = i_size_read(mapping->host) >> HPAGE_SHIFT; + size = i_size_read(mapping->host) >> huge_page_shift(h); if (idx >= size) goto backout; @@ -1569,8 +1598,9 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, pte_t entry; int ret; static DEFINE_MUTEX(hugetlb_instantiation_mutex); + struct hstate *h = hstate_vma(vma); - ptep = huge_pte_alloc(mm, address); + ptep = huge_pte_alloc(mm, address, huge_page_size(h)); if (!ptep) return 
VM_FAULT_OOM; @@ -1594,7 +1624,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, if (likely(pte_same(entry, huge_ptep_get(ptep)))) if (write_access && !pte_write(entry)) { struct page *page; - page = hugetlbfs_pagecache_page(vma, address); + page = hugetlbfs_pagecache_page(h, vma, address); ret = hugetlb_cow(mm, vma, address, ptep, entry, page); if (page) { unlock_page(page); @@ -1615,6 +1645,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long pfn_offset; unsigned long vaddr = *position; int remainder = *length; + struct hstate *h = hstate_vma(vma); spin_lock(&mm->page_table_lock); while (vaddr < vma->vm_end && remainder) { @@ -1626,7 +1657,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, * each hugepage. We have to make * sure we get the * first, for the page indexing below to work. */ - pte = huge_pte_offset(mm, vaddr & HPAGE_MASK); + pte = huge_pte_offset(mm, vaddr & huge_page_mask(h)); if (!pte || huge_pte_none(huge_ptep_get(pte)) || (write && !pte_write(huge_ptep_get(pte)))) { @@ -1644,7 +1675,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, break; } - pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT; + pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; page = pte_page(huge_ptep_get(pte)); same_page: if (pages) { @@ -1660,7 +1691,7 @@ same_page: --remainder; ++i; if (vaddr < vma->vm_end && remainder && - pfn_offset < HPAGE_SIZE/PAGE_SIZE) { + pfn_offset < pages_per_huge_page(h)) { /* * We use pfn_offset to avoid touching the pageframes * of this compound page. @@ -1682,13 +1713,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma, unsigned long start = address; pte_t *ptep; pte_t pte; + struct hstate *h = hstate_vma(vma); BUG_ON(address >= end); flush_cache_range(vma, address, end); spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); spin_lock(&mm->page_table_lock); - for (; address < end; address += HPAGE_SIZE) { + for (; address < end; address += huge_page_size(h)) { ptep = huge_pte_offset(mm, address); if (!ptep) continue; @@ -1711,6 +1743,7 @@ int hugetlb_reserve_pages(struct inode *inode, struct vm_area_struct *vma) { long ret, chg; + struct hstate *h = hstate_inode(inode); if (vma && vma->vm_flags & VM_NORESERVE) return 0; @@ -1739,7 +1772,7 @@ int hugetlb_reserve_pages(struct inode *inode, if (hugetlb_get_quota(inode->i_mapping, chg)) return -ENOSPC; - ret = hugetlb_acct_memory(chg); + ret = hugetlb_acct_memory(h, chg); if (ret < 0) { hugetlb_put_quota(inode->i_mapping, chg); return ret; @@ -1751,12 +1784,13 @@ int hugetlb_reserve_pages(struct inode *inode, void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) { + struct hstate *h = hstate_inode(inode); long chg = region_truncate(&inode->i_mapping->private_list, offset); spin_lock(&inode->i_lock); - inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed; + inode->i_blocks -= blocks_per_huge_page(h) * freed; spin_unlock(&inode->i_lock); hugetlb_put_quota(inode->i_mapping, (chg - freed)); - hugetlb_acct_memory(-(chg - freed)); + hugetlb_acct_memory(h, -(chg - freed)); } diff --git a/mm/memory.c b/mm/memory.c index 72932489a08..c1c1d6d8c22 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -903,7 +903,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp, if (unlikely(is_vm_hugetlb_page(vma))) { unmap_hugepage_range(vma, start, end, NULL); zap_work -= (end - start) / - (HPAGE_SIZE / PAGE_SIZE); + pages_per_huge_page(hstate_vma(vma)); start = end; } else start = unmap_page_range(*tlbp, vma,
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index c94e58b192c..e550bec2058 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1481,7 +1481,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { zl = node_zonelist(interleave_nid(*mpol, vma, addr, - HPAGE_SHIFT), gfp_flags); + huge_page_shift(hstate_vma(vma))), gfp_flags); } else { zl = policy_zonelist(gfp_flags, *mpol); if ((*mpol)->mode == MPOL_BIND) @@ -2220,9 +2220,12 @@ static void check_huge_range(struct vm_area_struct *vma, { unsigned long addr; struct page *page; + struct hstate *h = hstate_vma(vma); + unsigned long sz = huge_page_size(h); - for (addr = start; addr < end; addr += HPAGE_SIZE) { - pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK); + for (addr = start; addr < end; addr += sz) { + pte_t *ptep = huge_pte_offset(vma->vm_mm, + addr & huge_page_mask(h)); pte_t pte; if (!ptep) diff --git a/mm/mmap.c b/mm/mmap.c index 57d3b6097de..5e0cc99e9cd 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1812,7 +1812,8 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, struct mempolicy *pol; struct vm_area_struct *new; - if (is_vm_hugetlb_page(vma) && (addr & ~HPAGE_MASK)) + if (is_vm_hugetlb_page(vma) && (addr & + ~(huge_page_mask(hstate_vma(vma))))) return -EINVAL; if (mm->map_count >= sysctl_max_map_count) -- cgit v1.2.3-70-g09d2 From ceb868796181dc95ea01a110e123afd391639873 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 23 Jul 2008 21:27:50 -0700 Subject: hugetlb: introduce pud_huge Straightforward extensions for huge pages located in the PUD instead of PMDs. Signed-off-by: Andi Kleen Signed-off-by: Nick Piggin Cc: Martin Schwidefsky Cc: Heiko Carstens Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/mm/hugetlbpage.c | 6 ++++++ arch/powerpc/mm/hugetlbpage.c | 5 +++++ arch/s390/mm/hugetlbpage.c | 5 +++++ arch/sh/mm/hugetlbpage.c | 5 +++++ arch/sparc64/mm/hugetlbpage.c | 5 +++++ arch/x86/mm/hugetlbpage.c | 25 ++++++++++++++++++++++- include/linux/hugetlb.h | 5 +++++ mm/hugetlb.c | 9 +++++++++ mm/memory.c | 15 +++++++++++---- 9 files changed, 75 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index 6170f097d25..c45fc7f5a97 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -107,6 +107,12 @@ int pmd_huge(pmd_t pmd) { return 0; } + +int pud_huge(pud_t pud) +{ + return 0; +} + struct page * follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) { diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index c94dc71af98..63db7adce71 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -369,6 +369,11 @@ int pmd_huge(pmd_t pmd) return 0; } +int pud_huge(pud_t pud) +{ + return 0; +} + struct page * follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 9162dc84f77..f28c43d2f61 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -120,6 +120,11 @@ int pmd_huge(pmd_t pmd) return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE); } +int pud_huge(pud_t pud) +{ + return 0; +} + struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmdp, int write) { diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c index 2f9dbe0ef4a..9304117039c 100644 --- a/arch/sh/mm/hugetlbpage.c +++ 
b/arch/sh/mm/hugetlbpage.c @@ -79,6 +79,11 @@ int pmd_huge(pmd_t pmd) return 0; } +int pud_huge(pud_t pud) +{ + return 0; +} + struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) { diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c index 1307b23f6a7..f27d10369e0 100644 --- a/arch/sparc64/mm/hugetlbpage.c +++ b/arch/sparc64/mm/hugetlbpage.c @@ -295,6 +295,11 @@ int pmd_huge(pmd_t pmd) return 0; } +int pud_huge(pud_t pud) +{ + return 0; +} + struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) { diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 52476fde899..a4789e87a31 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -189,6 +189,11 @@ int pmd_huge(pmd_t pmd) return 0; } +int pud_huge(pud_t pud) +{ + return 0; +} + struct page * follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) @@ -209,6 +214,11 @@ int pmd_huge(pmd_t pmd) return !!(pmd_val(pmd) & _PAGE_PSE); } +int pud_huge(pud_t pud) +{ + return 0; +} + struct page * follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) @@ -217,9 +227,22 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, page = pte_page(*(pte_t *)pmd); if (page) - page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT); + page += ((address & ~PMD_MASK) >> PAGE_SHIFT); return page; } + +struct page * +follow_huge_pud(struct mm_struct *mm, unsigned long address, + pud_t *pud, int write) +{ + struct page *page; + + page = pte_page(*(pte_t *)pud); + if (page) + page += ((address & ~PUD_MASK) >> PAGE_SHIFT); + return page; +} + #endif /* x86_64 also uses this file */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 58c0de32e7f..b2c17f62cac 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -50,7 +50,10 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, int write); struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write); +struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, + pud_t *pud, int write); int pmd_huge(pmd_t pmd); +int pud_huge(pud_t pmd); void hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot); @@ -78,8 +81,10 @@ static inline unsigned long hugetlb_total_pages(void) #define hugetlb_report_meminfo(buf) 0 #define hugetlb_report_node_meminfo(n, buf) 0 #define follow_huge_pmd(mm, addr, pmd, write) NULL +#define follow_huge_pud(mm, addr, pud, write) NULL #define prepare_hugepage_range(file, addr, len) (-EINVAL) #define pmd_huge(x) 0 +#define pud_huge(x) 0 #define is_hugepage_only_range(mm, addr, len) 0 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) #define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; }) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 0c74c14dd2f..107c1ce223c 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1996,6 +1996,15 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, return ret; } +/* Can be overriden by architectures */ +__attribute__((weak)) struct page * +follow_huge_pud(struct mm_struct *mm, unsigned long address, + pud_t *pud, int write) +{ + BUG(); + return NULL; +} + int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, struct page **pages, struct vm_area_struct **vmas, unsigned long *position, int *length, int i, diff --git a/mm/memory.c b/mm/memory.c index 
02fc6b1047b..262e3eb6601 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -998,19 +998,24 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, goto no_page_table; pud = pud_offset(pgd, address); - if (pud_none(*pud) || unlikely(pud_bad(*pud))) + if (pud_none(*pud)) + goto no_page_table; + if (pud_huge(*pud)) { + BUG_ON(flags & FOLL_GET); + page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE); + goto out; + } + if (unlikely(pud_bad(*pud))) goto no_page_table; - + pmd = pmd_offset(pud, address); if (pmd_none(*pmd)) goto no_page_table; - if (pmd_huge(*pmd)) { BUG_ON(flags & FOLL_GET); page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE); goto out; } - if (unlikely(pmd_bad(*pmd))) goto no_page_table; @@ -1567,6 +1572,8 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, unsigned long next; int err; + BUG_ON(pud_huge(*pud)); + pmd = pmd_alloc(mm, pud, addr); if (!pmd) return -ENOMEM; -- cgit v1.2.3-70-g09d2 From 39c11e6c05b7fedbf7ed4df3908b25f622d56204 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 23 Jul 2008 21:27:50 -0700 Subject: x86: support GB hugepages on 64-bit Acked-by: Adam Litke Signed-off-by: Andi Kleen Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/hugetlbpage.c | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index a4789e87a31..b7a65a07af0 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -134,9 +134,14 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, pgd = pgd_offset(mm, addr); pud = pud_alloc(mm, pgd, addr); if (pud) { - if (pud_none(*pud)) - huge_pmd_share(mm, addr, pud); - pte = (pte_t *) pmd_alloc(mm, pud, addr); + if (sz == PUD_SIZE) { + pte = (pte_t *)pud; + } else { + BUG_ON(sz != PMD_SIZE); + if (pud_none(*pud)) + huge_pmd_share(mm, addr, pud); + pte = (pte_t *) pmd_alloc(mm, pud, addr); + } } BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); @@ -152,8 +157,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) pgd = pgd_offset(mm, addr); if (pgd_present(*pgd)) { pud = pud_offset(pgd, addr); - if (pud_present(*pud)) + if (pud_present(*pud)) { + if (pud_large(*pud)) + return (pte_t *)pud; pmd = pmd_offset(pud, addr); + } } return (pte_t *) pmd; } @@ -216,7 +224,7 @@ int pmd_huge(pmd_t pmd) int pud_huge(pud_t pud) { - return 0; + return !!(pud_val(pud) & _PAGE_PSE); } struct page * @@ -252,6 +260,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { + struct hstate *h = hstate_file(file); struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long start_addr; @@ -264,7 +273,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, } full_search: - addr = ALIGN(start_addr, HPAGE_SIZE); + addr = ALIGN(start_addr, huge_page_size(h)); for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). 
*/ @@ -286,7 +295,7 @@ full_search: } if (addr + mm->cached_hole_size < vma->vm_start) mm->cached_hole_size = vma->vm_start - addr; - addr = ALIGN(vma->vm_end, HPAGE_SIZE); + addr = ALIGN(vma->vm_end, huge_page_size(h)); } } @@ -294,6 +303,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr0, unsigned long len, unsigned long pgoff, unsigned long flags) { + struct hstate *h = hstate_file(file); struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev_vma; unsigned long base = mm->mmap_base, addr = addr0; @@ -314,7 +324,7 @@ try_again: goto fail; /* either no address requested or cant fit in requested address hole */ - addr = (mm->free_area_cache - len) & HPAGE_MASK; + addr = (mm->free_area_cache - len) & huge_page_mask(h); do { /* * Lookup failure means no vma is above this address, @@ -345,7 +355,7 @@ try_again: largest_hole = vma->vm_start - addr; /* try just below the current vma->vm_start */ - addr = (vma->vm_start - len) & HPAGE_MASK; + addr = (vma->vm_start - len) & huge_page_mask(h); } while (len <= vma->vm_start); fail: @@ -383,10 +393,11 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { + struct hstate *h = hstate_file(file); struct mm_struct *mm = current->mm; struct vm_area_struct *vma; - if (len & ~HPAGE_MASK) + if (len & ~huge_page_mask(h)) return -EINVAL; if (len > TASK_SIZE) return -ENOMEM; @@ -398,7 +409,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, } if (addr) { - addr = ALIGN(addr, HPAGE_SIZE); + addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && (!vma || addr + len <= vma->vm_start)) -- cgit v1.2.3-70-g09d2 From b4718e628dbf68a2dee23b5709e2aa3190409c56 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 23 Jul 2008 21:27:51 -0700 Subject: x86: add hugepagesz option on 64-bit Add a hugepagesz=... option, similar to the one on IA64 and PPC, to x86-64. This finally makes it possible to select GB pages for hugetlbfs on x86, now that all the infrastructure is in place. Signed-off-by: Andi Kleen Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/kernel-parameters.txt | 11 +++++++++-- arch/x86/mm/hugetlbpage.c | 17 +++++++++++++++++ include/asm-x86/page.h | 2 ++ 3 files changed, 28 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 5e20ccb5a73..d55fd88fd0a 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -774,8 +774,15 @@ and is between 256 and 4096 characters. It is defined in the file hisax= [HW,ISDN] See Documentation/isdn/README.HiSax. - hugepages= [HW,X86-32,IA-64] Maximal number of HugeTLB pages. - hugepagesz= [HW,IA-64,PPC] The size of the HugeTLB pages. + hugepages= [HW,X86-32,IA-64] HugeTLB pages to allocate at boot. + hugepagesz= [HW,IA-64,PPC,X86-64] The size of the HugeTLB pages. + On x86 this option can be specified multiple times + interleaved with hugepages= to reserve huge pages + of different sizes. Valid page sizes on x86-64 + are 2M (when the CPU supports "pse") and 1G (when the + CPU supports the "pdpe1gb" cpuinfo flag). + Note that 1GB pages can only be allocated at boot time + using hugepages= and not freed afterwards.
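A usage sketch (the page counts here are arbitrary examples, not taken from this patch): on a 64-bit x86 CPU that advertises the "pdpe1gb" flag, booting with

    hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=512

reserves two 1GB pages and 512 2MB pages at boot; each hugepagesz= selects the hstate that the following hugepages= count applies to.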
i8042.direct [HW] Put keyboard port into non-translated mode i8042.dumbkbd [HW] Pretend that controller can only read data from diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index b7a65a07af0..8f307d914c2 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -425,3 +425,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/ +#ifdef CONFIG_X86_64 +static __init int setup_hugepagesz(char *opt) +{ + unsigned long ps = memparse(opt, &opt); + if (ps == PMD_SIZE) { + hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); + } else if (ps == PUD_SIZE && cpu_has_gbpages) { + hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); + } else { + printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n", + ps >> 20); + return 0; + } + return 1; +} +__setup("hugepagesz=", setup_hugepagesz); +#endif diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h index 6c846228948..6e02098b160 100644 --- a/include/asm-x86/page.h +++ b/include/asm-x86/page.h @@ -32,6 +32,8 @@ #define HPAGE_MASK (~(HPAGE_SIZE - 1)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) +#define HUGE_MAX_HSTATE 2 + /* to align the pointer to the (next) page boundary */ #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) -- cgit v1.2.3-70-g09d2 From 27ac792ca0b0a1e7e65f20342260650516c95864 Mon Sep 17 00:00:00 2001 From: Andrea Righi Date: Wed, 23 Jul 2008 21:28:13 -0700 Subject: PAGE_ALIGN(): correctly handle 64-bit values on 32-bit architectures On 32-bit architectures PAGE_ALIGN() truncates 64-bit values to the 32-bit boundary. For example: u64 val = PAGE_ALIGN(size); always returns a value < 4GB even if size is greater than 4GB. The problem resides in the PAGE_MASK definition (from include/asm-x86/page.h for example): #define PAGE_SHIFT 12 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) ... #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) The "~" is performed on a 32-bit value, so any value ANDed with PAGE_MASK that is greater than 4GB will be truncated to the 32-bit boundary. Using the ALIGN() macro seems to be the right way, because it uses typeof(addr) for the mask. Also move the PAGE_ALIGN() definitions out of include/asm-*/page.h and into include/linux/mm.h.
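To make the truncation concrete, here is a minimal user-space sketch (not part of the patch; the OLD_/NEW_ macro names are illustrative, and a simplified stand-in for the kernel's ALIGN() is used). The types are pinned to 32 bits so it reproduces the 32-bit kernel behaviour on any host:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE ((uint32_t)1 << PAGE_SHIFT)
    #define PAGE_MASK (~(PAGE_SIZE - 1))            /* 32-bit mask: 0xfffff000 */

    /* old macro: the 32-bit PAGE_MASK is zero-extended, wiping bits 32..63 */
    #define OLD_PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

    /* simplified ALIGN(): builds the mask in typeof(x), as the kernel macro does */
    #define ALIGN(x, a) (((x) + ((typeof(x))(a) - 1)) & ~((typeof(x))(a) - 1))
    #define NEW_PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

    int main(void)
    {
            uint64_t size = 0x140000000ULL;         /* 5GB */
            /* prints old: 0x40000000 - the high bits are lost */
            printf("old: 0x%llx\n", (unsigned long long)OLD_PAGE_ALIGN(size));
            /* prints new: 0x140000000 - alignment is preserved above 4GB */
            printf("new: 0x%llx\n", (unsigned long long)NEW_PAGE_ALIGN(size));
            return 0;
    }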
See also lkml discussion: http://lkml.org/lkml/2008/6/11/237 [akpm@linux-foundation.org: fix drivers/media/video/uvc/uvc_queue.c] [akpm@linux-foundation.org: fix v850] [akpm@linux-foundation.org: fix powerpc] [akpm@linux-foundation.org: fix arm] [akpm@linux-foundation.org: fix mips] [akpm@linux-foundation.org: fix drivers/media/video/pvrusb2/pvrusb2-dvb.c] [akpm@linux-foundation.org: fix drivers/mtd/maps/uclinux.c] [akpm@linux-foundation.org: fix powerpc] Signed-off-by: Andrea Righi Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/kernel/module.c | 1 + arch/arm/plat-omap/fb.c | 1 + arch/avr32/mm/ioremap.c | 1 + arch/h8300/kernel/setup.c | 1 + arch/m68k/amiga/chipram.c | 1 + arch/m68knommu/kernel/setup.c | 1 + arch/mips/kernel/module.c | 1 + arch/mips/sgi-ip27/ip27-klnuma.c | 1 + arch/powerpc/kernel/suspend.c | 1 + arch/powerpc/lib/code-patching.c | 1 + arch/sparc64/kernel/iommu_common.h | 2 +- arch/x86/kernel/module_64.c | 1 + arch/xtensa/kernel/setup.c | 1 + drivers/char/random.c | 1 + drivers/ieee1394/iso.c | 1 + drivers/media/video/pvrusb2/pvrusb2-dvb.c | 1 + drivers/media/video/pvrusb2/pvrusb2-ioread.c | 1 + drivers/media/video/uvc/uvc_queue.c | 1 + drivers/media/video/videobuf-core.c | 1 + drivers/mtd/maps/uclinux.c | 1 + drivers/net/mlx4/eq.c | 1 + drivers/pcmcia/electra_cf.c | 1 + drivers/scsi/sun_esp.c | 1 + drivers/video/acornfb.c | 1 + drivers/video/imxfb.c | 1 + drivers/video/omap/dispc.c | 1 + drivers/video/omap/omapfb_main.c | 1 + drivers/video/pxafb.c | 1 + drivers/video/sa1100fb.c | 1 + include/asm-alpha/page.h | 3 --- include/asm-arm/page-nommu.h | 4 +--- include/asm-arm/page.h | 3 --- include/asm-avr32/page.h | 3 --- include/asm-blackfin/page.h | 3 --- include/asm-cris/page.h | 3 --- include/asm-frv/page.h | 3 --- include/asm-h8300/page.h | 3 --- include/asm-ia64/page.h | 1 - include/asm-m32r/page.h | 3 --- include/asm-m68k/dvma.h | 2 +- include/asm-m68k/page.h | 3 --- include/asm-m68knommu/page.h | 3 --- include/asm-mips/page.h | 3 --- include/asm-mips/processor.h | 2 +- include/asm-mn10300/page.h | 3 --- include/asm-parisc/page.h | 4 ---- include/asm-powerpc/page.h | 3 --- include/asm-s390/page.h | 3 --- include/asm-sh/page.h | 3 --- include/asm-sparc/page_32.h | 3 --- include/asm-sparc/page_64.h | 3 --- include/asm-um/page.h | 3 --- include/asm-v850/page.h | 4 ---- include/asm-x86/page.h | 3 --- include/asm-xtensa/page.h | 2 -- include/linux/mm.h | 3 +++ sound/core/info.c | 1 + 57 files changed, 36 insertions(+), 74 deletions(-) (limited to 'arch/x86') diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index 79b7e5cf541..a68259a0ccc 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/plat-omap/fb.c index 96d6f061973..5d107520e6b 100644 --- a/arch/arm/plat-omap/fb.c +++ b/arch/arm/plat-omap/fb.c @@ -23,6 +23,7 @@ #include #include +#include #include #include #include diff --git a/arch/avr32/mm/ioremap.c b/arch/avr32/mm/ioremap.c index 3437c82434a..f03b79f0e0a 100644 --- a/arch/avr32/mm/ioremap.c +++ b/arch/avr32/mm/ioremap.c @@ -6,6 +6,7 @@ * published by the Free Software Foundation. 
*/ #include +#include #include #include diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c index b1f25c20a5d..7fda657110e 100644 --- a/arch/h8300/kernel/setup.c +++ b/arch/h8300/kernel/setup.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/m68k/amiga/chipram.c b/arch/m68k/amiga/chipram.c index cbe36538af4..61df1d33c05 100644 --- a/arch/m68k/amiga/chipram.c +++ b/arch/m68k/amiga/chipram.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include diff --git a/arch/m68knommu/kernel/setup.c b/arch/m68knommu/kernel/setup.c index 03f4fe6a2fc..5985f198902 100644 --- a/arch/m68knommu/kernel/setup.c +++ b/arch/m68knommu/kernel/setup.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c index e7ed0ac4853..1f60e27523d 100644 --- a/arch/mips/kernel/module.c +++ b/arch/mips/kernel/module.c @@ -22,6 +22,7 @@ #include #include +#include #include #include #include diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c index 48932ce1d73..d9c79d8be81 100644 --- a/arch/mips/sgi-ip27/ip27-klnuma.c +++ b/arch/mips/sgi-ip27/ip27-klnuma.c @@ -4,6 +4,7 @@ * Copyright 2000 - 2001 Kanoj Sarcar (kanoj@sgi.com) */ #include +#include #include #include #include diff --git a/arch/powerpc/kernel/suspend.c b/arch/powerpc/kernel/suspend.c index 8cee5710754..6fc6328dc62 100644 --- a/arch/powerpc/kernel/suspend.c +++ b/arch/powerpc/kernel/suspend.c @@ -7,6 +7,7 @@ * Copyright (c) 2001 Patrick Mochel */ +#include #include /* References to section boundaries */ diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index 0559fe086eb..7c975d43e3f 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include diff --git a/arch/sparc64/kernel/iommu_common.h b/arch/sparc64/kernel/iommu_common.h index f3575a614fa..53b19c8231a 100644 --- a/arch/sparc64/kernel/iommu_common.h +++ b/arch/sparc64/kernel/iommu_common.h @@ -23,7 +23,7 @@ #define IO_PAGE_SHIFT 13 #define IO_PAGE_SIZE (1UL << IO_PAGE_SHIFT) #define IO_PAGE_MASK (~(IO_PAGE_SIZE-1)) -#define IO_PAGE_ALIGN(addr) (((addr)+IO_PAGE_SIZE-1)&IO_PAGE_MASK) +#define IO_PAGE_ALIGN(addr) ALIGN(addr, IO_PAGE_SIZE) #define IO_TSB_ENTRIES (128*1024) #define IO_TSB_SIZE (IO_TSB_ENTRIES * 8) diff --git a/arch/x86/kernel/module_64.c b/arch/x86/kernel/module_64.c index 0e867676b5a..6ba87830d4b 100644 --- a/arch/x86/kernel/module_64.c +++ b/arch/x86/kernel/module_64.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 5e6d75c9f92..a00359e8f7a 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -16,6 +16,7 @@ #include #include +#include #include #include #include diff --git a/drivers/char/random.c b/drivers/char/random.c index 0cf98bd4f2d..e0d0e371909 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -236,6 +236,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/ieee1394/iso.c b/drivers/ieee1394/iso.c index 07ca35c98f9..1cf6487b65b 100644 --- a/drivers/ieee1394/iso.c +++ b/drivers/ieee1394/iso.c @@ -11,6 +11,7 @@ #include #include +#include #include #include "hosts.h" diff --git a/drivers/media/video/pvrusb2/pvrusb2-dvb.c b/drivers/media/video/pvrusb2/pvrusb2-dvb.c index 6ec4bf81fc7..77b3c338506 100644 
--- a/drivers/media/video/pvrusb2/pvrusb2-dvb.c +++ b/drivers/media/video/pvrusb2/pvrusb2-dvb.c @@ -20,6 +20,7 @@ #include #include +#include #include "dvbdev.h" #include "pvrusb2-debug.h" #include "pvrusb2-hdw-internal.h" diff --git a/drivers/media/video/pvrusb2/pvrusb2-ioread.c b/drivers/media/video/pvrusb2/pvrusb2-ioread.c index 05a1376405e..b4824782d85 100644 --- a/drivers/media/video/pvrusb2/pvrusb2-ioread.c +++ b/drivers/media/video/pvrusb2/pvrusb2-ioread.c @@ -22,6 +22,7 @@ #include "pvrusb2-debug.h" #include #include +#include #include #include #include diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c index 7388d0cee3d..5646a6a3293 100644 --- a/drivers/media/video/uvc/uvc_queue.c +++ b/drivers/media/video/uvc/uvc_queue.c @@ -13,6 +13,7 @@ #include #include +#include #include #include #include diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c index 0a88c44ace0..b7b05842cf2 100644 --- a/drivers/media/video/videobuf-core.c +++ b/drivers/media/video/videobuf-core.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c index c42f4b83f68..3fcf92130aa 100644 --- a/drivers/mtd/maps/uclinux.c +++ b/drivers/mtd/maps/uclinux.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c index e141a1513f0..ea3a09aaa84 100644 --- a/drivers/net/mlx4/eq.c +++ b/drivers/net/mlx4/eq.c @@ -33,6 +33,7 @@ #include #include +#include #include #include diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c index c21f9a9c3e3..a34284b1482 100644 --- a/drivers/pcmcia/electra_cf.c +++ b/drivers/pcmcia/electra_cf.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c index 2c87db98cdf..f9cf7015136 100644 --- a/drivers/scsi/sun_esp.c +++ b/drivers/scsi/sun_esp.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c index eedb8285e32..017233d0c48 100644 --- a/drivers/video/acornfb.c +++ b/drivers/video/acornfb.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c index 94e4d3ac1a0..0c5a475c1ca 100644 --- a/drivers/video/imxfb.c +++ b/drivers/video/imxfb.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c index ab32ceb0617..ab77c51fe9d 100644 --- a/drivers/video/omap/dispc.c +++ b/drivers/video/omap/dispc.c @@ -20,6 +20,7 @@ */ #include #include +#include #include #include #include diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c index 14d0f7a1114..f85af5c4fa6 100644 --- a/drivers/video/omap/omapfb_main.c +++ b/drivers/video/omap/omapfb_main.c @@ -25,6 +25,7 @@ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ #include +#include #include #include diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c index bb251436950..5e8a140399f 100644 --- a/drivers/video/pxafb.c +++ b/drivers/video/pxafb.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/video/sa1100fb.c b/drivers/video/sa1100fb.c index ab2b2110478..4a9f7e12180 100644 --- a/drivers/video/sa1100fb.c +++ b/drivers/video/sa1100fb.c @@ -167,6 +167,7 @@ #include #include #include +#include #include #include #include diff --git a/include/asm-alpha/page.h b/include/asm-alpha/page.h index 22ff9762d17..0995f9d1341 100644 --- a/include/asm-alpha/page.h +++ b/include/asm-alpha/page.h @@ -80,9 +80,6 @@ typedef struct page *pgtable_t; #endif /* !__ASSEMBLY__ */ -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) #ifndef CONFIG_DISCONTIGMEM diff --git a/include/asm-arm/page-nommu.h b/include/asm-arm/page-nommu.h index a1bcad06048..ea1cde84f50 100644 --- a/include/asm-arm/page-nommu.h +++ b/include/asm-arm/page-nommu.h @@ -7,6 +7,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ + #ifndef _ASMARM_PAGE_NOMMU_H #define _ASMARM_PAGE_NOMMU_H @@ -42,9 +43,6 @@ typedef unsigned long pgprot_t; #define __pmd(x) (x) #define __pgprot(x) (x) -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - extern unsigned long memory_start; extern unsigned long memory_end; diff --git a/include/asm-arm/page.h b/include/asm-arm/page.h index 8e05bdb5f12..7c5fc5582e5 100644 --- a/include/asm-arm/page.h +++ b/include/asm-arm/page.h @@ -15,9 +15,6 @@ #define PAGE_SIZE (1UL << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - #ifndef __ASSEMBLY__ #ifndef CONFIG_MMU diff --git a/include/asm-avr32/page.h b/include/asm-avr32/page.h index cbbc5ca9728..f805d1cb11b 100644 --- a/include/asm-avr32/page.h +++ b/include/asm-avr32/page.h @@ -57,9 +57,6 @@ static inline int get_order(unsigned long size) #endif /* !__ASSEMBLY__ */ -/* Align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK) - /* * The hardware maps the virtual addresses 0x80000000 -> 0x9fffffff * permanently to the physical addresses 0x00000000 -> 0x1fffffff when diff --git a/include/asm-blackfin/page.h b/include/asm-blackfin/page.h index c7db0220fbd..344f6a8c1f2 100644 --- a/include/asm-blackfin/page.h +++ b/include/asm-blackfin/page.h @@ -51,9 +51,6 @@ typedef struct page *pgtable_t; #define __pgd(x) ((pgd_t) { (x) } ) #define __pgprot(x) ((pgprot_t) { (x) } ) -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - extern unsigned long memory_start; extern unsigned long memory_end; diff --git a/include/asm-cris/page.h b/include/asm-cris/page.h index c45bb1ef397..d19272ba6b6 100644 --- a/include/asm-cris/page.h +++ b/include/asm-cris/page.h @@ -60,9 +60,6 @@ typedef struct page *pgtable_t; #define page_to_phys(page) __pa((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET) -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - #ifndef __ASSEMBLY__ #endif /* __ASSEMBLY__ */ diff 
--git a/include/asm-frv/page.h b/include/asm-frv/page.h index c2c1e89e747..bd9c220094c 100644 --- a/include/asm-frv/page.h +++ b/include/asm-frv/page.h @@ -40,9 +40,6 @@ typedef struct page *pgtable_t; #define __pgprot(x) ((pgprot_t) { (x) } ) #define PTE_MASK PAGE_MASK -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK) - #define devmem_is_allowed(pfn) 1 #define __pa(vaddr) virt_to_phys((void *) (unsigned long) (vaddr)) diff --git a/include/asm-h8300/page.h b/include/asm-h8300/page.h index d6a3eaf3b27..0b6acf0b03a 100644 --- a/include/asm-h8300/page.h +++ b/include/asm-h8300/page.h @@ -43,9 +43,6 @@ typedef struct page *pgtable_t; #define __pgd(x) ((pgd_t) { (x) } ) #define __pgprot(x) ((pgprot_t) { (x) } ) -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - extern unsigned long memory_start; extern unsigned long memory_end; diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h index 36f39321b76..5f271bc712e 100644 --- a/include/asm-ia64/page.h +++ b/include/asm-ia64/page.h @@ -40,7 +40,6 @@ #define PAGE_SIZE (__IA64_UL_CONST(1) << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE - 1)) -#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK) #define PERCPU_PAGE_SHIFT 16 /* log2() of max. size of per-CPU area */ #define PERCPU_PAGE_SIZE (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT) diff --git a/include/asm-m32r/page.h b/include/asm-m32r/page.h index 8a677f3fca6..c9333089fe1 100644 --- a/include/asm-m32r/page.h +++ b/include/asm-m32r/page.h @@ -41,9 +41,6 @@ typedef struct page *pgtable_t; #endif /* !__ASSEMBLY__ */ -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK) - /* * This handles the memory map.. 
We could make this a config * option, but too many people screw it up, and too few need diff --git a/include/asm-m68k/dvma.h b/include/asm-m68k/dvma.h index 4fff408d015..890bbf7e775 100644 --- a/include/asm-m68k/dvma.h +++ b/include/asm-m68k/dvma.h @@ -13,7 +13,7 @@ #define DVMA_PAGE_SHIFT 13 #define DVMA_PAGE_SIZE (1UL << DVMA_PAGE_SHIFT) #define DVMA_PAGE_MASK (~(DVMA_PAGE_SIZE-1)) -#define DVMA_PAGE_ALIGN(addr) (((addr)+DVMA_PAGE_SIZE-1)&DVMA_PAGE_MASK) +#define DVMA_PAGE_ALIGN(addr) ALIGN(addr, DVMA_PAGE_SIZE) extern void dvma_init(void); extern int dvma_map_iommu(unsigned long kaddr, unsigned long baddr, diff --git a/include/asm-m68k/page.h b/include/asm-m68k/page.h index 880c2cbff8a..a34b8bad784 100644 --- a/include/asm-m68k/page.h +++ b/include/asm-m68k/page.h @@ -103,9 +103,6 @@ typedef struct page *pgtable_t; #define __pgd(x) ((pgd_t) { (x) } ) #define __pgprot(x) ((pgprot_t) { (x) } ) -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - #endif /* !__ASSEMBLY__ */ #include diff --git a/include/asm-m68knommu/page.h b/include/asm-m68knommu/page.h index 1e82ebb7d64..3a1ede4544c 100644 --- a/include/asm-m68knommu/page.h +++ b/include/asm-m68knommu/page.h @@ -43,9 +43,6 @@ typedef struct page *pgtable_t; #define __pgd(x) ((pgd_t) { (x) } ) #define __pgprot(x) ((pgprot_t) { (x) } ) -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - extern unsigned long memory_start; extern unsigned long memory_end; diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h index 494f00ba954..fe7a88ea066 100644 --- a/include/asm-mips/page.h +++ b/include/asm-mips/page.h @@ -137,9 +137,6 @@ typedef struct { unsigned long pgprot; } pgprot_t; #endif /* !__ASSEMBLY__ */ -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK) - /* * __pa()/__va() should be used only during mem init. */ diff --git a/include/asm-mips/processor.h b/include/asm-mips/processor.h index 58cbac5a64e..a1e4453469f 100644 --- a/include/asm-mips/processor.h +++ b/include/asm-mips/processor.h @@ -45,7 +45,7 @@ extern unsigned int vced_count, vcei_count; * This decides where the kernel will search for a free chunk of vm * space during mmap's. */ -#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) +#define TASK_UNMAPPED_BASE ((TASK_SIZE / 3) & ~(PAGE_SIZE)) #endif #ifdef CONFIG_64BIT diff --git a/include/asm-mn10300/page.h b/include/asm-mn10300/page.h index 124971b9fb9..8288e124165 100644 --- a/include/asm-mn10300/page.h +++ b/include/asm-mn10300/page.h @@ -61,9 +61,6 @@ typedef struct page *pgtable_t; #endif /* !__ASSEMBLY__ */ -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK) - /* * This handles the memory map.. We could make this a config * option, but too many people screw it up, and too few need diff --git a/include/asm-parisc/page.h b/include/asm-parisc/page.h index 27d50b85954..c3941f09a87 100644 --- a/include/asm-parisc/page.h +++ b/include/asm-parisc/page.h @@ -119,10 +119,6 @@ extern int npmem_ranges; #define PMD_ENTRY_SIZE (1UL << BITS_PER_PMD_ENTRY) #define PTE_ENTRY_SIZE (1UL << BITS_PER_PTE_ENTRY) -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - - #define LINUX_GATEWAY_SPACE 0 /* This governs the relationship between virtual and physical addresses. 
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h index cffdf0eb0df..e088545cb3f 100644 --- a/include/asm-powerpc/page.h +++ b/include/asm-powerpc/page.h @@ -119,9 +119,6 @@ extern phys_addr_t kernstart_addr; /* align addr on a size boundary - adjust address up if needed */ #define _ALIGN(addr,size) _ALIGN_UP(addr,size) -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE) - /* * Don't compare things with KERNELBASE or PAGE_OFFSET to test for * "kernelness", use is_kernel_addr() - it should do what you want. diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h index 12fd9c4f0f1..991ba939408 100644 --- a/include/asm-s390/page.h +++ b/include/asm-s390/page.h @@ -138,9 +138,6 @@ void arch_alloc_page(struct page *page, int order); #endif /* !__ASSEMBLY__ */ -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - #define __PAGE_OFFSET 0x0UL #define PAGE_OFFSET 0x0UL #define __pa(x) (unsigned long)(x) diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h index 304c30b5d94..5dc01d2fcc4 100644 --- a/include/asm-sh/page.h +++ b/include/asm-sh/page.h @@ -22,9 +22,6 @@ #define PAGE_MASK (~(PAGE_SIZE-1)) #define PTE_MASK PAGE_MASK -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - #if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) #define HPAGE_SHIFT 16 #elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) diff --git a/include/asm-sparc/page_32.h b/include/asm-sparc/page_32.h index 14de518cc38..cf5fb70ca1c 100644 --- a/include/asm-sparc/page_32.h +++ b/include/asm-sparc/page_32.h @@ -134,9 +134,6 @@ BTFIXUPDEF_SETHI(sparc_unmapped_base) #endif /* !(__ASSEMBLY__) */ -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - #define PAGE_OFFSET 0xf0000000 #ifndef __ASSEMBLY__ extern unsigned long phys_base; diff --git a/include/asm-sparc/page_64.h b/include/asm-sparc/page_64.h index a8a2bba032c..b579b910ef5 100644 --- a/include/asm-sparc/page_64.h +++ b/include/asm-sparc/page_64.h @@ -106,9 +106,6 @@ typedef struct page *pgtable_t; #endif /* !(__ASSEMBLY__) */ -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - /* We used to stick this into a hard-coded global register (%g4) * but that does not make sense anymore. */ diff --git a/include/asm-um/page.h b/include/asm-um/page.h index 916e1a61999..335c57383c0 100644 --- a/include/asm-um/page.h +++ b/include/asm-um/page.h @@ -92,9 +92,6 @@ typedef struct page *pgtable_t; #define __pgd(x) ((pgd_t) { (x) } ) #define __pgprot(x) ((pgprot_t) { (x) } ) -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - extern unsigned long uml_physmem; #define PAGE_OFFSET (uml_physmem) diff --git a/include/asm-v850/page.h b/include/asm-v850/page.h index 74a539a9bd5..f9de35d873f 100644 --- a/include/asm-v850/page.h +++ b/include/asm-v850/page.h @@ -94,10 +94,6 @@ typedef unsigned long pgprot_t; #endif /* !__ASSEMBLY__ */ -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK) - - /* No current v850 processor has virtual memory. 
*/ #define __virt_to_phys(addr) (addr) #define __phys_to_virt(addr) (addr) diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h index 6e02098b160..49982110e4d 100644 --- a/include/asm-x86/page.h +++ b/include/asm-x86/page.h @@ -34,9 +34,6 @@ #define HUGE_MAX_HSTATE 2 -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - #ifndef __ASSEMBLY__ #include #endif diff --git a/include/asm-xtensa/page.h b/include/asm-xtensa/page.h index 80a6ae0dd25..11f7dc2dbec 100644 --- a/include/asm-xtensa/page.h +++ b/include/asm-xtensa/page.h @@ -26,13 +26,11 @@ /* * PAGE_SHIFT determines the page size - * PAGE_ALIGN(x) aligns the pointer to the (next) page boundary */ #define PAGE_SHIFT 12 #define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE - 1) & PAGE_MASK) #define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR #define MAX_MEM_PFN XCHAL_KSEG_SIZE diff --git a/include/linux/mm.h b/include/linux/mm.h index df322fb4df3..d87a5a5fe87 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -41,6 +41,9 @@ extern unsigned long mmap_min_addr; #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) +/* to align the pointer to the (next) page boundary */ +#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE) + /* * Linux kernel virtual memory manager primitives. * The idea being to have a "virtual" mm in the same way diff --git a/sound/core/info.c b/sound/core/info.c index cb5ead3e202..c67773ad929 100644 --- a/sound/core/info.c +++ b/sound/core/info.c @@ -21,6 +21,7 @@ #include #include +#include #include #include #include -- cgit v1.2.3-70-g09d2 From d75f65fd247fe85d90a3880d143b1bb22fe13a48 Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Wed, 23 Jul 2008 21:28:34 -0700 Subject: remove include/linux/pm_legacy.h Remove the obsolete and no longer used include/linux/pm_legacy.h Reviewed-by: Robert P. J. Day Signed-off-by: Adrian Bunk Cc: Pavel Machek Acked-by: "Rafael J. 
Wysocki" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/frv/kernel/pm.c | 1 - arch/mips/au1000/common/power.c | 1 - arch/x86/kernel/apm_32.c | 1 - include/linux/pm_legacy.h | 35 ----------------------------------- 4 files changed, 38 deletions(-) delete mode 100644 include/linux/pm_legacy.h (limited to 'arch/x86') diff --git a/arch/frv/kernel/pm.c b/arch/frv/kernel/pm.c index 73f3aeefd20..d1113c5031f 100644 --- a/arch/frv/kernel/pm.c +++ b/arch/frv/kernel/pm.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/mips/au1000/common/power.c b/arch/mips/au1000/common/power.c index 2166b9e1e80..bd854a6d1d8 100644 --- a/arch/mips/au1000/common/power.c +++ b/arch/mips/au1000/common/power.c @@ -31,7 +31,6 @@ #include #include -#include #include #include diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index bf9b441331e..9ee24e6bc4b 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -219,7 +219,6 @@ #include #include #include -#include #include #include #include diff --git a/include/linux/pm_legacy.h b/include/linux/pm_legacy.h deleted file mode 100644 index 446f4f42b95..00000000000 --- a/include/linux/pm_legacy.h +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef __LINUX_PM_LEGACY_H__ -#define __LINUX_PM_LEGACY_H__ - - -#ifdef CONFIG_PM_LEGACY - -/* - * Register a device with power management - */ -struct pm_dev __deprecated * -pm_register(pm_dev_t type, unsigned long id, pm_callback callback); - -/* - * Send a request to all devices - */ -int __deprecated pm_send_all(pm_request_t rqst, void *data); - -#else /* CONFIG_PM_LEGACY */ - -static inline struct pm_dev *pm_register(pm_dev_t type, - unsigned long id, - pm_callback callback) -{ - return NULL; -} - -static inline int pm_send_all(pm_request_t rqst, void *data) -{ - return 0; -} - -#endif /* CONFIG_PM_LEGACY */ - -#endif /* __LINUX_PM_LEGACY_H__ */ - -- cgit v1.2.3-70-g09d2 From bdfe6b7c681669148dae4db27eb24ee5408ba371 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Wed, 23 Jul 2008 21:28:41 -0700 Subject: pm: acpi hibernation: utilize hardware signature ACPI defines a hardware signature. BIOS calculates the signature according to hardware configure and if hardware changes while hibernated, the signature will change. In that case, S4 resume should fail. Still, there may be systems on which this mechanism does not work correctly, so it is better to provide a workaround for them. For this reason, add a new switch to the acpi_sleep= command line argument allowing one to disable hardware signature checking. [shaohua.li@intel.com: build fix] Signed-off-by: Shaohua Li Signed-off-by: Rafael J. Wysocki Cc: Andi Kleen Cc: Len Brown Acked-by: Pavel Machek Cc: Cc: Shaohua Li Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/kernel-parameters.txt | 4 +++- arch/x86/kernel/acpi/sleep.c | 4 ++++ drivers/acpi/sleep/main.c | 22 ++++++++++++++++++++++ include/linux/acpi.h | 1 + 4 files changed, 30 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 4d705713cab..497a98dafda 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -148,10 +148,12 @@ and is between 256 and 4096 characters. 
It is defined in the file default: 0 acpi_sleep= [HW,ACPI] Sleep options - Format: { s3_bios, s3_mode, s3_beep, old_ordering } + Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig, old_ordering } See Documentation/power/video.txt for s3_bios and s3_mode. s3_beep is for debugging; it makes the PC's speaker beep as soon as the kernel's real-mode entry point is called. + s4_nohwsig prevents ACPI hardware signature from being + used during resume from hibernation. old_ordering causes the ACPI 1.0 ordering of the _PTS control method, wrt putting devices into low power states, to be enforced (the ACPI 2.0 ordering of _PTS is diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index a3ddad18aaa..fa2161d5003 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -150,6 +150,10 @@ static int __init acpi_sleep_setup(char *str) acpi_realmode_flags |= 2; if (strncmp(str, "s3_beep", 7) == 0) acpi_realmode_flags |= 4; +#ifdef CONFIG_HIBERNATION + if (strncmp(str, "s4_nohwsig", 10) == 0) + acpi_no_s4_hw_signature(); +#endif if (strncmp(str, "old_ordering", 12) == 0) acpi_old_suspend_ordering(); str = strchr(str, ','); diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c index 0489a7d1d42..313507accf1 100644 --- a/drivers/acpi/sleep/main.c +++ b/drivers/acpi/sleep/main.c @@ -283,6 +283,15 @@ static struct platform_suspend_ops acpi_suspend_ops_old = { #endif /* CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATION +static unsigned long s4_hardware_signature; +static struct acpi_table_facs *facs; +static bool nosigcheck; + +void __init acpi_no_s4_hw_signature(void) +{ + nosigcheck = true; +} + static int acpi_hibernation_begin(void) { acpi_target_sleep_state = ACPI_STATE_S4; @@ -316,6 +325,12 @@ static void acpi_hibernation_leave(void) acpi_enable(); /* Reprogram control registers and execute _BFS */ acpi_leave_sleep_state_prep(ACPI_STATE_S4); + /* Check the hardware signature */ + if (facs && s4_hardware_signature != facs->hardware_signature) { + printk(KERN_EMERG "ACPI: Hardware changed while hibernated, " + "cannot resume!\n"); + panic("ACPI S4 hardware signature mismatch"); + } } static void acpi_pm_enable_gpes(void) @@ -544,6 +559,13 @@ int __init acpi_sleep_init(void) &acpi_hibernation_ops_old : &acpi_hibernation_ops); sleep_states[ACPI_STATE_S4] = 1; printk(" S4"); + if (!nosigcheck) { + acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS, + (struct acpi_table_header **)&facs); + if (facs) + s4_hardware_signature = + facs->hardware_signature; + } } #endif status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b); diff --git a/include/linux/acpi.h b/include/linux/acpi.h index a1717763937..702f79dad16 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -236,6 +236,7 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n, const char *name); #ifdef CONFIG_PM_SLEEP +void __init acpi_no_s4_hw_signature(void); void __init acpi_old_suspend_ordering(void); #endif /* CONFIG_PM_SLEEP */ #else /* CONFIG_ACPI */ -- cgit v1.2.3-70-g09d2 From 9deb27baedb79759c3ab9435a7d8b841842d56e9 Mon Sep 17 00:00:00 2001 From: Ulrich Drepper Date: Wed, 23 Jul 2008 21:29:24 -0700 Subject: flag parameters: signalfd This patch adds the new signalfd4 syscall. It extends the old signalfd syscall by one parameter which is meant to hold a flag value. In this patch the only flag support is SFD_CLOEXEC which causes the close-on-exec flag for the returned file descriptor to be set. 
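The reason a creation-time flag is needed at all is atomicity: with the traditional two-step — create the descriptor, then mark it close-on-exec with fcntl() — a second thread can fork() and exec() inside the window between the two calls and leak the descriptor into the new program. A minimal sketch of that racy pattern, written against the glibc-style signalfd() wrapper purely for illustration:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#include <fcntl.h>
#include <signal.h>
#include <sys/signalfd.h>	/* glibc wrapper, assumed here for brevity */

/* Racy two-step: another thread may fork()+exec() between the two calls
 * below, leaking fd into the exec'd program.  signalfd4() with SFD_CLOEXEC
 * sets the flag atomically at creation time and closes this window. */
static int signalfd_then_cloexec(const sigset_t *mask)
{
	int fd = signalfd(-1, mask, 0);

	if (fd >= 0)
		fcntl(fd, F_SETFD, FD_CLOEXEC);
	return fd;
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The same reasoning applies to the eventfd2, epoll_create2, dup3, pipe2 and inotify_init1 syscalls added later in this series.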
A new name SFD_CLOEXEC is introduced which in this implementation must have the same value as O_CLOEXEC. The following test must be adjusted for architectures other than x86 and x86-64 and in case the syscall numbers changed. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #include #include #include #include #include #ifndef __NR_signalfd4 # ifdef __x86_64__ # define __NR_signalfd4 289 # elif defined __i386__ # define __NR_signalfd4 327 # else # error "need __NR_signalfd4" # endif #endif #define SFD_CLOEXEC O_CLOEXEC int main (void) { sigset_t ss; sigemptyset (&ss); sigaddset (&ss, SIGUSR1); int fd = syscall (__NR_signalfd4, -1, &ss, 8, 0); if (fd == -1) { puts ("signalfd4(0) failed"); return 1; } int coe = fcntl (fd, F_GETFD); if (coe == -1) { puts ("fcntl failed"); return 1; } if (coe & FD_CLOEXEC) { puts ("signalfd4(0) set close-on-exec flag"); return 1; } close (fd); fd = syscall (__NR_signalfd4, -1, &ss, 8, SFD_CLOEXEC); if (fd == -1) { puts ("signalfd4(SFD_CLOEXEC) failed"); return 1; } coe = fcntl (fd, F_GETFD); if (coe == -1) { puts ("fcntl failed"); return 1; } if ((coe & FD_CLOEXEC) == 0) { puts ("signalfd4(SFD_CLOEXEC) does not set close-on-exec flag"); return 1; } close (fd); puts ("OK"); return 0; } ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ [akpm@linux-foundation.org: add sys_ni stub] Signed-off-by: Ulrich Drepper Acked-by: Davide Libenzi Cc: Michael Kerrisk Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/ia32/ia32entry.S | 1 + arch/x86/kernel/syscall_table_32.S | 1 + fs/compat.c | 14 ++++++++++---- fs/signalfd.c | 14 ++++++++++++-- include/asm-x86/unistd_32.h | 1 + include/asm-x86/unistd_64.h | 2 ++ include/linux/signalfd.h | 5 +++++ include/linux/syscalls.h | 1 + kernel/sys_ni.c | 1 + 9 files changed, 34 insertions(+), 6 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 021d71bc69b..c308128b925 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -826,4 +826,5 @@ ia32_sys_call_table: .quad sys32_fallocate .quad compat_sys_timerfd_settime /* 325 */ .quad compat_sys_timerfd_gettime + .quad compat_sys_signalfd4 ia32_syscall_end: diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index adff5562f5f..c12a36c9fd5 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S @@ -326,3 +326,4 @@ ENTRY(sys_call_table) .long sys_fallocate .long sys_timerfd_settime /* 325 */ .long sys_timerfd_gettime + .long sys_signalfd4 diff --git a/fs/compat.c b/fs/compat.c index b4660428176..106eba28ec5 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -2131,9 +2131,9 @@ asmlinkage long compat_sys_epoll_pwait(int epfd, #ifdef CONFIG_SIGNALFD -asmlinkage long compat_sys_signalfd(int ufd, - const compat_sigset_t __user *sigmask, - compat_size_t sigsetsize) +asmlinkage long compat_sys_signalfd4(int ufd, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize, int flags) { compat_sigset_t ss32; sigset_t tmp; @@ -2148,9 +2148,15 @@ asmlinkage long compat_sys_signalfd(int ufd, if (copy_to_user(ksigmask, &tmp, sizeof(sigset_t))) return -EFAULT; - return sys_signalfd(ufd, ksigmask, sizeof(sigset_t)); + return sys_signalfd4(ufd, ksigmask, sizeof(sigset_t), flags); } +asmlinkage long compat_sys_signalfd(int ufd, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize) +{ + return compat_sys_signalfd4(ufd, sigmask, sigsetsize, 0); +} #endif /* CONFIG_SIGNALFD */ #ifdef 
CONFIG_TIMERFD diff --git a/fs/signalfd.c b/fs/signalfd.c index ddb328b74bd..c8609fa51a1 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -205,11 +205,15 @@ static const struct file_operations signalfd_fops = { .read = signalfd_read, }; -asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask) +asmlinkage long sys_signalfd4(int ufd, sigset_t __user *user_mask, + size_t sizemask, int flags) { sigset_t sigmask; struct signalfd_ctx *ctx; + if (flags & ~SFD_CLOEXEC) + return -EINVAL; + if (sizemask != sizeof(sigset_t) || copy_from_user(&sigmask, user_mask, sizeof(sigmask))) return -EINVAL; @@ -228,7 +232,7 @@ asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemas * anon_inode_getfd() will install the fd. */ ufd = anon_inode_getfd("[signalfd]", &signalfd_fops, ctx, - 0); + flags & O_CLOEXEC); if (ufd < 0) kfree(ctx); } else { @@ -250,3 +254,9 @@ asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemas return ufd; } + +asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, + size_t sizemask) +{ + return sys_signalfd4(ufd, user_mask, sizemask, 0); +} diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h index 8317d94771d..c310371f561 100644 --- a/include/asm-x86/unistd_32.h +++ b/include/asm-x86/unistd_32.h @@ -332,6 +332,7 @@ #define __NR_fallocate 324 #define __NR_timerfd_settime 325 #define __NR_timerfd_gettime 326 +#define __NR_signalfd4 327 #ifdef __KERNEL__ diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h index e323994a370..e0a9b45b234 100644 --- a/include/asm-x86/unistd_64.h +++ b/include/asm-x86/unistd_64.h @@ -641,6 +641,8 @@ __SYSCALL(__NR_timerfd_settime, sys_timerfd_settime) __SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime) #define __NR_paccept 288 __SYSCALL(__NR_paccept, sys_paccept) +#define __NR_signalfd4 289 +__SYSCALL(__NR_signalfd4, sys_signalfd4) #ifndef __NO_STUBS diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h index ea037f28df9..8b3f7b7420a 100644 --- a/include/linux/signalfd.h +++ b/include/linux/signalfd.h @@ -8,6 +8,11 @@ #ifndef _LINUX_SIGNALFD_H #define _LINUX_SIGNALFD_H +/* For O_CLOEXEC */ +#include + +/* Flags for signalfd4. 
*/ +#define SFD_CLOEXEC O_CLOEXEC struct signalfd_siginfo { __u32 ssi_signo; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 2a2a40af6b2..1c270779784 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -610,6 +610,7 @@ asmlinkage long sys_set_robust_list(struct robust_list_head __user *head, size_t len); asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache); asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask); +asmlinkage long sys_signalfd4(int ufd, sigset_t __user *user_mask, size_t sizemask, int flags); asmlinkage long sys_timerfd_create(int clockid, int flags); asmlinkage long sys_timerfd_settime(int ufd, int flags, const struct itimerspec __user *utmr, diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 2f0b8a2e600..8627c89ae9e 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -156,6 +156,7 @@ cond_syscall(sys_ioprio_get); /* New file descriptors */ cond_syscall(sys_signalfd); +cond_syscall(sys_signalfd4); cond_syscall(compat_sys_signalfd); cond_syscall(sys_timerfd_create); cond_syscall(sys_timerfd_settime); -- cgit v1.2.3-70-g09d2 From b087498eb5605673b0f260a7620d91818cd72304 Mon Sep 17 00:00:00 2001 From: Ulrich Drepper Date: Wed, 23 Jul 2008 21:29:25 -0700 Subject: flag parameters: eventfd This patch adds the new eventfd2 syscall. It extends the old eventfd syscall by one parameter which is meant to hold a flag value. In this patch the only flag support is EFD_CLOEXEC which causes the close-on-exec flag for the returned file descriptor to be set. A new name EFD_CLOEXEC is introduced which in this implementation must have the same value as O_CLOEXEC. The following test must be adjusted for architectures other than x86 and x86-64 and in case the syscall numbers changed. 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #include #include #include #include #ifndef __NR_eventfd2 # ifdef __x86_64__ # define __NR_eventfd2 290 # elif defined __i386__ # define __NR_eventfd2 328 # else # error "need __NR_eventfd2" # endif #endif #define EFD_CLOEXEC O_CLOEXEC int main (void) { int fd = syscall (__NR_eventfd2, 1, 0); if (fd == -1) { puts ("eventfd2(0) failed"); return 1; } int coe = fcntl (fd, F_GETFD); if (coe == -1) { puts ("fcntl failed"); return 1; } if (coe & FD_CLOEXEC) { puts ("eventfd2(0) sets close-on-exec flag"); return 1; } close (fd); fd = syscall (__NR_eventfd2, 1, EFD_CLOEXEC); if (fd == -1) { puts ("eventfd2(EFD_CLOEXEC) failed"); return 1; } coe = fcntl (fd, F_GETFD); if (coe == -1) { puts ("fcntl failed"); return 1; } if ((coe & FD_CLOEXEC) == 0) { puts ("eventfd2(EFD_CLOEXEC) does not set close-on-exec flag"); return 1; } close (fd); puts ("OK"); return 0; } ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ [akpm@linux-foundation.org: add sys_ni stub] Signed-off-by: Ulrich Drepper Acked-by: Davide Libenzi Cc: Michael Kerrisk Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/ia32/ia32entry.S | 1 + arch/x86/kernel/syscall_table_32.S | 1 + fs/eventfd.c | 13 +++++++++++-- include/asm-x86/unistd_32.h | 1 + include/asm-x86/unistd_64.h | 2 ++ include/linux/eventfd.h | 6 ++++++ include/linux/syscalls.h | 1 + kernel/sys_ni.c | 1 + 8 files changed, 24 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index c308128b925..cf0eb31745c 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -827,4 +827,5 @@ ia32_sys_call_table: .quad compat_sys_timerfd_settime /* 325 */ .quad compat_sys_timerfd_gettime .quad compat_sys_signalfd4 + .quad sys_eventfd2 ia32_syscall_end: diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index c12a36c9fd5..cf112cb11c3 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S @@ -327,3 +327,4 @@ ENTRY(sys_call_table) .long sys_timerfd_settime /* 325 */ .long sys_timerfd_gettime .long sys_signalfd4 + .long sys_eventfd2 diff --git a/fs/eventfd.c b/fs/eventfd.c index 6094265ca40..bd420e6478a 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -198,11 +198,14 @@ struct file *eventfd_fget(int fd) return file; } -asmlinkage long sys_eventfd(unsigned int count) +asmlinkage long sys_eventfd2(unsigned int count, int flags) { int fd; struct eventfd_ctx *ctx; + if (flags & ~EFD_CLOEXEC) + return -EINVAL; + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; @@ -214,9 +217,15 @@ asmlinkage long sys_eventfd(unsigned int count) * When we call this, the initialization must be complete, since * anon_inode_getfd() will install the fd. 
*/ - fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx, 0); + fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx, + flags & O_CLOEXEC); if (fd < 0) kfree(ctx); return fd; } +asmlinkage long sys_eventfd(unsigned int count) +{ + return sys_eventfd2(count, 0); +} + diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h index c310371f561..edbd8723c93 100644 --- a/include/asm-x86/unistd_32.h +++ b/include/asm-x86/unistd_32.h @@ -333,6 +333,7 @@ #define __NR_timerfd_settime 325 #define __NR_timerfd_gettime 326 #define __NR_signalfd4 327 +#define __NR_eventfd2 328 #ifdef __KERNEL__ diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h index e0a9b45b234..fb059a6feeb 100644 --- a/include/asm-x86/unistd_64.h +++ b/include/asm-x86/unistd_64.h @@ -643,6 +643,8 @@ __SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime) __SYSCALL(__NR_paccept, sys_paccept) #define __NR_signalfd4 289 __SYSCALL(__NR_signalfd4, sys_signalfd4) +#define __NR_eventfd2 290 +__SYSCALL(__NR_eventfd2, sys_eventfd2) #ifndef __NO_STUBS diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index a701399b7fe..a6c0eaedb1b 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -10,6 +10,12 @@ #ifdef CONFIG_EVENTFD +/* For O_CLOEXEC */ +#include + +/* Flags for eventfd2. */ +#define EFD_CLOEXEC O_CLOEXEC + struct file *eventfd_fget(int fd); int eventfd_signal(struct file *file, int n); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 1c270779784..9ab09926a7f 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -617,6 +617,7 @@ asmlinkage long sys_timerfd_settime(int ufd, int flags, struct itimerspec __user *otmr); asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr); asmlinkage long sys_eventfd(unsigned int count); +asmlinkage long sys_eventfd2(unsigned int count, int flags); asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len); int kernel_execve(const char *filename, char *const argv[], char *const envp[]); diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 8627c89ae9e..2a361ccdc7c 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -164,3 +164,4 @@ cond_syscall(sys_timerfd_gettime); cond_syscall(compat_sys_timerfd_settime); cond_syscall(compat_sys_timerfd_gettime); cond_syscall(sys_eventfd); +cond_syscall(sys_eventfd2); -- cgit v1.2.3-70-g09d2 From a0998b50c3f0b8fdd265c63e0032f86ebe377dbf Mon Sep 17 00:00:00 2001 From: Ulrich Drepper Date: Wed, 23 Jul 2008 21:29:27 -0700 Subject: flag parameters: epoll_create This patch adds the new epoll_create2 syscall. It extends the old epoll_create syscall by one parameter which is meant to hold a flag value. In this patch the only flag support is EPOLL_CLOEXEC which causes the close-on-exec flag for the returned file descriptor to be set. A new name EPOLL_CLOEXEC is introduced which in this implementation must have the same value as O_CLOEXEC. The following test must be adjusted for architectures other than x86 and x86-64 and in case the syscall numbers changed. 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #include #include #include #include #include #ifndef __NR_epoll_create2 # ifdef __x86_64__ # define __NR_epoll_create2 291 # elif defined __i386__ # define __NR_epoll_create2 329 # else # error "need __NR_epoll_create2" # endif #endif #define EPOLL_CLOEXEC O_CLOEXEC int main (void) { int fd = syscall (__NR_epoll_create2, 1, 0); if (fd == -1) { puts ("epoll_create2(0) failed"); return 1; } int coe = fcntl (fd, F_GETFD); if (coe == -1) { puts ("fcntl failed"); return 1; } if (coe & FD_CLOEXEC) { puts ("epoll_create2(0) set close-on-exec flag"); return 1; } close (fd); fd = syscall (__NR_epoll_create2, 1, EPOLL_CLOEXEC); if (fd == -1) { puts ("epoll_create2(EPOLL_CLOEXEC) failed"); return 1; } coe = fcntl (fd, F_GETFD); if (coe == -1) { puts ("fcntl failed"); return 1; } if ((coe & FD_CLOEXEC) == 0) { puts ("epoll_create2(EPOLL_CLOEXEC) does not set close-on-exec flag"); return 1; } close (fd); puts ("OK"); return 0; } ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Signed-off-by: Ulrich Drepper Acked-by: Davide Libenzi Cc: Michael Kerrisk Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/ia32/ia32entry.S | 1 + arch/x86/kernel/syscall_table_32.S | 1 + fs/eventpoll.c | 13 +++++++++++-- include/asm-x86/unistd_32.h | 1 + include/asm-x86/unistd_64.h | 2 ++ include/linux/eventpoll.h | 4 ++++ include/linux/syscalls.h | 1 + 7 files changed, 21 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index cf0eb31745c..04366f08f42 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -828,4 +828,5 @@ ia32_sys_call_table: .quad compat_sys_timerfd_gettime .quad compat_sys_signalfd4 .quad sys_eventfd2 + .quad sys_epoll_create2 ia32_syscall_end: diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index cf112cb11c3..4d7007ca263 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S @@ -328,3 +328,4 @@ ENTRY(sys_call_table) .long sys_timerfd_gettime .long sys_signalfd4 .long sys_eventfd2 + .long sys_epoll_create2 diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 9392dd96812..3fd4014f3c5 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1046,11 +1046,14 @@ retry: * RB tree. With the current implementation, the "size" parameter is ignored * (besides sanity checks). */ -asmlinkage long sys_epoll_create(int size) +asmlinkage long sys_epoll_create2(int size, int flags) { int error, fd = -1; struct eventpoll *ep; + if (flags & ~EPOLL_CLOEXEC) + return -EINVAL; + DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n", current, size)); @@ -1068,7 +1071,8 @@ asmlinkage long sys_epoll_create(int size) * Creates all the items needed to setup an eventpoll file. That is, * a file structure and a free file descriptor. 
*/ - fd = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep, 0); + fd = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep, + flags & O_CLOEXEC); if (fd < 0) ep_free(ep); @@ -1079,6 +1083,11 @@ error_return: return fd; } +asmlinkage long sys_epoll_create(int size) +{ + return sys_epoll_create2(size, 0); +} + /* * The following function implements the controller interface for * the eventpoll file that enables the insertion/removal/change of diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h index edbd8723c93..a37d6b0c4e1 100644 --- a/include/asm-x86/unistd_32.h +++ b/include/asm-x86/unistd_32.h @@ -334,6 +334,7 @@ #define __NR_timerfd_gettime 326 #define __NR_signalfd4 327 #define __NR_eventfd2 328 +#define __NR_epoll_create2 329 #ifdef __KERNEL__ diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h index fb059a6feeb..a1a4a5b6e5e 100644 --- a/include/asm-x86/unistd_64.h +++ b/include/asm-x86/unistd_64.h @@ -645,6 +645,8 @@ __SYSCALL(__NR_paccept, sys_paccept) __SYSCALL(__NR_signalfd4, sys_signalfd4) #define __NR_eventfd2 290 __SYSCALL(__NR_eventfd2, sys_eventfd2) +#define __NR_epoll_create2 291 +__SYSCALL(__NR_epoll_create2, sys_epoll_create2) #ifndef __NO_STUBS diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index cf79853967f..1cfaa40059c 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h @@ -14,8 +14,12 @@ #ifndef _LINUX_EVENTPOLL_H #define _LINUX_EVENTPOLL_H +/* For O_CLOEXEC */ +#include #include +/* Flags for epoll_create2. */ +#define EPOLL_CLOEXEC O_CLOEXEC /* Valid opcodes to issue to sys_epoll_ctl() */ #define EPOLL_CTL_ADD 1 diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 9ab09926a7f..85953240f28 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -430,6 +430,7 @@ asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds, asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp); asmlinkage long sys_epoll_create(int size); +asmlinkage long sys_epoll_create2(int size, int flags); asmlinkage long sys_epoll_ctl(int epfd, int op, int fd, struct epoll_event __user *event); asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events, -- cgit v1.2.3-70-g09d2 From 336dd1f70ff62d7dd8655228caed4c5bfc818c56 Mon Sep 17 00:00:00 2001 From: Ulrich Drepper Date: Wed, 23 Jul 2008 21:29:29 -0700 Subject: flag parameters: dup2 This patch adds the new dup3 syscall. It extends the old dup2 syscall by one parameter which is meant to hold a flag value. Support for the O_CLOEXEC flag is added in this patch. The following test must be adjusted for architectures other than x86 and x86-64 and in case the syscall numbers changed. 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #include #include #include #include #include #ifndef __NR_dup3 # ifdef __x86_64__ # define __NR_dup3 292 # elif defined __i386__ # define __NR_dup3 330 # else # error "need __NR_dup3" # endif #endif int main (void) { int fd = syscall (__NR_dup3, 1, 4, 0); if (fd == -1) { puts ("dup3(0) failed"); return 1; } int coe = fcntl (fd, F_GETFD); if (coe == -1) { puts ("fcntl failed"); return 1; } if (coe & FD_CLOEXEC) { puts ("dup3(0) set close-on-exec flag"); return 1; } close (fd); fd = syscall (__NR_dup3, 1, 4, O_CLOEXEC); if (fd == -1) { puts ("dup3(O_CLOEXEC) failed"); return 1; } coe = fcntl (fd, F_GETFD); if (coe == -1) { puts ("fcntl failed"); return 1; } if ((coe & FD_CLOEXEC) == 0) { puts ("dup3(O_CLOEXEC) set close-on-exec flag"); return 1; } close (fd); puts ("OK"); return 0; } ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Signed-off-by: Ulrich Drepper Acked-by: Davide Libenzi Cc: Michael Kerrisk Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/ia32/ia32entry.S | 1 + arch/x86/kernel/syscall_table_32.S | 1 + fs/fcntl.c | 15 +++++++++++++-- include/asm-x86/unistd_32.h | 1 + include/asm-x86/unistd_64.h | 2 ++ include/linux/syscalls.h | 1 + 6 files changed, 19 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 04366f08f42..5614a8f7bed 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -829,4 +829,5 @@ ia32_sys_call_table: .quad compat_sys_signalfd4 .quad sys_eventfd2 .quad sys_epoll_create2 + .quad sys_dup3 /* 330 */ ia32_syscall_end: diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index 4d7007ca263..24a3f1ea6a0 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S @@ -329,3 +329,4 @@ ENTRY(sys_call_table) .long sys_signalfd4 .long sys_eventfd2 .long sys_epoll_create2 + .long sys_dup3 /* 330 */ diff --git a/fs/fcntl.c b/fs/fcntl.c index 330a7d78259..9679fcbdeaa 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -125,13 +125,16 @@ static int dupfd(struct file *file, unsigned int start, int cloexec) return fd; } -asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd) +asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags) { int err = -EBADF; struct file * file, *tofree; struct files_struct * files = current->files; struct fdtable *fdt; + if ((flags & ~O_CLOEXEC) != 0) + return -EINVAL; + spin_lock(&files->file_lock); if (!(file = fcheck(oldfd))) goto out_unlock; @@ -163,7 +166,10 @@ asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd) rcu_assign_pointer(fdt->fd[newfd], file); FD_SET(newfd, fdt->open_fds); - FD_CLR(newfd, fdt->close_on_exec); + if (flags & O_CLOEXEC) + FD_SET(newfd, fdt->close_on_exec); + else + FD_CLR(newfd, fdt->close_on_exec); spin_unlock(&files->file_lock); if (tofree) @@ -181,6 +187,11 @@ out_fput: goto out; } +asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd) +{ + return sys_dup3(oldfd, newfd, 0); +} + asmlinkage long sys_dup(unsigned int fildes) { int ret = -EBADF; diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h index a37d6b0c4e1..a1f6383bf69 100644 --- a/include/asm-x86/unistd_32.h +++ b/include/asm-x86/unistd_32.h @@ -335,6 +335,7 @@ #define __NR_signalfd4 327 #define __NR_eventfd2 328 #define __NR_epoll_create2 329 +#define __NR_dup3 330 #ifdef __KERNEL__ diff --git a/include/asm-x86/unistd_64.h 
b/include/asm-x86/unistd_64.h index a1a4a5b6e5e..f0fb2bd40cd 100644 --- a/include/asm-x86/unistd_64.h +++ b/include/asm-x86/unistd_64.h @@ -647,6 +647,8 @@ __SYSCALL(__NR_signalfd4, sys_signalfd4) __SYSCALL(__NR_eventfd2, sys_eventfd2) #define __NR_epoll_create2 291 __SYSCALL(__NR_epoll_create2, sys_epoll_create2) +#define __NR_dup3 292 +__SYSCALL(__NR_dup3, sys_dup3) #ifndef __NO_STUBS diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 85953240f28..034d3358549 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -305,6 +305,7 @@ asmlinkage long sys_fcntl64(unsigned int fd, #endif asmlinkage long sys_dup(unsigned int fildes); asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd); +asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags); asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on); asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg); -- cgit v1.2.3-70-g09d2 From ed8cae8ba01348bfd83333f4648dd807b04d7f08 Mon Sep 17 00:00:00 2001 From: Ulrich Drepper Date: Wed, 23 Jul 2008 21:29:30 -0700 Subject: flag parameters: pipe This patch introduces the new syscall pipe2 which is like pipe but it also takes an additional parameter which takes a flag value. This patch implements the handling of O_CLOEXEC for the flag. I did not add support for the new syscall for the architectures which have a special sys_pipe implementation. I think the maintainers of those archs have the chance to go with the unified implementation but that's up to them. The implementation introduces do_pipe_flags. I did that instead of changing all callers of do_pipe because some of the callers are written in assembler. I would probably screw up changing the assembly code. To avoid breaking code do_pipe is now a small wrapper around do_pipe_flags. Once all callers are changed over to do_pipe_flags the old do_pipe function can be removed. The following test must be adjusted for architectures other than x86 and x86-64 and in case the syscall numbers changed. 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #include #include #include #include #ifndef __NR_pipe2 # ifdef __x86_64__ # define __NR_pipe2 293 # elif defined __i386__ # define __NR_pipe2 331 # else # error "need __NR_pipe2" # endif #endif int main (void) { int fd[2]; if (syscall (__NR_pipe2, fd, 0) != 0) { puts ("pipe2(0) failed"); return 1; } for (int i = 0; i < 2; ++i) { int coe = fcntl (fd[i], F_GETFD); if (coe == -1) { puts ("fcntl failed"); return 1; } if (coe & FD_CLOEXEC) { printf ("pipe2(0) set close-on-exit for fd[%d]\n", i); return 1; } } close (fd[0]); close (fd[1]); if (syscall (__NR_pipe2, fd, O_CLOEXEC) != 0) { puts ("pipe2(O_CLOEXEC) failed"); return 1; } for (int i = 0; i < 2; ++i) { int coe = fcntl (fd[i], F_GETFD); if (coe == -1) { puts ("fcntl failed"); return 1; } if ((coe & FD_CLOEXEC) == 0) { printf ("pipe2(O_CLOEXEC) does not set close-on-exit for fd[%d]\n", i); return 1; } } close (fd[0]); close (fd[1]); puts ("OK"); return 0; } ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Signed-off-by: Ulrich Drepper Acked-by: Davide Libenzi Cc: Michael Kerrisk Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/ia32/sys_ia32.c | 2 +- arch/ia64/kernel/sys_ia64.c | 2 +- arch/mips/kernel/syscall.c | 2 +- arch/parisc/hpux/sys_hpux.c | 2 +- arch/sh/kernel/sys_sh32.c | 2 +- arch/sparc/kernel/sys_sparc.c | 2 +- arch/sparc64/kernel/sys_sparc.c | 2 +- arch/x86/ia32/ia32entry.S | 1 + arch/x86/ia32/sys_ia32.c | 2 +- arch/x86/kernel/syscall_table_32.S | 1 + arch/xtensa/kernel/syscall.c | 2 +- fs/pipe.c | 23 ++++++++++++++++++----- include/asm-x86/unistd_32.h | 1 + include/asm-x86/unistd_64.h | 2 ++ include/linux/fs.h | 1 + 15 files changed, 33 insertions(+), 14 deletions(-) (limited to 'arch/x86') diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index 7e028ceb93b..465116aecb8 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c @@ -1139,7 +1139,7 @@ sys32_pipe (int __user *fd) int retval; int fds[2]; - retval = do_pipe(fds); + retval = do_pipe_flags(fds, 0); if (retval) goto out; if (copy_to_user(fd, fds, sizeof(fds))) diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c index 1eda194b955..bcbb6d8792d 100644 --- a/arch/ia64/kernel/sys_ia64.c +++ b/arch/ia64/kernel/sys_ia64.c @@ -160,7 +160,7 @@ sys_pipe (void) int fd[2]; int retval; - retval = do_pipe(fd); + retval = do_pipe_flags(fd, 0); if (retval) goto out; retval = fd[0]; diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 3523c8d12ed..343015a2f41 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -52,7 +52,7 @@ asmlinkage int sysm_pipe(nabi_no_regargs volatile struct pt_regs regs) int fd[2]; int error, res; - error = do_pipe(fd); + error = do_pipe_flags(fd, 0); if (error) { res = error; goto out; diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c index 0c5b9dabb47..be255ebb609 100644 --- a/arch/parisc/hpux/sys_hpux.c +++ b/arch/parisc/hpux/sys_hpux.c @@ -448,7 +448,7 @@ int hpux_pipe(int *kstack_fildes) int error; lock_kernel(); - error = do_pipe(kstack_fildes); + error = do_pipe_flags(kstack_fildes, 0); unlock_kernel(); return error; } diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c index 125e493ead8..f0aa5c39865 100644 --- a/arch/sh/kernel/sys_sh32.c +++ b/arch/sh/kernel/sys_sh32.c @@ -29,7 +29,7 @@ asmlinkage int sys_pipe(unsigned long r4, unsigned long r5, int fd[2]; int error; - error = do_pipe(fd); + error = 
do_pipe_flags(fd, 0); if (!error) { regs->regs[1] = fd[1]; return fd[0]; diff --git a/arch/sparc/kernel/sys_sparc.c b/arch/sparc/kernel/sys_sparc.c index 3c6b49a53ae..4d73421559c 100644 --- a/arch/sparc/kernel/sys_sparc.c +++ b/arch/sparc/kernel/sys_sparc.c @@ -97,7 +97,7 @@ asmlinkage int sparc_pipe(struct pt_regs *regs) int fd[2]; int error; - error = do_pipe(fd); + error = do_pipe_flags(fd, 0); if (error) goto out; regs->u_regs[UREG_I1] = fd[1]; diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c index e1f4eba2e57..39749e32dc7 100644 --- a/arch/sparc64/kernel/sys_sparc.c +++ b/arch/sparc64/kernel/sys_sparc.c @@ -418,7 +418,7 @@ asmlinkage long sparc_pipe(struct pt_regs *regs) int fd[2]; int error; - error = do_pipe(fd); + error = do_pipe_flags(fd, 0); if (error) goto out; regs->u_regs[UREG_I1] = fd[1]; diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 5614a8f7bed..18808b16457 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -830,4 +830,5 @@ ia32_sys_call_table: .quad sys_eventfd2 .quad sys_epoll_create2 .quad sys_dup3 /* 330 */ + .quad sys_pipe2 ia32_syscall_end: diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c index f00afdf61e6..d3c64088b98 100644 --- a/arch/x86/ia32/sys_ia32.c +++ b/arch/x86/ia32/sys_ia32.c @@ -238,7 +238,7 @@ asmlinkage long sys32_pipe(int __user *fd) int retval; int fds[2]; - retval = do_pipe(fds); + retval = do_pipe_flags(fds, 0); if (retval) goto out; if (copy_to_user(fd, fds, sizeof(fds))) diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index 24a3f1ea6a0..66154769d52 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S @@ -330,3 +330,4 @@ ENTRY(sys_call_table) .long sys_eventfd2 .long sys_epoll_create2 .long sys_dup3 /* 330 */ + .long sys_pipe2 diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c index f3e16efcd47..ac15ecbdf91 100644 --- a/arch/xtensa/kernel/syscall.c +++ b/arch/xtensa/kernel/syscall.c @@ -49,7 +49,7 @@ asmlinkage long xtensa_pipe(int __user *userfds) int fd[2]; int error; - error = do_pipe(fd); + error = do_pipe_flags(fd, 0); if (!error) { if (copy_to_user(userfds, fd, 2 * sizeof(int))) error = -EFAULT; diff --git a/fs/pipe.c b/fs/pipe.c index 700f4e0d957..68e82061070 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -1027,12 +1027,15 @@ struct file *create_read_pipe(struct file *wrf) return f; } -int do_pipe(int *fd) +int do_pipe_flags(int *fd, int flags) { struct file *fw, *fr; int error; int fdw, fdr; + if (flags & ~O_CLOEXEC) + return -EINVAL; + fw = create_write_pipe(); if (IS_ERR(fw)) return PTR_ERR(fw); @@ -1041,12 +1044,12 @@ int do_pipe(int *fd) if (IS_ERR(fr)) goto err_write_pipe; - error = get_unused_fd(); + error = get_unused_fd_flags(flags); if (error < 0) goto err_read_pipe; fdr = error; - error = get_unused_fd(); + error = get_unused_fd_flags(flags); if (error < 0) goto err_fdr; fdw = error; @@ -1074,16 +1077,21 @@ int do_pipe(int *fd) return error; } +int do_pipe(int *fd) +{ + return do_pipe_flags(fd, 0); +} + /* * sys_pipe() is the normal C calling standard for creating * a pipe. It's not the way Unix traditionally does this, though. 
*/ -asmlinkage long __weak sys_pipe(int __user *fildes) +asmlinkage long __weak sys_pipe2(int __user *fildes, int flags) { int fd[2]; int error; - error = do_pipe(fd); + error = do_pipe_flags(fd, flags); if (!error) { if (copy_to_user(fildes, fd, sizeof(fd))) { sys_close(fd[0]); @@ -1094,6 +1102,11 @@ asmlinkage long __weak sys_pipe(int __user *fildes) return error; } +asmlinkage long __weak sys_pipe(int __user *fildes) +{ + return sys_pipe2(fildes, 0); +} + /* * pipefs should _never_ be mounted by userland - too much of security hassle, * no real gain from having the whole whorehouse mounted. So we don't need diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h index a1f6383bf69..748a05c77da 100644 --- a/include/asm-x86/unistd_32.h +++ b/include/asm-x86/unistd_32.h @@ -336,6 +336,7 @@ #define __NR_eventfd2 328 #define __NR_epoll_create2 329 #define __NR_dup3 330 +#define __NR_pipe2 331 #ifdef __KERNEL__ diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h index f0fb2bd40cd..d2284b43ad5 100644 --- a/include/asm-x86/unistd_64.h +++ b/include/asm-x86/unistd_64.h @@ -649,6 +649,8 @@ __SYSCALL(__NR_eventfd2, sys_eventfd2) __SYSCALL(__NR_epoll_create2, sys_epoll_create2) #define __NR_dup3 292 __SYSCALL(__NR_dup3, sys_dup3) +#define __NR_pipe2 293 +__SYSCALL(__NR_pipe2, sys_pipe2) #ifndef __NO_STUBS diff --git a/include/linux/fs.h b/include/linux/fs.h index e5e6a244096..0e80cd717d3 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1777,6 +1777,7 @@ static inline void allow_write_access(struct file *file) atomic_inc(&file->f_path.dentry->d_inode->i_writecount); } extern int do_pipe(int *); +extern int do_pipe_flags(int *, int); extern struct file *create_read_pipe(struct file *f); extern struct file *create_write_pipe(void); extern void free_write_pipe(struct file *); -- cgit v1.2.3-70-g09d2 From 4006553b06306b34054529477b06b68a1c66249b Mon Sep 17 00:00:00 2001 From: Ulrich Drepper Date: Wed, 23 Jul 2008 21:29:32 -0700 Subject: flag parameters: inotify_init This patch introduces the new syscall inotify_init1 (note: the 1 stands for the one parameter the syscall takes, as opposed to no parameter before). The values accepted for this parameter are function-specific and defined in the inotify.h header. Here the values must match the O_* flags, though. In this patch CLOEXEC support is introduced. The following test must be adjusted for architectures other than x86 and x86-64 and in case the syscall numbers changed. 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #include #include #include #include #ifndef __NR_inotify_init1 # ifdef __x86_64__ # define __NR_inotify_init1 294 # elif defined __i386__ # define __NR_inotify_init1 332 # else # error "need __NR_inotify_init1" # endif #endif #define IN_CLOEXEC O_CLOEXEC int main (void) { int fd; fd = syscall (__NR_inotify_init1, 0); if (fd == -1) { puts ("inotify_init1(0) failed"); return 1; } int coe = fcntl (fd, F_GETFD); if (coe == -1) { puts ("fcntl failed"); return 1; } if (coe & FD_CLOEXEC) { puts ("inotify_init1(0) set close-on-exit"); return 1; } close (fd); fd = syscall (__NR_inotify_init1, IN_CLOEXEC); if (fd == -1) { puts ("inotify_init1(IN_CLOEXEC) failed"); return 1; } coe = fcntl (fd, F_GETFD); if (coe == -1) { puts ("fcntl failed"); return 1; } if ((coe & FD_CLOEXEC) == 0) { puts ("inotify_init1(O_CLOEXEC) does not set close-on-exit"); return 1; } close (fd); puts ("OK"); return 0; } ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ [akpm@linux-foundation.org: add sys_ni stub] Signed-off-by: Ulrich Drepper Acked-by: Davide Libenzi Cc: Michael Kerrisk Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/ia32/ia32entry.S | 1 + arch/x86/kernel/syscall_table_32.S | 1 + fs/inotify_user.c | 12 ++++++++++-- include/asm-x86/unistd_32.h | 1 + include/asm-x86/unistd_64.h | 2 ++ include/linux/inotify.h | 5 +++++ include/linux/syscalls.h | 1 + kernel/sys_ni.c | 1 + 8 files changed, 22 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 18808b16457..4541073dd83 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -831,4 +831,5 @@ ia32_sys_call_table: .quad sys_epoll_create2 .quad sys_dup3 /* 330 */ .quad sys_pipe2 + .quad sys_inotify_init1 ia32_syscall_end: diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index 66154769d52..f59aba5ff0f 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S @@ -331,3 +331,4 @@ ENTRY(sys_call_table) .long sys_epoll_create2 .long sys_dup3 /* 330 */ .long sys_pipe2 + .long sys_inotify_init1 diff --git a/fs/inotify_user.c b/fs/inotify_user.c index 6676c06bb7c..851005998cd 100644 --- a/fs/inotify_user.c +++ b/fs/inotify_user.c @@ -566,7 +566,7 @@ static const struct inotify_operations inotify_user_ops = { .destroy_watch = free_inotify_user_watch, }; -asmlinkage long sys_inotify_init(void) +asmlinkage long sys_inotify_init1(int flags) { struct inotify_device *dev; struct inotify_handle *ih; @@ -574,7 +574,10 @@ asmlinkage long sys_inotify_init(void) struct file *filp; int fd, ret; - fd = get_unused_fd(); + if (flags & ~IN_CLOEXEC) + return -EINVAL; + + fd = get_unused_fd_flags(flags & O_CLOEXEC); if (fd < 0) return fd; @@ -638,6 +641,11 @@ out_put_fd: return ret; } +asmlinkage long sys_inotify_init(void) +{ + return sys_inotify_init1(0); +} + asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask) { struct inode *inode; diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h index 748a05c77da..b3daf503ab9 100644 --- a/include/asm-x86/unistd_32.h +++ b/include/asm-x86/unistd_32.h @@ -337,6 +337,7 @@ #define __NR_epoll_create2 329 #define __NR_dup3 330 #define __NR_pipe2 331 +#define __NR_inotify_init1 332 #ifdef __KERNEL__ diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h index d2284b43ad5..c8cb88d70c6 100644 --- 
a/include/asm-x86/unistd_64.h +++ b/include/asm-x86/unistd_64.h @@ -651,6 +651,8 @@ __SYSCALL(__NR_epoll_create2, sys_epoll_create2) __SYSCALL(__NR_dup3, sys_dup3) #define __NR_pipe2 293 __SYSCALL(__NR_pipe2, sys_pipe2) +#define __NR_inotify_init1 294 +__SYSCALL(__NR_inotify_init1, sys_inotify_init1) #ifndef __NO_STUBS diff --git a/include/linux/inotify.h b/include/linux/inotify.h index 742b917e7d1..72ef8212051 100644 --- a/include/linux/inotify.h +++ b/include/linux/inotify.h @@ -7,6 +7,8 @@ #ifndef _LINUX_INOTIFY_H #define _LINUX_INOTIFY_H +/* For O_CLOEXEC */ +#include #include /* @@ -63,6 +65,9 @@ struct inotify_event { IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF | \ IN_MOVE_SELF) +/* Flags for sys_inotify_init1. */ +#define IN_CLOEXEC O_CLOEXEC + #ifdef __KERNEL__ #include diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 034d3358549..93a7e7f017a 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -547,6 +547,7 @@ asmlinkage long sys_get_mempolicy(int __user *policy, unsigned long addr, unsigned long flags); asmlinkage long sys_inotify_init(void); +asmlinkage long sys_inotify_init1(int flags); asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask); asmlinkage long sys_inotify_rm_watch(int fd, u32 wd); diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 2a361ccdc7c..bd66ac5406f 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -96,6 +96,7 @@ cond_syscall(sys_keyctl); cond_syscall(compat_sys_keyctl); cond_syscall(compat_sys_socketcall); cond_syscall(sys_inotify_init); +cond_syscall(sys_inotify_init1); cond_syscall(sys_inotify_add_watch); cond_syscall(sys_inotify_rm_watch); cond_syscall(sys_migrate_pages); -- cgit v1.2.3-70-g09d2 From 9fe5ad9c8cef9ad5873d8ee55d1cf00d9b607df0 Mon Sep 17 00:00:00 2001 From: Ulrich Drepper Date: Wed, 23 Jul 2008 21:29:43 -0700 Subject: flag parameters add-on: remove epoll_create size param Remove the size parameter from the new epoll_create syscall and rename the syscall itself. The updated test program follows. 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #include #include #include #include #include #ifndef __NR_epoll_create2 # ifdef __x86_64__ # define __NR_epoll_create2 291 # elif defined __i386__ # define __NR_epoll_create2 329 # else # error "need __NR_epoll_create2" # endif #endif #define EPOLL_CLOEXEC O_CLOEXEC int main (void) { int fd = syscall (__NR_epoll_create2, 0); if (fd == -1) { puts ("epoll_create2(0) failed"); return 1; } int coe = fcntl (fd, F_GETFD); if (coe == -1) { puts ("fcntl failed"); return 1; } if (coe & FD_CLOEXEC) { puts ("epoll_create2(0) set close-on-exec flag"); return 1; } close (fd); fd = syscall (__NR_epoll_create2, EPOLL_CLOEXEC); if (fd == -1) { puts ("epoll_create2(EPOLL_CLOEXEC) failed"); return 1; } coe = fcntl (fd, F_GETFD); if (coe == -1) { puts ("fcntl failed"); return 1; } if ((coe & FD_CLOEXEC) == 0) { puts ("epoll_create2(EPOLL_CLOEXEC) does not set close-on-exec flag"); return 1; } close (fd); puts ("OK"); return 0; } ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Signed-off-by: Ulrich Drepper Acked-by: Davide Libenzi Cc: Michael Kerrisk Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/ia32/ia32entry.S | 2 +- arch/x86/kernel/syscall_table_32.S | 2 +- fs/eventpoll.c | 18 ++++++++++-------- include/asm-x86/unistd_32.h | 2 +- include/asm-x86/unistd_64.h | 4 ++-- include/linux/eventpoll.h | 2 +- include/linux/syscalls.h | 2 +- 7 files changed, 17 insertions(+), 15 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 4541073dd83..e4bd1793a5e 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -828,7 +828,7 @@ ia32_sys_call_table: .quad compat_sys_timerfd_gettime .quad compat_sys_signalfd4 .quad sys_eventfd2 - .quad sys_epoll_create2 + .quad sys_epoll_create1 .quad sys_dup3 /* 330 */ .quad sys_pipe2 .quad sys_inotify_init1 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index f59aba5ff0f..d44395ff34c 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S @@ -328,7 +328,7 @@ ENTRY(sys_call_table) .long sys_timerfd_gettime .long sys_signalfd4 .long sys_eventfd2 - .long sys_epoll_create2 + .long sys_epoll_create1 .long sys_dup3 /* 330 */ .long sys_pipe2 .long sys_inotify_init1 diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 2fdad420404..0c87474f791 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1046,7 +1046,7 @@ retry: * RB tree. With the current implementation, the "size" parameter is ignored * (besides sanity checks). */ -asmlinkage long sys_epoll_create2(int size, int flags) +asmlinkage long sys_epoll_create1(int flags) { int error, fd = -1; struct eventpoll *ep; @@ -1058,14 +1058,13 @@ asmlinkage long sys_epoll_create2(int size, int flags) return -EINVAL; DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n", - current, size)); + current, flags)); /* - * Sanity check on the size parameter, and create the internal data - * structure ( "struct eventpoll" ). + * Create the internal data structure ( "struct eventpoll" ). 
*/ - error = -EINVAL; - if (size <= 0 || (error = ep_alloc(&ep)) < 0) { + error = ep_alloc(&ep); + if (error < 0) { fd = error; goto error_return; } @@ -1081,14 +1080,17 @@ asmlinkage long sys_epoll_create2(int size, int flags) error_return: DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n", - current, size, fd)); + current, flags, fd)); return fd; } asmlinkage long sys_epoll_create(int size) { - return sys_epoll_create2(size, 0); + if (size < 0) + return -EINVAL; + + return sys_epoll_create1(0); } /* diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h index b3daf503ab9..d7394673b77 100644 --- a/include/asm-x86/unistd_32.h +++ b/include/asm-x86/unistd_32.h @@ -334,7 +334,7 @@ #define __NR_timerfd_gettime 326 #define __NR_signalfd4 327 #define __NR_eventfd2 328 -#define __NR_epoll_create2 329 +#define __NR_epoll_create1 329 #define __NR_dup3 330 #define __NR_pipe2 331 #define __NR_inotify_init1 332 diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h index c8cb88d70c6..3a341d79179 100644 --- a/include/asm-x86/unistd_64.h +++ b/include/asm-x86/unistd_64.h @@ -645,8 +645,8 @@ __SYSCALL(__NR_paccept, sys_paccept) __SYSCALL(__NR_signalfd4, sys_signalfd4) #define __NR_eventfd2 290 __SYSCALL(__NR_eventfd2, sys_eventfd2) -#define __NR_epoll_create2 291 -__SYSCALL(__NR_epoll_create2, sys_epoll_create2) +#define __NR_epoll_create1 291 +__SYSCALL(__NR_epoll_create1, sys_epoll_create1) #define __NR_dup3 292 __SYSCALL(__NR_dup3, sys_dup3) #define __NR_pipe2 293 diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index 1cfaa40059c..f1e1d3c4712 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h @@ -18,7 +18,7 @@ #include #include -/* Flags for epoll_create2. */ +/* Flags for epoll_create1. */ #define EPOLL_CLOEXEC O_CLOEXEC /* Valid opcodes to issue to sys_epoll_ctl() */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 93a7e7f017a..06f2bf76c03 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -431,7 +431,7 @@ asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds, asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp); asmlinkage long sys_epoll_create(int size); -asmlinkage long sys_epoll_create2(int size, int flags); +asmlinkage long sys_epoll_create1(int flags); asmlinkage long sys_epoll_ctl(int epfd, int op, int fd, struct epoll_event __user *event); asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events, -- cgit v1.2.3-70-g09d2 From 7e2a31da854dcf8324012a83a31b40bc11e52589 Mon Sep 17 00:00:00 2001 From: David Brownell Date: Wed, 23 Jul 2008 21:30:47 -0700 Subject: rtc-cmos: avoid spurious irqs This fixes kernel http://bugzilla.kernel.org/show_bug.cgi?id=11112 (bogus RTC update IRQs reported) for rtc-cmos, in two ways: - When HPET is stealing the IRQs, use the first IRQ to grab the seconds counter which will be monitored (instead of using whatever was previously in that memory); - In sane IRQ handling modes, scrub out old IRQ status before enabling IRQs. That latter is done by tightening up IRQ handling for rtc-cmos everywhere, also ensuring that when HPET is used it's the only thing triggering IRQ reports to userspace; net object shrink. Also fix a bogus HPET message related to its RTC emulation. 
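Background for the helpers this patch introduces: on MC146818-compatible RTCs the interrupt status register (RTC_INTR_FLAGS, register C) is clear-on-read, so merely reading it acknowledges any latched update/alarm/periodic status. The new cmos_irq_enable() relies on that ordering — flush whatever is already latched, unmask, then check again. A condensed sketch of the idea (illustrative only; it assumes rtc_lock is held, and the real helpers also forward pending status to the RTC core and mirror the mask into the HPET when it emulates the RTC interrupt):
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#include <linux/mc146818rtc.h>

/* illustrative reduction of the ack-before-unmask pattern */
static void rtc_unmask_sketch(unsigned char mask)
{
	unsigned char ctrl = CMOS_READ(RTC_CONTROL);

	(void)CMOS_READ(RTC_INTR_FLAGS);	/* ack stale latched status */
	CMOS_WRITE(ctrl | mask, RTC_CONTROL);	/* then enable the source */
	(void)CMOS_READ(RTC_INTR_FLAGS);	/* consume anything just raised */
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~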
Signed-off-by: David Brownell Report-by: W Unruh Cc: Andrew Victor Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/kernel/hpet.c | 10 ++-- drivers/rtc/rtc-cmos.c | 140 ++++++++++++++++++++++--------------------------- 2 files changed, 70 insertions(+), 80 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 0ea6a19bfdf..ad2b15a1334 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -468,7 +468,7 @@ void hpet_disable(void) #define RTC_NUM_INTS 1 static unsigned long hpet_rtc_flags; -static unsigned long hpet_prev_update_sec; +static int hpet_prev_update_sec; static struct rtc_time hpet_alarm_time; static unsigned long hpet_pie_count; static unsigned long hpet_t1_cmp; @@ -575,6 +575,9 @@ int hpet_set_rtc_irq_bit(unsigned long bit_mask) hpet_rtc_flags |= bit_mask; + if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE)) + hpet_prev_update_sec = -1; + if (!oldbits) hpet_rtc_timer_init(); @@ -652,7 +655,7 @@ static void hpet_rtc_timer_reinit(void) if (hpet_rtc_flags & RTC_PIE) hpet_pie_count += lost_ints; if (printk_ratelimit()) - printk(KERN_WARNING "rtc: lost %d interrupts\n", + printk(KERN_WARNING "hpet1: lost %d rtc interrupts\n", lost_ints); } } @@ -670,7 +673,8 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) if (hpet_rtc_flags & RTC_UIE && curr_time.tm_sec != hpet_prev_update_sec) { - rtc_int_flag = RTC_UF; + if (hpet_prev_update_sec >= 0) + rtc_int_flag = RTC_UF; hpet_prev_update_sec = curr_time.tm_sec; } diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index e9984650ea9..6ea349aba3b 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -235,11 +235,56 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t) return 0; } +static void cmos_checkintr(struct cmos_rtc *cmos, unsigned char rtc_control) +{ + unsigned char rtc_intr; + + /* NOTE after changing RTC_xIE bits we always read INTR_FLAGS; + * allegedly some older rtcs need that to handle irqs properly + */ + rtc_intr = CMOS_READ(RTC_INTR_FLAGS); + + if (is_hpet_enabled()) + return; + + rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF; + if (is_intr(rtc_intr)) + rtc_update_irq(cmos->rtc, 1, rtc_intr); +} + +static void cmos_irq_enable(struct cmos_rtc *cmos, unsigned char mask) +{ + unsigned char rtc_control; + + /* flush any pending IRQ status, notably for update irqs, + * before we enable new IRQs + */ + rtc_control = CMOS_READ(RTC_CONTROL); + cmos_checkintr(cmos, rtc_control); + + rtc_control |= mask; + CMOS_WRITE(rtc_control, RTC_CONTROL); + hpet_set_rtc_irq_bit(mask); + + cmos_checkintr(cmos, rtc_control); +} + +static void cmos_irq_disable(struct cmos_rtc *cmos, unsigned char mask) +{ + unsigned char rtc_control; + + rtc_control = CMOS_READ(RTC_CONTROL); + rtc_control &= ~mask; + CMOS_WRITE(rtc_control, RTC_CONTROL); + hpet_mask_rtc_irq_bit(mask); + + cmos_checkintr(cmos, rtc_control); +} + static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) { struct cmos_rtc *cmos = dev_get_drvdata(dev); unsigned char mon, mday, hrs, min, sec; - unsigned char rtc_control, rtc_intr; if (!is_valid_irq(cmos->irq)) return -EIO; @@ -266,15 +311,7 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) spin_lock_irq(&rtc_lock); /* next rtc irq must not be from previous alarm setting */ - rtc_control = CMOS_READ(RTC_CONTROL); - rtc_control &= ~RTC_AIE; - CMOS_WRITE(rtc_control, RTC_CONTROL); - hpet_mask_rtc_irq_bit(RTC_AIE); - - rtc_intr = CMOS_READ(RTC_INTR_FLAGS); - rtc_intr &= 
(rtc_control & RTC_IRQMASK) | RTC_IRQF; - if (is_intr(rtc_intr)) - rtc_update_irq(cmos->rtc, 1, rtc_intr); + cmos_irq_disable(cmos, RTC_AIE); /* update alarm */ CMOS_WRITE(hrs, RTC_HOURS_ALARM); @@ -293,16 +330,8 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) */ hpet_set_alarm_time(t->time.tm_hour, t->time.tm_min, t->time.tm_sec); - if (t->enabled) { - rtc_control |= RTC_AIE; - CMOS_WRITE(rtc_control, RTC_CONTROL); - hpet_set_rtc_irq_bit(RTC_AIE); - - rtc_intr = CMOS_READ(RTC_INTR_FLAGS); - rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF; - if (is_intr(rtc_intr)) - rtc_update_irq(cmos->rtc, 1, rtc_intr); - } + if (t->enabled) + cmos_irq_enable(cmos, RTC_AIE); spin_unlock_irq(&rtc_lock); @@ -335,28 +364,17 @@ static int cmos_irq_set_freq(struct device *dev, int freq) static int cmos_irq_set_state(struct device *dev, int enabled) { struct cmos_rtc *cmos = dev_get_drvdata(dev); - unsigned char rtc_control, rtc_intr; unsigned long flags; if (!is_valid_irq(cmos->irq)) return -ENXIO; spin_lock_irqsave(&rtc_lock, flags); - rtc_control = CMOS_READ(RTC_CONTROL); - - if (enabled) { - rtc_control |= RTC_PIE; - hpet_set_rtc_irq_bit(RTC_PIE); - } else { - rtc_control &= ~RTC_PIE; - hpet_mask_rtc_irq_bit(RTC_PIE); - } - CMOS_WRITE(rtc_control, RTC_CONTROL); - rtc_intr = CMOS_READ(RTC_INTR_FLAGS); - rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF; - if (is_intr(rtc_intr)) - rtc_update_irq(cmos->rtc, 1, rtc_intr); + if (enabled) + cmos_irq_enable(cmos, RTC_PIE); + else + cmos_irq_disable(cmos, RTC_PIE); spin_unlock_irqrestore(&rtc_lock, flags); return 0; @@ -368,7 +386,6 @@ static int cmos_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) { struct cmos_rtc *cmos = dev_get_drvdata(dev); - unsigned char rtc_control, rtc_intr; unsigned long flags; switch (cmd) { @@ -385,32 +402,20 @@ cmos_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) } spin_lock_irqsave(&rtc_lock, flags); - rtc_control = CMOS_READ(RTC_CONTROL); switch (cmd) { case RTC_AIE_OFF: /* alarm off */ - rtc_control &= ~RTC_AIE; - hpet_mask_rtc_irq_bit(RTC_AIE); + cmos_irq_disable(cmos, RTC_AIE); break; case RTC_AIE_ON: /* alarm on */ - rtc_control |= RTC_AIE; - hpet_set_rtc_irq_bit(RTC_AIE); + cmos_irq_enable(cmos, RTC_AIE); break; case RTC_UIE_OFF: /* update off */ - rtc_control &= ~RTC_UIE; - hpet_mask_rtc_irq_bit(RTC_UIE); + cmos_irq_disable(cmos, RTC_UIE); break; case RTC_UIE_ON: /* update on */ - rtc_control |= RTC_UIE; - hpet_set_rtc_irq_bit(RTC_UIE); + cmos_irq_enable(cmos, RTC_UIE); break; } - CMOS_WRITE(rtc_control, RTC_CONTROL); - - rtc_intr = CMOS_READ(RTC_INTR_FLAGS); - rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF; - if (is_intr(rtc_intr)) - rtc_update_irq(cmos->rtc, 1, rtc_intr); - spin_unlock_irqrestore(&rtc_lock, flags); return 0; } @@ -571,7 +576,6 @@ static irqreturn_t cmos_interrupt(int irq, void *p) * alarm woke the system. */ if (irqstat & RTC_AIE) { - rtc_control = CMOS_READ(RTC_CONTROL); rtc_control &= ~RTC_AIE; CMOS_WRITE(rtc_control, RTC_CONTROL); hpet_mask_rtc_irq_bit(RTC_AIE); @@ -685,17 +689,10 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) hpet_set_periodic_freq(cmos_rtc.rtc->irq_freq); CMOS_WRITE(RTC_REF_CLCK_32KHZ | 0x06, RTC_FREQ_SELECT); - /* disable irqs. 
- * - * NOTE after changing RTC_xIE bits we always read INTR_FLAGS; - * allegedly some older rtcs need that to handle irqs properly - */ - rtc_control = CMOS_READ(RTC_CONTROL); - rtc_control &= ~(RTC_PIE | RTC_AIE | RTC_UIE); - CMOS_WRITE(rtc_control, RTC_CONTROL); - hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE); + /* disable irqs */ + cmos_irq_disable(&cmos_rtc, RTC_PIE | RTC_AIE | RTC_UIE); - CMOS_READ(RTC_INTR_FLAGS); + rtc_control = CMOS_READ(RTC_CONTROL); spin_unlock_irq(&rtc_lock); @@ -768,15 +765,8 @@ cleanup0: static void cmos_do_shutdown(void) { - unsigned char rtc_control; - spin_lock_irq(&rtc_lock); - rtc_control = CMOS_READ(RTC_CONTROL); - rtc_control &= ~RTC_IRQMASK; - CMOS_WRITE(rtc_control, RTC_CONTROL); - hpet_mask_rtc_irq_bit(RTC_IRQMASK); - - CMOS_READ(RTC_INTR_FLAGS); + cmos_irq_disable(&cmos_rtc, RTC_IRQMASK); spin_unlock_irq(&rtc_lock); } @@ -817,7 +807,6 @@ static int cmos_suspend(struct device *dev, pm_message_t mesg) spin_lock_irq(&rtc_lock); cmos->suspend_ctrl = tmp = CMOS_READ(RTC_CONTROL); if (tmp & (RTC_PIE|RTC_AIE|RTC_UIE)) { - unsigned char irqstat; unsigned char mask; if (do_wake) @@ -828,10 +817,7 @@ static int cmos_suspend(struct device *dev, pm_message_t mesg) CMOS_WRITE(tmp, RTC_CONTROL); hpet_mask_rtc_irq_bit(mask); - irqstat = CMOS_READ(RTC_INTR_FLAGS); - irqstat &= (tmp & RTC_IRQMASK) | RTC_IRQF; - if (is_intr(irqstat)) - rtc_update_irq(cmos->rtc, 1, irqstat); + cmos_checkintr(cmos, tmp); } spin_unlock_irq(&rtc_lock); @@ -875,7 +861,7 @@ static int cmos_resume(struct device *dev) mask = CMOS_READ(RTC_INTR_FLAGS); mask &= (tmp & RTC_IRQMASK) | RTC_IRQF; - if (!is_intr(mask)) + if (!is_hpet_enabled() || !is_intr(mask)) break; /* force one-shot behavior if HPET blocked -- cgit v1.2.3-70-g09d2 From 6209ed9d8443b63c36d340908530fa470c4d4fff Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 24 Jul 2008 12:49:26 -0700 Subject: x86-64: make BUILD_IRQ() also reset section back Commit 9d25d4db81833029d30b7b03cc1000cbbe09e192 ("x86: BUILD_IRQ say .text to avoid .data.percpu") added a ".text" specifier to make sure that BUILD_IRQ() builds the irq trampoline in the text segment rather than in some random left-over segment that the compiler happened to leave the asm in. However, we should also make sure that we switch back by adding a ".previous" at the end, so that there are no subtle issues with subsequent compiler-generated code. Signed-off-by: Linus Torvalds --- arch/x86/kernel/irqinit_64.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c index 9414125f19c..1f26fd9ec4f 100644 --- a/arch/x86/kernel/irqinit_64.c +++ b/arch/x86/kernel/irqinit_64.c @@ -46,7 +46,8 @@ asm("\n.text\n.p2align\n" \ "IRQ" #nr "_interrupt:\n\t" \ "push $~(" #nr ") ; " \ - "jmp common_interrupt"); + "jmp common_interrupt\n" \ + ".previous"); #define BI(x,y) \ BUILD_IRQ(x##y) -- cgit v1.2.3-70-g09d2 From b30f3ae50cd03ef2ff433a5030fbf88dd8323528 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 24 Jul 2008 15:43:44 -0700 Subject: x86-64: Clean up 'save/restore_i387()' usage Suresh Siddha wants to fix a possible FPU leakage in error conditions, but the fact that save/restore_i387() are inlines in a header file makes that harder to do than necessary. So start off with an obvious cleanup. This just moves the x86-64 version of save/restore_i387() out of the header file, and moves it to the only file that it is actually used in: arch/x86/kernel/signal_64.c. 
So exposing it in a header file was wrong to begin with. [ Side note: I'd like to fix up some of the games we play with the 32-bit version of these functions too, but that's a separate matter. The 32-bit versions are shared - under different names at that! - by both the native x86-32 code and the x86-64 32-bit compatibility code ] Acked-by: Suresh Siddha Signed-off-by: Linus Torvalds --- arch/x86/kernel/signal_64.c | 53 ++++++++++++++++++++++++++++++++++++++++++++ include/asm-x86/i387.h | 54 --------------------------------------------- 2 files changed, 53 insertions(+), 54 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c index 47c3d249e63..b45ef8ddd65 100644 --- a/arch/x86/kernel/signal_64.c +++ b/arch/x86/kernel/signal_64.c @@ -53,6 +53,59 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, return do_sigaltstack(uss, uoss, regs->sp); } +/* + * Signal frame handlers. + */ + +static inline int save_i387(struct _fpstate __user *buf) +{ + struct task_struct *tsk = current; + int err = 0; + + BUILD_BUG_ON(sizeof(struct user_i387_struct) != + sizeof(tsk->thread.xstate->fxsave)); + + if ((unsigned long)buf % 16) + printk("save_i387: bad fpstate %p\n", buf); + + if (!used_math()) + return 0; + clear_used_math(); /* trigger finit */ + if (task_thread_info(tsk)->status & TS_USEDFPU) { + err = save_i387_checking((struct i387_fxsave_struct __user *) + buf); + if (err) + return err; + task_thread_info(tsk)->status &= ~TS_USEDFPU; + stts(); + } else { + if (__copy_to_user(buf, &tsk->thread.xstate->fxsave, + sizeof(struct i387_fxsave_struct))) + return -1; + } + return 1; +} + +/* + * This restores directly out of user space. Exceptions are handled. + */ +static inline int restore_i387(struct _fpstate __user *buf) +{ + struct task_struct *tsk = current; + int err; + + if (!used_math()) { + err = init_fpu(tsk); + if (err) + return err; + } + + if (!(task_thread_info(current)->status & TS_USEDFPU)) { + clts(); + task_thread_info(current)->status |= TS_USEDFPU; + } + return restore_fpu_checking((__force struct i387_fxsave_struct *)buf); +} /* * Do a signal return; undo the signal stack. diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h index 37672f79dcc..96fa8449ff1 100644 --- a/include/asm-x86/i387.h +++ b/include/asm-x86/i387.h @@ -137,60 +137,6 @@ static inline void __save_init_fpu(struct task_struct *tsk) task_thread_info(tsk)->status &= ~TS_USEDFPU; } -/* - * Signal frame handlers. - */ - -static inline int save_i387(struct _fpstate __user *buf) -{ - struct task_struct *tsk = current; - int err = 0; - - BUILD_BUG_ON(sizeof(struct user_i387_struct) != - sizeof(tsk->thread.xstate->fxsave)); - - if ((unsigned long)buf % 16) - printk("save_i387: bad fpstate %p\n", buf); - - if (!used_math()) - return 0; - clear_used_math(); /* trigger finit */ - if (task_thread_info(tsk)->status & TS_USEDFPU) { - err = save_i387_checking((struct i387_fxsave_struct __user *) - buf); - if (err) - return err; - task_thread_info(tsk)->status &= ~TS_USEDFPU; - stts(); - } else { - if (__copy_to_user(buf, &tsk->thread.xstate->fxsave, - sizeof(struct i387_fxsave_struct))) - return -1; - } - return 1; -} - -/* - * This restores directly out of user space. Exceptions are handled. 
- */ -static inline int restore_i387(struct _fpstate __user *buf) -{ - struct task_struct *tsk = current; - int err; - - if (!used_math()) { - err = init_fpu(tsk); - if (err) - return err; - } - - if (!(task_thread_info(current)->status & TS_USEDFPU)) { - clts(); - task_thread_info(current)->status |= TS_USEDFPU; - } - return restore_fpu_checking((__force struct i387_fxsave_struct *)buf); -} - #else /* CONFIG_X86_32 */ extern void finit(void); -- cgit v1.2.3-70-g09d2 From 4b9f12a3779c548b68bc9af7d94030868ad3aa1b Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 24 Jul 2008 17:29:00 -0700 Subject: x86/oprofile/nmi_int: add Nehalem to list of ppro cores ..otherwise oprofile will fall back on that poor timer interrupt. Also replace the unreadable chain of if-statements with a "switch()" statement instead. It generates better code, and is a lot clearer. Signed-off-by: Linus Torvalds --- arch/x86/oprofile/nmi_int.c | 36 +++++++++++++++++++++++++----------- 1 file changed, 25 insertions(+), 11 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 7f3329b55d2..3f90289410e 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -369,20 +369,34 @@ static int __init ppro_init(char **cpu_type) { __u8 cpu_model = boot_cpu_data.x86_model; - if (cpu_model == 14) + switch (cpu_model) { + case 0 ... 2: + *cpu_type = "i386/ppro"; + break; + case 3 ... 5: + *cpu_type = "i386/pii"; + break; + case 6 ... 8: + *cpu_type = "i386/piii"; + break; + case 9: + *cpu_type = "i386/p6_mobile"; + break; + case 10 ... 13: + *cpu_type = "i386/p6"; + break; + case 14: *cpu_type = "i386/core"; - else if (cpu_model == 15 || cpu_model == 23) + break; + case 15: case 23: + *cpu_type = "i386/core_2"; + break; + case 26: *cpu_type = "i386/core_2"; - else if (cpu_model > 0xd) + break; + default: + /* Unknown */ return 0; - else if (cpu_model == 9) { - *cpu_type = "i386/p6_mobile"; - } else if (cpu_model > 5) { - *cpu_type = "i386/piii"; - } else if (cpu_model > 2) { - *cpu_type = "i386/pii"; - } else { - *cpu_type = "i386/ppro"; } model = &op_ppro_spec; -- cgit v1.2.3-70-g09d2 From 58340a07c194e0aed7bc58b61ff24330bb2a409f Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 25 Jul 2008 01:45:33 -0700 Subject: introduce HAVE_EFFICIENT_UNALIGNED_ACCESS Kconfig symbol In many cases, especially in networking, it can be beneficial to know at compile time whether the architecture can do unaligned accesses efficiently. This patch introduces a new Kconfig symbol HAVE_EFFICIENT_UNALIGNED_ACCESS for that purpose and adds it to the powerpc and x86 architectures. Also add some documentation about alignment and networking, and especially one intended use of this symbol. 
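[ Side note: the intended use is a compile-time branch on the new symbol, as the documentation hunk below describes. A hedged sketch of how a network driver might apply it; maybe_realign() is a hypothetical helper, not part of the patch:

#include <linux/skbuff.h>

/* Sketch: keep the DMA'd buffer when unaligned loads are cheap,
 * otherwise copy into a buffer with the usual NET_IP_ALIGN headroom.
 * On the copy path the caller would still free the original skb. */
static struct sk_buff *maybe_realign(struct sk_buff *skb)
{
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	return skb;
#else
	return skb_copy_expand(skb, NET_IP_ALIGN, 0, GFP_ATOMIC);
#endif
}
]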
Signed-off-by: Johannes Berg Acked-by: Sam Ravnborg Acked-by: Ingo Molnar [x86 architecture part] Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/unaligned-memory-access.txt | 32 ++++++++++++++++++++++++++++--- arch/Kconfig | 19 ++++++++++++++++++ arch/powerpc/Kconfig | 1 + arch/x86/Kconfig | 1 + 4 files changed, 50 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/Documentation/unaligned-memory-access.txt b/Documentation/unaligned-memory-access.txt index b0472ac5226..f866c72291b 100644 --- a/Documentation/unaligned-memory-access.txt +++ b/Documentation/unaligned-memory-access.txt @@ -218,9 +218,35 @@ If use of such macros is not convenient, another option is to use memcpy(), where the source or destination (or both) are of type u8* or unsigned char*. Due to the byte-wise nature of this operation, unaligned accesses are avoided. + +Alignment vs. Networking +======================== + +On architectures that require aligned loads, networking requires that the IP +header is aligned on a four-byte boundary to optimise the IP stack. For +regular ethernet hardware, the constant NET_IP_ALIGN is used. On most +architectures this constant has the value 2 because the normal ethernet +header is 14 bytes long, so in order to get proper alignment one needs to +DMA to an address which can be expressed as 4*n + 2. One notable exception +here is powerpc which defines NET_IP_ALIGN to 0 because DMA to unaligned +addresses can be very expensive and dwarf the cost of unaligned loads. + +For some ethernet hardware that cannot DMA to unaligned addresses like +4*n+2 or non-ethernet hardware, this can be a problem, and it is then +required to copy the incoming frame into an aligned buffer. Because this is +unnecessary on architectures that can do unaligned accesses, the code can be +made dependent on CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS like so: + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + skb = original skb +#else + skb = copy skb +#endif + -- -Author: Daniel Drake +Authors: Daniel Drake , + Johannes Berg With help from: Alan Cox, Avuton Olrich, Heikki Orsila, Jan Engelhardt, -Johannes Berg, Kyle McMartin, Kyle Moffett, Randy Dunlap, Robert Hancock, -Uli Kunitz, Vadim Lobanov +Kyle McMartin, Kyle Moffett, Randy Dunlap, Robert Hancock, Uli Kunitz, +Vadim Lobanov diff --git a/arch/Kconfig b/arch/Kconfig index 6093c0be58b..b0fabfa864f 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -27,6 +27,25 @@ config KPROBES for kernel debugging, non-intrusive instrumentation and testing. If in doubt, say "N". +config HAVE_EFFICIENT_UNALIGNED_ACCESS + def_bool n + help + Some architectures are unable to perform unaligned accesses + without the use of get_unaligned/put_unaligned. Others are + unable to perform such accesses efficiently (e.g. trap on + unaligned access and require fixing it up in the exception + handler.) + + This symbol should be selected by an architecture if it can + perform unaligned accesses efficiently to allow different + code paths to be selected for these cases. Some network + drivers, for example, could opt to not fix up alignment + problems with received packets if doing so would not help + much. + + See Documentation/unaligned-memory-access.txt for more + information on the topic of unaligned memory accesses. 
+ config KRETPROBES def_bool y depends on KPROBES && HAVE_KRETPROBES diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index a487671c282..de6b49cd6be 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -112,6 +112,7 @@ config PPC select HAVE_FTRACE select HAVE_IDE select HAVE_IOREMAP_PROT + select HAVE_EFFICIENT_UNALIGNED_ACCESS select HAVE_KPROBES select HAVE_ARCH_KGDB select HAVE_KRETPROBES diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index b2ddfcf0172..66f3ab05b18 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -28,6 +28,7 @@ config X86 select HAVE_FTRACE select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) select HAVE_ARCH_KGDB if !X86_VOYAGER + select HAVE_EFFICIENT_UNALIGNED_ACCESS config ARCH_DEFCONFIG string -- cgit v1.2.3-70-g09d2 From 2d6ffcca623a9a16df6cdfbe8250b7a5904a5f5e Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Fri, 25 Jul 2008 01:45:44 -0700 Subject: inflate: refactor inflate malloc code Inflate requires some dynamic memory allocation very early in the boot process and this is provided with a set of four functions: malloc/free/gzip_mark/gzip_release. The old inflate code used a mark/release strategy rather than implement free. This new version instead keeps a count on the number of outstanding allocations and when it hits zero, it resets the malloc arena. This allows removing all the mark and release implementations and unifying all the malloc/free implementations. The architecture-dependent code must define two addresses: - free_mem_ptr, the address of the beginning of the area in which allocations should be made - free_mem_end_ptr, the address of the end of the area in which allocations should be made. If set to 0, then no check is made on the number of allocations, it just grows as much as needed The architecture-dependent code can also provide an arch_decomp_wdog() function call. This function will be called several times during the decompression process, and allow to notify the watchdog that the system is still running. If an architecture provides such a call, then it must define ARCH_HAS_DECOMP_WDOG so that the generic inflate code calls arch_decomp_wdog(). Work initially done by Matt Mackall, updated to a recent version of the kernel and improved by me. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Thomas Petazzoni Cc: Matt Mackall Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Mikael Starvik Cc: Jesper Nilsson Cc: Haavard Skinnemoen Cc: David Howells Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Andi Kleen Cc: "H. 
Peter Anvin" Acked-by: Paul Mundt Acked-by: Yoshinori Sato Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/boot/misc.c | 39 ++------------------ arch/arm/boot/compressed/misc.c | 59 ++++--------------------------- arch/cris/arch-v10/boot/compressed/misc.c | 36 +------------------ arch/cris/arch-v32/boot/compressed/misc.c | 39 ++------------------ arch/h8300/boot/compressed/misc.c | 38 -------------------- arch/m32r/boot/compressed/misc.c | 37 ------------------- arch/mn10300/boot/compressed/misc.c | 37 ------------------- arch/sh/boot/compressed/misc_32.c | 38 -------------------- arch/sh/boot/compressed/misc_64.c | 40 --------------------- arch/x86/boot/compressed/misc.c | 39 -------------------- init/do_mounts_rd.c | 25 ++----------- init/initramfs.c | 22 ++---------- lib/inflate.c | 52 +++++++++++++++++++++++---- 13 files changed, 62 insertions(+), 439 deletions(-) (limited to 'arch/x86') diff --git a/arch/alpha/boot/misc.c b/arch/alpha/boot/misc.c index c00646b25f6..3047a1b3a51 100644 --- a/arch/alpha/boot/misc.c +++ b/arch/alpha/boot/misc.c @@ -78,8 +78,6 @@ static unsigned outcnt; /* bytes in output buffer */ static int fill_inbuf(void); static void flush_window(void); static void error(char *m); -static void gzip_mark(void **); -static void gzip_release(void **); static char *input_data; static int input_data_size; @@ -88,51 +86,18 @@ static uch *output_data; static ulg output_ptr; static ulg bytes_out; -static void *malloc(int size); -static void free(void *where); static void error(char *m); static void gzip_mark(void **); static void gzip_release(void **); extern int end; static ulg free_mem_ptr; -static ulg free_mem_ptr_end; +static ulg free_mem_end_ptr; #define HEAP_SIZE 0x3000 #include "../../../lib/inflate.c" -static void *malloc(int size) -{ - void *p; - - if (size <0) error("Malloc error"); - if (free_mem_ptr <= 0) error("Memory error"); - - free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ - - p = (void *)free_mem_ptr; - free_mem_ptr += size; - - if (free_mem_ptr >= free_mem_ptr_end) - error("Out of memory"); - return p; -} - -static void free(void *where) -{ /* gzip_mark & gzip_release do the free */ -} - -static void gzip_mark(void **ptr) -{ - *ptr = (void *) free_mem_ptr; -} - -static void gzip_release(void **ptr) -{ - free_mem_ptr = (long) *ptr; -} - /* =========================================================================== * Fill the input buffer. This is called only when the buffer is empty * and at least one byte is really needed. 
@@ -193,7 +158,7 @@ decompress_kernel(void *output_start, /* FIXME FIXME FIXME */ free_mem_ptr = (ulg)output_start + ksize; - free_mem_ptr_end = (ulg)output_start + ksize + 0x200000; + free_mem_end_ptr = (ulg)output_start + ksize + 0x200000; /* FIXME FIXME FIXME */ /* put in temp area to reduce initial footprint */ diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c index 9b444022cb9..7145cc7c04f 100644 --- a/arch/arm/boot/compressed/misc.c +++ b/arch/arm/boot/compressed/misc.c @@ -217,8 +217,6 @@ static unsigned outcnt; /* bytes in output buffer */ static int fill_inbuf(void); static void flush_window(void); static void error(char *m); -static void gzip_mark(void **); -static void gzip_release(void **); extern char input_data[]; extern char input_data_end[]; @@ -227,64 +225,21 @@ static uch *output_data; static ulg output_ptr; static ulg bytes_out; -static void *malloc(int size); -static void free(void *where); static void error(char *m); -static void gzip_mark(void **); -static void gzip_release(void **); static void putstr(const char *); extern int end; static ulg free_mem_ptr; -static ulg free_mem_ptr_end; +static ulg free_mem_end_ptr; -#define HEAP_SIZE 0x3000 - -#include "../../../../lib/inflate.c" - -#ifndef STANDALONE_DEBUG -static void *malloc(int size) -{ - void *p; - - if (size <0) error("Malloc error"); - if (free_mem_ptr <= 0) error("Memory error"); - - free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ - - p = (void *)free_mem_ptr; - free_mem_ptr += size; - - if (free_mem_ptr >= free_mem_ptr_end) - error("Out of memory"); - return p; -} - -static void free(void *where) -{ /* gzip_mark & gzip_release do the free */ -} - -static void gzip_mark(void **ptr) -{ - arch_decomp_wdog(); - *ptr = (void *) free_mem_ptr; -} +#ifdef STANDALONE_DEBUG +#define NO_INFLATE_MALLOC +#endif -static void gzip_release(void **ptr) -{ - arch_decomp_wdog(); - free_mem_ptr = (long) *ptr; -} -#else -static void gzip_mark(void **ptr) -{ -} +#define ARCH_HAS_DECOMP_WDOG -static void gzip_release(void **ptr) -{ -} -#endif +#include "../../../../lib/inflate.c" /* =========================================================================== * Fill the input buffer. 
This is called only when the buffer is empty @@ -348,7 +303,7 @@ decompress_kernel(ulg output_start, ulg free_mem_ptr_p, ulg free_mem_ptr_end_p, { output_data = (uch *)output_start; /* Points to kernel start */ free_mem_ptr = free_mem_ptr_p; - free_mem_ptr_end = free_mem_ptr_end_p; + free_mem_end_ptr = free_mem_ptr_end_p; __machine_arch_type = arch_id; arch_decomp_setup(); diff --git a/arch/cris/arch-v10/boot/compressed/misc.c b/arch/cris/arch-v10/boot/compressed/misc.c index 18e13bce140..d933c89889d 100644 --- a/arch/cris/arch-v10/boot/compressed/misc.c +++ b/arch/cris/arch-v10/boot/compressed/misc.c @@ -102,50 +102,16 @@ extern char *input_data; /* lives in head.S */ static long bytes_out = 0; static uch *output_data; static unsigned long output_ptr = 0; - -static void *malloc(int size); -static void free(void *where); -static void gzip_mark(void **); -static void gzip_release(void **); - static void puts(const char *); /* the "heap" is put directly after the BSS ends, at end */ extern int _end; static long free_mem_ptr = (long)&_end; +static long free_mem_end_ptr; #include "../../../../../lib/inflate.c" -static void *malloc(int size) -{ - void *p; - - if (size < 0) - error("Malloc error"); - - free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ - - p = (void *)free_mem_ptr; - free_mem_ptr += size; - - return p; -} - -static void free(void *where) -{ /* Don't care */ -} - -static void gzip_mark(void **ptr) -{ - *ptr = (void *) free_mem_ptr; -} - -static void gzip_release(void **ptr) -{ - free_mem_ptr = (long) *ptr; -} - /* decompressor info and error messages to serial console */ static void diff --git a/arch/cris/arch-v32/boot/compressed/misc.c b/arch/cris/arch-v32/boot/compressed/misc.c index 55b2695c5d7..3595e16e82b 100644 --- a/arch/cris/arch-v32/boot/compressed/misc.c +++ b/arch/cris/arch-v32/boot/compressed/misc.c @@ -89,20 +89,14 @@ static unsigned outcnt = 0; /* bytes in output buffer */ static void flush_window(void); static void error(char *m); -static void gzip_mark(void **); -static void gzip_release(void **); extern char *input_data; /* lives in head.S */ -static long bytes_out = 0; +static long bytes_out; static uch *output_data; -static unsigned long output_ptr = 0; +static unsigned long output_ptr; -static void *malloc(int size); -static void free(void *where); static void error(char *m); -static void gzip_mark(void **); -static void gzip_release(void **); static void puts(const char *); @@ -110,37 +104,10 @@ static void puts(const char *); extern int _end; static long free_mem_ptr = (long)&_end; +static long free_mem_end_ptr; #include "../../../../../lib/inflate.c" -static void *malloc(int size) -{ - void *p; - - if (size <0) error("Malloc error"); - - free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ - - p = (void *)free_mem_ptr; - free_mem_ptr += size; - - return p; -} - -static void free(void *where) -{ /* Don't care */ -} - -static void gzip_mark(void **ptr) -{ - *ptr = (void *) free_mem_ptr; -} - -static void gzip_release(void **ptr) -{ - free_mem_ptr = (long) *ptr; -} - /* decompressor info and error messages to serial console */ static inline void diff --git a/arch/h8300/boot/compressed/misc.c b/arch/h8300/boot/compressed/misc.c index 845074588af..51ab6cbd030 100644 --- a/arch/h8300/boot/compressed/misc.c +++ b/arch/h8300/boot/compressed/misc.c @@ -67,8 +67,6 @@ static unsigned outcnt = 0; /* bytes in output buffer */ static int fill_inbuf(void); static void flush_window(void); static void error(char *m); -static void gzip_mark(void **); -static void 
gzip_release(void **); extern char input_data[]; extern int input_len; @@ -77,11 +75,7 @@ static long bytes_out = 0; static uch *output_data; static unsigned long output_ptr = 0; -static void *malloc(int size); -static void free(void *where); static void error(char *m); -static void gzip_mark(void **); -static void gzip_release(void **); int puts(const char *); @@ -98,38 +92,6 @@ static unsigned long free_mem_end_ptr; #define TDR *((volatile unsigned char *)0xffff8b) #define SSR *((volatile unsigned char *)0xffff8c) -static void *malloc(int size) -{ - void *p; - - if (size <0) error("Malloc error"); - if (free_mem_ptr == 0) error("Memory error"); - - free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ - - p = (void *)free_mem_ptr; - free_mem_ptr += size; - - if (free_mem_ptr >= free_mem_end_ptr) - error("Out of memory"); - - return p; -} - -static void free(void *where) -{ /* Don't care */ -} - -static void gzip_mark(void **ptr) -{ - *ptr = (void *) free_mem_ptr; -} - -static void gzip_release(void **ptr) -{ - free_mem_ptr = (long) *ptr; -} - int puts(const char *s) { return 0; diff --git a/arch/m32r/boot/compressed/misc.c b/arch/m32r/boot/compressed/misc.c index 600d40e3349..d394292498c 100644 --- a/arch/m32r/boot/compressed/misc.c +++ b/arch/m32r/boot/compressed/misc.c @@ -70,8 +70,6 @@ static unsigned outcnt = 0; /* bytes in output buffer */ static int fill_inbuf(void); static void flush_window(void); static void error(char *m); -static void gzip_mark(void **); -static void gzip_release(void **); static unsigned char *input_data; static int input_len; @@ -82,9 +80,6 @@ static unsigned long output_ptr = 0; #include "m32r_sio.c" -static void *malloc(int size); -static void free(void *where); - static unsigned long free_mem_ptr; static unsigned long free_mem_end_ptr; @@ -92,38 +87,6 @@ static unsigned long free_mem_end_ptr; #include "../../../../lib/inflate.c" -static void *malloc(int size) -{ - void *p; - - if (size <0) error("Malloc error"); - if (free_mem_ptr == 0) error("Memory error"); - - free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ - - p = (void *)free_mem_ptr; - free_mem_ptr += size; - - if (free_mem_ptr >= free_mem_end_ptr) - error("Out of memory"); - - return p; -} - -static void free(void *where) -{ /* Don't care */ -} - -static void gzip_mark(void **ptr) -{ - *ptr = (void *) free_mem_ptr; -} - -static void gzip_release(void **ptr) -{ - free_mem_ptr = (long) *ptr; -} - void* memset(void* s, int c, size_t n) { int i; diff --git a/arch/mn10300/boot/compressed/misc.c b/arch/mn10300/boot/compressed/misc.c index ded207efc97..f673383518e 100644 --- a/arch/mn10300/boot/compressed/misc.c +++ b/arch/mn10300/boot/compressed/misc.c @@ -153,26 +153,9 @@ static uch *output_data; static unsigned long output_ptr; -static void *malloc(int size); - -static inline void free(void *where) -{ /* Don't care */ -} - static unsigned long free_mem_ptr = (unsigned long) &end; static unsigned long free_mem_end_ptr = (unsigned long) &end + 0x90000; -static inline void gzip_mark(void **ptr) -{ - kputs("."); - *ptr = (void *) free_mem_ptr; -} - -static inline void gzip_release(void **ptr) -{ - free_mem_ptr = (unsigned long) *ptr; -} - #define INPLACE_MOVE_ROUTINE 0x1000 #define LOW_BUFFER_START 0x2000 #define LOW_BUFFER_END 0x90000 @@ -186,26 +169,6 @@ static int lines, cols; #include "../../../../lib/inflate.c" -static void *malloc(int size) -{ - void *p; - - if (size < 0) - error("Malloc error\n"); - if (!free_mem_ptr) - error("Memory error\n"); - - free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align 
*/ - - p = (void *) free_mem_ptr; - free_mem_ptr += size; - - if (free_mem_ptr >= free_mem_end_ptr) - error("\nOut of memory\n"); - - return p; -} - static inline void scroll(void) { int i; diff --git a/arch/sh/boot/compressed/misc_32.c b/arch/sh/boot/compressed/misc_32.c index adcea31e663..f386997e4d9 100644 --- a/arch/sh/boot/compressed/misc_32.c +++ b/arch/sh/boot/compressed/misc_32.c @@ -74,8 +74,6 @@ static unsigned outcnt = 0; /* bytes in output buffer */ static int fill_inbuf(void); static void flush_window(void); static void error(char *m); -static void gzip_mark(void **); -static void gzip_release(void **); extern char input_data[]; extern int input_len; @@ -84,11 +82,7 @@ static long bytes_out = 0; static uch *output_data; static unsigned long output_ptr = 0; -static void *malloc(int size); -static void free(void *where); static void error(char *m); -static void gzip_mark(void **); -static void gzip_release(void **); int puts(const char *); @@ -101,38 +95,6 @@ static unsigned long free_mem_end_ptr; #include "../../../../lib/inflate.c" -static void *malloc(int size) -{ - void *p; - - if (size <0) error("Malloc error"); - if (free_mem_ptr == 0) error("Memory error"); - - free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ - - p = (void *)free_mem_ptr; - free_mem_ptr += size; - - if (free_mem_ptr >= free_mem_end_ptr) - error("Out of memory"); - - return p; -} - -static void free(void *where) -{ /* Don't care */ -} - -static void gzip_mark(void **ptr) -{ - *ptr = (void *) free_mem_ptr; -} - -static void gzip_release(void **ptr) -{ - free_mem_ptr = (long) *ptr; -} - #ifdef CONFIG_SH_STANDARD_BIOS size_t strlen(const char *s) { diff --git a/arch/sh/boot/compressed/misc_64.c b/arch/sh/boot/compressed/misc_64.c index a006ef89b9d..2941657e18a 100644 --- a/arch/sh/boot/compressed/misc_64.c +++ b/arch/sh/boot/compressed/misc_64.c @@ -72,8 +72,6 @@ static unsigned outcnt = 0; /* bytes in output buffer */ static int fill_inbuf(void); static void flush_window(void); static void error(char *m); -static void gzip_mark(void **); -static void gzip_release(void **); extern char input_data[]; extern int input_len; @@ -82,11 +80,7 @@ static long bytes_out = 0; static uch *output_data; static unsigned long output_ptr = 0; -static void *malloc(int size); -static void free(void *where); static void error(char *m); -static void gzip_mark(void **); -static void gzip_release(void **); static void puts(const char *); @@ -99,40 +93,6 @@ static unsigned long free_mem_end_ptr; #include "../../../../lib/inflate.c" -static void *malloc(int size) -{ - void *p; - - if (size < 0) - error("Malloc error\n"); - if (free_mem_ptr == 0) - error("Memory error\n"); - - free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ - - p = (void *) free_mem_ptr; - free_mem_ptr += size; - - if (free_mem_ptr >= free_mem_end_ptr) - error("\nOut of memory\n"); - - return p; -} - -static void free(void *where) -{ /* Don't care */ -} - -static void gzip_mark(void **ptr) -{ - *ptr = (void *) free_mem_ptr; -} - -static void gzip_release(void **ptr) -{ - free_mem_ptr = (long) *ptr; -} - void puts(const char *s) { } diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index bc5553b496f..9fea7370647 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -182,8 +182,6 @@ static unsigned outcnt; static int fill_inbuf(void); static void flush_window(void); static void error(char *m); -static void gzip_mark(void **); -static void gzip_release(void **); /* * This is set up by the setup-routine 
at boot-time @@ -196,9 +194,6 @@ extern int input_len; static long bytes_out; -static void *malloc(int size); -static void free(void *where); - static void *memset(void *s, int c, unsigned n); static void *memcpy(void *dest, const void *src, unsigned n); @@ -220,40 +215,6 @@ static int lines, cols; #include "../../../../lib/inflate.c" -static void *malloc(int size) -{ - void *p; - - if (size < 0) - error("Malloc error"); - if (free_mem_ptr <= 0) - error("Memory error"); - - free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ - - p = (void *)free_mem_ptr; - free_mem_ptr += size; - - if (free_mem_ptr >= free_mem_end_ptr) - error("Out of memory"); - - return p; -} - -static void free(void *where) -{ /* Don't care */ -} - -static void gzip_mark(void **ptr) -{ - *ptr = (void *) free_mem_ptr; -} - -static void gzip_release(void **ptr) -{ - free_mem_ptr = (memptr) *ptr; -} - static void scroll(void) { int i; diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c index 470a328d145..fedef93b586 100644 --- a/init/do_mounts_rd.c +++ b/init/do_mounts_rd.c @@ -303,32 +303,11 @@ static int crd_infd, crd_outfd; static int __init fill_inbuf(void); static void __init flush_window(void); -static void __init *malloc(size_t size); -static void __init free(void *where); static void __init error(char *m); -static void __init gzip_mark(void **); -static void __init gzip_release(void **); -#include "../lib/inflate.c" - -static void __init *malloc(size_t size) -{ - return kmalloc(size, GFP_KERNEL); -} - -static void __init free(void *where) -{ - kfree(where); -} - -static void __init gzip_mark(void **ptr) -{ -} - -static void __init gzip_release(void **ptr) -{ -} +#define NO_INFLATE_MALLOC +#include "../lib/inflate.c" /* =========================================================================== * Fill the input buffer. This is called only when the buffer is empty diff --git a/init/initramfs.c b/init/initramfs.c index 8eeeccb328c..644fc01ad5f 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -14,16 +14,6 @@ static void __init error(char *x) message = x; } -static void __init *malloc(size_t size) -{ - return kmalloc(size, GFP_KERNEL); -} - -static void __init free(void *where) -{ - kfree(where); -} - /* link hash */ #define N_ALIGN(len) ((((len) + 1) & ~3) + 2) @@ -407,18 +397,10 @@ static long bytes_out; static void __init flush_window(void); static void __init error(char *m); -static void __init gzip_mark(void **); -static void __init gzip_release(void **); -#include "../lib/inflate.c" +#define NO_INFLATE_MALLOC -static void __init gzip_mark(void **ptr) -{ -} - -static void __init gzip_release(void **ptr) -{ -} +#include "../lib/inflate.c" /* =========================================================================== * Write the output window window[0..outcnt-1] and update crc and bytes_out. 
diff --git a/lib/inflate.c b/lib/inflate.c index 9762294be06..1a8e8a97812 100644 --- a/lib/inflate.c +++ b/lib/inflate.c @@ -230,6 +230,45 @@ STATIC const ush mask_bits[] = { #define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}} #define DUMPBITS(n) {b>>=(n);k-=(n);} +#ifndef NO_INFLATE_MALLOC +/* A trivial malloc implementation, adapted from + * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 + */ + +static unsigned long malloc_ptr; +static int malloc_count; + +static void *malloc(int size) +{ + void *p; + + if (size < 0) + error("Malloc error"); + if (!malloc_ptr) + malloc_ptr = free_mem_ptr; + + malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */ + + p = (void *)malloc_ptr; + malloc_ptr += size; + + if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr) + error("Out of memory"); + + malloc_count++; + return p; +} + +static void free(void *where) +{ + malloc_count--; + if (!malloc_count) + malloc_ptr = free_mem_ptr; +} +#else +#define malloc(a) kmalloc(a, GFP_KERNEL) +#define free(a) kfree(a) +#endif /* Huffman code decoding is performed using a multi-level table lookup. @@ -1045,7 +1084,6 @@ STATIC int INIT inflate(void) int e; /* last block flag */ int r; /* result code */ unsigned h; /* maximum struct huft's malloc'ed */ - void *ptr; /* initialize window, bit buffer */ wp = 0; @@ -1057,12 +1095,12 @@ STATIC int INIT inflate(void) h = 0; do { hufts = 0; - gzip_mark(&ptr); - if ((r = inflate_block(&e)) != 0) { - gzip_release(&ptr); - return r; - } - gzip_release(&ptr); +#ifdef ARCH_HAS_DECOMP_WDOG + arch_decomp_wdog(); +#endif + r = inflate_block(&e); + if (r) + return r; if (hufts > h) h = hufts; } while (!e); -- cgit v1.2.3-70-g09d2 From ef53d9c5e4da147ecaa43c44c5e5945eb83970a2 Mon Sep 17 00:00:00 2001 From: Srinivasa D S Date: Fri, 25 Jul 2008 01:46:04 -0700 Subject: kprobes: improve kretprobe scalability with hashed locking Currently the list of kretprobe instances is stored in the kretprobe object (as used_instances, free_instances) and in the kretprobe hash table. We have one global kretprobe lock to serialise access to these lists. This allows only one kretprobe handler to execute at a time, which hurts system performance, particularly on SMP systems and when return probes are set on a lot of functions (like on all system calls). The solution proposed here uses fine-grained locks that perform better on SMP systems than the present kretprobe implementation. Solution:
1) Instead of having one global lock to protect the kretprobe instances present in the kretprobe object and the kretprobe hash table, we have two locks: one protecting the kretprobe hash table and another protecting the kretprobe object.
2) We hold the lock present in the kretprobe object while we modify a kretprobe instance in the kretprobe object, and we hold the per-hash-list lock while modifying kretprobe instances present in that hash list. To prevent deadlock, we never grab a per-hash-list lock while holding a kretprobe lock.
3) We can remove used_instances from struct kretprobe, as we can track used kretprobe instances using the kretprobe hash table.
Time duration for kernel compilation ("make -j 8") on an 8-way ppc64 system with return probes set on all system calls looks like this.
              cacheline       non-cacheline   Un-patched kernel
              aligned patch   aligned patch
===============================================================================
real          9m46.784s       9m54.412s       10m2.450s
user          40m5.715s       40m7.142s       40m4.273s
sys           2m57.754s       2m58.583s       3m17.430s
===============================================================================

Time duration for kernel compilation ("make -j 8") on the same system, when the kernel is not probed.
=========================
real    9m26.389s
user    40m8.775s
sys     2m7.283s
=========================

Signed-off-by: Srinivasa DS Signed-off-by: Jim Keniston Acked-by: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: David S. Miller Cc: Masami Hiramatsu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/kernel/kprobes.c | 6 +- arch/ia64/kernel/kprobes.c | 6 +- arch/powerpc/kernel/kprobes.c | 6 +- arch/s390/kernel/kprobes.c | 6 +- arch/sparc64/kernel/kprobes.c | 11 ++-- arch/x86/kernel/kprobes.c | 6 +- include/linux/kprobes.h | 7 ++- kernel/kprobes.c | 127 +++++++++++++++++++++++++++++------------- 8 files changed, 108 insertions(+), 67 deletions(-) (limited to 'arch/x86') diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c index 5ee39e10c8d..d28513f14d0 100644 --- a/arch/arm/kernel/kprobes.c +++ b/arch/arm/kernel/kprobes.c @@ -296,8 +296,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; INIT_HLIST_HEAD(&empty_rp); - spin_lock_irqsave(&kretprobe_lock, flags); - head = kretprobe_inst_table_head(current); + kretprobe_hash_lock(current, &head, &flags); /* * It is possible to have multiple instances associated with a given @@ -337,7 +336,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) } kretprobe_assert(ri, orig_ret_address, trampoline_address); - spin_unlock_irqrestore(&kretprobe_lock, flags); + kretprobe_hash_unlock(current, &flags); hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); @@ -347,7 +346,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) return (void *)orig_ret_address; } -/* Called with kretprobe_lock held.
*/ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 233434f4f88..f07688da947 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -429,8 +429,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) ((struct fnptr *)kretprobe_trampoline)->ip; INIT_HLIST_HEAD(&empty_rp); - spin_lock_irqsave(&kretprobe_lock, flags); - head = kretprobe_inst_table_head(current); + kretprobe_hash_lock(current, &head, &flags); /* * It is possible to have multiple instances associated with a given @@ -485,7 +484,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) kretprobe_assert(ri, orig_ret_address, trampoline_address); reset_current_kprobe(); - spin_unlock_irqrestore(&kretprobe_lock, flags); + kretprobe_hash_unlock(current, &flags); preempt_enable_no_resched(); hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { @@ -500,7 +499,6 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) return 1; } -/* Called with kretprobe_lock held */ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 4ba2af12545..de79915452c 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -144,7 +144,6 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, kcb->kprobe_saved_msr = regs->msr; } -/* Called with kretprobe_lock held */ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { @@ -312,8 +311,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; INIT_HLIST_HEAD(&empty_rp); - spin_lock_irqsave(&kretprobe_lock, flags); - head = kretprobe_inst_table_head(current); + kretprobe_hash_lock(current, &head, &flags); /* * It is possible to have multiple instances associated with a given @@ -352,7 +350,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, regs->nip = orig_ret_address; reset_current_kprobe(); - spin_unlock_irqrestore(&kretprobe_lock, flags); + kretprobe_hash_unlock(current, &flags); preempt_enable_no_resched(); hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 288ad490a6d..4f82e5b5f87 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -270,7 +270,6 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, __ctl_store(kcb->kprobe_saved_ctl, 9, 11); } -/* Called with kretprobe_lock held */ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { @@ -377,8 +376,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; INIT_HLIST_HEAD(&empty_rp); - spin_lock_irqsave(&kretprobe_lock, flags); - head = kretprobe_inst_table_head(current); + kretprobe_hash_lock(current, &head, &flags); /* * It is possible to have multiple instances associated with a given @@ -417,7 +415,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE; reset_current_kprobe(); - spin_unlock_irqrestore(&kretprobe_lock, flags); + kretprobe_hash_unlock(current, &flags); preempt_enable_no_resched(); 
hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c index f43b5d75535..201a6e547e4 100644 --- a/arch/sparc64/kernel/kprobes.c +++ b/arch/sparc64/kernel/kprobes.c @@ -478,9 +478,9 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) return 0; } -/* Called with kretprobe_lock held. The value stored in the return - * address register is actually 2 instructions before where the - * callee will return to. Sequences usually look something like this +/* The value stored in the return address register is actually 2 + * instructions before where the callee will return to. + * Sequences usually look something like this * * call some_function <--- return register points here * nop <--- call delay slot @@ -512,8 +512,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; INIT_HLIST_HEAD(&empty_rp); - spin_lock_irqsave(&kretprobe_lock, flags); - head = kretprobe_inst_table_head(current); + kretprobe_hash_lock(current, &head, &flags); /* * It is possible to have multiple instances associated with a given @@ -553,7 +552,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) regs->tnpc = orig_ret_address + 4; reset_current_kprobe(); - spin_unlock_irqrestore(&kretprobe_lock, flags); + kretprobe_hash_unlock(current, &flags); preempt_enable_no_resched(); hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 43c019f85f0..6c27679ec6a 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -431,7 +431,6 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) regs->ip = (unsigned long)p->ainsn.insn; } -/* Called with kretprobe_lock held */ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { @@ -682,8 +681,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; INIT_HLIST_HEAD(&empty_rp); - spin_lock_irqsave(&kretprobe_lock, flags); - head = kretprobe_inst_table_head(current); + kretprobe_hash_lock(current, &head, &flags); /* fixup registers */ #ifdef CONFIG_X86_64 regs->cs = __KERNEL_CS; @@ -732,7 +730,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) kretprobe_assert(ri, orig_ret_address, trampoline_address); - spin_unlock_irqrestore(&kretprobe_lock, flags); + kretprobe_hash_unlock(current, &flags); hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 04a3556bdea..0be7795655f 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -157,11 +157,10 @@ struct kretprobe { int nmissed; size_t data_size; struct hlist_head free_instances; - struct hlist_head used_instances; + spinlock_t lock; }; struct kretprobe_instance { - struct hlist_node uflist; /* either on free list or used list */ struct hlist_node hlist; struct kretprobe *rp; kprobe_opcode_t *ret_addr; @@ -201,7 +200,6 @@ static inline int init_test_probes(void) } #endif /* CONFIG_KPROBES_SANITY_TEST */ -extern spinlock_t kretprobe_lock; extern struct mutex kprobe_mutex; extern int arch_prepare_kprobe(struct kprobe *p); extern void arch_arm_kprobe(struct kprobe *p); @@ -214,6 +212,9 @@ extern void kprobes_inc_nmissed_count(struct 
kprobe *p); /* Get the kprobe at this addr (if any) - called with preemption disabled */ struct kprobe *get_kprobe(void *addr); +void kretprobe_hash_lock(struct task_struct *tsk, + struct hlist_head **head, unsigned long *flags); +void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags); struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk); /* kprobe_running() will just return the current_kprobe on this CPU */ diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 1485ca8d0e0..cb0b3bde361 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -62,6 +62,7 @@ addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name))) #endif +static int kprobes_initialized; static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; @@ -69,8 +70,15 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; static bool kprobe_enabled; DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ -DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */ static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; +static struct { + spinlock_t lock ____cacheline_aligned; +} kretprobe_table_locks[KPROBE_TABLE_SIZE]; + +static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash) +{ + return &(kretprobe_table_locks[hash].lock); +} /* * Normally, functions that we'd want to prohibit kprobes in, are marked @@ -368,26 +376,53 @@ void __kprobes kprobes_inc_nmissed_count(struct kprobe *p) return; } -/* Called with kretprobe_lock held */ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head) { + struct kretprobe *rp = ri->rp; + /* remove rp inst off the rprobe_inst_table */ hlist_del(&ri->hlist); - if (ri->rp) { - /* remove rp inst off the used list */ - hlist_del(&ri->uflist); - /* put rp inst back onto the free list */ - INIT_HLIST_NODE(&ri->uflist); - hlist_add_head(&ri->uflist, &ri->rp->free_instances); + INIT_HLIST_NODE(&ri->hlist); + if (likely(rp)) { + spin_lock(&rp->lock); + hlist_add_head(&ri->hlist, &rp->free_instances); + spin_unlock(&rp->lock); } else /* Unregistering */ hlist_add_head(&ri->hlist, head); } -struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk) +void kretprobe_hash_lock(struct task_struct *tsk, + struct hlist_head **head, unsigned long *flags) +{ + unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); + spinlock_t *hlist_lock; + + *head = &kretprobe_inst_table[hash]; + hlist_lock = kretprobe_table_lock_ptr(hash); + spin_lock_irqsave(hlist_lock, *flags); +} + +void kretprobe_table_lock(unsigned long hash, unsigned long *flags) { - return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]; + spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); + spin_lock_irqsave(hlist_lock, *flags); +} + +void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags) +{ + unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); + spinlock_t *hlist_lock; + + hlist_lock = kretprobe_table_lock_ptr(hash); + spin_unlock_irqrestore(hlist_lock, *flags); +} + +void kretprobe_table_unlock(unsigned long hash, unsigned long *flags) +{ + spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); + spin_unlock_irqrestore(hlist_lock, *flags); } /* @@ -401,17 +436,21 @@ void __kprobes kprobe_flush_task(struct task_struct *tk) struct kretprobe_instance *ri; struct hlist_head *head, empty_rp; struct hlist_node *node, *tmp; - unsigned long flags = 0; + unsigned long hash, flags = 0; - INIT_HLIST_HEAD(&empty_rp); - 
spin_lock_irqsave(&kretprobe_lock, flags); - head = kretprobe_inst_table_head(tk); + if (unlikely(!kprobes_initialized)) + /* Early boot. kretprobe_table_locks not yet initialized. */ + return; + + hash = hash_ptr(tk, KPROBE_HASH_BITS); + head = &kretprobe_inst_table[hash]; + kretprobe_table_lock(hash, &flags); hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { if (ri->task == tk) recycle_rp_inst(ri, &empty_rp); } - spin_unlock_irqrestore(&kretprobe_lock, flags); - + kretprobe_table_unlock(hash, &flags); + INIT_HLIST_HEAD(&empty_rp); hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); kfree(ri); @@ -423,24 +462,29 @@ static inline void free_rp_inst(struct kretprobe *rp) struct kretprobe_instance *ri; struct hlist_node *pos, *next; - hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) { - hlist_del(&ri->uflist); + hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) { + hlist_del(&ri->hlist); kfree(ri); } } static void __kprobes cleanup_rp_inst(struct kretprobe *rp) { - unsigned long flags; + unsigned long flags, hash; struct kretprobe_instance *ri; struct hlist_node *pos, *next; + struct hlist_head *head; + /* No race here */ - spin_lock_irqsave(&kretprobe_lock, flags); - hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) { - ri->rp = NULL; - hlist_del(&ri->uflist); + for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) { + kretprobe_table_lock(hash, &flags); + head = &kretprobe_inst_table[hash]; + hlist_for_each_entry_safe(ri, pos, next, head, hlist) { + if (ri->rp == rp) + ri->rp = NULL; + } + kretprobe_table_unlock(hash, &flags); } - spin_unlock_irqrestore(&kretprobe_lock, flags); free_rp_inst(rp); } @@ -831,32 +875,37 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) { struct kretprobe *rp = container_of(p, struct kretprobe, kp); - unsigned long flags = 0; + unsigned long hash, flags = 0; + struct kretprobe_instance *ri; /*TODO: consider to only swap the RA after the last pre_handler fired */ - spin_lock_irqsave(&kretprobe_lock, flags); + hash = hash_ptr(current, KPROBE_HASH_BITS); + spin_lock_irqsave(&rp->lock, flags); if (!hlist_empty(&rp->free_instances)) { - struct kretprobe_instance *ri; - ri = hlist_entry(rp->free_instances.first, - struct kretprobe_instance, uflist); + struct kretprobe_instance, hlist); + hlist_del(&ri->hlist); + spin_unlock_irqrestore(&rp->lock, flags); + ri->rp = rp; ri->task = current; if (rp->entry_handler && rp->entry_handler(ri, regs)) { - spin_unlock_irqrestore(&kretprobe_lock, flags); + spin_unlock_irqrestore(&rp->lock, flags); return 0; } arch_prepare_kretprobe(ri, regs); /* XXX(hch): why is there no hlist_move_head? 
*/ - hlist_del(&ri->uflist); - hlist_add_head(&ri->uflist, &ri->rp->used_instances); - hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task)); - } else + INIT_HLIST_NODE(&ri->hlist); + kretprobe_table_lock(hash, &flags); + hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]); + kretprobe_table_unlock(hash, &flags); + } else { rp->nmissed++; - spin_unlock_irqrestore(&kretprobe_lock, flags); + spin_unlock_irqrestore(&rp->lock, flags); + } return 0; } @@ -892,7 +941,7 @@ static int __kprobes __register_kretprobe(struct kretprobe *rp, rp->maxactive = NR_CPUS; #endif } - INIT_HLIST_HEAD(&rp->used_instances); + spin_lock_init(&rp->lock); INIT_HLIST_HEAD(&rp->free_instances); for (i = 0; i < rp->maxactive; i++) { inst = kmalloc(sizeof(struct kretprobe_instance) + @@ -901,8 +950,8 @@ static int __kprobes __register_kretprobe(struct kretprobe *rp, free_rp_inst(rp); return -ENOMEM; } - INIT_HLIST_NODE(&inst->uflist); - hlist_add_head(&inst->uflist, &rp->free_instances); + INIT_HLIST_NODE(&inst->hlist); + hlist_add_head(&inst->hlist, &rp->free_instances); } rp->nmissed = 0; @@ -1009,6 +1058,7 @@ static int __init init_kprobes(void) for (i = 0; i < KPROBE_TABLE_SIZE; i++) { INIT_HLIST_HEAD(&kprobe_table[i]); INIT_HLIST_HEAD(&kretprobe_inst_table[i]); + spin_lock_init(&(kretprobe_table_locks[i].lock)); } /* @@ -1050,6 +1100,7 @@ static int __init init_kprobes(void) err = arch_init_kprobes(); if (!err) err = register_die_notifier(&kprobe_exceptions_nb); + kprobes_initialized = (err == 0); if (!err) init_test_probes(); -- cgit v1.2.3-70-g09d2 From 7444a72effa632fcd8edc566f880d96fe213c73b Mon Sep 17 00:00:00 2001 From: Michael Buesch Date: Fri, 25 Jul 2008 01:46:11 -0700 Subject: gpiolib: allow user-selection This patch adds functionality to the gpio-lib subsystem to make it possible to enable the gpio-lib code even if the architecture code didn't request to get it built in. The archtitecture code does still need to implement the gpiolib accessor functions in its asm/gpio.h file. This patch adds the implementations for x86 and PPC. With these changes it is possible to run generic GPIO expansion cards on every architecture that implements the trivial wrapper functions. Support for more architectures can easily be added. 
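[ Side note: the "trivial wrapper functions" an architecture must provide in its asm/gpio.h simply dispatch into the generic gpiolib code. A sketch of their usual shape; the patch's actual include/asm-x86/gpio.h is not shown in this excerpt:

#include <asm-generic/gpio.h>

/* Arch wrappers that defer entirely to gpiolib's generic accessors. */
static inline int gpio_get_value(unsigned int gpio)
{
	return __gpio_get_value(gpio);
}

static inline void gpio_set_value(unsigned int gpio, int value)
{
	__gpio_set_value(gpio, value);
}

static inline int gpio_cansleep(unsigned int gpio)
{
	return __gpio_cansleep(gpio);
}
]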
Signed-off-by: Michael Buesch Cc: Benjamin Herrenschmidt Cc: Stephen Rothwell Cc: David Brownell Cc: Russell King Cc: Haavard Skinnemoen Cc: Jesper Nilsson Cc: Ralf Baechle Cc: Paul Mackerras Cc: Benjamin Herrenschmidt Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Jean Delvare Cc: Samuel Ortiz Cc: Kumar Gala Cc: Sam Ravnborg Cc: Adrian Bunk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/gpio.txt | 12 +++++++- arch/arm/Kconfig | 8 +++--- arch/avr32/Kconfig | 2 +- arch/mips/Kconfig | 2 +- arch/powerpc/Kconfig | 1 + arch/powerpc/platforms/52xx/Kconfig | 2 +- arch/powerpc/sysdev/qe_lib/Kconfig | 2 +- arch/x86/Kconfig | 1 + drivers/Makefile | 2 +- drivers/gpio/Kconfig | 33 ++++++++++++++++++--- drivers/gpio/Makefile | 2 +- drivers/i2c/chips/Kconfig | 2 +- drivers/mfd/Kconfig | 4 +-- drivers/of/Kconfig | 2 +- include/asm-generic/gpio.h | 2 +- include/asm-mips/mach-generic/gpio.h | 2 +- include/asm-powerpc/gpio.h | 4 +-- include/asm-x86/gpio.h | 56 ++++++++++++++++++++++++++++++++++++ 18 files changed, 116 insertions(+), 23 deletions(-) (limited to 'arch/x86') diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt index 8b69811a964..18022e249c5 100644 --- a/Documentation/gpio.txt +++ b/Documentation/gpio.txt @@ -389,11 +389,21 @@ either NULL or the label associated with that GPIO when it was requested. Platform Support ---------------- -To support this framework, a platform's Kconfig will "select HAVE_GPIO_LIB" +To support this framework, a platform's Kconfig will "select" either +ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB and arrange that its includes and defines three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep(). They may also want to provide a custom value for ARCH_NR_GPIOS. +ARCH_REQUIRE_GPIOLIB means that the gpio-lib code will always get compiled +into the kernel on that architecture. + +ARCH_WANT_OPTIONAL_GPIOLIB means the gpio-lib code defaults to off and the user +can enable it and build it into the kernel optionally. + +If neither of these options are selected, the platform does not support +GPIOs through GPIO-lib and the code cannot be enabled by the user. + Trivial implementations of those functions can directly use framework code, which always dispatches through the gpio_chip: diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 6fb4f03369f..dabb015aa40 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -268,7 +268,7 @@ config ARCH_EP93XX select GENERIC_GPIO select HAVE_CLK select HAVE_CLK - select HAVE_GPIO_LIB + select ARCH_REQUIRE_GPIOLIB help This enables support for the Cirrus EP93xx series of CPUs. @@ -447,7 +447,7 @@ config ARCH_PXA select ARCH_MTD_XIP select GENERIC_GPIO select HAVE_CLK - select HAVE_GPIO_LIB + select ARCH_REQUIRE_GPIOLIB select GENERIC_TIME select GENERIC_CLOCKEVENTS select TICK_ONESHOT @@ -479,7 +479,7 @@ config ARCH_SA1100 select GENERIC_CLOCKEVENTS select HAVE_CLK select TICK_ONESHOT - select HAVE_GPIO_LIB + select ARCH_REQUIRE_GPIOLIB help Support for StrongARM 11x0 based boards. 
@@ -522,7 +522,7 @@ config ARCH_OMAP bool "TI OMAP" select GENERIC_GPIO select HAVE_CLK - select HAVE_GPIO_LIB + select ARCH_REQUIRE_GPIOLIB select GENERIC_TIME select GENERIC_CLOCKEVENTS help diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig index df4adefedb4..7c239a91627 100644 --- a/arch/avr32/Kconfig +++ b/arch/avr32/Kconfig @@ -88,7 +88,7 @@ config PLATFORM_AT32AP select SUBARCH_AVR32B select MMU select PERFORMANCE_COUNTERS - select HAVE_GPIO_LIB + select ARCH_REQUIRE_GPIOLIB select GENERIC_ALLOCATOR # diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index b9c754f4070..b4c4eaa5dd2 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -713,7 +713,7 @@ config CSRC_SB1250 config GPIO_TXX9 select GENERIC_GPIO - select HAVE_GPIO_LIB + select ARCH_REQUIRE_GPIOLIB bool config CFE diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index de6b49cd6be..fe88418167c 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -110,6 +110,7 @@ config PPC default y select HAVE_DYNAMIC_FTRACE select HAVE_FTRACE + select ARCH_WANT_OPTIONAL_GPIOLIB select HAVE_IDE select HAVE_IOREMAP_PROT select HAVE_EFFICIENT_UNALIGNED_ACCESS diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig index d664b1bce38..ccbd4958412 100644 --- a/arch/powerpc/platforms/52xx/Kconfig +++ b/arch/powerpc/platforms/52xx/Kconfig @@ -48,6 +48,6 @@ config PPC_MPC5200_BUGFIX config PPC_MPC5200_GPIO bool "MPC5200 GPIO support" depends on PPC_MPC52xx - select HAVE_GPIO_LIB + select ARCH_REQUIRE_GPIOLIB help Enable gpiolib support for mpc5200 based boards diff --git a/arch/powerpc/sysdev/qe_lib/Kconfig b/arch/powerpc/sysdev/qe_lib/Kconfig index 4bb18f57901..1ce546462be 100644 --- a/arch/powerpc/sysdev/qe_lib/Kconfig +++ b/arch/powerpc/sysdev/qe_lib/Kconfig @@ -29,7 +29,7 @@ config QE_GPIO bool "QE GPIO support" depends on QUICC_ENGINE select GENERIC_GPIO - select HAVE_GPIO_LIB + select ARCH_REQUIRE_GPIOLIB help Say Y here if you're going to use hardware that connects to the QE GPIOs. diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 66f3ab05b18..e3cba0b4560 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -23,6 +23,7 @@ config X86 select HAVE_OPROFILE select HAVE_IOREMAP_PROT select HAVE_KPROBES + select ARCH_WANT_OPTIONAL_GPIOLIB if !X86_RDC321X select HAVE_KRETPROBES select HAVE_DYNAMIC_FTRACE select HAVE_FTRACE diff --git a/drivers/Makefile b/drivers/Makefile index 808e0ae66aa..54ec5e718c0 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -5,7 +5,7 @@ # Rewritten to use lists instead of if-statements. # -obj-$(CONFIG_HAVE_GPIO_LIB) += gpio/ +obj-y += gpio/ obj-$(CONFIG_PCI) += pci/ obj-$(CONFIG_PARISC) += parisc/ obj-$(CONFIG_RAPIDIO) += rapidio/ diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index de202dbe530..5a355f82916 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -2,15 +2,40 @@ # GPIO infrastructure and expanders # -config HAVE_GPIO_LIB +config ARCH_WANT_OPTIONAL_GPIOLIB bool + help + Select this config option from the architecture Kconfig, if + it is possible to use gpiolib on the architecture, but let the + user decide whether to actually build it or not. + Select this instead of ARCH_REQUIRE_GPIOLIB, if your architecture does + not depend on GPIOs being available, but rather let the user + decide whether he needs it or not. + +config ARCH_REQUIRE_GPIOLIB + bool + select GPIOLIB help Platforms select gpiolib if they use this infrastructure for all their GPIOs, usually starting with ones integrated into SOC processors. 
+ Selecting this from the architecture code will cause the gpiolib + code to always get built in. + + + +menuconfig GPIOLIB + bool "GPIO Support" + depends on ARCH_WANT_OPTIONAL_GPIOLIB || ARCH_REQUIRE_GPIOLIB + select GENERIC_GPIO + help + This enables GPIO support through the generic GPIO library. + You only need to enable this, if you also want to enable + one or more of the GPIO expansion card drivers below. + + If unsure, say N. -menu "GPIO Support" - depends on HAVE_GPIO_LIB +if GPIOLIB config DEBUG_GPIO bool "Debug GPIO calls" @@ -116,4 +141,4 @@ config GPIO_MCP23S08 SPI driver for Microchip MCP23S08 I/O expander. This provides a GPIO interface supporting inputs and outputs. -endmenu +endif diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index eeb2f2b2028..8c45948d1fe 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -2,7 +2,7 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG -obj-$(CONFIG_HAVE_GPIO_LIB) += gpiolib.o +obj-$(CONFIG_GPIOLIB) += gpiolib.o obj-$(CONFIG_GPIO_MAX7301) += max7301.o obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig index 50e0a465374..a95cb9465d6 100644 --- a/drivers/i2c/chips/Kconfig +++ b/drivers/i2c/chips/Kconfig @@ -126,7 +126,7 @@ config ISP1301_OMAP config TPS65010 tristate "TPS6501x Power Management chips" - depends on HAVE_GPIO_LIB + depends on GPIOLIB default y if MACH_OMAP_H2 || MACH_OMAP_H3 || MACH_OMAP_OSK help If you say yes here you get support for the TPS6501x series of diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index bac9e973ece..1f57a99fd96 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -36,7 +36,7 @@ config MFD_ASIC3 config HTC_EGPIO bool "HTC EGPIO support" - depends on GENERIC_HARDIRQS && HAVE_GPIO_LIB && ARM + depends on GENERIC_HARDIRQS && GPIOLIB && ARM help This driver supports the CPLD egpio chip present on several HTC phones. It provides basic support for input @@ -52,7 +52,7 @@ config HTC_PASIC3 config MFD_TC6393XB bool "Support Toshiba TC6393XB" - depends on HAVE_GPIO_LIB + depends on GPIOLIB select MFD_CORE help Support for Toshiba Mobile IO Controller TC6393XB diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index 3a7a11a75fb..1d7ec312934 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -4,7 +4,7 @@ config OF_DEVICE config OF_GPIO def_bool y - depends on OF && PPC_OF && HAVE_GPIO_LIB + depends on OF && PPC_OF && GPIOLIB help OpenFirmware GPIO accessors diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index 1beff5166e5..a3034d20ebd 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h @@ -3,7 +3,7 @@ #include -#ifdef CONFIG_HAVE_GPIO_LIB +#ifdef CONFIG_GPIOLIB #include diff --git a/include/asm-mips/mach-generic/gpio.h b/include/asm-mips/mach-generic/gpio.h index e6b376bd9d0..b4e70208da6 100644 --- a/include/asm-mips/mach-generic/gpio.h +++ b/include/asm-mips/mach-generic/gpio.h @@ -1,7 +1,7 @@ #ifndef __ASM_MACH_GENERIC_GPIO_H #define __ASM_MACH_GENERIC_GPIO_H -#ifdef CONFIG_HAVE_GPIO_LIB +#ifdef CONFIG_GPIOLIB #define gpio_get_value __gpio_get_value #define gpio_set_value __gpio_set_value #define gpio_cansleep __gpio_cansleep diff --git a/include/asm-powerpc/gpio.h b/include/asm-powerpc/gpio.h index 77ad3a890f3..ea04632399d 100644 --- a/include/asm-powerpc/gpio.h +++ b/include/asm-powerpc/gpio.h @@ -17,7 +17,7 @@ #include #include -#ifdef CONFIG_HAVE_GPIO_LIB +#ifdef CONFIG_GPIOLIB /* * We don't (yet) implement inlined/rapid versions for on-chip gpios. 
@@ -51,6 +51,6 @@ static inline int irq_to_gpio(unsigned int irq) return -EINVAL; } -#endif /* CONFIG_HAVE_GPIO_LIB */ +#endif /* CONFIG_GPIOLIB */ #endif /* __ASM_POWERPC_GPIO_H */ diff --git a/include/asm-x86/gpio.h b/include/asm-x86/gpio.h index ff87fca0caf..116e9147fe6 100644 --- a/include/asm-x86/gpio.h +++ b/include/asm-x86/gpio.h @@ -1,6 +1,62 @@ +/* + * Generic GPIO API implementation for x86. + * + * Derived from the generic GPIO API for powerpc: + * + * Copyright (c) 2007-2008 MontaVista Software, Inc. + * + * Author: Anton Vorontsov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + #ifndef _ASM_I386_GPIO_H #define _ASM_I386_GPIO_H +#ifdef CONFIG_X86_RDC321X #include +#else /* CONFIG_X86_RDC321X */ + +#include + +#ifdef CONFIG_GPIOLIB + +/* + * Just call gpiolib. + */ +static inline int gpio_get_value(unsigned int gpio) +{ + return __gpio_get_value(gpio); +} + +static inline void gpio_set_value(unsigned int gpio, int value) +{ + __gpio_set_value(gpio, value); +} + +static inline int gpio_cansleep(unsigned int gpio) +{ + return __gpio_cansleep(gpio); +} + +/* + * Not implemented, yet. + */ +static inline int gpio_to_irq(unsigned int gpio) +{ + return -ENOSYS; +} + +static inline int irq_to_gpio(unsigned int irq) +{ + return -EINVAL; +} + +#endif /* CONFIG_GPIOLIB */ + +#endif /* CONFIG_X86_RDC321X */ #endif /* _ASM_I386_GPIO_H */ -- cgit v1.2.3-70-g09d2 From 95b68dec0d52c7b8fea3698b3938cf3ab936436b Mon Sep 17 00:00:00 2001 From: Chandru Date: Fri, 25 Jul 2008 01:47:55 -0700 Subject: calgary iommu: use the first kernel's TCE tables in kdump kdump kernel fails to boot with calgary iommu and aacraid driver on an x366 box. The ongoing DMAs of aacraid from the first kernel continue to exist until the driver is loaded in the kdump kernel. Calgary is initialized prior to aacraid, and the creation of new TCE tables causes wrong DMAs to occur. Here we try to get the TCE tables of the first kernel in the kdump kernel and use them. While in the kdump kernel we do not allocate new TCE tables but instead read the base address register contents of calgary iommu and use the tables that the registers point to. With these changes the kdump kernel and hence aacraid now boot normally.
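The core of the approach: any TCE entry the first kernel left non-zero may still be the target of in-flight DMA, so the kdump kernel marks the corresponding IOMMU page as allocated instead of freeing it. A stand-alone model of that bitmap reconstruction (plain C with hypothetical names; the in-kernel version, calgary_init_bitmap_from_tce_table(), is in the diff below):

        #include <stdint.h>
        #include <stddef.h>

        /* Mark every page whose inherited TCE entry is non-zero as in-use,
         * so the allocator never hands it out in the kdump kernel. */
        static void init_bitmap_from_tce_table(const uint64_t *tce, size_t entries,
                                               unsigned long *bitmap)
        {
                const size_t bits = 8 * sizeof(unsigned long);
                size_t i;

                for (i = 0; i < entries; i++)
                        if (tce[i] != 0)
                                bitmap[i / bits] |= 1UL << (i % bits);
        }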
Signed-off-by: Chandru Siddalingappa Acked-by: Muli Ben-Yehuda Cc: Ingo Molnar Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/kernel/pci-calgary_64.c | 85 +++++++++++++++++++++++++++++++++++++--- include/linux/crash_dump.h | 8 ++++ 2 files changed, 87 insertions(+), 6 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index 151f2d171f7..19e7fc7c2c4 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -167,6 +168,8 @@ static void calgary_dump_error_regs(struct iommu_table *tbl); static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev); static void calioc2_tce_cache_blast(struct iommu_table *tbl); static void calioc2_dump_error_regs(struct iommu_table *tbl); +static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl); +static void get_tce_space_from_tar(void); static struct cal_chipset_ops calgary_chip_ops = { .handle_quirks = calgary_handle_quirks, @@ -830,7 +833,11 @@ static int __init calgary_setup_tar(struct pci_dev *dev, void __iomem *bbar) tbl = pci_iommu(dev->bus); tbl->it_base = (unsigned long)bus_info[dev->bus->number].tce_space; - tce_free(tbl, 0, tbl->it_size); + + if (is_kdump_kernel()) + calgary_init_bitmap_from_tce_table(tbl); + else + tce_free(tbl, 0, tbl->it_size); if (is_calgary(dev->device)) tbl->chip_ops = &calgary_chip_ops; @@ -1209,6 +1216,10 @@ static int __init calgary_init(void) if (ret) return ret; + /* Purely for kdump kernel case */ + if (is_kdump_kernel()) + get_tce_space_from_tar(); + do { dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_ANY_ID, dev); if (!dev) @@ -1339,6 +1350,61 @@ static int __init calgary_bus_has_devices(int bus, unsigned short pci_dev) return (val != 0xffffffff); } +/* + * calgary_init_bitmap_from_tce_table(): + * Function for the kdump case. In the second/kdump kernel, initialize + * the bitmap based on the TCE table entries obtained from the first kernel. + */ +static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl) +{ + u64 *tp; + unsigned int index; + tp = ((u64 *)tbl->it_base); + for (index = 0 ; index < tbl->it_size; index++) { + if (*tp != 0x0) + set_bit(index, tbl->it_map); + tp++; + } +} + +/* + * get_tce_space_from_tar(): + * Function for the kdump case. Get the TCE tables from the first kernel + * by reading the contents of the base address register of the calgary iommu. + */ +static void get_tce_space_from_tar(void) +{ + int bus; + void __iomem *target; + unsigned long tce_space; + + for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) { + struct calgary_bus_info *info = &bus_info[bus]; + unsigned short pci_device; + u32 val; + + val = read_pci_config(bus, 0, 0, 0); + pci_device = (val & 0xFFFF0000) >> 16; + + if (!is_cal_pci_dev(pci_device)) + continue; + if (info->translation_disabled) + continue; + + if (calgary_bus_has_devices(bus, pci_device) || + translate_empty_slots) { + target = calgary_reg(bus_info[bus].bbar, + tar_offset(bus)); + tce_space = be64_to_cpu(readq(target)); + tce_space = tce_space & TAR_SW_BITS; + + tce_space = tce_space & (~specified_table_size); + info->tce_space = (u64 *)__va(tce_space); + } + } + return; +} + void __init detect_calgary(void) { int bus; @@ -1394,7 +1460,8 @@ void __init detect_calgary(void) return; } - specified_table_size = determine_tce_table_size(max_pfn * PAGE_SIZE); + specified_table_size = determine_tce_table_size((is_kdump_kernel() ?
+ saved_max_pfn : max_pfn) * PAGE_SIZE); for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) { struct calgary_bus_info *info = &bus_info[bus]; @@ -1412,10 +1479,16 @@ void __init detect_calgary(void) if (calgary_bus_has_devices(bus, pci_device) || translate_empty_slots) { - tbl = alloc_tce_table(); - if (!tbl) - goto cleanup; - info->tce_space = tbl; + /* + * If it is kdump kernel, find and use tce tables + * from first kernel, else allocate tce tables here + */ + if (!is_kdump_kernel()) { + tbl = alloc_tce_table(); + if (!tbl) + goto cleanup; + info->tce_space = tbl; + } calgary_found = 1; } } diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index 22c7ac5cd80..6cd39a927e1 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -22,5 +22,13 @@ extern struct proc_dir_entry *proc_vmcore; #define vmcore_elf_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x)) +static inline int is_kdump_kernel(void) +{ + return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0; +} +#else /* !CONFIG_CRASH_DUMP */ +static inline int is_kdump_kernel(void) { return 0; } #endif /* CONFIG_CRASH_DUMP */ + +extern unsigned long saved_max_pfn; #endif /* LINUX_CRASHDUMP_H */ -- cgit v1.2.3-70-g09d2 From 024e8ac04453b3525448c31ef39848cf675ba6db Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Fri, 25 Jul 2008 20:00:10 -0700 Subject: x86_64: fix ia32 AMD syscall audit fast-path The new code in commit 5cbf1565f29eb57a86a305b08836613508e294d7 has a bug in the version supporting the AMD 'syscall' instruction. It clobbers the user's %ecx register value (with the %ebp value). This change fixes it. Signed-off-by: Roland McGrath --- arch/x86/ia32/ia32entry.S | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index e4bd1793a5e..ffc1bb4fed7 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -201,7 +201,7 @@ sysexit_from_sys_call: movl RDI-ARGOFFSET(%rsp),%r8d /* reload 5th syscall arg */ .endm - .macro auditsys_exit exit + .macro auditsys_exit exit,ebpsave=RBP testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) jnz int_ret_from_sys_call TRACE_IRQS_ON @@ -214,7 +214,7 @@ sysexit_from_sys_call: call audit_syscall_exit GET_THREAD_INFO(%r10) movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */ - movl RBP-ARGOFFSET(%rsp),%ebp /* reload user register value */ + movl \ebpsave-ARGOFFSET(%rsp),%ebp /* reload user register value */ movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi cli TRACE_IRQS_OFF @@ -347,7 +347,7 @@ cstar_auditsys: jmp cstar_dispatch sysretl_audit: - auditsys_exit sysretl_from_sys_call + auditsys_exit sysretl_from_sys_call, RCX /* user %ebp in RCX slot */ #endif cstar_tracesys: -- cgit v1.2.3-70-g09d2 From 1f972768a1df1518f45adb6b8ffbf04fa1c99737 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 26 Jul 2008 13:52:50 +0200 Subject: x86, RDC321x: add to mach-default first step to add RDC321x support to the default PC architecture. 
Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 26 +++++++++++--------------- arch/x86/Makefile | 5 ----- 2 files changed, 11 insertions(+), 20 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index e3cba0b4560..39ae6798595 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -23,7 +23,7 @@ config X86 select HAVE_OPROFILE select HAVE_IOREMAP_PROT select HAVE_KPROBES - select ARCH_WANT_OPTIONAL_GPIOLIB if !X86_RDC321X + select ARCH_WANT_OPTIONAL_GPIOLIB select HAVE_KRETPROBES select HAVE_DYNAMIC_FTRACE select HAVE_FTRACE @@ -332,20 +332,6 @@ config X86_BIGSMP endif -config X86_RDC321X - bool "RDC R-321x SoC" - depends on X86_32 - select M486 - select X86_REBOOTFIXUPS - select GENERIC_GPIO - select LEDS_CLASS - select LEDS_GPIO - select NEW_LEDS - help - This option is needed for RDC R-321x system-on-chip, also known - as R-8610-(G). - If you don't have one of these chips, you should say N here. - config X86_VSMP bool "Support for ScaleMP vSMP" select PARAVIRT @@ -369,6 +355,16 @@ config X86_VISWS A kernel compiled for the Visual Workstation will run on general PCs as well. See for details. +config X86_RDC321X + bool "RDC R-321x SoC" + depends on X86_32 + select M486 + select X86_REBOOTFIXUPS + help + This option is needed for RDC R-321x system-on-chip, also known + as R-8610-(G). + If you don't have one of these chips, you should say N here. + config SCHED_NO_NO_OMIT_FRAME_POINTER def_bool y prompt "Single-depth WCHAN output" diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 919ce21ea65..f5631da585b 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -118,11 +118,6 @@ mflags-$(CONFIG_X86_GENERICARCH):= -Iinclude/asm-x86/mach-generic fcore-$(CONFIG_X86_GENERICARCH) += arch/x86/mach-generic/ mcore-$(CONFIG_X86_GENERICARCH) := arch/x86/mach-default/ -# RDC R-321x subarch support -mflags-$(CONFIG_X86_RDC321X) := -Iinclude/asm-x86/mach-rdc321x -mcore-$(CONFIG_X86_RDC321X) := arch/x86/mach-default/ -core-$(CONFIG_X86_RDC321X) += arch/x86/mach-rdc321x/ - # default subarch .h files mflags-y += -Iinclude/asm-x86/mach-default -- cgit v1.2.3-70-g09d2 From a8132e5fe2c4f3f780b8bd3cce7851640f79f1c7 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 25 Jul 2008 14:57:59 +0200 Subject: x86, AMD IOMMU: replace to_pages macro with iommu_num_pages This patch removes the to_pages macro from AMD IOMMU code and calls the generic iommu_num_pages function instead. 
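For context, the macro being removed and the generic helper compute the same thing: the number of IOMMU pages spanned by the byte range [addr, addr + size), taking the offset of addr within its first page into account. A stand-alone model of that computation (4 KB pages assumed here purely for illustration):

        #include <stdint.h>
        #include <stddef.h>

        #define PAGE_SHIFT 12
        #define PAGE_SIZE  (1UL << PAGE_SHIFT)

        /* Round the in-page offset plus the length up to whole pages. */
        static unsigned long num_pages(uint64_t addr, size_t size)
        {
                uint64_t offset = addr & (PAGE_SIZE - 1);

                return (offset + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        }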
Signed-off-by: Joerg Roedel Cc: iommu@lists.linux-foundation.org Cc: bhavna.sarathy@amd.com Cc: robert.richter@amd.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/amd_iommu.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index c25210e6ac8..79eff735fde 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -29,9 +29,6 @@ #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) -#define to_pages(addr, size) \ - (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT) - #define EXIT_LOOP_COUNT 10000000 static DEFINE_RWLOCK(amd_iommu_devtable_lock); @@ -185,7 +182,7 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid, u64 address, size_t size) { int s = 0; - unsigned pages = to_pages(address, size); + unsigned pages = iommu_num_pages(address, size); address &= PAGE_MASK; @@ -557,8 +554,8 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu, if (iommu->exclusion_start && iommu->exclusion_start < dma_dom->aperture_size) { unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT; - int pages = to_pages(iommu->exclusion_start, - iommu->exclusion_length); + int pages = iommu_num_pages(iommu->exclusion_start, + iommu->exclusion_length); dma_ops_reserve_addresses(dma_dom, startpage, pages); } @@ -767,7 +764,7 @@ static dma_addr_t __map_single(struct device *dev, unsigned int pages; int i; - pages = to_pages(paddr, size); + pages = iommu_num_pages(paddr, size); paddr &= PAGE_MASK; address = dma_ops_alloc_addresses(dev, dma_dom, pages); @@ -802,7 +799,7 @@ static void __unmap_single(struct amd_iommu *iommu, if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size)) return; - pages = to_pages(dma_addr, size); + pages = iommu_num_pages(dma_addr, size); dma_addr &= PAGE_MASK; start = dma_addr; -- cgit v1.2.3-70-g09d2 From 87e39ea5714dd59ba31e36c25833d2b20255a29d Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 25 Jul 2008 14:58:00 +0200 Subject: x86 gart: replace to_pages macro with iommu_num_pages This patch removes the to_pages macro from x86 GART code and calls the generic iommu_num_pages function instead. 
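The GART code below feeds scatterlist offsets and lengths into this same computation, which is where the offset term matters most: even a short buffer needs two mappings if it straddles a page boundary. Using the num_pages() sketch shown after the previous patch's description:

        /* 16 bytes starting 8 bytes before a page boundary span two pages: */
        unsigned long n = num_pages(0xff8, 16);
        /* (0xff8 + 0x10 + 0xfff) >> 12 == 2 */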
Signed-off-by: Joerg Roedel Cc: iommu@lists.linux-foundation.org Cc: bhavna.sarathy@amd.com Cc: robert.richter@amd.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/pci-gart_64.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index df5f142657d..c9824043156 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c @@ -67,9 +67,6 @@ static u32 gart_unmapped_entry; (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT) #define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28)) -#define to_pages(addr, size) \ - (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT) - #define EMERGENCY_PAGES 32 /* = 128KB */ #ifdef CONFIG_AGP @@ -241,7 +238,7 @@ nonforced_iommu(struct device *dev, unsigned long addr, size_t size) static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, size_t size, int dir) { - unsigned long npages = to_pages(phys_mem, size); + unsigned long npages = iommu_num_pages(phys_mem, size); unsigned long iommu_page = alloc_iommu(dev, npages); int i; @@ -304,7 +301,7 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr, return; iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT; - npages = to_pages(dma_addr, size); + npages = iommu_num_pages(dma_addr, size); for (i = 0; i < npages; i++) { iommu_gatt_base[iommu_page + i] = gart_unmapped_entry; CLEAR_LEAK(iommu_page + i); @@ -387,7 +384,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start, } addr = phys_addr; - pages = to_pages(s->offset, s->length); + pages = iommu_num_pages(s->offset, s->length); while (pages--) { iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr); SET_LEAK(iommu_page); @@ -470,7 +467,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) seg_size += s->length; need = nextneed; - pages += to_pages(s->offset, s->length); + pages += iommu_num_pages(s->offset, s->length); ps = s; } if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0) -- cgit v1.2.3-70-g09d2 From 3a61ec387c9092dfc91a5959145d36835a72fc4c Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 25 Jul 2008 13:07:50 +0200 Subject: x86, AMD IOMMU: include amd_iommu_last_bdf in device initialization All the values read while searching for amd_iommu_last_bdf are defined as inclusive. Let the code handle this value as such. Found by Wei Wang. Thanks Wei. Signed-off-by: Joerg Roedel Cc: iommu@lists.linux-foundation.org Cc: bhavna.sarathy@amd.com Cc: robert.richter@amd.com Cc: Wei Wang Signed-off-by: Ingo Molnar --- arch/x86/kernel/amd_iommu.c | 4 ++-- arch/x86/kernel/amd_iommu_init.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index c25210e6ac8..74697408576 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -667,7 +667,7 @@ static int get_device_resources(struct device *dev, _bdf = calc_devid(pcidev->bus->number, pcidev->devfn); /* device not translated by any IOMMU in the system? 
*/ - if (_bdf >= amd_iommu_last_bdf) { + if (_bdf > amd_iommu_last_bdf) { *iommu = NULL; *domain = NULL; *bdf = 0xffff; @@ -1085,7 +1085,7 @@ void prealloc_protection_domains(void) while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { devid = (dev->bus->number << 8) | dev->devfn; - if (devid >= amd_iommu_last_bdf) + if (devid > amd_iommu_last_bdf) continue; devid = amd_iommu_alias_table[devid]; if (domain_for_device(devid)) diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index c9d8ff2eb13..d9a9da597e7 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c @@ -732,7 +732,7 @@ static int __init init_exclusion_range(struct ivmd_header *m) set_device_exclusion_range(m->devid, m); break; case ACPI_IVMD_TYPE_ALL: - for (i = 0; i < amd_iommu_last_bdf; ++i) + for (i = 0; i <= amd_iommu_last_bdf; ++i) set_device_exclusion_range(i, m); break; case ACPI_IVMD_TYPE_RANGE: @@ -934,7 +934,7 @@ int __init amd_iommu_init(void) /* * let all alias entries point to itself */ - for (i = 0; i < amd_iommu_last_bdf; ++i) + for (i = 0; i <= amd_iommu_last_bdf; ++i) amd_iommu_alias_table[i] = i; /* -- cgit v1.2.3-70-g09d2 From 6524d938b3360504b43a1278b5a8403e85383d1a Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Thu, 24 Jul 2008 18:21:30 -0700 Subject: cpumask: put cpumask_of_cpu_map in the initdata section * Create the cpumask_of_cpu_map statically in the init data section using NR_CPUS but replace it during boot up with one sized by nr_cpu_ids (num possible cpus). Signed-off-by: Mike Travis Cc: Andrew Morton Cc: Jack Steiner Cc: Rusty Russell Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup_percpu.c | 10 ++++++---- kernel/cpu.c | 8 +++++--- 2 files changed, 11 insertions(+), 7 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index f7745f94c00..1cd53dfcd30 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -81,10 +81,12 @@ static void __init setup_per_cpu_maps(void) } #ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP -cpumask_t *cpumask_of_cpu_map __read_mostly; -EXPORT_SYMBOL(cpumask_of_cpu_map); - -/* requires nr_cpu_ids to be initialized */ +/* + * Replace static cpumask_of_cpu_map in the initdata section, + * with one that's allocated sized by the possible number of cpus. + * + * (requires nr_cpu_ids to be initialized) + */ static void __init setup_cpumask_of_cpu(void) { int i; diff --git a/kernel/cpu.c b/kernel/cpu.c index fe31ff3d380..9d4e1c28c05 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -462,7 +462,6 @@ out: #endif /* CONFIG_SMP */ -#ifndef CONFIG_HAVE_CPUMASK_OF_CPU_MAP /* 64 bits of zeros, for initializers. */ #if BITS_PER_LONG == 32 #define Z64 0, 0 @@ -509,7 +508,11 @@ out: /* We want this statically initialized, just to be safe. We try not * to waste too much space, either. 
*/ -static const cpumask_t cpumask_map[] = { +static const cpumask_t cpumask_map[] +#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP +__initdata +#endif += { CMI0(0), CMI0(1), CMI0(2), CMI0(3), #if NR_CPUS > 4 CMI0(4), CMI0(5), CMI0(6), CMI0(7), @@ -569,4 +572,3 @@ static const cpumask_t cpumask_map[] = { }; const cpumask_t *cpumask_of_cpu_map = cpumask_map; -#endif /* !CONFIG_HAVE_CPUMASK_OF_CPU_MAP */ -- cgit v1.2.3-70-g09d2 From 0bc3cc03fa6e1c20aecb5a33356bcaae410640b9 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Thu, 24 Jul 2008 18:21:31 -0700 Subject: cpumask: change cpumask_of_cpu_ptr to use new cpumask_of_cpu * Replace previous instances of the cpumask_of_cpu_ptr* macros with the new (lvalue capable) generic cpumask_of_cpu(). Signed-off-by: Mike Travis Cc: Andrew Morton Cc: Jack Steiner Cc: Rusty Russell Signed-off-by: Ingo Molnar --- arch/x86/kernel/acpi/cstate.c | 3 +-- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 10 +++------- arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 15 +++++----------- arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 12 ++++--------- arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | 3 +-- arch/x86/kernel/cpu/intel_cacheinfo.c | 3 +-- arch/x86/kernel/ldt.c | 6 ++---- arch/x86/kernel/microcode.c | 17 +++++------------ arch/x86/kernel/reboot.c | 11 +++-------- drivers/acpi/processor_throttling.c | 11 +++-------- drivers/firmware/dcdbas.c | 3 +-- drivers/misc/sgi-xp/xpc_main.c | 3 +-- kernel/stop_machine.c | 3 +-- kernel/time/tick-common.c | 8 +++----- kernel/trace/trace_sysprof.c | 4 +--- lib/smp_processor_id.c | 5 +---- net/sunrpc/svc.c | 3 +-- 17 files changed, 37 insertions(+), 83 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index 9220cf46aa1..c2502eb9aa8 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -73,7 +73,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, struct cpuinfo_x86 *c = &cpu_data(cpu); cpumask_t saved_mask; - cpumask_of_cpu_ptr(new_mask, cpu); int retval; unsigned int eax, ebx, ecx, edx; unsigned int edx_part; @@ -92,7 +91,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, /* Make sure we are running on right CPU */ saved_mask = current->cpus_allowed; - retval = set_cpus_allowed_ptr(current, new_mask); + retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); if (retval) return -1; diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index ff2fff56f0a..dd097b83583 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -200,12 +200,10 @@ static void drv_read(struct drv_cmd *cmd) static void drv_write(struct drv_cmd *cmd) { cpumask_t saved_mask = current->cpus_allowed; - cpumask_of_cpu_ptr_declare(cpu_mask); unsigned int i; for_each_cpu_mask_nr(i, cmd->mask) { - cpumask_of_cpu_ptr_next(cpu_mask, i); - set_cpus_allowed_ptr(current, cpu_mask); + set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); do_drv_write(cmd); } @@ -269,12 +267,11 @@ static unsigned int get_measured_perf(unsigned int cpu) } aperf_cur, mperf_cur; cpumask_t saved_mask; - cpumask_of_cpu_ptr(cpu_mask, cpu); unsigned int perf_percent; unsigned int retval; saved_mask = current->cpus_allowed; - set_cpus_allowed_ptr(current, cpu_mask); + set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); if (get_cpu() != cpu) { /* We were not able to run on requested processor */ put_cpu(); @@ -340,7 +337,6 @@ static unsigned int get_measured_perf(unsigned int cpu) static unsigned int
get_cur_freq_on_cpu(unsigned int cpu) { - cpumask_of_cpu_ptr(cpu_mask, cpu); struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu); unsigned int freq; unsigned int cached_freq; @@ -353,7 +349,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) } cached_freq = data->freq_table[data->acpi_data->state].frequency; - freq = extract_freq(get_cur_val(cpu_mask), data); + freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data); if (freq != cached_freq) { /* * The dreaded BIOS frequency change behind our back. diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 53c7b693697..c45ca6d4dce 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -479,12 +479,11 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi static int check_supported_cpu(unsigned int cpu) { cpumask_t oldmask; - cpumask_of_cpu_ptr(cpu_mask, cpu); u32 eax, ebx, ecx, edx; unsigned int rc = 0; oldmask = current->cpus_allowed; - set_cpus_allowed_ptr(current, cpu_mask); + set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); if (smp_processor_id() != cpu) { printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu); @@ -1017,7 +1016,6 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation) { cpumask_t oldmask; - cpumask_of_cpu_ptr(cpu_mask, pol->cpu); struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); u32 checkfid; u32 checkvid; @@ -1032,7 +1030,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi /* only run on specific CPU from here on */ oldmask = current->cpus_allowed; - set_cpus_allowed_ptr(current, cpu_mask); + set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu)); if (smp_processor_id() != pol->cpu) { printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); @@ -1107,7 +1105,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) { struct powernow_k8_data *data; cpumask_t oldmask; - cpumask_of_cpu_ptr_declare(newmask); int rc; if (!cpu_online(pol->cpu)) @@ -1159,8 +1156,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) /* only run on specific CPU from here on */ oldmask = current->cpus_allowed; - cpumask_of_cpu_ptr_next(newmask, pol->cpu); - set_cpus_allowed_ptr(current, newmask); + set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu)); if (smp_processor_id() != pol->cpu) { printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); @@ -1182,7 +1178,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) set_cpus_allowed_ptr(current, &oldmask); if (cpu_family == CPU_HW_PSTATE) - pol->cpus = *newmask; + pol->cpus = cpumask_of_cpu(pol->cpu); else pol->cpus = per_cpu(cpu_core_map, pol->cpu); data->available_cores = &(pol->cpus); @@ -1248,7 +1244,6 @@ static unsigned int powernowk8_get (unsigned int cpu) { struct powernow_k8_data *data; cpumask_t oldmask = current->cpus_allowed; - cpumask_of_cpu_ptr(newmask, cpu); unsigned int khz = 0; unsigned int first; @@ -1258,7 +1253,7 @@ static unsigned int powernowk8_get (unsigned int cpu) if (!data) return -EINVAL; - set_cpus_allowed_ptr(current, newmask); + set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); if (smp_processor_id() != cpu) { printk(KERN_ERR PFX "limiting to CPU %d failed in powernowk8_get\n", cpu); diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 
b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index ca2ac13b7af..15e13c01cc3 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c @@ -324,10 +324,9 @@ static unsigned int get_cur_freq(unsigned int cpu) unsigned l, h; unsigned clock_freq; cpumask_t saved_mask; - cpumask_of_cpu_ptr(new_mask, cpu); saved_mask = current->cpus_allowed; - set_cpus_allowed_ptr(current, new_mask); + set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); if (smp_processor_id() != cpu) return 0; @@ -585,15 +584,12 @@ static int centrino_target (struct cpufreq_policy *policy, * Best effort undo.. */ - if (!cpus_empty(*covered_cpus)) { - cpumask_of_cpu_ptr_declare(new_mask); - + if (!cpus_empty(*covered_cpus)) for_each_cpu_mask_nr(j, *covered_cpus) { - cpumask_of_cpu_ptr_next(new_mask, j); - set_cpus_allowed_ptr(current, new_mask); + set_cpus_allowed_ptr(current, + &cpumask_of_cpu(j)); wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); } - } tmp = freqs.new; freqs.new = freqs.old; diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c index 2f3728dc24f..191f7263c61 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c @@ -244,8 +244,7 @@ static unsigned int _speedstep_get(const cpumask_t *cpus) static unsigned int speedstep_get(unsigned int cpu) { - cpumask_of_cpu_ptr(newmask, cpu); - return _speedstep_get(newmask); + return _speedstep_get(&cpumask_of_cpu(cpu)); } /** diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 650d40f7912..6b0a10b002f 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -516,7 +516,6 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) unsigned long j; int retval; cpumask_t oldmask; - cpumask_of_cpu_ptr(newmask, cpu); if (num_cache_leaves == 0) return -ENOENT; @@ -527,7 +526,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) return -ENOMEM; oldmask = current->cpus_allowed; - retval = set_cpus_allowed_ptr(current, newmask); + retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); if (retval) goto out; diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 3fee2aa50f3..b68e21f06f4 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -62,12 +62,10 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload) if (reload) { #ifdef CONFIG_SMP - cpumask_of_cpu_ptr_declare(mask); - preempt_disable(); load_LDT(pc); - cpumask_of_cpu_ptr_next(mask, smp_processor_id()); - if (!cpus_equal(current->mm->cpu_vm_mask, *mask)) + if (!cpus_equal(current->mm->cpu_vm_mask, + cpumask_of_cpu(smp_processor_id()))) smp_call_function(flush_ldt, current->mm, 1); preempt_enable(); #else diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c index 6994c751590..652fa5c38eb 100644 --- a/arch/x86/kernel/microcode.c +++ b/arch/x86/kernel/microcode.c @@ -388,7 +388,6 @@ static int do_microcode_update (void) void *new_mc = NULL; int cpu; cpumask_t old; - cpumask_of_cpu_ptr_declare(newmask); old = current->cpus_allowed; @@ -405,8 +404,7 @@ static int do_microcode_update (void) if (!uci->valid) continue; - cpumask_of_cpu_ptr_next(newmask, cpu); - set_cpus_allowed_ptr(current, newmask); + set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); error = get_maching_microcode(new_mc, cpu); if (error < 0) goto out; @@ -576,7 +574,6 @@ static int apply_microcode_check_cpu(int cpu) struct cpuinfo_x86 *c = 
&cpu_data(cpu); struct ucode_cpu_info *uci = ucode_cpu_info + cpu; cpumask_t old; - cpumask_of_cpu_ptr(newmask, cpu); unsigned int val[2]; int err = 0; @@ -585,7 +582,7 @@ static int apply_microcode_check_cpu(int cpu) return 0; old = current->cpus_allowed; - set_cpus_allowed_ptr(current, newmask); + set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); /* Check if the microcode we have in memory matches the CPU */ if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || @@ -623,12 +620,11 @@ static int apply_microcode_check_cpu(int cpu) static void microcode_init_cpu(int cpu, int resume) { cpumask_t old; - cpumask_of_cpu_ptr(newmask, cpu); struct ucode_cpu_info *uci = ucode_cpu_info + cpu; old = current->cpus_allowed; - set_cpus_allowed_ptr(current, newmask); + set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); mutex_lock(µcode_mutex); collect_cpu_info(cpu); if (uci->valid && system_state == SYSTEM_RUNNING && !resume) @@ -661,13 +657,10 @@ static ssize_t reload_store(struct sys_device *dev, if (end == buf) return -EINVAL; if (val == 1) { - cpumask_t old; - cpumask_of_cpu_ptr(newmask, cpu); - - old = current->cpus_allowed; + cpumask_t old = current->cpus_allowed; get_online_cpus(); - set_cpus_allowed_ptr(current, newmask); + set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); mutex_lock(µcode_mutex); if (uci->valid) diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 06a9f643817..724adfc63cb 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -414,25 +414,20 @@ void native_machine_shutdown(void) /* The boot cpu is always logical cpu 0 */ int reboot_cpu_id = 0; - cpumask_of_cpu_ptr(newmask, reboot_cpu_id); #ifdef CONFIG_X86_32 /* See if there has been given a command line override */ if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) && - cpu_online(reboot_cpu)) { + cpu_online(reboot_cpu)) reboot_cpu_id = reboot_cpu; - cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id); - } #endif /* Make certain the cpu I'm about to reboot on is online */ - if (!cpu_online(reboot_cpu_id)) { + if (!cpu_online(reboot_cpu_id)) reboot_cpu_id = smp_processor_id(); - cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id); - } /* Make certain I only run on the appropriate processor */ - set_cpus_allowed_ptr(current, newmask); + set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id)); /* O.K Now that I'm on the appropriate processor, * stop all of the others. diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index a2c3f9cfa54..a56fc6c4394 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -827,7 +827,6 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) static int acpi_processor_get_throttling(struct acpi_processor *pr) { cpumask_t saved_mask; - cpumask_of_cpu_ptr_declare(new_mask); int ret; if (!pr) @@ -839,8 +838,7 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr) * Migrate task to the cpu pointed by pr. 
*/ saved_mask = current->cpus_allowed; - cpumask_of_cpu_ptr_next(new_mask, pr->id); - set_cpus_allowed_ptr(current, new_mask); + set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); ret = pr->throttling.acpi_processor_get_throttling(pr); /* restore the previous state */ set_cpus_allowed_ptr(current, &saved_mask); @@ -989,7 +987,6 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr, int acpi_processor_set_throttling(struct acpi_processor *pr, int state) { cpumask_t saved_mask; - cpumask_of_cpu_ptr_declare(new_mask); int ret = 0; unsigned int i; struct acpi_processor *match_pr; @@ -1028,8 +1025,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) * it can be called only for the cpu pointed by pr. */ if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { - cpumask_of_cpu_ptr_next(new_mask, pr->id); - set_cpus_allowed_ptr(current, new_mask); + set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); ret = p_throttling->acpi_processor_set_throttling(pr, t_state.target_state); } else { @@ -1060,8 +1056,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) continue; } t_state.cpu = i; - cpumask_of_cpu_ptr_next(new_mask, i); - set_cpus_allowed_ptr(current, new_mask); + set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); ret = match_pr->throttling. acpi_processor_set_throttling( match_pr, t_state.target_state); diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c index c66817e7717..50a071f1c94 100644 --- a/drivers/firmware/dcdbas.c +++ b/drivers/firmware/dcdbas.c @@ -245,7 +245,6 @@ static ssize_t host_control_on_shutdown_store(struct device *dev, static int smi_request(struct smi_cmd *smi_cmd) { cpumask_t old_mask; - cpumask_of_cpu_ptr(new_mask, 0); int ret = 0; if (smi_cmd->magic != SMI_CMD_MAGIC) { @@ -256,7 +255,7 @@ static int smi_request(struct smi_cmd *smi_cmd) /* SMI requires CPU 0 */ old_mask = current->cpus_allowed; - set_cpus_allowed_ptr(current, new_mask); + set_cpus_allowed_ptr(current, &cpumask_of_cpu(0)); if (smp_processor_id() != 0) { dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n", __func__); diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index 579b01ff82d..c3b4227f48a 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -229,11 +229,10 @@ xpc_hb_checker(void *ignore) int last_IRQ_count = 0; int new_IRQ_count; int force_IRQ = 0; - cpumask_of_cpu_ptr(cpumask, XPC_HB_CHECK_CPU); /* this thread was marked active by xpc_hb_init() */ - set_cpus_allowed_ptr(current, cpumask); + set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU)); /* set our heartbeating to other partitions into motion */ xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 738b411ff2d..ba9b2054ecb 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -33,9 +33,8 @@ static int stopmachine(void *cpu) { int irqs_disabled = 0; int prepared = 0; - cpumask_of_cpu_ptr(cpumask, (int)(long)cpu); - set_cpus_allowed_ptr(current, cpumask); + set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu)); /* Ack: we are alive */ smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. 
*/ diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index bf43284d685..80c4336f418 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -196,12 +196,10 @@ static int tick_check_new_device(struct clock_event_device *newdev) struct tick_device *td; int cpu, ret = NOTIFY_OK; unsigned long flags; - cpumask_of_cpu_ptr_declare(cpumask); spin_lock_irqsave(&tick_device_lock, flags); cpu = smp_processor_id(); - cpumask_of_cpu_ptr_next(cpumask, cpu); if (!cpu_isset(cpu, newdev->cpumask)) goto out_bc; @@ -209,7 +207,7 @@ static int tick_check_new_device(struct clock_event_device *newdev) curdev = td->evtdev; /* cpu local device ? */ - if (!cpus_equal(newdev->cpumask, *cpumask)) { + if (!cpus_equal(newdev->cpumask, cpumask_of_cpu(cpu))) { /* * If the cpu affinity of the device interrupt can not @@ -222,7 +220,7 @@ static int tick_check_new_device(struct clock_event_device *newdev) * If we have a cpu local device already, do not replace it * by a non cpu local device */ - if (curdev && cpus_equal(curdev->cpumask, *cpumask)) + if (curdev && cpus_equal(curdev->cpumask, cpumask_of_cpu(cpu))) goto out_bc; } @@ -254,7 +252,7 @@ static int tick_check_new_device(struct clock_event_device *newdev) curdev = NULL; } clockevents_exchange_device(curdev, newdev); - tick_setup_device(td, newdev, cpu, cpumask); + tick_setup_device(td, newdev, cpu, &cpumask_of_cpu(cpu)); if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) tick_oneshot_notify(); diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c index ce2d723c10e..bb948e52ce2 100644 --- a/kernel/trace/trace_sysprof.c +++ b/kernel/trace/trace_sysprof.c @@ -213,9 +213,7 @@ static void start_stack_timers(void) int cpu; for_each_online_cpu(cpu) { - cpumask_of_cpu_ptr(new_mask, cpu); - - set_cpus_allowed_ptr(current, new_mask); + set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); start_stack_timer(cpu); } set_cpus_allowed_ptr(current, &saved_mask); diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index c4381d9516f..0f8fc22ed10 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c @@ -11,7 +11,6 @@ notrace unsigned int debug_smp_processor_id(void) { unsigned long preempt_count = preempt_count(); int this_cpu = raw_smp_processor_id(); - cpumask_of_cpu_ptr_declare(this_mask); if (likely(preempt_count)) goto out; @@ -23,9 +22,7 @@ notrace unsigned int debug_smp_processor_id(void) * Kernel threads bound to a single CPU can safely use * smp_processor_id(): */ - cpumask_of_cpu_ptr_next(this_mask, this_cpu); - - if (cpus_equal(current->cpus_allowed, *this_mask)) + if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu))) goto out; /* diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 835d2741308..5a32cb7c4bb 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -310,8 +310,7 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx) switch (m->mode) { case SVC_POOL_PERCPU: { - cpumask_of_cpu_ptr(cpumask, node); - set_cpus_allowed_ptr(task, cpumask); + set_cpus_allowed_ptr(task, &cpumask_of_cpu(node)); break; } case SVC_POOL_PERNODE: -- cgit v1.2.3-70-g09d2 From 8d8bb39b9eba32dd70e87fd5ad5c5dd4ba118e06 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Fri, 25 Jul 2008 19:44:49 -0700 Subject: dma-mapping: add the device argument to dma_mapping_error() Add per-device dma_mapping_ops support for CONFIG_X86_64 as POWER architecture does: This enables us to cleanly fix the Calgary IOMMU issue that some devices are not behind the IOMMU (http://lkml.org/lkml/2008/5/8/423). 
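From a driver's point of view the change is one extra argument to the error check; behind it, the architecture can dispatch DMA operations per device. A sketch of both halves (illustrative only: get_dma_ops() matches the x86 hunks further below, while the archdata field is the pointer the description mentions and its exact name is assumed here):

        /* Drivers now pass the device when checking for a failed mapping: */
        static int send_buf(struct device *dev, void *buf, size_t len)
        {
                dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

                if (dma_mapping_error(dev, handle)) /* was: dma_mapping_error(handle) */
                        return -ENOMEM;
                /* ... hand 'handle' to the hardware ... */
                return 0;
        }

        /* ...which lets the arch pick per-device ops, falling back to the
         * system-wide dma_ops pointer when no override is installed: */
        static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
        {
                if (dev && dev->archdata.dma_ops)
                        return dev->archdata.dma_ops;
                return dma_ops;
        }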
I think that per-device dma_mapping_ops support would also be helpful for KVM people to support PCI passthrough but Andi thinks that this makes it difficult to support the PCI passthrough (see the above thread). So I CC'ed this to KVM camp. Comments are appreciated. A pointer to dma_mapping_ops to struct dev_archdata is added. If the pointer is non-NULL, DMA operations in asm/dma-mapping.h use it. If it's NULL, the system-wide dma_ops pointer is used as before. If it's useful for KVM people, I plan to implement a mechanism to register a hook called when a new pci (or dma capable) device is created (it works with hot plugging). It enables IOMMUs to set up an appropriate dma_mapping_ops per device. The major obstacle is that dma_mapping_error doesn't take a pointer to the device unlike other DMA operations. So x86 can't have dma_mapping_ops per device. Note all the POWER IOMMUs use the same dma_mapping_error function so this is not a problem for POWER but x86 IOMMUs use different dma_mapping_error functions. The first patch adds the device argument to dma_mapping_error. The patch is trivial but large since it touches lots of drivers and dma-mapping.h in all architectures. This patch: dma_mapping_error() doesn't take a pointer to the device unlike other DMA operations. So we can't have dma_mapping_ops per device. Note that POWER already has dma_mapping_ops per device but all the POWER IOMMUs use the same dma_mapping_error function. x86 IOMMUs use the device argument. [akpm@linux-foundation.org: fix sge] [akpm@linux-foundation.org: fix svc_rdma] [akpm@linux-foundation.org: build fix] [akpm@linux-foundation.org: fix bnx2x] [akpm@linux-foundation.org: fix s2io] [akpm@linux-foundation.org: fix pasemi_mac] [akpm@linux-foundation.org: fix sdhci] [akpm@linux-foundation.org: build fix] [akpm@linux-foundation.org: fix sparc] [akpm@linux-foundation.org: fix ibmvscsi] Signed-off-by: FUJITA Tomonori Cc: Muli Ben-Yehuda Cc: Andi Kleen Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Avi Kivity Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/DMA-API.txt | 4 +- arch/arm/common/dmabounce.c | 2 +- arch/ia64/hp/common/hwsw_iommu.c | 5 +- arch/ia64/hp/common/sba_iommu.c | 2 +- arch/ia64/sn/pci/pci_dma.c | 2 +- arch/mips/mm/dma-default.c | 2 +- arch/powerpc/platforms/cell/celleb_scc_pciex.c | 2 +- arch/powerpc/platforms/cell/spider-pci.c | 2 +- arch/powerpc/platforms/iseries/mf.c | 2 +- arch/x86/kernel/pci-calgary_64.c | 2 +- arch/x86/kernel/pci-dma.c | 27 ++++--- arch/x86/kernel/pci-gart_64.c | 3 +- arch/x86/kernel/pci-nommu.c | 14 +--- arch/x86/kernel/pci-swiotlb_64.c | 2 +- drivers/firewire/fw-iso.c | 2 +- drivers/firewire/fw-ohci.c | 2 +- drivers/firewire/fw-sbp2.c | 8 +-- drivers/infiniband/hw/ipath/ipath_sdma.c | 2 +- drivers/infiniband/hw/ipath/ipath_user_sdma.c | 6 +- drivers/infiniband/hw/mthca/mthca_eq.c | 2 +- drivers/media/dvb/pluto2/pluto2.c | 2 +- drivers/mmc/host/sdhci.c | 4 +- drivers/net/arm/ep93xx_eth.c | 4 +- drivers/net/bnx2x_main.c | 4 +- drivers/net/cxgb3/sge.c | 2 +- drivers/net/e100.c | 2 +- drivers/net/e1000e/ethtool.c | 4 +- drivers/net/e1000e/netdev.c | 11 +-- drivers/net/ibmveth.c | 38 +++++----- drivers/net/iseries_veth.c | 4 +- drivers/net/mlx4/eq.c | 2 +- drivers/net/pasemi_mac.c | 6 +- drivers/net/qla3xxx.c | 12 ++-- drivers/net/s2io.c | 48 +++++++------ drivers/net/sfc/rx.c | 4 +- drivers/net/sfc/tx.c | 7 +- drivers/net/spider_net.c | 4 +- drivers/net/tc35815.c | 4 +- drivers/net/wireless/ath5k/base.c | 4 +- drivers/scsi/ibmvscsi/ibmvfc.c | 4 +-
drivers/scsi/ibmvscsi/ibmvscsi.c | 4 +- drivers/scsi/ibmvscsi/ibmvstgt.c | 2 +- drivers/scsi/ibmvscsi/rpa_vscsi.c | 2 +- drivers/spi/atmel_spi.c | 4 +- drivers/spi/au1550_spi.c | 6 +- drivers/spi/omap2_mcspi.c | 4 +- drivers/spi/pxa2xx_spi.c | 4 +- drivers/spi/spi_imx.c | 6 +- include/asm-alpha/dma-mapping.h | 6 +- include/asm-alpha/pci.h | 2 +- include/asm-arm/dma-mapping.h | 2 +- include/asm-avr32/dma-mapping.h | 2 +- include/asm-cris/dma-mapping.h | 2 +- include/asm-frv/dma-mapping.h | 2 +- include/asm-generic/dma-mapping-broken.h | 2 +- include/asm-generic/dma-mapping.h | 4 +- include/asm-generic/pci-dma-compat.h | 4 +- include/asm-ia64/machvec.h | 2 +- include/asm-m68k/dma-mapping.h | 2 +- include/asm-mips/dma-mapping.h | 2 +- include/asm-mn10300/dma-mapping.h | 2 +- include/asm-parisc/dma-mapping.h | 2 +- include/asm-powerpc/dma-mapping.h | 2 +- include/asm-sh/dma-mapping.h | 2 +- include/asm-sparc/dma-mapping_64.h | 2 +- include/asm-sparc/pci_32.h | 3 +- include/asm-sparc/pci_64.h | 5 +- include/asm-x86/device.h | 3 + include/asm-x86/dma-mapping.h | 99 ++++++++++++++++++-------- include/asm-x86/swiotlb.h | 2 +- include/asm-xtensa/dma-mapping.h | 2 +- include/linux/i2o.h | 2 +- include/linux/ssb/ssb.h | 4 +- include/rdma/ib_verbs.h | 2 +- lib/swiotlb.c | 4 +- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 3 +- 76 files changed, 256 insertions(+), 210 deletions(-) (limited to 'arch/x86') diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt index 80d150458c8..d8b63d164e4 100644 --- a/Documentation/DMA-API.txt +++ b/Documentation/DMA-API.txt @@ -298,10 +298,10 @@ recommended that you never use these unless you really know what the cache width is. int -dma_mapping_error(dma_addr_t dma_addr) +dma_mapping_error(struct device *dev, dma_addr_t dma_addr) int -pci_dma_mapping_error(dma_addr_t dma_addr) +pci_dma_mapping_error(struct pci_dev *hwdev, dma_addr_t dma_addr) In some circumstances dma_map_single and dma_map_page will fail to create a mapping. 
A driver can check for these errors by testing the returned diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index dd294734260..69130f36590 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -280,7 +280,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, /* * Trying to unmap an invalid mapping */ - if (dma_mapping_error(dma_addr)) { + if (dma_mapping_error(dev, dma_addr)) { dev_err(dev, "Trying to unmap invalid mapping\n"); return; } diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c index 1c44ec2a1d5..88b6e6f3fd8 100644 --- a/arch/ia64/hp/common/hwsw_iommu.c +++ b/arch/ia64/hp/common/hwsw_iommu.c @@ -186,9 +186,10 @@ hwsw_dma_supported (struct device *dev, u64 mask) } int -hwsw_dma_mapping_error (dma_addr_t dma_addr) +hwsw_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - return hwiommu_dma_mapping_error (dma_addr) || swiotlb_dma_mapping_error(dma_addr); + return hwiommu_dma_mapping_error(dev, dma_addr) || + swiotlb_dma_mapping_error(dev, dma_addr); } EXPORT_SYMBOL(hwsw_dma_mapping_error); diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 34421aed1e2..4956be40d7b 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -2147,7 +2147,7 @@ sba_dma_supported (struct device *dev, u64 mask) } int -sba_dma_mapping_error (dma_addr_t dma_addr) +sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return 0; } diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index 52175af299a..53ebb648449 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c @@ -350,7 +350,7 @@ void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, } EXPORT_SYMBOL(sn_dma_sync_sg_for_device); -int sn_dma_mapping_error(dma_addr_t dma_addr) +int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return 0; } diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index ae39dd88b9a..891312f8e5a 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -348,7 +348,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele EXPORT_SYMBOL(dma_sync_sg_for_device); -int dma_mapping_error(dma_addr_t dma_addr) +int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return 0; } diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c index 0e04f8fb152..3e7e0f1568e 100644 --- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c +++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c @@ -281,7 +281,7 @@ static int __init scc_pciex_iowa_init(struct iowa_bus *bus, void *data) dummy_page_da = dma_map_single(bus->phb->parent, dummy_page_va, PAGE_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(dummy_page_da)) { + if (dma_mapping_error(bus->phb->parent, dummy_page_da)) { pr_err("PCIEX:Map dummy page failed.\n"); kfree(dummy_page_va); return -1; diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c index 418b605ac35..5122ec14527 100644 --- a/arch/powerpc/platforms/cell/spider-pci.c +++ b/arch/powerpc/platforms/cell/spider-pci.c @@ -111,7 +111,7 @@ static int __init spiderpci_pci_setup_chip(struct pci_controller *phb, dummy_page_da = dma_map_single(phb->parent, dummy_page_va, PAGE_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(dummy_page_da)) { + if (dma_mapping_error(phb->parent, dummy_page_da)) { pr_err("SPIDER-IOWA:Map dummy page filed.\n"); 
kfree(dummy_page_va); return -1; diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c index 1dc7295746d..731d7b15774 100644 --- a/arch/powerpc/platforms/iseries/mf.c +++ b/arch/powerpc/platforms/iseries/mf.c @@ -871,7 +871,7 @@ static int proc_mf_dump_cmdline(char *page, char **start, off_t off, count = 256 - off; dma_addr = iseries_hv_map(page, off + count, DMA_FROM_DEVICE); - if (dma_mapping_error(dma_addr)) + if (dma_mapping_error(NULL, dma_addr)) return -ENOMEM; memset(page, 0, off + count); memset(&vsp_cmd, 0, sizeof(vsp_cmd)); diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index 19e7fc7c2c4..1eb86be93d7 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c @@ -544,7 +544,7 @@ error: return ret; } -static const struct dma_mapping_ops calgary_dma_ops = { +static struct dma_mapping_ops calgary_dma_ops = { .alloc_coherent = calgary_alloc_coherent, .map_single = calgary_map_single, .unmap_single = calgary_unmap_single, diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index cbecb05551b..37544123896 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -11,7 +11,7 @@ static int forbid_dac __read_mostly; -const struct dma_mapping_ops *dma_ops; +struct dma_mapping_ops *dma_ops; EXPORT_SYMBOL(dma_ops); static int iommu_sac_force __read_mostly; @@ -312,6 +312,8 @@ static int dma_release_coherent(struct device *dev, int order, void *vaddr) int dma_supported(struct device *dev, u64 mask) { + struct dma_mapping_ops *ops = get_dma_ops(dev); + #ifdef CONFIG_PCI if (mask > 0xffffffff && forbid_dac > 0) { dev_info(dev, "PCI: Disallowing DAC for device\n"); @@ -319,8 +321,8 @@ int dma_supported(struct device *dev, u64 mask) } #endif - if (dma_ops->dma_supported) - return dma_ops->dma_supported(dev, mask); + if (ops->dma_supported) + return ops->dma_supported(dev, mask); /* Copied from i386. Doesn't make much sense, because it will only work for pci_alloc_coherent. 
@@ -367,6 +369,7 @@ void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { + struct dma_mapping_ops *ops = get_dma_ops(dev); void *memory = NULL; struct page *page; unsigned long dma_mask = 0; @@ -435,8 +438,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, /* Let low level make its own zone decisions */ gfp &= ~(GFP_DMA32|GFP_DMA); - if (dma_ops->alloc_coherent) - return dma_ops->alloc_coherent(dev, size, + if (ops->alloc_coherent) + return ops->alloc_coherent(dev, size, dma_handle, gfp); return NULL; } @@ -448,14 +451,14 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, } } - if (dma_ops->alloc_coherent) { + if (ops->alloc_coherent) { free_pages((unsigned long)memory, get_order(size)); gfp &= ~(GFP_DMA|GFP_DMA32); - return dma_ops->alloc_coherent(dev, size, dma_handle, gfp); + return ops->alloc_coherent(dev, size, dma_handle, gfp); } - if (dma_ops->map_simple) { - *dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory), + if (ops->map_simple) { + *dma_handle = ops->map_simple(dev, virt_to_phys(memory), size, PCI_DMA_BIDIRECTIONAL); if (*dma_handle != bad_dma_address) @@ -477,12 +480,14 @@ EXPORT_SYMBOL(dma_alloc_coherent); void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t bus) { + struct dma_mapping_ops *ops = get_dma_ops(dev); + int order = get_order(size); WARN_ON(irqs_disabled()); /* for portability */ if (dma_release_coherent(dev, order, vaddr)) return; - if (dma_ops->unmap_single) - dma_ops->unmap_single(dev, bus, size, 0); + if (ops->unmap_single) + ops->unmap_single(dev, bus, size, 0); free_pages((unsigned long)vaddr, order); } EXPORT_SYMBOL(dma_free_coherent); diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index df5f142657d..744126e6495 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c @@ -692,8 +692,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info) extern int agp_amd64_init(void); -static const struct dma_mapping_ops gart_dma_ops = { - .mapping_error = NULL, +static struct dma_mapping_ops gart_dma_ops = { .map_single = gart_map_single, .map_simple = gart_map_simple, .unmap_single = gart_unmap_single, diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index 792b9179eff..3f91f71cdc3 100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c @@ -72,21 +72,9 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, return nents; } -/* Make sure we keep the same behaviour */ -static int nommu_mapping_error(dma_addr_t dma_addr) -{ -#ifdef CONFIG_X86_32 - return 0; -#else - return (dma_addr == bad_dma_address); -#endif -} - - -const struct dma_mapping_ops nommu_dma_ops = { +struct dma_mapping_ops nommu_dma_ops = { .map_single = nommu_map_single, .map_sg = nommu_map_sg, - .mapping_error = nommu_mapping_error, .is_phys = 1, }; diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c index 20df839b9c2..c4ce0332759 100644 --- a/arch/x86/kernel/pci-swiotlb_64.c +++ b/arch/x86/kernel/pci-swiotlb_64.c @@ -18,7 +18,7 @@ swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size, return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction); } -const struct dma_mapping_ops swiotlb_dma_ops = { +struct dma_mapping_ops swiotlb_dma_ops = { .mapping_error = swiotlb_dma_mapping_error, .alloc_coherent = swiotlb_alloc_coherent, .free_coherent = swiotlb_free_coherent, diff --git 
a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c index bcbe794a3ea..e14c03dc006 100644 --- a/drivers/firewire/fw-iso.c +++ b/drivers/firewire/fw-iso.c @@ -50,7 +50,7 @@ fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, address = dma_map_page(card->device, buffer->pages[i], 0, PAGE_SIZE, direction); - if (dma_mapping_error(address)) { + if (dma_mapping_error(card->device, address)) { __free_page(buffer->pages[i]); goto out_pages; } diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c index 333b12544dd..566672e0bcf 100644 --- a/drivers/firewire/fw-ohci.c +++ b/drivers/firewire/fw-ohci.c @@ -953,7 +953,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet) payload_bus = dma_map_single(ohci->card.device, packet->payload, packet->payload_length, DMA_TO_DEVICE); - if (dma_mapping_error(payload_bus)) { + if (dma_mapping_error(ohci->card.device, payload_bus)) { packet->ack = RCODE_SEND_ERROR; return -1; } diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c index 53fc5a641e6..aaff50ebba1 100644 --- a/drivers/firewire/fw-sbp2.c +++ b/drivers/firewire/fw-sbp2.c @@ -543,7 +543,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, orb->response_bus = dma_map_single(device->card->device, &orb->response, sizeof(orb->response), DMA_FROM_DEVICE); - if (dma_mapping_error(orb->response_bus)) + if (dma_mapping_error(device->card->device, orb->response_bus)) goto fail_mapping_response; orb->request.response.high = 0; @@ -577,7 +577,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, orb->base.request_bus = dma_map_single(device->card->device, &orb->request, sizeof(orb->request), DMA_TO_DEVICE); - if (dma_mapping_error(orb->base.request_bus)) + if (dma_mapping_error(device->card->device, orb->base.request_bus)) goto fail_mapping_request; sbp2_send_orb(&orb->base, lu, node_id, generation, @@ -1424,7 +1424,7 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device, orb->page_table_bus = dma_map_single(device->card->device, orb->page_table, sizeof(orb->page_table), DMA_TO_DEVICE); - if (dma_mapping_error(orb->page_table_bus)) + if (dma_mapping_error(device->card->device, orb->page_table_bus)) goto fail_page_table; /* @@ -1509,7 +1509,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) orb->base.request_bus = dma_map_single(device->card->device, &orb->request, sizeof(orb->request), DMA_TO_DEVICE); - if (dma_mapping_error(orb->base.request_bus)) + if (dma_mapping_error(device->card->device, orb->base.request_bus)) goto out; sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, lu->generation, diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c index eaba03273e4..284c9bca517 100644 --- a/drivers/infiniband/hw/ipath/ipath_sdma.c +++ b/drivers/infiniband/hw/ipath/ipath_sdma.c @@ -698,7 +698,7 @@ retry: addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr, tx->map_len, DMA_TO_DEVICE); - if (dma_mapping_error(addr)) { + if (dma_mapping_error(&dd->pcidev->dev, addr)) { ret = -EIO; goto unlock; } diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c index 86e016916cd..82d9a0b5ca2 100644 --- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c +++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd, dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len, DMA_TO_DEVICE); - if 
(dma_mapping_error(dma_addr)) { + if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) { ret = -ENOMEM; goto free_unmap; } @@ -301,7 +301,7 @@ static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd, pages[j], 0, flen, DMA_TO_DEVICE); unsigned long fofs = addr & ~PAGE_MASK; - if (dma_mapping_error(dma_addr)) { + if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) { ret = -ENOMEM; goto done; } @@ -508,7 +508,7 @@ static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd, if (page) { dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len, DMA_TO_DEVICE); - if (dma_mapping_error(dma_addr)) { + if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) { ret = -ENOMEM; goto free_pbc; } diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c index 4e36aa7cb3d..cc6858f0b65 100644 --- a/drivers/infiniband/hw/mthca/mthca_eq.c +++ b/drivers/infiniband/hw/mthca/mthca_eq.c @@ -780,7 +780,7 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt) return -ENOMEM; dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(dev->eq_table.icm_dma)) { + if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) { __free_page(dev->eq_table.icm_page); return -ENOMEM; } diff --git a/drivers/media/dvb/pluto2/pluto2.c b/drivers/media/dvb/pluto2/pluto2.c index 1360403b88b..a9653c63f4d 100644 --- a/drivers/media/dvb/pluto2/pluto2.c +++ b/drivers/media/dvb/pluto2/pluto2.c @@ -242,7 +242,7 @@ static int __devinit pluto_dma_map(struct pluto *pluto) pluto->dma_addr = pci_map_single(pluto->pdev, pluto->dma_buf, TS_DMA_BYTES, PCI_DMA_FROMDEVICE); - return pci_dma_mapping_error(pluto->dma_addr); + return pci_dma_mapping_error(pluto->pdev, pluto->dma_addr); } static void pluto_dma_unmap(struct pluto *pluto) diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index c3a5db72ddd..5f95e10229b 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -337,7 +337,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, host->align_addr = dma_map_single(mmc_dev(host->mmc), host->align_buffer, 128 * 4, direction); - if (dma_mapping_error(host->align_addr)) + if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) goto fail; BUG_ON(host->align_addr & 0x3); @@ -439,7 +439,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, host->adma_addr = dma_map_single(mmc_dev(host->mmc), host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE); - if (dma_mapping_error(host->align_addr)) + if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) goto unmap_entries; BUG_ON(host->adma_addr & 0x3); diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c index 7a14980f347..18d3eeb7eab 100644 --- a/drivers/net/arm/ep93xx_eth.c +++ b/drivers/net/arm/ep93xx_eth.c @@ -482,7 +482,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep) goto err; d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(d)) { + if (dma_mapping_error(NULL, d)) { free_page((unsigned long)page); goto err; } @@ -505,7 +505,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep) goto err; d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE); - if (dma_mapping_error(d)) { + if (dma_mapping_error(NULL, d)) { free_page((unsigned long)page); goto err; } diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c index 0263bef9cc6..c7cc760a177 100644 --- a/drivers/net/bnx2x_main.c +++ b/drivers/net/bnx2x_main.c @@ -1020,7 +1020,7 @@ static inline 
int bnx2x_alloc_rx_sge(struct bnx2x *bp, mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE); - if (unlikely(dma_mapping_error(mapping))) { + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { __free_pages(page, PAGES_PER_SGE_SHIFT); return -ENOMEM; } @@ -1048,7 +1048,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); - if (unlikely(dma_mapping_error(mapping))) { + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { dev_kfree_skb(skb); return -ENOMEM; } diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index a96331c875e..1b0861d73ab 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -386,7 +386,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len, dma_addr_t mapping; mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(mapping))) + if (unlikely(pci_dma_mapping_error(pdev, mapping))) return -ENOMEM; pci_unmap_addr_set(sd, dma_addr, mapping); diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 1037b133231..19d32a227be 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -1790,7 +1790,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(rx->dma_addr)) { + if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) { dev_kfree_skb_any(rx->skb); rx->skb = NULL; rx->dma_addr = 0; diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index a14561f40db..9350564065e 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c @@ -1090,7 +1090,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) tx_ring->buffer_info[i].dma = pci_map_single(pdev, skb->data, skb->len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(tx_ring->buffer_info[i].dma)) { + if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) { ret_val = 4; goto err_nomem; } @@ -1153,7 +1153,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) rx_ring->buffer_info[i].dma = pci_map_single(pdev, skb->data, 2048, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(rx_ring->buffer_info[i].dma)) { + if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) { ret_val = 8; goto err_nomem; } diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 9c0f56b3c51..d1367789976 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -195,7 +195,7 @@ map_skb: buffer_info->dma = pci_map_single(pdev, skb->data, adapter->rx_buffer_len, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(buffer_info->dma)) { + if (pci_dma_mapping_error(pdev, buffer_info->dma)) { dev_err(&pdev->dev, "RX DMA map failed\n"); adapter->rx_dma_failed++; break; @@ -265,7 +265,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, ps_page->page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(ps_page->dma)) { + if (pci_dma_mapping_error(pdev, ps_page->dma)) { dev_err(&adapter->pdev->dev, "RX DMA page map failed\n"); adapter->rx_dma_failed++; @@ -300,7 +300,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, buffer_info->dma = pci_map_single(pdev, skb->data, adapter->rx_ps_bsize0, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(buffer_info->dma)) { + if (pci_dma_mapping_error(pdev, buffer_info->dma)) { dev_err(&pdev->dev, "RX DMA map 
failed\n"); adapter->rx_dma_failed++; /* cleanup skb */ @@ -3344,7 +3344,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, skb->data + offset, size, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(buffer_info->dma)) { + if (pci_dma_mapping_error(adapter->pdev, buffer_info->dma)) { dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); adapter->tx_dma_failed++; return -1; @@ -3382,7 +3382,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter, offset, size, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(buffer_info->dma)) { + if (pci_dma_mapping_error(adapter->pdev, + buffer_info->dma)) { dev_err(&adapter->pdev->dev, "TX DMA page map failed\n"); adapter->tx_dma_failed++; diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index e5a6e2e8454..91ec9fdc718 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c @@ -260,7 +260,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, pool->buff_size, DMA_FROM_DEVICE); - if (dma_mapping_error(dma_addr)) + if (dma_mapping_error((&adapter->vdev->dev, dma_addr)) goto failure; pool->free_map[free_index] = IBM_VETH_INVALID_MAP; @@ -294,7 +294,7 @@ failure: pool->consumer_index = pool->size - 1; else pool->consumer_index--; - if (!dma_mapping_error(dma_addr)) + if (!dma_mapping_error((&adapter->vdev->dev, dma_addr)) dma_unmap_single(&adapter->vdev->dev, pool->dma_addr[index], pool->buff_size, DMA_FROM_DEVICE); @@ -448,11 +448,11 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter) static void ibmveth_cleanup(struct ibmveth_adapter *adapter) { int i; + struct device *dev = &adapter->vdev->dev; if(adapter->buffer_list_addr != NULL) { - if(!dma_mapping_error(adapter->buffer_list_dma)) { - dma_unmap_single(&adapter->vdev->dev, - adapter->buffer_list_dma, 4096, + if (!dma_mapping_error(dev, adapter->buffer_list_dma)) { + dma_unmap_single(dev, adapter->buffer_list_dma, 4096, DMA_BIDIRECTIONAL); adapter->buffer_list_dma = DMA_ERROR_CODE; } @@ -461,9 +461,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) } if(adapter->filter_list_addr != NULL) { - if(!dma_mapping_error(adapter->filter_list_dma)) { - dma_unmap_single(&adapter->vdev->dev, - adapter->filter_list_dma, 4096, + if (!dma_mapping_error(dev, adapter->filter_list_dma)) { + dma_unmap_single(dev, adapter->filter_list_dma, 4096, DMA_BIDIRECTIONAL); adapter->filter_list_dma = DMA_ERROR_CODE; } @@ -472,8 +471,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) } if(adapter->rx_queue.queue_addr != NULL) { - if(!dma_mapping_error(adapter->rx_queue.queue_dma)) { - dma_unmap_single(&adapter->vdev->dev, + if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) { + dma_unmap_single(dev, adapter->rx_queue.queue_dma, adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL); @@ -535,6 +534,7 @@ static int ibmveth_open(struct net_device *netdev) int rc; union ibmveth_buf_desc rxq_desc; int i; + struct device *dev; ibmveth_debug_printk("open starting\n"); @@ -563,17 +563,19 @@ static int ibmveth_open(struct net_device *netdev) return -ENOMEM; } - adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev, + dev = &adapter->vdev->dev; + + adapter->buffer_list_dma = dma_map_single(dev, adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL); - adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev, + adapter->filter_list_dma = dma_map_single(dev, adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL); - adapter->rx_queue.queue_dma = 
dma_map_single(&adapter->vdev->dev, + adapter->rx_queue.queue_dma = dma_map_single(dev, adapter->rx_queue.queue_addr, adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL); - if((dma_mapping_error(adapter->buffer_list_dma) ) || - (dma_mapping_error(adapter->filter_list_dma)) || - (dma_mapping_error(adapter->rx_queue.queue_dma))) { + if ((dma_mapping_error(dev, adapter->buffer_list_dma)) || + (dma_mapping_error(dev, adapter->filter_list_dma)) || + (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) { ibmveth_error_printk("unable to map filter or buffer list pages\n"); ibmveth_cleanup(adapter); napi_disable(&adapter->napi); @@ -645,7 +647,7 @@ static int ibmveth_open(struct net_device *netdev) adapter->bounce_buffer_dma = dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); - if (dma_mapping_error(adapter->bounce_buffer_dma)) { + if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { ibmveth_error_printk("unable to map bounce buffer\n"); ibmveth_cleanup(adapter); napi_disable(&adapter->napi); @@ -922,7 +924,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) buf[1] = 0; } - if (dma_mapping_error(data_dma_addr)) { + if (dma_mapping_error((&adapter->vdev->dev, data_dma_addr)) { if (!firmware_has_feature(FW_FEATURE_CMO)) ibmveth_error_printk("tx: unable to map xmit buffer\n"); skb_copy_from_linear_data(skb, adapter->bounce_buffer, diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c index b8d0639c1cd..c46864d626b 100644 --- a/drivers/net/iseries_veth.c +++ b/drivers/net/iseries_veth.c @@ -1128,7 +1128,7 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp, msg->data.addr[0] = dma_map_single(port->dev, skb->data, skb->len, DMA_TO_DEVICE); - if (dma_mapping_error(msg->data.addr[0])) + if (dma_mapping_error(port->dev, msg->data.addr[0])) goto recycle_and_drop; msg->dev = port->dev; @@ -1226,7 +1226,7 @@ static void veth_recycle_msg(struct veth_lpar_connection *cnx, dma_address = msg->data.addr[0]; dma_length = msg->data.len[0]; - if (!dma_mapping_error(dma_address)) + if (!dma_mapping_error(msg->dev, dma_address)) dma_unmap_single(msg->dev, dma_address, dma_length, DMA_TO_DEVICE); diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c index ea3a09aaa84..7df928d3a3d 100644 --- a/drivers/net/mlx4/eq.c +++ b/drivers/net/mlx4/eq.c @@ -526,7 +526,7 @@ int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt) return -ENOMEM; priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(priv->eq_table.icm_dma)) { + if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) { __free_page(priv->eq_table.icm_page); return -ENOMEM; } diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index 993d87c9296..edc0fd58898 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c @@ -650,7 +650,7 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev, mac->bufsz - LOCAL_SKB_ALIGN, PCI_DMA_FROMDEVICE); - if (unlikely(dma_mapping_error(dma))) { + if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) { dev_kfree_skb_irq(info->skb); break; } @@ -1519,7 +1519,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb), PCI_DMA_TODEVICE); map_size[0] = skb_headlen(skb); - if (dma_mapping_error(map[0])) + if (pci_dma_mapping_error(mac->dma_pdev, map[0])) goto out_err_nolock; for (i = 0; i < 
nfrags; i++) { @@ -1529,7 +1529,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) frag->page_offset, frag->size, PCI_DMA_TODEVICE); map_size[i+1] = frag->size; - if (dma_mapping_error(map[i+1])) { + if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) { nfrags = i; goto out_err_nolock; } diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index e7d48a352be..e82b37bbd6c 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c @@ -328,7 +328,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, qdev->lrg_buffer_len - QL_HEADER_SPACE, PCI_DMA_FROMDEVICE); - err = pci_dma_mapping_error(map); + err = pci_dma_mapping_error(qdev->pdev, map); if(err) { printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", qdev->ndev->name, err); @@ -1919,7 +1919,7 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev) QL_HEADER_SPACE, PCI_DMA_FROMDEVICE); - err = pci_dma_mapping_error(map); + err = pci_dma_mapping_error(qdev->pdev, map); if(err) { printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", qdev->ndev->name, err); @@ -2454,7 +2454,7 @@ static int ql_send_map(struct ql3_adapter *qdev, */ map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); - err = pci_dma_mapping_error(map); + err = pci_dma_mapping_error(qdev->pdev, map); if(err) { printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", qdev->ndev->name, err); @@ -2487,7 +2487,7 @@ static int ql_send_map(struct ql3_adapter *qdev, sizeof(struct oal), PCI_DMA_TODEVICE); - err = pci_dma_mapping_error(map); + err = pci_dma_mapping_error(qdev->pdev, map); if(err) { printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n", @@ -2514,7 +2514,7 @@ static int ql_send_map(struct ql3_adapter *qdev, frag->page_offset, frag->size, PCI_DMA_TODEVICE); - err = pci_dma_mapping_error(map); + err = pci_dma_mapping_error(qdev->pdev, map); if(err) { printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n", qdev->ndev->name, err); @@ -2916,7 +2916,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) QL_HEADER_SPACE, PCI_DMA_FROMDEVICE); - err = pci_dma_mapping_error(map); + err = pci_dma_mapping_error(qdev->pdev, map); if(err) { printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", qdev->ndev->name, err); diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 9dae40ccf04..86d77d05190 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -2512,8 +2512,8 @@ static void stop_nic(struct s2io_nic *nic) * Return Value: * SUCCESS on success or an appropriate -ve value on failure. 
*/ - -static int fill_rx_buffers(struct ring_info *ring, int from_card_up) +static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, + int from_card_up) { struct sk_buff *skb; struct RxD_t *rxdp; @@ -2602,7 +2602,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up) rxdp1->Buffer0_ptr = pci_map_single (ring->pdev, skb->data, size - NET_IP_ALIGN, PCI_DMA_FROMDEVICE); - if(pci_dma_mapping_error(rxdp1->Buffer0_ptr)) + if (pci_dma_mapping_error(nic->pdev, + rxdp1->Buffer0_ptr)) goto pci_map_failed; rxdp->Control_2 = @@ -2636,7 +2637,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up) rxdp3->Buffer0_ptr = pci_map_single(ring->pdev, ba->ba_0, BUF0_LEN, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) + if (pci_dma_mapping_error(nic->pdev, + rxdp3->Buffer0_ptr)) goto pci_map_failed; } else pci_dma_sync_single_for_device(ring->pdev, @@ -2655,7 +2657,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up) (ring->pdev, skb->data, ring->mtu + 4, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(rxdp3->Buffer2_ptr)) + if (pci_dma_mapping_error(nic->pdev, + rxdp3->Buffer2_ptr)) goto pci_map_failed; if (from_card_up) { @@ -2664,8 +2667,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up) ba->ba_1, BUF1_LEN, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error - (rxdp3->Buffer1_ptr)) { + if (pci_dma_mapping_error(nic->pdev, + rxdp3->Buffer1_ptr)) { pci_unmap_single (ring->pdev, (dma_addr_t)(unsigned long) @@ -2806,9 +2809,9 @@ static void free_rx_buffers(struct s2io_nic *sp) } } -static int s2io_chk_rx_buffers(struct ring_info *ring) +static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring) { - if (fill_rx_buffers(ring, 0) == -ENOMEM) { + if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) { DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); } @@ -2848,7 +2851,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget) return 0; pkts_processed = rx_intr_handler(ring, budget); - s2io_chk_rx_buffers(ring); + s2io_chk_rx_buffers(nic, ring); if (pkts_processed < budget_org) { netif_rx_complete(dev, napi); @@ -2882,7 +2885,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget) for (i = 0; i < config->rx_ring_num; i++) { ring = &mac_control->rings[i]; ring_pkts_processed = rx_intr_handler(ring, budget); - s2io_chk_rx_buffers(ring); + s2io_chk_rx_buffers(nic, ring); pkts_processed += ring_pkts_processed; budget -= ring_pkts_processed; if (budget <= 0) @@ -2939,7 +2942,8 @@ static void s2io_netpoll(struct net_device *dev) rx_intr_handler(&mac_control->rings[i], 0); for (i = 0; i < config->rx_ring_num; i++) { - if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) { + if (fill_rx_buffers(nic, &mac_control->rings[i], 0) == + -ENOMEM) { DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n"); break; @@ -4235,14 +4239,14 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) txdp->Buffer_Pointer = pci_map_single(sp->pdev, fifo->ufo_in_band_v, sizeof(u64), PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(txdp->Buffer_Pointer)) + if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) goto pci_map_failed; txdp++; } txdp->Buffer_Pointer = pci_map_single (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(txdp->Buffer_Pointer)) + if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) goto pci_map_failed; txdp->Host_Control = (unsigned long) 
skb; @@ -4345,7 +4349,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) netif_rx_schedule(dev, &ring->napi); } else { rx_intr_handler(ring, 0); - s2io_chk_rx_buffers(ring); + s2io_chk_rx_buffers(sp, ring); } return IRQ_HANDLED; @@ -4826,7 +4830,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) */ if (!config->napi) { for (i = 0; i < config->rx_ring_num; i++) - s2io_chk_rx_buffers(&mac_control->rings[i]); + s2io_chk_rx_buffers(sp, &mac_control->rings[i]); } writeq(sp->general_int_mask, &bar0->general_int_mask); readl(&bar0->general_int_status); @@ -6859,7 +6863,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, pci_map_single( sp->pdev, (*skb)->data, size - NET_IP_ALIGN, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(rxdp1->Buffer0_ptr)) + if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr)) goto memalloc_failed; rxdp->Host_Control = (unsigned long) (*skb); } @@ -6886,12 +6890,13 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, pci_map_single(sp->pdev, (*skb)->data, dev->mtu + 4, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(rxdp3->Buffer2_ptr)) + if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr)) goto memalloc_failed; rxdp3->Buffer0_ptr = *temp0 = pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) { + if (pci_dma_mapping_error(sp->pdev, + rxdp3->Buffer0_ptr)) { pci_unmap_single (sp->pdev, (dma_addr_t)rxdp3->Buffer2_ptr, dev->mtu + 4, PCI_DMA_FROMDEVICE); @@ -6903,7 +6908,8 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, rxdp3->Buffer1_ptr = *temp1 = pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) { + if (pci_dma_mapping_error(sp->pdev, + rxdp3->Buffer1_ptr)) { pci_unmap_single (sp->pdev, (dma_addr_t)rxdp3->Buffer0_ptr, BUF0_LEN, PCI_DMA_FROMDEVICE); @@ -7187,7 +7193,7 @@ static int s2io_card_up(struct s2io_nic * sp) for (i = 0; i < config->rx_ring_num; i++) { mac_control->rings[i].mtu = dev->mtu; - ret = fill_rx_buffers(&mac_control->rings[i], 1); + ret = fill_rx_buffers(sp, &mac_control->rings[i], 1); if (ret) { DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", dev->name); diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index 601b001437c..0d27dd39bc0 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c @@ -233,7 +233,7 @@ static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue, rx_buf->data, rx_buf->len, PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) { + if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) { dev_kfree_skb_any(rx_buf->skb); rx_buf->skb = NULL; return -EIO; @@ -275,7 +275,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, 0, efx_rx_buf_size(efx), PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(dma_addr))) { + if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) { __free_pages(rx_buf->page, efx->rx_buffer_order); rx_buf->page = NULL; return -EIO; diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 5cdd082ab8f..5e8374ab28e 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c @@ -172,7 +172,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, /* Process all fragments */ while (1) { - if (unlikely(pci_dma_mapping_error(dma_addr))) + if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr))) goto pci_err; /* Store fields for marking in the per-fragment final @@ -661,7 
+661,8 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, TSOH_BUFFER(tsoh), header_len, PCI_DMA_TODEVICE); - if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) { + if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev, + tsoh->dma_addr))) { kfree(tsoh); return NULL; } @@ -863,7 +864,7 @@ static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off, len, PCI_DMA_TODEVICE); - if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) { + if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) { st->ifc.unmap_len = len; st->ifc.len = len; st->ifc.dma_addr = st->ifc.unmap_addr; diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 00aa0b108cb..b6435d0d71f 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c @@ -452,7 +452,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, /* iommu-map the skb */ buf = pci_map_single(card->pdev, descr->skb->data, SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(buf)) { + if (pci_dma_mapping_error(card->pdev, buf)) { dev_kfree_skb_any(descr->skb); descr->skb = NULL; if (netif_msg_rx_err(card) && net_ratelimit()) @@ -691,7 +691,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, unsigned long flags; buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(buf)) { + if (pci_dma_mapping_error(card->pdev, buf)) { if (netif_msg_tx_err(card) && net_ratelimit()) dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). " "Dropping packet\n", skb->data, skb->len); diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index a645e5028c1..8487ace9d2e 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c @@ -506,7 +506,7 @@ static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle) return NULL; *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(*dma_handle)) { + if (pci_dma_mapping_error(hwdev, *dma_handle)) { free_page((unsigned long)buf); return NULL; } @@ -536,7 +536,7 @@ static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev, return NULL; *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(*dma_handle)) { + if (pci_dma_mapping_error(hwdev, *dma_handle)) { dev_kfree_skb_any(skb); return NULL; } diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index 217d506527a..d9769c52734 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c @@ -1166,7 +1166,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) bf->skb = skb; bf->skbaddr = pci_map_single(sc->pdev, skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(bf->skbaddr))) { + if (unlikely(pci_dma_mapping_error(sc->pdev, bf->skbaddr))) { ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__); dev_kfree_skb(skb); bf->skb = NULL; @@ -1918,7 +1918,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] " "skbaddr %llx\n", skb, skb->data, skb->len, (unsigned long long)bf->skbaddr); - if (pci_dma_mapping_error(bf->skbaddr)) { + if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) { ATH5K_ERR(sc, "beacon DMA mapping failed\n"); return -EIO; } diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c 
b/drivers/scsi/ibmvscsi/ibmvfc.c index c4a7c06793c..61f8fdea2d9 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -3525,7 +3525,7 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost) crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE, DMA_BIDIRECTIONAL); - if (dma_mapping_error(crq->msg_token)) + if (dma_mapping_error(dev, crq->msg_token)) goto map_failed; retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, @@ -3618,7 +3618,7 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost) async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL); - if (dma_mapping_error(async_q->msg_token)) { + if (dma_mapping_error(dev, async_q->msg_token)) { dev_err(dev, "Failed to map async queue\n"); goto free_async_crq; } diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 20000ec79b0..6b24b9cdb04 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -859,7 +859,7 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL); - if (dma_mapping_error(req->buffer)) { + if (dma_mapping_error(hostdata->dev, req->buffer)) { if (!firmware_has_feature(FW_FEATURE_CMO)) dev_err(hostdata->dev, "Unable to map request_buffer for " @@ -1407,7 +1407,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, length, DMA_BIDIRECTIONAL); - if (dma_mapping_error(host_config->buffer)) { + if (dma_mapping_error(hostdata->dev, host_config->buffer)) { if (!firmware_has_feature(FW_FEATURE_CMO)) dev_err(hostdata->dev, "dma_mapping error getting host config\n"); diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c index 3b9514c8f1f..2e13ec00172 100644 --- a/drivers/scsi/ibmvscsi/ibmvstgt.c +++ b/drivers/scsi/ibmvscsi/ibmvstgt.c @@ -564,7 +564,7 @@ static int crq_queue_create(struct crq_queue *queue, struct srp_target *target) queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL); - if (dma_mapping_error(queue->msg_token)) + if (dma_mapping_error(target->dev, queue->msg_token)) goto map_failed; err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token, diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c index 182146100dc..462a8574dad 100644 --- a/drivers/scsi/ibmvscsi/rpa_vscsi.c +++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c @@ -253,7 +253,7 @@ static int rpavscsi_init_crq_queue(struct crq_queue *queue, queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL); - if (dma_mapping_error(queue->msg_token)) + if (dma_mapping_error(hostdata->dev, queue->msg_token)) goto map_failed; gather_partition_info(); diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c index e81d59d7891..0c716566085 100644 --- a/drivers/spi/atmel_spi.c +++ b/drivers/spi/atmel_spi.c @@ -313,14 +313,14 @@ atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer) xfer->tx_dma = dma_map_single(dev, (void *) xfer->tx_buf, xfer->len, DMA_TO_DEVICE); - if (dma_mapping_error(xfer->tx_dma)) + if (dma_mapping_error(dev, xfer->tx_dma)) return -ENOMEM; } if (xfer->rx_buf) { xfer->rx_dma = dma_map_single(dev, xfer->rx_buf, xfer->len, DMA_FROM_DEVICE); - if (dma_mapping_error(xfer->rx_dma)) { + if (dma_mapping_error(dev, xfer->rx_dma)) { if (xfer->tx_buf) dma_unmap_single(dev, xfer->tx_dma, xfer->len, diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c index 9149689c79d..87b73e0169c 100644 --- a/drivers/spi/au1550_spi.c +++ b/drivers/spi/au1550_spi.c @@ -334,7 +334,7 @@ static int 
au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned size) hw->dma_rx_tmpbuf_size = size; hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf, size, DMA_FROM_DEVICE); - if (dma_mapping_error(hw->dma_rx_tmpbuf_addr)) { + if (dma_mapping_error(hw->dev, hw->dma_rx_tmpbuf_addr)) { kfree(hw->dma_rx_tmpbuf); hw->dma_rx_tmpbuf = 0; hw->dma_rx_tmpbuf_size = 0; @@ -378,7 +378,7 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t) dma_rx_addr = dma_map_single(hw->dev, (void *)t->rx_buf, t->len, DMA_FROM_DEVICE); - if (dma_mapping_error(dma_rx_addr)) + if (dma_mapping_error(hw->dev, dma_rx_addr)) dev_err(hw->dev, "rx dma map error\n"); } } else { @@ -401,7 +401,7 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t) dma_tx_addr = dma_map_single(hw->dev, (void *)t->tx_buf, t->len, DMA_TO_DEVICE); - if (dma_mapping_error(dma_tx_addr)) + if (dma_mapping_error(hw->dev, dma_tx_addr)) dev_err(hw->dev, "tx dma map error\n"); } } else { diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c index b1cc148036c..f6f987bb71c 100644 --- a/drivers/spi/omap2_mcspi.c +++ b/drivers/spi/omap2_mcspi.c @@ -836,7 +836,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) if (tx_buf != NULL) { t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf, len, DMA_TO_DEVICE); - if (dma_mapping_error(t->tx_dma)) { + if (dma_mapping_error(&spi->dev, t->tx_dma)) { dev_dbg(&spi->dev, "dma %cX %d bytes error\n", 'T', len); return -EINVAL; @@ -845,7 +845,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) if (rx_buf != NULL) { t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len, DMA_FROM_DEVICE); - if (dma_mapping_error(t->rx_dma)) { + if (dma_mapping_error(&spi->dev, t->rx_dma)) { dev_dbg(&spi->dev, "dma %cX %d bytes error\n", 'R', len); if (tx_buf != NULL) diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c index 0c452c46ab0..067299d6d19 100644 --- a/drivers/spi/pxa2xx_spi.c +++ b/drivers/spi/pxa2xx_spi.c @@ -353,7 +353,7 @@ static int map_dma_buffers(struct driver_data *drv_data) drv_data->rx_dma = dma_map_single(dev, drv_data->rx, drv_data->rx_map_len, DMA_FROM_DEVICE); - if (dma_mapping_error(drv_data->rx_dma)) + if (dma_mapping_error(dev, drv_data->rx_dma)) return 0; /* Stream map the tx buffer */ @@ -361,7 +361,7 @@ static int map_dma_buffers(struct driver_data *drv_data) drv_data->tx_map_len, DMA_TO_DEVICE); - if (dma_mapping_error(drv_data->tx_dma)) { + if (dma_mapping_error(dev, drv_data->tx_dma)) { dma_unmap_single(dev, drv_data->rx_dma, drv_data->rx_map_len, DMA_FROM_DEVICE); return 0; diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c index 54ac7bea5f8..6fb77fcc497 100644 --- a/drivers/spi/spi_imx.c +++ b/drivers/spi/spi_imx.c @@ -491,7 +491,7 @@ static int map_dma_buffers(struct driver_data *drv_data) buf, drv_data->tx_map_len, DMA_TO_DEVICE); - if (dma_mapping_error(drv_data->tx_dma)) + if (dma_mapping_error(dev, drv_data->tx_dma)) return -1; drv_data->tx_dma_needs_unmap = 1; @@ -516,7 +516,7 @@ static int map_dma_buffers(struct driver_data *drv_data) buf, drv_data->len, DMA_FROM_DEVICE); - if (dma_mapping_error(drv_data->rx_dma)) + if (dma_mapping_error(dev, drv_data->rx_dma)) return -1; drv_data->rx_dma_needs_unmap = 1; } @@ -534,7 +534,7 @@ static int map_dma_buffers(struct driver_data *drv_data) buf, drv_data->tx_map_len, DMA_TO_DEVICE); - if (dma_mapping_error(drv_data->tx_dma)) { + if (dma_mapping_error(dev, drv_data->tx_dma)) { if 
(drv_data->rx_dma) { dma_unmap_single(dev, drv_data->rx_dma, diff --git a/include/asm-alpha/dma-mapping.h b/include/asm-alpha/dma-mapping.h index db351d1296f..a5801ae02e4 100644 --- a/include/asm-alpha/dma-mapping.h +++ b/include/asm-alpha/dma-mapping.h @@ -24,8 +24,8 @@ pci_unmap_sg(alpha_gendev_to_pci(dev), sg, nents, dir) #define dma_supported(dev, mask) \ pci_dma_supported(alpha_gendev_to_pci(dev), mask) -#define dma_mapping_error(addr) \ - pci_dma_mapping_error(addr) +#define dma_mapping_error(dev, addr) \ + pci_dma_mapping_error(alpha_gendev_to_pci(dev), addr) #else /* no PCI - no IOMMU. */ @@ -45,7 +45,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, #define dma_unmap_page(dev, addr, size, dir) ((void)0) #define dma_unmap_sg(dev, sg, nents, dir) ((void)0) -#define dma_mapping_error(addr) (0) +#define dma_mapping_error(dev, addr) (0) #endif /* !CONFIG_PCI */ diff --git a/include/asm-alpha/pci.h b/include/asm-alpha/pci.h index d31fd49ff79..2a14302c17a 100644 --- a/include/asm-alpha/pci.h +++ b/include/asm-alpha/pci.h @@ -106,7 +106,7 @@ extern dma_addr_t pci_map_page(struct pci_dev *, struct page *, /* Test for pci_map_single or pci_map_page having generated an error. */ static inline int -pci_dma_mapping_error(dma_addr_t dma_addr) +pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr) { return dma_addr == 0; } diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h index e99406a7bec..f41335ba633 100644 --- a/include/asm-arm/dma-mapping.h +++ b/include/asm-arm/dma-mapping.h @@ -56,7 +56,7 @@ static inline int dma_is_consistent(struct device *dev, dma_addr_t handle) /* * DMA errors are defined by all-bits-set in the DMA address. */ -static inline int dma_mapping_error(dma_addr_t dma_addr) +static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return dma_addr == ~0; } diff --git a/include/asm-avr32/dma-mapping.h b/include/asm-avr32/dma-mapping.h index 57dc672bab8..0399359ab5d 100644 --- a/include/asm-avr32/dma-mapping.h +++ b/include/asm-avr32/dma-mapping.h @@ -35,7 +35,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask) /* * dma_map_single can't fail as it is implemented now. 
*/ -static inline int dma_mapping_error(dma_addr_t addr) +static inline int dma_mapping_error(struct device *dev, dma_addr_t addr) { return 0; } diff --git a/include/asm-cris/dma-mapping.h b/include/asm-cris/dma-mapping.h index edc8d1bfaae..cb2fb25ff8d 100644 --- a/include/asm-cris/dma-mapping.h +++ b/include/asm-cris/dma-mapping.h @@ -120,7 +120,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, } static inline int -dma_mapping_error(dma_addr_t dma_addr) +dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return 0; } diff --git a/include/asm-frv/dma-mapping.h b/include/asm-frv/dma-mapping.h index 2e8966ca030..b2898877c07 100644 --- a/include/asm-frv/dma-mapping.h +++ b/include/asm-frv/dma-mapping.h @@ -126,7 +126,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele } static inline -int dma_mapping_error(dma_addr_t dma_addr) +int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return 0; } diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h index e2468f894d2..82cd0cb1c3f 100644 --- a/include/asm-generic/dma-mapping-broken.h +++ b/include/asm-generic/dma-mapping-broken.h @@ -61,7 +61,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, #define dma_sync_sg_for_device dma_sync_sg_for_cpu extern int -dma_mapping_error(dma_addr_t dma_addr); +dma_mapping_error(struct device *dev, dma_addr_t dma_addr); extern int dma_supported(struct device *dev, u64 mask); diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h index 783ab9944d7..189486c3f92 100644 --- a/include/asm-generic/dma-mapping.h +++ b/include/asm-generic/dma-mapping.h @@ -144,9 +144,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, } static inline int -dma_mapping_error(dma_addr_t dma_addr) +dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - return pci_dma_mapping_error(dma_addr); + return pci_dma_mapping_error(to_pci_dev(dev), dma_addr); } diff --git a/include/asm-generic/pci-dma-compat.h b/include/asm-generic/pci-dma-compat.h index 25c10e96b2b..37b3706226e 100644 --- a/include/asm-generic/pci-dma-compat.h +++ b/include/asm-generic/pci-dma-compat.h @@ -99,9 +99,9 @@ pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, } static inline int -pci_dma_mapping_error(dma_addr_t dma_addr) +pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr) { - return dma_mapping_error(dma_addr); + return dma_mapping_error(&pdev->dev, dma_addr); } #endif diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h index 0721a5e8271..a6d50c77b6b 100644 --- a/include/asm-ia64/machvec.h +++ b/include/asm-ia64/machvec.h @@ -54,7 +54,7 @@ typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_ typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int); typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int); typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int); -typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr); +typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr); typedef int ia64_mv_dma_supported (struct device *, u64); typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *); diff --git a/include/asm-m68k/dma-mapping.h b/include/asm-m68k/dma-mapping.h index 
a26cdeb46a5..91f7944333d 100644 --- a/include/asm-m68k/dma-mapping.h +++ b/include/asm-m68k/dma-mapping.h @@ -84,7 +84,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *s { } -static inline int dma_mapping_error(dma_addr_t handle) +static inline int dma_mapping_error(struct device *dev, dma_addr_t handle) { return 0; } diff --git a/include/asm-mips/dma-mapping.h b/include/asm-mips/dma-mapping.h index 230b3f1b69b..c64afb40cd0 100644 --- a/include/asm-mips/dma-mapping.h +++ b/include/asm-mips/dma-mapping.h @@ -42,7 +42,7 @@ extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction); extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction); -extern int dma_mapping_error(dma_addr_t dma_addr); +extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr); extern int dma_supported(struct device *dev, u64 mask); static inline int diff --git a/include/asm-mn10300/dma-mapping.h b/include/asm-mn10300/dma-mapping.h index 7c882fca9ec..ccae8f6c632 100644 --- a/include/asm-mn10300/dma-mapping.h +++ b/include/asm-mn10300/dma-mapping.h @@ -182,7 +182,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, } static inline -int dma_mapping_error(dma_addr_t dma_addr) +int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return 0; } diff --git a/include/asm-parisc/dma-mapping.h b/include/asm-parisc/dma-mapping.h index c6c0e9ff6bd..53af696f23d 100644 --- a/include/asm-parisc/dma-mapping.h +++ b/include/asm-parisc/dma-mapping.h @@ -248,6 +248,6 @@ void * sba_get_iommu(struct parisc_device *dev); #endif /* At the moment, we panic on error for IOMMU resource exaustion */ -#define dma_mapping_error(x) 0 +#define dma_mapping_error(dev, x) 0 #endif diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h index 74c54978098..c7ca45f97dd 100644 --- a/include/asm-powerpc/dma-mapping.h +++ b/include/asm-powerpc/dma-mapping.h @@ -415,7 +415,7 @@ static inline void dma_sync_sg_for_device(struct device *dev, __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); } -static inline int dma_mapping_error(dma_addr_t dma_addr) +static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { #ifdef CONFIG_PPC64 return (dma_addr == DMA_ERROR_CODE); diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h index 22cc419389f..6c0b8a2de14 100644 --- a/include/asm-sh/dma-mapping.h +++ b/include/asm-sh/dma-mapping.h @@ -171,7 +171,7 @@ static inline int dma_get_cache_alignment(void) return L1_CACHE_BYTES; } -static inline int dma_mapping_error(dma_addr_t dma_addr) +static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return dma_addr == 0; } diff --git a/include/asm-sparc/dma-mapping_64.h b/include/asm-sparc/dma-mapping_64.h index 38cbec76a33..bfa64f9702d 100644 --- a/include/asm-sparc/dma-mapping_64.h +++ b/include/asm-sparc/dma-mapping_64.h @@ -135,7 +135,7 @@ static inline void dma_sync_sg_for_device(struct device *dev, /* No flushing needed to sync cpu writes to the device. 
*/ } -static inline int dma_mapping_error(dma_addr_t dma_addr) +static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return (dma_addr == DMA_ERROR_CODE); } diff --git a/include/asm-sparc/pci_32.h b/include/asm-sparc/pci_32.h index b93b6c79e08..0ee949d220c 100644 --- a/include/asm-sparc/pci_32.h +++ b/include/asm-sparc/pci_32.h @@ -154,7 +154,8 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, #define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0) -static inline int pci_dma_mapping_error(dma_addr_t dma_addr) +static inline int pci_dma_mapping_error(struct pci_dev *pdev, + dma_addr_t dma_addr) { return (dma_addr == PCI_DMA_ERROR_CODE); } diff --git a/include/asm-sparc/pci_64.h b/include/asm-sparc/pci_64.h index f59f2571295..4f79a54948f 100644 --- a/include/asm-sparc/pci_64.h +++ b/include/asm-sparc/pci_64.h @@ -140,9 +140,10 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask); #define PCI64_REQUIRED_MASK (~(dma64_addr_t)0) #define PCI64_ADDR_BASE 0xfffc000000000000UL -static inline int pci_dma_mapping_error(dma_addr_t dma_addr) +static inline int pci_dma_mapping_error(struct pci_dev *pdev, + dma_addr_t dma_addr) { - return dma_mapping_error(dma_addr); + return dma_mapping_error(&pdev->dev, dma_addr); } #ifdef CONFIG_PCI diff --git a/include/asm-x86/device.h b/include/asm-x86/device.h index 87a715367a1..3c034f48fdb 100644 --- a/include/asm-x86/device.h +++ b/include/asm-x86/device.h @@ -5,6 +5,9 @@ struct dev_archdata { #ifdef CONFIG_ACPI void *acpi_handle; #endif +#ifdef CONFIG_X86_64 +struct dma_mapping_ops *dma_ops; +#endif #ifdef CONFIG_DMAR void *iommu; /* hook for IOMMU specific extension */ #endif diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h index c2ddd3d1b88..0eaa9bf6011 100644 --- a/include/asm-x86/dma-mapping.h +++ b/include/asm-x86/dma-mapping.h @@ -17,7 +17,8 @@ extern int panic_on_overflow; extern int force_iommu; struct dma_mapping_ops { - int (*mapping_error)(dma_addr_t dma_addr); + int (*mapping_error)(struct device *dev, + dma_addr_t dma_addr); void* (*alloc_coherent)(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp); void (*free_coherent)(struct device *dev, size_t size, @@ -56,14 +57,32 @@ struct dma_mapping_ops { int is_phys; }; -extern const struct dma_mapping_ops *dma_ops; +extern struct dma_mapping_ops *dma_ops; -static inline int dma_mapping_error(dma_addr_t dma_addr) +static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) { - if (dma_ops->mapping_error) - return dma_ops->mapping_error(dma_addr); +#ifdef CONFIG_X86_32 + return dma_ops; +#else + if (unlikely(!dev) || !dev->archdata.dma_ops) + return dma_ops; + else + return dev->archdata.dma_ops; +#endif +} + +/* Make sure we keep the same behaviour */ +static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ +#ifdef CONFIG_X86_32 + return 0; +#else + struct dma_mapping_ops *ops = get_dma_ops(dev); + if (ops->mapping_error) + return ops->mapping_error(dev, dma_addr); return (dma_addr == bad_dma_address); +#endif } #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) @@ -83,44 +102,53 @@ static inline dma_addr_t dma_map_single(struct device *hwdev, void *ptr, size_t size, int direction) { + struct dma_mapping_ops *ops = get_dma_ops(hwdev); + BUG_ON(!valid_dma_direction(direction)); - return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction); + return ops->map_single(hwdev, virt_to_phys(ptr), size, direction); } static inline void dma_unmap_single(struct device 
*dev, dma_addr_t addr, size_t size, int direction) { + struct dma_mapping_ops *ops = get_dma_ops(dev); + BUG_ON(!valid_dma_direction(direction)); - if (dma_ops->unmap_single) - dma_ops->unmap_single(dev, addr, size, direction); + if (ops->unmap_single) + ops->unmap_single(dev, addr, size, direction); } static inline int dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction) { + struct dma_mapping_ops *ops = get_dma_ops(hwdev); + BUG_ON(!valid_dma_direction(direction)); - return dma_ops->map_sg(hwdev, sg, nents, direction); + return ops->map_sg(hwdev, sg, nents, direction); } static inline void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction) { + struct dma_mapping_ops *ops = get_dma_ops(hwdev); + BUG_ON(!valid_dma_direction(direction)); - if (dma_ops->unmap_sg) - dma_ops->unmap_sg(hwdev, sg, nents, direction); + if (ops->unmap_sg) + ops->unmap_sg(hwdev, sg, nents, direction); } static inline void dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, size_t size, int direction) { + struct dma_mapping_ops *ops = get_dma_ops(hwdev); + BUG_ON(!valid_dma_direction(direction)); - if (dma_ops->sync_single_for_cpu) - dma_ops->sync_single_for_cpu(hwdev, dma_handle, size, - direction); + if (ops->sync_single_for_cpu) + ops->sync_single_for_cpu(hwdev, dma_handle, size, direction); flush_write_buffers(); } @@ -128,10 +156,11 @@ static inline void dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, size_t size, int direction) { + struct dma_mapping_ops *ops = get_dma_ops(hwdev); + BUG_ON(!valid_dma_direction(direction)); - if (dma_ops->sync_single_for_device) - dma_ops->sync_single_for_device(hwdev, dma_handle, size, - direction); + if (ops->sync_single_for_device) + ops->sync_single_for_device(hwdev, dma_handle, size, direction); flush_write_buffers(); } @@ -139,11 +168,12 @@ static inline void dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction) { - BUG_ON(!valid_dma_direction(direction)); - if (dma_ops->sync_single_range_for_cpu) - dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, - size, direction); + struct dma_mapping_ops *ops = get_dma_ops(hwdev); + BUG_ON(!valid_dma_direction(direction)); + if (ops->sync_single_range_for_cpu) + ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, + size, direction); flush_write_buffers(); } @@ -152,11 +182,12 @@ dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction) { - BUG_ON(!valid_dma_direction(direction)); - if (dma_ops->sync_single_range_for_device) - dma_ops->sync_single_range_for_device(hwdev, dma_handle, - offset, size, direction); + struct dma_mapping_ops *ops = get_dma_ops(hwdev); + BUG_ON(!valid_dma_direction(direction)); + if (ops->sync_single_range_for_device) + ops->sync_single_range_for_device(hwdev, dma_handle, + offset, size, direction); flush_write_buffers(); } @@ -164,9 +195,11 @@ static inline void dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, int nelems, int direction) { + struct dma_mapping_ops *ops = get_dma_ops(hwdev); + BUG_ON(!valid_dma_direction(direction)); - if (dma_ops->sync_sg_for_cpu) - dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); + if (ops->sync_sg_for_cpu) + ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); flush_write_buffers(); } @@ -174,9 +207,11 @@ static inline void dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, 
int nelems, int direction) { + struct dma_mapping_ops *ops = get_dma_ops(hwdev); + BUG_ON(!valid_dma_direction(direction)); - if (dma_ops->sync_sg_for_device) - dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction); + if (ops->sync_sg_for_device) + ops->sync_sg_for_device(hwdev, sg, nelems, direction); flush_write_buffers(); } @@ -185,9 +220,11 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, int direction) { + struct dma_mapping_ops *ops = get_dma_ops(dev); + BUG_ON(!valid_dma_direction(direction)); - return dma_ops->map_single(dev, page_to_phys(page)+offset, - size, direction); + return ops->map_single(dev, page_to_phys(page) + offset, + size, direction); } static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h index c706a744263..2730b351afc 100644 --- a/include/asm-x86/swiotlb.h +++ b/include/asm-x86/swiotlb.h @@ -35,7 +35,7 @@ extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction); extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction); -extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr); +extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); extern void swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle); extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); diff --git a/include/asm-xtensa/dma-mapping.h b/include/asm-xtensa/dma-mapping.h index 3c7d537dd15..51882ae3db4 100644 --- a/include/asm-xtensa/dma-mapping.h +++ b/include/asm-xtensa/dma-mapping.h @@ -139,7 +139,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, consistent_sync(sg_virt(sg), sg->length, dir); } static inline int -dma_mapping_error(dma_addr_t dma_addr) +dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return 0; } diff --git a/include/linux/i2o.h b/include/linux/i2o.h index 7d51cbca49a..75ae6d8aba4 100644 --- a/include/linux/i2o.h +++ b/include/linux/i2o.h @@ -758,7 +758,7 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, } dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction); - if (!dma_mapping_error(dma_addr)) { + if (!dma_mapping_error(&c->pdev->dev, dma_addr)) { #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 if ((sizeof(dma_addr_t) > 4) && c->pae_support) { *mptr++ = cpu_to_le32(0x7C020002); diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 4bf8cade9db..e530026eedf 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -427,9 +427,9 @@ static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr) { switch (dev->bus->bustype) { case SSB_BUSTYPE_PCI: - return pci_dma_mapping_error(addr); + return pci_dma_mapping_error(dev->bus->host_pci, addr); case SSB_BUSTYPE_SSB: - return dma_mapping_error(addr); + return dma_mapping_error(dev->dev, addr); default: __ssb_dma_not_implemented(dev); } diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 90b529f7a15..936e333e7ce 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1590,7 +1590,7 @@ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) { if (dev->dma_ops) return dev->dma_ops->mapping_error(dev, dma_addr); - return dma_mapping_error(dma_addr); + return dma_mapping_error(dev->dma_device, dma_addr); } /** diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 
d568894df8c..977edbdbc1d 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -492,7 +492,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, */ dma_addr_t handle; handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE); - if (swiotlb_dma_mapping_error(handle)) + if (swiotlb_dma_mapping_error(hwdev, handle)) return NULL; ret = bus_to_virt(handle); @@ -824,7 +824,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, } int -swiotlb_dma_mapping_error(dma_addr_t dma_addr) +swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) { return (dma_addr == virt_to_bus(io_tlb_overflow_buffer)); } diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index a19b22b452a..84d328329d9 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -169,7 +169,8 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, (void *) vec->sge[xdr_sge_no].iov_base + sge_off, sge_bytes, DMA_TO_DEVICE); - if (dma_mapping_error(sge[sge_no].addr)) + if (dma_mapping_error(xprt->sc_cm_id->device->dma_device, + sge[sge_no].addr)) goto err; sge_off = 0; sge_no++; -- cgit v1.2.3-70-g09d2 From 1956a96de488feb05e95c08c9d5e80f63a4be2b1 Mon Sep 17 00:00:00 2001 From: Alexis Bruemmer Date: Fri, 25 Jul 2008 19:44:51 -0700 Subject: x86 calgary: fix handling of devices that aren't behind the Calgary The calgary code can give drivers addresses above 4GB, which is very bad for hardware that is only 32-bit DMA addressable. With this patch, the calgary code sets the global dma_ops to swiotlb or nommu properly, and the dma_ops of devices behind the Calgary/CalIOC2 to calgary_dma_ops. So the calgary code can safely handle devices that aren't behind the Calgary/CalIOC2.
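For context, here is a minimal sketch of the per-device dispatch this change builds on, matching the get_dma_ops() helper already used in the dma-mapping.h hunks earlier in this series (the exact struct layout and the X86_32 special case are omitted; this is an illustration, not the verbatim helper):

	/* Sketch: prefer a device-private dma_ops, else fall back to the global one. */
	static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
	{
		if (unlikely(!dev) || !dev->archdata.dma_ops)
			return dma_ops;			/* global: swiotlb or nommu */
		return dev->archdata.dma_ops;		/* per-device: e.g. calgary_dma_ops */
	}

With this dispatch, calgary_init() below only has to set dev->dev.archdata.dma_ops on devices that actually sit behind a Calgary/CalIOC2; everything else falls through to the global ops.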
[akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Alexis Bruemmer Signed-off-by: FUJITA Tomonori Cc: Muli Ben-Yehuda Cc: Ingo Molnar Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/kernel/pci-calgary_64.c | 71 +++++++++++++++------------------------- include/asm-x86/iommu.h | 1 + 2 files changed, 27 insertions(+), 45 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index 1eb86be93d7..b67a4b1d4ea 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c @@ -37,6 +37,7 @@ #include #include #include + #include #include #include @@ -413,22 +414,6 @@ static void calgary_unmap_sg(struct device *dev, } } -static int calgary_nontranslate_map_sg(struct device* dev, - struct scatterlist *sg, int nelems, int direction) -{ - struct scatterlist *s; - int i; - - for_each_sg(sg, s, nelems, i) { - struct page *p = sg_page(s); - - BUG_ON(!p); - s->dma_address = virt_to_bus(sg_virt(s)); - s->dma_length = s->length; - } - return nelems; -} - static int calgary_map_sg(struct device *dev, struct scatterlist *sg, int nelems, int direction) { @@ -439,9 +424,6 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, unsigned long entry; int i; - if (!translation_enabled(tbl)) - return calgary_nontranslate_map_sg(dev, sg, nelems, direction); - for_each_sg(sg, s, nelems, i) { BUG_ON(!sg_page(s)); @@ -477,7 +459,6 @@ error: static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr, size_t size, int direction) { - dma_addr_t dma_handle = bad_dma_address; void *vaddr = phys_to_virt(paddr); unsigned long uaddr; unsigned int npages; @@ -486,12 +467,7 @@ static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr, uaddr = (unsigned long)vaddr; npages = num_dma_pages(uaddr, size); - if (translation_enabled(tbl)) - dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction); - else - dma_handle = virt_to_bus(vaddr); - - return dma_handle; + return iommu_alloc(dev, tbl, vaddr, npages, direction); } static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle, @@ -500,9 +476,6 @@ static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle, struct iommu_table *tbl = find_iommu_table(dev); unsigned int npages; - if (!translation_enabled(tbl)) - return; - npages = num_dma_pages(dma_handle, size); iommu_free(tbl, dma_handle, npages); } @@ -525,18 +498,12 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size, goto error; memset(ret, 0, size); - if (translation_enabled(tbl)) { - /* set up tces to cover the allocated range */ - mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL); - if (mapping == bad_dma_address) - goto free; - - *dma_handle = mapping; - } else /* non translated slot */ - *dma_handle = virt_to_bus(ret); - + /* set up tces to cover the allocated range */ + mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL); + if (mapping == bad_dma_address) + goto free; + *dma_handle = mapping; return ret; - free: free_pages((unsigned long)ret, get_order(size)); ret = NULL; @@ -1241,6 +1208,16 @@ static int __init calgary_init(void) goto error; } while (1); + dev = NULL; + for_each_pci_dev(dev) { + struct iommu_table *tbl; + + tbl = find_iommu_table(&dev->dev); + + if (translation_enabled(tbl)) + dev->dev.archdata.dma_ops = &calgary_dma_ops; + } + return ret; error: @@ -1262,6 +1239,7 @@ error: calgary_disable_translation(dev); calgary_free_bus(dev); 
pci_dev_put(dev); /* Undo calgary_init_one()'s pci_dev_get() */ + dev->dev.archdata.dma_ops = NULL; } while (1); return ret; @@ -1503,6 +1481,10 @@ void __init detect_calgary(void) printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d, " "CONFIG_IOMMU_DEBUG is %s.\n", specified_table_size, debugging ? "enabled" : "disabled"); + + /* swiotlb for devices that aren't behind the Calgary. */ + if (max_pfn > MAX_DMA32_PFN) + swiotlb = 1; } return; @@ -1519,7 +1501,7 @@ int __init calgary_iommu_init(void) { int ret; - if (no_iommu || swiotlb) + if (no_iommu || (swiotlb && !calgary_detected)) return -ENODEV; if (!calgary_detected) @@ -1532,15 +1514,14 @@ int __init calgary_iommu_init(void) if (ret) { printk(KERN_ERR "PCI-DMA: Calgary init failed %d, " "falling back to no_iommu\n", ret); - if (max_pfn > MAX_DMA32_PFN) - printk(KERN_ERR "WARNING more than 4GB of memory, " - "32bit PCI may malfunction.\n"); return ret; } force_iommu = 1; bad_dma_address = 0x0; - dma_ops = &calgary_dma_ops; + /* dma_ops is set to swiotlb or nommu */ + if (!dma_ops) + dma_ops = &nommu_dma_ops; return 0; } diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h index d63166fb3ab..ecc8061904a 100644 --- a/include/asm-x86/iommu.h +++ b/include/asm-x86/iommu.h @@ -3,6 +3,7 @@ extern void pci_iommu_shutdown(void); extern void no_iommu_init(void); +extern struct dma_mapping_ops nommu_dma_ops; extern int force_iommu, no_iommu; extern int iommu_detected; -- cgit v1.2.3-70-g09d2 From 3ab83521378268044a448113c6aa9a9e245f4d2f Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Fri, 25 Jul 2008 19:45:07 -0700 Subject: kexec jump This patch provides an enhancement to kexec/kdump. It implements the following features: - Backup/restore memory used by the original kernel before/after kexec. - Save/restore CPU state before/after kexec. The features of this patch can be used as a general method to call a program in physical mode (paging turned off). This can be used to call BIOS code under Linux. kexec-tools needs to be patched to support kexec jump. The patches and the precompiled kexec can be downloaded from the following URLs: source: http://khibernation.sourceforge.net/download/release_v10/kexec-tools/kexec-tools-src_git_kh10.tar.bz2 patches: http://khibernation.sourceforge.net/download/release_v10/kexec-tools/kexec-tools-patches_git_kh10.tar.bz2 binary: http://khibernation.sourceforge.net/download/release_v10/kexec-tools/kexec_git_kh10 Usage example of calling some physical mode code and returning: 1. Compile and install the patched kernel with the following options selected: CONFIG_X86_32=y CONFIG_KEXEC=y CONFIG_PM=y CONFIG_KEXEC_JUMP=y 2. Build the patched kexec-tools or download the pre-built one. 3. Build a physical mode executable, named for example "phy_mode". 4. Boot the kernel compiled in step 1. 5. Load the physical mode executable with /sbin/kexec. The shell command line can be as follows: /sbin/kexec --load-preserve-context --args-none phy_mode 6. Call the physical mode executable with the following shell command line: /sbin/kexec -e Implementation point: to support jumping without reserving memory, one shadow backup page (source page) is allocated for each page used by the kexeced code image (destination page). At kexec_load time, the image of the kexeced code is loaded into the source pages, and before executing, the destination pages and the source pages are swapped, so the contents of the destination pages are backed up. Before jumping to the kexeced code image and after jumping back to the original kernel, the destination pages and the source pages are swapped again.
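To make the swap step concrete, here is an illustrative C helper for the three-way page swap described above. This is a sketch only: the real work is done by the swap_pages assembly routine in relocate_kernel_32.S further down, which runs with paging off.

	#include <string.h>

	#define PAGE_SIZE 4096	/* illustration; the kernel gets this from its headers */

	/*
	 * Illustration only: exchange a destination page with its shadow
	 * source page via a scratch page. Doing this before the jump and
	 * again after jumping back restores each kernel's memory image.
	 */
	static void swap_page_contents(void *dest, void *src, void *scratch)
	{
		memcpy(scratch, dest, PAGE_SIZE);	/* back up the destination page */
		memcpy(dest, src, PAGE_SIZE);		/* install the image page */
		memcpy(src, scratch, PAGE_SIZE);	/* keep the backup in the source page */
	}

After the swap, each destination page holds the kexeced image and each source page holds the backed-up original contents, which is what makes jumping back possible.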
The C ABI (calling convention) is used as the communication protocol between the kernel and the called code. A flag named KEXEC_PRESERVE_CONTEXT for sys_kexec_load is added to indicate that the loaded kernel image is used for jumping back. Now, only the i386 architecture is supported. Signed-off-by: Huang Ying Acked-by: Vivek Goyal Cc: "Eric W. Biederman" Cc: Pavel Machek Cc: Nigel Cunningham Cc: "Rafael J. Wysocki" Cc: Ingo Molnar Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/kernel/machine_kexec.c | 2 +- arch/sh/kernel/machine_kexec.c | 2 +- arch/x86/Kconfig | 7 ++ arch/x86/kernel/machine_kexec_32.c | 27 ++++-- arch/x86/kernel/machine_kexec_64.c | 2 +- arch/x86/kernel/relocate_kernel_32.S | 174 ++++++++++++++++++++++++++++++----- include/asm-x86/kexec.h | 18 ++-- include/linux/kexec.h | 17 +++- kernel/kexec.c | 57 ++++++++++++ kernel/sys.c | 31 ++----- 10 files changed, 269 insertions(+), 68 deletions(-) (limited to 'arch/x86') diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index 29a0e039d43..aab76887a84 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -48,7 +48,7 @@ void machine_kexec_cleanup(struct kimage *image) * Do not allocate memory (or fail in any way) in machine_kexec(). * We are past the point of no return, committed to rebooting now. */ -NORET_TYPE void machine_kexec(struct kimage *image) +void machine_kexec(struct kimage *image) { if (ppc_md.machine_kexec) ppc_md.machine_kexec(image); diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c index 5c17de51987..ec1eadce4aa 100644 --- a/arch/sh/kernel/machine_kexec.c +++ b/arch/sh/kernel/machine_kexec.c @@ -70,7 +70,7 @@ static void kexec_info(struct kimage *image) * Do not allocate memory (or fail in any way) in machine_kexec(). * We are past the point of no return, committed to rebooting now. */ -NORET_TYPE void machine_kexec(struct kimage *image) +void machine_kexec(struct kimage *image) { unsigned long page_list; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index e3cba0b4560..7ecb679f013 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1279,6 +1279,13 @@ config CRASH_DUMP (CONFIG_RELOCATABLE=y). For more details see Documentation/kdump/kdump.txt +config KEXEC_JUMP + bool "kexec jump (EXPERIMENTAL)" + depends on EXPERIMENTAL + depends on KEXEC && PM_SLEEP && X86_32 + help + Invoke code in physical address mode via KEXEC + config PHYSICAL_START hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP) default "0x1000000" if X86_NUMAQ diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 8864230d55a..2b67609d0a1 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -22,6 +22,7 @@ #include #include #include +#include #define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE))) static u32 kexec_pgd[1024] PAGE_ALIGNED; @@ -85,10 +86,12 @@ static void load_segments(void) * reboot code buffer to allow us to avoid allocations * later. * - * Currently nothing. + * Make control page executable. */ int machine_kexec_prepare(struct kimage *image) { + if (nx_enabled) + set_pages_x(image->control_code_page, 1); return 0; } @@ -98,16 +101,24 @@ int machine_kexec_prepare(struct kimage *image) */ void machine_kexec_cleanup(struct kimage *image) { + if (nx_enabled) + set_pages_nx(image->control_code_page, 1); } /* * Do not allocate memory (or fail in any way) in machine_kexec().
* We are past the point of no return, committed to rebooting now. */ -NORET_TYPE void machine_kexec(struct kimage *image) +void machine_kexec(struct kimage *image) { unsigned long page_list[PAGES_NR]; void *control_page; + asmlinkage unsigned long + (*relocate_kernel_ptr)(unsigned long indirection_page, + unsigned long control_page, + unsigned long start_address, + unsigned int has_pae, + unsigned int preserve_context); tracer_disable(); @@ -115,10 +126,11 @@ NORET_TYPE void machine_kexec(struct kimage *image) local_irq_disable(); control_page = page_address(image->control_code_page); - memcpy(control_page, relocate_kernel, PAGE_SIZE); + memcpy(control_page, relocate_kernel, PAGE_SIZE/2); + relocate_kernel_ptr = control_page; page_list[PA_CONTROL_PAGE] = __pa(control_page); - page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel; + page_list[VA_CONTROL_PAGE] = (unsigned long)control_page; page_list[PA_PGD] = __pa(kexec_pgd); page_list[VA_PGD] = (unsigned long)kexec_pgd; #ifdef CONFIG_X86_PAE @@ -131,6 +143,7 @@ NORET_TYPE void machine_kexec(struct kimage *image) page_list[VA_PTE_0] = (unsigned long)kexec_pte0; page_list[PA_PTE_1] = __pa(kexec_pte1); page_list[VA_PTE_1] = (unsigned long)kexec_pte1; + page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) << PAGE_SHIFT); /* The segment registers are funny things, they have both a * visible and an invisible part. Whenever the visible part is @@ -149,8 +162,10 @@ NORET_TYPE void machine_kexec(struct kimage *image) set_idt(phys_to_virt(0),0); /* now call it */ - relocate_kernel((unsigned long)image->head, (unsigned long)page_list, - image->start, cpu_has_pae); + image->start = relocate_kernel_ptr((unsigned long)image->head, + (unsigned long)page_list, + image->start, cpu_has_pae, + image->preserve_context); } void arch_crash_save_vmcoreinfo(void) diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 9dd9262693a..c43caa3a91f 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -181,7 +181,7 @@ void machine_kexec_cleanup(struct kimage *image) * Do not allocate memory (or fail in any way) in machine_kexec(). * We are past the point of no return, committed to rebooting now. 
*/ -NORET_TYPE void machine_kexec(struct kimage *image) +void machine_kexec(struct kimage *image) { unsigned long page_list[PAGES_NR]; void *control_page; diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S index c30fe25d470..703310a9902 100644 --- a/arch/x86/kernel/relocate_kernel_32.S +++ b/arch/x86/kernel/relocate_kernel_32.S @@ -20,11 +20,44 @@ #define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) #define PAE_PGD_ATTR (_PAGE_PRESENT) +/* control_page + PAGE_SIZE/2 ~ control_page + PAGE_SIZE * 3/4 are + * used to save some data for jumping back + */ +#define DATA(offset) (PAGE_SIZE/2+(offset)) + +/* Minimal CPU state */ +#define ESP DATA(0x0) +#define CR0 DATA(0x4) +#define CR3 DATA(0x8) +#define CR4 DATA(0xc) + +/* other data */ +#define CP_VA_CONTROL_PAGE DATA(0x10) +#define CP_PA_PGD DATA(0x14) +#define CP_PA_SWAP_PAGE DATA(0x18) +#define CP_PA_BACKUP_PAGES_MAP DATA(0x1c) + .text .align PAGE_SIZE .globl relocate_kernel relocate_kernel: - movl 8(%esp), %ebp /* list of pages */ + /* Save the CPU context, used for jumping back */ + + pushl %ebx + pushl %esi + pushl %edi + pushl %ebp + pushf + + movl 20+8(%esp), %ebp /* list of pages */ + movl PTR(VA_CONTROL_PAGE)(%ebp), %edi + movl %esp, ESP(%edi) + movl %cr0, %eax + movl %eax, CR0(%edi) + movl %cr3, %eax + movl %eax, CR3(%edi) + movl %cr4, %eax + movl %eax, CR4(%edi) #ifdef CONFIG_X86_PAE /* map the control page at its virtual address */ @@ -138,15 +171,25 @@ relocate_kernel: relocate_new_kernel: /* read the arguments and say goodbye to the stack */ - movl 4(%esp), %ebx /* page_list */ - movl 8(%esp), %ebp /* list of pages */ - movl 12(%esp), %edx /* start address */ - movl 16(%esp), %ecx /* cpu_has_pae */ + movl 20+4(%esp), %ebx /* page_list */ + movl 20+8(%esp), %ebp /* list of pages */ + movl 20+12(%esp), %edx /* start address */ + movl 20+16(%esp), %ecx /* cpu_has_pae */ + movl 20+20(%esp), %esi /* preserve_context */ /* zero out flags, and disable interrupts */ pushl $0 popfl + /* save some information for jumping back */ + movl PTR(VA_CONTROL_PAGE)(%ebp), %edi + movl %edi, CP_VA_CONTROL_PAGE(%edi) + movl PTR(PA_PGD)(%ebp), %eax + movl %eax, CP_PA_PGD(%edi) + movl PTR(PA_SWAP_PAGE)(%ebp), %eax + movl %eax, CP_PA_SWAP_PAGE(%edi) + movl %ebx, CP_PA_BACKUP_PAGES_MAP(%edi) + /* get physical address of control page now */ /* this is impossible after page table switch */ movl PTR(PA_CONTROL_PAGE)(%ebp), %edi @@ -197,8 +240,90 @@ identity_mapped: xorl %eax, %eax movl %eax, %cr3 + movl CP_PA_SWAP_PAGE(%edi), %eax + pushl %eax + pushl %ebx + call swap_pages + addl $8, %esp + + /* To be certain of avoiding problems with self-modifying code + * I need to execute a serializing instruction here. + * So I flush the TLB, it's handy, and not processor dependent. 
+ */ + xorl %eax, %eax + movl %eax, %cr3 + + /* set all of the registers to known values */ + /* leave %esp alone */ + + testl %esi, %esi + jnz 1f + xorl %edi, %edi + xorl %eax, %eax + xorl %ebx, %ebx + xorl %ecx, %ecx + xorl %edx, %edx + xorl %esi, %esi + xorl %ebp, %ebp + ret +1: + popl %edx + movl CP_PA_SWAP_PAGE(%edi), %esp + addl $PAGE_SIZE, %esp +2: + call *%edx + + /* get the re-entry point of the peer system */ + movl 0(%esp), %ebp + call 1f +1: + popl %ebx + subl $(1b - relocate_kernel), %ebx + movl CP_VA_CONTROL_PAGE(%ebx), %edi + lea PAGE_SIZE(%ebx), %esp + movl CP_PA_SWAP_PAGE(%ebx), %eax + movl CP_PA_BACKUP_PAGES_MAP(%ebx), %edx + pushl %eax + pushl %edx + call swap_pages + addl $8, %esp + movl CP_PA_PGD(%ebx), %eax + movl %eax, %cr3 + movl %cr0, %eax + orl $(1<<31), %eax + movl %eax, %cr0 + lea PAGE_SIZE(%edi), %esp + movl %edi, %eax + addl $(virtual_mapped - relocate_kernel), %eax + pushl %eax + ret + +virtual_mapped: + movl CR4(%edi), %eax + movl %eax, %cr4 + movl CR3(%edi), %eax + movl %eax, %cr3 + movl CR0(%edi), %eax + movl %eax, %cr0 + movl ESP(%edi), %esp + movl %ebp, %eax + + popf + popl %ebp + popl %edi + popl %esi + popl %ebx + ret + /* Do the copies */ - movl %ebx, %ecx +swap_pages: + movl 8(%esp), %edx + movl 4(%esp), %ecx + pushl %ebp + pushl %ebx + pushl %edi + pushl %esi + movl %ecx, %ebx jmp 1f 0: /* top, read another word from the indirection page */ @@ -226,27 +351,28 @@ identity_mapped: movl %ecx, %esi /* For every source page do a copy */ andl $0xfffff000, %esi + movl %edi, %eax + movl %esi, %ebp + + movl %edx, %edi movl $1024, %ecx rep ; movsl - jmp 0b -3: - - /* To be certain of avoiding problems with self-modifying code - * I need to execute a serializing instruction here. - * So I flush the TLB, it's handy, and not processor dependent. 
- */ - xorl %eax, %eax - movl %eax, %cr3 + movl %ebp, %edi + movl %eax, %esi + movl $1024, %ecx + rep ; movsl - /* set all of the registers to known values */ - /* leave %esp alone */ + movl %eax, %edi + movl %edx, %esi + movl $1024, %ecx + rep ; movsl - xorl %eax, %eax - xorl %ebx, %ebx - xorl %ecx, %ecx - xorl %edx, %edx - xorl %esi, %esi - xorl %edi, %edi - xorl %ebp, %ebp + lea PAGE_SIZE(%ebp), %esi + jmp 0b +3: + popl %esi + popl %edi + popl %ebx + popl %ebp ret diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h index 8f855a15f64..c0e52a14fd4 100644 --- a/include/asm-x86/kexec.h +++ b/include/asm-x86/kexec.h @@ -10,14 +10,15 @@ # define VA_PTE_0 5 # define PA_PTE_1 6 # define VA_PTE_1 7 +# define PA_SWAP_PAGE 8 # ifdef CONFIG_X86_PAE -# define PA_PMD_0 8 -# define VA_PMD_0 9 -# define PA_PMD_1 10 -# define VA_PMD_1 11 -# define PAGES_NR 12 +# define PA_PMD_0 9 +# define VA_PMD_0 10 +# define PA_PMD_1 11 +# define VA_PMD_1 12 +# define PAGES_NR 13 # else -# define PAGES_NR 8 +# define PAGES_NR 9 # endif #else # define PA_CONTROL_PAGE 0 @@ -152,11 +153,12 @@ static inline void crash_setup_regs(struct pt_regs *newregs, } #ifdef CONFIG_X86_32 -asmlinkage NORET_TYPE void +asmlinkage unsigned long relocate_kernel(unsigned long indirection_page, unsigned long control_page, unsigned long start_address, - unsigned int has_pae) ATTRIB_NORET; + unsigned int has_pae, + unsigned int preserve_context); #else NORET_TYPE void relocate_kernel(unsigned long indirection_page, diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 3265968cd2c..82f88a8a827 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -83,6 +83,7 @@ struct kimage { unsigned long start; struct page *control_code_page; + struct page *swap_page; unsigned long nr_segments; struct kexec_segment segment[KEXEC_SEGMENT_MAX]; @@ -98,18 +99,20 @@ struct kimage { unsigned int type : 1; #define KEXEC_TYPE_DEFAULT 0 #define KEXEC_TYPE_CRASH 1 + unsigned int preserve_context : 1; }; /* kexec interface functions */ -extern NORET_TYPE void machine_kexec(struct kimage *image) ATTRIB_NORET; +extern void machine_kexec(struct kimage *image); extern int machine_kexec_prepare(struct kimage *image); extern void machine_kexec_cleanup(struct kimage *image); extern asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, struct kexec_segment __user *segments, unsigned long flags); +extern int kernel_kexec(void); #ifdef CONFIG_COMPAT extern asmlinkage long compat_sys_kexec_load(unsigned long entry, unsigned long nr_segments, @@ -156,8 +159,9 @@ extern struct kimage *kexec_crash_image; #define kexec_flush_icache_page(page) #endif -#define KEXEC_ON_CRASH 0x00000001 -#define KEXEC_ARCH_MASK 0xffff0000 +#define KEXEC_ON_CRASH 0x00000001 +#define KEXEC_PRESERVE_CONTEXT 0x00000002 +#define KEXEC_ARCH_MASK 0xffff0000 /* These values match the ELF architecture values. * Unless there is a good reason that should continue to be the case. 
@@ -174,7 +178,12 @@ extern struct kimage *kexec_crash_image; #define KEXEC_ARCH_MIPS_LE (10 << 16) #define KEXEC_ARCH_MIPS ( 8 << 16) -#define KEXEC_FLAGS (KEXEC_ON_CRASH) /* List of defined/legal kexec flags */ +/* List of defined/legal kexec flags */ +#ifndef CONFIG_KEXEC_JUMP +#define KEXEC_FLAGS KEXEC_ON_CRASH +#else +#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT) +#endif #define VMCOREINFO_BYTES (4096) #define VMCOREINFO_NOTE_NAME "VMCOREINFO" diff --git a/kernel/kexec.c b/kernel/kexec.c index 6db42ff8d52..a0d920915b3 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -24,6 +24,8 @@ #include #include #include +#include +#include #include #include @@ -242,6 +244,12 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, goto out; } + image->swap_page = kimage_alloc_control_pages(image, 0); + if (!image->swap_page) { + printk(KERN_ERR "Could not allocate swap buffer\n"); + goto out; + } + result = 0; out: if (result == 0) @@ -986,6 +994,8 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, if (result) goto out; + if (flags & KEXEC_PRESERVE_CONTEXT) + image->preserve_context = 1; result = machine_kexec_prepare(image); if (result) goto out; @@ -1411,3 +1421,50 @@ static int __init crash_save_vmcoreinfo_init(void) } module_init(crash_save_vmcoreinfo_init) + +/** + * kernel_kexec - reboot the system + * + * Move into place and start executing a preloaded standalone + * executable. If nothing was preloaded return an error. + */ +int kernel_kexec(void) +{ + int error = 0; + + if (xchg(&kexec_lock, 1)) + return -EBUSY; + if (!kexec_image) { + error = -EINVAL; + goto Unlock; + } + + if (kexec_image->preserve_context) { +#ifdef CONFIG_KEXEC_JUMP + local_irq_disable(); + save_processor_state(); +#endif + } else { + blocking_notifier_call_chain(&reboot_notifier_list, + SYS_RESTART, NULL); + system_state = SYSTEM_RESTART; + device_shutdown(); + sysdev_shutdown(); + printk(KERN_EMERG "Starting new kernel\n"); + machine_shutdown(); + } + + machine_kexec(kexec_image); + + if (kexec_image->preserve_context) { +#ifdef CONFIG_KEXEC_JUMP + restore_processor_state(); + local_irq_enable(); +#endif + } + + Unlock: + xchg(&kexec_lock, 0); + + return error; +} diff --git a/kernel/sys.c b/kernel/sys.c index 0c9d3fa1f5f..c01858090a9 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -301,26 +301,6 @@ void kernel_restart(char *cmd) } EXPORT_SYMBOL_GPL(kernel_restart); -/** - * kernel_kexec - reboot the system - * - * Move into place and start executing a preloaded standalone - * executable. If nothing was preloaded return an error. 
- */ -static void kernel_kexec(void) -{ -#ifdef CONFIG_KEXEC - struct kimage *image; - image = xchg(&kexec_image, NULL); - if (!image) - return; - kernel_restart_prepare(NULL); - printk(KERN_EMERG "Starting new kernel\n"); - machine_shutdown(); - machine_kexec(image); -#endif -} - static void kernel_shutdown_prepare(enum system_states state) { blocking_notifier_call_chain(&reboot_notifier_list, @@ -425,10 +405,15 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user kernel_restart(buffer); break; +#ifdef CONFIG_KEXEC case LINUX_REBOOT_CMD_KEXEC: - kernel_kexec(); - unlock_kernel(); - return -EINVAL; + { + int ret; + ret = kernel_kexec(); + unlock_kernel(); + return ret; + } +#endif #ifdef CONFIG_HIBERNATION case LINUX_REBOOT_CMD_SW_SUSPEND: -- cgit v1.2.3-70-g09d2 From 89081d17f7bb81d89fa1aa9b70f821c5cf4d39e9 Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Fri, 25 Jul 2008 19:45:10 -0700 Subject: kexec jump: save/restore device state This patch implements device state save/restore before and after kexec. This patch, together with the features in the kexec jump patch, can be used for the following: - A simple hibernation implementation without ACPI support. You can kexec a hibernating kernel, save the memory image of the original system and shut down the system. When resuming, you restore the memory image of the original system via an ordinary kexec load, then jump back. - Kernel/system debugging through system snapshots. You can make a system snapshot, jump back, do something and make another system snapshot. - Cooperative multi-kernel/system. With kexec jump, you can switch between several kernels/systems quickly without the boot process, except for the first time. This is like swapping a whole kernel/system out/in. - A general method to call a program in physical mode (paging turned off). This can be used to invoke BIOS code under Linux. The following user-space tools can be used with kexec jump: - kexec-tools needs to be patched to support kexec jump. The patches and the precompiled kexec can be downloaded from the following URLs: source: http://khibernation.sourceforge.net/download/release_v10/kexec-tools/kexec-tools-src_git_kh10.tar.bz2 patches: http://khibernation.sourceforge.net/download/release_v10/kexec-tools/kexec-tools-patches_git_kh10.tar.bz2 binary: http://khibernation.sourceforge.net/download/release_v10/kexec-tools/kexec_git_kh10 - makedumpfile with patches is used as the memory image saving tool; it can exclude free pages from the original kernel memory image file. The patches and the precompiled makedumpfile can be downloaded from the following URLs: source: http://khibernation.sourceforge.net/download/release_v10/makedumpfile/makedumpfile-src_cvs_kh10.tar.bz2 patches: http://khibernation.sourceforge.net/download/release_v10/makedumpfile/makedumpfile-patches_cvs_kh10.tar.bz2 binary: http://khibernation.sourceforge.net/download/release_v10/makedumpfile/makedumpfile_cvs_kh10 - An initramfs image can be used as the root file system of the kexeced kernel. An initramfs image built with "BuildRoot" can be downloaded from the following URL: initramfs image: http://khibernation.sourceforge.net/download/release_v10/initramfs/rootfs_cvs_kh10.gz All user space tools above are included in the initramfs image. Usage example of simple hibernation: 1. Compile and install the patched kernel with the following options selected: CONFIG_X86_32=y CONFIG_RELOCATABLE=y CONFIG_KEXEC=y CONFIG_CRASH_DUMP=y CONFIG_PM=y CONFIG_HIBERNATION=y CONFIG_KEXEC_JUMP=y 2.
Build an initramfs image that contains kexec-tools and makedumpfile, or download the pre-built initramfs image, called rootfs.gz in the following text. 3. Prepare a partition to save the memory image of the original kernel, called the hibernating partition in the following text. 4. Boot the kernel compiled in step 1 (kernel A). 5. In kernel A, load the kernel compiled in step 1 (kernel B) with /sbin/kexec. The shell command line can be as follows: /sbin/kexec --load-preserve-context /boot/bzImage --mem-min=0x100000 --mem-max=0xffffff --initrd=rootfs.gz 6. Boot kernel B with the following shell command line: /sbin/kexec -e 7. Kernel B will boot as a normal kexec. In kernel B, the memory image of kernel A can be saved into the hibernating partition as follows: jump_back_entry=`cat /proc/cmdline | tr ' ' '\n' | grep kexec_jump_back_entry | cut -d '=' -f 2` echo $jump_back_entry > kexec_jump_back_entry cp /proc/vmcore dump.elf Then you can shut down the machine as normal. 8. Boot the kernel compiled in step 1 (kernel C). Use the rootfs.gz as the root file system. 9. In kernel C, load the memory image of kernel A as follows: /sbin/kexec -l --args-none --entry=`cat kexec_jump_back_entry` dump.elf 10. Jump back to kernel A as follows: /sbin/kexec -e Then, kernel A is resumed. Implementation point: to support jumping between two kernels, before jumping to (executing) the new kernel and jumping back to the original kernel, the devices are put into a quiescent state, and the state of the devices and CPU is saved. After jumping back from the kexeced kernel and jumping to the new kernel, the state of the devices and CPU is restored accordingly. The device/CPU state save/restore code of software suspend is called to implement the corresponding functionality. Known issues: - Because the segment number supported by sys_kexec_load is limited, a hibernation image with many segments may not be loadable. This is planned to be addressed by adding a new flag to sys_kexec_load so that an image can be loaded with multiple sys_kexec_load invocations. Now, only the i386 architecture is supported. Signed-off-by: Huang Ying Acked-by: Vivek Goyal Cc: "Eric W. Biederman" Cc: Pavel Machek Cc: Nigel Cunningham Cc: "Rafael J.
Wysocki" Cc: Ingo Molnar Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/Kconfig | 5 +++-- arch/x86/kernel/machine_kexec_32.c | 12 ++++++++++++ include/linux/suspend.h | 2 ++ kernel/kexec.c | 39 ++++++++++++++++++++++++++++++++++++++ kernel/power/power.h | 2 -- 5 files changed, 56 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7ecb679f013..6b2debfabdd 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1282,9 +1282,10 @@ config CRASH_DUMP config KEXEC_JUMP bool "kexec jump (EXPERIMENTAL)" depends on EXPERIMENTAL - depends on KEXEC && PM_SLEEP && X86_32 + depends on KEXEC && HIBERNATION && X86_32 help - Invoke code in physical address mode via KEXEC + Jump between original kernel and kexeced kernel and invoke + code in physical address mode via KEXEC config PHYSICAL_START hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP) diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 2b67609d0a1..9fe478d9840 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -125,6 +125,18 @@ void machine_kexec(struct kimage *image) /* Interrupts aren't acceptable while we reboot */ local_irq_disable(); + if (image->preserve_context) { +#ifdef CONFIG_X86_IO_APIC + /* We need to put APICs in legacy mode so that we can + * get timer interrupts in second kernel. kexec/kdump + * paths already have calls to disable_IO_APIC() in + * one form or other. kexec jump path also need + * one. + */ + disable_IO_APIC(); +#endif + } + control_page = page_address(image->control_code_page); memcpy(control_page, relocate_kernel, PAGE_SIZE/2); diff --git a/include/linux/suspend.h b/include/linux/suspend.h index e8e69159af7..c6343509597 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -278,4 +278,6 @@ static inline void register_nosave_region_late(unsigned long b, unsigned long e) } #endif +extern struct mutex pm_mutex; + #endif /* _LINUX_SUSPEND_H */ diff --git a/kernel/kexec.c b/kernel/kexec.c index a0d920915b3..c8a4370e2a3 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -26,6 +26,10 @@ #include #include #include +#include +#include +#include +#include #include #include @@ -1441,7 +1445,31 @@ int kernel_kexec(void) if (kexec_image->preserve_context) { #ifdef CONFIG_KEXEC_JUMP + mutex_lock(&pm_mutex); + pm_prepare_console(); + error = freeze_processes(); + if (error) { + error = -EBUSY; + goto Restore_console; + } + suspend_console(); + error = device_suspend(PMSG_FREEZE); + if (error) + goto Resume_console; + error = disable_nonboot_cpus(); + if (error) + goto Resume_devices; local_irq_disable(); + /* At this point, device_suspend() has been called, + * but *not* device_power_down(). We *must* + * device_power_down() now. Otherwise, drivers for + * some devices (e.g. interrupt controllers) become + * desynchronized with the actual state of the + * hardware at resume time, and evil weirdness ensues. 
+ */ + error = device_power_down(PMSG_FREEZE); + if (error) + goto Enable_irqs; save_processor_state(); #endif } else { @@ -1459,7 +1487,18 @@ if (kexec_image->preserve_context) { #ifdef CONFIG_KEXEC_JUMP restore_processor_state(); + device_power_up(PMSG_RESTORE); + Enable_irqs: local_irq_enable(); + enable_nonboot_cpus(); + Resume_devices: + device_resume(PMSG_RESTORE); + Resume_console: + resume_console(); + thaw_processes(); + Restore_console: + pm_restore_console(); + mutex_unlock(&pm_mutex); #endif } diff --git a/kernel/power/power.h b/kernel/power/power.h index 700f44ec840..acc0c101dbd 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -53,8 +53,6 @@ extern int hibernation_platform_enter(void); extern int pfn_is_nosave(unsigned long); -extern struct mutex pm_mutex; - #define power_attr(_name) \ static struct kobj_attribute _name##_attr = { \ .attr = { \ -- cgit v1.2.3-70-g09d2 From 8174c430e445a93016ef18f717fe570214fa38bf Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Fri, 25 Jul 2008 19:45:24 -0700 Subject: x86: lockless get_user_pages_fast() Implement get_user_pages_fast without locking in the fastpath on x86. Do an optimistic lockless pagetable walk, without taking any page table locks or even mmap_sem. Page table existence is guaranteed by turning interrupts off (combined with the fact that we're always looking up the current mm, this means we can do the lockless page table walk within the constraints of the TLB shootdown design). Basically we can do this lockless pagetable walk in a similar manner to the way the CPU's pagetable walker does not have to take any locks to find present ptes. This patch (combined with the subsequent ones to convert direct IO to use it) was found to give about 10% performance improvement on a 2 socket 8 core Intel Xeon system running an OLTP workload on DB2 v9.5: "To test the effects of the patch, an OLTP workload was run on an IBM x3850 M2 server with 2 processors (quad-core Intel Xeon processors at 2.93 GHz) using IBM DB2 v9.5 running Linux 2.6.24rc7 kernel. Comparing runs with and without the patch resulted in an overall performance benefit of ~9.8%. Correspondingly, oprofiles showed that samples from __up_read and __down_read routines that is seen during thread contention for system resources was reduced from 2.8% down to .05%. Monitoring the /proc/vmstat output from the patched run showed that the counter for fast_gup contained a very high number while the fast_gup_slow value was zero." (fast_gup is the old name for get_user_pages_fast, fast_gup_slow is a counter we had for the number of times the slowpath was invoked). The main reason for the improvement is that DB2 has multiple threads each issuing direct-IO. Direct-IO uses get_user_pages, and thus the threads contend the mmap_sem cacheline, and can also contend on page table locks. I would anticipate larger performance gains on larger systems, however I think DB2 uses an adaptive mix of threads and processes, so it could be that thread contention remains pretty constant as machine size increases. In that case, we are stuck with "only" a 10% gain. The downside of using get_user_pages_fast is that if there is not a pte with the correct permissions for the access, we end up falling back to get_user_pages and so get_user_pages_fast is a bit of extra work. However this should not be the common case in most performance critical code.
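As a usage illustration (a hypothetical caller, not part of this patch; the function and buffer names are made up), the new fastpath added below can be driven like this:

	/*
	 * Sketch: pin the pages backing a user buffer, touch them, then
	 * drop the references. get_user_pages_fast() falls back to
	 * get_user_pages() internally when the lockless walk cannot
	 * complete. Needs linux/mm.h.
	 */
	static int touch_user_buffer(unsigned long user_addr, int nr_pages)
	{
		struct page *pages[16];
		int i, nr;

		if (nr_pages > 16)
			return -EINVAL;

		nr = get_user_pages_fast(user_addr, nr_pages, 1 /* write */, pages);
		if (nr < 0)
			return nr;			/* nothing was pinned */

		for (i = 0; i < nr; i++) {
			/* ... access the page, e.g. via kmap(pages[i]) ... */
			put_page(pages[i]);
		}
		return nr == nr_pages ? 0 : -EFAULT;	/* short pin */
	}

Note that the return value is the number of pages pinned, which can be smaller than requested; it is negative only when nothing at all could be pinned, so callers must handle a short count.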
[akpm@linux-foundation.org: coding-style fixes] [akpm@linux-foundation.org: build fix] [akpm@linux-foundation.org: Kconfig fix] [akpm@linux-foundation.org: Makefile fix/cleanup] [akpm@linux-foundation.org: warning fix] Signed-off-by: Nick Piggin Cc: Dave Kleikamp Cc: Andy Whitcroft Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Andi Kleen Cc: Dave Kleikamp Cc: Badari Pulavarty Cc: Zach Brown Cc: Jens Axboe Reviewed-by: Peter Zijlstra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/Kconfig | 1 + arch/x86/mm/Makefile | 1 + arch/x86/mm/gup.c | 258 ++++++++++++++++++++++++++++++++++++++++++++++ include/asm-x86/uaccess.h | 1 + mm/Kconfig | 3 + 5 files changed, 264 insertions(+) create mode 100644 arch/x86/mm/gup.c (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 6b2debfabdd..6bdde845818 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -22,6 +22,7 @@ config X86 select HAVE_IDE select HAVE_OPROFILE select HAVE_IOREMAP_PROT + select HAVE_GET_USER_PAGES_FAST select HAVE_KPROBES select ARCH_WANT_OPTIONAL_GPIOLIB if !X86_RDC321X select HAVE_KRETPROBES diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 1fbb844c3d7..2977ea37791 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -1,6 +1,7 @@ obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ pat.o pgtable.o +obj-$(CONFIG_HAVE_GET_USER_PAGES_FAST) += gup.o obj-$(CONFIG_X86_32) += pgtable_32.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c new file mode 100644 index 00000000000..6f733121f32 --- /dev/null +++ b/arch/x86/mm/gup.c @@ -0,0 +1,258 @@ +/* + * Lockless get_user_pages_fast for x86 + * + * Copyright (C) 2008 Nick Piggin + * Copyright (C) 2008 Novell Inc. + */ +#include +#include +#include +#include + +#include + +static inline pte_t gup_get_pte(pte_t *ptep) +{ +#ifndef CONFIG_X86_PAE + return *ptep; +#else + /* + * With get_user_pages_fast, we walk down the pagetables without taking + * any locks. For this we would like to load the pointers atomically, + * but that is not possible (without expensive cmpxchg8b) on PAE. What + * we do have is the guarantee that a pte will only either go from not + * present to present, or present to not present or both -- it will not + * switch to a completely different present page without a TLB flush in + * between; something that we are blocking by holding interrupts off. + * + * Setting ptes from not present to present goes: + * ptep->pte_high = h; + * smp_wmb(); + * ptep->pte_low = l; + * + * And present to not present goes: + * ptep->pte_low = 0; + * smp_wmb(); + * ptep->pte_high = 0; + * + * We must ensure here that the load of pte_low sees l iff pte_high + * sees h. We load pte_high *after* loading pte_low, which ensures we + * don't see an older value of pte_high. *Then* we recheck pte_low, + * which ensures that we haven't picked up a changed pte high. We might + * have got rubbish values from pte_low and pte_high, but we are + * guaranteed that pte_low will not have the present bit set *unless* + * it is 'l'. And get_user_pages_fast only operates on present ptes, so + * we're safe. + * + * gup_get_pte should not be used or copied outside gup.c without being + * very careful -- it does not atomically load the pte or anything that + * is likely to be useful for you.
+ */ + pte_t pte; + +retry: + pte.pte_low = ptep->pte_low; + smp_rmb(); + pte.pte_high = ptep->pte_high; + smp_rmb(); + if (unlikely(pte.pte_low != ptep->pte_low)) + goto retry; + + return pte; +#endif +} + +/* + * The performance critical leaf functions are made noinline otherwise gcc + * inlines everything into a single function which results in too much + * register pressure. + */ +static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, + unsigned long end, int write, struct page **pages, int *nr) +{ + unsigned long mask; + pte_t *ptep; + + mask = _PAGE_PRESENT|_PAGE_USER; + if (write) + mask |= _PAGE_RW; + + ptep = pte_offset_map(&pmd, addr); + do { + pte_t pte = gup_get_pte(ptep); + struct page *page; + + if ((pte_val(pte) & (mask | _PAGE_SPECIAL)) != mask) { + pte_unmap(ptep); + return 0; + } + VM_BUG_ON(!pfn_valid(pte_pfn(pte))); + page = pte_page(pte); + get_page(page); + pages[*nr] = page; + (*nr)++; + + } while (ptep++, addr += PAGE_SIZE, addr != end); + pte_unmap(ptep - 1); + + return 1; +} + +static inline void get_head_page_multiple(struct page *page, int nr) +{ + VM_BUG_ON(page != compound_head(page)); + VM_BUG_ON(page_count(page) == 0); + atomic_add(nr, &page->_count); +} + +static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr, + unsigned long end, int write, struct page **pages, int *nr) +{ + unsigned long mask; + pte_t pte = *(pte_t *)&pmd; + struct page *head, *page; + int refs; + + mask = _PAGE_PRESENT|_PAGE_USER; + if (write) + mask |= _PAGE_RW; + if ((pte_val(pte) & mask) != mask) + return 0; + /* hugepages are never "special" */ + VM_BUG_ON(pte_val(pte) & _PAGE_SPECIAL); + VM_BUG_ON(!pfn_valid(pte_pfn(pte))); + + refs = 0; + head = pte_page(pte); + page = head + ((addr & ~HPAGE_MASK) >> PAGE_SHIFT); + do { + VM_BUG_ON(compound_head(page) != head); + pages[*nr] = page; + (*nr)++; + page++; + refs++; + } while (addr += PAGE_SIZE, addr != end); + get_head_page_multiple(head, refs); + + return 1; +} + +static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, + int write, struct page **pages, int *nr) +{ + unsigned long next; + pmd_t *pmdp; + + pmdp = pmd_offset(&pud, addr); + do { + pmd_t pmd = *pmdp; + + next = pmd_addr_end(addr, end); + if (pmd_none(pmd)) + return 0; + if (unlikely(pmd_large(pmd))) { + if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) + return 0; + } else { + if (!gup_pte_range(pmd, addr, next, write, pages, nr)) + return 0; + } + } while (pmdp++, addr = next, addr != end); + + return 1; +} + +static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, + int write, struct page **pages, int *nr) +{ + unsigned long next; + pud_t *pudp; + + pudp = pud_offset(&pgd, addr); + do { + pud_t pud = *pudp; + + next = pud_addr_end(addr, end); + if (pud_none(pud)) + return 0; + if (!gup_pmd_range(pud, addr, next, write, pages, nr)) + return 0; + } while (pudp++, addr = next, addr != end); + + return 1; +} + +int get_user_pages_fast(unsigned long start, int nr_pages, int write, + struct page **pages) +{ + struct mm_struct *mm = current->mm; + unsigned long end = start + (nr_pages << PAGE_SHIFT); + unsigned long addr = start; + unsigned long next; + pgd_t *pgdp; + int nr = 0; + + if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, + start, nr_pages*PAGE_SIZE))) + goto slow_irqon; + + /* + * XXX: batch / limit 'nr', to avoid large irq off latency + * needs some instrumenting to determine the common sizes used by + * important workloads (eg. 
DB2), and whether limiting the batch size + * will decrease performance. + * + * It seems like we're in the clear for the moment. Direct-IO is + * the main guy that batches up lots of get_user_pages, and even + * they are limited to 64-at-a-time which is not so many. + */ + /* + * This doesn't prevent pagetable teardown, but does prevent + * the pagetables and pages from being freed on x86. + * + * So long as we atomically load page table pointers versus teardown + * (which we do on x86, with the above PAE exception), we can follow the + * address down to the page and take a ref on it. + */ + local_irq_disable(); + pgdp = pgd_offset(mm, addr); + do { + pgd_t pgd = *pgdp; + + next = pgd_addr_end(addr, end); + if (pgd_none(pgd)) + goto slow; + if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) + goto slow; + } while (pgdp++, addr = next, addr != end); + local_irq_enable(); + + VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT); + return nr; + + { + int ret; + +slow: + local_irq_enable(); +slow_irqon: + /* Try to get the remaining pages with get_user_pages */ + start += nr << PAGE_SHIFT; + pages += nr; + + down_read(&mm->mmap_sem); + ret = get_user_pages(current, mm, start, + (end - start) >> PAGE_SHIFT, write, 0, pages, NULL); + up_read(&mm->mmap_sem); + + /* Have to be a bit careful with return values */ + if (nr > 0) { + if (ret < 0) + ret = nr; + else + ret += nr; + } + + return ret; + } +} diff --git a/include/asm-x86/uaccess.h b/include/asm-x86/uaccess.h index f6fa4d841bb..5f702d1d521 100644 --- a/include/asm-x86/uaccess.h +++ b/include/asm-x86/uaccess.h @@ -451,3 +451,4 @@ extern struct movsl_mask { #endif #endif + diff --git a/mm/Kconfig b/mm/Kconfig index aa799007a11..efee5d379df 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -77,6 +77,9 @@ config FLAT_NODE_MEM_MAP def_bool y depends on !SPARSEMEM +config HAVE_GET_USER_PAGES_FAST + bool + # # Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's # to represent different areas of memory.
This variable allows -- cgit v1.2.3-70-g09d2 From 652ea695364142b2464744746beac206d050ef19 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Fri, 25 Jul 2008 19:45:27 -0700 Subject: x86: support 1GB hugepages with get_user_pages_lockless() Signed-off-by: Nick Piggin Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/gup.c | 43 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 6f733121f32..3085f25b435 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -124,7 +124,7 @@ static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr, refs = 0; head = pte_page(pte); - page = head + ((addr & ~HPAGE_MASK) >> PAGE_SHIFT); + page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); do { VM_BUG_ON(compound_head(page) != head); pages[*nr] = page; @@ -162,6 +162,38 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, return 1; } +static noinline int gup_huge_pud(pud_t pud, unsigned long addr, + unsigned long end, int write, struct page **pages, int *nr) +{ + unsigned long mask; + pte_t pte = *(pte_t *)&pud; + struct page *head, *page; + int refs; + + mask = _PAGE_PRESENT|_PAGE_USER; + if (write) + mask |= _PAGE_RW; + if ((pte_val(pte) & mask) != mask) + return 0; + /* hugepages are never "special" */ + VM_BUG_ON(pte_val(pte) & _PAGE_SPECIAL); + VM_BUG_ON(!pfn_valid(pte_pfn(pte))); + + refs = 0; + head = pte_page(pte); + page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT); + do { + VM_BUG_ON(compound_head(page) != head); + pages[*nr] = page; + (*nr)++; + page++; + refs++; + } while (addr += PAGE_SIZE, addr != end); + get_head_page_multiple(head, refs); + + return 1; +} + static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { @@ -175,8 +207,13 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, next = pud_addr_end(addr, end); if (pud_none(pud)) return 0; - if (!gup_pmd_range(pud, addr, next, write, pages, nr)) - return 0; + if (unlikely(pud_large(pud))) { + if (!gup_huge_pud(pud, addr, next, write, pages, nr)) + return 0; + } else { + if (!gup_pmd_range(pud, addr, next, write, pages, nr)) + return 0; + } } while (pudp++, addr = next, addr != end); return 1; -- cgit v1.2.3-70-g09d2 From 6341c393fcc37d58727865f1ee2f65e632e9d4f0 Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Fri, 25 Jul 2008 19:45:44 -0700 Subject: tracehook: exec This moves all the ptrace hooks related to exec into tracehook.h inlines. This also lifts the calls for tracing out of the binfmt load_binary hooks into search_binary_handler() after it calls into the binfmt module. This change has no effect, since all the binfmt modules' load_binary functions did the call at the end on success, and now search_binary_handler() does it immediately after return if successful. We consolidate the repeated code, and binfmt modules no longer need to import ptrace_notify(). 
Signed-off-by: Roland McGrath Cc: Oleg Nesterov Reviewed-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/ia32/ia32_aout.c | 6 ------ fs/binfmt_aout.c | 6 ------ fs/binfmt_elf.c | 6 ------ fs/binfmt_elf_fdpic.c | 7 ------- fs/binfmt_flat.c | 3 --- fs/binfmt_som.c | 2 -- fs/exec.c | 12 ++++-------- include/linux/tracehook.h | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 8 files changed, 50 insertions(+), 38 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 58cccb6483b..a0e1dbe67dc 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -441,12 +441,6 @@ beyond_if: regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0; set_fs(USER_DS); - if (unlikely(current->ptrace & PT_PTRACED)) { - if (current->ptrace & PT_TRACE_EXEC) - ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP); - else - send_sig(SIGTRAP, current, 0); - } return 0; } diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index ba4cddb92f1..204cfd1d767 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c @@ -444,12 +444,6 @@ beyond_if: regs->gp = ex.a_gpvalue; #endif start_thread(regs, ex.a_entry, current->mm->start_stack); - if (unlikely(current->ptrace & PT_PTRACED)) { - if (current->ptrace & PT_TRACE_EXEC) - ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP); - else - send_sig(SIGTRAP, current, 0); - } return 0; } diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 3b6ff854d98..655ed8d30a8 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1003,12 +1003,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) #endif start_thread(regs, elf_entry, bprm->p); - if (unlikely(current->ptrace & PT_PTRACED)) { - if (current->ptrace & PT_TRACE_EXEC) - ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP); - else - send_sig(SIGTRAP, current, 0); - } retval = 0; out: kfree(loc); diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 1b59b1edf26..fdeadab2f18 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -433,13 +433,6 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, entryaddr = interp_params.entry_addr ?: exec_params.entry_addr; start_thread(regs, entryaddr, current->mm->start_stack); - if (unlikely(current->ptrace & PT_PTRACED)) { - if (current->ptrace & PT_TRACE_EXEC) - ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP); - else - send_sig(SIGTRAP, current, 0); - } - retval = 0; error: diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 2cb1acda3a8..56372ecf169 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -920,9 +920,6 @@ static int load_flat_binary(struct linux_binprm * bprm, struct pt_regs * regs) start_thread(regs, start_addr, current->mm->start_stack); - if (current->ptrace & PT_PTRACED) - send_sig(SIGTRAP, current, 0); - return 0; } diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c index fdc36bfd6a7..68be580ba28 100644 --- a/fs/binfmt_som.c +++ b/fs/binfmt_som.c @@ -274,8 +274,6 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs) map_hpux_gateway_page(current,current->mm); start_thread_som(regs, som_entry, bprm->p); - if (current->ptrace & PT_PTRACED) - send_sig(SIGTRAP, current, 0); return 0; /* error cleanup */ diff --git a/fs/exec.c b/fs/exec.c index 5e559013e30..b8792a13153 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -42,13 +42,13 @@ #include #include #include -#include #include #include #include #include #include #include +#include #include #include @@ -1071,13 +1071,8 @@ 
EXPORT_SYMBOL(prepare_binprm); static int unsafe_exec(struct task_struct *p) { - int unsafe = 0; - if (p->ptrace & PT_PTRACED) { - if (p->ptrace & PT_PTRACE_CAP) - unsafe |= LSM_UNSAFE_PTRACE_CAP; - else - unsafe |= LSM_UNSAFE_PTRACE; - } + int unsafe = tracehook_unsafe_exec(p); + if (atomic_read(&p->fs->count) > 1 || atomic_read(&p->files->count) > 1 || atomic_read(&p->sighand->count) > 1) @@ -1214,6 +1209,7 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) read_unlock(&binfmt_lock); retval = fn(bprm, regs); if (retval >= 0) { + tracehook_report_exec(fmt, bprm, regs); put_binfmt(fmt); allow_write_access(bprm->file); if (bprm->file) diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index bea0f3eeff5..6276353709c 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -48,5 +48,51 @@ #include #include +#include +struct linux_binprm; + +/** + * tracehook_unsafe_exec - check for exec declared unsafe due to tracing + * @task: current task doing exec + * + * Return %LSM_UNSAFE_* bits applied to an exec because of tracing. + * + * Called with task_lock() held on @task. + */ +static inline int tracehook_unsafe_exec(struct task_struct *task) +{ + int unsafe = 0; + int ptrace = task_ptrace(task); + if (ptrace & PT_PTRACED) { + if (ptrace & PT_PTRACE_CAP) + unsafe |= LSM_UNSAFE_PTRACE_CAP; + else + unsafe |= LSM_UNSAFE_PTRACE; + } + return unsafe; +} + +/** + * tracehook_report_exec - a successful exec was completed + * @fmt: &struct linux_binfmt that performed the exec + * @bprm: &struct linux_binprm containing exec details + * @regs: user-mode register state + * + * An exec just completed, we are shortly going to return to user mode. + * The freshly initialized register state can be seen and changed in @regs. + * The name, file and other pointers in @bprm are still on hand to be + * inspected, but will be freed as soon as this returns. + * + * Called with no locks, but with some kernel resources held live + * and a reference on @fmt->module. + */ +static inline void tracehook_report_exec(struct linux_binfmt *fmt, + struct linux_binprm *bprm, + struct pt_regs *regs) +{ + if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) && + unlikely(task_ptrace(current) & PT_PTRACED)) + send_sig(SIGTRAP, current, 0); +} #endif /* */ -- cgit v1.2.3-70-g09d2 From 8dad322f5449010c14990dd6934878f576b2ee60 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 25 Jul 2008 19:46:11 -0700 Subject: x86: use generic show_mem() Remove arch-specific show_mem() in favor of the generic version. This also removes the following redundant information display: - pages in swapcache, printed by show_swap_cache_info() - dirty pages, writeback pages, mapped pages, slab pages, pagetable pages, printed by show_free_areas() where show_mem() calls show_free_areas(), which calls show_swap_cache_info(). Signed-off-by: Johannes Weiner Acked-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/init_64.c | 37 ------------------------------------- arch/x86/mm/pgtable_32.c | 47 ----------------------------------------------- 2 files changed, 84 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index ec37121f670..129618ca0ea 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -86,43 +86,6 @@ early_param("gbpages", parse_direct_gbpages_on); * around without checking the pgd every time. 
*/ -void show_mem(void) -{ - long i, total = 0, reserved = 0; - long shared = 0, cached = 0; - struct page *page; - pg_data_t *pgdat; - - printk(KERN_INFO "Mem-info:\n"); - show_free_areas(); - for_each_online_pgdat(pgdat) { - for (i = 0; i < pgdat->node_spanned_pages; ++i) { - /* - * This loop can take a while with 256 GB and - * 4k pages so defer the NMI watchdog: - */ - if (unlikely(i % MAX_ORDER_NR_PAGES == 0)) - touch_nmi_watchdog(); - - if (!pfn_valid(pgdat->node_start_pfn + i)) - continue; - - page = pfn_to_page(pgdat->node_start_pfn + i); - total++; - if (PageReserved(page)) - reserved++; - else if (PageSwapCache(page)) - cached++; - else if (page_count(page)) - shared += page_count(page) - 1; - } - } - printk(KERN_INFO "%lu pages of RAM\n", total); - printk(KERN_INFO "%lu reserved pages\n", reserved); - printk(KERN_INFO "%lu pages shared\n", shared); - printk(KERN_INFO "%lu pages swap cached\n", cached); -} - int after_bootmem; static __init void *spp_getpage(void) diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index b4becbf8c57..cab0abbd1eb 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c @@ -20,53 +20,6 @@ #include #include -void show_mem(void) -{ - int total = 0, reserved = 0; - int shared = 0, cached = 0; - int highmem = 0; - struct page *page; - pg_data_t *pgdat; - unsigned long i; - unsigned long flags; - - printk(KERN_INFO "Mem-info:\n"); - show_free_areas(); - for_each_online_pgdat(pgdat) { - pgdat_resize_lock(pgdat, &flags); - for (i = 0; i < pgdat->node_spanned_pages; ++i) { - if (unlikely(i % MAX_ORDER_NR_PAGES == 0)) - touch_nmi_watchdog(); - page = pgdat_page_nr(pgdat, i); - total++; - if (PageHighMem(page)) - highmem++; - if (PageReserved(page)) - reserved++; - else if (PageSwapCache(page)) - cached++; - else if (page_count(page)) - shared += page_count(page) - 1; - } - pgdat_resize_unlock(pgdat, &flags); - } - printk(KERN_INFO "%d pages of RAM\n", total); - printk(KERN_INFO "%d pages of HIGHMEM\n", highmem); - printk(KERN_INFO "%d reserved pages\n", reserved); - printk(KERN_INFO "%d pages shared\n", shared); - printk(KERN_INFO "%d pages swap cached\n", cached); - - printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY)); - printk(KERN_INFO "%lu pages writeback\n", - global_page_state(NR_WRITEBACK)); - printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED)); - printk(KERN_INFO "%lu pages slab\n", - global_page_state(NR_SLAB_RECLAIMABLE) + - global_page_state(NR_SLAB_UNRECLAIMABLE)); - printk(KERN_INFO "%lu pages pagetables\n", - global_page_state(NR_PAGETABLE)); -} - /* * Associate a virtual page frame with a given physical page frame * and protection flags for that frame. -- cgit v1.2.3-70-g09d2 From 5f4cb662a0a2533b45656607471571460310a5ca Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 14 Jul 2008 20:36:36 +0200 Subject: KVM: SVM: allow enabling/disabling NPT by reloading only the architecture module If NPT is enabled after loading both KVM modules on AMD and it should be disabled, both KVM modules must be reloaded. If only the architecture module is reloaded the behavior is undefined. With this patch it is possible to disable NPT only by reloading the kvm_amd module. 
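To make the new split concrete, here is a minimal sketch of the pattern this patch establishes (function and helper names are taken from the diff below; that the existing npt module parameter of kvm-amd is what drives npt_enabled is an assumption of this sketch):

/* Sketch: the architecture module now reports both outcomes to the
 * generic MMU code, so tdp_enabled is well defined after every reload
 * of kvm-amd alone, e.g. a reload with the npt parameter cleared. */
static __init int svm_hardware_setup_sketch(void)
{
	if (npt_enabled)
		kvm_enable_tdp();	/* generic code: tdp_enabled = true */
	else
		kvm_disable_tdp();	/* new helper: tdp_enabled = false */
	return 0;
}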
Signed-off-by: Joerg Roedel Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 6 ++++++ arch/x86/kvm/svm.c | 3 ++- include/asm-x86/kvm_host.h | 1 + 3 files changed, 9 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index b0e4ddca6c1..d087d9c4f2d 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1870,6 +1870,12 @@ void kvm_enable_tdp(void) } EXPORT_SYMBOL_GPL(kvm_enable_tdp); +void kvm_disable_tdp(void) +{ + tdp_enabled = false; +} +EXPORT_SYMBOL_GPL(kvm_disable_tdp); + static void free_mmu_pages(struct kvm_vcpu *vcpu) { struct kvm_mmu_page *sp; diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index b756e876dce..951b789cc91 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -453,7 +453,8 @@ static __init int svm_hardware_setup(void) if (npt_enabled) { printk(KERN_INFO "kvm: Nested Paging enabled\n"); kvm_enable_tdp(); - } + } else + kvm_disable_tdp(); return 0; diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h index fdde0bedaa9..bc34dc21f17 100644 --- a/include/asm-x86/kvm_host.h +++ b/include/asm-x86/kvm_host.h @@ -556,6 +556,7 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu); int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code); void kvm_enable_tdp(void); +void kvm_disable_tdp(void); int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); int complete_pio(struct kvm_vcpu *vcpu); -- cgit v1.2.3-70-g09d2 From 98899aa0e0bf5de05850082be0eb837058c09ea5 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 16 Jul 2008 19:07:10 -0300 Subject: KVM: task switch: segment base is linear address The segment base is always a linear address, so translate before accessing guest memory. Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9f1cdb011cf..cd687395e4e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3223,6 +3223,7 @@ static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu, static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, struct desc_struct *seg_desc) { + gpa_t gpa; struct descriptor_table dtable; u16 index = selector >> 3; @@ -3232,13 +3233,16 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc); return 1; } - return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8); + gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base); + gpa += index * 8; + return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8); } /* allowed just for 8 bytes segments */ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, struct desc_struct *seg_desc) { + gpa_t gpa; struct descriptor_table dtable; u16 index = selector >> 3; @@ -3246,7 +3250,9 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, if (dtable.limit < index * 8 + 7) return 1; - return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8); + gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base); + gpa += index * 8; + return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8); } static u32 get_tss_base_addr(struct kvm_vcpu *vcpu, @@ -3258,7 +3264,7 @@ static u32 get_tss_base_addr(struct kvm_vcpu *vcpu, base_addr |= (seg_desc->base1 << 16); base_addr |= (seg_desc->base2 << 24); - return base_addr; + return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr); } static int 
load_tss_segment32(struct kvm_vcpu *vcpu, -- cgit v1.2.3-70-g09d2 From 34198bf8426276a2ce1e97056a0f02d43637e5ae Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 16 Jul 2008 19:07:11 -0300 Subject: KVM: task switch: use seg regs provided by subarch instead of reading from GDT There is no guarantee that the old TSS descriptor in the GDT contains the proper base address. This is the case for Windows installation's reboot-via-triplefault. Use guest registers instead. Also translate the address properly. Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 93 ++++++++++++++++++------------------------------------ 1 file changed, 30 insertions(+), 63 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index cd687395e4e..27c6ece91da 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3267,54 +3267,6 @@ static u32 get_tss_base_addr(struct kvm_vcpu *vcpu, return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr); } -static int load_tss_segment32(struct kvm_vcpu *vcpu, - struct desc_struct *seg_desc, - struct tss_segment_32 *tss) -{ - u32 base_addr; - - base_addr = get_tss_base_addr(vcpu, seg_desc); - - return kvm_read_guest(vcpu->kvm, base_addr, tss, - sizeof(struct tss_segment_32)); -} - -static int save_tss_segment32(struct kvm_vcpu *vcpu, - struct desc_struct *seg_desc, - struct tss_segment_32 *tss) -{ - u32 base_addr; - - base_addr = get_tss_base_addr(vcpu, seg_desc); - - return kvm_write_guest(vcpu->kvm, base_addr, tss, - sizeof(struct tss_segment_32)); -} - -static int load_tss_segment16(struct kvm_vcpu *vcpu, - struct desc_struct *seg_desc, - struct tss_segment_16 *tss) -{ - u32 base_addr; - - base_addr = get_tss_base_addr(vcpu, seg_desc); - - return kvm_read_guest(vcpu->kvm, base_addr, tss, - sizeof(struct tss_segment_16)); -} - -static int save_tss_segment16(struct kvm_vcpu *vcpu, - struct desc_struct *seg_desc, - struct tss_segment_16 *tss) -{ - u32 base_addr; - - base_addr = get_tss_base_addr(vcpu, seg_desc); - - return kvm_write_guest(vcpu->kvm, base_addr, tss, - sizeof(struct tss_segment_16)); -} - static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg) { struct kvm_segment kvm_seg; @@ -3472,20 +3424,26 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu, } static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector, - struct desc_struct *cseg_desc, + u32 old_tss_base, struct desc_struct *nseg_desc) { struct tss_segment_16 tss_segment_16; int ret = 0; - if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16)) + if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16, + sizeof tss_segment_16)) goto out; save_state_to_tss16(vcpu, &tss_segment_16); - save_tss_segment16(vcpu, cseg_desc, &tss_segment_16); - if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16)) + if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16, + sizeof tss_segment_16)) goto out; + + if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc), + &tss_segment_16, sizeof tss_segment_16)) + goto out; + if (load_state_from_tss16(vcpu, &tss_segment_16)) goto out; @@ -3495,20 +3453,26 @@ out: } static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector, - struct desc_struct *cseg_desc, + u32 old_tss_base, struct desc_struct *nseg_desc) { struct tss_segment_32 tss_segment_32; int ret = 0; - if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32)) + if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32, + sizeof tss_segment_32)) goto out; save_state_to_tss32(vcpu, &tss_segment_32); - 
save_tss_segment32(vcpu, cseg_desc, &tss_segment_32); - if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32)) + if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32, + sizeof tss_segment_32)) + goto out; + + if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc), + &tss_segment_32, sizeof tss_segment_32)) goto out; + if (load_state_from_tss32(vcpu, &tss_segment_32)) goto out; @@ -3523,16 +3487,20 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) struct desc_struct cseg_desc; struct desc_struct nseg_desc; int ret = 0; + u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR); + u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR); - kvm_get_segment(vcpu, &tr_seg, VCPU_SREG_TR); + old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base); + /* FIXME: Handle errors. Failure to read either TSS or their + * descriptors should generate a pagefault. + */ if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc)) goto out; - if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc)) + if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc)) goto out; - if (reason != TASK_SWITCH_IRET) { int cpl; @@ -3550,8 +3518,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) { cseg_desc.type &= ~(1 << 1); //clear the B flag - save_guest_segment_descriptor(vcpu, tr_seg.selector, - &cseg_desc); + save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc); } if (reason == TASK_SWITCH_IRET) { @@ -3563,10 +3530,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) kvm_x86_ops->cache_regs(vcpu); if (nseg_desc.type & 8) - ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc, + ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base, &nseg_desc); else - ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc, + ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base, &nseg_desc); if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) { -- cgit v1.2.3-70-g09d2 From 577bdc496614ced56d999bbb425e85adf2386490 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sat, 19 Jul 2008 08:57:05 +0300 Subject: KVM: Avoid instruction emulation when event delivery is pending When an event (such as an interrupt) is injected, and the stack is shadowed (and therefore write protected), the guest will exit. The current code will see that the stack is shadowed and emulate a few instructions, each time postponing the injection. Eventually the injection may succeed, but at that time the guest may be unwilling to accept the interrupt (for example, the TPR may have changed). This occurs every once in a while during a Windows 2008 boot. Fix by unshadowing the fault address if the fault was due to an event injection. 
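The sequence being broken, as a hedged sketch (identifiers from the SVM hunk below; the VMX side keys off VECTORING_INFO_VALID_MASK in the same way):

/* Old behaviour: inject event -> #PF on the shadowed, write-protected
 * stack -> emulate a few instructions -> retry the injection -> ...
 * New behaviour: if the fault happened during event delivery, drop
 * the shadow page for the faulting address so delivery can complete. */
if (event_injection)
	kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);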
Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 1 + arch/x86/kvm/svm.c | 7 ++++++- arch/x86/kvm/vmx.c | 2 ++ 3 files changed, 9 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index d087d9c4f2d..2fa231923cf 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1814,6 +1814,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) spin_unlock(&vcpu->kvm->mmu_lock); return r; } +EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt); void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) { diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 951b789cc91..e2ee264740c 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1008,10 +1008,13 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) struct kvm *kvm = svm->vcpu.kvm; u64 fault_address; u32 error_code; + bool event_injection = false; if (!irqchip_in_kernel(kvm) && - is_external_interrupt(exit_int_info)) + is_external_interrupt(exit_int_info)) { + event_injection = true; push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK); + } fault_address = svm->vmcb->control.exit_info_2; error_code = svm->vmcb->control.exit_info_1; @@ -1025,6 +1028,8 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) (u32)fault_address, (u32)(fault_address >> 32), handler); + if (event_injection) + kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address); return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code); } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 0cac6370171..b918fc83435 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2298,6 +2298,8 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) cr2 = vmcs_readl(EXIT_QUALIFICATION); KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2, (u32)((u64)cr2 >> 32), handler); + if (vect_info & VECTORING_INFO_VALID_MASK) + kvm_mmu_unprotect_page_virt(vcpu, cr2); return kvm_mmu_page_fault(vcpu, cr2, error_code); } -- cgit v1.2.3-70-g09d2 From c93cd3a58845012df2d658fecd0ac99f7008d753 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Sat, 19 Jul 2008 19:08:07 -0300 Subject: KVM: task switch: translate guest segment limit to virt-extension byte granular field If 'g' is one then limit is 4kb granular. 
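A worked example of the scaling added by the hunk below (field names from the diff):

limit = seg_desc->limit0 | (seg_desc->limit << 16);	/* raw 20-bit limit */
if (seg_desc->g) {		/* granularity bit: limit counts 4 KB units */
	limit <<= 12;		/* convert page count to bytes */
	limit |= 0xfff;		/* the last page is included in full */
}
/* e.g. a raw limit of 0xfffff with g=1 becomes
 * (0xfffff << 12) | 0xfff = 0xffffffff, i.e. the full 4 GB - 1. */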
Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 27c6ece91da..5916191420c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3184,6 +3184,10 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector, kvm_desct->base |= seg_desc->base2 << 24; kvm_desct->limit = seg_desc->limit0; kvm_desct->limit |= seg_desc->limit << 16; + if (seg_desc->g) { + kvm_desct->limit <<= 12; + kvm_desct->limit |= 0xfff; + } kvm_desct->selector = selector; kvm_desct->type = seg_desc->type; kvm_desct->present = seg_desc->p; -- cgit v1.2.3-70-g09d2 From 5ec5726a16245138f5d5305b00a752acb5730076 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Wed, 16 Jul 2008 09:21:22 +0800 Subject: KVM: VMX: Fix bypass_guest_pf enabling when EPT is disabled via module parameter Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index b918fc83435..f71151d999e 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3305,7 +3305,7 @@ static int __init vmx_init(void) vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP); vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP); - if (cpu_has_vmx_ept()) + if (vm_need_ept()) bypass_guest_pf = 0; if (bypass_guest_pf) -- cgit v1.2.3-70-g09d2 From 5fdbcb9dd16f1e89ead127d3ee1a38e3a00cf1ea Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Wed, 16 Jul 2008 09:25:40 +0800 Subject: KVM: VMX: Fix undefined behaviour of EPT after reloading kvm-intel.ko Also move the set base/mask ptes calls to vmx_init(). Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f71151d999e..2a69773e3b2 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3118,15 +3118,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) return ERR_PTR(-ENOMEM); allocate_vpid(vmx); - if (id == 0 && vm_need_ept()) { - kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | - VMX_EPT_WRITABLE_MASK | - VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT); - kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK, - VMX_EPT_FAKE_DIRTY_MASK, 0ull, - VMX_EPT_EXECUTABLE_MASK); - kvm_enable_tdp(); - } err = kvm_vcpu_init(&vmx->vcpu, kvm, id); if (err) @@ -3305,8 +3296,17 @@ static int __init vmx_init(void) vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP); vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP); - if (vm_need_ept()) + if (vm_need_ept()) { bypass_guest_pf = 0; + kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | + VMX_EPT_WRITABLE_MASK | + VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT); + kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK, + VMX_EPT_FAKE_DIRTY_MASK, 0ull, + VMX_EPT_EXECUTABLE_MASK); + kvm_enable_tdp(); + } else + kvm_disable_tdp(); if (bypass_guest_pf) kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull); -- cgit v1.2.3-70-g09d2 From 583323b9d2f624884a8c9563fb5a4d795f39ab07 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 27 Jul 2008 21:43:11 +0200 Subject: x86: fix cpu hotplug on 32bit commit 3e9704739daf46a8ba6593d749c67b5f7cd633d2 ("x86: boot secondary cpus through initial_code") causes the kernel to crash when a CPU is brought online
after the read only sections have been write protected. The write to initial_code in do_boot_cpu() fails. Move initial_code to the .cpuinit.data section. Signed-off-by: Thomas Gleixner Acked-by: H. Peter Anvin --- arch/x86/kernel/head_32.S | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index f67e93441ca..a7010c3a377 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -456,9 +456,6 @@ is386: movl $2,%ecx # set MP 1: #endif /* CONFIG_SMP */ jmp *(initial_code) -.align 4 -ENTRY(initial_code) - .long i386_start_kernel /* * We depend on ET to be correct. This checks for 287/387. */ @@ -601,6 +598,11 @@ ignore_int: #endif iret +.section .cpuinit.data,"wa" +.align 4 +ENTRY(initial_code) + .long i386_start_kernel + .section .text /* * Real beginning of normal "text" segment -- cgit v1.2.3-70-g09d2 From e56b3bc7942982ac2589c942fb345e38bc7a341a Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 28 Jul 2008 11:32:33 -0700 Subject: cpu masks: optimize and clean up cpumask_of_cpu() Clean up and optimize cpumask_of_cpu(), by sharing all the zero words. Instead of stupidly generating all possible i=0...NR_CPUS 2^i patterns creating a huge array of constant bitmasks, realize that the zero words can be shared. In other words, on a 64-bit architecture, we only ever need 64 of these arrays - with a different bit set in one single word (with enough zero words around it so that we can create any bitmask by just offsetting in that big array). And then we just put enough zeroes around it that we can point every single cpumask to be one of those things. So when we have 4k CPUs, instead of having 4k arrays (of 4k bits each, with one bit set in each array - 2MB memory total), we have exactly 64 arrays instead, each 8k bits in size (64kB total). And then we just point cpumask(n) to the right position (which we can calculate dynamically). Once we have the right arrays, getting "cpumask(n)" ends up being: static inline const cpumask_t *get_cpu_mask(unsigned int cpu) { const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; p -= cpu / BITS_PER_LONG; return (const cpumask_t *)p; } This brings other advantages and simplifications as well: - we are not wasting memory that is just filled with a single bit in various different places - we don't need all those games to re-create the arrays in some dense format, because they're already going to be dense enough. If we compile a kernel for up to 4k CPUs, "wasting" that 64kB of memory is a non-issue (especially since by doing this "overlapping" trick we probably get better cache behaviour anyway). [ mingo@elte.hu: Converted Linus's mails into a commit. See: http://lkml.org/lkml/2008/7/27/156 http://lkml.org/lkml/2008/7/28/320 Also applied a family filter - which also has the side-effect of leaving out the bits where Linus calls me an idio...
Oh, never mind ;-) ] Signed-off-by: Ingo Molnar Cc: Rusty Russell Cc: Andrew Morton Cc: Al Viro Cc: Mike Travis Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup_percpu.c | 23 -------- include/linux/cpumask.h | 26 ++++++++- kernel/cpu.c | 128 +++++++---------------------------------- 3 files changed, 43 insertions(+), 134 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 1cd53dfcd30..76e305e064f 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -80,26 +80,6 @@ static void __init setup_per_cpu_maps(void) #endif } -#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP -/* - * Replace static cpumask_of_cpu_map in the initdata section, - * with one that's allocated sized by the possible number of cpus. - * - * (requires nr_cpu_ids to be initialized) - */ -static void __init setup_cpumask_of_cpu(void) -{ - int i; - - /* alloc_bootmem zeroes memory */ - cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids); - for (i = 0; i < nr_cpu_ids; i++) - cpu_set(i, cpumask_of_cpu_map[i]); -} -#else -static inline void setup_cpumask_of_cpu(void) { } -#endif - #ifdef CONFIG_X86_32 /* * Great future not-so-futuristic plan: make i386 and x86_64 do it @@ -199,9 +179,6 @@ void __init setup_per_cpu_areas(void) /* Setup node to cpumask map */ setup_node_to_cpumask_map(); - - /* Setup cpumask_of_cpu map */ - setup_cpumask_of_cpu(); } #endif diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 8fa3b6d4a32..96d0509fb8d 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -265,10 +265,30 @@ static inline void __cpus_shift_left(cpumask_t *dstp, bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); } +/* + * Special-case data structure for "single bit set only" constant CPU masks. + * + * We pre-generate all the 64 (or 32) possible bit positions, with enough + * padding to the left and the right, and return the constant pointer + * appropriately offset. + */ +extern const unsigned long + cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; + +static inline const cpumask_t *get_cpu_mask(unsigned int cpu) +{ + const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; + p -= cpu / BITS_PER_LONG; + return (const cpumask_t *)p; +} + +/* + * In cases where we take the address of the cpumask immediately, + * gcc optimizes it out (it's a constant) and there's no huge stack + * variable created: + */ +#define cpumask_of_cpu(cpu) ({ *get_cpu_mask(cpu); }) -/* cpumask_of_cpu_map[] is in kernel/cpu.c */ -extern const cpumask_t *cpumask_of_cpu_map; -#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu]) #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) diff --git a/kernel/cpu.c b/kernel/cpu.c index a35d8995dc8..06a8358bb41 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -462,115 +462,27 @@ out: #endif /* CONFIG_SMP */ -/* 64 bits of zeros, for initializers. 
*/ -#if BITS_PER_LONG == 32 -#define Z64 0, 0 -#else -#define Z64 0 -#endif +/* + * cpu_bit_bitmap[] is a special, "compressed" data structure that + * represents all NR_CPUS bits binary values of 1< 4 - CMI0(4), CMI0(5), CMI0(6), CMI0(7), -#endif -#if NR_CPUS > 8 - CMI0(8), CMI0(9), CMI0(10), CMI0(11), - CMI0(12), CMI0(13), CMI0(14), CMI0(15), -#endif -#if NR_CPUS > 16 - CMI0(16), CMI0(17), CMI0(18), CMI0(19), - CMI0(20), CMI0(21), CMI0(22), CMI0(23), - CMI0(24), CMI0(25), CMI0(26), CMI0(27), - CMI0(28), CMI0(29), CMI0(30), CMI0(31), -#endif -#if NR_CPUS > 32 -#if BITS_PER_LONG == 32 - CMI(32, 0), CMI(33, 0), CMI(34, 0), CMI(35, 0), - CMI(36, 0), CMI(37, 0), CMI(38, 0), CMI(39, 0), - CMI(40, 0), CMI(41, 0), CMI(42, 0), CMI(43, 0), - CMI(44, 0), CMI(45, 0), CMI(46, 0), CMI(47, 0), - CMI(48, 0), CMI(49, 0), CMI(50, 0), CMI(51, 0), - CMI(52, 0), CMI(53, 0), CMI(54, 0), CMI(55, 0), - CMI(56, 0), CMI(57, 0), CMI(58, 0), CMI(59, 0), - CMI(60, 0), CMI(61, 0), CMI(62, 0), CMI(63, 0), -#else - CMI0(32), CMI0(33), CMI0(34), CMI0(35), - CMI0(36), CMI0(37), CMI0(38), CMI0(39), - CMI0(40), CMI0(41), CMI0(42), CMI0(43), - CMI0(44), CMI0(45), CMI0(46), CMI0(47), - CMI0(48), CMI0(49), CMI0(50), CMI0(51), - CMI0(52), CMI0(53), CMI0(54), CMI0(55), - CMI0(56), CMI0(57), CMI0(58), CMI0(59), - CMI0(60), CMI0(61), CMI0(62), CMI0(63), -#endif /* BITS_PER_LONG == 64 */ -#endif -#if NR_CPUS > 64 - CMI64(64, Z64), -#endif -#if NR_CPUS > 128 - CMI64(128, Z64, Z64), CMI64(192, Z64, Z64, Z64), -#endif -#if NR_CPUS > 256 - CMI256(256, Z256), -#endif -#if NR_CPUS > 512 - CMI256(512, Z256, Z256), CMI256(768, Z256, Z256, Z256), -#endif -#if NR_CPUS > 1024 - CMI1024(1024, Z1024), -#endif -#if NR_CPUS > 2048 - CMI1024(2048, Z1024, Z1024), CMI1024(3072, Z1024, Z1024, Z1024), -#endif -#if NR_CPUS > 4096 -#error NR_CPUS too big. Fix initializers or set CONFIG_HAVE_CPUMASK_OF_CPU_MAP +const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = { + + MASK_DECLARE_8(0), MASK_DECLARE_8(8), + MASK_DECLARE_8(16), MASK_DECLARE_8(24), +#if BITS_PER_LONG > 32 + MASK_DECLARE_8(32), MASK_DECLARE_8(40), + MASK_DECLARE_8(48), MASK_DECLARE_8(56), #endif }; - -const cpumask_t *cpumask_of_cpu_map = cpumask_map; - -EXPORT_SYMBOL_GPL(cpumask_of_cpu_map); +EXPORT_SYMBOL_GPL(cpu_bit_bitmap); -- cgit v1.2.3-70-g09d2 From 12c0b20fa4afb5c8a377d6987fb2dcf353e1dce1 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Wed, 23 Jul 2008 17:00:13 -0600 Subject: x86/PCI: use dev_printk when possible Convert printks to use dev_printk(). I converted DBG() to dev_dbg(). This DBG() is from arch/x86/pci/pci.h and requires source-code modification to enable, so dev_dbg() seems roughly equivalent. 
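The conversion follows one pattern throughout; a representative before/after pair, condensed from the hunks below:

/* before: the caller builds the device prefix by hand */
printk(KERN_ERR "PCI: Cannot allocate resource region %d of bridge %s\n",
       idx, pci_name(dev));

/* after: dev_err() derives the driver and device prefix itself */
dev_err(&dev->dev, "BAR %d: can't allocate resource\n", idx);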
Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- arch/x86/pci/fixup.c | 3 +- arch/x86/pci/i386.c | 26 +++++------- arch/x86/pci/irq.c | 106 ++++++++++++++++++++++-------------------------- arch/x86/pci/numaq_32.c | 5 ++- 4 files changed, 64 insertions(+), 76 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index ff3a6a33634..4bdaa590375 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -23,7 +23,8 @@ static void __devinit pci_fixup_i450nx(struct pci_dev *d) pci_read_config_byte(d, reg++, &busno); pci_read_config_byte(d, reg++, &suba); pci_read_config_byte(d, reg++, &subb); - DBG("i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno, suba, subb); + dev_dbg(&d->dev, "i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno, + suba, subb); if (busno) pci_scan_bus_with_sysdata(busno); /* Bus A */ if (suba < subb) diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index a09505806b8..5807d1bc73f 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c @@ -128,10 +128,8 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list) pr = pci_find_parent_resource(dev, r); if (!r->start || !pr || request_resource(pr, r) < 0) { - printk(KERN_ERR "PCI: Cannot allocate " - "resource region %d " - "of bridge %s\n", - idx, pci_name(dev)); + dev_err(&dev->dev, "BAR %d: can't " + "allocate resource\n", idx); /* * Something is wrong with the region. * Invalidate the resource to prevent @@ -166,15 +164,15 @@ static void __init pcibios_allocate_resources(int pass) else disabled = !(command & PCI_COMMAND_MEMORY); if (pass == disabled) { - DBG("PCI: Resource %08lx-%08lx " - "(f=%lx, d=%d, p=%d)\n", - r->start, r->end, r->flags, disabled, pass); + dev_dbg(&dev->dev, "resource %#08llx-%#08llx " + "(f=%lx, d=%d, p=%d)\n", + (unsigned long long) r->start, + (unsigned long long) r->end, + r->flags, disabled, pass); pr = pci_find_parent_resource(dev, r); if (!pr || request_resource(pr, r) < 0) { - printk(KERN_ERR "PCI: Cannot allocate " - "resource region %d " - "of device %s\n", - idx, pci_name(dev)); + dev_err(&dev->dev, "BAR %d: can't " + "allocate resource\n", idx); /* We'll assign a new address later */ r->end -= r->start; r->start = 0; @@ -187,8 +185,7 @@ static void __init pcibios_allocate_resources(int pass) /* Turn the ROM off, leave the resource region, * but keep it unregistered. 
*/ u32 reg; - DBG("PCI: Switching off ROM of %s\n", - pci_name(dev)); + dev_dbg(&dev->dev, "disabling ROM\n"); r->flags &= ~IORESOURCE_ROM_ENABLE; pci_read_config_dword(dev, dev->rom_base_reg, ®); @@ -257,8 +254,7 @@ void pcibios_set_master(struct pci_dev *dev) lat = pcibios_max_latency; else return; - printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n", - pci_name(dev), lat); + dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat); pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); } diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 6a06a2eb059..fec0123b33a 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -436,7 +436,7 @@ static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { WARN_ON_ONCE(pirq >= 9); if (pirq > 8) { - printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq); + dev_info(&dev->dev, "VLSI router PIRQ escape (%d)\n", pirq); return 0; } return read_config_nybble(router, 0x74, pirq-1); @@ -446,7 +446,7 @@ static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, { WARN_ON_ONCE(pirq >= 9); if (pirq > 8) { - printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq); + dev_info(&dev->dev, "VLSI router PIRQ escape (%d)\n", pirq); return 0; } write_config_nybble(router, 0x74, pirq-1, irq); @@ -492,15 +492,17 @@ static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq irq = 0; if (pirq <= 4) irq = read_config_nybble(router, 0x56, pirq - 1); - printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d get irq : %2d\n", - dev->vendor, dev->device, pirq, irq); + dev_info(&dev->dev, + "AMD756: dev [%04x/%04x], router PIRQ %d get IRQ %d\n", + dev->vendor, dev->device, pirq, irq); return irq; } static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { - printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d SET irq : %2d\n", - dev->vendor, dev->device, pirq, irq); + dev_info(&dev->dev, + "AMD756: dev [%04x/%04x], router PIRQ %d set IRQ %d\n", + dev->vendor, dev->device, pirq, irq); if (pirq <= 4) write_config_nybble(router, 0x56, pirq - 1, irq); return 1; @@ -730,7 +732,6 @@ static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router, switch (device) { case PCI_DEVICE_ID_AL_M1533: case PCI_DEVICE_ID_AL_M1563: - printk(KERN_DEBUG "PCI: Using ALI IRQ Router\n"); r->name = "ALI"; r->get = pirq_ali_get; r->set = pirq_ali_set; @@ -840,11 +841,9 @@ static void __init pirq_find_router(struct irq_router *r) h->probe(r, pirq_router_dev, pirq_router_dev->device)) break; } - printk(KERN_INFO "PCI: Using IRQ router %s [%04x/%04x] at %s\n", - pirq_router.name, - pirq_router_dev->vendor, - pirq_router_dev->device, - pci_name(pirq_router_dev)); + dev_info(&pirq_router_dev->dev, "%s IRQ router [%04x/%04x]\n", + pirq_router.name, + pirq_router_dev->vendor, pirq_router_dev->device); /* The device remains referenced for the kernel lifetime */ } @@ -877,7 +876,7 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) /* Find IRQ pin */ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (!pin) { - DBG(KERN_DEBUG " -> no interrupt pin\n"); + dev_dbg(&dev->dev, "no interrupt pin\n"); return 0; } pin = pin - 1; @@ -887,20 +886,20 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) if (!pirq_table) return 0; - DBG(KERN_DEBUG "IRQ for %s[%c]", pci_name(dev), 'A' + pin); info = pirq_get_info(dev); if (!info) { - DBG(" -> not found in routing table\n" KERN_DEBUG); + dev_dbg(&dev->dev, "PCI INT %c not found in 
routing table\n", + 'A' + pin); return 0; } pirq = info->irq[pin].link; mask = info->irq[pin].bitmap; if (!pirq) { - DBG(" -> not routed\n" KERN_DEBUG); + dev_dbg(&dev->dev, "PCI INT %c not routed\n", 'A' + pin); return 0; } - DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, - pirq_table->exclusive_irqs); + dev_dbg(&dev->dev, "PCI INT %c -> PIRQ %02x, mask %04x, excl %04x", + 'A' + pin, pirq, mask, pirq_table->exclusive_irqs); mask &= pcibios_irq_mask; /* Work around broken HP Pavilion Notebooks which assign USB to @@ -930,10 +929,8 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) if (pci_probe & PCI_USE_PIRQ_MASK) newirq = 0; else - printk("\n" KERN_WARNING - "PCI: IRQ %i for device %s doesn't match PIRQ mask - try pci=usepirqmask\n" - KERN_DEBUG, newirq, - pci_name(dev)); + dev_warn(&dev->dev, "IRQ %d doesn't match PIRQ mask " + "%#x; try pci=usepirqmask\n", newirq, mask); } if (!newirq && assign) { for (i = 0; i < 16; i++) { @@ -944,39 +941,35 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) newirq = i; } } - DBG(" -> newirq=%d", newirq); + dev_dbg(&dev->dev, "PCI INT %c -> newirq %d", 'A' + pin, newirq); /* Check if it is hardcoded */ if ((pirq & 0xf0) == 0xf0) { irq = pirq & 0xf; - DBG(" -> hardcoded IRQ %d\n", irq); - msg = "Hardcoded"; + msg = "hardcoded"; } else if (r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \ ((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask))) { - DBG(" -> got IRQ %d\n", irq); - msg = "Found"; + msg = "found"; eisa_set_level_irq(irq); } else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) { - DBG(" -> assigning IRQ %d", newirq); if (r->set(pirq_router_dev, dev, pirq, newirq)) { eisa_set_level_irq(newirq); - DBG(" ... OK\n"); - msg = "Assigned"; + msg = "assigned"; irq = newirq; } } if (!irq) { - DBG(" ... failed\n"); if (newirq && mask == (1 << newirq)) { - msg = "Guessed"; + msg = "guessed"; irq = newirq; - } else + } else { + dev_dbg(&dev->dev, "can't route interrupt\n"); return 0; + } } - printk(KERN_INFO "PCI: %s IRQ %d for device %s\n", msg, irq, - pci_name(dev)); + dev_info(&dev->dev, "%s PCI INT %c -> IRQ %d\n", msg, 'A' + pin, irq); /* Update IRQ for all devices with the same pirq value */ while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) { @@ -996,17 +989,17 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) (!(pci_probe & PCI_USE_PIRQ_MASK) || \ ((1 << dev2->irq) & mask))) { #ifndef CONFIG_PCI_MSI - printk(KERN_INFO "IRQ routing conflict for %s, have irq %d, want irq %d\n", - pci_name(dev2), dev2->irq, irq); + dev_info(&dev2->dev, "IRQ routing conflict: " + "have IRQ %d, want IRQ %d\n", + dev2->irq, irq); #endif continue; } dev2->irq = irq; pirq_penalty[irq]++; if (dev != dev2) - printk(KERN_INFO - "PCI: Sharing IRQ %d with %s\n", - irq, pci_name(dev2)); + dev_info(&dev->dev, "sharing IRQ %d with %s\n", + irq, pci_name(dev2)); } } return 1; @@ -1025,8 +1018,7 @@ static void __init pcibios_fixup_irqs(void) * already in use. 
*/ if (dev->irq >= 16) { - DBG(KERN_DEBUG "%s: ignoring bogus IRQ %d\n", - pci_name(dev), dev->irq); + dev_dbg(&dev->dev, "ignoring bogus IRQ %d\n", dev->irq); dev->irq = 0; } /* @@ -1070,12 +1062,12 @@ static void __init pcibios_fixup_irqs(void) irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, PCI_SLOT(bridge->devfn), pin); if (irq >= 0) - printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n", - pci_name(bridge), 'A' + pin, irq); + dev_warn(&dev->dev, "using bridge %s INT %c to get IRQ %d\n", + pci_name(bridge), + 'A' + pin, irq); } if (irq >= 0) { - printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n", - pci_name(dev), 'A' + pin, irq); + dev_info(&dev->dev, "PCI->APIC IRQ transform: INT %c -> IRQ %d\n", 'A' + pin, irq); dev->irq = irq; } } @@ -1231,25 +1223,24 @@ static int pirq_enable_irq(struct pci_dev *dev) irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, PCI_SLOT(bridge->devfn), pin); if (irq >= 0) - printk(KERN_WARNING - "PCI: using PPB %s[%c] to get irq %d\n", - pci_name(bridge), - 'A' + pin, irq); + dev_warn(&dev->dev, "using bridge %s " "INT %c to get IRQ %d\n", + pci_name(bridge), 'A' + pin, + irq); dev = bridge; } dev = temp_dev; if (irq >= 0) { - printk(KERN_INFO - "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n", - pci_name(dev), 'A' + pin, irq); + dev_info(&dev->dev, "PCI->APIC IRQ transform: " "INT %c -> IRQ %d\n", 'A' + pin, irq); dev->irq = irq; return 0; } else - msg = " Probably buggy MP table."; + msg = "; probably buggy MP table"; } else if (pci_probe & PCI_BIOS_IRQ_SCAN) msg = ""; else - msg = " Please try using pci=biosirq."; + msg = "; please try using pci=biosirq"; /* * With IDE legacy devices the IRQ lookup failure is not @@ -1259,9 +1250,8 @@ static int pirq_enable_irq(struct pci_dev *dev) !(dev->class & 0x5)) return 0; - printk(KERN_WARNING - "PCI: No IRQ known for interrupt pin %c of device %s.%s\n", - 'A' + pin, pci_name(dev), msg); + dev_warn(&dev->dev, "can't find IRQ for PCI INT %c%s\n", + 'A' + pin, msg); } return 0; } diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c index f4b16dc11da..1177845d318 100644 --- a/arch/x86/pci/numaq_32.c +++ b/arch/x86/pci/numaq_32.c @@ -131,13 +131,14 @@ static void __devinit pci_fixup_i450nx(struct pci_dev *d) u8 busno, suba, subb; int quad = BUS2QUAD(d->bus->number); - printk("PCI: Searching for i450NX host bridges on %s\n", pci_name(d)); + dev_info(&d->dev, "searching for i450NX host bridges\n"); reg = 0xd0; for(pxb=0; pxb<2; pxb++) { pci_read_config_byte(d, reg++, &busno); pci_read_config_byte(d, reg++, &suba); pci_read_config_byte(d, reg++, &subb); - DBG("i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno, suba, subb); + dev_dbg(&d->dev, "i450NX PXB %d: %02x/%02x/%02x\n", + pxb, busno, suba, subb); if (busno) { /* Bus A */ pci_scan_bus_with_sysdata(QUADLOCAL2BUS(quad, busno)); -- cgit v1.2.3-70-g09d2 From cddb8a5c14aa89810b40495d94d3d2a0faee6619 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Mon, 28 Jul 2008 15:46:29 -0700 Subject: mmu-notifiers: core With KVM/GRU/XPMEM there isn't just the primary CPU MMU pointing to pages. There are secondary MMUs (with secondary sptes and secondary tlbs) too. sptes in the kvm case are shadow pagetables, but when I say spte in mmu-notifier context, I mean "secondary pte".
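To fix the terminology, here is a hypothetical minimal consumer of the interface this patch introduces (the my_* names are illustrative only; the ops and mmu_notifier_register() signatures match the header added below):

static void my_invalidate_page(struct mmu_notifier *mn,
			       struct mm_struct *mm, unsigned long address)
{
	/* drop the spte and/or flush the secondary tlb for this address */
}

static const struct mmu_notifier_ops my_ops = {
	.invalidate_page = my_invalidate_page,
};

static struct mmu_notifier my_mn = { .ops = &my_ops };

/* at attach time: mmu_notifier_register(&my_mn, current->mm); */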
In the GRU case there's no actual secondary pte and there's only a secondary tlb, because the GRU secondary MMU has no knowledge about sptes and every secondary tlb miss event in the MMU always generates a page fault that has to be resolved by the CPU (this is not the case for KVM, where a secondary tlb miss will walk sptes in hardware and will refill the secondary tlb transparently to software if the corresponding spte is present). The same way zap_page_range has to invalidate the pte before freeing the page, the spte (and secondary tlb) must also be invalidated before any page is freed and reused. Currently we take a page_count pin on every page mapped by sptes, but that means the pages can't be swapped whenever they're mapped by any spte, because they're part of the guest working set. Furthermore a spte unmap event can immediately lead to a page being freed when the pin is released (so requiring the same complex and relatively slow tlb_gather smp-safe logic we have in zap_page_range, which can be avoided completely if the spte unmap event doesn't require an unpin of the page previously mapped in the secondary MMU). The mmu notifiers allow kvm/GRU/XPMEM to attach to the tsk->mm and know when the VM is swapping or freeing or doing anything on the primary MMU, so that the secondary MMU code can drop sptes before the pages are freed, avoiding all page pinning and allowing 100% reliable swapping of guest physical address space. Furthermore it spares the code that tears down the secondary MMU mappings from implementing tlb_gather-like logic as in zap_page_range, which would require many IPIs to flush other cpu tlbs for each fixed number of sptes unmapped. To give an example: if what happens on the primary MMU is a protection downgrade (from writeable to wrprotect), the secondary MMU mappings will be invalidated, and the next secondary-mmu page fault will call get_user_pages and trigger a do_wp_page through get_user_pages if it called get_user_pages with write=1, and it'll re-establish an updated spte or secondary-tlb mapping on the copied page. Or it will set up a readonly spte or readonly tlb mapping if it's a guest read, if it calls get_user_pages with write=0. This is just an example. This allows mapping any page pointed to by any pte (and in turn visible in the primary CPU MMU) into a secondary MMU (be it a pure tlb like GRU, or a full MMU with both sptes and a secondary tlb like the shadow-pagetable layer with kvm), or a remote DMA in software like XPMEM (hence the need to schedule in XPMEM code to send the invalidate to the remote node, while there is no need to schedule in kvm/gru as it's an immediate event, like invalidating a primary-mmu pte). At least for KVM, without this patch it's impossible to swap guests reliably. And having this feature and removing the page pin allows several other optimizations that simplify life considerably. Dependencies: 1) mm_take_all_locks() to register the mmu notifier when the whole VM isn't doing anything with "mm". This allows mmu notifier users to keep track of whether the VM is in the middle of the invalidate_range_begin/end critical section with an atomic counter increased in range_begin and decreased in range_end.
No secondary MMU page fault is allowed to map any spte or secondary tlb reference while the VM is in the middle of range_begin/end, as any page returned by get_user_pages in that critical section could later be immediately freed without any further ->invalidate_page notification (invalidate_range_begin/end works on ranges and ->invalidate_page isn't called immediately before freeing the page). To stop all page freeing and pagetable overwrites, the mmap_sem must be taken in write mode and all other anon_vma/i_mmap locks must be taken too. 2) It'd be a waste to add branches in the VM if nobody could possibly run KVM/GRU/XPMEM on the kernel, so mmu notifiers will only be enabled if CONFIG_KVM=m/y. In the current kernel kvm won't yet take advantage of mmu notifiers, but this already allows compiling a KVM external module against a kernel with mmu notifiers enabled, and from the next pull from kvm.git we'll start using them. And GRU/XPMEM will also be able to continue development by enabling KVM=m in their config, until they submit all GRU/XPMEM GPLv2 code to the mainline kernel. Then they can also enable MMU_NOTIFIERS in the same way KVM does it (even if KVM=n). This guarantees nobody selects MMU_NOTIFIER=y if KVM and GRU and XPMEM are all =n. The mmu_notifier_register call can fail because mm_take_all_locks may be interrupted by a signal and return -EINTR. Because mmu_notifier_register is used at driver startup, a failure can be gracefully handled. Here is an example of the change applied to kvm to register the mmu notifiers. Usually when a driver starts up, other allocations are required anyway and -ENOMEM failure paths exist already. struct kvm *kvm_arch_create_vm(void) { struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL); + int err; if (!kvm) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); + kvm->arch.mmu_notifier.ops = &kvm_mmu_notifier_ops; + err = mmu_notifier_register(&kvm->arch.mmu_notifier, current->mm); + if (err) { + kfree(kvm); + return ERR_PTR(err); + } + return kvm; } mmu_notifier_unregister returns void and it's reliable. The patch also adds a few needed but missing includes that would prevent the kernel from compiling after these changes on non-x86 archs (x86 didn't need them by luck). [akpm@linux-foundation.org: coding-style fixes] [akpm@linux-foundation.org: fix mm/filemap_xip.c build] [akpm@linux-foundation.org: fix mm/mmu_notifier.c build] Signed-off-by: Andrea Arcangeli Signed-off-by: Nick Piggin Signed-off-by: Christoph Lameter Cc: Jack Steiner Cc: Robin Holt Cc: Nick Piggin Cc: Peter Zijlstra Cc: Kanoj Sarcar Cc: Roland Dreier Cc: Steve Wise Cc: Avi Kivity Cc: Hugh Dickins Cc: Rusty Russell Cc: Anthony Liguori Cc: Chris Wright Cc: Marcelo Tosatti Cc: Eric Dumazet Cc: "Paul E.
McKenney" Cc: Izik Eidus Cc: Anthony Liguori Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/kvm/Kconfig | 1 + include/linux/mm_types.h | 4 + include/linux/mmu_notifier.h | 279 +++++++++++++++++++++++++++++++++++++++++++ kernel/fork.c | 3 + mm/Kconfig | 3 + mm/Makefile | 1 + mm/filemap_xip.c | 3 +- mm/fremap.c | 3 + mm/hugetlb.c | 3 + mm/memory.c | 35 +++++- mm/mmap.c | 2 + mm/mmu_notifier.c | 277 ++++++++++++++++++++++++++++++++++++++++++ mm/mprotect.c | 3 + mm/mremap.c | 6 + mm/rmap.c | 13 +- 15 files changed, 623 insertions(+), 13 deletions(-) create mode 100644 include/linux/mmu_notifier.h create mode 100644 mm/mmu_notifier.c (limited to 'arch/x86') diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 8d45fabc5f3..ce3251ce550 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -21,6 +21,7 @@ config KVM tristate "Kernel-based Virtual Machine (KVM) support" depends on HAVE_KVM select PREEMPT_NOTIFIERS + select MMU_NOTIFIER select ANON_INODES ---help--- Support hosting fully virtualized guest machines using hardware diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 746f975b58e..386edbe2cb4 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -253,6 +254,9 @@ struct mm_struct { struct file *exe_file; unsigned long num_exe_file_vmas; #endif +#ifdef CONFIG_MMU_NOTIFIER + struct mmu_notifier_mm *mmu_notifier_mm; +#endif }; #endif /* _LINUX_MM_TYPES_H */ diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h new file mode 100644 index 00000000000..b77486d152c --- /dev/null +++ b/include/linux/mmu_notifier.h @@ -0,0 +1,279 @@ +#ifndef _LINUX_MMU_NOTIFIER_H +#define _LINUX_MMU_NOTIFIER_H + +#include +#include +#include + +struct mmu_notifier; +struct mmu_notifier_ops; + +#ifdef CONFIG_MMU_NOTIFIER + +/* + * The mmu notifier_mm structure is allocated and installed in + * mm->mmu_notifier_mm inside the mm_take_all_locks() protected + * critical section and it's released only when mm_count reaches zero + * in mmdrop(). + */ +struct mmu_notifier_mm { + /* all mmu notifiers registerd in this mm are queued in this list */ + struct hlist_head list; + /* to serialize the list modifications and hlist_unhashed */ + spinlock_t lock; +}; + +struct mmu_notifier_ops { + /* + * Called either by mmu_notifier_unregister or when the mm is + * being destroyed by exit_mmap, always before all pages are + * freed. This can run concurrently with other mmu notifier + * methods (the ones invoked outside the mm context) and it + * should tear down all secondary mmu mappings and freeze the + * secondary mmu. If this method isn't implemented you've to + * be sure that nothing could possibly write to the pages + * through the secondary mmu by the time the last thread with + * tsk->mm == mm exits. + * + * As side note: the pages freed after ->release returns could + * be immediately reallocated by the gart at an alias physical + * address with a different cache model, so if ->release isn't + * implemented because all _software_ driven memory accesses + * through the secondary mmu are terminated by the time the + * last thread of this mm quits, you've also to be sure that + * speculative _hardware_ operations can't allocate dirty + * cachelines in the cpu that could not be snooped and made + * coherent with the other read and write operations happening + * through the gart alias address, so leading to memory + * corruption. 
+ */ + void (*release)(struct mmu_notifier *mn, + struct mm_struct *mm); + + /* + * clear_flush_young is called after the VM is + * test-and-clearing the young/accessed bitflag in the + * pte. This way the VM will provide proper aging to the + * accesses to the page through the secondary MMUs and not + * only to the ones through the Linux pte. + */ + int (*clear_flush_young)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address); + + /* + * Before this is invoked any secondary MMU is still ok to + * read/write to the page previously pointed to by the Linux + * pte because the page hasn't been freed yet and it won't be + * freed until this returns. If required set_page_dirty has to + * be called internally to this method. + */ + void (*invalidate_page)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address); + + /* + * invalidate_range_start() and invalidate_range_end() must be + * paired and are called only when the mmap_sem and/or the + * locks protecting the reverse maps are held. The subsystem + * must guarantee that no additional references are taken to + * the pages in the range established between the call to + * invalidate_range_start() and the matching call to + * invalidate_range_end(). + * + * Invalidation of multiple concurrent ranges may be + * optionally permitted by the driver. Either way the + * establishment of sptes is forbidden in the range passed to + * invalidate_range_begin/end for the whole duration of the + * invalidate_range_begin/end critical section. + * + * invalidate_range_start() is called when all pages in the + * range are still mapped and have at least a refcount of one. + * + * invalidate_range_end() is called when all pages in the + * range have been unmapped and the pages have been freed by + * the VM. + * + * The VM will remove the page table entries and potentially + * the page between invalidate_range_start() and + * invalidate_range_end(). If the page must not be freed + * because of pending I/O or other circumstances then the + * invalidate_range_start() callback (or the initial mapping + * by the driver) must make sure that the refcount is kept + * elevated. + * + * If the driver increases the refcount when the pages are + * initially mapped into an address space then either + * invalidate_range_start() or invalidate_range_end() may + * decrease the refcount. If the refcount is decreased on + * invalidate_range_start() then the VM can free pages as page + * table entries are removed. If the refcount is only + * droppped on invalidate_range_end() then the driver itself + * will drop the last refcount but it must take care to flush + * any secondary tlb before doing the final free on the + * page. Pages will no longer be referenced by the linux + * address space but may still be referenced by sptes until + * the last refcount is dropped. + */ + void (*invalidate_range_start)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end); + void (*invalidate_range_end)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end); +}; + +/* + * The notifier chains are protected by mmap_sem and/or the reverse map + * semaphores. Notifier chains are only changed when all reverse maps and + * the mmap_sem locks are taken. + * + * Therefore notifier chains can only be traversed when either + * + * 1. mmap_sem is held. + * 2. One of the reverse map locks is held (i_mmap_lock or anon_vma->lock). + * 3. 
No other concurrent thread can access the list (release) + */ +struct mmu_notifier { + struct hlist_node hlist; + const struct mmu_notifier_ops *ops; +}; + +static inline int mm_has_notifiers(struct mm_struct *mm) +{ + return unlikely(mm->mmu_notifier_mm); +} + +extern int mmu_notifier_register(struct mmu_notifier *mn, + struct mm_struct *mm); +extern int __mmu_notifier_register(struct mmu_notifier *mn, + struct mm_struct *mm); +extern void mmu_notifier_unregister(struct mmu_notifier *mn, + struct mm_struct *mm); +extern void __mmu_notifier_mm_destroy(struct mm_struct *mm); +extern void __mmu_notifier_release(struct mm_struct *mm); +extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long address); +extern void __mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address); +extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end); +extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end); + +static inline void mmu_notifier_release(struct mm_struct *mm) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_release(mm); +} + +static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long address) +{ + if (mm_has_notifiers(mm)) + return __mmu_notifier_clear_flush_young(mm, address); + return 0; +} + +static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_page(mm, address); +} + +static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_range_start(mm, start, end); +} + +static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_range_end(mm, start, end); +} + +static inline void mmu_notifier_mm_init(struct mm_struct *mm) +{ + mm->mmu_notifier_mm = NULL; +} + +static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_mm_destroy(mm); +} + +/* + * These two macros will sometime replace ptep_clear_flush. + * ptep_clear_flush is impleemnted as macro itself, so this also is + * implemented as a macro until ptep_clear_flush will converted to an + * inline function, to diminish the risk of compilation failure. The + * invalidate_page method over time can be moved outside the PT lock + * and these two macros can be later removed. 
+ */ +#define ptep_clear_flush_notify(__vma, __address, __ptep) \ +({ \ + pte_t __pte; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + __pte = ptep_clear_flush(___vma, ___address, __ptep); \ + mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \ + __pte; \ +}) + +#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ +({ \ + int __young; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + __young = ptep_clear_flush_young(___vma, ___address, __ptep); \ + __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ + ___address); \ + __young; \ +}) + +#else /* CONFIG_MMU_NOTIFIER */ + +static inline void mmu_notifier_release(struct mm_struct *mm) +{ +} + +static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long address) +{ + return 0; +} + +static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address) +{ +} + +static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +static inline void mmu_notifier_mm_init(struct mm_struct *mm) +{ +} + +static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) +{ +} + +#define ptep_clear_flush_young_notify ptep_clear_flush_young +#define ptep_clear_flush_notify ptep_clear_flush + +#endif /* CONFIG_MMU_NOTIFIER */ + +#endif /* _LINUX_MMU_NOTIFIER_H */ diff --git a/kernel/fork.c b/kernel/fork.c index 8214ba7c8bb..7ce2ebe8479 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -414,6 +415,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) if (likely(!mm_alloc_pgd(mm))) { mm->def_flags = 0; + mmu_notifier_mm_init(mm); return mm; } @@ -446,6 +448,7 @@ void __mmdrop(struct mm_struct *mm) BUG_ON(mm == &init_mm); mm_free_pgd(mm); destroy_context(mm); + mmu_notifier_mm_destroy(mm); free_mm(mm); } EXPORT_SYMBOL_GPL(__mmdrop); diff --git a/mm/Kconfig b/mm/Kconfig index efee5d379df..446c6588c75 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -208,3 +208,6 @@ config NR_QUICK config VIRT_TO_BUS def_bool y depends on !ARCH_NO_VIRT_TO_BUS + +config MMU_NOTIFIER + bool diff --git a/mm/Makefile b/mm/Makefile index 06ca2381fef..da4ccf015ae 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_SHMEM) += shmem.o obj-$(CONFIG_TMPFS_POSIX_ACL) += shmem_acl.o obj-$(CONFIG_TINY_SHMEM) += tiny-shmem.o obj-$(CONFIG_SLOB) += slob.o +obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o obj-$(CONFIG_SLAB) += slab.o obj-$(CONFIG_SLUB) += slub.o obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c index 98a3f31ccd6..380ab402d71 100644 --- a/mm/filemap_xip.c +++ b/mm/filemap_xip.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -188,7 +189,7 @@ __xip_unmap (struct address_space * mapping, if (pte) { /* Nuke the page table entry. 
*/ flush_cache_page(vma, address, pte_pfn(*pte)); - pteval = ptep_clear_flush(vma, address, pte); + pteval = ptep_clear_flush_notify(vma, address, pte); page_remove_rmap(page, vma); dec_mm_counter(mm, file_rss); BUG_ON(pte_dirty(pteval)); diff --git a/mm/fremap.c b/mm/fremap.c index 07a9c82ce1a..7881638e4a1 100644 --- a/mm/fremap.c +++ b/mm/fremap.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -214,7 +215,9 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size, spin_unlock(&mapping->i_mmap_lock); } + mmu_notifier_invalidate_range_start(mm, start, start + size); err = populate_range(mm, vma, start, size, pgoff); + mmu_notifier_invalidate_range_end(mm, start, start + size); if (!err && !(flags & MAP_NONBLOCK)) { if (unlikely(has_write_lock)) { downgrade_write(&mm->mmap_sem); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 3be79dc18c5..80eb0d31d0d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -1672,6 +1673,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, BUG_ON(start & ~huge_page_mask(h)); BUG_ON(end & ~huge_page_mask(h)); + mmu_notifier_invalidate_range_start(mm, start, end); spin_lock(&mm->page_table_lock); for (address = start; address < end; address += sz) { ptep = huge_pte_offset(mm, address); @@ -1713,6 +1715,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, } spin_unlock(&mm->page_table_lock); flush_tlb_range(vma, start, end); + mmu_notifier_invalidate_range_end(mm, start, end); list_for_each_entry_safe(page, tmp, &page_list, lru) { list_del(&page->lru); put_page(page); diff --git a/mm/memory.c b/mm/memory.c index a8ca04faaea..67f0ab9077d 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -51,6 +51,7 @@ #include #include #include +#include #include #include @@ -652,6 +653,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, unsigned long next; unsigned long addr = vma->vm_start; unsigned long end = vma->vm_end; + int ret; /* * Don't copy ptes where a page fault will fill them correctly. @@ -667,17 +669,33 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, if (is_vm_hugetlb_page(vma)) return copy_hugetlb_page_range(dst_mm, src_mm, vma); + /* + * We need to invalidate the secondary MMU mappings only when + * there could be a permission downgrade on the ptes of the + * parent mm. And a permission downgrade will only happen if + * is_cow_mapping() returns true. + */ + if (is_cow_mapping(vma->vm_flags)) + mmu_notifier_invalidate_range_start(src_mm, addr, end); + + ret = 0; dst_pgd = pgd_offset(dst_mm, addr); src_pgd = pgd_offset(src_mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(src_pgd)) continue; - if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd, - vma, addr, next)) - return -ENOMEM; + if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd, + vma, addr, next))) { + ret = -ENOMEM; + break; + } } while (dst_pgd++, src_pgd++, addr = next, addr != end); - return 0; + + if (is_cow_mapping(vma->vm_flags)) + mmu_notifier_invalidate_range_end(src_mm, + vma->vm_start, end); + return ret; } static unsigned long zap_pte_range(struct mmu_gather *tlb, @@ -881,7 +899,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp, unsigned long start = start_addr; spinlock_t *i_mmap_lock = details? 
details->i_mmap_lock: NULL; int fullmm = (*tlbp)->fullmm; + struct mm_struct *mm = vma->vm_mm; + mmu_notifier_invalidate_range_start(mm, start_addr, end_addr); for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) { unsigned long end; @@ -946,6 +966,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp, } } out: + mmu_notifier_invalidate_range_end(mm, start_addr, end_addr); return start; /* which is now the end (or restart) address */ } @@ -1616,10 +1637,11 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr, { pgd_t *pgd; unsigned long next; - unsigned long end = addr + size; + unsigned long start = addr, end = addr + size; int err; BUG_ON(addr >= end); + mmu_notifier_invalidate_range_start(mm, start, end); pgd = pgd_offset(mm, addr); do { next = pgd_addr_end(addr, end); @@ -1627,6 +1649,7 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr, if (err) break; } while (pgd++, addr = next, addr != end); + mmu_notifier_invalidate_range_end(mm, start, end); return err; } EXPORT_SYMBOL_GPL(apply_to_page_range); @@ -1839,7 +1862,7 @@ gotten: * seen in the presence of one thread doing SMC and another * thread doing COW. */ - ptep_clear_flush(vma, address, page_table); + ptep_clear_flush_notify(vma, address, page_table); set_pte_at(mm, address, page_table, entry); update_mmu_cache(vma, address, entry); lru_cache_add_active(new_page); diff --git a/mm/mmap.c b/mm/mmap.c index e5f9cb83d6d..245c3d69067 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -2061,6 +2062,7 @@ void exit_mmap(struct mm_struct *mm) /* mm's last user has gone, and it's about to be pulled down */ arch_exit_mmap(mm); + mmu_notifier_release(mm); lru_add_drain(); flush_cache_mm(mm); diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c new file mode 100644 index 00000000000..5f4ef0250be --- /dev/null +++ b/mm/mmu_notifier.c @@ -0,0 +1,277 @@ +/* + * linux/mm/mmu_notifier.c + * + * Copyright (C) 2008 Qumranet, Inc. + * Copyright (C) 2008 SGI + * Christoph Lameter + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * This function can't run concurrently against mmu_notifier_register + * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap + * runs with mm_users == 0. Other tasks may still invoke mmu notifiers + * in parallel despite there being no task using this mm any more, + * through the vmas outside of the exit_mmap context, such as with + * vmtruncate. This serializes against mmu_notifier_unregister with + * the mmu_notifier_mm->lock in addition to RCU and it serializes + * against the other mmu notifiers with RCU. struct mmu_notifier_mm + * can't go away from under us as exit_mmap holds an mm_count pin + * itself. + */ +void __mmu_notifier_release(struct mm_struct *mm) +{ + struct mmu_notifier *mn; + + spin_lock(&mm->mmu_notifier_mm->lock); + while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) { + mn = hlist_entry(mm->mmu_notifier_mm->list.first, + struct mmu_notifier, + hlist); + /* + * We arrived before mmu_notifier_unregister so + * mmu_notifier_unregister will do nothing other than + * to wait for ->release to finish and + * mmu_notifier_unregister to return. + */ + hlist_del_init_rcu(&mn->hlist); + /* + * RCU here will block mmu_notifier_unregister until + * ->release returns.
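
The hunks above all apply one convention, worth stating once in isolation: any path that tears down ptes brackets the work with the range calls, and only frees pages after the _end call returns. A sketch, where zap_my_range() is a hypothetical stand-in for the actual pte teardown:

static void teardown_range(struct mm_struct *mm,
			   unsigned long start, unsigned long end)
{
	/* secondary MMUs drop their sptes before any page is freed */
	mmu_notifier_invalidate_range_start(mm, start, end);
	zap_my_range(mm, start, end);		/* clear ptes, flush TLB */
	/* only after this returns may the underlying pages be recycled */
	mmu_notifier_invalidate_range_end(mm, start, end);
}
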
+ */ + rcu_read_lock(); + spin_unlock(&mm->mmu_notifier_mm->lock); + /* + * if ->release runs before mmu_notifier_unregister it + * must be handled as it's the only way for the driver + * to flush all existing sptes and stop the driver + * from establishing any more sptes before all the + * pages in the mm are freed. + */ + if (mn->ops->release) + mn->ops->release(mn, mm); + rcu_read_unlock(); + spin_lock(&mm->mmu_notifier_mm->lock); + } + spin_unlock(&mm->mmu_notifier_mm->lock); + + /* + * synchronize_rcu here prevents mmu_notifier_release from + * returning to exit_mmap (which would proceed to free all pages + * in the mm) until the ->release method returns, if it was + * invoked by mmu_notifier_unregister. + * + * The mmu_notifier_mm can't go away from under us because one + * mm_count is held by exit_mmap. + */ + synchronize_rcu(); +} + +/* + * If no young bitflag is supported by the hardware, ->clear_flush_young can + * unmap the address and return 1 or 0 depending on whether the mapping + * previously existed or not. + */ +int __mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long address) +{ + struct mmu_notifier *mn; + struct hlist_node *n; + int young = 0; + + rcu_read_lock(); + hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { + if (mn->ops->clear_flush_young) + young |= mn->ops->clear_flush_young(mn, mm, address); + } + rcu_read_unlock(); + + return young; +} + +void __mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address) +{ + struct mmu_notifier *mn; + struct hlist_node *n; + + rcu_read_lock(); + hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { + if (mn->ops->invalidate_page) + mn->ops->invalidate_page(mn, mm, address); + } + rcu_read_unlock(); +} + +void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + struct mmu_notifier *mn; + struct hlist_node *n; + + rcu_read_lock(); + hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { + if (mn->ops->invalidate_range_start) + mn->ops->invalidate_range_start(mn, mm, start, end); + } + rcu_read_unlock(); +} + +void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + struct mmu_notifier *mn; + struct hlist_node *n; + + rcu_read_lock(); + hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { + if (mn->ops->invalidate_range_end) + mn->ops->invalidate_range_end(mn, mm, start, end); + } + rcu_read_unlock(); +} + +static int do_mmu_notifier_register(struct mmu_notifier *mn, + struct mm_struct *mm, + int take_mmap_sem) +{ + struct mmu_notifier_mm *mmu_notifier_mm; + int ret; + + BUG_ON(atomic_read(&mm->mm_users) <= 0); + + ret = -ENOMEM; + mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL); + if (unlikely(!mmu_notifier_mm)) + goto out; + + if (take_mmap_sem) + down_write(&mm->mmap_sem); + ret = mm_take_all_locks(mm); + if (unlikely(ret)) + goto out_cleanup; + + if (!mm_has_notifiers(mm)) { + INIT_HLIST_HEAD(&mmu_notifier_mm->list); + spin_lock_init(&mmu_notifier_mm->lock); + mm->mmu_notifier_mm = mmu_notifier_mm; + mmu_notifier_mm = NULL; + } + atomic_inc(&mm->mm_count); + + /* + * Serialize the update against mmu_notifier_unregister. A + * side note: mmu_notifier_release can't run concurrently with + * us because we hold the mm_users pin (either implicitly as + * current->mm or explicitly with get_task_mm() or similar). + * We can't race against any other mmu notifier method either + * thanks to mm_take_all_locks().
+ */ + spin_lock(&mm->mmu_notifier_mm->lock); + hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list); + spin_unlock(&mm->mmu_notifier_mm->lock); + + mm_drop_all_locks(mm); +out_cleanup: + if (take_mmap_sem) + up_write(&mm->mmap_sem); + /* kfree() does nothing if mmu_notifier_mm is NULL */ + kfree(mmu_notifier_mm); +out: + BUG_ON(atomic_read(&mm->mm_users) <= 0); + return ret; +} + +/* + * Must not hold mmap_sem nor any other VM related lock when calling + * this registration function. Must also ensure mm_users can't go down + * to zero while this runs to avoid races with mmu_notifier_release, + * so mm has to be current->mm or the mm should be pinned safely such + * as with get_task_mm(). If the mm is not current->mm, the mm_users + * pin should be released by calling mmput after mmu_notifier_register + * returns. mmu_notifier_unregister must always be called to + * unregister the notifier. mm_count is automatically pinned to allow + * mmu_notifier_unregister to safely run at any time later, before or + * after exit_mmap. ->release will always be called before exit_mmap + * frees the pages. + */ +int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm) +{ + return do_mmu_notifier_register(mn, mm, 1); +} +EXPORT_SYMBOL_GPL(mmu_notifier_register); + +/* + * Same as mmu_notifier_register but here the caller must hold the + * mmap_sem in write mode. + */ +int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm) +{ + return do_mmu_notifier_register(mn, mm, 0); +} +EXPORT_SYMBOL_GPL(__mmu_notifier_register); + +/* this is called after the last mmu_notifier_unregister() returned */ +void __mmu_notifier_mm_destroy(struct mm_struct *mm) +{ + BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list)); + kfree(mm->mmu_notifier_mm); + mm->mmu_notifier_mm = LIST_POISON1; /* debug */ +} + +/* + * This releases the mm_count pin automatically and frees the mm + * structure if it was the last user of it. It serializes against + * running mmu notifiers with RCU and against mmu_notifier_unregister + * with the unregister lock + RCU. All sptes must be dropped before + * calling mmu_notifier_unregister. ->release or any other notifier + * method may be invoked concurrently with mmu_notifier_unregister, + * and only after mmu_notifier_unregister returns are we guaranteed + * that ->release or any other method can't run anymore. + */ +void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm) +{ + BUG_ON(atomic_read(&mm->mm_count) <= 0); + + spin_lock(&mm->mmu_notifier_mm->lock); + if (!hlist_unhashed(&mn->hlist)) { + hlist_del_rcu(&mn->hlist); + + /* + * RCU here will force exit_mmap to wait for ->release to finish + * before freeing the pages. + */ + rcu_read_lock(); + spin_unlock(&mm->mmu_notifier_mm->lock); + /* + * exit_mmap will block in mmu_notifier_release to + * guarantee ->release is called before freeing the + * pages. + */ + if (mn->ops->release) + mn->ops->release(mn, mm); + rcu_read_unlock(); + } else + spin_unlock(&mm->mmu_notifier_mm->lock); + + /* + * Wait for any running method to finish, of course including + * ->release if it was run by mmu_notifier_release instead of us.
+ */ + synchronize_rcu(); + + BUG_ON(atomic_read(&mm->mm_count) <= 0); + + mmdrop(mm); +} +EXPORT_SYMBOL_GPL(mmu_notifier_unregister); diff --git a/mm/mprotect.c b/mm/mprotect.c index abd645a3b0a..fded06f923f 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -203,10 +204,12 @@ success: dirty_accountable = 1; } + mmu_notifier_invalidate_range_start(mm, start, end); if (is_vm_hugetlb_page(vma)) hugetlb_change_protection(vma, start, end, vma->vm_page_prot); else change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable); + mmu_notifier_invalidate_range_end(mm, start, end); vm_stat_account(mm, oldflags, vma->vm_file, -nrpages); vm_stat_account(mm, newflags, vma->vm_file, nrpages); return 0; diff --git a/mm/mremap.c b/mm/mremap.c index 08e3c7f2bd1..1a7743923c8 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -74,7 +75,11 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, struct mm_struct *mm = vma->vm_mm; pte_t *old_pte, *new_pte, pte; spinlock_t *old_ptl, *new_ptl; + unsigned long old_start; + old_start = old_addr; + mmu_notifier_invalidate_range_start(vma->vm_mm, + old_start, old_end); if (vma->vm_file) { /* * Subtle point from Rajesh Venkatasubramanian: before @@ -116,6 +121,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, pte_unmap_unlock(old_pte - 1, old_ptl); if (mapping) spin_unlock(&mapping->i_mmap_lock); + mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end); } #define LATENCY_LIMIT (64 * PAGE_SIZE) diff --git a/mm/rmap.c b/mm/rmap.c index 39ae5a9bf38..99bc3f9cd79 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -49,6 +49,7 @@ #include #include #include +#include #include @@ -287,7 +288,7 @@ static int page_referenced_one(struct page *page, if (vma->vm_flags & VM_LOCKED) { referenced++; *mapcount = 1; /* break early from loop */ - } else if (ptep_clear_flush_young(vma, address, pte)) + } else if (ptep_clear_flush_young_notify(vma, address, pte)) referenced++; /* Pretend the page is referenced if the task has the @@ -457,7 +458,7 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma) pte_t entry; flush_cache_page(vma, address, pte_pfn(*pte)); - entry = ptep_clear_flush(vma, address, pte); + entry = ptep_clear_flush_notify(vma, address, pte); entry = pte_wrprotect(entry); entry = pte_mkclean(entry); set_pte_at(mm, address, pte, entry); @@ -705,14 +706,14 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, * skipped over this mm) then we should reactivate it. */ if (!migration && ((vma->vm_flags & VM_LOCKED) || - (ptep_clear_flush_young(vma, address, pte)))) { + (ptep_clear_flush_young_notify(vma, address, pte)))) { ret = SWAP_FAIL; goto out_unmap; } /* Nuke the page table entry. */ flush_cache_page(vma, address, page_to_pfn(page)); - pteval = ptep_clear_flush(vma, address, pte); + pteval = ptep_clear_flush_notify(vma, address, pte); /* Move the dirty bit to the physical page now the pte is gone. */ if (pte_dirty(pteval)) @@ -837,12 +838,12 @@ static void try_to_unmap_cluster(unsigned long cursor, page = vm_normal_page(vma, address, *pte); BUG_ON(!page || PageAnon(page)); - if (ptep_clear_flush_young(vma, address, pte)) + if (ptep_clear_flush_young_notify(vma, address, pte)) continue; /* Nuke the page table entry. 
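
Taken together, the registration rules above imply this lifetime for a notifier attached to a foreign mm. The sketch assumes a hypothetical driver object d and its ops table; it relies only on calls documented in this patch:

	struct mm_struct *mm = get_task_mm(task);	/* pin mm_users */
	if (!mm)
		return -ESRCH;
	d->mn.ops = &my_ops;
	err = mmu_notifier_register(&d->mn, mm);	/* pins mm_count on success */
	mmput(mm);					/* mm_users pin no longer needed */
	...
	/* teardown, valid at any time, before or after exit_mmap: */
	mmu_notifier_unregister(&d->mn, mm);		/* drops the mm_count pin */
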
*/ flush_cache_page(vma, address, pte_pfn(*pte)); - pteval = ptep_clear_flush(vma, address, pte); + pteval = ptep_clear_flush_notify(vma, address, pte); /* If nonlinear, store the file page offset in the pte. */ if (page->index != linear_page_index(vma, address)) -- cgit v1.2.3-70-g09d2 From 5d006d8d09e82f086ca0baf79a2907f2c1e25af7 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 29 Jul 2008 09:58:29 -0500 Subject: lguest: set max_pfn_mapped, growl loudly at Yinghai Lu 6af61a7614a306fe882a0c2b4ddc63b65aa66efc 'x86: clean up max_pfn_mapped usage - 32-bit' makes the following comment: XEN PV and lguest may need to assign max_pfn_mapped too. But no CC. Yinghai, wasting fellow developers' time is a VERY bad habit. If you do it again, I will hunt you down and try to extract the three hours of my life I just lost :) Signed-off-by: Rusty Russell Cc: Yinghai Lu --- arch/x86/lguest/boot.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 0313a5eec41..d9249a882aa 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -1014,6 +1014,9 @@ __init void lguest_init(void) init_pg_tables_start = __pa(pg0); init_pg_tables_end = __pa(pg0); + /* As described in head_32.S, we map the first 128M of memory. */ + max_pfn_mapped = (128*1024*1024) >> PAGE_SHIFT; + /* Load the %fs segment register (the per-cpu segment register) with * the normal data segment to get through booting. */ asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_DS) : "memory"); -- cgit v1.2.3-70-g09d2 From 9b79022ca909b66e2cd0cfd9248f832fc165f77f Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 28 Jul 2008 17:54:21 -0700 Subject: Fix 'get_user_pages_fast()' with non-page-aligned start address Alexey Dobriyan reported trouble with LTP with the new fast-gup code, and Johannes Weiner debugged it to non-page-aligned addresses, where the new get_user_pages_fast() code would do all the wrong things, including just traversing past the end of the requested area due to 'addr' never matching 'end' exactly. This is not a pretty fix, and we may actually want to move the alignment into generic code, leaving just the core code per-arch, but Alexey verified that the vmsplice01 LTP test doesn't crash with this. Reported-and-tested-by: Alexey Dobriyan Debugged-by: Johannes Weiner Cc: Nick Piggin Cc: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/gup.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 3085f25b435..007bb06c750 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -223,14 +223,17 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { struct mm_struct *mm = current->mm; - unsigned long end = start + (nr_pages << PAGE_SHIFT); - unsigned long addr = start; + unsigned long addr, len, end; unsigned long next; pgd_t *pgdp; int nr = 0; + start &= PAGE_MASK; + addr = start; + len = (unsigned long) nr_pages << PAGE_SHIFT; + end = start + len; if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, - start, nr_pages*PAGE_SIZE))) + start, len))) goto slow_irqon; /* -- cgit v1.2.3-70-g09d2 From a1708ce8a362c4999f1201237ae7b77c4d13af82 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Fri, 25 Jul 2008 16:26:39 +0200 Subject: KVM: Allow reading aliases with mmu_lock This allows the mmu notifier code to run unalias_gfn with only the mmu_lock held. Only alias writes need the mmu_lock held. 
Readers will either take the slots_lock in read mode or the mmu_lock. Signed-off-by: Andrea Arcangeli Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5916191420c..9870ce42292 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1495,6 +1495,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm, goto out; down_write(&kvm->slots_lock); + spin_lock(&kvm->mmu_lock); p = &kvm->arch.aliases[alias->slot]; p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT; @@ -1506,6 +1507,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm, break; kvm->arch.naliases = n; + spin_unlock(&kvm->mmu_lock); kvm_mmu_zap_all(kvm); up_write(&kvm->slots_lock); -- cgit v1.2.3-70-g09d2 From 604b38ac0369bd50fcbb33344aa5553c071009f7 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Fri, 25 Jul 2008 16:32:03 +0200 Subject: KVM: Allow browsing memslots with mmu_lock This allows reading memslots with only the mmu_lock held, for mmu notifiers that run in atomic context with the mmu_lock held. Signed-off-by: Andrea Arcangeli Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 21 ++++++++++++++------- virt/kvm/kvm_main.c | 20 ++++++++++++++++---- 2 files changed, 30 insertions(+), 11 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9870ce42292..c7b01efe064 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3974,16 +3974,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm, */ if (!user_alloc) { if (npages && !old.rmap) { + unsigned long userspace_addr; + down_write(&current->mm->mmap_sem); - memslot->userspace_addr = do_mmap(NULL, 0, - npages * PAGE_SIZE, - PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, - 0); + userspace_addr = do_mmap(NULL, 0, + npages * PAGE_SIZE, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, + 0); up_write(&current->mm->mmap_sem); - if (IS_ERR((void *)memslot->userspace_addr)) - return PTR_ERR((void *)memslot->userspace_addr); + if (IS_ERR((void *)userspace_addr)) + return PTR_ERR((void *)userspace_addr); + + /* set userspace_addr atomically for kvm_hva_to_rmapp */ + spin_lock(&kvm->mmu_lock); + memslot->userspace_addr = userspace_addr; + spin_unlock(&kvm->mmu_lock); } else { if (!old.user_alloc && old.rmap) { int ret; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a845890b680..3735212cd3f 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -375,7 +375,15 @@ int __kvm_set_memory_region(struct kvm *kvm, memset(new.rmap, 0, npages * sizeof(*new.rmap)); new.user_alloc = user_alloc; - new.userspace_addr = mem->userspace_addr; + /* + * hva_to_rmmap() serializes with the mmu_lock and to be + * safe it has to ignore memslots with !user_alloc && + * !userspace_addr.
+ */ + if (user_alloc) + new.userspace_addr = mem->userspace_addr; + else + new.userspace_addr = 0; } if (npages && !new.lpage_info) { int largepages = npages / KVM_PAGES_PER_HPAGE; @@ -408,17 +416,21 @@ int __kvm_set_memory_region(struct kvm *kvm, } #endif /* not defined CONFIG_S390 */ - if (mem->slot >= kvm->nmemslots) - kvm->nmemslots = mem->slot + 1; - if (!npages) kvm_arch_flush_shadow(kvm); + spin_lock(&kvm->mmu_lock); + if (mem->slot >= kvm->nmemslots) + kvm->nmemslots = mem->slot + 1; + *memslot = new; + spin_unlock(&kvm->mmu_lock); r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc); if (r) { + spin_lock(&kvm->mmu_lock); *memslot = old; + spin_unlock(&kvm->mmu_lock); goto out_free; } -- cgit v1.2.3-70-g09d2 From e930bffe95e1e886a1ede80726ea38df5838d067 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Fri, 25 Jul 2008 16:24:52 +0200 Subject: KVM: Synchronize guest physical memory map to host virtual memory map Synchronize changes to host virtual addresses which are part of a KVM memory slot to the KVM shadow mmu. This allows pte operations like swapping, page migration, and madvise() to transparently work with KVM. Signed-off-by: Andrea Arcangeli Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 100 +++++++++++++++++++++++++++++++++ arch/x86/kvm/paging_tmpl.h | 12 ++++ include/asm-x86/kvm_host.h | 6 ++ include/linux/kvm_host.h | 24 ++++++++ virt/kvm/kvm_main.c | 135 +++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 277 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 2fa231923cf..0bfe2bd305e 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -653,6 +653,84 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn) account_shadowed(kvm, gfn); } +static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp) +{ + u64 *spte; + int need_tlb_flush = 0; + + while ((spte = rmap_next(kvm, rmapp, NULL))) { + BUG_ON(!(*spte & PT_PRESENT_MASK)); + rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte); + rmap_remove(kvm, spte); + set_shadow_pte(spte, shadow_trap_nonpresent_pte); + need_tlb_flush = 1; + } + return need_tlb_flush; +} + +static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, + int (*handler)(struct kvm *kvm, unsigned long *rmapp)) +{ + int i; + int retval = 0; + + /* + * If mmap_sem isn't taken, we can look up the memslots with only + * the mmu_lock by skipping over the slots with userspace_addr == 0.
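
The address arithmetic in the walk that follows reduces to a page-granular offset into the slot's rmap arrays; a worked example with hypothetical numbers, assuming PAGE_SHIFT == 12:

	/* slot: userspace_addr = 0x7f0000000000, npages = 512 */
	/* hva 0x7f0000003000 lies in [start, start + (512 << PAGE_SHIFT)) */
	gfn_offset = (0x7f0000003000UL - 0x7f0000000000UL) >> PAGE_SHIFT; /* 3 */
	/* the handler runs on memslot->rmap[3] and on the large-page
	 * bucket rmap_pde at index 3 / KVM_PAGES_PER_HPAGE for the same hva */
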
+ */ + for (i = 0; i < kvm->nmemslots; i++) { + struct kvm_memory_slot *memslot = &kvm->memslots[i]; + unsigned long start = memslot->userspace_addr; + unsigned long end; + + /* mmu_lock protects userspace_addr */ + if (!start) + continue; + + end = start + (memslot->npages << PAGE_SHIFT); + if (hva >= start && hva < end) { + gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; + retval |= handler(kvm, &memslot->rmap[gfn_offset]); + retval |= handler(kvm, + &memslot->lpage_info[ + gfn_offset / + KVM_PAGES_PER_HPAGE].rmap_pde); + } + } + + return retval; +} + +int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) +{ + return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp); +} + +static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp) +{ + u64 *spte; + int young = 0; + + spte = rmap_next(kvm, rmapp, NULL); + while (spte) { + int _young; + u64 _spte = *spte; + BUG_ON(!(_spte & PT_PRESENT_MASK)); + _young = _spte & PT_ACCESSED_MASK; + if (_young) { + young = 1; + clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte); + } + spte = rmap_next(kvm, rmapp, spte); + } + return young; +} + +int kvm_age_hva(struct kvm *kvm, unsigned long hva) +{ + return kvm_handle_hva(kvm, hva, kvm_age_rmapp); +} + #ifdef MMU_DEBUG static int is_empty_shadow_page(u64 *spt) { @@ -1203,6 +1281,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn) int r; int largepage = 0; pfn_t pfn; + unsigned long mmu_seq; down_read(¤t->mm->mmap_sem); if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) { @@ -1210,6 +1289,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn) largepage = 1; } + mmu_seq = vcpu->kvm->mmu_notifier_seq; + /* implicit mb(), we'll read before PT lock is unlocked */ pfn = gfn_to_pfn(vcpu->kvm, gfn); up_read(¤t->mm->mmap_sem); @@ -1220,6 +1301,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn) } spin_lock(&vcpu->kvm->mmu_lock); + if (mmu_notifier_retry(vcpu, mmu_seq)) + goto out_unlock; kvm_mmu_free_some_pages(vcpu); r = __direct_map(vcpu, v, write, largepage, gfn, pfn, PT32E_ROOT_LEVEL); @@ -1227,6 +1310,11 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn) return r; + +out_unlock: + spin_unlock(&vcpu->kvm->mmu_lock); + kvm_release_pfn_clean(pfn); + return 0; } @@ -1345,6 +1433,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, int r; int largepage = 0; gfn_t gfn = gpa >> PAGE_SHIFT; + unsigned long mmu_seq; ASSERT(vcpu); ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa)); @@ -1358,6 +1447,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, gfn &= ~(KVM_PAGES_PER_HPAGE-1); largepage = 1; } + mmu_seq = vcpu->kvm->mmu_notifier_seq; + /* implicit mb(), we'll read before PT lock is unlocked */ pfn = gfn_to_pfn(vcpu->kvm, gfn); up_read(¤t->mm->mmap_sem); if (is_error_pfn(pfn)) { @@ -1365,12 +1456,19 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, return 1; } spin_lock(&vcpu->kvm->mmu_lock); + if (mmu_notifier_retry(vcpu, mmu_seq)) + goto out_unlock; kvm_mmu_free_some_pages(vcpu); r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK, largepage, gfn, pfn, kvm_x86_ops->get_tdp_level()); spin_unlock(&vcpu->kvm->mmu_lock); return r; + +out_unlock: + spin_unlock(&vcpu->kvm->mmu_lock); + kvm_release_pfn_clean(pfn); + return 0; } static void nonpaging_free(struct kvm_vcpu *vcpu) @@ -1670,6 +1768,8 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, gfn &= ~(KVM_PAGES_PER_HPAGE-1); vcpu->arch.update_pte.largepage = 1; } + 
vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq; + /* implicit mb(), we'll read before PT lock is unlocked */ pfn = gfn_to_pfn(vcpu->kvm, gfn); up_read(¤t->mm->mmap_sem); diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 4d918220bae..f72ac1fa35f 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -263,6 +263,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page, pfn = vcpu->arch.update_pte.pfn; if (is_error_pfn(pfn)) return; + if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq)) + return; kvm_get_pfn(pfn); mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0, gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte), @@ -380,6 +382,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, int r; pfn_t pfn; int largepage = 0; + unsigned long mmu_seq; pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); kvm_mmu_audit(vcpu, "pre page fault"); @@ -413,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, largepage = 1; } } + mmu_seq = vcpu->kvm->mmu_notifier_seq; + /* implicit mb(), we'll read before PT lock is unlocked */ pfn = gfn_to_pfn(vcpu->kvm, walker.gfn); up_read(¤t->mm->mmap_sem); @@ -424,6 +429,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, } spin_lock(&vcpu->kvm->mmu_lock); + if (mmu_notifier_retry(vcpu, mmu_seq)) + goto out_unlock; kvm_mmu_free_some_pages(vcpu); shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault, largepage, &write_pt, pfn); @@ -439,6 +446,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, spin_unlock(&vcpu->kvm->mmu_lock); return write_pt; + +out_unlock: + spin_unlock(&vcpu->kvm->mmu_lock); + kvm_release_pfn_clean(pfn); + return 0; } static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr) diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h index bc34dc21f17..0f3c5311461 100644 --- a/include/asm-x86/kvm_host.h +++ b/include/asm-x86/kvm_host.h @@ -13,6 +13,7 @@ #include #include +#include #include #include @@ -251,6 +252,7 @@ struct kvm_vcpu_arch { gfn_t gfn; /* presumed gfn during guest pte update */ pfn_t pfn; /* pfn corresponding to that gfn */ int largepage; + unsigned long mmu_seq; } update_pte; struct i387_fxsave_struct host_fx_image; @@ -729,4 +731,8 @@ asmlinkage void kvm_handle_fault_on_reboot(void); KVM_EX_ENTRY " 666b, 667b \n\t" \ ".popsection" +#define KVM_ARCH_WANT_MMU_NOTIFIER +int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); +int kvm_age_hva(struct kvm *kvm, unsigned long hva); + #endif diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 07d68a8ae8e..8525afc5310 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -121,6 +121,12 @@ struct kvm { struct kvm_coalesced_mmio_dev *coalesced_mmio_dev; struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; #endif + +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + struct mmu_notifier mmu_notifier; + unsigned long mmu_notifier_seq; + long mmu_notifier_count; +#endif }; /* The guest did something we don't support. 
*/ @@ -332,4 +338,22 @@ int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg) #define kvm_trace_cleanup() ((void)0) #endif +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER +static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq) +{ + if (unlikely(vcpu->kvm->mmu_notifier_count)) + return 1; + /* + * Both reads happen under the mmu_lock and both values are + * modified under mmu_lock, so there's no need for smp_rmb() + * here in between, otherwise mmu_notifier_count should be + * read before mmu_notifier_seq, see + * mmu_notifier_invalidate_range_end write side. + */ + if (vcpu->kvm->mmu_notifier_seq != mmu_seq) + return 1; + return 0; +} +#endif + #endif diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 3735212cd3f..7dd9b0b85e4 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -192,6 +192,123 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_vcpu_uninit); +#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) +static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) +{ + return container_of(mn, struct kvm, mmu_notifier); +} + +static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address) +{ + struct kvm *kvm = mmu_notifier_to_kvm(mn); + int need_tlb_flush; + + /* + * When ->invalidate_page runs, the linux pte has been zapped + * already but the page is still allocated until + * ->invalidate_page returns. So if we increase the sequence + * here the kvm page fault will notice if the spte can't be + * established because the page is going to be freed. If + * instead the kvm page fault establishes the spte before + * ->invalidate_page runs, kvm_unmap_hva will release it + * before returning. + * + * The sequence increase only needs to be seen at spin_unlock + * time, and not at spin_lock time. + * + * Increasing the sequence after the spin_unlock would be + * unsafe because the kvm page fault could then establish the + * pte after kvm_unmap_hva returned, without noticing the page + * is going to be freed. + */ + spin_lock(&kvm->mmu_lock); + kvm->mmu_notifier_seq++; + need_tlb_flush = kvm_unmap_hva(kvm, address); + spin_unlock(&kvm->mmu_lock); + + /* we have to flush the tlb before the pages can be freed */ + if (need_tlb_flush) + kvm_flush_remote_tlbs(kvm); + +} + +static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + struct kvm *kvm = mmu_notifier_to_kvm(mn); + int need_tlb_flush = 0; + + spin_lock(&kvm->mmu_lock); + /* + * The count increase must become visible at unlock time as no + * spte can be established without taking the mmu_lock and + * count is also read inside the mmu_lock critical section. + */ + kvm->mmu_notifier_count++; + for (; start < end; start += PAGE_SIZE) + need_tlb_flush |= kvm_unmap_hva(kvm, start); + spin_unlock(&kvm->mmu_lock); + + /* we have to flush the tlb before the pages can be freed */ + if (need_tlb_flush) + kvm_flush_remote_tlbs(kvm); +} + +static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + struct kvm *kvm = mmu_notifier_to_kvm(mn); + + spin_lock(&kvm->mmu_lock); + /* + * This sequence increase will notify the kvm page fault that + * the page that is going to be mapped in the spte could have + * been freed.
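
The consumer side of this seq/count protocol is visible in the fault paths patched above; in isolation it looks roughly like this. resolve_pfn() and map_spte() are hypothetical stand-ins for gfn_to_pfn() and the real spte setup:

	struct kvm *kvm = vcpu->kvm;
	unsigned long mmu_seq;
	pfn_t pfn;

	mmu_seq = kvm->mmu_notifier_seq;	/* snapshot before the sleepable work */
	pfn = resolve_pfn(kvm, gfn);		/* runs without mmu_lock, may sleep */

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq)) {
		/* an invalidate is running (count) or ran (seq): pfn may be stale */
		spin_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		return 0;			/* refault and retry */
	}
	map_spte(vcpu, gfn, pfn);	/* safe: invalidates serialize on mmu_lock */
	spin_unlock(&kvm->mmu_lock);
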
+ */ + kvm->mmu_notifier_seq++; + /* + * The above sequence increase must be visible before the + * below count decrease but both values are read by the kvm + * page fault under mmu_lock spinlock so we don't need to add + * a smp_wmb() here in between the two. + */ + kvm->mmu_notifier_count--; + spin_unlock(&kvm->mmu_lock); + + BUG_ON(kvm->mmu_notifier_count < 0); +} + +static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address) +{ + struct kvm *kvm = mmu_notifier_to_kvm(mn); + int young; + + spin_lock(&kvm->mmu_lock); + young = kvm_age_hva(kvm, address); + spin_unlock(&kvm->mmu_lock); + + if (young) + kvm_flush_remote_tlbs(kvm); + + return young; +} + +static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { + .invalidate_page = kvm_mmu_notifier_invalidate_page, + .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, + .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, + .clear_flush_young = kvm_mmu_notifier_clear_flush_young, +}; +#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ + static struct kvm *kvm_create_vm(void) { struct kvm *kvm = kvm_arch_create_vm(); @@ -212,6 +329,21 @@ static struct kvm *kvm_create_vm(void) (struct kvm_coalesced_mmio_ring *)page_address(page); #endif +#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) + { + int err; + kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; + err = mmu_notifier_register(&kvm->mmu_notifier, current->mm); + if (err) { +#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET + put_page(page); +#endif + kfree(kvm); + return ERR_PTR(err); + } + } +#endif + kvm->mm = current->mm; atomic_inc(&kvm->mm->mm_count); spin_lock_init(&kvm->mmu_lock); @@ -271,6 +403,9 @@ static void kvm_destroy_vm(struct kvm *kvm) #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET if (kvm->coalesced_mmio_ring != NULL) free_page((unsigned long)kvm->coalesced_mmio_ring); +#endif +#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) + mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); #endif kvm_arch_destroy_vm(kvm); mmdrop(mm); -- cgit v1.2.3-70-g09d2 From ed8486243379ef3e6c61363df915882945c0eaec Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Tue, 29 Jul 2008 11:30:57 +0300 Subject: KVM: Advertise synchronized mmu support to userspace Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 1 + include/linux/kvm.h | 1 + 2 files changed, 2 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c7b01efe064..0d682fc6aeb 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -883,6 +883,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_PIT: case KVM_CAP_NOP_IO_DELAY: case KVM_CAP_MP_STATE: + case KVM_CAP_SYNC_MMU: r = 1; break; case KVM_CAP_COALESCED_MMIO: diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 0ea064cbfbc..69511f74f91 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -371,6 +371,7 @@ struct kvm_trace_rec { #define KVM_CAP_PV_MMU 13 #define KVM_CAP_MP_STATE 14 #define KVM_CAP_COALESCED_MMIO 15 +#define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */ /* * ioctls for VM fds -- cgit v1.2.3-70-g09d2 From 8978b74253280d59e97cf49a3ec2c0cbccd5b801 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Tue, 29 Jul 2008 13:38:53 +0900 Subject: generic, x86: fix add iommu_num_pages helper function This IOMMU helper function doesn't work for some architectures: http://marc.info/?l=linux-kernel&m=121699304403202&w=2 It also breaks POWER and SPARC builds:
http://marc.info/?l=linux-kernel&m=121730388001890&w=2 Currently, only x86 IOMMUs use this so let's move it to x86 for now. Reported-by: Stephen Rothwell Signed-off-by: FUJITA Tomonori Signed-off-by: Ingo Molnar --- arch/x86/kernel/pci-dma.c | 8 ++++++++ include/asm-x86/iommu.h | 2 ++ include/linux/iommu-helper.h | 1 - lib/iommu-helper.c | 8 -------- 4 files changed, 10 insertions(+), 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 8dbffb846de..87d4d6964ec 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -123,6 +123,14 @@ void __init pci_iommu_alloc(void) pci_swiotlb_init(); } + +unsigned long iommu_num_pages(unsigned long addr, unsigned long len) +{ + unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE); + + return size >> PAGE_SHIFT; +} +EXPORT_SYMBOL(iommu_num_pages); #endif /* diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h index ecc8061904a..5f888cc5be4 100644 --- a/include/asm-x86/iommu.h +++ b/include/asm-x86/iommu.h @@ -7,6 +7,8 @@ extern struct dma_mapping_ops nommu_dma_ops; extern int force_iommu, no_iommu; extern int iommu_detected; +extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len); + #ifdef CONFIG_GART_IOMMU extern int gart_iommu_aperture; extern int gart_iommu_aperture_allowed; diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h index f8598f58394..c975caf7538 100644 --- a/include/linux/iommu-helper.h +++ b/include/linux/iommu-helper.h @@ -8,4 +8,3 @@ extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, unsigned long align_mask); extern void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr); -extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len); diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c index 889ddce2021..a3b8d4c3f77 100644 --- a/lib/iommu-helper.c +++ b/lib/iommu-helper.c @@ -80,11 +80,3 @@ void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr) } } EXPORT_SYMBOL(iommu_area_free); - -unsigned long iommu_num_pages(unsigned long addr, unsigned long len) -{ - unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE); - - return size >> PAGE_SHIFT; -} -EXPORT_SYMBOL(iommu_num_pages); -- cgit v1.2.3-70-g09d2 From 0d39741a27d86d305cc75ba626392be410dcbab9 Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Tue, 29 Jul 2008 22:34:01 -0700 Subject: GRU Driver: export is_uv_system(), zap_page_range() & follow_page() Exports needed by the GRU driver. Signed-off-by: Jack Steiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/kernel/genapic_64.c | 1 + mm/memory.c | 2 ++ 2 files changed, 3 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index 1fa8be5bd21..eaff0bbb144 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c @@ -99,3 +99,4 @@ int is_uv_system(void) { return uv_system_type != UV_NONE; } +EXPORT_SYMBOL_GPL(is_uv_system); diff --git a/mm/memory.c b/mm/memory.c index 6793b9c6810..0e4eea10c7b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -993,6 +993,7 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, tlb_finish_mmu(tlb, address, end); return end; } +EXPORT_SYMBOL_GPL(zap_page_range); /** * zap_vma_ptes - remove ptes mapping the vma @@ -1110,6 +1111,7 @@ no_page_table: } return page; } +EXPORT_SYMBOL_GPL(follow_page); /* Can we do the FOLL_ANON optimization? 
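
A quick check of the helper's rounding, under the usual 4 KiB PAGE_SIZE assumption: an unaligned buffer pays for both its partial head and tail pages.

	/* addr = 0x12345, len = 0x1000:
	 * (addr & ~PAGE_MASK) + len = 0x345 + 0x1000 = 0x1345
	 * roundup(0x1345, 0x1000) = 0x2000  ->  0x2000 >> 12 = 2 pages
	 */
	BUG_ON(iommu_num_pages(0x12345, 0x1000) != 2);
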
*/ static inline int use_zero_page(struct vm_area_struct *vma) -- cgit v1.2.3-70-g09d2 From afd962a9e8708c571c5c0c4a6d098f931742c229 Mon Sep 17 00:00:00 2001 From: Vitaly Mayatskikh Date: Wed, 30 Jul 2008 13:30:14 +0200 Subject: x86: wrong register was used in align macro The new ALIGN_DESTINATION macro has a sad typo: the r8d register was used instead of ecx in the fixup section. This can be considered a regression. Register ecx was also wrongly loaded with the value in r8d in the copy_user_nocache routine. Signed-off-by: Vitaly Mayatskikh Signed-off-by: Linus Torvalds --- arch/x86/lib/copy_user_64.S | 2 +- arch/x86/lib/copy_user_nocache_64.S | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index dfdf428975c..f118c110af3 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -52,7 +52,7 @@ jnz 100b 102: .section .fixup,"ax" -103: addl %r8d,%edx /* ecx is zerorest also */ +103: addl %ecx,%edx /* ecx is zerorest also */ jmp copy_user_handle_tail .previous diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S index 40e0e309d27..cb0c112386f 100644 --- a/arch/x86/lib/copy_user_nocache_64.S +++ b/arch/x86/lib/copy_user_nocache_64.S @@ -32,7 +32,7 @@ jnz 100b 102: .section .fixup,"ax" -103: addl %r8d,%edx /* ecx is zerorest also */ +103: addl %ecx,%edx /* ecx is zerorest also */ jmp copy_user_handle_tail .previous @@ -108,7 +108,6 @@ ENTRY(__copy_user_nocache) jmp 60f 50: movl %ecx,%edx 60: sfence - movl %r8d,%ecx jmp copy_user_handle_tail .previous -- cgit v1.2.3-70-g09d2 From e0d22d03c06c4e2c194d7010bc1e4a972199f156 Mon Sep 17 00:00:00 2001 From: Krzysztof Helt Date: Thu, 31 Jul 2008 23:43:44 +0200 Subject: x86: fdiv bug detection fix The fdiv detection code writes an s32 integer into boot_cpu_data.fdiv_bug. However, boot_cpu_data.fdiv_bug is only a char (s8) field, so the detection overwrites already set fields for other bugs, e.g. the f00f bug field. Use a local s32 variable to receive the result. This is a partial fix to Bugzilla #9928 - it fixes wrong information about the f00f bug (tested) and probably for the coma bug (I have no cpu to test this). Signed-off-by: Krzysztof Helt Cc: Andrew Morton Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/bugs.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index c9b58a806e8..c8e315f1aa8 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -50,6 +50,8 @@ static double __initdata y = 3145727.0; */ static void __init check_fpu(void) { + s32 fdiv_bug; + if (!boot_cpu_data.hard_math) { #ifndef CONFIG_MATH_EMULATION printk(KERN_EMERG "No coprocessor found and no math emulation present.\n"); @@ -74,8 +76,10 @@ static void __init check_fpu(void) "fistpl %0\n\t" "fwait\n\t" "fninit" - : "=m" (*&boot_cpu_data.fdiv_bug) + : "=m" (*&fdiv_bug) : "m" (*&x), "m" (*&y)); + + boot_cpu_data.fdiv_bug = fdiv_bug; if (boot_cpu_data.fdiv_bug) printk("Hmm, FPU with FDIV bug.\n"); } -- cgit v1.2.3-70-g09d2 From ec983f7060cd73e14cdd3edd910339127a8a4e96 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Wed, 30 Jul 2008 12:05:03 -0400 Subject: [CPUFREQ] Remove EXPERIMENTAL annotation from VIA C7 powersaver kconfig. This has been pretty solid, and doesn't see much change at all. Noticed by Harald Welte.
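
To see why the fdiv fix above matters: the asm's "=m" output stored 32 bits through the address of an 8-bit field, wiping the bug flags declared after it. A hypothetical user-space reproduction of the layout problem (field names shortened; assumes the fields are laid out in declaration order):

	struct bugs { char fdiv_bug, f00f_bug, coma_bug; } b = {
		.f00f_bug = 1, .coma_bug = 1,
	};
	*(int *)&b.fdiv_bug = 0;	/* what the old "=m" store effectively did */
	/* b.f00f_bug and b.coma_bug are now zero: detection results lost */
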
Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig index cb7a5715596..efae3b22a0f 100644 --- a/arch/x86/kernel/cpu/cpufreq/Kconfig +++ b/arch/x86/kernel/cpu/cpufreq/Kconfig @@ -235,9 +235,9 @@ config X86_LONGHAUL If in doubt, say N. config X86_E_POWERSAVER - tristate "VIA C7 Enhanced PowerSaver (EXPERIMENTAL)" + tristate "VIA C7 Enhanced PowerSaver" select CPU_FREQ_TABLE - depends on X86_32 && EXPERIMENTAL + depends on X86_32 help This adds the CPUFreq driver for VIA C7 processors. -- cgit v1.2.3-70-g09d2 From 460f5ef2835dbc33825f611f408eb09c29be4b85 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Wed, 30 Jul 2008 13:01:42 -0400 Subject: [CPUFREQ] Fix warning in elanfreq arch/x86/kernel/cpu/cpufreq/elanfreq.c:47:26: warning: symbol 'elan_multiplier' was not declared. Should it be static? Yes, yes it should. Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/elanfreq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/arch/x86/kernel/cpu/cpufreq/elanfreq.c index 94619c22f56..e4a4bf870e9 100644 --- a/arch/x86/kernel/cpu/cpufreq/elanfreq.c +++ b/arch/x86/kernel/cpu/cpufreq/elanfreq.c @@ -44,7 +44,7 @@ struct s_elan_multiplier { * It is important that the frequencies * are listed in ascending order here! */ -struct s_elan_multiplier elan_multiplier[] = { +static struct s_elan_multiplier elan_multiplier[] = { {1000, 0x02, 0x18}, {2000, 0x02, 0x10}, {4000, 0x02, 0x08}, -- cgit v1.2.3-70-g09d2 From 23431b495fcbbde6d09aba30019b6a224d543cf9 Mon Sep 17 00:00:00 2001 From: Mark Langsdorf Date: Thu, 31 Jul 2008 12:39:05 -0500 Subject: [CPUFREQ][1/2] whitespace fix for powernow-k8 Trivial whitespace fix for powernow-k8. Signed-off-by: Mark Langsdorf Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index c45ca6d4dce..84bb395038d 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -66,7 +66,6 @@ static u32 find_freq_from_fid(u32 fid) return 800 + (fid * 100); } - /* Return a frequency in KHz, given an input fid */ static u32 find_khz_freq_from_fid(u32 fid) { @@ -78,7 +77,6 @@ static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, u32 p return data[pstate].frequency; } - /* Return the vco fid for an input fid * * Each "low" fid has corresponding "high" fid, and you can get to "low" fids @@ -166,7 +164,6 @@ static void fidvid_msr_init(void) wrmsr(MSR_FIDVID_CTL, lo, hi); } - /* write the new fid value along with the other control fields to the msr */ static int write_new_fid(struct powernow_k8_data *data, u32 fid) { -- cgit v1.2.3-70-g09d2 From 34ae7f35a21694aa5cb8829dc5142c39d73d6ba0 Mon Sep 17 00:00:00 2001 From: Mark Langsdorf Date: Thu, 31 Jul 2008 12:39:12 -0500 Subject: [CPUFREQ][2/2] preregister support for powernow-k8 This patch provides support for the _PSD ACPI object in the Powernow-k8 driver. Although it looks like an invasive patch, most of it is simply the consequence of turning the static acpi_performance_data structure into a pointer. AMD has tested it on several machines over the past few days without issue. 
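
The preregister patch below hangs off the per-cpu allocator; stripped of the cpufreq specifics, the pattern it relies on looks like this (a sketch with a hypothetical payload type, using the percpu API of this era):

	struct payload { int val; };
	static struct payload *pdata;

	pdata = alloc_percpu(struct payload);	/* one copy per possible cpu */
	if (!pdata)
		return -ENOMEM;
	percpu_ptr(pdata, cpu)->val = 42;	/* reach a given cpu's copy */
	free_percpu(pdata);			/* release every copy */
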
[trivial checkpatch warnings fixed up by davej] [X86_POWERNOW_K8_ACPI=n buildfix from Randy Dunlap] Signed-off-by: Mark Langsdorf Tested-by: Frank Arnold Signed-off-by: Randy Dunlap Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 109 ++++++++++++++++++++---------- arch/x86/kernel/cpu/cpufreq/powernow-k8.h | 3 +- 2 files changed, 75 insertions(+), 37 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 84bb395038d..4e7271999a7 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -737,44 +737,63 @@ static int find_psb_table(struct powernow_k8_data *data) #ifdef CONFIG_X86_POWERNOW_K8_ACPI static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { - if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE)) + if (!data->acpi_data->state_count || (cpu_family == CPU_HW_PSTATE)) return; - data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK; - data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK; - data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; - data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK; - data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK); - data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK; + data->irt = (data->acpi_data->states[index].control >> IRT_SHIFT) & IRT_MASK; + data->rvo = (data->acpi_data->states[index].control >> RVO_SHIFT) & RVO_MASK; + data->exttype = (data->acpi_data->states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; + data->plllock = (data->acpi_data->states[index].control >> PLL_L_SHIFT) & PLL_L_MASK; + data->vidmvs = 1 << ((data->acpi_data->states[index].control >> MVS_SHIFT) & MVS_MASK); + data->vstable = (data->acpi_data->states[index].control >> VST_SHIFT) & VST_MASK; +} + + +static struct acpi_processor_performance *acpi_perf_data; +static int preregister_valid; + +static int powernow_k8_cpu_preinit_acpi(void) +{ + acpi_perf_data = alloc_percpu(struct acpi_processor_performance); + if (!acpi_perf_data) + return -ENODEV; + + if (acpi_processor_preregister_performance(acpi_perf_data)) + return -ENODEV; + else + preregister_valid = 1; + return 0; } static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { struct cpufreq_frequency_table *powernow_table; int ret_val; + int cpu = 0; - if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { + data->acpi_data = percpu_ptr(acpi_perf_data, cpu); + if (acpi_processor_register_performance(data->acpi_data, data->cpu)) { dprintk("register performance failed: bad ACPI data\n"); return -EIO; } /* verify the data contained in the ACPI structures */ - if (data->acpi_data.state_count <= 1) { + if (data->acpi_data->state_count <= 1) { dprintk("No ACPI P-States\n"); goto err_out; } - if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || - (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { + if ((data->acpi_data->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || + (data->acpi_data->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { dprintk("Invalid control/status registers (%x - %x)\n", - data->acpi_data.control_register.space_id, - data->acpi_data.status_register.space_id); + data->acpi_data->control_register.space_id, + 
data->acpi_data->status_register.space_id); goto err_out; } /* fill in data->powernow_table */ powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) - * (data->acpi_data.state_count + 1)), GFP_KERNEL); + * (data->acpi_data->state_count + 1)), GFP_KERNEL); if (!powernow_table) { dprintk("powernow_table memory alloc failure\n"); goto err_out; @@ -787,12 +806,12 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) if (ret_val) goto err_out_mem; - powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END; - powernow_table[data->acpi_data.state_count].index = 0; + powernow_table[data->acpi_data->state_count].frequency = CPUFREQ_TABLE_END; + powernow_table[data->acpi_data->state_count].index = 0; data->powernow_table = powernow_table; /* fill in data */ - data->numps = data->acpi_data.state_count; + data->numps = data->acpi_data->state_count; if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu) print_basics(data); powernow_k8_acpi_pst_values(data, 0); @@ -800,16 +819,31 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) /* notify BIOS that we exist */ acpi_processor_notify_smm(THIS_MODULE); + /* determine affinity, from ACPI if available */ + if (preregister_valid) { + if ((data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ALL) || + (data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ANY)) + data->starting_core_affinity = data->acpi_data->shared_cpu_map; + else + data->starting_core_affinity = cpumask_of_cpu(data->cpu); + } else { + /* best guess from family if not */ + if (cpu_family == CPU_HW_PSTATE) + data->starting_core_affinity = cpumask_of_cpu(data->cpu); + else + data->starting_core_affinity = per_cpu(cpu_core_map, data->cpu); + } + return 0; err_out_mem: kfree(powernow_table); err_out: - acpi_processor_unregister_performance(&data->acpi_data, data->cpu); + acpi_processor_unregister_performance(data->acpi_data, data->cpu); /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ - data->acpi_data.state_count = 0; + data->acpi_data->state_count = 0; return -ENODEV; } @@ -821,10 +855,10 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo); data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT; - for (i = 0; i < data->acpi_data.state_count; i++) { + for (i = 0; i < data->acpi_data->state_count; i++) { u32 index; - index = data->acpi_data.states[i].control & HW_PSTATE_MASK; + index = data->acpi_data->states[i].control & HW_PSTATE_MASK; if (index > data->max_hw_pstate) { printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index); printk(KERN_ERR PFX "Please report to BIOS manufacturer\n"); @@ -840,7 +874,7 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf powernow_table[i].index = index; - powernow_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000; + powernow_table[i].frequency = data->acpi_data->states[i].core_frequency * 1000; } return 0; } @@ -849,16 +883,16 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf { int i; int cntlofreq = 0; - for (i = 0; i < data->acpi_data.state_count; i++) { + for (i = 0; i < data->acpi_data->state_count; i++) { u32 fid; u32 vid; if (data->exttype) { - fid = data->acpi_data.states[i].status & EXT_FID_MASK; - vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK; + fid = data->acpi_data->states[i].status & EXT_FID_MASK; + vid = (data->acpi_data->states[i].status >> 
VID_SHIFT) & EXT_VID_MASK; } else { - fid = data->acpi_data.states[i].control & FID_MASK; - vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK; + fid = data->acpi_data->states[i].control & FID_MASK; + vid = (data->acpi_data->states[i].control >> VID_SHIFT) & VID_MASK; } dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); @@ -899,10 +933,10 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf cntlofreq = i; } - if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) { + if (powernow_table[i].frequency != (data->acpi_data->states[i].core_frequency * 1000)) { printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n", powernow_table[i].frequency, - (unsigned int) (data->acpi_data.states[i].core_frequency * 1000)); + (unsigned int) (data->acpi_data->states[i].core_frequency * 1000)); powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; continue; } @@ -912,11 +946,12 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { - if (data->acpi_data.state_count) - acpi_processor_unregister_performance(&data->acpi_data, data->cpu); + if (data->acpi_data->state_count) + acpi_processor_unregister_performance(data->acpi_data, data->cpu); } #else +static int powernow_k8_cpu_preinit_acpi(void) { return -ENODEV; } static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; } static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; } static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; } @@ -1101,7 +1136,7 @@ static int powernowk8_verify(struct cpufreq_policy *pol) static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) { struct powernow_k8_data *data; - cpumask_t oldmask; + cpumask_t oldmask = CPU_MASK_ALL; int rc; if (!cpu_online(pol->cpu)) @@ -1174,10 +1209,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) /* run on any CPU again */ set_cpus_allowed_ptr(current, &oldmask); - if (cpu_family == CPU_HW_PSTATE) - pol->cpus = cpumask_of_cpu(pol->cpu); - else - pol->cpus = per_cpu(cpu_core_map, pol->cpu); + pol->cpus = data->starting_core_affinity; data->available_cores = &(pol->cpus); /* Take a crude guess here. @@ -1300,6 +1332,7 @@ static int __cpuinit powernowk8_init(void) } if (supported_cpus == num_online_cpus()) { + powernow_k8_cpu_preinit_acpi(); printk(KERN_INFO PFX "Found %d %s " "processors (%d cpu cores) (" VERSION ")\n", num_online_nodes(), @@ -1316,6 +1349,10 @@ static void __exit powernowk8_exit(void) dprintk("exit\n"); cpufreq_unregister_driver(&cpufreq_amd64_driver); + +#ifdef CONFIG_X86_POWERNOW_K8_ACPI + free_percpu(acpi_perf_data); +#endif } MODULE_AUTHOR("Paul Devriendt and Mark Langsdorf "); diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h index ab48cfed4d9..a62612cd4be 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h @@ -33,12 +33,13 @@ struct powernow_k8_data { #ifdef CONFIG_X86_POWERNOW_K8_ACPI /* the acpi table needs to be kept. 
it's only available if ACPI was * used to determine valid frequency/vid/fid states */ - struct acpi_processor_performance acpi_data; + struct acpi_processor_performance *acpi_data; #endif /* we need to keep track of associated cores, but let cpufreq * handle hotplug events - so just point at cpufreq pol->cpus * structure */ cpumask_t *available_cores; + cpumask_t starting_core_affinity; }; -- cgit v1.2.3-70-g09d2 From 31343d8a5079cda57ffd539fcf4f00cea344fe98 Mon Sep 17 00:00:00 2001 From: Alok Kataria Date: Fri, 8 Aug 2008 12:15:57 -0700 Subject: x86: Fix broken VMI in 2.6.27-rc.. The lowmem mapping table created by VMI need not depend on max_low_pfn at all. Instead we now create an extra large mapping which covers all possible lowmem instead of the physical RAM that is actually available. This allows the VMI initialization to be done before max_low_pfn is computed. We also move the vmi_init code very early in the boot process so that nobody accidentally breaks the fixmap dependency. Signed-off-by: Alok N Kataria Acked-by: Zachary Amsden Signed-off-by: H. Peter Anvin --- arch/x86/kernel/setup.c | 16 ++++++++-------- arch/x86/kernel/vmi_32.c | 3 ++- 2 files changed, 10 insertions(+), 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 2d888586385..6e5823b9e76 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -604,6 +604,14 @@ void __init setup_arch(char **cmdline_p) early_cpu_init(); early_ioremap_init(); +#if defined(CONFIG_VMI) && defined(CONFIG_X86_32) + /* + * Must be before kernel pagetables are setup + * or fixmap area is touched. + */ + vmi_init(); +#endif + ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev); screen_info = boot_params.screen_info; edid_info = boot_params.edid_info; @@ -817,14 +825,6 @@ void __init setup_arch(char **cmdline_p) kvmclock_init(); #endif -#if defined(CONFIG_VMI) && defined(CONFIG_X86_32) - /* - * Must be after max_low_pfn is determined, and before kernel - * pagetables are setup. - */ - vmi_init(); -#endif - paravirt_pagetable_setup_start(swapper_pg_dir); paging_init(); paravirt_pagetable_setup_done(swapper_pg_dir); diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index 0a1b1a9d922..6ca515d6db5 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c @@ -37,6 +37,7 @@ #include #include #include +#include /* Convenient for calling VMI functions indirectly in the ROM */ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void); @@ -683,7 +684,7 @@ void vmi_bringup(void) { /* We must establish the lowmem mapping for MMU ops to work */ if (vmi_ops.set_linear_mapping) - vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, max_low_pfn, 0); + vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, MAXMEM_PFN, 0); } /* -- cgit v1.2.3-70-g09d2 From d388e5fdc461344d04307a3fa83862b9ed429647 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sat, 9 Aug 2008 15:09:02 -0700 Subject: x86: Restore proper vector locking during cpu hotplug Having cpu_online_map change during assign_irq_vector can result in some really nasty and weird things happening. The one that bit me last time was accessing non-existent per-CPU memory for non-existent cpus. This locking was removed in a sloppy x86_64 and x86_32 merge patch. Guys, can we please try and avoid subtly breaking x86 when we are merging files together?
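The invariant being restored, stated plainly: cpu_online_map may only change while vector_lock is held, so any path that assigns vectors can take the same lock and work against a stable set of cpus. A minimal sketch of the two sides of that discipline (illustrative only; bring_cpu_online() and assign_vectors_stably() are made-up names, the real hooks are lock_vector_lock()/unlock_vector_lock() in the diff below):

#include <linux/spinlock.h>
#include <linux/cpumask.h>

static DEFINE_SPINLOCK(vector_lock);

/* hotplug side: the online map changes only under vector_lock */
static void bring_cpu_online(int cpu)
{
	spin_lock(&vector_lock);
	cpu_set(cpu, cpu_online_map);
	spin_unlock(&vector_lock);
}

/* vector-assignment side: holding the lock pins the online set, so
 * per-cpu data of a cpu that is going away is never dereferenced */
static void assign_vectors_stably(void)
{
	int cpu;

	spin_lock(&vector_lock);
	for_each_online_cpu(cpu) {
		/* safe: cpu cannot go offline while we hold the lock */
		;
	}
	spin_unlock(&vector_lock);
}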
Signed-off-by: Eric W. Biederman Signed-off-by: H. Peter Anvin --- arch/x86/kernel/io_apic_32.c | 6 +----- arch/x86/kernel/io_apic_64.c | 25 +++++++++++++++---------- arch/x86/kernel/smpboot.c | 12 +++++++++--- include/asm-x86/hw_irq.h | 12 ++++++++++-- 4 files changed, 35 insertions(+), 20 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index de9aa0e3a9c..09cddb57bec 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c @@ -57,7 +57,7 @@ atomic_t irq_mis_count; static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; static DEFINE_SPINLOCK(ioapic_lock); -static DEFINE_SPINLOCK(vector_lock); +DEFINE_SPINLOCK(vector_lock); int timer_through_8259 __initdata; @@ -1209,10 +1209,6 @@ static int assign_irq_vector(int irq) return vector; } -void setup_vector_irq(int cpu) -{ -} - static struct irq_chip ioapic_chip; #define IOAPIC_AUTO -1 diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index 8269434d170..61a83b70c18 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c @@ -101,7 +101,7 @@ int timer_through_8259 __initdata; static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; static DEFINE_SPINLOCK(ioapic_lock); -DEFINE_SPINLOCK(vector_lock); +static DEFINE_SPINLOCK(vector_lock); /* * # of IRQ routing registers @@ -697,6 +697,19 @@ static int pin_2_irq(int idx, int apic, int pin) return irq; } +void lock_vector_lock(void) +{ + /* Used to ensure the online set of cpus does not change + * during assign_irq_vector. + */ + spin_lock(&vector_lock); +} + +void unlock_vector_lock(void) +{ + spin_unlock(&vector_lock); +} + static int __assign_irq_vector(int irq, cpumask_t mask) { /* @@ -802,7 +815,7 @@ static void __clear_irq_vector(int irq) cpus_clear(cfg->domain); } -static void __setup_vector_irq(int cpu) +void __setup_vector_irq(int cpu) { /* Initialize vector_irq on a new cpu */ /* This function must be called with vector_lock held */ @@ -825,14 +838,6 @@ static void __setup_vector_irq(int cpu) } } -void setup_vector_irq(int cpu) -{ - spin_lock(&vector_lock); - __setup_vector_irq(smp_processor_id()); - spin_unlock(&vector_lock); -} - - static struct irq_chip ioapic_chip; static void ioapic_register_intr(int irq, unsigned long trigger) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 332512767f4..da10f07fc59 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -326,12 +326,16 @@ static void __cpuinit start_secondary(void *unused) * for which cpus receive the IPI. Holding this * lock helps us to not include this cpu in a currently in progress * smp_call_function(). + * + * We need to hold vector_lock so that the set of online cpus + * does not change while we are assigning vectors to cpus. Holding + * this lock ensures we don't half assign or remove an irq from a cpu.
*/ ipi_call_lock_irq(); -#ifdef CONFIG_X86_IO_APIC - setup_vector_irq(smp_processor_id()); -#endif + lock_vector_lock(); + __setup_vector_irq(smp_processor_id()); cpu_set(smp_processor_id(), cpu_online_map); + unlock_vector_lock(); ipi_call_unlock_irq(); per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; @@ -1336,7 +1340,9 @@ int __cpu_disable(void) remove_siblinginfo(cpu); /* It's now safe to remove this processor from the online map */ + lock_vector_lock(); remove_cpu_from_maps(cpu); + unlock_vector_lock(); fixup_irqs(cpu_online_map); return 0; } diff --git a/include/asm-x86/hw_irq.h b/include/asm-x86/hw_irq.h index 77ba51df566..edd0b95f14d 100644 --- a/include/asm-x86/hw_irq.h +++ b/include/asm-x86/hw_irq.h @@ -98,9 +98,17 @@ extern void (*const interrupt[NR_IRQS])(void); #else typedef int vector_irq_t[NR_VECTORS]; DECLARE_PER_CPU(vector_irq_t, vector_irq); -extern spinlock_t vector_lock; #endif -extern void setup_vector_irq(int cpu); + +#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_X86_64) +extern void lock_vector_lock(void); +extern void unlock_vector_lock(void); +extern void __setup_vector_irq(int cpu); +#else +static inline void lock_vector_lock(void) {} +static inline void unlock_vector_lock(void) {} +static inline void __setup_vector_irq(int cpu) {} +#endif #endif /* !__ASSEMBLY__ */ -- cgit v1.2.3-70-g09d2 From eeb0d7d113895556db473ff1e638803d7d49bff9 Mon Sep 17 00:00:00 2001 From: Rene Herman Date: Mon, 11 Aug 2008 17:44:57 +0200 Subject: x86, debug: tone down arch/x86/kernel/mpparse.c debugging printk commit 11a62a056093a7f25f1595fbd8bd5f93559572b6 turns some formerly nopped debugging printks in arch/x86/kernel/mpparse.c into regular ones. The one at the top of smp_scan_config() in particular also prints on !CONFIG_SMP/CONFIG_X86_LOCAL_APIC kernels and on UP machines without anything resembling MP tables, which makes their lowly UP owners wonder... Turn the former Dprintk()s into apic_printk()s instead, meaning that they only print when the apic=verbose (or =debug) command line param is passed. On 32-bit, "apic" is a __setup() param, which isn't early enough for this code and therefore needs a followup changing it into an early_param(). On 64-bit, it already is.
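For context, apic_printk() is nothing more than a verbosity-gated printk. Roughly, as a simplified sketch of the kernel macro rather than a verbatim copy:

#define APIC_QUIET   0
#define APIC_VERBOSE 1
#define APIC_DEBUG   2

extern unsigned int apic_verbosity;	/* raised by apic=verbose or apic=debug */

#define apic_printk(v, s, a...)			\
	do {					\
		if ((v) <= apic_verbosity)	\
			printk(s, ##a);		\
	} while (0)

An APIC_VERBOSE message is therefore silent on a default boot and only appears once the user opts in on the command line.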
Signed-off-by: Rene Herman Cc: Andrew Morton Cc: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 6ae005ccaed..678090508a6 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -83,7 +83,7 @@ static void __init MP_bus_info(struct mpc_config_bus *m) if (x86_quirks->mpc_oem_bus_info) x86_quirks->mpc_oem_bus_info(m, str); else - printk(KERN_INFO "Bus #%d is %s\n", m->mpc_busid, str); + apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->mpc_busid, str); #if MAX_MP_BUSSES < 256 if (m->mpc_busid >= MAX_MP_BUSSES) { @@ -154,7 +154,7 @@ static void __init MP_ioapic_info(struct mpc_config_ioapic *m) static void print_MP_intsrc_info(struct mpc_config_intsrc *m) { - printk(KERN_CONT "Int: type %d, pol %d, trig %d, bus %02x," + apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x," " IRQ %02x, APIC ID %x, APIC INT %02x\n", m->mpc_irqtype, m->mpc_irqflag & 3, (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, @@ -163,7 +163,7 @@ static void print_MP_intsrc_info(struct mpc_config_intsrc *m) static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq) { - printk(KERN_CONT "Int: type %d, pol %d, trig %d, bus %02x," + apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x," " IRQ %02x, APIC ID %x, APIC INT %02x\n", mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3, (mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus, @@ -235,7 +235,7 @@ static void __init MP_intsrc_info(struct mpc_config_intsrc *m) static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m) { - printk(KERN_INFO "Lint: type %d, pol %d, trig %d, bus %02x," + apic_printk(APIC_VERBOSE, "Lint: type %d, pol %d, trig %d, bus %02x," " IRQ %02x, APIC ID %x, APIC LINT %02x\n", m->mpc_irqtype, m->mpc_irqflag & 3, (m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid, @@ -695,7 +695,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length, unsigned int *bp = phys_to_virt(base); struct intel_mp_floating *mpf; - printk(KERN_DEBUG "Scan SMP from %p for %ld bytes.\n", bp, length); + apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n", + bp, length); BUILD_BUG_ON(sizeof(*mpf) != 16); while (length > 0) { -- cgit v1.2.3-70-g09d2 From fb6bef8002d54a9c3062abc281893ec7c896d3ce Mon Sep 17 00:00:00 2001 From: Rene Herman Date: Mon, 11 Aug 2008 17:45:53 +0200 Subject: x86: make "apic" an early_param() on 32-bit On 32-bit, "apic" is a __setup() param meaning it is parsed rather late in the game. Make it an early_param() for apic_printk() use by arch/x86/kernel/mpparse.c. On 64-bit, it already is an early_param(). 
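The timing difference between the two registration styles is the whole point here. Schematically, using a hypothetical "foo" parameter and the usual return conventions (__setup() handlers return 1 when they consume the option, early_param() handlers return 0 on success):

/* __setup(): parsed from start_kernel() after setup_arch() has run,
 * which is too late for apic_printk() users in mpparse.c */
static int __init foo_setup(char *str)
{
	/* str is whatever follows "foo=" on the command line */
	return 1;
}
__setup("foo=", foo_setup);

/* early_param(): parsed from parse_early_param() inside setup_arch(),
 * early enough for MP-table scanning */
static int __init foo_early(char *arg)
{
	return 0;
}
early_param("foo", foo_early);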
Signed-off-by: Rene Herman Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_32.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index d6c89835837..f432d4833a1 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c @@ -1726,9 +1726,9 @@ static int __init apic_set_verbosity(char *str) apic_verbosity = APIC_DEBUG; else if (strcmp("verbose", str) == 0) apic_verbosity = APIC_VERBOSE; - return 1; + return 0; } -__setup("apic=", apic_set_verbosity); +early_param("apic", apic_set_verbosity); static int __init lapic_insert_resource(void) { -- cgit v1.2.3-70-g09d2 From cf3e50501259f9a7cb108a69c3e1b912135628f6 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 8 Aug 2008 13:46:07 -0700 Subject: x86: work around gcc 3.4.x bug Simon Horman reported that gcc-3.4.x crashes when compiling pgd_prepopulate_pmd() when PREALLOCATED_PMDS == 0 and CONFIG_DEBUG_INFO is enabled. Adding an extra check for PREALLOCATED_PMDS == 0 [which is compiled out by gcc] seems to avoid the problem. Reported-by: Simon Horman Signed-off-by: Jeremy Fitzhardinge Acked-by: Simon Horman Signed-off-by: Ingo Molnar --- arch/x86/mm/pgtable.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 557b2abceef..d50302774fe 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -207,6 +207,9 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[]) unsigned long addr; int i; + if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */ + return; + pud = pud_offset(pgd, 0); for (addr = i = 0; i < PREALLOCATED_PMDS; -- cgit v1.2.3-70-g09d2 From 9b0094f7f289d752868aae1a3984517d712cbb22 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 7 Aug 2008 15:14:55 -0700 Subject: x86, pci-calgary: fix function declaration Fix function declaration: linux-next-20080807/arch/x86/kernel/pci-calgary_64.c:1353:36: warning: non-ANSI function declaration of function 'get_tce_space_from_tar' Signed-off-by: Randy Dunlap Acked-by: Muli Ben-Yehuda Signed-off-by: Ingo Molnar --- arch/x86/kernel/pci-calgary_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index b67a4b1d4ea..02d19328525 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c @@ -1350,7 +1350,7 @@ static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl) * Function for kdump case. Get the tce tables from first kernel * by reading the contents of the base address register of calgary iommu */ -static void get_tce_space_from_tar() +static void get_tce_space_from_tar(void) { int bus; void __iomem *target; -- cgit v1.2.3-70-g09d2 From 48d97cb65e62a5f1122ac2cf1149800d4f4693e8 Mon Sep 17 00:00:00 2001 From: Rene Herman Date: Mon, 11 Aug 2008 19:20:17 +0200 Subject: x86: make "apic" an early_param() on 32-bit, NULL check Cyrill Gorcunov observed: > you turned it into early_param so now it's NULL injecting vulnerabled. > Could you please add checking for NULL str param? Fix that. Also, change the name of 'str' into 'arg', to make it more apparent that this is an optional argument that can be NULL, not a string parameter that is empty when unset.
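The general rule being applied: when a parameter appears on the command line without "=value", an early_param() handler receives arg == NULL, whereas a __setup("name=", ...) handler is only ever invoked with a string following a matching "name=". Every early_param() handler therefore needs the guard shown in the diff below; in generic form (hypothetical name, for illustration):

static int __init bar_early(char *arg)
{
	if (!arg)		/* bare "bar" with no value */
		return -EINVAL;
	return 0;
}
early_param("bar", bar_early);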
Reported-by: Cyrill Gorcunov Signed-off-by: Rene Herman Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_32.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index f432d4833a1..039a8d4aaf6 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c @@ -1720,12 +1720,16 @@ static int __init parse_lapic_timer_c2_ok(char *arg) } early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok); -static int __init apic_set_verbosity(char *str) +static int __init apic_set_verbosity(char *arg) { - if (strcmp("debug", str) == 0) + if (!arg) + return -EINVAL; + + if (strcmp(arg, "debug") == 0) apic_verbosity = APIC_DEBUG; - else if (strcmp("verbose", str) == 0) + else if (strcmp(arg, "verbose") == 0) apic_verbosity = APIC_VERBOSE; + return 0; } early_param("apic", apic_set_verbosity); -- cgit v1.2.3-70-g09d2 From b74548e76a0eab1f29546e7c5a589429c069a680 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 11 Aug 2008 13:36:04 -0700 Subject: x86: fix 2.6.27-rc1 cannot boot more than 8 CPUs Jeff Chua reported that booting a !bigsmp kernel on a 16-way box hangs silently. This is a long-standing issue: the SMP boot code should check an AP's APIC ID (>= 8 etc.) before trying to start it. Achieve this by moving the def_to_bigsmp check later and skipping APs whose APIC ID is > 8. [ mingo@elte.hu: clean up the message that is printed. ] Reported-by: "Jeff Chua" Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup.c | 6 ------ arch/x86/kernel/smpboot.c | 10 ++++++++++ 2 files changed, 10 insertions(+), 6 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 6e5823b9e76..68b48e3fbcb 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -861,12 +861,6 @@ void __init setup_arch(char **cmdline_p) init_apic_mappings(); ioapic_init_mappings(); -#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC) && defined(CONFIG_X86_32) - if (def_to_bigsmp) - printk(KERN_WARNING "More than 8 CPUs detected and " - "CONFIG_X86_PC cannot handle it.\nUse " - "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n"); -#endif kvm_guest_init(); e820_reserve_resources(); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index da10f07fc59..91055d7fc1b 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -994,7 +994,17 @@ int __cpuinit native_cpu_up(unsigned int cpu) flush_tlb_all(); low_mappings = 1; +#ifdef CONFIG_X86_PC + if (def_to_bigsmp && apicid > 8) { + printk(KERN_WARNING + "More than 8 CPUs detected - skipping them.\n" + "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n"); + err = -1; + } else + err = do_boot_cpu(apicid, cpu); +#else err = do_boot_cpu(apicid, cpu); +#endif zap_low_mappings(); low_mappings = 0; -- cgit v1.2.3-70-g09d2 From 912985dce45ef18fcdd9f5439fef054e0e22302a Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 12 Aug 2008 17:52:52 -0500 Subject: mm: Make generic weak get_user_pages_fast and EXPORT_GPL it Move the out-of-line get_user_pages_fast() fallback implementation into generic code, make it a weak symbol, and get rid of CONFIG_HAVE_GET_USER_PAGES_FAST. Export the symbol to modules so lguest can use it.
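Background on the mechanism, which is standard GCC/ELF weak-symbol semantics rather than anything this patch invents: the linker keeps a weak definition only when no strong definition of the same name exists, so an architecture overrides the generic out-of-line fallback simply by compiling in its own strong get_user_pages_fast(). In miniature, with a hypothetical my_op():

/* generic.c: weak fallback, used when nothing overrides it */
int __attribute__((weak)) my_op(int x)
{
	return x + 1;	/* slow generic path */
}

/* arch.c: optional strong definition; the linker prefers it */
int my_op(int x)
{
	return x << 1;	/* fast arch-specific path */
}

Callers just call my_op(); which body they get is settled at link time, which is why the CONFIG_HAVE_GET_USER_PAGES_FAST knob can go away.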
Signed-off-by: Nick Piggin Signed-off-by: Rusty Russell --- arch/powerpc/Kconfig | 3 --- arch/x86/Kconfig | 1 - arch/x86/mm/Makefile | 3 +-- include/linux/mm.h | 20 -------------------- mm/Kconfig | 3 --- mm/util.c | 15 +++++++++++++++ 6 files changed, 16 insertions(+), 29 deletions(-) (limited to 'arch/x86') diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 63c9cafda9c..587da5e0990 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -42,9 +42,6 @@ config GENERIC_HARDIRQS bool default y -config HAVE_GET_USER_PAGES_FAST - def_bool PPC64 - config HAVE_SETUP_PER_CPU_AREA def_bool PPC64 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 3d0f2b6a5a1..ac2fb0641a0 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -22,7 +22,6 @@ config X86 select HAVE_IDE select HAVE_OPROFILE select HAVE_IOREMAP_PROT - select HAVE_GET_USER_PAGES_FAST select HAVE_KPROBES select ARCH_WANT_OPTIONAL_GPIOLIB select HAVE_KRETPROBES diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 2977ea37791..dfb932dcf13 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -1,7 +1,6 @@ obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ - pat.o pgtable.o + pat.o pgtable.o gup.o -obj-$(CONFIG_HAVE_GET_USER_PAGES_FAST) += gup.o obj-$(CONFIG_X86_32) += pgtable_32.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o diff --git a/include/linux/mm.h b/include/linux/mm.h index 335288bff1b..fa651609b65 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -834,7 +834,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, unsigned long newflags); -#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST /* * get_user_pages_fast provides equivalent functionality to get_user_pages, * operating on current and current->mm (force=0 and doesn't return any vmas). @@ -848,25 +847,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma, int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages); -#else -/* - * Should probably be moved to asm-generic, and architectures can include it if - * they don't implement their own get_user_pages_fast. - */ -#define get_user_pages_fast(start, nr_pages, write, pages) \ -({ \ - struct mm_struct *mm = current->mm; \ - int ret; \ - \ - down_read(&mm->mmap_sem); \ - ret = get_user_pages(current, mm, start, nr_pages, \ - write, 0, pages, NULL); \ - up_read(&mm->mmap_sem); \ - \ - ret; \ -}) -#endif - /* * A callback you can register to apply pressure to ageable caches. * diff --git a/mm/Kconfig b/mm/Kconfig index 446c6588c75..0bd9c2dbb2a 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -77,9 +77,6 @@ config FLAT_NODE_MEM_MAP def_bool y depends on !SPARSEMEM -config HAVE_GET_USER_PAGES_FAST - bool - # # Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's # to represent different areas of memory. This variable allows diff --git a/mm/util.c b/mm/util.c index 9341ca77bd8..cb00b748ce4 100644 --- a/mm/util.c +++ b/mm/util.c @@ -171,3 +171,18 @@ void arch_pick_mmap_layout(struct mm_struct *mm) mm->unmap_area = arch_unmap_area; } #endif + +int __attribute__((weak)) get_user_pages_fast(unsigned long start, + int nr_pages, int write, struct page **pages) +{ + struct mm_struct *mm = current->mm; + int ret; + + down_read(&mm->mmap_sem); + ret = get_user_pages(current, mm, start, nr_pages, + write, 0, pages, NULL); + up_read(&mm->mmap_sem); + + return ret; +} +EXPORT_SYMBOL_GPL(get_user_pages_fast); -- cgit v1.2.3-70-g09d2