author | Thomas Gleixner <tglx@linutronix.de> | 2011-01-27 12:29:13 +0100
committer | Thomas Gleixner <tglx@linutronix.de> | 2011-01-27 12:29:37 +0100
commit | f97b12cce6dea51880a6a89d4607c29c70a6a841 (patch)
tree | 1f05f6d39975bd213e7506e8a73ae0a59188c75e /arch/x86/mm/tlb.c
parent | ccaa8d657117bb1876d471bd91579d774106778d (diff)
parent | 1bae4ce27c9c90344f23c65ea6966c50ffeae2f5 (diff)
Merge commit 'v2.6.38-rc2' into core/locking
Reason: Update to mainline before adding the locking cleanup
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/mm/tlb.c')
-rw-r--r-- | arch/x86/mm/tlb.c | 7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 49358481c73..6acc724d5d8 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -223,7 +223,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 
 static void __cpuinit calculate_tlb_offset(void)
 {
-	int cpu, node, nr_node_vecs;
+	int cpu, node, nr_node_vecs, idx = 0;
 	/*
 	 * we are changing tlb_vector_offset for each CPU in runtime, but this
 	 * will not cause inconsistency, as the write is atomic under X86. we
@@ -239,7 +239,7 @@ static void __cpuinit calculate_tlb_offset(void)
 		nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes;
 
 	for_each_online_node(node) {
-		int node_offset = (node % NUM_INVALIDATE_TLB_VECTORS) *
+		int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) *
			nr_node_vecs;
		int cpu_offset = 0;
		for_each_cpu(cpu, cpumask_of_node(node)) {
@@ -248,10 +248,11 @@ static void __cpuinit calculate_tlb_offset(void)
 			cpu_offset++;
 			cpu_offset = cpu_offset % nr_node_vecs;
 		}
+		idx++;
 	}
 }
 
-static int tlb_cpuhp_notify(struct notifier_block *n,
+static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
 		unsigned long action, void *hcpu)
 {
 	switch (action & 0xf) {
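For context, here is a minimal user-space sketch of the offset arithmetic the first two hunks change: when the online NUMA node IDs are not contiguous, deriving the vector offset from the raw node number can push it past the available invalidate vectors, while a dense per-iteration index keeps it in range. The vector count and the sparse node IDs below are invented for illustration and are not taken from the commit.

/*
 * Stand-alone sketch (not kernel code) of the old vs. new offset
 * calculation in calculate_tlb_offset(). Assumes a hypothetical box
 * with 8 invalidate vectors and two online nodes numbered 0 and 2.
 */
#include <stdio.h>

#define NUM_INVALIDATE_TLB_VECTORS 8

int main(void)
{
	const int sparse_nodes[] = { 0, 2 };	/* non-contiguous online node IDs */
	const int nr_online_nodes = 2;
	int nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS / nr_online_nodes; /* 4 */
	int idx = 0;

	for (int i = 0; i < nr_online_nodes; i++) {
		int node = sparse_nodes[i];

		/* old: offset derived from the raw node number */
		int old_offset = (node % NUM_INVALIDATE_TLB_VECTORS) * nr_node_vecs;

		/* new: offset derived from a dense loop index */
		int new_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) * nr_node_vecs;

		printf("node %d: old offset %d%s, new offset %d\n",
		       node, old_offset,
		       old_offset + nr_node_vecs > NUM_INVALIDATE_TLB_VECTORS ?
				" (past the last vector)" : "",
		       new_offset);
		idx++;
	}
	return 0;
}

With these assumed values, node 2 gets old offset 8, i.e. vectors 8..11 on a machine that only has vectors 0..7, whereas the per-iteration index maps it to offset 4, which stays within range.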