author	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-01 09:15:15 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-01 09:15:15 -0800
commit	ac0f6f927db539e03e1f3f61bcd4ed57d5cde7a9 (patch)
tree	816e5ac643b15c2050c64a7075f0f7e13d86ea09 /arch/arm/mm/context.c
parent	b1bf9368407ae7e89d8a005bb40beb70a41df539 (diff)
parent	9f33be2c3a80bdc2cc08342dd77fac87652e0548 (diff)
Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm: (100 commits)
  ARM: Eliminate decompressor -Dstatic= PIC hack
  ARM: 5958/1: ARM: U300: fix inverted clk round rate
  ARM: 5956/1: misplaced parentheses
  ARM: 5955/1: ep93xx: move timer defines into core.c and document
  ARM: 5954/1: ep93xx: move gpio interrupt support to gpio.c
  ARM: 5953/1: ep93xx: fix broken build of clock.c
  ARM: 5952/1: ARM: MM: Add ARM_L1_CACHE_SHIFT_6 for handle inside each ARCH Kconfig
  ARM: 5949/1: NUC900 add gpio virtual memory map
  ARM: 5948/1: Enable timer0 to time4 clock support for nuc910
  ARM: 5940/2: ARM: MMCI: remove custom DBG macro and printk
  ARM: make_coherent(): fix problems with highpte, part 2
  MM: Pass a PTE pointer to update_mmu_cache() rather than the PTE itself
  ARM: 5945/1: ep93xx: include correct irq.h in core.c
  ARM: 5933/1: amba-pl011: support hardware flow control
  ARM: 5930/1: Add PKMAP area description to memory.txt.
  ARM: 5929/1: Add checks to detect overlap of memory regions.
  ARM: 5928/1: Change type of VMALLOC_END to unsigned long.
  ARM: 5927/1: Make delimiters of DMA area globally visibly.
  ARM: 5926/1: Add "Virtual kernel memory..." printout.
  ARM: 5920/1: OMAP4: Enable L2 Cache
  ...

Fix up trivial conflict in arch/arm/mach-mx25/clock.c
Diffstat (limited to 'arch/arm/mm/context.c')
-rw-r--r--	arch/arm/mm/context.c	124
1 file changed, 110 insertions, 14 deletions
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index a9e22e31eaa..b0ee9ba3cfa 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -10,12 +10,17 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

static DEFINE_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(struct mm_struct *, current_mm);
+#endif

/*
* We fork()ed a process, and we need a new context for the child
@@ -26,13 +31,109 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION;
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
mm->context.id = 0;
+ spin_lock_init(&mm->context.id_lock);
}

+static void flush_context(void)
+{
+ /* set the reserved ASID before flushing the TLB */
+ asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0));
+ isb();
+ local_flush_tlb_all();
+ if (icache_is_vivt_asid_tagged()) {
+ __flush_icache_all();
+ dsb();
+ }
+}
+
+#ifdef CONFIG_SMP
+
+static void set_mm_context(struct mm_struct *mm, unsigned int asid)
+{
+ unsigned long flags;
+
+ /*
+ * Locking needed for multi-threaded applications where the
+ * same mm->context.id could be set from different CPUs during
+ * the broadcast. This function is also called via IPI so the
+ * mm->context.id_lock has to be IRQ-safe.
+ */
+ spin_lock_irqsave(&mm->context.id_lock, flags);
+ if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
+ /*
+ * Old version of ASID found. Set the new one and
+ * reset mm_cpumask(mm).
+ */
+ mm->context.id = asid;
+ cpumask_clear(mm_cpumask(mm));
+ }
+ spin_unlock_irqrestore(&mm->context.id_lock, flags);
+
+ /*
+ * Set the mm_cpumask(mm) bit for the current CPU.
+ */
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+}
+
+/*
+ * Reset the ASID on the current CPU. This function call is broadcast
+ * from the CPU handling the ASID rollover and holding cpu_asid_lock.
+ */
+static void reset_context(void *info)
+{
+ unsigned int asid;
+ unsigned int cpu = smp_processor_id();
+ struct mm_struct *mm = per_cpu(current_mm, cpu);
+
+ /*
+ * Check if a current_mm was set on this CPU as it might still
+ * be in the early booting stages and using the reserved ASID.
+ */
+ if (!mm)
+ return;
+
+ smp_rmb();
+ asid = cpu_last_asid + cpu + 1;
+
+ flush_context();
+ set_mm_context(mm, asid);
+
+ /* set the new ASID */
+ asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
+ isb();
+}
+
+#else
+
+static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
+{
+ mm->context.id = asid;
+ cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
+}
+
+#endif
+
void __new_context(struct mm_struct *mm)
{
unsigned int asid;
unsigned int asid;
spin_lock(&cpu_asid_lock);
+#ifdef CONFIG_SMP
+ /*
+ * Check the ASID again, in case the change was broadcast from
+ * another CPU before we acquired the lock.
+ */
+ if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+ spin_unlock(&cpu_asid_lock);
+ return;
+ }
+#endif
+ /*
+ * At this point, it is guaranteed that the current mm (with
+ * an old ASID) isn't active on any other CPU since the ASIDs
+ * are changed simultaneously via IPI.
+ */
asid = ++cpu_last_asid;
if (asid == 0)
asid = cpu_last_asid = ASID_FIRST_VERSION;
@@ -42,20 +143,15 @@ void __new_context(struct mm_struct *mm)
* to start a new version and flush the TLB.
*/
if (unlikely((asid & ~ASID_MASK) == 0)) {
- asid = ++cpu_last_asid;
- /* set the reserved ASID before flushing the TLB */
- asm("mcr p15, 0, %0, c13, c0, 1 @ set reserved context ID\n"
- :
- : "r" (0));
- isb();
- flush_tlb_all();
- if (icache_is_vivt_asid_tagged()) {
- __flush_icache_all();
- dsb();
- }
+ asid = cpu_last_asid + smp_processor_id() + 1;
+ flush_context();
+#ifdef CONFIG_SMP
+ smp_wmb();
+ smp_call_function(reset_context, NULL, 1);
+#endif
+ cpu_last_asid += NR_CPUS;
}
- spin_unlock(&cpu_asid_lock);
-
- cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
- mm->context.id = asid;
+
+ set_mm_context(mm, asid);
+ spin_unlock(&cpu_asid_lock);
}
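
A note on the allocation scheme above. On a rollover (the low ASID_BITS of
cpu_last_asid wrap to zero), the CPU holding cpu_asid_lock takes
cpu_last_asid + smp_processor_id() + 1, every other CPU takes
cpu_last_asid + cpu + 1 from its reset_context() IPI, and cpu_last_asid
then advances by NR_CPUS, so each CPU gets a unique ASID in the new
generation. The test (mm->context.id ^ cpu_last_asid) >> ASID_BITS works
because the low ASID_BITS hold the hardware ASID and the high bits act as
a generation counter: the shifted XOR is nonzero exactly when the two
values come from different generations. The user-space sketch below models
only that arithmetic and is illustrative, not kernel code: ASID_BITS == 8
matches the ARM headers of this period, but NR_CPUS here, the helper names
(new_asid, asid_is_stale) and the driver loop are invented for the model,
and all locking, flushing and IPI broadcast are omitted.

/* asid_model.c - user-space model of the per-CPU ASID rollover above. */
#include <stdio.h>

#define ASID_BITS		8
#define ASID_MASK		(~0U << ASID_BITS)
#define ASID_FIRST_VERSION	(1U << ASID_BITS)
#define NR_CPUS			4	/* stand-in, not the kernel config value */

static unsigned int cpu_last_asid = ASID_FIRST_VERSION;

/* Mirrors the version test in set_mm_context()/__new_context():
 * nonzero iff "id" was handed out in an older generation. */
static int asid_is_stale(unsigned int id)
{
	return (id ^ cpu_last_asid) >> ASID_BITS;
}

/* Models the __new_context() allocation path (locking omitted). */
static unsigned int new_asid(unsigned int cpu)
{
	unsigned int asid = ++cpu_last_asid;

	if (asid == 0)				/* 32-bit wrap: restart versions */
		asid = cpu_last_asid = ASID_FIRST_VERSION;

	if ((asid & ~ASID_MASK) == 0) {		/* low ASID_BITS rolled over */
		asid = cpu_last_asid + cpu + 1;	/* unique per CPU... */
		/* (kernel: flush_context() plus reset_context() IPIs here) */
		cpu_last_asid += NR_CPUS;	/* ...because of this stride */
	}
	return asid;
}

int main(void)
{
	unsigned int id = 0;
	int i;

	for (i = 0; i < 256; i++)		/* exhaust one generation */
		id = new_asid(0);

	printf("asid 0x%x, stale=%d\n", id, asid_is_stale(id));
	return 0;
}

Compiled with e.g. "gcc -Wall asid_model.c", this prints asid 0x201 and
stale=0: the 256th allocation crosses the 8-bit boundary, CPU 0 claims
0x200 + 0 + 1, and cpu_last_asid jumps to 0x204, leaving 0x202 through
0x204 free for CPUs 1-3 to claim in the same generation.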