| author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2013-03-19 09:47:30 +0100 |
|---|---|---|
| committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2013-03-19 09:47:30 +0100 |
| commit | 0d4a42f6bd298e826620585e766a154ab460617a | |
| tree | 406d8f7778691d858dbe3e48e4bbb10e99c0a58a /arch/arm/mm/context.c | |
| parent | d62b4892f3d9f7dd2002e5309be10719d6805b0f | |
| parent | a937536b868b8369b98967929045f1df54234323 | |
Merge tag 'v3.9-rc3' into drm-intel-next-queued
Backmerge so that I can merge Imre Deak's coalesced sg entries fixes,
which depend upon the new for_each_sg_page introduced in
commit a321e91b6d73ed011ffceed384c40d2785cf723b
Author: Imre Deak <imre.deak@intel.com>
Date: Wed Feb 27 17:02:56 2013 -0800
lib/scatterlist: add simple page iterator
The merge itself is just two trivial conflicts:
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
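
For context on the dependency mentioned above: for_each_sg_page (from commit a321e91b6d73) walks a scatterlist one page at a time even when coalesced sg entries span several pages. Below is a minimal caller sketch, assuming the v3.9 scatterlist API; the helper name count_sg_pages() is hypothetical and not part of this merge.

```c
/*
 * Illustrative sketch only: count the pages backing a scatterlist using
 * the simple page iterator from lib/scatterlist. The iterator macro and
 * struct sg_page_iter are from include/linux/scatterlist.h (v3.9); the
 * helper itself is made up for this example.
 */
#include <linux/scatterlist.h>

static unsigned int count_sg_pages(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_page_iter piter;
	unsigned int npages = 0;

	/* Visits every page in turn, even when one sg entry covers many pages.
	 * The last argument is the page offset into the sg list to start at. */
	for_each_sg_page(sgl, &piter, nents, 0)
		npages++;

	return npages;
}
```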
Diffstat (limited to 'arch/arm/mm/context.c')
-rw-r--r-- arch/arm/mm/context.c | 32 +++++++++++++++++++++-----------
1 file changed, 21 insertions(+), 11 deletions(-)
```diff
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index bc4a5e9ebb7..a5a4b2bc42b 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -34,6 +34,9 @@
  * The ASID is used to tag entries in the CPU caches and TLBs.
  * The context ID is used by debuggers and trace logic, and
  * should be unique within all running processes.
+ *
+ * In big endian operation, the two 32 bit words are swapped if accesed by
+ * non 64-bit operations.
  */
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
 #define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)
@@ -149,9 +152,9 @@ static int is_reserved_asid(u64 asid)
 	return 0;
 }
 
-static void new_context(struct mm_struct *mm, unsigned int cpu)
+static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 {
-	u64 asid = mm->context.id;
+	u64 asid = atomic64_read(&mm->context.id);
 	u64 generation = atomic64_read(&asid_generation);
 
 	if (asid != 0 && is_reserved_asid(asid)) {
@@ -178,13 +181,14 @@ static void new_context(struct mm_struct *mm, unsigned int cpu)
 		cpumask_clear(mm_cpumask(mm));
 	}
 
-	mm->context.id = asid;
+	return asid;
 }
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long flags;
 	unsigned int cpu = smp_processor_id();
+	u64 asid;
 
 	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
 		__check_vmalloc_seq(mm);
@@ -195,20 +199,26 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	 */
 	cpu_set_reserved_ttbr0();
 
-	if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
-	    && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
+	asid = atomic64_read(&mm->context.id);
+	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
 		goto switch_mm_fastpath;
 
 	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
 	/* Check that our ASID belongs to the current generation. */
-	if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
-		new_context(mm, cpu);
-
-	atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
-	cpumask_set_cpu(cpu, mm_cpumask(mm));
+	asid = atomic64_read(&mm->context.id);
+	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
+		asid = new_context(mm, cpu);
+		atomic64_set(&mm->context.id, asid);
+	}
 
-	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
+		local_flush_bp_all();
 		local_flush_tlb_all();
+	}
+
+	atomic64_set(&per_cpu(active_asids, cpu), asid);
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
 switch_mm_fastpath:
```
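
The recurring expression `(asid ^ atomic64_read(&asid_generation)) >> ASID_BITS` in this hunk is the rollover test: the 64-bit context ID keeps the hardware ASID in its low ASID_BITS bits and a rollover generation counter above them, so the XOR-and-shift is non-zero exactly when the stored ASID was allocated under an older generation and new_context() must hand out a fresh one. The stand-alone sketch below illustrates that check; it is not kernel code, and the constants and values are made up (ASID_BITS is taken as 8 here purely for illustration).

```c
/*
 * Stand-alone illustration of the generation check used in the patch.
 * A 64-bit "context ID" packs a rollover generation in the bits above
 * ASID_BITS and the hardware ASID in the low ASID_BITS bits.
 */
#include <stdint.h>
#include <stdio.h>

#define ASID_BITS          8
#define ASID_FIRST_VERSION (1ULL << ASID_BITS)

static int asid_is_stale(uint64_t ctx_id, uint64_t generation)
{
	/* Non-zero iff the generation fields (upper bits) differ. */
	return ((ctx_id ^ generation) >> ASID_BITS) ? 1 : 0;
}

int main(void)
{
	uint64_t generation = 3 * ASID_FIRST_VERSION;   /* current generation  */
	uint64_t fresh = generation | 0x2a;              /* ASID 42, current gen */
	uint64_t stale = 2 * ASID_FIRST_VERSION | 0x2a;  /* ASID 42, older gen   */

	printf("fresh stale? %d\n", asid_is_stale(fresh, generation)); /* prints 0 */
	printf("stale stale? %d\n", asid_is_stale(stale, generation)); /* prints 1 */
	return 0;
}
```

This also shows why the patch re-reads mm->context.id under cpu_asid_lock: the cheap lockless check on the fast path can race with a rollover, so the authoritative comparison and any update of the context ID happen with the lock held.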