author     Russell King <rmk@dyn-67.arm.linux.org.uk>      2007-05-08 20:03:09 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>      2007-05-08 20:03:09 +0100
commit     8678c1f04277daaa914abb107fb9fe71298d916d (patch)
tree       c4916538ff592210363909099aad866e1ba67985 /include
parent     08fdffd4cf4ddd4eb4b32e78f93f4ff53ccec78f (diff)
[ARM] Fix ASID version switch
Close a hole in the ASID version switch, particularly the following
scenario:

  CPU0 MM PID                     CPU1 MM PID
       idle
                                    A   pid(A)
                                    A   idle(lazy tlb)
            * new asid version triggered by B *
    B  pid(B)
    A  pid(A)
            * MM A gets new asid version *
    A  idle(lazy tlb)
                                    A   pid(A)
            * CPU1 doesn't see the new ASID *

The result is that CPU1 continues running with the hardware set to the
original (stale) ASID value, while mm->context.id contains the new ASID
value.  The next MM fault on CPU1 therefore updates the page table
entries, but flush_tlb_page() fails because it uses the wrong ASID.

There is a related case where a threaded application is allocated a new
ASID on one CPU while another of its threads is running on a different
CPU; that scenario is not fixed by this commit.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
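For context on the macros touched below: the 32-bit context ID packs the 8-bit
hardware ASID in the low bits and a generation ("version") counter above it,
so ASID_FIRST_VERSION is the value that bumps the version field. The following
is a minimal, standalone userspace model of that version comparison, not
kernel code; the helper needs_new_asid() and the main() driver are
illustrative names only.

/*
 * Standalone model of the ARM ASID version check (illustrative only;
 * the real check lives in include/asm-arm/mmu_context.h).
 */
#include <stdio.h>

#define ASID_BITS		8
#define ASID_MASK		((~0u) << ASID_BITS)
#define ASID_FIRST_VERSION	(1u << ASID_BITS)

/* Hypothetical helper: does this context's version lag the global one? */
static int needs_new_asid(unsigned int context_id, unsigned int last_asid)
{
	/* The versions differ iff any bit above ASID_BITS differs. */
	return ((context_id ^ last_asid) >> ASID_BITS) != 0;
}

int main(void)
{
	unsigned int last_asid  = ASID_FIRST_VERSION | 0x05; /* version 1, ASID 5 */
	unsigned int stale_id   = 0x03;                      /* version 0, ASID 3 */
	unsigned int current_id = ASID_FIRST_VERSION | 0x03; /* version 1, ASID 3 */

	printf("stale context needs a new ASID:   %d\n",
	       needs_new_asid(stale_id, last_asid));
	printf("current context needs a new ASID: %d\n",
	       needs_new_asid(current_id, last_asid));
	return 0;
}

The first case prints 1 (a rollover happened since the context was assigned),
the second prints 0; CPU1 in the scenario above is effectively stuck in the
first case without noticing.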
Diffstat (limited to 'include')
-rw-r--r--   include/asm-arm/mmu_context.h   8
1 file changed, 4 insertions, 4 deletions
diff --git a/include/asm-arm/mmu_context.h b/include/asm-arm/mmu_context.h
index f8755c818b5..4981ad41919 100644
--- a/include/asm-arm/mmu_context.h
+++ b/include/asm-arm/mmu_context.h
@@ -36,8 +36,9 @@ void __check_kvm_seq(struct mm_struct *mm);
* The context ID is used by debuggers and trace logic, and
* should be unique within all running processes.
*/
-#define ASID_BITS 8
-#define ASID_MASK ((~0) << ASID_BITS)
+#define ASID_BITS 8
+#define ASID_MASK ((~0) << ASID_BITS)
+#define ASID_FIRST_VERSION (1 << ASID_BITS)
extern unsigned int cpu_last_asid;
@@ -96,8 +97,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#ifdef CONFIG_MMU
unsigned int cpu = smp_processor_id();
- if (prev != next) {
- cpu_set(cpu, next->cpu_vm_mask);
+ if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
check_context(next);
cpu_switch_mm(next->pgd, next);
if (cache_is_vivt())
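To see what the reworked switch_mm() condition buys: the old prev != next test
is skipped entirely on a lazy-TLB re-entry of the same mm, whereas the new
cpu_test_and_set() form also fires whenever this CPU is not yet recorded in
next->cpu_vm_mask, forcing check_context() to pick up an ASID allocated on
another CPU. Below is a standalone sketch of the two conditions, modelling
cpu_vm_mask as a plain unsigned long; test_and_set_cpu() is a stand-in written
for this example, not the kernel API.

/*
 * Standalone model of the reworked switch_mm() condition (illustrative;
 * cpumask_t is modelled as a plain unsigned long).
 */
#include <stdio.h>

/* Set the CPU's bit in the mask and return its previous value. */
static int test_and_set_cpu(int cpu, unsigned long *mask)
{
	int old = (int)((*mask >> cpu) & 1UL);

	*mask |= 1UL << cpu;
	return old;
}

int main(void)
{
	unsigned long cpu_vm_mask = 0;	/* next->cpu_vm_mask: CPU1 not yet in it */
	int cpu = 1;
	int prev_equals_next = 1;	/* lazy-TLB re-entry of the same mm */

	/* Old condition: never fires when prev == next. */
	int old_condition = !prev_equals_next;

	/* New condition: also fires when this CPU has just joined the mask. */
	int new_condition = !test_and_set_cpu(cpu, &cpu_vm_mask) || !prev_equals_next;

	printf("old test would call check_context(): %d\n", old_condition);
	printf("new test would call check_context(): %d\n", new_condition);
	return 0;
}

In the lazy-TLB scenario from the commit message the old test prints 0 and the
new test prints 1, which is exactly the re-check the fix is after.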