summaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorDavid S. Miller <davem@sunset.davemloft.net>2006-03-06 19:59:50 -0800
committerDavid S. Miller <davem@sunset.davemloft.net>2006-03-20 01:14:20 -0800
commita77754b4d0731321db266c6c60ffcd7c62757da5 (patch)
tree37cc4c6793e2b616791d42ee840e5a007a13eccb /include
parent9132983ae140a8ca81e95e081d5a4c0dd7a7f670 (diff)
[SPARC64]: Bulletproof MMU context locking.
1) Always spin_lock_init() in init_context(). The caller essentially clears it out, or copies the mm info from the parent. In both cases we need to explicitly initialize the spinlock.

2) Always do explicit IRQ disabling while taking mm->context.lock and ctx_alloc_lock.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include')
-rw-r--r--include/asm-sparc64/mmu_context.h | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 4be40c58e3c..ca36ea96f64 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -67,14 +67,14 @@ extern void __flush_tlb_mm(unsigned long, unsigned long);
/* Switch the current MM context. Interrupts are disabled. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
- unsigned long ctx_valid;
+ unsigned long ctx_valid, flags;
int cpu;
- spin_lock(&mm->context.lock);
+ spin_lock_irqsave(&mm->context.lock, flags);
ctx_valid = CTX_VALID(mm->context);
if (!ctx_valid)
get_new_mmu_context(mm);
- spin_unlock(&mm->context.lock);
+ spin_unlock_irqrestore(&mm->context.lock, flags);
if (!ctx_valid || (old_mm != mm)) {
load_secondary_context(mm);