author    Gustavo Padovan <gustavo.padovan@collabora.co.uk>  2013-09-27 11:56:14 -0300
committer Gustavo Padovan <gustavo.padovan@collabora.co.uk>  2013-09-27 11:56:14 -0300
commit    1025c04cecd19882e28f16c4004034b475c372c5 (patch)
tree      2b7402887e86d54bff5a123228c9059eae5e32bd /arch/s390/include/asm/mmu_context.h
parent    4375f1037d52602413142e290608d0d84671ad36 (diff)
parent    5bcecf325378218a8e248bb6bcae96ec7362f8ef (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth

Conflicts:
	net/bluetooth/hci_core.c
Diffstat (limited to 'arch/s390/include/asm/mmu_context.h')
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 22 ++--------------------
1 file changed, 2 insertions(+), 20 deletions(-)
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 084e7755ed9..9f973d8de90 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -21,24 +21,7 @@ static inline int init_new_context(struct task_struct *tsk,
 #ifdef CONFIG_64BIT
 	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
 #endif
-	if (current->mm && current->mm->context.alloc_pgste) {
-		/*
-		 * alloc_pgste indicates, that any NEW context will be created
-		 * with extended page tables. The old context is unchanged. The
-		 * page table allocation and the page table operations will
-		 * look at has_pgste to distinguish normal and extended page
-		 * tables. The only way to create extended page tables is to
-		 * set alloc_pgste and then create a new context (e.g. dup_mm).
-		 * The page table allocation is called after init_new_context
-		 * and if has_pgste is set, it will create extended page
-		 * tables.
-		 */
-		mm->context.has_pgste = 1;
-		mm->context.alloc_pgste = 1;
-	} else {
-		mm->context.has_pgste = 0;
-		mm->context.alloc_pgste = 0;
-	}
+	mm->context.has_pgste = 0;
 	mm->context.asce_limit = STACK_TOP_MAX;
 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
 	return 0;
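
The removed comment describes the alloc_pgste/has_pgste split: a new context gets extended page tables only when the parent mm set alloc_pgste before the new context was created (e.g. via dup_mm), and the page table code then keys off has_pgste. After this hunk, init_new_context() simply starts every context with has_pgste = 0. For orientation, here is a minimal sketch of the consumer side of that flag, modeled on the mm_has_pgste() helper from asm/pgtable.h of this era; treat it as illustrative rather than a verbatim copy of the merged tree:

	/*
	 * Sketch (assumption: mirrors the asm/pgtable.h helper of this
	 * period): page table operations distinguish normal from extended
	 * page tables by checking context.has_pgste, exactly as the
	 * removed comment describes.
	 */
	static inline int mm_has_pgste(struct mm_struct *mm)
	{
	#ifdef CONFIG_PGSTE
		if (unlikely(mm->context.has_pgste))
			return 1;
	#endif
		return 0;
	}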
@@ -77,8 +60,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
 	atomic_inc(&next->context.attach_count);
 	/* Check for TLBs not flushed yet */
-	if (next->context.flush_mm)
-		__tlb_flush_mm(next);
+	__tlb_flush_mm_lazy(next);
 }
 #define enter_lazy_tlb(mm,tsk) do { } while (0)
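
In the switch_mm() hunk, the open-coded flush_mm check is folded into __tlb_flush_mm_lazy(). A minimal sketch of that helper, assuming it follows the asm/tlbflush.h definition from this period (illustrative, not quoted from the merged tree):

	/*
	 * Sketch of __tlb_flush_mm_lazy() (assumed shape, per the
	 * asm/tlbflush.h helper of this era): flush only when a flush is
	 * pending for this mm, then clear the pending flag.
	 */
	static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
	{
		if (mm->context.flush_mm) {
			__tlb_flush_mm(mm);
			mm->context.flush_mm = 0;
		}
	}

Compared with the removed open-coded check, centralizing this in one helper also lets the pending-flush flag be reset in the same place, so a deferred flush is carried out at most once per mm.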