| author | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2014-06-02 14:53:57 +0200 |
| committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2014-06-10 10:48:28 +0200 |
| commit | f8b13505607823bb9a212eb9410669ecf2bc2615 (patch) | |
| tree | f7f02832e582cda3afa8ef9aabc3145b25620f94 /arch/s390 | |
| parent | c1a42f49b20e9498c7abd47c01b04c6312af13c4 (diff) | |
s390/uaccess: always load the kernel ASCE after task switch
This patch fixes a problem introduced with git commit beef560b4cdfafb2
"s390/uaccess: simplify control register updates".
The switch_mm function is not called if the next process is a kernel
thread without an attached mm, and it is a nop if the mm does not change.
But CR1 still needs to be loaded with the kernel ASCE in case the
code returns to a uaccess function that uses the secondary space mode.
In addition, move the set_fs call from finish_arch_switch to
finish_arch_post_lock_switch and then remove finish_arch_switch.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
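
For reference, the load_kernel_asce() helper that the reworked finish_arch_post_lock_switch() calls is not part of this diff; it was added by the earlier commit beef560b4cdfafb2. Below is a minimal sketch of its logic, inferred from how this patch uses it — consult the tree for the authoritative version:

```c
/*
 * Sketch of load_kernel_asce() (introduced by commit beef560b4cdfafb2,
 * not shown in this diff): make sure CR1 holds the kernel ASCE after a
 * task switch and flag that the user ASCE must be restored before
 * returning to user space. Details may differ from the real tree.
 */
static inline void load_kernel_asce(void)
{
	unsigned long asce;

	__ctl_store(asce, 1, 1);		/* read current CR1 */
	if (asce != S390_lowcore.kernel_asce)	/* avoid a redundant LCTL */
		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	set_cpu_flag(CIF_ASCE);		/* reload user ASCE on kernel exit */
}
```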
Diffstat (limited to 'arch/s390')
-rw-r--r-- | arch/s390/include/asm/mmu_context.h | 33 |
-rw-r--r-- | arch/s390/include/asm/switch_to.h | 4 |
2 files changed, 16 insertions, 21 deletions
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index c28f32a45af..3815bfea1b2 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -33,10 +33,9 @@ static inline int init_new_context(struct task_struct *tsk,
 
 static inline void set_user_asce(struct mm_struct *mm)
 {
-	pgd_t *pgd = mm->pgd;
-
-	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	set_fs(current->thread.mm_segment);
+	S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
+	if (current->thread.mm_segment.ar4)
+		__ctl_load(S390_lowcore.user_asce, 7, 7);
 	set_cpu_flag(CIF_ASCE);
 }
 
@@ -70,12 +69,11 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	/* Clear old ASCE by loading the kernel ASCE. */
 	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
 	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
-	/* Delay loading of the new ASCE to control registers CR1 & CR7 */
-	set_cpu_flag(CIF_ASCE);
 	atomic_inc(&next->context.attach_count);
 	atomic_dec(&prev->context.attach_count);
 	if (MACHINE_HAS_TLB_LC)
 		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
+	S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
 }
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
@@ -84,17 +82,18 @@ static inline void finish_arch_post_lock_switch(void)
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
 
-	if (!mm)
-		return;
-	preempt_disable();
-	while (atomic_read(&mm->context.attach_count) >> 16)
-		cpu_relax();
-
-	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-	set_user_asce(mm);
-	if (mm->context.flush_mm)
-		__tlb_flush_mm(mm);
-	preempt_enable();
+	load_kernel_asce();
+	if (mm) {
+		preempt_disable();
+		while (atomic_read(&mm->context.attach_count) >> 16)
+			cpu_relax();
+
+		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+		if (mm->context.flush_mm)
+			__tlb_flush_mm(mm);
+		preempt_enable();
+	}
+	set_fs(current->thread.mm_segment);
 }
 
 #define enter_lazy_tlb(mm,tsk)	do { } while (0)
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 29c81f82705..df38c70cd59 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -134,8 +134,4 @@ static inline void restore_access_regs(unsigned int *acrs)
 	prev = __switch_to(prev,next);					\
 } while (0)
 
-#define finish_arch_switch(prev) do {					\
-	set_fs(current->thread.mm_segment);				\
-} while (0)
-
 #endif /* __ASM_SWITCH_TO_H */
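
The mm_segment.ar4 check added to set_user_asce() mirrors what set_fs() does on s390 at this point in the tree: CR7, the secondary-space ASCE, points at the user address space only while mm_segment selects secondary space mode for uaccess. A rough sketch of that relationship — not part of this diff, and the real macro in arch/s390/include/asm/uaccess.h may differ in detail:

```c
/*
 * Rough sketch of the s390 set_fs() whose call site this patch moves
 * from finish_arch_switch() to finish_arch_post_lock_switch().
 * mm_segment.ar4 selects the user ASCE for CR7 so that secondary-space
 * uaccess primitives reach the user address space; otherwise CR7 keeps
 * the kernel ASCE.
 */
#define set_fs(x)							\
{									\
	unsigned long __pto;						\
	current->thread.mm_segment = (x);				\
	__pto = current->thread.mm_segment.ar4 ?			\
		S390_lowcore.user_asce : S390_lowcore.kernel_asce;	\
	__ctl_load(__pto, 7, 7);	/* CR7: secondary-space ASCE */	\
}
```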