s390/uaccess: always load the kernel ASCE after task switch

This patch fixes a problem introduced with git commit beef560b4c
"s390/uaccess: simplify control register updates".

The switch_mm function is not called if the next process is a kernel
thread without an attached mm, and it is a nop if the mm does not
change. In either case CR1 still needs to be loaded with the kernel
ASCE, because the code may return to a uaccess function that uses the
secondary space mode.
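
The fix relies on finish_arch_post_lock_switch calling load_kernel_asce()
unconditionally. As a rough sketch of that pre-existing helper (approximate,
not copied verbatim from arch/s390/include/asm/mmu_context.h), it reloads CR1
only when it does not already hold the kernel ASCE and flags CIF_ASCE so the
user ASCE is re-established on the way back to user space:

static inline void load_kernel_asce(void)
{
        unsigned long asce;

        /* Reload CR1 with the kernel ASCE only if it holds something else. */
        __ctl_store(asce, 1, 1);
        if (asce != S390_lowcore.kernel_asce)
                __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        /* Have the user ASCE restored before returning to user space. */
        set_cpu_flag(CIF_ASCE);
}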

In addition, move the set_fs call from finish_arch_switch to
finish_arch_post_lock_switch, and then remove finish_arch_switch.
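
Moving the set_fs call works because set_fs on s390 both records the new
address space limit and loads CR7 with the matching ASCE, so one call in
finish_arch_post_lock_switch restores the secondary space after the kernel
ASCE has been reloaded. A minimal sketch, assuming the post-beef560b4c form
and not quoting the exact macro from arch/s390/include/asm/uaccess.h
(__asce is a placeholder name):

/* Sketch only: record the new limit and load CR7 with the matching ASCE. */
#define set_fs(x)                                                       \
{                                                                       \
        unsigned long __asce;                                           \
        current->thread.mm_segment = (x);                               \
        __asce = current->thread.mm_segment.ar4 ?                       \
                 S390_lowcore.user_asce : S390_lowcore.kernel_asce;     \
        __ctl_load(__asce, 7, 7);                                       \
}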

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Author: Martin Schwidefsky <schwidefsky@de.ibm.com>
Date:   2014-06-02 14:53:57 +02:00
Commit: f8b1350560 (parent: c1a42f49b2)

2 changed files with 15 additions and 20 deletions

arch/s390/include/asm/mmu_context.h

@@ -33,10 +33,9 @@ static inline int init_new_context(struct task_struct *tsk,
 
 static inline void set_user_asce(struct mm_struct *mm)
 {
-        pgd_t *pgd = mm->pgd;
-
-        S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-        set_fs(current->thread.mm_segment);
+        S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
+        if (current->thread.mm_segment.ar4)
+                __ctl_load(S390_lowcore.user_asce, 7, 7);
         set_cpu_flag(CIF_ASCE);
 }
 
@@ -70,12 +69,11 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
         /* Clear old ASCE by loading the kernel ASCE. */
         __ctl_load(S390_lowcore.kernel_asce, 1, 1);
         __ctl_load(S390_lowcore.kernel_asce, 7, 7);
-        /* Delay loading of the new ASCE to control registers CR1 & CR7 */
-        set_cpu_flag(CIF_ASCE);
         atomic_inc(&next->context.attach_count);
         atomic_dec(&prev->context.attach_count);
         if (MACHINE_HAS_TLB_LC)
                 cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
+        S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
 }
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
@@ -84,17 +82,18 @@ static inline void finish_arch_post_lock_switch(void)
         struct task_struct *tsk = current;
         struct mm_struct *mm = tsk->mm;
 
-        if (!mm)
-                return;
-        preempt_disable();
-        while (atomic_read(&mm->context.attach_count) >> 16)
-                cpu_relax();
+        load_kernel_asce();
+        if (mm) {
+                preempt_disable();
+                while (atomic_read(&mm->context.attach_count) >> 16)
+                        cpu_relax();
 
-        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-        set_user_asce(mm);
-        if (mm->context.flush_mm)
-                __tlb_flush_mm(mm);
-        preempt_enable();
+                cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+                if (mm->context.flush_mm)
+                        __tlb_flush_mm(mm);
+                preempt_enable();
+        }
+        set_fs(current->thread.mm_segment);
 }
 
 #define enter_lazy_tlb(mm,tsk) do { } while (0)

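For readability, finish_arch_post_lock_switch as it reads with the hunk above
applied (assembled from the diff, nothing beyond it):

static inline void finish_arch_post_lock_switch(void)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;

        /* Always put the kernel ASCE back into CR1 first ... */
        load_kernel_asce();
        if (mm) {
                preempt_disable();
                while (atomic_read(&mm->context.attach_count) >> 16)
                        cpu_relax();

                cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
                if (mm->context.flush_mm)
                        __tlb_flush_mm(mm);
                preempt_enable();
        }
        /* ... then re-establish the secondary space ASCE in CR7. */
        set_fs(current->thread.mm_segment);
}
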
arch/s390/include/asm/switch_to.h

@@ -134,8 +134,4 @@ static inline void restore_access_regs(unsigned int *acrs)
         prev = __switch_to(prev,next);                                  \
 } while (0)
 
-#define finish_arch_switch(prev) do {                                   \
-        set_fs(current->thread.mm_segment);                             \
-} while (0)
-
 #endif /* __ASM_SWITCH_TO_H */