s390/mm: fix local TLB flushing vs. detach of an mm address space
The local TLB flushing code keeps an additional mask in the mm.context, the cpu_attach_mask. At the time a global flush of an address space is done, the cpu_attach_mask is copied to the mm_cpumask in order to avoid future global flushes in case the mm is used by a single CPU only after the flush.

The trouble is that the reset of the mm_cpumask is racy against the detach of an mm address space by switch_mm: the current order is first the global TLB flush and then the copy of the cpu_attach_mask to the mm_cpumask, but the order needs to be the other way around.

Cc: <stable@vger.kernel.org>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
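In essence, the patch swaps two steps in the flush path. A condensed sketch of the ordering change, using only names that appear in the hunks below (the full context is in the __tlb_flush_mm hunk of tlbflush.h):

/* Old, racy order: global flush first, then the reset of mm_cpumask;
 * the copy can race with a concurrent detach in switch_mm. */
__tlb_flush_global();
cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);

/* New order: reset the mask first, then flush, with a compiler barrier
 * keeping the two steps ordered. */
cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
barrier();
__tlb_flush_global();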
Parent: 46fde9a9d2
Commit: b3e5dc45fd
arch/s390/include/asm/mmu_context.h:

@@ -103,7 +103,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	if (prev == next)
 		return;
 	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
-	cpumask_set_cpu(cpu, mm_cpumask(next));
 	/* Clear old ASCE by loading the kernel ASCE. */
 	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
 	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
@@ -121,7 +120,7 @@ static inline void finish_arch_post_lock_switch(void)
 		preempt_disable();
 		while (atomic_read(&mm->context.flush_count))
 			cpu_relax();
-
+		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
 		if (mm->context.flush_mm)
 			__tlb_flush_mm(mm);
 		preempt_enable();
@@ -136,6 +135,7 @@ static inline void activate_mm(struct mm_struct *prev,
                                struct mm_struct *next)
 {
 	switch_mm(prev, next, current);
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
 	set_user_asce(next);
 }
 
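Taken together, the three hunks above move the mm_cpumask update out of switch_mm: an attaching CPU now only marks itself in cpu_attach_mask at context-switch time, and publishes itself in mm_cpumask later, in finish_arch_post_lock_switch (or activate_mm), once any in-flight flush has drained. A condensed sketch of the attach path after the patch, with the surrounding lines elided:

/* switch_mm: only the attach mask is updated at switch time */
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);

/* finish_arch_post_lock_switch: wait out any concurrent flush, then
 * set the mm_cpumask bit so future flushes will cover this CPU */
while (atomic_read(&mm->context.flush_count))
	cpu_relax();
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));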
arch/s390/include/asm/tlbflush.h:

@@ -48,23 +48,6 @@ static inline void __tlb_flush_global(void)
  * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
  * this implicates multiple ASCEs!).
  */
-static inline void __tlb_flush_full(struct mm_struct *mm)
-{
-	preempt_disable();
-	atomic_inc(&mm->context.flush_count);
-	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
-		/* Local TLB flush */
-		__tlb_flush_local();
-	} else {
-		/* Global TLB flush */
-		__tlb_flush_global();
-		/* Reset TLB flush mask */
-		cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
-	}
-	atomic_dec(&mm->context.flush_count);
-	preempt_enable();
-}
-
 static inline void __tlb_flush_mm(struct mm_struct *mm)
 {
 	unsigned long gmap_asce;
@@ -76,16 +59,18 @@ static inline void __tlb_flush_mm(struct mm_struct *mm)
 	 */
 	preempt_disable();
 	atomic_inc(&mm->context.flush_count);
+	/* Reset TLB flush mask */
+	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
+	barrier();
 	gmap_asce = READ_ONCE(mm->context.gmap_asce);
 	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
 		if (gmap_asce)
 			__tlb_flush_idte(gmap_asce);
 		__tlb_flush_idte(mm->context.asce);
 	} else {
-		__tlb_flush_full(mm);
+		/* Global TLB flush */
+		__tlb_flush_global();
 	}
-	/* Reset TLB flush mask */
-	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
 	atomic_dec(&mm->context.flush_count);
 	preempt_enable();
 }
@@ -99,7 +84,6 @@ static inline void __tlb_flush_kernel(void)
 }
 #else
 #define __tlb_flush_global() __tlb_flush_local()
-#define __tlb_flush_full(mm) __tlb_flush_local()
 
 /*
  * Flush TLB entries for a specific ASCE on all CPUs.
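For reference, __tlb_flush_mm as it reads with the patch applied, assembled from the two tlbflush.h hunks above (the comment block between the declaration and preempt_disable(), which the diff does not show, is elided):

static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	/* Reset TLB flush mask */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	barrier();
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}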