x86: Avoid tlbstate lock if not enough cpus
This one isn't related to the previous patch. If the number of online cpus is below NUM_INVALIDATE_TLB_VECTORS, we don't need the lock. The existing comment in the code says the check isn't worth it, but even a cache-hot lock still requires an atomic operation and is expensive, so add the check here.

Use nr_cpu_ids here, as suggested by Eric Dumazet.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Andi Kleen <andi@firstfloor.org>
LKML-Reference: <1295232730.1949.710.camel@sli10-conroe>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent: 70e4a36973
Commit: 7064d865af
@@ -179,12 +179,8 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
 	sender = this_cpu_read(tlb_vector_offset);
 	f = &flush_state[sender];
 
-	/*
-	 * Could avoid this lock when
-	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
-	 * probably not worth checking this for a cache-hot lock.
-	 */
-	raw_spin_lock(&f->tlbstate_lock);
+	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
+		raw_spin_lock(&f->tlbstate_lock);
 
 	f->flush_mm = mm;
 	f->flush_va = va;
@@ -202,7 +198,8 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
 
 	f->flush_mm = NULL;
 	f->flush_va = 0;
-	raw_spin_unlock(&f->tlbstate_lock);
+	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
+		raw_spin_unlock(&f->tlbstate_lock);
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
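To make the reasoning in the commit message concrete, here is a minimal userspace sketch of the same conditional-locking pattern. This is not the kernel code: struct flush_slot, fill_slot(), the pthread mutex, and the constant values are illustrative stand-ins for flush_state, flush_tlb_others_ipi() and tlbstate_lock. It demonstrates that when nr_cpu_ids <= NUM_INVALIDATE_TLB_VECTORS every sender owns its own slot, so the lock (and its atomic read-modify-write on a hot cache line) can be skipped.

/* tlbstate_lock_sketch.c -- illustrative only; build: cc tlbstate_lock_sketch.c -lpthread */
#include <pthread.h>
#include <stdio.h>

#define NUM_INVALIDATE_TLB_VECTORS 8	/* per-vector slot count (illustrative value) */

/* Hypothetical per-vector slot, a stand-in for the kernel's flush_state entry. */
struct flush_slot {
	void *flush_mm;
	unsigned long flush_va;
	pthread_mutex_t lock;		/* stands in for tlbstate_lock */
};

static struct flush_slot flush_state[NUM_INVALIDATE_TLB_VECTORS];
static int nr_cpu_ids = 4;		/* assumed CPU count for this example */

/*
 * Publish a flush request in the sender's slot.  When
 * nr_cpu_ids <= NUM_INVALIDATE_TLB_VECTORS each CPU can own a distinct
 * vector, so no other CPU ever writes this slot concurrently and the
 * lock is skipped; otherwise vectors are shared and the lock is required.
 */
static void fill_slot(int sender, void *mm, unsigned long va)
{
	struct flush_slot *f = &flush_state[sender];

	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
		pthread_mutex_lock(&f->lock);

	f->flush_mm = mm;
	f->flush_va = va;
	/* ... send the invalidate IPI and wait for acknowledgement ... */
	f->flush_mm = NULL;
	f->flush_va = 0;

	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
		pthread_mutex_unlock(&f->lock);
}

int main(void)
{
	int dummy_mm;

	for (int i = 0; i < NUM_INVALIDATE_TLB_VECTORS; i++)
		pthread_mutex_init(&flush_state[i].lock, NULL);

	fill_slot(2, &dummy_mm, 0x1000);	/* lock skipped: 4 cpus <= 8 vectors */
	printf("flush published without touching the slot lock\n");
	return 0;
}

A note on the chosen condition: nr_cpu_ids is fixed after boot and is a plain variable read, whereas num_online_cpus() has to be recomputed from a cpumask and can change under CPU hotplug, which is presumably why it was suggested for this check.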