x86/mm/cpa: Unconditionally avoid WBINVD when we can
CAT has happened, WBINVD is bad (even before CAT, blowing away the entire cache on a multi-core platform wasn't nice), so try not to use it ever. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Dave Hansen <dave.hansen@intel.com> Cc: Bin Yang <bin.yang@intel.com> Cc: Mark Gross <mark.gross@intel.com> Link: https://lkml.kernel.org/r/20180919085947.933674526@infradead.org
This commit is contained in:
Parent
c0a759abf5
Commit
ddd07b7503
|
@ -319,26 +319,12 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
|
|||
int in_flags, struct page **pages)
|
||||
{
|
||||
unsigned int i, level;
|
||||
#ifdef CONFIG_PREEMPT
|
||||
/*
|
||||
* Avoid wbinvd() because it causes latencies on all CPUs,
|
||||
* regardless of any CPU isolation that may be in effect.
|
||||
*
|
||||
* This should be extended for CAT enabled systems independent of
|
||||
* PREEMPT because wbinvd() does not respect the CAT partitions and
|
||||
* this is exposed to unpriviledged users through the graphics
|
||||
* subsystem.
|
||||
*/
|
||||
unsigned long do_wbinvd = 0;
|
||||
#else
|
||||
unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
|
||||
#endif
|
||||
|
||||
BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
|
||||
|
||||
on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);
|
||||
flush_tlb_all();
|
||||
|
||||
if (!cache || do_wbinvd)
|
||||
if (!cache)
|
||||
return;
|
||||
|
||||
/*
|
||||
|
|
Loading…
Link in new issue