perf_counter, x86: speed up the scheduling fast-path
We have to set up the LVT entry only at counter init time, not at every switch-in time. There's friction between NMI and non-NMI use here - we'll probably remove the per counter configurability of it - but until then, don't slow down things ... [ Impact: micro-optimization ] Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Srivatsa Vaddagiri <vatsa@in.ibm.com> Cc: Paul Mackerras <paulus@samba.org> Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Marcelo Tosatti <mtosatti@redhat.com> LKML-Reference: <new-submission> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
Parent
c0daaf3f1f
Commit
b68f1d2e7a
|
@ -285,6 +285,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
|
||||||
return -EACCES;
|
return -EACCES;
|
||||||
hwc->nmi = 1;
|
hwc->nmi = 1;
|
||||||
}
|
}
|
||||||
|
perf_counters_lapic_init(hwc->nmi);
|
||||||
|
|
||||||
if (!hwc->irq_period)
|
if (!hwc->irq_period)
|
||||||
hwc->irq_period = x86_pmu.max_period;
|
hwc->irq_period = x86_pmu.max_period;
|
||||||
|
@ -603,8 +604,6 @@ try_generic:
|
||||||
hwc->counter_base = x86_pmu.perfctr;
|
hwc->counter_base = x86_pmu.perfctr;
|
||||||
}
|
}
|
||||||
|
|
||||||
perf_counters_lapic_init(hwc->nmi);
|
|
||||||
|
|
||||||
x86_pmu.disable(hwc, idx);
|
x86_pmu.disable(hwc, idx);
|
||||||
|
|
||||||
cpuc->counters[idx] = counter;
|
cpuc->counters[idx] = counter;
|
||||||
|
@ -1054,7 +1053,7 @@ void __init init_hw_perf_counters(void)
|
||||||
|
|
||||||
pr_info("... counter mask: %016Lx\n", perf_counter_mask);
|
pr_info("... counter mask: %016Lx\n", perf_counter_mask);
|
||||||
|
|
||||||
perf_counters_lapic_init(0);
|
perf_counters_lapic_init(1);
|
||||||
register_die_notifier(&perf_counter_nmi_notifier);
|
register_die_notifier(&perf_counter_nmi_notifier);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Link in new issue