x86/tsc: Calibrate tsc only once
During boot the TSC is calibrated twice: once in tsc_early_delay_calibrate(), and a second time in tsc_init().

Rename tsc_early_delay_calibrate() to tsc_early_init(), rework it so the calibration is done only early, and make tsc_init() use the values already determined in tsc_early_init().

Sometimes it is not possible to determine the TSC frequency early, because a required subsystem is not yet initialized; in that case, try again later in tsc_init().

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: steven.sistare@oracle.com
Cc: daniel.m.jordan@oracle.com
Cc: linux@armlinux.org.uk
Cc: schwidefsky@de.ibm.com
Cc: heiko.carstens@de.ibm.com
Cc: john.stultz@linaro.org
Cc: sboyd@codeaurora.org
Cc: hpa@zytor.com
Cc: douly.fnst@cn.fujitsu.com
Cc: peterz@infradead.org
Cc: prarit@redhat.com
Cc: feng.tang@intel.com
Cc: pmladek@suse.com
Cc: gnomes@lxorguk.ukuu.org.uk
Cc: linux-s390@vger.kernel.org
Cc: boris.ostrovsky@oracle.com
Cc: jgross@suse.com
Cc: pbonzini@redhat.com
Link: https://lkml.kernel.org/r/20180719205545.16512-20-pasha.tatashin@oracle.com
Parent: 227e3958a7
Commit: cf7a63ef4e
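For orientation, below is a minimal, self-contained userspace model of the calibrate-once-with-late-retry flow described above. It is only a sketch: the stub calibrate_cpu()/calibrate_tsc() helpers, the fixed HZ and frequency values, and the simplified error handling are assumptions standing in for the real x86_platform hooks and the kernel behaviour shown in the diff that follows.

/*
 * Hypothetical userspace model of the reworked flow (not kernel code).
 * calibrate_cpu()/calibrate_tsc() stand in for the x86_platform hooks,
 * and HZ is an assumed tick rate chosen only for the example.
 */
#include <stdbool.h>
#include <stdio.h>

#define KHZ 1000UL
#define HZ  250UL

static unsigned long cpu_khz, tsc_khz, loops_per_jiffy, lpj_fine;

static unsigned long calibrate_cpu(void) { return 2400000UL; } /* 2.4 GHz */
static unsigned long calibrate_tsc(void) { return 0UL; }       /* "unknown yet" */

/* Calibrate both frequencies once; report whether a usable value was found. */
static bool determine_cpu_tsc_frequencies(void)
{
	cpu_khz = calibrate_cpu();
	tsc_khz = calibrate_tsc();
	if (tsc_khz == 0)
		tsc_khz = cpu_khz;	/* fall back to the CPU frequency */
	return tsc_khz != 0;
}

/* TSC cycles per timer tick, derived from the calibrated frequency. */
static unsigned long get_loops_per_jiffy(void)
{
	return tsc_khz * KHZ / HZ;
}

/* Early in boot: calibrate once and seed loops_per_jiffy. */
static void tsc_early_init(void)
{
	if (determine_cpu_tsc_frequencies())
		loops_per_jiffy = get_loops_per_jiffy();
}

/* Later in boot: reuse the early result, retrying only if it failed. */
static void tsc_init(void)
{
	if (tsc_khz == 0 && !determine_cpu_tsc_frequencies())
		return;			/* the kernel would mark the TSC unstable here */
	lpj_fine = get_loops_per_jiffy();
}

int main(void)
{
	tsc_early_init();
	tsc_init();
	printf("tsc_khz=%lu loops_per_jiffy=%lu lpj_fine=%lu\n",
	       tsc_khz, loops_per_jiffy, lpj_fine);
	return 0;
}

Compiled and run, it prints the single calibrated frequency and the derived loops-per-jiffy values, mirroring how tsc_init() now consumes what tsc_early_init() already determined instead of calibrating again.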
arch/x86/include/asm/tsc.h

@@ -33,7 +33,7 @@ static inline cycles_t get_cycles(void)
 extern struct system_counterval_t convert_art_to_tsc(u64 art);
 extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);
 
-extern void tsc_early_delay_calibrate(void);
+extern void tsc_early_init(void);
 extern void tsc_init(void);
 extern void mark_tsc_unstable(char *reason);
 extern int unsynchronized_tsc(void);
arch/x86/kernel/setup.c

@@ -1014,6 +1014,7 @@ void __init setup_arch(char **cmdline_p)
	 */
	init_hypervisor_platform();
 
+	tsc_early_init();
	x86_init.resources.probe_roms();
 
	/* after parse_early_param, so could debug it */
@@ -1199,7 +1200,6 @@ void __init setup_arch(char **cmdline_p)
 
	memblock_find_dma_reserve();
 
-	tsc_early_delay_calibrate();
	if (!early_xdbc_setup_hardware())
		early_xdbc_register_console();
arch/x86/kernel/tsc.c

@@ -33,6 +33,8 @@ EXPORT_SYMBOL(cpu_khz);
 unsigned int __read_mostly tsc_khz;
 EXPORT_SYMBOL(tsc_khz);
 
+#define KHZ	1000
+
 /*
  * TSC can be unstable due to cpufreq or due to unsynced TSCs
  */
@@ -1335,34 +1337,10 @@ unreg:
  */
 device_initcall(init_tsc_clocksource);
 
-void __init tsc_early_delay_calibrate(void)
+static bool __init determine_cpu_tsc_frequencies(void)
 {
-	unsigned long lpj;
-
-	if (!boot_cpu_has(X86_FEATURE_TSC))
-		return;
-
-	cpu_khz = x86_platform.calibrate_cpu();
-	tsc_khz = x86_platform.calibrate_tsc();
-
-	tsc_khz = tsc_khz ? : cpu_khz;
-	if (!tsc_khz)
-		return;
-
-	lpj = tsc_khz * 1000;
-	do_div(lpj, HZ);
-	loops_per_jiffy = lpj;
-}
-
-void __init tsc_init(void)
-{
-	u64 lpj, cyc;
-	int cpu;
-
-	if (!boot_cpu_has(X86_FEATURE_TSC)) {
-		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
-		return;
-	}
+	/* Make sure that cpu and tsc are not already calibrated */
+	WARN_ON(cpu_khz || tsc_khz);
 
	cpu_khz = x86_platform.calibrate_cpu();
	tsc_khz = x86_platform.calibrate_tsc();
@@ -1377,20 +1355,52 @@
	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
		cpu_khz = tsc_khz;
 
-	if (!tsc_khz) {
-		mark_tsc_unstable("could not calculate TSC khz");
+	if (tsc_khz == 0)
+		return false;
+
+	pr_info("Detected %lu.%03lu MHz processor\n",
+		(unsigned long)cpu_khz / KHZ,
+		(unsigned long)cpu_khz % KHZ);
+
+	if (cpu_khz != tsc_khz) {
+		pr_info("Detected %lu.%03lu MHz TSC",
+			(unsigned long)tsc_khz / KHZ,
+			(unsigned long)tsc_khz % KHZ);
+	}
+	return true;
+}
+
+static unsigned long __init get_loops_per_jiffy(void)
+{
+	unsigned long lpj = tsc_khz * KHZ;
+
+	do_div(lpj, HZ);
+	return lpj;
+}
+
+void __init tsc_early_init(void)
+{
+	if (!boot_cpu_has(X86_FEATURE_TSC))
+		return;
+	if (!determine_cpu_tsc_frequencies())
+		return;
+	loops_per_jiffy = get_loops_per_jiffy();
+}
+
+void __init tsc_init(void)
+{
+	if (!boot_cpu_has(X86_FEATURE_TSC)) {
+		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}
 
-	pr_info("Detected %lu.%03lu MHz processor\n",
-		(unsigned long)cpu_khz / 1000,
-		(unsigned long)cpu_khz % 1000);
-
-	if (cpu_khz != tsc_khz) {
-		pr_info("Detected %lu.%03lu MHz TSC",
-			(unsigned long)tsc_khz / 1000,
-			(unsigned long)tsc_khz % 1000);
+	if (!tsc_khz) {
+		/* We failed to determine frequencies earlier, try again */
+		if (!determine_cpu_tsc_frequencies()) {
+			mark_tsc_unstable("could not calculate TSC khz");
+			setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
+			return;
+		}
	}
 
	/* Sanitize TSC ADJUST before cyc2ns gets initialized */
@@ -1413,10 +1423,7 @@ void __init tsc_init(void)
	if (!no_sched_irq_time)
		enable_sched_clock_irqtime();
 
-	lpj = ((u64)tsc_khz * 1000);
-	do_div(lpj, HZ);
-	lpj_fine = lpj;
-
+	lpj_fine = get_loops_per_jiffy();
	use_tsc_delay();
 
	check_system_tsc_reliable();
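A quick sanity check of the consolidated helper, with illustrative numbers only (a 2,400,000 kHz TSC and HZ=250 are assumptions, not values from this patch): get_loops_per_jiffy() computes lpj = tsc_khz * KHZ / HZ = 2,400,000 * 1,000 / 250 = 9,600,000 TSC cycles per tick. That single result now feeds both loops_per_jiffy in tsc_early_init() and lpj_fine in tsc_init(), replacing the two open-coded calculations removed above.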