sched/clock: Enable sched clock early
Allow sched_clock() to be used before sched_clock_init() is called. This provides a way to get early boot timestamps on machines with unstable clocks. Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: steven.sistare@oracle.com Cc: daniel.m.jordan@oracle.com Cc: linux@armlinux.org.uk Cc: schwidefsky@de.ibm.com Cc: heiko.carstens@de.ibm.com Cc: john.stultz@linaro.org Cc: sboyd@codeaurora.org Cc: hpa@zytor.com Cc: douly.fnst@cn.fujitsu.com Cc: peterz@infradead.org Cc: prarit@redhat.com Cc: feng.tang@intel.com Cc: pmladek@suse.com Cc: gnomes@lxorguk.ukuu.org.uk Cc: linux-s390@vger.kernel.org Cc: boris.ostrovsky@oracle.com Cc: jgross@suse.com Cc: pbonzini@redhat.com Link: https://lkml.kernel.org/r/20180719205545.16512-24-pasha.tatashin@oracle.com
This commit is contained in:
Родитель
5d2a4e91a5
Коммит
857baa87b6
|
@@ -642,7 +642,6 @@ asmlinkage __visible void __init start_kernel(void)
|
||||||
softirq_init();
|
softirq_init();
|
||||||
timekeeping_init();
|
timekeeping_init();
|
||||||
time_init();
|
time_init();
|
||||||
sched_clock_init();
|
|
||||||
printk_safe_init();
|
printk_safe_init();
|
||||||
perf_event_init();
|
perf_event_init();
|
||||||
profile_init();
|
profile_init();
|
||||||
|
@@ -697,6 +696,7 @@ asmlinkage __visible void __init start_kernel(void)
|
||||||
acpi_early_init();
|
acpi_early_init();
|
||||||
if (late_time_init)
|
if (late_time_init)
|
||||||
late_time_init();
|
late_time_init();
|
||||||
|
sched_clock_init();
|
||||||
calibrate_delay();
|
calibrate_delay();
|
||||||
pid_idr_init();
|
pid_idr_init();
|
||||||
anon_vma_init();
|
anon_vma_init();
|
||||||
|
|
|
@@ -202,7 +202,25 @@ static void __sched_clock_gtod_offset(void)
|
||||||
|
|
||||||
void __init sched_clock_init(void)
|
void __init sched_clock_init(void)
|
||||||
{
|
{
|
||||||
|
unsigned long flags;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Set __gtod_offset such that once we mark sched_clock_running,
|
||||||
|
* sched_clock_tick() continues where sched_clock() left off.
|
||||||
|
*
|
||||||
|
* Even if TSC is buggered, we're still UP at this point so it
|
||||||
|
* can't really be out of sync.
|
||||||
|
*/
|
||||||
|
local_irq_save(flags);
|
||||||
|
__sched_clock_gtod_offset();
|
||||||
|
local_irq_restore(flags);
|
||||||
|
|
||||||
sched_clock_running = 1;
|
sched_clock_running = 1;
|
||||||
|
|
||||||
|
/* Now that sched_clock_running is set adjust scd */
|
||||||
|
local_irq_save(flags);
|
||||||
|
sched_clock_tick();
|
||||||
|
local_irq_restore(flags);
|
||||||
}
|
}
|
||||||
/*
|
/*
|
||||||
* We run this as late_initcall() such that it runs after all built-in drivers,
|
* We run this as late_initcall() such that it runs after all built-in drivers,
|
||||||
|
@@ -356,7 +374,7 @@ u64 sched_clock_cpu(int cpu)
|
||||||
return sched_clock() + __sched_clock_offset;
|
return sched_clock() + __sched_clock_offset;
|
||||||
|
|
||||||
if (unlikely(!sched_clock_running))
|
if (unlikely(!sched_clock_running))
|
||||||
return 0ull;
|
return sched_clock();
|
||||||
|
|
||||||
preempt_disable_notrace();
|
preempt_disable_notrace();
|
||||||
scd = cpu_sdc(cpu);
|
scd = cpu_sdc(cpu);
|
||||||
|
|
Загрузка…
Ссылка в новой задаче