clocksource: Load the ACPI PM clocksource asynchronously
The ACPI PM clocksource takes quite some time to initialize, which
increases kernel boot time by a double-digit percentage, even though
almost all modern systems will be using the HPET anyway. This patch
turns the clocksource loading into an asynchronous operation: it no
longer holds up the boot, while the clocksource still becomes
available normally. To make this work well, a udelay() had to be
turned into a usleep_range() so that on UP systems we yield the CPU
to regular boot tasks instead of spinning.

CC: John Stultz <johnstul@us.ibm.com>
CC: Thomas Gleixner <tglx@linutronix.de>
CC: Len Brown <lenb@kernel.org>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Parent: 12d6d41276
Commit: b519508298
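At its core the patch applies the async_schedule() pattern from <linux/async.h>: the fs_initcall still runs early and keeps the cheap feasibility check, but the slow verification loop is queued to run later in process context, where it is allowed to sleep. Below is a minimal sketch of that shape only, assuming hypothetical names my_timer_init() and my_timer_verify_async(); it is not the acpi_pm driver code itself.

#include <linux/async.h>
#include <linux/delay.h>
#include <linux/init.h>

/* Hypothetical example of the pattern, not the acpi_pm driver itself. */
static void __init my_timer_verify_async(void *unused, async_cookie_t cookie)
{
        int i;

        /*
         * Slow verification runs off the boot-critical path.  The async
         * worker runs in process context, so we may sleep here with
         * usleep_range() instead of busy-waiting with udelay(); on a UP
         * machine the rest of the boot keeps making progress meanwhile.
         */
        for (i = 0; i < 10; i++)
                usleep_range(100 * i, 100 * i + 100);

        /* ...register the clocksource here once it passes verification... */
}

static int __init my_timer_init(void)
{
        /* Cheap check stays synchronous; the expensive part is deferred. */
        async_schedule(my_timer_verify_async, NULL);
        return 0;
}
fs_initcall(my_timer_init);

Deferring __init work this way is safe because kernel_init() waits for outstanding async work (async_synchronize_full()) before freeing init memory and handing control to userspace, so the verification still completes during boot; it just no longer serializes it.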
@@ -23,6 +23,7 @@
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
+#include <linux/async.h>
 #include <asm/io.h>
 
 /*
@@ -179,17 +180,15 @@ static int verify_pmtmr_rate(void)
 /* Number of reads we try to get two different values */
 #define ACPI_PM_READ_CHECKS 10000
 
-static int __init init_acpi_pm_clocksource(void)
+static void __init acpi_pm_clocksource_async(void *unused, async_cookie_t cookie)
 {
         cycle_t value1, value2;
         unsigned int i, j = 0;
 
-        if (!pmtmr_ioport)
-                return -ENODEV;
 
         /* "verify" this timing source: */
         for (j = 0; j < ACPI_PM_MONOTONICITY_CHECKS; j++) {
-                udelay(100 * j);
+                usleep_range(100 * j, 100 * j + 100);
                 value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
                 for (i = 0; i < ACPI_PM_READ_CHECKS; i++) {
                         value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
@@ -203,25 +202,34 @@ static int __init init_acpi_pm_clocksource(void)
                                " 0x%#llx, 0x%#llx - aborting.\n",
                                value1, value2);
                         pmtmr_ioport = 0;
-                        return -EINVAL;
+                        return;
                 }
                 if (i == ACPI_PM_READ_CHECKS) {
                         printk(KERN_INFO "PM-Timer failed consistency check "
                                " (0x%#llx) - aborting.\n", value1);
                         pmtmr_ioport = 0;
-                        return -ENODEV;
+                        return;
                 }
         }
 
         if (verify_pmtmr_rate() != 0){
                 pmtmr_ioport = 0;
-                return -ENODEV;
+                return;
         }
 
-        return clocksource_register_hz(&clocksource_acpi_pm,
+        clocksource_register_hz(&clocksource_acpi_pm,
                                         PMTMR_TICKS_PER_SEC);
 }
 
+static int __init init_acpi_pm_clocksource(void)
+{
+        if (!pmtmr_ioport)
+                return -ENODEV;
+
+        async_schedule(acpi_pm_clocksource_async, NULL);
+        return 0;
+}
+
 /* We use fs_initcall because we want the PCI fixups to have run
  * but we still need to load before device_initcall
  */