x86, tsc, sched: Recompute cyc2ns_offset's during resume from sleep states
TSCs get reset after suspend/resume (even on CPUs with an invariant TSC, which
runs at a constant rate across ACPI P-, C- and T-states). On some systems the
BIOS appears to reinitialize the TSC to an arbitrarily large value (still
synchronized across CPUs) during resume.

This leads to a scenario where the scheduler's rq->clock (sched_clock_cpu())
is less than rq->age_stamp (introduced in 2.6.32). scale_rt_power() then
returns a very large value, and the resulting big group power set by
update_group_power() causes improper load balancing between busy and idle
CPUs after suspend/resume. As a result, multi-threaded workloads (like kernel
compilation) ran slower after a suspend/resume cycle on Core i5 laptops.

Fix this by recomputing the cyc2ns_offset's during resume, so that
sched_clock() continues from the point where it left off during suspend.

Reported-by: Florian Pritz <flo@xssn.at>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: <stable@kernel.org> # [v2.6.32+]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1282262618.2675.24.camel@sbsiddha-MOBL3.sc.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent: 861d034ee8
Commit: cd7240c0b9
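For context: on x86, sched_clock() converts the TSC to nanoseconds using a
per-CPU scale and an additive per-CPU offset, roughly
ns = ((tsc * cyc2ns) >> CYC2NS_SCALE_FACTOR) + cyc2ns_offset. The fix below
records the nanosecond value at suspend and, on resume, picks new offsets so
the conversion lands back on that value. The following is a minimal userspace
sketch of that arithmetic; model_sched_clock(), the scale constant and the TSC
values are made up for illustration and are not the kernel code:

#include <stdio.h>

/*
 * Illustrative model of the cyc2ns conversion behind sched_clock().
 * In the kernel the scale and offset are per-CPU variables kept in
 * arch/x86/kernel/tsc.c; here a single pair stands in for them.
 */
#define CYC2NS_SCALE_FACTOR 10                 /* fixed-point shift */

static unsigned long long cyc2ns_scale = 427;  /* example scale, ~2.4 GHz TSC */
static unsigned long long cyc2ns_offset;       /* additive offset in ns */

static unsigned long long model_sched_clock(unsigned long long tsc)
{
        return ((tsc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR) + cyc2ns_offset;
}

int main(void)
{
        unsigned long long tsc_suspend = 900000000000ULL; /* TSC just before suspend */
        unsigned long long tsc_resume  = 5000000ULL;      /* BIOS reset the TSC on resume */

        /* save_sched_clock_state(): remember where the clock was at suspend */
        unsigned long long cyc2ns_suspend = model_sched_clock(tsc_suspend);

        /*
         * restore_sched_clock_state(): clear the stale offset, then choose a
         * new one so the clock continues from cyc2ns_suspend instead of
         * jumping (possibly below rq->age_stamp).
         */
        cyc2ns_offset = 0;
        cyc2ns_offset = cyc2ns_suspend - model_sched_clock(tsc_resume);

        printf("clock at suspend:   %llu ns\n", cyc2ns_suspend);
        printf("clock after resume: %llu ns\n", model_sched_clock(tsc_resume));
        return 0;
}

Both printed values are equal, which is exactly the continuity property the
patch restores.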
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -59,5 +59,7 @@ extern void check_tsc_sync_source(int cpu);
 extern void check_tsc_sync_target(void);
 
 extern int notsc_setup(char *);
+extern void save_sched_clock_state(void);
+extern void restore_sched_clock_state(void);
 
 #endif /* _ASM_X86_TSC_H */
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -626,6 +626,44 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 	local_irq_restore(flags);
 }
 
+static unsigned long long cyc2ns_suspend;
+
+void save_sched_clock_state(void)
+{
+	if (!sched_clock_stable)
+		return;
+
+	cyc2ns_suspend = sched_clock();
+}
+
+/*
+ * Even on processors with an invariant TSC, the TSC gets reset in some
+ * ACPI system sleep states. And on some systems the BIOS seems to reinit the
+ * TSC to an arbitrary value (still sync'd across cpus) during resume from such
+ * sleep states. To cope with this, recompute the cyc2ns_offset for each cpu so
+ * that sched_clock() continues from the point where it was left off during
+ * suspend.
+ */
+void restore_sched_clock_state(void)
+{
+	unsigned long long offset;
+	unsigned long flags;
+	int cpu;
+
+	if (!sched_clock_stable)
+		return;
+
+	local_irq_save(flags);
+
+	get_cpu_var(cyc2ns_offset) = 0;
+	offset = cyc2ns_suspend - sched_clock();
+
+	for_each_possible_cpu(cpu)
+		per_cpu(cyc2ns_offset, cpu) = offset;
+
+	local_irq_restore(flags);
+}
+
 #ifdef CONFIG_CPU_FREQ
 
 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
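The per-CPU pair (cyc2ns, cyc2ns_offset) written above is what the
sched_clock() fast path reads on every call, roughly as in the sketch below
(an approximation of the kernel's __cycles_2_ns(), with plain arrays standing
in for per-CPU variables; illustration only, not the real code). This is also
why get_cpu_var(cyc2ns_offset) is cleared before sched_clock() is sampled: the
delta must be measured against the raw scaled TSC rather than the stale
pre-suspend offset, and because the reinitialized TSCs remain synchronized
across CPUs, that single delta is valid for every CPU's offset.

/*
 * Approximation of the cycles -> ns conversion behind sched_clock();
 * arrays stand in for the kernel's per-CPU variables.
 */
#define CYC2NS_SCALE_FACTOR 10
#define NCPUS 4

static unsigned long long cyc2ns[NCPUS];        /* per-CPU scale */
static unsigned long long cyc2ns_offset[NCPUS]; /* per-CPU offset rewritten on resume */

static unsigned long long cycles_to_ns(unsigned long long cyc, int cpu)
{
        return ((cyc * cyc2ns[cpu]) >> CYC2NS_SCALE_FACTOR) + cyc2ns_offset[cpu];
}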
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -113,6 +113,7 @@ static void __save_processor_state(struct saved_context *ctxt)
 void save_processor_state(void)
 {
 	__save_processor_state(&saved_context);
+	save_sched_clock_state();
 }
 #ifdef CONFIG_X86_32
 EXPORT_SYMBOL(save_processor_state);
@@ -229,6 +230,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
 void restore_processor_state(void)
 {
 	__restore_processor_state(&saved_context);
+	restore_sched_clock_state();
 }
 #ifdef CONFIG_X86_32
 EXPORT_SYMBOL(restore_processor_state);