clockevents: Remove the per cpu tick skew
Historically, Linux has tried to make the regular timer tick on the various CPUs not happen at the same time, to avoid contention on xtime_lock.

Nowadays, with the tickless kernel, this contention no longer happens since time keeping and updating are done differently. In addition, this skew is actually hurting power consumption in a measurable way on many-core systems.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
LKML-Reference: <20100727210210.58d3118c@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Parent: 2b08de0073
Commit: af5ab277de
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -780,7 +780,6 @@ void tick_setup_sched_timer(void)
 {
 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	ktime_t now = ktime_get();
-	u64 offset;
 
 	/*
 	 * Emulate tick processing via per-CPU hrtimers:
@@ -790,10 +789,6 @@ void tick_setup_sched_timer(void)
 
 	/* Get the next period (per cpu) */
 	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
-	offset = ktime_to_ns(tick_period) >> 1;
-	do_div(offset, num_possible_cpus());
-	offset *= smp_processor_id();
-	hrtimer_add_expires_ns(&ts->sched_timer, offset);
 
 	for (;;) {
 		hrtimer_forward(&ts->sched_timer, now, tick_period);
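For reference, the four removed lines spread each CPU's first hrtimer expiry across the first half of a tick period, so that ticks on different CPUs would not hit xtime_lock at the same instant. The sketch below reproduces just that arithmetic in user space; the tick length (1000 Hz) and CPU count (4) are assumed example values, and plain integer division with a loop stands in for the kernel's do_div() and smp_processor_id().

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Assumed example values, not taken from the kernel:
 * a 1000 Hz tick (1 ms period) on a 4-CPU machine.
 */
#define TICK_PERIOD_NS	1000000ULL
#define NUM_CPUS	4U

int main(void)
{
	for (uint32_t cpu = 0; cpu < NUM_CPUS; cpu++) {
		/* Same arithmetic as the removed lines: take half a tick
		 * period, split it evenly across all possible CPUs, and
		 * scale by the CPU number.
		 */
		uint64_t offset = (TICK_PERIOD_NS >> 1) / NUM_CPUS * cpu;
		printf("cpu %" PRIu32 ": first expiry skewed by %" PRIu64 " ns\n",
		       cpu, offset);
	}
	return 0;
}

With these assumed values the skew works out to 0, 125000, 250000 and 375000 ns for CPUs 0-3; after this patch every CPU's sched_timer expires on the same tick boundary instead.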