timers: Optimize collect_expired_timers() for NOHZ

After a NOHZ idle sleep the timer wheel must be forwarded to current
jiffies. There might be expired timers, so the current code loops and
checks the expired buckets for timers. This can take quite some time
for long NOHZ idle periods.

The pending bitmask in the timer base allows us to do a quick search
for the next expiring timer and therefore a fast forward of the base
time, which prevents pointless long-lasting loops.

For a 3 second idle sleep this reduces the catchup time from ~1ms to
5us.

Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094342.351296290@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent: 73420fea80
Commit: 236968383c
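Why the bitmap search wins is easiest to see in isolation. The sketch below contrasts the two catch-up strategies for a single wheel level in plain user-space C; all names (pending, clk, NBUCKETS, catchup_*) are hypothetical stand-ins, and the kernel instead searches the pending_map bitmap in struct timer_base across all levels via __next_timer_interrupt(). The point is only the shape of the work: one bit search replaces up to one loop iteration per elapsed jiffy.

/*
 * User-space sketch, not kernel code: one wheel level of NBUCKETS
 * buckets; 'pending' has one bit set per bucket that holds timers.
 * The toy ignores multi-rotation wrap, which the kernel's cascading
 * levels handle.
 */
#include <stdint.h>
#include <stdio.h>

#define NBUCKETS 64

static uint64_t pending;	/* bucket occupancy bitmap */
static unsigned long clk;	/* wheel clock; lags jiffies after idle */

/* Old approach: step through every elapsed jiffy, even empty ones. */
static void catchup_loop(unsigned long jiffies)
{
	while (clk < jiffies && !(pending & (1ULL << (clk % NBUCKETS))))
		clk++;		/* up to one iteration per elapsed tick */
}

/* New approach: one bitmap search jumps to the next occupied bucket. */
static void catchup_bitmap(unsigned long jiffies)
{
	unsigned start = clk % NBUCKETS;
	uint64_t rot;

	if (!pending) {		/* nothing queued: forward in one step */
		clk = jiffies;
		return;
	}
	/* Rotate the bitmap so bit 0 is the current bucket. */
	rot = (pending >> start) | (start ? pending << (NBUCKETS - start) : 0);
	clk += __builtin_ffsll(rot) - 1;
	if (clk > jiffies)	/* never forward past the current time */
		clk = jiffies;
}

int main(void)
{
	pending = 1ULL << 42;	/* one timer queued in bucket 42 */

	clk = 5;
	catchup_loop(3005);	/* 37 iterations to reach bucket 42 */
	printf("loop:   clk=%lu\n", clk);

	clk = 5;
	catchup_bitmap(3005);	/* same result from a single bit search */
	printf("bitmap: clk=%lu\n", clk);
	return 0;
}

The clamp at jiffies corresponds to the time_after(next, jiffies) branch in the patch, minus the "jiffies - 1" call-site detail discussed after the diff.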
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1252,8 +1252,8 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
 	}
 }
 
-static int collect_expired_timers(struct timer_base *base,
-				  struct hlist_head *heads)
+static int __collect_expired_timers(struct timer_base *base,
+				    struct hlist_head *heads)
 {
 	unsigned long clk = base->clk;
 	struct hlist_head *vec;
@@ -1279,9 +1279,9 @@ static int collect_expired_timers(struct timer_base *base,
 
 #ifdef CONFIG_NO_HZ_COMMON
 /*
- * Find the next pending bucket of a level. Search from @offset + @clk upwards
- * and if nothing there, search from start of the level (@offset) up to
- * @offset + clk.
+ * Find the next pending bucket of a level. Search from level start (@offset)
+ * + @clk upwards and if nothing there, search from start of the level
+ * (@offset) up to @offset + clk.
  */
 static int next_pending_bucket(struct timer_base *base, unsigned offset,
 			       unsigned clk)
@@ -1298,14 +1298,14 @@ static int next_pending_bucket(struct timer_base *base, unsigned offset,
 }
 
 /*
- * Search the first expiring timer in the various clock levels.
+ * Search the first expiring timer in the various clock levels. Caller must
+ * hold base->lock.
  */
 static unsigned long __next_timer_interrupt(struct timer_base *base)
 {
 	unsigned long clk, next, adj;
 	unsigned lvl, offset = 0;
 
-	spin_lock(&base->lock);
 	next = base->clk + NEXT_TIMER_MAX_DELTA;
 	clk = base->clk;
 	for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
@@ -1358,7 +1358,6 @@ static unsigned long __next_timer_interrupt(struct timer_base *base)
 		clk >>= LVL_CLK_SHIFT;
 		clk += adj;
 	}
-	spin_unlock(&base->lock);
 	return next;
 }
 
@@ -1416,7 +1415,10 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 	if (cpu_is_offline(smp_processor_id()))
 		return expires;
 
+	spin_lock(&base->lock);
 	nextevt = __next_timer_interrupt(base);
+	spin_unlock(&base->lock);
+
 	if (time_before_eq(nextevt, basej))
 		expires = basem;
 	else
@@ -1424,6 +1426,37 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 
 	return cmp_next_hrtimer_event(basem, expires);
 }
+
+static int collect_expired_timers(struct timer_base *base,
+				  struct hlist_head *heads)
+{
+	/*
+	 * NOHZ optimization. After a long idle sleep we need to forward the
+	 * base to current jiffies. Avoid a loop by searching the bitfield for
+	 * the next expiring timer.
+	 */
+	if ((long)(jiffies - base->clk) > 2) {
+		unsigned long next = __next_timer_interrupt(base);
+
+		/*
+		 * If the next timer is ahead of time forward to current
+		 * jiffies, otherwise forward to the next expiry time.
+		 */
+		if (time_after(next, jiffies)) {
+			/* The call site will increment clock! */
+			base->clk = jiffies - 1;
+			return 0;
+		}
+		base->clk = next;
+	}
+	return __collect_expired_timers(base, heads);
+}
+#else
+static inline int collect_expired_timers(struct timer_base *base,
+					 struct hlist_head *heads)
+{
+	return __collect_expired_timers(base, heads);
+}
 #endif
 
 /*
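A note on the next_pending_bucket() comment that this patch rewords: the search within a level is circular. Below is a sketch of that two-pass search; find_first_set_in_range() is a hypothetical stand-in for the kernel's find_next_bit(), and the return convention (distance in buckets, or -1 for an empty level) follows the function the comment documents.

#include <stdint.h>

/*
 * Hypothetical stand-in for the kernel's find_next_bit(): returns the
 * index of the first set bit in map[] within [start, end), or 'end'
 * if the range contains no set bit.
 */
static unsigned find_first_set_in_range(const uint64_t *map,
					unsigned start, unsigned end)
{
	unsigned i;

	for (i = start; i < end; i++)
		if (map[i / 64] & (1ULL << (i % 64)))
			return i;
	return end;
}

/*
 * Sketch of the two-pass wrap-around search described in the comment.
 * Returns the distance in buckets from 'clk' to the next pending
 * bucket of the level starting at 'offset', or -1 if the level holds
 * no timers. 'lvl_size' corresponds to the kernel's LVL_SIZE.
 */
static int next_pending_sketch(const uint64_t *map, unsigned offset,
			       unsigned clk, unsigned lvl_size)
{
	unsigned start = offset + clk, end = offset + lvl_size;
	unsigned pos;

	/* Pass 1: from the current bucket up to the end of the level. */
	pos = find_first_set_in_range(map, start, end);
	if (pos < end)
		return pos - start;

	/* Pass 2: wrapped around; from level start up to current bucket. */
	pos = find_first_set_in_range(map, offset, start);
	return pos < start ? pos + lvl_size - start : -1;
}

The second pass needs the + lvl_size adjustment because buckets below the current index logically belong to the next rotation of the level.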
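Two details of the new collect_expired_timers() deserve a note. The locking change is connected: __next_timer_interrupt() drops its own spin_lock()/spin_unlock() because this new caller already runs under base->lock, and get_next_timer_interrupt() compensates by taking the lock around its call. The guard "(long)(jiffies - base->clk) > 2" skips the bitmap search for gaps of a couple of ticks, where the plain bucket walk is already cheap. And "base->clk = jiffies - 1" relies on a contract with the expiry loop, sketched below as a hypothetical fragment loosely modeled on __run_timers(), which is not part of this diff.

/*
 * Sketch of the expiry loop that calls collect_expired_timers();
 * loosely modeled on __run_timers(), not part of this diff. It runs
 * under base->lock, which is why __next_timer_interrupt() may no
 * longer take the lock itself.
 */
while (time_after_eq(jiffies, base->clk)) {
	levels = collect_expired_timers(base, heads);

	/*
	 * This increment is the reason for "base->clk = jiffies - 1"
	 * above: when no timer is pending yet, the clock parks one
	 * tick short and this step lands exactly on jiffies.
	 */
	base->clk++;

	while (levels--)
		expire_timers(base, heads + levels);
}

After the increment the clock sits exactly on jiffies, so the next pass collects the current bucket through the ordinary __collect_expired_timers() path and the loop exits, instead of iterating once per elapsed tick of the idle period.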