clockevents: optimise tick_nohz_stop_sched_tick() a bit
Call ts = &per_cpu(tick_cpu_sched, cpu); and cpu = smp_processor_id(); once instead of twice. No functional change, as the changed code runs with local irqs disabled. Reduces source lines and text size (20 bytes on x86_64). [ akpm@linux-foundation.org: Build fix ] Signed-off-by: Karsten Wiese <fzu@wemgehoertderstaat.de> Cc: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
Родитель
3f3eafc921
Коммит
903b8a8d48
|
@ -158,9 +158,8 @@ void tick_nohz_stop_idle(int cpu)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static ktime_t tick_nohz_start_idle(int cpu)
|
static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
|
||||||
{
|
{
|
||||||
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
|
|
||||||
ktime_t now, delta;
|
ktime_t now, delta;
|
||||||
|
|
||||||
now = ktime_get();
|
now = ktime_get();
|
||||||
|
@ -201,8 +200,8 @@ void tick_nohz_stop_sched_tick(void)
|
||||||
local_irq_save(flags);
|
local_irq_save(flags);
|
||||||
|
|
||||||
cpu = smp_processor_id();
|
cpu = smp_processor_id();
|
||||||
now = tick_nohz_start_idle(cpu);
|
|
||||||
ts = &per_cpu(tick_cpu_sched, cpu);
|
ts = &per_cpu(tick_cpu_sched, cpu);
|
||||||
|
now = tick_nohz_start_idle(ts);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If this cpu is offline and it is the one which updates
|
* If this cpu is offline and it is the one which updates
|
||||||
|
@ -222,7 +221,6 @@ void tick_nohz_stop_sched_tick(void)
|
||||||
if (need_resched())
|
if (need_resched())
|
||||||
goto end;
|
goto end;
|
||||||
|
|
||||||
cpu = smp_processor_id();
|
|
||||||
if (unlikely(local_softirq_pending())) {
|
if (unlikely(local_softirq_pending())) {
|
||||||
static int ratelimit;
|
static int ratelimit;
|
||||||
|
|
||||||
|
|
Загрузка…
Ссылка в новой задаче