hrtimers: Move lock held region in hrtimer_interrupt()
We need to update the base offsets from this code and we need to do
that under base->lock. Move the lock held region around the
ktime_get() calls. The ktime_get() calls are going to be replaced
with a function which gets the time and the offsets atomically.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Prarit Bhargava <prarit@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: John Stultz <johnstul@us.ibm.com>
Link: http://lkml.kernel.org/r/1341960205-56738-6-git-send-email-johnstul@us.ibm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
Parent: 5b9fe759a6
Commit: 196951e912
@@ -1263,11 +1263,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	cpu_base->nr_events++;
 	dev->next_event.tv64 = KTIME_MAX;
 
+	raw_spin_lock(&cpu_base->lock);
 	entry_time = now = ktime_get();
 retry:
 	expires_next.tv64 = KTIME_MAX;
-
-	raw_spin_lock(&cpu_base->lock);
 	/*
 	 * We set expires_next to KTIME_MAX here with cpu_base->lock
 	 * held to prevent that a timer is enqueued in our queue via
@@ -1344,6 +1343,7 @@ retry:
 	 * interrupt routine. We give it 3 attempts to avoid
 	 * overreacting on some spurious event.
 	 */
+	raw_spin_lock(&cpu_base->lock);
 	now = ktime_get();
 	cpu_base->nr_retries++;
 	if (++retries < 3)
@@ -1356,6 +1356,7 @@ retry:
 	 */
 	cpu_base->nr_hangs++;
 	cpu_base->hang_detected = 1;
+	raw_spin_unlock(&cpu_base->lock);
 	delta = ktime_sub(now, entry_time);
 	if (delta.tv64 > cpu_base->max_hang_time.tv64)
 		cpu_base->max_hang_time = delta;
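To make the intent of the commit message concrete, here is a minimal userspace
analogue in plain C with pthreads. It is not kernel code: every structure,
field, and function name below is invented for the sketch, and only the
pattern matters -- the timestamp and the base offsets are sampled inside one
lock-held region, which is what the widened lock region in hrtimer_interrupt()
prepares for.

#define _POSIX_C_SOURCE 200809L
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct fake_cpu_base {
	pthread_mutex_t lock;
	int64_t offset_real;	/* stand-in for the clock base offsets */
	int64_t offset_boot;
};

static struct fake_cpu_base base = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Caller must hold base->lock: return "now" and the offsets as one snapshot. */
static int64_t fake_update_base(struct fake_cpu_base *b,
				int64_t *real, int64_t *boot)
{
	int64_t now = now_ns();

	*real = b->offset_real;
	*boot = b->offset_boot;
	return now;
}

int main(void)
{
	int64_t real, boot, entry_time;

	pthread_mutex_lock(&base.lock);		/* lock taken before the sample... */
	entry_time = fake_update_base(&base, &real, &boot);
	pthread_mutex_unlock(&base.lock);	/* ...and dropped afterwards */

	printf("entry_time=%lld real=%lld boot=%lld\n",
	       (long long)entry_time, (long long)real, (long long)boot);
	return 0;
}

With the old placement the time was read before the lock was taken, so the
offsets could change between that read and later work done under the lock;
taking the lock around the ktime_get() calls (and later replacing them with a
helper that returns time and offsets together, as the message says) removes
that window.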