ARM: 6835/1: perf: ensure overflows aren't missed due to IRQ latency
If a counter overflows during a perf stat profiling run, it may overtake the last known value of the counter:

    0          prev    new                    0xffffffff
    |----------|-------|----------------------|

In this case, the number of events that have occurred is (0xffffffff - prev) + new. Unfortunately, the event update code will not realise that an overflow has occurred and will instead report the event delta as (new - prev), which may be considerably smaller than the real count.

This patch adds an extra argument to armpmu_event_update which indicates whether or not an overflow has occurred. If an overflow has occurred, we use the maximum period of the counter to calculate the elapsed events.

Acked-by: Jamie Iles <jamie@jamieiles.com>
Reported-by: Ashwin Chaugule <ashwinc@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
This commit is contained in:
Parent
574b69cbb6
Commit
a737823d37
|
@ -205,11 +205,9 @@ armpmu_event_set_period(struct perf_event *event,
|
||||||
static u64
|
static u64
|
||||||
armpmu_event_update(struct perf_event *event,
|
armpmu_event_update(struct perf_event *event,
|
||||||
struct hw_perf_event *hwc,
|
struct hw_perf_event *hwc,
|
||||||
int idx)
|
int idx, int overflow)
|
||||||
{
|
{
|
||||||
int shift = 64 - 32;
|
u64 delta, prev_raw_count, new_raw_count;
|
||||||
s64 prev_raw_count, new_raw_count;
|
|
||||||
u64 delta;
|
|
||||||
|
|
||||||
again:
|
again:
|
||||||
prev_raw_count = local64_read(&hwc->prev_count);
|
prev_raw_count = local64_read(&hwc->prev_count);
|
||||||
|
@ -219,8 +217,13 @@ again:
|
||||||
new_raw_count) != prev_raw_count)
|
new_raw_count) != prev_raw_count)
|
||||||
goto again;
|
goto again;
|
||||||
|
|
||||||
delta = (new_raw_count << shift) - (prev_raw_count << shift);
|
new_raw_count &= armpmu->max_period;
|
||||||
delta >>= shift;
|
prev_raw_count &= armpmu->max_period;
|
||||||
|
|
||||||
|
if (overflow)
|
||||||
|
delta = armpmu->max_period - prev_raw_count + new_raw_count;
|
||||||
|
else
|
||||||
|
delta = new_raw_count - prev_raw_count;
|
||||||
|
|
||||||
local64_add(delta, &event->count);
|
local64_add(delta, &event->count);
|
||||||
local64_sub(delta, &hwc->period_left);
|
local64_sub(delta, &hwc->period_left);
|
||||||
|
@ -237,7 +240,7 @@ armpmu_read(struct perf_event *event)
|
||||||
if (hwc->idx < 0)
|
if (hwc->idx < 0)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
armpmu_event_update(event, hwc, hwc->idx);
|
armpmu_event_update(event, hwc, hwc->idx, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
|
@ -255,7 +258,7 @@ armpmu_stop(struct perf_event *event, int flags)
|
||||||
if (!(hwc->state & PERF_HES_STOPPED)) {
|
if (!(hwc->state & PERF_HES_STOPPED)) {
|
||||||
armpmu->disable(hwc, hwc->idx);
|
armpmu->disable(hwc, hwc->idx);
|
||||||
barrier(); /* why? */
|
barrier(); /* why? */
|
||||||
armpmu_event_update(event, hwc, hwc->idx);
|
armpmu_event_update(event, hwc, hwc->idx, 0);
|
||||||
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
|
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -474,7 +474,7 @@ armv6pmu_handle_irq(int irq_num,
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
hwc = &event->hw;
|
hwc = &event->hw;
|
||||||
armpmu_event_update(event, hwc, idx);
|
armpmu_event_update(event, hwc, idx, 1);
|
||||||
data.period = event->hw.last_period;
|
data.period = event->hw.last_period;
|
||||||
if (!armpmu_event_set_period(event, hwc, idx))
|
if (!armpmu_event_set_period(event, hwc, idx))
|
||||||
continue;
|
continue;
|
||||||
|
|
|
@ -782,7 +782,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
hwc = &event->hw;
|
hwc = &event->hw;
|
||||||
armpmu_event_update(event, hwc, idx);
|
armpmu_event_update(event, hwc, idx, 1);
|
||||||
data.period = event->hw.last_period;
|
data.period = event->hw.last_period;
|
||||||
if (!armpmu_event_set_period(event, hwc, idx))
|
if (!armpmu_event_set_period(event, hwc, idx))
|
||||||
continue;
|
continue;
|
||||||
|
|
|
@ -246,7 +246,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
hwc = &event->hw;
|
hwc = &event->hw;
|
||||||
armpmu_event_update(event, hwc, idx);
|
armpmu_event_update(event, hwc, idx, 1);
|
||||||
data.period = event->hw.last_period;
|
data.period = event->hw.last_period;
|
||||||
if (!armpmu_event_set_period(event, hwc, idx))
|
if (!armpmu_event_set_period(event, hwc, idx))
|
||||||
continue;
|
continue;
|
||||||
|
@ -578,7 +578,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
hwc = &event->hw;
|
hwc = &event->hw;
|
||||||
armpmu_event_update(event, hwc, idx);
|
armpmu_event_update(event, hwc, idx, 1);
|
||||||
data.period = event->hw.last_period;
|
data.period = event->hw.last_period;
|
||||||
if (!armpmu_event_set_period(event, hwc, idx))
|
if (!armpmu_event_set_period(event, hwc, idx))
|
||||||
continue;
|
continue;
|
||||||
|
|
Loading…
Open link in new issue