perf, x86: Remove superfluous arguments to x86_perf_event_set_period()
The second and third arguments to x86_perf_event_set_period() are superfluous since they are simple expressions of the first argument. Hence remove them.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
LKML-Reference: <20100304140100.006500906@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
Родитель
3fb2b8ddcc
Коммит
07088edb88
|
@ -170,8 +170,7 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
|
||||||
.enabled = 1,
|
.enabled = 1,
|
||||||
};
|
};
|
||||||
|
|
||||||
static int x86_perf_event_set_period(struct perf_event *event,
|
static int x86_perf_event_set_period(struct perf_event *event);
|
||||||
struct hw_perf_event *hwc, int idx);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Generalized hw caching related hw_event table, filled
|
* Generalized hw caching related hw_event table, filled
|
||||||
|
@ -835,7 +834,7 @@ void hw_perf_enable(void)
|
||||||
|
|
||||||
if (hwc->idx == -1) {
|
if (hwc->idx == -1) {
|
||||||
x86_assign_hw_event(event, cpuc, i);
|
x86_assign_hw_event(event, cpuc, i);
|
||||||
x86_perf_event_set_period(event, hwc, hwc->idx);
|
x86_perf_event_set_period(event);
|
||||||
}
|
}
|
||||||
/*
|
/*
|
||||||
* need to mark as active because x86_pmu_disable()
|
* need to mark as active because x86_pmu_disable()
|
||||||
|
@ -876,12 +875,12 @@ static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
|
||||||
* To be called with the event disabled in hw:
|
* To be called with the event disabled in hw:
|
||||||
*/
|
*/
|
||||||
static int
|
static int
|
||||||
x86_perf_event_set_period(struct perf_event *event,
|
x86_perf_event_set_period(struct perf_event *event)
|
||||||
struct hw_perf_event *hwc, int idx)
|
|
||||||
{
|
{
|
||||||
|
struct hw_perf_event *hwc = &event->hw;
|
||||||
s64 left = atomic64_read(&hwc->period_left);
|
s64 left = atomic64_read(&hwc->period_left);
|
||||||
s64 period = hwc->sample_period;
|
s64 period = hwc->sample_period;
|
||||||
int err, ret = 0;
|
int err, ret = 0, idx = hwc->idx;
|
||||||
|
|
||||||
if (idx == X86_PMC_IDX_FIXED_BTS)
|
if (idx == X86_PMC_IDX_FIXED_BTS)
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -979,7 +978,7 @@ static int x86_pmu_start(struct perf_event *event)
|
||||||
if (hwc->idx == -1)
|
if (hwc->idx == -1)
|
||||||
return -EAGAIN;
|
return -EAGAIN;
|
||||||
|
|
||||||
x86_perf_event_set_period(event, hwc, hwc->idx);
|
x86_perf_event_set_period(event);
|
||||||
x86_pmu.enable(hwc, hwc->idx);
|
x86_pmu.enable(hwc, hwc->idx);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -1123,7 +1122,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
|
||||||
handled = 1;
|
handled = 1;
|
||||||
data.period = event->hw.last_period;
|
data.period = event->hw.last_period;
|
||||||
|
|
||||||
if (!x86_perf_event_set_period(event, hwc, idx))
|
if (!x86_perf_event_set_period(event))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (perf_event_overflow(event, 1, &data, regs))
|
if (perf_event_overflow(event, 1, &data, regs))
|
||||||
|
|
|
@ -699,7 +699,7 @@ static int intel_pmu_save_and_restart(struct perf_event *event)
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
x86_perf_event_update(event, hwc, idx);
|
x86_perf_event_update(event, hwc, idx);
|
||||||
ret = x86_perf_event_set_period(event, hwc, idx);
|
ret = x86_perf_event_set_period(event);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
Загрузка…
Ссылка в новой задаче