Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf: Fix double start/stop in x86_pmu_start()
  perf evsel: Fix an issue where perf report fails to show the proper percentage
  perf tools: Fix prefix matching for kernel maps
  perf tools: Fix perf stack to non executable on x86_64
  perf: Remove deprecated WARN_ON_ONCE()
commit ce2814f227
@@ -439,7 +439,6 @@ void intel_pmu_pebs_enable(struct perf_event *event)
 	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
 
 	cpuc->pebs_enabled |= 1ULL << hwc->idx;
-	WARN_ON_ONCE(cpuc->enabled);
 
 	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
 		intel_pmu_lbr_enable(event);
@@ -72,8 +72,6 @@ void intel_pmu_lbr_enable(struct perf_event *event)
 	if (!x86_pmu.lbr_nr)
 		return;
 
-	WARN_ON_ONCE(cpuc->enabled);
-
 	/*
 	 * Reset the LBR stack if we changed task context to
 	 * avoid data leaks.
@@ -2303,7 +2303,7 @@ do { \
 static DEFINE_PER_CPU(int, perf_throttled_count);
 static DEFINE_PER_CPU(u64, perf_throttled_seq);
 
-static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
+static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	s64 period, sample_period;
@@ -2322,9 +2322,13 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
 	hwc->sample_period = sample_period;
 
 	if (local64_read(&hwc->period_left) > 8*sample_period) {
-		event->pmu->stop(event, PERF_EF_UPDATE);
+		if (disable)
+			event->pmu->stop(event, PERF_EF_UPDATE);
+
 		local64_set(&hwc->period_left, 0);
-		event->pmu->start(event, PERF_EF_RELOAD);
+
+		if (disable)
+			event->pmu->start(event, PERF_EF_RELOAD);
 	}
 }
 
@@ -2350,6 +2354,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		return;
 
 	raw_spin_lock(&ctx->lock);
+	perf_pmu_disable(ctx->pmu);
 
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -2381,13 +2386,17 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		/*
 		 * restart the event
 		 * reload only if value has changed
+		 * we have stopped the event so tell that
+		 * to perf_adjust_period() to avoid stopping it
+		 * twice.
 		 */
 		if (delta > 0)
-			perf_adjust_period(event, period, delta);
+			perf_adjust_period(event, period, delta, false);
 
 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
 	}
 
+	perf_pmu_enable(ctx->pmu);
 	raw_spin_unlock(&ctx->lock);
 }
 
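The two hunks above also bracket the unthrottling loop with perf_pmu_disable(ctx->pmu) and perf_pmu_enable(ctx->pmu). A minimal userspace sketch of the nesting behaviour this bracketing assumes, namely a reference-counted disable/enable pair where only the outermost call touches the hardware (the toy_* names are illustrative, not kernel APIs):

#include <stdio.h>

/* Toy model, not kernel code: a reference-counted disable/enable pair. */
struct toy_pmu { int disable_count; };

static void toy_pmu_disable(struct toy_pmu *pmu)
{
	if (!pmu->disable_count++)	/* only the outermost call acts */
		puts("PMU hardware disabled");
}

static void toy_pmu_enable(struct toy_pmu *pmu)
{
	if (!--pmu->disable_count)	/* only the outermost call acts */
		puts("PMU hardware enabled");
}

int main(void)
{
	struct toy_pmu pmu = { 0 };
	int i;

	toy_pmu_disable(&pmu);		/* taken once, around the whole loop */
	for (i = 0; i < 3; i++) {
		toy_pmu_disable(&pmu);	/* nested: counted, hardware untouched */
		/* per-event stop/adjust/start would happen here */
		toy_pmu_enable(&pmu);
	}
	toy_pmu_enable(&pmu);		/* outermost enable re-arms the hardware */
	return 0;
}

Under that assumption, the per-event work inside the loop runs while the PMU stays globally disabled, and the hardware is re-enabled once at the end.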
@@ -4562,7 +4571,7 @@ static int __perf_event_overflow(struct perf_event *event,
 		hwc->freq_time_stamp = now;
 
 		if (delta > 0 && delta < 2*TICK_NSEC)
-			perf_adjust_period(event, delta, hwc->last_period);
+			perf_adjust_period(event, delta, hwc->last_period, true);
 	}
 
 	/*
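With the hunk above, both callers of perf_adjust_period() are visible: the unthrottling path has already stopped the event and passes disable=false, while the overflow path leaves the event running and passes disable=true, so the helper stops and restarts it itself. A small standalone sketch of that calling convention (the toy_* names are illustrative, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Toy model, not kernel code. */
struct toy_event { long long period_left; long long sample_period; };

static void toy_stop(struct toy_event *e)  { (void)e; puts("stop");  }
static void toy_start(struct toy_event *e) { (void)e; puts("start"); }

/* Mirrors the reshaped helper: stop/start only when the caller asks for it. */
static void toy_adjust_period(struct toy_event *e, long long sample_period,
			      bool disable)
{
	e->sample_period = sample_period;

	if (e->period_left > 8 * sample_period) {
		if (disable)
			toy_stop(e);

		e->period_left = 0;

		if (disable)
			toy_start(e);
	}
}

int main(void)
{
	struct toy_event e = { .period_left = 1000, .sample_period = 10 };

	/* unthrottle path: caller already stopped the event */
	toy_adjust_period(&e, 10, false);

	/* overflow path: event still running, helper must stop/start it */
	e.period_left = 1000;
	toy_adjust_period(&e, 10, true);
	return 0;
}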
@@ -1,2 +1,8 @@
 
 #include "../../../arch/x86/lib/memcpy_64.S"
+/*
+ * We need to provide note.GNU-stack section, saying that we want
+ * NOT executable stack. Otherwise the final linking will assume that
+ * the ELF stack should not be restricted at all and set it RWX.
+ */
+.section .note.GNU-stack,"",@progbits
@@ -554,7 +554,7 @@ static int perf_event__process_kernel_mmap(struct perf_tool *tool __used,
 
 	is_kernel_mmap = memcmp(event->mmap.filename,
 				kmmap_prefix,
-				strlen(kmmap_prefix)) == 0;
+				strlen(kmmap_prefix) - 1) == 0;
 	if (event->mmap.filename[0] == '/' ||
 	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
 
@@ -463,6 +463,7 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
 	memset(data, 0, sizeof(*data));
 	data->cpu = data->pid = data->tid = -1;
 	data->stream_id = data->id = data->time = -1ULL;
+	data->period = 1;
 
 	if (event->header.type != PERF_RECORD_SAMPLE) {
 		if (!sample_id_all)
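The one-line addition above gives the parsed sample a default period of 1. Read together with the "perf evsel: Fix an issue where perf report fails to show the proper percentage" entry in the merge summary, the likely intent (an inference from this hunk, not quoted from the changelog) is that samples recorded without a period field would otherwise keep the zero left by memset() and carry no weight. A toy illustration:

#include <stdio.h>

/* Toy illustration, not perf code: weighting samples by a parsed period. */
struct toy_sample { unsigned long long period; };

int main(void)
{
	struct toy_sample samples[4] = { {0}, {0}, {0}, {0} }; /* no period recorded */
	unsigned long long total = 0;
	int i;

	for (i = 0; i < 4; i++) {
		if (samples[i].period == 0)
			samples[i].period = 1;	/* the default the hunk introduces */
		total += samples[i].period;
	}

	/* Each sample now carries weight 1, so per-entry percentages
	 * (weight / total) come out as 25% instead of 0/0. */
	printf("total weight = %llu\n", total);
	return 0;
}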