Merge branch 'core' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile into perf/core

Pull oprofile fixlets from Robert Richter.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Committed by: Ingo Molnar, 2012-07-06 13:07:25 +02:00
Parents: 35c2f48c66 f8bbfd7d28
Commit: 1e27e575b2
1 changed file with 11 additions and 12 deletions


drivers/oprofile/oprofile_perf.c

@@ -1,5 +1,6 @@
 /*
  * Copyright 2010 ARM Ltd.
+ * Copyright 2012 Advanced Micro Devices, Inc., Robert Richter
  *
  * Perf-events backend for OProfile.
  */
@@ -25,7 +26,7 @@ static int oprofile_perf_enabled;
 static DEFINE_MUTEX(oprofile_perf_mutex);
 
 static struct op_counter_config *counter_config;
-static struct perf_event **perf_events[NR_CPUS];
+static DEFINE_PER_CPU(struct perf_event **, perf_events);
 static int num_counters;
 
 /*
@@ -38,7 +39,7 @@ static void op_overflow_handler(struct perf_event *event,
 	u32 cpu = smp_processor_id();
 
 	for (id = 0; id < num_counters; ++id)
-		if (perf_events[cpu][id] == event)
+		if (per_cpu(perf_events, cpu)[id] == event)
 			break;
 
 	if (id != num_counters)
@@ -74,7 +75,7 @@ static int op_create_counter(int cpu, int event)
 {
 	struct perf_event *pevent;
 
-	if (!counter_config[event].enabled || perf_events[cpu][event])
+	if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event])
 		return 0;
 
 	pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
@@ -91,18 +92,18 @@ static int op_create_counter(int cpu, int event)
 		return -EBUSY;
 	}
 
-	perf_events[cpu][event] = pevent;
+	per_cpu(perf_events, cpu)[event] = pevent;
 
 	return 0;
 }
 
 static void op_destroy_counter(int cpu, int event)
 {
-	struct perf_event *pevent = perf_events[cpu][event];
+	struct perf_event *pevent = per_cpu(perf_events, cpu)[event];
 
 	if (pevent) {
 		perf_event_release_kernel(pevent);
-		perf_events[cpu][event] = NULL;
+		per_cpu(perf_events, cpu)[event] = NULL;
 	}
 }
 
@@ -257,12 +258,12 @@ void oprofile_perf_exit(void)
 
 	for_each_possible_cpu(cpu) {
 		for (id = 0; id < num_counters; ++id) {
-			event = perf_events[cpu][id];
+			event = per_cpu(perf_events, cpu)[id];
 			if (event)
 				perf_event_release_kernel(event);
 		}
 
-		kfree(perf_events[cpu]);
+		kfree(per_cpu(perf_events, cpu));
 	}
 
 	kfree(counter_config);
@@ -277,8 +278,6 @@ int __init oprofile_perf_init(struct oprofile_operations *ops)
 	if (ret)
 		return ret;
 
-	memset(&perf_events, 0, sizeof(perf_events));
-
 	num_counters = perf_num_counters();
 	if (num_counters <= 0) {
 		pr_info("oprofile: no performance counters\n");
@@ -298,9 +297,9 @@ int __init oprofile_perf_init(struct oprofile_operations *ops)
 	}
 
 	for_each_possible_cpu(cpu) {
-		perf_events[cpu] = kcalloc(num_counters,
+		per_cpu(perf_events, cpu) = kcalloc(num_counters,
 			sizeof(struct perf_event *), GFP_KERNEL);
-		if (!perf_events[cpu]) {
+		if (!per_cpu(perf_events, cpu)) {
 			pr_info("oprofile: failed to allocate %d perf events "
 			       "for cpu %d\n", num_counters, cpu);
 			ret = -ENOMEM;
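
For reference, the diff follows the usual kernel per-CPU pattern: a static DEFINE_PER_CPU() pointer (zero-initialized, which is why the explicit memset() is dropped), per-CPU allocation under for_each_possible_cpu(), and access through per_cpu(). The sketch below is not part of the commit; it is a minimal, hypothetical module using invented demo_* names to show the same pattern in isolation.

/*
 * Illustrative sketch only (not from the commit): the DEFINE_PER_CPU +
 * per_cpu() allocation pattern the diff adopts, with hypothetical demo_*
 * names.
 */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#define DEMO_NR_SLOTS 4

/* One pointer per CPU, replacing a fixed [NR_CPUS] array. */
static DEFINE_PER_CPU(int *, demo_slots);

static int __init demo_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		/* Allocate this CPU's slot array, as the diff does with kcalloc(). */
		per_cpu(demo_slots, cpu) = kcalloc(DEMO_NR_SLOTS,
						   sizeof(int), GFP_KERNEL);
		if (!per_cpu(demo_slots, cpu))
			goto fail;
	}
	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(per_cpu(demo_slots, cpu));	/* kfree(NULL) is a no-op */
	return -ENOMEM;
}

static void __exit demo_exit(void)
{
	int cpu;

	/* Mirror oprofile_perf_exit(): free each CPU's array. */
	for_each_possible_cpu(cpu)
		kfree(per_cpu(demo_slots, cpu));
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Compared with the old "struct perf_event **perf_events[NR_CPUS]" array, the per-CPU variable is sized by the per-CPU framework rather than NR_CPUS and starts out zeroed, so no explicit memset() is needed before the per-CPU allocations.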