oprofile/x86: return -EBUSY if counters are already reserved
In case a counter is already reserved by the watchdog or perf_event subsystem, oprofile silently ignored that counter. This case is now handled, and oprofile_setup() reports an error. Signed-off-by: Robert Richter <robert.richter@amd.com>
This commit is contained in:
Parent
83300ce0df
Commit
8617f98c00
|
@ -357,7 +357,10 @@ static int nmi_setup(void)
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/* Assume saved/restored counters are the same on all CPUs */
|
/* Assume saved/restored counters are the same on all CPUs */
|
||||||
model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
|
err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
|
||||||
|
if (err)
|
||||||
|
goto fail;
|
||||||
|
|
||||||
for_each_possible_cpu(cpu) {
|
for_each_possible_cpu(cpu) {
|
||||||
if (!cpu)
|
if (!cpu)
|
||||||
continue;
|
continue;
|
||||||
|
|
|
@ -138,21 +138,30 @@ static void op_amd_shutdown(struct op_msrs const * const msrs)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
|
static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 0; i < NUM_COUNTERS; i++) {
|
for (i = 0; i < NUM_COUNTERS; i++) {
|
||||||
if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
|
if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
|
||||||
continue;
|
goto fail;
|
||||||
if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
|
if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
|
||||||
release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
|
release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
|
||||||
continue;
|
goto fail;
|
||||||
}
|
}
|
||||||
/* both registers must be reserved */
|
/* both registers must be reserved */
|
||||||
msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
|
msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
|
||||||
msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
|
msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
|
||||||
|
continue;
|
||||||
|
fail:
|
||||||
|
if (!counter_config[i].enabled)
|
||||||
|
continue;
|
||||||
|
op_x86_warn_reserved(i);
|
||||||
|
op_amd_shutdown(msrs);
|
||||||
|
return -EBUSY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
|
static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
|
||||||
|
@ -172,15 +181,8 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
|
||||||
|
|
||||||
/* clear all counters */
|
/* clear all counters */
|
||||||
for (i = 0; i < NUM_COUNTERS; ++i) {
|
for (i = 0; i < NUM_COUNTERS; ++i) {
|
||||||
if (unlikely(!msrs->controls[i].addr)) {
|
if (!msrs->controls[i].addr)
|
||||||
if (counter_config[i].enabled && !smp_processor_id())
|
|
||||||
/*
|
|
||||||
* counter is reserved, this is on all
|
|
||||||
* cpus, so report only for cpu #0
|
|
||||||
*/
|
|
||||||
op_x86_warn_reserved(i);
|
|
||||||
continue;
|
continue;
|
||||||
}
|
|
||||||
rdmsrl(msrs->controls[i].addr, val);
|
rdmsrl(msrs->controls[i].addr, val);
|
||||||
if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
|
if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
|
||||||
op_x86_warn_in_use(i);
|
op_x86_warn_in_use(i);
|
||||||
|
|
|
@ -404,7 +404,7 @@ static void p4_shutdown(struct op_msrs const * const msrs)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void p4_fill_in_addresses(struct op_msrs * const msrs)
|
static int p4_fill_in_addresses(struct op_msrs * const msrs)
|
||||||
{
|
{
|
||||||
unsigned int i;
|
unsigned int i;
|
||||||
unsigned int addr, cccraddr, stag;
|
unsigned int addr, cccraddr, stag;
|
||||||
|
@ -486,6 +486,18 @@ static void p4_fill_in_addresses(struct op_msrs * const msrs)
|
||||||
msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
|
msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for (i = 0; i < num_counters; ++i) {
|
||||||
|
if (!counter_config[i].enabled)
|
||||||
|
continue;
|
||||||
|
if (msrs->controls[i].addr)
|
||||||
|
continue;
|
||||||
|
op_x86_warn_reserved(i);
|
||||||
|
p4_shutdown(msrs);
|
||||||
|
return -EBUSY;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -46,21 +46,30 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void ppro_fill_in_addresses(struct op_msrs * const msrs)
|
static int ppro_fill_in_addresses(struct op_msrs * const msrs)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 0; i < num_counters; i++) {
|
for (i = 0; i < num_counters; i++) {
|
||||||
if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
|
if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
|
||||||
continue;
|
goto fail;
|
||||||
if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) {
|
if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) {
|
||||||
release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
|
release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
|
||||||
continue;
|
goto fail;
|
||||||
}
|
}
|
||||||
/* both registers must be reserved */
|
/* both registers must be reserved */
|
||||||
msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
|
msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
|
||||||
msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
|
msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
|
||||||
|
continue;
|
||||||
|
fail:
|
||||||
|
if (!counter_config[i].enabled)
|
||||||
|
continue;
|
||||||
|
op_x86_warn_reserved(i);
|
||||||
|
ppro_shutdown(msrs);
|
||||||
|
return -EBUSY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -96,15 +105,8 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
|
||||||
|
|
||||||
/* clear all counters */
|
/* clear all counters */
|
||||||
for (i = 0; i < num_counters; ++i) {
|
for (i = 0; i < num_counters; ++i) {
|
||||||
if (unlikely(!msrs->controls[i].addr)) {
|
if (!msrs->controls[i].addr)
|
||||||
if (counter_config[i].enabled && !smp_processor_id())
|
|
||||||
/*
|
|
||||||
* counter is reserved, this is on all
|
|
||||||
* cpus, so report only for cpu #0
|
|
||||||
*/
|
|
||||||
op_x86_warn_reserved(i);
|
|
||||||
continue;
|
continue;
|
||||||
}
|
|
||||||
rdmsrl(msrs->controls[i].addr, val);
|
rdmsrl(msrs->controls[i].addr, val);
|
||||||
if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
|
if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
|
||||||
op_x86_warn_in_use(i);
|
op_x86_warn_in_use(i);
|
||||||
|
|
|
@ -41,7 +41,7 @@ struct op_x86_model_spec {
|
||||||
u16 event_mask;
|
u16 event_mask;
|
||||||
int (*init)(struct oprofile_operations *ops);
|
int (*init)(struct oprofile_operations *ops);
|
||||||
void (*exit)(void);
|
void (*exit)(void);
|
||||||
void (*fill_in_addresses)(struct op_msrs * const msrs);
|
int (*fill_in_addresses)(struct op_msrs * const msrs);
|
||||||
void (*setup_ctrs)(struct op_x86_model_spec const *model,
|
void (*setup_ctrs)(struct op_x86_model_spec const *model,
|
||||||
struct op_msrs const * const msrs);
|
struct op_msrs const * const msrs);
|
||||||
int (*check_ctrs)(struct pt_regs * const regs,
|
int (*check_ctrs)(struct pt_regs * const regs,
|
||||||
|
|
Loading…
Reference in a new issue