x86, bts: fix race when bts tracer is removed

When the bts tracer is removed while the traced task is running,
the write to clear the bts tracer pointer races with context switch code.

Read the tracer once during a context switch.
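
The fix is the usual single-read idiom: take one local copy of the shared tracer pointer, add a compiler barrier so the compiler cannot legally re-read it, and dereference only that copy afterwards. A minimal userspace sketch of the idiom, assuming GCC/Clang inline asm; barrier() is redefined locally, and the names tracer_slot, take_timestamp and the 0x1 flag value are illustrative, not taken from the patch:

#include <stdio.h>

/* Compiler barrier: keeps the compiler from caching or re-reading
 * memory across this point (GCC/Clang inline asm). */
#define barrier() __asm__ __volatile__("" ::: "memory")

struct tracer {
	unsigned int flags;
};

/* Shared slot that another context may clear at any time; in the
 * patch this role is played by context->bts_master. */
static struct tracer *tracer_slot;

static void take_timestamp(void)
{
	/* Read the shared pointer exactly once into a local. */
	struct tracer *tracer = tracer_slot;

	/* Do not let the compiler reload tracer_slot below. */
	barrier();

	if (!tracer || !(tracer->flags & 0x1))
		return;

	/* Only the local copy is dereferenced from here on, so a
	 * concurrent clear of tracer_slot cannot fault us. */
	printf("flags: %#x\n", tracer->flags);
}

int main(void)
{
	static struct tracer t = { .flags = 0x1 };

	tracer_slot = &t;
	take_timestamp();

	tracer_slot = NULL;	/* tracer removed */
	take_timestamp();	/* safely does nothing */
	return 0;
}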

When a new tracer is installed, the bts tracer is set in the ds context
before the tracer is initialized in order to claim the context for that
tracer.

This may result in write accesses using an uninitialized trace configuration
when scheduling timestamps have been requested.

Store active tracing flags separately and only set active flags after
the tracing configuration has been initialized.
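
Concretely, the new per-tracer flags field starts out clear, ds_resume_bts() copies the configured flags into it only once the configuration is complete, and ds_suspend_bts() clears it again before anything is torn down; the context-switch path tests only this field. A simplified, self-contained sketch of that ordering, where the structures and the BTS_TIMESTAMPS value are stand-ins for the kernel definitions, not the real ones:

#include <stdio.h>

#define BTS_TIMESTAMPS 0x1	/* illustrative flag value */

/* Simplified stand-ins for the kernel structures. */
struct bts_trace {
	unsigned int flags;	/* requested configuration */
};

struct bts_tracer {
	struct bts_trace trace;
	unsigned int flags;	/* active flags, tested at context switch */
};

/* Resume: finish the configuration first, publish the active flags last,
 * so timestamps are never taken against a half-initialized setup. */
static void resume_sketch(struct bts_tracer *tracer)
{
	if (!tracer)
		return;
	/* ... program the trace configuration ... */
	tracer->flags = tracer->trace.flags;
}

/* Suspend: clear the active flags before anything else, so the
 * context-switch path stops taking timestamps immediately. */
static void suspend_sketch(struct bts_tracer *tracer)
{
	if (!tracer)
		return;
	tracer->flags = 0;
	/* ... disable branch tracing ... */
}

int main(void)
{
	struct bts_tracer t = { .trace = { .flags = BTS_TIMESTAMPS } };

	resume_sketch(&t);
	printf("active: %#x\n", t.flags);	/* 0x1 */
	suspend_sketch(&t);
	printf("active: %#x\n", t.flags);	/* 0 */
	return 0;
}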

Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Markus Metzger <markus.t.metzger@intel.com>
Cc: roland@redhat.com
Cc: eranian@googlemail.com
Cc: juan.villacis@intel.com
Cc: ak@linux.jf.intel.com
LKML-Reference: <20090403144548.881338000@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Markus Metzger, 2009-04-03 16:43:33 +02:00, committed by Ingo Molnar
Parent 2e8844e13a
Commit cac94f9793
1 changed file with 37 additions and 21 deletions


@@ -89,6 +89,9 @@ struct bts_tracer {
 	/* Buffer overflow notification function: */
 	bts_ovfl_callback_t	ovfl;
+
+	/* Active flags affecting trace collection. */
+	unsigned int		flags;
 };
 
 struct pebs_tracer {
@@ -799,6 +802,8 @@ void ds_suspend_bts(struct bts_tracer *tracer)
 	if (!tracer)
 		return;
 
+	tracer->flags = 0;
+
 	task = tracer->ds.context->task;
 
 	if (!task || (task == current))
@@ -820,6 +825,8 @@ void ds_resume_bts(struct bts_tracer *tracer)
 	if (!tracer)
 		return;
 
+	tracer->flags = tracer->trace.ds.flags;
+
 	task = tracer->ds.context->task;
 
 	control = ds_cfg.ctl[dsf_bts];
@@ -1037,43 +1044,52 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
 	}
 }
 
+static inline void ds_take_timestamp(struct ds_context *context,
+				     enum bts_qualifier qualifier,
+				     struct task_struct *task)
+{
+	struct bts_tracer *tracer = context->bts_master;
+	struct bts_struct ts;
+
+	/* Prevent compilers from reading the tracer pointer twice. */
+	barrier();
+
+	if (!tracer || !(tracer->flags & BTS_TIMESTAMPS))
+		return;
+
+	memset(&ts, 0, sizeof(ts));
+	ts.qualifier			= qualifier;
+	ts.variant.timestamp.jiffies	= jiffies_64;
+	ts.variant.timestamp.pid	= task->pid;
+
+	bts_write(tracer, &ts);
+}
+
 /*
  * Change the DS configuration from tracing prev to tracing next.
  */
 void ds_switch_to(struct task_struct *prev, struct task_struct *next)
 {
-	struct ds_context *prev_ctx = prev->thread.ds_ctx;
-	struct ds_context *next_ctx = next->thread.ds_ctx;
+	struct ds_context *prev_ctx	= prev->thread.ds_ctx;
+	struct ds_context *next_ctx	= next->thread.ds_ctx;
+	unsigned long debugctlmsr	= next->thread.debugctlmsr;
+
+	/* Make sure all data is read before we start. */
+	barrier();
 
 	if (prev_ctx) {
 		update_debugctlmsr(0);
 
-		if (prev_ctx->bts_master &&
-		    (prev_ctx->bts_master->trace.ds.flags & BTS_TIMESTAMPS)) {
-			struct bts_struct ts = {
-				.qualifier = bts_task_departs,
-				.variant.timestamp.jiffies = jiffies_64,
-				.variant.timestamp.pid = prev->pid
-			};
-			bts_write(prev_ctx->bts_master, &ts);
-		}
+		ds_take_timestamp(prev_ctx, bts_task_departs, prev);
 	}
 
 	if (next_ctx) {
-		if (next_ctx->bts_master &&
-		    (next_ctx->bts_master->trace.ds.flags & BTS_TIMESTAMPS)) {
-			struct bts_struct ts = {
-				.qualifier = bts_task_arrives,
-				.variant.timestamp.jiffies = jiffies_64,
-				.variant.timestamp.pid = next->pid
-			};
-			bts_write(next_ctx->bts_master, &ts);
-		}
+		ds_take_timestamp(next_ctx, bts_task_arrives, next);
 
 		wrmsrl(MSR_IA32_DS_AREA, (unsigned long)next_ctx->ds);
 	}
 
-	update_debugctlmsr(next->thread.debugctlmsr);
+	update_debugctlmsr(debugctlmsr);
 }
 
 void ds_copy_thread(struct task_struct *tsk, struct task_struct *father)