rcu: Add expedited-grace-period event tracing

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
This commit is contained in:
Paul E. McKenney 2016-01-28 20:49:49 -08:00
Parent: bea2de44ae
Commit: 4f41530245
2 changed files, 16 insertions(+), 7 deletions(-)

View file (kernel/rcu/tree.c)

@@ -3584,17 +3584,18 @@ static bool sync_exp_work_done(struct rcu_state *rsp, struct rcu_node *rnp,
 			       atomic_long_t *stat, unsigned long s)
 {
 	if (rcu_exp_gp_seq_done(rsp, s)) {
+		trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
 		if (rnp) {
-			mutex_unlock(&rnp->exp_funnel_mutex);
 			trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
 						  rnp->grplo, rnp->grphi,
 						  TPS("rel"));
+			mutex_unlock(&rnp->exp_funnel_mutex);
 		} else if (rdp) {
-			mutex_unlock(&rdp->exp_funnel_mutex);
 			trace_rcu_exp_funnel_lock(rsp->name,
 						  rdp->mynode->level + 1,
 						  rdp->cpu, rdp->cpu,
 						  TPS("rel"));
+			mutex_unlock(&rdp->exp_funnel_mutex);
 		}
 		/* Ensure test happens before caller kfree(). */
 		smp_mb__before_atomic(); /* ^^^ */
@@ -3624,12 +3625,12 @@ static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 	rnp0 = rcu_get_root(rsp);
 	if (!mutex_is_locked(&rnp0->exp_funnel_mutex)) {
 		if (mutex_trylock(&rnp0->exp_funnel_mutex)) {
-			if (sync_exp_work_done(rsp, rnp0, NULL,
-					       &rdp->expedited_workdone0, s))
-				return NULL;
 			trace_rcu_exp_funnel_lock(rsp->name, rnp0->level,
 						  rnp0->grplo, rnp0->grphi,
 						  TPS("acq"));
+			if (sync_exp_work_done(rsp, rnp0, NULL,
+					       &rdp->expedited_workdone0, s))
+				return NULL;
 			return rnp0;
 		}
 	}
@@ -3656,16 +3657,16 @@ static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 		trace_rcu_exp_funnel_lock(rsp->name, rnp0->level,
 					  rnp0->grplo, rnp0->grphi, TPS("acq"));
 		if (rnp1) {
-			mutex_unlock(&rnp1->exp_funnel_mutex);
 			trace_rcu_exp_funnel_lock(rsp->name, rnp1->level,
 						  rnp1->grplo, rnp1->grphi,
 						  TPS("rel"));
+			mutex_unlock(&rnp1->exp_funnel_mutex);
 		} else {
-			mutex_unlock(&rdp->exp_funnel_mutex);
 			trace_rcu_exp_funnel_lock(rsp->name,
 						  rdp->mynode->level + 1,
 						  rdp->cpu, rdp->cpu,
 						  TPS("rel"));
+			mutex_unlock(&rdp->exp_funnel_mutex);
 		}
 		rnp1 = rnp0;
 	}
@@ -3895,16 +3896,21 @@ void synchronize_sched_expedited(void)
 
 	/* Take a snapshot of the sequence number. */
 	s = rcu_exp_gp_seq_snap(rsp);
+	trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
 
 	rnp = exp_funnel_lock(rsp, s);
 	if (rnp == NULL)
 		return;  /* Someone else did our work for us. */
 
 	rcu_exp_gp_seq_start(rsp);
+	trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
 	sync_rcu_exp_select_cpus(rsp, sync_sched_exp_handler);
 	synchronize_sched_expedited_wait(rsp);
 
 	rcu_exp_gp_seq_end(rsp);
+	trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
+	trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
+				  rnp->grplo, rnp->grphi, TPS("rel"));
 	mutex_unlock(&rnp->exp_funnel_mutex);
 }
 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);

View file (kernel/rcu/tree_plugin.h)

@@ -750,12 +750,14 @@ void synchronize_rcu_expedited(void)
 	}
 
 	s = rcu_exp_gp_seq_snap(rsp);
+	trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
 
 	rnp_unlock = exp_funnel_lock(rsp, s);
 	if (rnp_unlock == NULL)
 		return;  /* Someone else did our work for us. */
 
 	rcu_exp_gp_seq_start(rsp);
+	trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
 
 	/* Initialize the rcu_node tree in preparation for the wait. */
 	sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);
@@ -766,6 +768,7 @@ void synchronize_rcu_expedited(void)
 
 	/* Clean up and exit. */
 	rcu_exp_gp_seq_end(rsp);
+	trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
 	mutex_unlock(&rnp_unlock->exp_funnel_mutex);
 	trace_rcu_exp_funnel_lock(rsp->name, rnp_unlock->level,
 				  rnp_unlock->grplo, rnp_unlock->grphi,