rcu: Remove rcu_state_p pointer to default rcu_state structure

The rcu_state_p pointer references the default rcu_state structure,
that is, the one that call_rcu() uses, as opposed to call_rcu_bh()
and sometimes call_rcu_sched().  But there is now only one rcu_state
structure, so that one structure is by definition the default, which
means that the rcu_state_p pointer no longer serves any useful purpose.
This commit therefore removes it.
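
The change is purely mechanical: every read through rcu_state_p becomes a
direct reference to rcu_state, and every rcu_state_p argument becomes
&rcu_state.  A minimal standalone sketch of the before/after pattern (the
gp_seq_before()/gp_seq_after() helpers are hypothetical, for illustration
only, and are not kernel code):

	/* Illustrative sketch only; not the kernel's code. */
	struct rcu_state { unsigned long gp_seq; };
	static struct rcu_state rcu_state;	/* Now the one and only instance. */

	/* Before: indirection through a pointer to the sole instance. */
	static struct rcu_state *const rcu_state_p = &rcu_state;
	static unsigned long gp_seq_before(void)
	{
		return rcu_state_p->gp_seq;	/* Extra name for the same object. */
	}

	/* After: reference the structure directly. */
	static unsigned long gp_seq_after(void)
	{
		return rcu_state.gp_seq;
	}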

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Paul E. McKenney, 2018-07-03 15:54:39 -07:00
Parent: da1df50d16
Commit: 16fc9c600b
3 changed files: 21 additions and 24 deletions

kernel/rcu/tree.c

@@ -85,7 +85,6 @@ struct rcu_state rcu_state = {
 	.ofl_lock = __SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
 };
-static struct rcu_state *const rcu_state_p = &rcu_state;
 static struct rcu_data __percpu *const rcu_data_p = &rcu_data;
 LIST_HEAD(rcu_struct_flavors);
@@ -491,7 +490,7 @@ static int rcu_pending(void);
  */
 unsigned long rcu_get_gp_seq(void)
 {
-	return READ_ONCE(rcu_state_p->gp_seq);
+	return READ_ONCE(rcu_state.gp_seq);
 }
 EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
@@ -510,7 +509,7 @@ EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq);
  */
 unsigned long rcu_bh_get_gp_seq(void)
 {
-	return READ_ONCE(rcu_state_p->gp_seq);
+	return READ_ONCE(rcu_state.gp_seq);
 }
 EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq);
@@ -522,7 +521,7 @@ EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq);
  */
 unsigned long rcu_exp_batches_completed(void)
 {
-	return rcu_state_p->expedited_sequence;
+	return rcu_state.expedited_sequence;
 }
 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
@@ -541,7 +540,7 @@ EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
  */
 void rcu_force_quiescent_state(void)
 {
-	force_quiescent_state(rcu_state_p);
+	force_quiescent_state(&rcu_state);
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
@@ -550,7 +549,7 @@ EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
  */
 void rcu_bh_force_quiescent_state(void)
 {
-	force_quiescent_state(rcu_state_p);
+	force_quiescent_state(&rcu_state);
 }
 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
@@ -611,7 +610,7 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
 	case RCU_FLAVOR:
 	case RCU_BH_FLAVOR:
 	case RCU_SCHED_FLAVOR:
-		rsp = rcu_state_p;
+		rsp = &rcu_state;
 		break;
 	default:
 		break;
@@ -2292,7 +2291,6 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
 	raw_lockdep_assert_held_rcu_node(rnp);
 	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT)) ||
-	    WARN_ON_ONCE(rsp != rcu_state_p) ||
 	    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
 	    rnp->qsmask != 0) {
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -2604,7 +2602,6 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->qsmask == 0) {
 			if (!IS_ENABLED(CONFIG_PREEMPT) ||
-			    rsp != rcu_state_p ||
 			    rcu_preempt_blocked_readers_cgp(rnp)) {
 				/*
 				 * No point in scanning bits because they
@@ -2973,7 +2970,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
  */
 void call_rcu(struct rcu_head *head, rcu_callback_t func)
 {
-	__call_rcu(head, func, rcu_state_p, -1, 0);
+	__call_rcu(head, func, &rcu_state, -1, 0);
 }
 EXPORT_SYMBOL_GPL(call_rcu);
@@ -3000,7 +2997,7 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
 void kfree_call_rcu(struct rcu_head *head,
 		    rcu_callback_t func)
 {
-	__call_rcu(head, func, rcu_state_p, -1, 1);
+	__call_rcu(head, func, &rcu_state, -1, 1);
 }
 EXPORT_SYMBOL_GPL(kfree_call_rcu);
@@ -3029,7 +3026,7 @@ unsigned long get_state_synchronize_rcu(void)
 	 * before the load from ->gp_seq.
 	 */
 	smp_mb(); /* ^^^ */
-	return rcu_seq_snap(&rcu_state_p->gp_seq);
+	return rcu_seq_snap(&rcu_state.gp_seq);
 }
 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
@@ -3049,7 +3046,7 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
  */
 void cond_synchronize_rcu(unsigned long oldstate)
 {
-	if (!rcu_seq_done(&rcu_state_p->gp_seq, oldstate))
+	if (!rcu_seq_done(&rcu_state.gp_seq, oldstate))
 		synchronize_rcu();
 	else
 		smp_mb(); /* Ensure GP ends before subsequent accesses. */
@@ -3308,7 +3305,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
  */
 void rcu_barrier_bh(void)
 {
-	_rcu_barrier(rcu_state_p);
+	_rcu_barrier(&rcu_state);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
@@ -3322,7 +3319,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_bh);
  */
 void rcu_barrier(void)
 {
-	_rcu_barrier(rcu_state_p);
+	_rcu_barrier(&rcu_state);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier);

kernel/rcu/tree_exp.h

@@ -756,7 +756,7 @@ static void sync_sched_exp_online_cleanup(int cpu)
  */
 void synchronize_rcu_expedited(void)
 {
-	struct rcu_state *rsp = rcu_state_p;
+	struct rcu_state *rsp = &rcu_state;
 
 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
 			 lock_is_held(&rcu_lock_map) ||

kernel/rcu/tree_plugin.h

@@ -381,7 +381,7 @@ void rcu_note_context_switch(bool preempt)
 	 */
 	rcu_qs();
 	if (rdp->deferred_qs)
-		rcu_report_exp_rdp(rcu_state_p, rdp);
+		rcu_report_exp_rdp(&rcu_state, rdp);
 	trace_rcu_utilization(TPS("End context switch"));
 	barrier(); /* Avoid RCU read-side critical sections leaking up. */
 }
@@ -509,7 +509,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 	 * blocked-tasks list below.
 	 */
 	if (rdp->deferred_qs) {
-		rcu_report_exp_rdp(rcu_state_p, rdp);
+		rcu_report_exp_rdp(&rcu_state, rdp);
 		if (!t->rcu_read_unlock_special.s) {
 			local_irq_restore(flags);
 			return;
@@ -566,7 +566,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 							 rnp->grplo,
 							 rnp->grphi,
 							 !!rnp->gp_tasks);
-			rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags);
+			rcu_report_unblock_qs_rnp(&rcu_state, rnp, flags);
 		} else {
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
@@ -580,7 +580,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 		 * then we need to report up the rcu_node hierarchy.
 		 */
 		if (!empty_exp && empty_exp_now)
-			rcu_report_exp_rnp(rcu_state_p, rnp, true);
+			rcu_report_exp_rnp(&rcu_state, rnp, true);
 	} else {
 		local_irq_restore(flags);
 	}
@@ -1300,7 +1300,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	struct sched_param sp;
 	struct task_struct *t;
 
-	if (rcu_state_p != rsp)
+	if (&rcu_state != rsp)
 		return 0;
 
 	if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
@@ -1431,8 +1431,8 @@ static void __init rcu_spawn_boost_kthreads(void)
 	for_each_possible_cpu(cpu)
 		per_cpu(rcu_cpu_has_work, cpu) = 0;
 	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
-	rcu_for_each_leaf_node(rcu_state_p, rnp)
-		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
+	rcu_for_each_leaf_node(&rcu_state, rnp)
+		(void)rcu_spawn_one_boost_kthread(&rcu_state, rnp);
 }
 
 static void rcu_prepare_kthreads(int cpu)
@@ -1442,7 +1442,7 @@ static void rcu_prepare_kthreads(int cpu)
 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
 	if (rcu_scheduler_fully_active)
-		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
+		(void)rcu_spawn_one_boost_kthread(&rcu_state, rnp);
 }
 
 #else /* #ifdef CONFIG_RCU_BOOST */