rcu: Convert from rcu_preempt_state to *rcu_state_p

It would be good to move more code from #ifdef to IS_ENABLED(), but
that does not work if the body of the IS_ENABLED() "if" statement
references a variable (such as rcu_preempt_state) that does not
exist if the IS_ENABLED() Kconfig variable is not set.  This commit
therefore substitutes *rcu_state_p for all uses of rcu_preempt_state
in kernel/rcu/tree_plugin.h, which should enable elimination of
a few #ifdefs.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
This commit is contained in:
Paul E. McKenney 2015-03-03 12:56:43 -08:00
Parent: 7d0ae8086b
Commit: e63c887cfe
1 changed file: 9 additions and 10 deletions

View file

@ -150,7 +150,7 @@ static void rcu_preempt_note_context_switch(void)
!t->rcu_read_unlock_special.b.blocked) { !t->rcu_read_unlock_special.b.blocked) {
/* Possibly blocking in an RCU read-side critical section. */ /* Possibly blocking in an RCU read-side critical section. */
rdp = this_cpu_ptr(rcu_preempt_state.rda); rdp = this_cpu_ptr(rcu_state_p->rda);
rnp = rdp->mynode; rnp = rdp->mynode;
raw_spin_lock_irqsave(&rnp->lock, flags); raw_spin_lock_irqsave(&rnp->lock, flags);
smp_mb__after_unlock_lock(); smp_mb__after_unlock_lock();
@ -353,8 +353,7 @@ void rcu_read_unlock_special(struct task_struct *t)
rnp->grplo, rnp->grplo,
rnp->grphi, rnp->grphi,
!!rnp->gp_tasks); !!rnp->gp_tasks);
rcu_report_unblock_qs_rnp(&rcu_preempt_state, rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags);
rnp, flags);
} else { } else {
raw_spin_unlock_irqrestore(&rnp->lock, flags); raw_spin_unlock_irqrestore(&rnp->lock, flags);
} }
@ -370,7 +369,7 @@ void rcu_read_unlock_special(struct task_struct *t)
* then we need to report up the rcu_node hierarchy. * then we need to report up the rcu_node hierarchy.
*/ */
if (!empty_exp && empty_exp_now) if (!empty_exp && empty_exp_now)
rcu_report_exp_rnp(&rcu_preempt_state, rnp, true); rcu_report_exp_rnp(rcu_state_p, rnp, true);
} else { } else {
local_irq_restore(flags); local_irq_restore(flags);
} }
@ -500,7 +499,7 @@ static void rcu_preempt_check_callbacks(void)
static void rcu_preempt_do_callbacks(void) static void rcu_preempt_do_callbacks(void)
{ {
rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data)); rcu_do_batch(rcu_state_p, this_cpu_ptr(&rcu_preempt_data));
} }
#endif /* #ifdef CONFIG_RCU_BOOST */ #endif /* #ifdef CONFIG_RCU_BOOST */
@ -510,7 +509,7 @@ static void rcu_preempt_do_callbacks(void)
*/ */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{ {
__call_rcu(head, func, &rcu_preempt_state, -1, 0); __call_rcu(head, func, rcu_state_p, -1, 0);
} }
EXPORT_SYMBOL_GPL(call_rcu); EXPORT_SYMBOL_GPL(call_rcu);
@ -711,7 +710,7 @@ sync_rcu_preempt_exp_init2(struct rcu_state *rsp, struct rcu_node *rnp)
void synchronize_rcu_expedited(void) void synchronize_rcu_expedited(void)
{ {
struct rcu_node *rnp; struct rcu_node *rnp;
struct rcu_state *rsp = &rcu_preempt_state; struct rcu_state *rsp = rcu_state_p;
unsigned long snap; unsigned long snap;
int trycount = 0; int trycount = 0;
@ -798,7 +797,7 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
*/ */
void rcu_barrier(void) void rcu_barrier(void)
{ {
_rcu_barrier(&rcu_preempt_state); _rcu_barrier(rcu_state_p);
} }
EXPORT_SYMBOL_GPL(rcu_barrier); EXPORT_SYMBOL_GPL(rcu_barrier);
@ -807,7 +806,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier);
*/ */
static void __init __rcu_init_preempt(void) static void __init __rcu_init_preempt(void)
{ {
rcu_init_one(&rcu_preempt_state, &rcu_preempt_data); rcu_init_one(rcu_state_p, &rcu_preempt_data);
} }
/* /*
@ -1172,7 +1171,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
struct sched_param sp; struct sched_param sp;
struct task_struct *t; struct task_struct *t;
if (&rcu_preempt_state != rsp) if (rcu_state_p != rsp)
return 0; return 0;
if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0) if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)