rcu: Rename ->onofflock to ->orphan_lock

The ->onofflock field in the rcu_state structure at one time synchronized
CPU-hotplug operations for RCU.  However, its scope has decreased over time
so that it now only protects the lists of orphaned RCU callbacks.  This
commit therefore renames it to ->orphan_lock to reflect its current use.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
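For readers skimming the diff, here is a minimal, self-contained sketch of the locking pattern the commit message describes: a single spinlock that now guards only the orphaned-callback lists. The names echo the kernel identifiers touched by this commit (orphan_lock, orphan_nxtlist, orphan_nxttail, rcu_send_cbs_to_orphanage), but the demo_ types and the pthread spinlock are simplified stand-ins, not the kernel's actual rcu_state machinery.

#include <pthread.h>
#include <stdio.h>

/* A toy callback, standing in for struct rcu_head. */
struct demo_cb {
	struct demo_cb *next;
};

/* Reduced stand-in for the orphaned-callback fields of struct rcu_state. */
struct demo_state {
	pthread_spinlock_t orphan_lock;  /* Protects the two fields below. */
	struct demo_cb *orphan_nxtlist;  /* Orphaned callbacks awaiting a grace period. */
	struct demo_cb **orphan_nxttail; /* Tail of the above list. */
};

/*
 * Move an offline CPU's pending callbacks onto the shared orphan list,
 * mirroring what rcu_send_cbs_to_orphanage() does under ->orphan_lock.
 */
static void demo_send_cbs_to_orphanage(struct demo_state *sp, struct demo_cb *list)
{
	pthread_spin_lock(&sp->orphan_lock);
	while (list) {
		struct demo_cb *next = list->next;

		list->next = NULL;
		*sp->orphan_nxttail = list;       /* Append at the current tail... */
		sp->orphan_nxttail = &list->next; /* ...and advance the tail pointer. */
		list = next;
	}
	pthread_spin_unlock(&sp->orphan_lock);
}

int main(void)
{
	struct demo_state st;
	struct demo_cb a = { 0 }, b = { 0 };
	int n = 0;

	pthread_spin_init(&st.orphan_lock, PTHREAD_PROCESS_PRIVATE);
	st.orphan_nxtlist = NULL;
	st.orphan_nxttail = &st.orphan_nxtlist;

	a.next = &b;                      /* A two-element "per-CPU" list. */
	demo_send_cbs_to_orphanage(&st, &a);

	for (struct demo_cb *p = st.orphan_nxtlist; p; p = p->next)
		n++;
	printf("%d callbacks now on the orphan list\n", n);

	pthread_spin_destroy(&st.orphan_lock);
	return 0;
}

In the kernel itself the list splicing is done wholesale rather than one callback at a time, and the adopting side (rcu_adopt_orphan_cbs) runs under the same lock; the point of the sketch is only that ->orphan_lock serializes access to these lists and nothing else.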
Author:       Paul E. McKenney
Date:         2012-10-08 10:54:03 -07:00
Committed by: Paul E. McKenney
Parent:       489832609a
Commit:       7b2e6011f1
3 changed files with 11 additions and 11 deletions


@@ -70,7 +70,7 @@ static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
 	.fqs_state = RCU_GP_IDLE, \
 	.gpnum = -300, \
 	.completed = -300, \
-	.onofflock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.onofflock), \
+	.orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
 	.orphan_nxttail = &sname##_state.orphan_nxtlist, \
 	.orphan_donetail = &sname##_state.orphan_donelist, \
 	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
@@ -1573,7 +1573,7 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 /*
  * Send the specified CPU's RCU callbacks to the orphanage. The
  * specified CPU must be offline, and the caller must hold the
- * ->onofflock.
+ * ->orphan_lock.
  */
 static void
 rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
@@ -1623,7 +1623,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
 /*
  * Adopt the RCU callbacks from the specified rcu_state structure's
- * orphanage. The caller must hold the ->onofflock.
+ * orphanage. The caller must hold the ->orphan_lock.
  */
 static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
 {
@@ -1702,7 +1702,7 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 	/* Exclude any attempts to start a new grace period. */
 	mutex_lock(&rsp->onoff_mutex);
-	raw_spin_lock_irqsave(&rsp->onofflock, flags);
+	raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
 	/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
 	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
@@ -1729,10 +1729,10 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 	/*
 	 * We still hold the leaf rcu_node structure lock here, and
 	 * irqs are still disabled. The reason for this subterfuge is
-	 * because invoking rcu_report_unblock_qs_rnp() with ->onofflock
+	 * because invoking rcu_report_unblock_qs_rnp() with ->orphan_lock
 	 * held leads to deadlock.
 	 */
-	raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
+	raw_spin_unlock(&rsp->orphan_lock); /* irqs remain disabled. */
 	rnp = rdp->mynode;
 	if (need_report & RCU_OFL_TASKS_NORM_GP)
 		rcu_report_unblock_qs_rnp(rnp, flags);


@@ -383,9 +383,8 @@ struct rcu_state {
 	/* End of fields guarded by root rcu_node's lock. */
-	raw_spinlock_t onofflock ____cacheline_internodealigned_in_smp;
-						/* exclude on/offline and */
-						/* starting new GP. */
+	raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
+						/* Protect following fields. */
 	struct rcu_head *orphan_nxtlist;	/* Orphaned callbacks that */
 						/* need a grace period. */
 	struct rcu_head **orphan_nxttail;	/* Tail of above. */
@@ -394,7 +393,7 @@ struct rcu_state {
 	struct rcu_head **orphan_donetail;	/* Tail of above. */
 	long qlen_lazy;				/* Number of lazy callbacks. */
 	long qlen;				/* Total number of callbacks. */
-						/* End of fields guarded by onofflock. */
+						/* End of fields guarded by orphan_lock. */
 	struct mutex onoff_mutex;		/* Coordinate hotplug & GPs. */


@@ -757,7 +757,8 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
  * grace period for the specified rcu_node structure. If there are no such
  * tasks, report it up the rcu_node hierarchy.
  *
- * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
+ * Caller must hold sync_rcu_preempt_exp_mutex and must exclude
+ * CPU hotplug operations.
  */
 static void
 sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)