Merge branch 'idle.2013.09.25a' into HEAD
idle.2013.09.25a: Topic branch for idle entry-/exit-related changes.
This commit is contained in: commit 2529973309

@@ -261,6 +261,10 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
 		rcu_irq_exit(); \
 	} while (0)
 
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
+extern bool __rcu_is_watching(void);
+#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
+
 /*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.

@@ -297,10 +301,6 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
 }
 #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
-#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP)
-extern int rcu_is_cpu_idle(void);
-#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */
-
 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
 bool rcu_lockdep_current_cpu_online(void);
 #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */

@@ -351,7 +351,7 @@ static inline int rcu_read_lock_held(void)
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		return 0;
 	if (!rcu_lockdep_current_cpu_online())
 		return 0;

@@ -402,7 +402,7 @@ static inline int rcu_read_lock_sched_held(void)
 
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		return 0;
 	if (!rcu_lockdep_current_cpu_online())
 		return 0;

@@ -771,7 +771,7 @@ static inline void rcu_read_lock(void)
 	__rcu_read_lock();
 	__acquire(RCU);
 	rcu_lock_acquire(&rcu_lock_map);
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_lock() used illegally while idle");
 }
 
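
The assertion above fires when rcu_read_lock() is used from the idle loop, where RCU is not watching the CPU. As a rough illustration only (not code from this commit; the helper names are hypothetical), the usual remedy is the RCU_NONIDLE() macro, whose tail (rcu_irq_exit(); / } while (0)) appears as context in the first rcupdate.h hunk above:

#include <linux/rcupdate.h>

/* Hypothetical function called from the idle loop. */
static void idle_report_event(void)
{
	/*
	 * A bare rcu_read_lock()/rcu_read_unlock() here would trigger
	 * "rcu_read_lock() used illegally while idle".  RCU_NONIDLE()
	 * brackets the wrapped statement with rcu_irq_enter() and
	 * rcu_irq_exit(), so RCU is watching for the duration of
	 * do_report() (hypothetical) and any read-side critical
	 * sections inside it are legal.
	 */
	RCU_NONIDLE(do_report());
}
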
@@ -792,7 +792,7 @@ static inline void rcu_read_lock(void)
  */
 static inline void rcu_read_unlock(void)
 {
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_unlock() used illegally while idle");
 	rcu_lock_release(&rcu_lock_map);
 	__release(RCU);

@@ -821,7 +821,7 @@ static inline void rcu_read_lock_bh(void)
 	local_bh_disable();
 	__acquire(RCU_BH);
 	rcu_lock_acquire(&rcu_bh_lock_map);
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_lock_bh() used illegally while idle");
 }
 
@@ -832,7 +832,7 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_unlock_bh() used illegally while idle");
 	rcu_lock_release(&rcu_bh_lock_map);
 	__release(RCU_BH);

@@ -857,7 +857,7 @@ static inline void rcu_read_lock_sched(void)
 	preempt_disable();
 	__acquire(RCU_SCHED);
 	rcu_lock_acquire(&rcu_sched_lock_map);
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_lock_sched() used illegally while idle");
 }
 
@@ -875,7 +875,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
  */
 static inline void rcu_read_unlock_sched(void)
 {
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_unlock_sched() used illegally while idle");
 	rcu_lock_release(&rcu_sched_lock_map);
 	__release(RCU_SCHED);

@@ -132,4 +132,21 @@ static inline void rcu_scheduler_starting(void)
 }
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
+
+static inline bool rcu_is_watching(void)
+{
+	return __rcu_is_watching();
+}
+
+#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+
+static inline bool rcu_is_watching(void)
+{
+	return true;
+}
+
+
+#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+
 #endif /* __LINUX_RCUTINY_H */

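For callers that may run in any context (for example, tracing or debugging hooks), the new rcu_is_watching() answers whether an RCU read-side critical section is currently legal. A hedged usage sketch, not taken from this commit, with hypothetical function and data names:

#include <linux/rcupdate.h>

/* Hypothetical hook that may be invoked from the idle loop. */
static bool try_sample_shared_data(void)
{
	if (!rcu_is_watching())
		return false;	/* Extended quiescent state: bail out. */

	rcu_read_lock();
	/* ... rcu_dereference() and read RCU-protected data here ... */
	rcu_read_unlock();
	return true;
}
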
@@ -90,4 +90,6 @@ extern void exit_rcu(void);
 extern void rcu_scheduler_starting(void);
 extern int rcu_scheduler_active __read_mostly;
 
+extern bool rcu_is_watching(void);
+
 #endif /* __LINUX_RCUTREE_H */

@@ -4224,7 +4224,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
 	       !rcu_lockdep_current_cpu_online()
 			? "RCU used illegally from offline CPU!\n"
-			: rcu_is_cpu_idle()
+			: !rcu_is_watching()
 				? "RCU used illegally from idle CPU!\n"
 				: "",
 	       rcu_scheduler_active, debug_locks);

@@ -4247,7 +4247,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	 * So complain bitterly if someone does call rcu_read_lock(),
 	 * rcu_read_lock_bh() and so on from extended quiescent states.
 	 */
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		printk("RCU used illegally from extended quiescent state!\n");
 
 	lockdep_print_held_locks(curr);

@@ -148,7 +148,7 @@ int rcu_read_lock_bh_held(void)
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		return 0;
 	if (!rcu_lockdep_current_cpu_online())
 		return 0;

@@ -176,18 +176,18 @@ void rcu_irq_enter(void)
 }
 EXPORT_SYMBOL_GPL(rcu_irq_enter);
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
 
 /*
  * Test whether RCU thinks that the current CPU is idle.
  */
-int rcu_is_cpu_idle(void)
+bool __rcu_is_watching(void)
 {
-	return !rcu_dynticks_nesting;
+	return rcu_dynticks_nesting;
 }
-EXPORT_SYMBOL(rcu_is_cpu_idle);
+EXPORT_SYMBOL(__rcu_is_watching);
 
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
 
 /*
  * Test whether the current CPU was interrupted from idle.  Nested

@@ -650,21 +650,34 @@ void rcu_nmi_exit(void)
 }
 
 /**
- * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
+ * __rcu_is_watching - are RCU read-side critical sections safe?
  *
+ * Return true if RCU is watching the running CPU, which means that
+ * this CPU can safely enter RCU read-side critical sections.  Unlike
+ * rcu_is_watching(), the caller of __rcu_is_watching() must have at
+ * least disabled preemption.
+ */
+bool __rcu_is_watching(void)
+{
+	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
+}
+
+/**
+ * rcu_is_watching - see if RCU thinks that the current CPU is idle
+ *
  * If the current CPU is in its idle loop and is neither in an interrupt
  * or NMI handler, return true.
  */
-int rcu_is_cpu_idle(void)
+bool rcu_is_watching(void)
 {
 	int ret;
 
 	preempt_disable();
-	ret = (atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1) == 0;
+	ret = __rcu_is_watching();
 	preempt_enable();
 	return ret;
 }
-EXPORT_SYMBOL(rcu_is_cpu_idle);
+EXPORT_SYMBOL_GPL(rcu_is_watching);
 
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 

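The split mirrors the comments above: __rcu_is_watching() reads this CPU's dynticks counter and therefore requires the caller to keep the task pinned to the CPU, while rcu_is_watching() supplies the preempt_disable()/preempt_enable() pair itself. A rough sketch of a caller that already runs non-preemptibly and can use the double-underscore form directly; the function name is hypothetical, and it assumes a configuration where __rcu_is_watching() is declared (CONFIG_DEBUG_LOCK_ALLOC, CONFIG_RCU_TRACE, or CONFIG_SMP, per the rcupdate.h hunk above):

#include <linux/bug.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>

/* Hypothetical check from a context that has already disabled preemption. */
static bool rcu_watching_nonpreemptible(void)
{
	/* The CPU cannot change under us, so the per-CPU read is stable. */
	WARN_ON_ONCE(preemptible());
	return __rcu_is_watching();
}
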
@@ -2321,7 +2334,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 	 * If called from an extended quiescent state, invoke the RCU
 	 * core in order to force a re-evaluation of RCU's idleness.
 	 */
-	if (rcu_is_cpu_idle() && cpu_online(smp_processor_id()))
+	if (!rcu_is_watching() && cpu_online(smp_processor_id()))
 		invoke_rcu_core();
 
 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */

@@ -104,6 +104,8 @@ struct rcu_dynticks {
 				    /* idle-period nonlazy_posted snapshot. */
 	unsigned long last_accelerate;
 				    /* Last jiffy CBs were accelerated. */
+	unsigned long last_advance_all;
+				    /* Last jiffy CBs were all advanced. */
 	int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
 };

@@ -1635,17 +1635,23 @@ module_param(rcu_idle_lazy_gp_delay, int, 0644);
 extern int tick_nohz_enabled;
 
 /*
- * Try to advance callbacks for all flavors of RCU on the current CPU.
- * Afterwards, if there are any callbacks ready for immediate invocation,
- * return true.
+ * Try to advance callbacks for all flavors of RCU on the current CPU, but
+ * only if it has been awhile since the last time we did so.  Afterwards,
+ * if there are any callbacks ready for immediate invocation, return true.
  */
 static bool rcu_try_advance_all_cbs(void)
 {
 	bool cbs_ready = false;
 	struct rcu_data *rdp;
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 	struct rcu_node *rnp;
 	struct rcu_state *rsp;
 
+	/* Exit early if we advanced recently. */
+	if (jiffies == rdtp->last_advance_all)
+		return 0;
+	rdtp->last_advance_all = jiffies;
+
 	for_each_rcu_flavor(rsp) {
 		rdp = this_cpu_ptr(rsp->rda);
 		rnp = rdp->mynode;

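The new last_advance_all field implements a simple once-per-jiffy throttle: if the callback-advancing pass already ran during the current jiffy, skip it. The same pattern in isolation, as an illustrative sketch with hypothetical names and a plain static variable in place of the per-CPU rcu_dynticks field used above:

#include <linux/jiffies.h>

static unsigned long example_last_pass;	/* jiffy of the most recent pass */

/* Hypothetical expensive scan, rate-limited to once per jiffy. */
static bool example_try_expensive_pass(void)
{
	if (jiffies == example_last_pass)
		return false;		/* Already ran this jiffy; skip. */
	example_last_pass = jiffies;

	/* ... the expensive per-flavor callback-advancing work goes here ... */
	return true;
}
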
@@ -1744,6 +1750,8 @@ static void rcu_prepare_for_idle(int cpu)
 	 */
 	if (rdtp->all_lazy &&
 	    rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
 		rdtp->all_lazy = false;
 		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
+		invoke_rcu_core();
+		return;
 	}

@@ -1773,17 +1781,11 @@ static void rcu_prepare_for_idle(int cpu)
  */
 static void rcu_cleanup_after_idle(int cpu)
 {
-	struct rcu_data *rdp;
-	struct rcu_state *rsp;
 
 	if (rcu_is_nocb_cpu(cpu))
 		return;
-	rcu_try_advance_all_cbs();
-	for_each_rcu_flavor(rsp) {
-		rdp = per_cpu_ptr(rsp->rda, cpu);
-		if (cpu_has_callbacks_ready_to_invoke(rdp))
-			invoke_rcu_core();
-	}
+	if (rcu_try_advance_all_cbs())
+		invoke_rcu_core();
 }
 
 /*