nohz_full: Add full-system-idle arguments to API
This commit adds an isidle and jiffies argument to force_qs_rnp(), dyntick_save_progress_counter(), and rcu_implicit_dynticks_qs() to enable RCU's force-quiescent-state process to check for full-system idle.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
[ paulmck: Use true and false for boolean constants per Lai Jiangshan. ]
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Parent: d4bd54fbac
Commit: 217af2a2ff
@@ -246,7 +246,10 @@ module_param(jiffies_till_next_fqs, ulong, 0644);
 
 static void rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
 				  struct rcu_data *rdp);
-static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *));
+static void force_qs_rnp(struct rcu_state *rsp,
+			 int (*f)(struct rcu_data *rsp, bool *isidle,
+				  unsigned long *maxj),
+			 bool *isidle, unsigned long *maxj);
 static void force_quiescent_state(struct rcu_state *rsp);
 static int rcu_pending(int cpu);
 
@@ -727,7 +730,8 @@ static int rcu_is_cpu_rrupt_from_idle(void)
  * credit them with an implicit quiescent state. Return 1 if this CPU
  * is in dynticks idle mode, which is an extended quiescent state.
  */
-static int dyntick_save_progress_counter(struct rcu_data *rdp)
+static int dyntick_save_progress_counter(struct rcu_data *rdp,
+					 bool *isidle, unsigned long *maxj)
 {
 	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
 	return (rdp->dynticks_snap & 0x1) == 0;
@@ -739,7 +743,8 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
  * idle state since the last call to dyntick_save_progress_counter()
  * for this same CPU, or by virtue of having been offline.
  */
-static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
+static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
+				    bool *isidle, unsigned long *maxj)
 {
 	unsigned int curr;
 	unsigned int snap;
@@ -1361,16 +1366,19 @@ static int rcu_gp_init(struct rcu_state *rsp)
 int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
 {
 	int fqs_state = fqs_state_in;
+	bool isidle = false;
+	unsigned long maxj;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
 	rsp->n_force_qs++;
 	if (fqs_state == RCU_SAVE_DYNTICK) {
 		/* Collect dyntick-idle snapshots. */
-		force_qs_rnp(rsp, dyntick_save_progress_counter);
+		force_qs_rnp(rsp, dyntick_save_progress_counter,
+			     &isidle, &maxj);
 		fqs_state = RCU_FORCE_QS;
 	} else {
 		/* Handle dyntick-idle and offline CPUs. */
-		force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
+		force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
 	}
 	/* Clear flag to prevent immediate re-entry. */
 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
@@ -2069,7 +2077,10 @@ void rcu_check_callbacks(int cpu, int user)
  *
  * The caller must have suppressed start of new grace periods.
  */
-static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
+static void force_qs_rnp(struct rcu_state *rsp,
+			 int (*f)(struct rcu_data *rsp, bool *isidle,
+				  unsigned long *maxj),
+			 bool *isidle, unsigned long *maxj)
 {
 	unsigned long bit;
 	int cpu;
@@ -2093,7 +2104,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
 		bit = 1;
 		for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
 			if ((rnp->qsmask & bit) != 0 &&
-			    f(per_cpu_ptr(rsp->rda, cpu)))
+			    f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
 				mask |= bit;
 		}
 		if (mask != 0) {
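
The sketch below is a standalone, userspace illustration of the pattern the new signatures set up: a per-CPU scan callback that both returns a quiescent-state verdict and accumulates a system-wide idle verdict through the isidle/maxj out-parameters. It is not kernel code; in this commit the new arguments are only plumbed through and not yet consumed, so the per-CPU idle check shown here (check_cpu(), the idle_since field, NR_CPUS) is an assumed stand-in for the full-system-idle logic that later patches supply.

/*
 * Standalone sketch (not kernel code): shows how a scan function with the
 * new (data, isidle, maxj) signature can fold per-CPU results into a
 * system-wide "everyone is idle" answer.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct cpu_data {
	bool in_dyntick_idle;      /* stand-in for the dynticks-counter check */
	unsigned long idle_since;  /* stand-in jiffies-like timestamp */
};

/* Same shape as the new scan-function signature: qs verdict plus isidle/maxj. */
static int check_cpu(struct cpu_data *cdp, bool *isidle, unsigned long *maxj)
{
	if (!cdp->in_dyntick_idle) {
		*isidle = false;          /* one busy CPU spoils full-system idle */
		return 0;                 /* no implicit quiescent state */
	}
	if (cdp->idle_since > *maxj)
		*maxj = cdp->idle_since;  /* track the most recent idle entry */
	return 1;                         /* CPU is in an extended quiescent state */
}

/* Analogue of force_qs_rnp(): threads isidle/maxj through every callback call. */
static void scan_all(struct cpu_data *cpus,
		     int (*f)(struct cpu_data *, bool *, unsigned long *),
		     bool *isidle, unsigned long *maxj)
{
	for (int i = 0; i < NR_CPUS; i++)
		f(&cpus[i], isidle, maxj);
}

int main(void)
{
	struct cpu_data cpus[NR_CPUS] = {
		{ true, 100 }, { true, 250 }, { false, 0 }, { true, 180 },
	};
	bool isidle = true;
	unsigned long maxj = 0;

	scan_all(cpus, check_cpu, &isidle, &maxj);
	printf("full-system idle: %s, latest idle entry: %lu\n",
	       isidle ? "yes" : "no", maxj);
	return 0;
}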