diff --git a/kernel/sched.c b/kernel/sched.c
index 521b89b01480..070eefdd90f5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -680,6 +680,7 @@ enum {
 	SCHED_FEAT_SYNC_WAKEUPS		= 32,
 	SCHED_FEAT_HRTICK		= 64,
 	SCHED_FEAT_DOUBLE_TICK		= 128,
+	SCHED_FEAT_NORMALIZED_SLEEPER	= 256,
 };
 
 const_debug unsigned int sysctl_sched_features =
@@ -690,7 +691,8 @@ const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_CACHE_HOT_BUDDY	* 1 |
 		SCHED_FEAT_SYNC_WAKEUPS		* 1 |
 		SCHED_FEAT_HRTICK		* 1 |
-		SCHED_FEAT_DOUBLE_TICK		* 0;
+		SCHED_FEAT_DOUBLE_TICK		* 0 |
+		SCHED_FEAT_NORMALIZED_SLEEPER	* 1;
 
 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 290cf770b712..022e036f2c3e 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -501,8 +501,11 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	if (!initial) {
 		/* sleeps upto a single latency don't count. */
 		if (sched_feat(NEW_FAIR_SLEEPERS)) {
-			vruntime -= calc_delta_fair(sysctl_sched_latency,
-					&cfs_rq->load);
+			if (sched_feat(NORMALIZED_SLEEPER))
+				vruntime -= calc_delta_fair(sysctl_sched_latency,
+						&cfs_rq->load);
+			else
+				vruntime -= sysctl_sched_latency;
 		}
 
 		/* ensure we never gain time by being placed backwards. */
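
The effect of the new NORMALIZED_SLEEPER toggle can be seen in a minimal user-space sketch (not kernel code): with the feature enabled, the sleeper credit is the scheduling latency scaled down by the runqueue's load relative to a nice-0 task, roughly what calc_delta_fair() computes; with it disabled, the raw latency is subtracted from vruntime regardless of load. The NICE_0_LOAD value, the 20 ms latency, and the plain integer division below are illustrative assumptions, not the kernel's fixed-point arithmetic.

/* sketch.c -- simplified model of the two sleeper-credit paths */
#include <stdio.h>

#define NICE_0_LOAD	1024ULL		/* assumed weight of a nice-0 task */

static unsigned long long
sleeper_credit(unsigned long long latency_ns,
	       unsigned long long cfs_rq_weight,
	       int normalized)
{
	if (normalized)
		/* approximates calc_delta_fair(latency, &cfs_rq->load) */
		return latency_ns * NICE_0_LOAD / cfs_rq_weight;

	/* raw credit, independent of how loaded the runqueue is */
	return latency_ns;
}

int main(void)
{
	unsigned long long latency = 20000000ULL;	/* assumed 20 ms latency */
	unsigned long long weights[] = { 1024, 2048, 4096 };

	for (int i = 0; i < 3; i++)
		printf("rq weight %llu: normalized %llu ns, raw %llu ns\n",
		       weights[i],
		       sleeper_credit(latency, weights[i], 1),
		       sleeper_credit(latency, weights[i], 0));
	return 0;
}

On a lightly loaded queue (weight 1024) both paths hand out the same credit; as the queue weight grows, the normalized path scales the bonus down while the raw path keeps granting the full latency.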