sched: refine negative nice level granularity
refine the granularity of negative nice level tasks: let them reschedule more often to offset the effect of them consuming their wait_runtime proportionately slower. (This makes nice-0 task scheduling smoother in the presence of negatively reniced tasks.)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent: a69edb5560
Commit: 7cff8cf61c
@@ -222,21 +222,25 @@ niced_granularity(struct sched_entity *curr, unsigned long granularity)
 {
 	u64 tmp;
 
+	if (likely(curr->load.weight == NICE_0_LOAD))
+		return granularity;
 	/*
-	 * Negative nice levels get the same granularity as nice-0:
+	 * Positive nice levels get the same granularity as nice-0:
 	 */
-	if (likely(curr->load.weight >= NICE_0_LOAD))
-		return granularity;
+	if (likely(curr->load.weight < NICE_0_LOAD)) {
+		tmp = curr->load.weight * (u64)granularity;
+		return (long) (tmp >> NICE_0_SHIFT);
+	}
 	/*
-	 * Positive nice level tasks get linearly finer
+	 * Negative nice level tasks get linearly finer
 	 * granularity:
 	 */
-	tmp = curr->load.weight * (u64)granularity;
+	tmp = curr->load.inv_weight * (u64)granularity;
 
 	/*
 	 * It will always fit into 'long':
 	 */
-	return (long) (tmp >> NICE_0_SHIFT);
+	return (long) (tmp >> WMULT_SHIFT);
 }
 
 static inline void