sched: Revert need_resched() to look at TIF_NEED_RESCHED
Yuanhan reported a serious throughput regression in his pigz
benchmark. Using the ftrace patch I found that several idle paths
need more TLC before we can switch the generic need_resched() over to
preempt_need_resched.

The preemption paths benefit most from preempt_need_resched and do
indeed use it; all other need_resched() users don't really care that
much so reverting need_resched() back to tif_need_resched() is the
simple and safe solution.

Reported-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Fengguang Wu <fengguang.wu@intel.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: lkp@linux.intel.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20130927153003.GF15690@laptop.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent: 1a338ac32c
Commit: 75f93fed50
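The distinction the message relies on can be illustrated with a small standalone C sketch. This is an assumption-laden model, not kernel code: the kernel names are reused, but every type and signature below is simplified for illustration. It shows why a check based on the per-CPU preempt count can lag behind the per-task TIF_NEED_RESCHED flag that a remote CPU sets on wakeup.

/*
 * Illustrative sketch only -- NOT the kernel implementation.  Assumptions:
 * on x86 PREEMPT_NEED_RESCHED lives in a per-CPU preempt count and is
 * inverted (cleared when a reschedule is needed), while TIF_NEED_RESCHED is
 * a per-task thread_info flag that a remote CPU can set directly.  Kernel
 * names are reused but the signatures here are made up.
 */
#include <stdbool.h>
#include <stdio.h>

#define TIF_NEED_RESCHED      3             /* bit in thread_info->flags      */
#define PREEMPT_NEED_RESCHED  0x80000000u   /* inverted bit in preempt count  */

struct thread_info { unsigned long flags; };        /* per task */
struct cpu_state   { unsigned int preempt_count; }; /* per CPU  */

/* A remote CPU waking a task only sets the task's TIF flag... */
static void set_tsk_need_resched(struct thread_info *ti)
{
	ti->flags |= 1UL << TIF_NEED_RESCHED;
}

/* ...the per-CPU copy is only "folded" later, by the local CPU. */
static void fold_need_resched(struct cpu_state *cs)
{
	cs->preempt_count &= ~PREEMPT_NEED_RESCHED;
}

/* The two checks this commit chooses between: */
static bool tif_need_resched(const struct thread_info *ti)
{
	return ti->flags & (1UL << TIF_NEED_RESCHED);
}

static bool test_preempt_need_resched(const struct cpu_state *cs)
{
	return !(cs->preempt_count & PREEMPT_NEED_RESCHED);
}

int main(void)
{
	struct thread_info idle = { .flags = 0 };
	struct cpu_state   cpu  = { .preempt_count = PREEMPT_NEED_RESCHED };

	set_tsk_need_resched(&idle);             /* remote wakeup              */
	printf("tif=%d preempt=%d\n",            /* tif=1 preempt=0: an idle   */
	       tif_need_resched(&idle),          /* loop polling the per-CPU   */
	       test_preempt_need_resched(&cpu)); /* copy would keep sleeping   */

	fold_need_resched(&cpu);                 /* e.g. on interrupt return   */
	printf("after fold: preempt=%d\n", test_preempt_need_resched(&cpu));
	return 0;
}

In the idle paths the commit message mentions, only the TIF flag is guaranteed to be set by the waking CPU, which is why the generic need_resched() goes back to tif_need_resched() while the preemption paths keep the cheaper per-CPU check.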
@@ -79,14 +79,6 @@ static __always_inline bool __preempt_count_dec_and_test(void)
 	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
 }
 
-/*
- * Returns true when we need to resched -- even if we can not.
- */
-static __always_inline bool need_resched(void)
-{
-	return unlikely(test_preempt_need_resched());
-}
-
 /*
  * Returns true when we need to resched and can (barring IRQ state).
  */
@@ -84,14 +84,6 @@ static __always_inline bool __preempt_count_dec_and_test(void)
 	return !--*preempt_count_ptr();
 }
 
-/*
- * Returns true when we need to resched -- even if we can not.
- */
-static __always_inline bool need_resched(void)
-{
-	return unlikely(test_preempt_need_resched());
-}
-
 /*
  * Returns true when we need to resched and can (barring IRQ state).
  */
@@ -2577,6 +2577,11 @@ static inline bool __must_check current_clr_polling_and_test(void)
 }
 #endif
 
+static __always_inline bool need_resched(void)
+{
+	return unlikely(tif_need_resched());
+}
+
 /*
  * Thread group CPU time accounting.
  */