author     Peter Zijlstra <peterz@infradead.org>    2013-09-27 17:30:03 +0200
committer  Ingo Molnar <mingo@kernel.org>           2013-09-28 10:04:47 +0200
commit     75f93fed50c2abadbab6ef546b265f51ca975b27 (patch)
tree       ae531501cb671c948baedb8e07111f8dda2d5036
parent     1a338ac32ca630f67df25b4a16436cccc314e997 (diff)
sched: Revert need_resched() to look at TIF_NEED_RESCHED
Yuanhan reported a serious throughput regression in his pigz
benchmark. Using the ftrace patch I found that several idle paths
need more TLC before we can switch the generic need_resched() over
to preempt_need_resched.

The preemption paths benefit most from preempt_need_resched and do
indeed use it; all other need_resched() users don't really care
that much so reverting need_resched() back to tif_need_resched()
is the simple and safe solution.

Reported-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Fengguang Wu <fengguang.wu@intel.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: lkp@linux.intel.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20130927153003.GF15690@laptop.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
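For context, a toy userspace model of the two mechanisms the message
contrasts. The names mirror the kernel's (TIF_NEED_RESCHED in the
thread_info flags; the inverted PREEMPT_NEED_RESCHED bit folded into
the x86 per-cpu preempt count), but the bit layout shown and the demo
program around it are a simplified sketch, not kernel code:

/*
 * Toy model of the two need-resched mechanisms; plain userspace C.
 * Build with: gcc -std=c11 -o resched_demo resched_demo.c
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Classic scheme: a flag bit in the task's thread_info->flags. */
#define TIF_NEED_RESCHED	3
static unsigned long thread_flags;

static bool tif_need_resched(void)
{
	return thread_flags & (1UL << TIF_NEED_RESCHED);
}

/*
 * Folded scheme (x86): the resched hint lives in the preempt count,
 * stored inverted so that "count hit zero and a resched is pending"
 * is a single compare against 0.
 */
#define PREEMPT_NEED_RESCHED	0x80000000u
static uint32_t preempt_count = PREEMPT_NEED_RESCHED;	/* bit set: no resched */

static void set_preempt_need_resched(void)
{
	preempt_count &= ~PREEMPT_NEED_RESCHED;	/* cleared bit: resched needed */
}

static bool test_preempt_need_resched(void)
{
	return !(preempt_count & PREEMPT_NEED_RESCHED);
}

/* The payoff for the preemption paths: one test covers both conditions. */
static bool preempt_count_dec_and_test(void)
{
	return --preempt_count == 0;	/* zero count *and* pending resched */
}

int main(void)
{
	thread_flags |= 1UL << TIF_NEED_RESCHED;	/* resched requested */
	set_preempt_need_resched();			/* fold into the count */

	printf("tif_need_resched():          %d\n", tif_need_resched());
	printf("test_preempt_need_resched(): %d\n", test_preempt_need_resched());

	preempt_count += 1;				/* like preempt_disable() */
	printf("dec_and_test():              %d\n", preempt_count_dec_and_test());
	return 0;
}

The catch, and the reason for this revert, is that the folded bit is
per-cpu state: a remote CPU can set TIF_NEED_RESCHED in a task's
thread_info, but it cannot fold that into another CPU's preempt count,
so only the TIF view is reliable for the non-preemption users.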
-rw-r--r--  arch/x86/include/asm/preempt.h  | 8 --------
-rw-r--r--  include/asm-generic/preempt.h   | 8 --------
-rw-r--r--  include/linux/sched.h           | 5 +++++
3 files changed, 5 insertions(+), 16 deletions(-)
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 1de41690..8729723 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -80,14 +80,6 @@ static __always_inline bool __preempt_count_dec_and_test(void)
}
/*
- * Returns true when we need to resched -- even if we can not.
- */
-static __always_inline bool need_resched(void)
-{
- return unlikely(test_preempt_need_resched());
-}
-
-/*
* Returns true when we need to resched and can (barring IRQ state).
*/
static __always_inline bool should_resched(void)
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index 5dc14ed..ddf2b42 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -85,14 +85,6 @@ static __always_inline bool __preempt_count_dec_and_test(void)
}
/*
- * Returns true when we need to resched -- even if we can not.
- */
-static __always_inline bool need_resched(void)
-{
- return unlikely(test_preempt_need_resched());
-}
-
-/*
* Returns true when we need to resched and can (barring IRQ state).
*/
static __always_inline bool should_resched(void)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b09798b..2ac5285 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2577,6 +2577,11 @@ static inline bool __must_check current_clr_polling_and_test(void)
}
#endif
+static __always_inline bool need_resched(void)
+{
+ return unlikely(tif_need_resched());
+}
+
/*
* Thread group CPU time accounting.
*/
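The "other need_resched() users" the message refers to include the
polling idle paths, which spin on the flag that a remote CPU sets at
wakeup. Below is a self-contained sketch of that pattern; userspace
threads stand in for CPUs, and the helper names and timing are
illustrative assumptions, not a specific kernel path:

/*
 * Sketch of a polling idle loop; a second thread plays the remote CPU
 * that sets TIF_NEED_RESCHED at wakeup time. Build: gcc -pthread demo.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define TIF_NEED_RESCHED	3
static atomic_ulong thread_flags;

static bool tif_need_resched(void)
{
	return atomic_load(&thread_flags) & (1UL << TIF_NEED_RESCHED);
}

/* After this commit, the generic need_resched() is the TIF test again. */
static bool need_resched(void)
{
	return tif_need_resched();
}

static void *remote_wakeup(void *arg)
{
	(void)arg;
	usleep(1000);	/* "another CPU" decides to wake a task here */
	atomic_fetch_or(&thread_flags, 1UL << TIF_NEED_RESCHED);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, remote_wakeup, NULL);
	while (!need_resched())
		;	/* poll, as the polling-idle paths do */
	puts("resched requested, leaving idle loop");
	pthread_join(t, NULL);
	return 0;
}

With need_resched() reading TIF_NEED_RESCHED, the loop exits as soon as
the remote store lands, with no dependency on the per-cpu fold.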