author    Herbert Xu <herbert@gondor.apana.org.au>    2008-01-25 21:08:28 +0100
committer Ingo Molnar <mingo@elte.hu>                 2008-01-25 21:08:28 +0100
commit    02b67cc3ba36bdba351d6c3a00593f4ec550d9d3 (patch)
tree      5185ad2d780974dc864f12d81d6c8b9fec73097b
parent    03319ec8b06849051747a17aa2a0f9aba9277980 (diff)
sched: do not do cond_resched() when CONFIG_PREEMPT
Why do we even have cond_resched() when real preemption is on? It seems to be a waste of space and time. Remove cond_resched() when CONFIG_PREEMPT is on.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  include/linux/kernel.h   4
-rw-r--r--  include/linux/sched.h   13
-rw-r--r--  kernel/sched.c           6
3 files changed, 18 insertions, 5 deletions
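The change boils down to a compile-time choice: with CONFIG_PREEMPT the scheduler can already preempt kernel code at any preemption point, so cond_resched() becomes a static inline that just returns 0 and its call sites compile away; otherwise the out-of-line implementation, renamed _cond_resched(), keeps doing the work. Below is a minimal user-space sketch of that pattern, for illustration only, not kernel code: the CONFIG_PREEMPT macro is supplied by hand on the compiler command line, and the printf() body of _cond_resched() is a stand-in for the real reschedule.

/*
 * Sketch of the pattern this patch applies (user-space, illustrative):
 * when "preemption" is configured, the cooperative reschedule hook is an
 * inline no-op; otherwise an out-of-line helper does the real work.
 *
 * Build either way:
 *   cc -DCONFIG_PREEMPT demo.c   ->  cond_resched() compiles to nothing
 *   cc demo.c                    ->  cond_resched() calls _cond_resched()
 */
#include <stdio.h>

#ifdef CONFIG_PREEMPT
/* Fully preemptible: explicit reschedule points are redundant. */
static inline int cond_resched(void)
{
	return 0;
}
#else
/* Stand-in for the kernel's out-of-line _cond_resched() in sched.c. */
static int _cond_resched(void)
{
	printf("voluntary reschedule point\n");
	return 1;
}

static inline int cond_resched(void)
{
	return _cond_resched();
}
#endif

int main(void)
{
	/*
	 * A long-running loop sprinkles reschedule points; callers do not
	 * need to know which configuration is in effect.
	 */
	for (int i = 0; i < 3; i++)
		cond_resched();
	return 0;
}

As the sched.h hunk below shows, cond_resched_lock() and cond_resched_softirq() keep their extern declarations; only plain cond_resched() gains the CONFIG_PREEMPT fast path.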
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 94bc996..a7283c9 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -105,8 +105,8 @@ struct user;
* supposed to.
*/
#ifdef CONFIG_PREEMPT_VOLUNTARY
-extern int cond_resched(void);
-# define might_resched() cond_resched()
+extern int _cond_resched(void);
+# define might_resched() _cond_resched()
#else
# define might_resched() do { } while (0)
#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index fe3f8fb..7907845c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1885,7 +1885,18 @@ static inline int need_resched(void)
* cond_resched_lock() will drop the spinlock before scheduling,
* cond_resched_softirq() will enable bhs before scheduling.
*/
-extern int cond_resched(void);
+#ifdef CONFIG_PREEMPT
+static inline int cond_resched(void)
+{
+	return 0;
+}
+#else
+extern int _cond_resched(void);
+static inline int cond_resched(void)
+{
+	return _cond_resched();
+}
+#endif
extern int cond_resched_lock(spinlock_t * lock);
extern int cond_resched_softirq(void);
diff --git a/kernel/sched.c b/kernel/sched.c
index b9ee0f4..6ee3760 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4678,7 +4678,8 @@ static void __cond_resched(void)
} while (need_resched());
}
-int __sched cond_resched(void)
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
+int __sched _cond_resched(void)
{
	if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
			system_state == SYSTEM_RUNNING) {
@@ -4687,7 +4688,8 @@ int __sched cond_resched(void)
}
return 0;
}
-EXPORT_SYMBOL(cond_resched);
+EXPORT_SYMBOL(_cond_resched);
+#endif
/*
* cond_resched_lock() - if a reschedule is pending, drop the given lock,