Diffstat (limited to 'include/asm-generic/preempt.h')
-rw-r--r--  include/asm-generic/preempt.h | 35 +++++++++++------------------------
1 file changed, 11 insertions(+), 24 deletions(-)
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index ddf2b42..1cd3f5d 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -3,13 +3,11 @@
#include <linux/thread_info.h>
-/*
- * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
- * that think a non-zero value indicates we cannot preempt.
- */
+#define PREEMPT_ENABLED (0)
+
static __always_inline int preempt_count(void)
{
- return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
+ return current_thread_info()->preempt_count;
}
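The mask in the removed accessor is easier to see with concrete values. A standalone userspace sketch, not part of the patch; the PREEMPT_NEED_RESCHED bit position is assumed here for illustration:

#include <stdio.h>

/* Assumed bit position, for illustration only. */
#define PREEMPT_NEED_RESCHED 0x80000000u

/* Old folded layout: the bit is inverted, so SET means "no resched
 * needed", and "preemption enabled, nothing pending" stores a
 * non-zero raw word. */
static unsigned int stored = PREEMPT_NEED_RESCHED;

static unsigned int old_preempt_count(void)
{
	/* Without the mask, callers that treat non-zero as "cannot
	 * preempt" would misread the folded bit. */
	return stored & ~PREEMPT_NEED_RESCHED;
}

int main(void)
{
	printf("raw=%#x masked=%u\n", stored, old_preempt_count());
	return 0;
}

With the generic code no longer folding the bit, the accessor can simply return the raw count, and PREEMPT_ENABLED is plain 0.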
static __always_inline int *preempt_count_ptr(void)
@@ -17,11 +15,6 @@ static __always_inline int *preempt_count_ptr(void)
return &current_thread_info()->preempt_count;
}
-/*
- * We now lose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
- * alternative is losing a reschedule. Better to schedule too often -- also this
- * should be a very rare operation.
- */
static __always_inline void preempt_count_set(int pc)
{
*preempt_count_ptr() = pc;
@@ -41,28 +34,17 @@ static __always_inline void preempt_count_set(int pc)
task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
} while (0)
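For orientation, a minimal sketch of the reset path the macro above serves; the struct layout below is invented for illustration and far simpler than the real thread_info:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel types. */
struct thread_info { int preempt_count; };
struct task_struct { struct thread_info ti; };
#define task_thread_info(p) (&(p)->ti)

#define PREEMPT_ENABLED (0)
#define init_task_preempt_count(p) do { \
	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
} while (0)

int main(void)
{
	/* Pretend the child copied a stale, non-zero count ... */
	struct task_struct child = { .ti = { .preempt_count = 1 } };
	/* ... which the fork path resets before the child runs. */
	init_task_preempt_count(&child);
	printf("%d\n", task_thread_info(&child)->preempt_count); /* 0 */
	return 0;
}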
-/*
- * We fold the NEED_RESCHED bit into the preempt count such that
- * preempt_enable() can decrement and test for needing to reschedule with a
- * single instruction.
- *
- * We invert the actual bit, so that when the decrement hits 0 we know we both
- * need to resched (the bit is cleared) and can resched (no preempt count).
- */
-
static __always_inline void set_preempt_need_resched(void)
{
- *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
}
static __always_inline void clear_preempt_need_resched(void)
{
- *preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
}
static __always_inline bool test_preempt_need_resched(void)
{
- return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
+ return false;
}
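The folding scheme described in the comment removed above, which set/clear/test_preempt_need_resched() used to implement, fits in a few lines. A standalone sketch, with the inverted-bit layout assumed (architectures that can decrement and test in one instruction keep a scheme like this in their own headers):

#include <stdio.h>
#include <stdbool.h>

#define PREEMPT_NEED_RESCHED 0x80000000u /* assumed bit, inverted sense */

/* One level of preempt-disable, no reschedule pending: the inverted
 * bit is SET when no reschedule is needed. */
static unsigned int count = 1 | PREEMPT_NEED_RESCHED;

static void folded_set_need_resched(void)
{
	count &= ~PREEMPT_NEED_RESCHED; /* clearing the bit means "needed" */
}

static bool folded_dec_and_test(void)
{
	/* A single decrement-and-test: it reaches 0 only when the count
	 * is gone (can resched) AND the inverted bit is clear (need to). */
	return --count == 0;
}

int main(void)
{
	folded_set_need_resched();
	printf("%s\n", folded_dec_and_test() ? "reschedule" : "keep running");
	return 0;
}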
/*
@@ -81,7 +63,12 @@ static __always_inline void __preempt_count_sub(int val)
static __always_inline bool __preempt_count_dec_and_test(void)
{
- return !--*preempt_count_ptr();
+ /*
+ * Because load-store architectures cannot do per-cpu atomic
+ * operations, we cannot use PREEMPT_NEED_RESCHED here; the folded
+ * bit might get lost.
+ */
+ return !--*preempt_count_ptr() && tif_need_resched();
}
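The added comment is terse; the race it refers to can be simulated deterministically in userspace. The sketch below plays both roles by hand, with the folded-bit layout assumed as before:

#include <stdio.h>

#define PREEMPT_NEED_RESCHED 0x80000000u /* assumed folded bit */

int main(void)
{
	unsigned int count = 1;            /* preemption disabled once */

	/* On a load-store architecture the decrement is three steps: */
	unsigned int r = count;            /* 1: load                   */
	r = r - 1;                         /* 2: modify                 */
	count |= PREEMPT_NEED_RESCHED;     /* interrupt fires here and
	                                    * folds the bit in          */
	count = r;                         /* 3: store overwrites it    */

	printf("%#x\n", count);            /* prints 0: the bit is lost */
	return 0;
}

Keeping NEED_RESCHED in the TIF flags word instead means the decrement and the flag never share a store, so nothing can be lost; the price is the extra tif_need_resched() load after the decrement.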
/*
@@ -89,7 +76,7 @@ static __always_inline bool __preempt_count_dec_and_test(void)
*/
static __always_inline bool should_resched(void)
{
- return unlikely(!*preempt_count_ptr());
+ return unlikely(!preempt_count() && tif_need_resched());
}
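For context, roughly how these helpers are consumed: a simplified sketch of the preempt_enable() fast path elsewhere in the tree, not part of this patch and with details elided:

#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		preempt_schedule(); /* a resched is both due and allowed */ \
} while (0)

should_resched() plays the same role for the voluntary-preemption paths such as cond_resched(), where the count is tested without being changed.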
#ifdef CONFIG_PREEMPT