author     Oleg Nesterov <oleg@redhat.com>    2013-10-07 18:18:24 +0200
committer  Ingo Molnar <mingo@kernel.org>     2013-10-16 14:22:18 +0200
commit     c2d816443ef305aba8eaf0bf368f4d3d87494f06 (patch)
tree       0331463c4ea621c1467e83894a9cebf3a91cb136 /include/linux/wait.h
parent     8922915b38cd8b72f8e5af614b95be71d1d299d4 (diff)
sched/wait: Introduce prepare_to_wait_event()
Add the new helper, prepare_to_wait_event(), which should only be used
by ___wait_event().

prepare_to_wait_event() returns -ERESTARTSYS if signal_pending_state()
is true, otherwise it does prepare_to_wait/exclusive. This allows us to
uninline the signal-pending checks in the wait_event*() macros.

Also, it can initialize wait->private/func. We do not care if they were
already initialized; the values are the same. This also shaves a couple
of insns from the inlined code.

This obviously makes the prepare_*() path a little bit slower, but we
are likely going to sleep anyway, so I think it makes sense to shrink
.text:

              text     data       bss       dec      hex  filename
           ========================================================
   before:  5126092  2959248  10117120  18202460  115bf5c  vmlinux
    after:  5124618  2955152  10117120  18196890  115a99a  vmlinux

on my build.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20131007161824.GA29757@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
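[Editor's note] For reference, a minimal sketch of what the new helper could look like on the wait-queue side, pieced together from the description above. The actual implementation lives outside this header (in the scheduler's wait code) and may differ in detail; only existing wait-queue API calls such as signal_pending_state(), autoremove_wake_function(), __add_wait_queue() and __add_wait_queue_tail() are assumed here.

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        /* Bail out so ___wait_event() can propagate the error directly. */
        if (signal_pending_state(state, current))
                return -ERESTARTSYS;

        /*
         * (Re)initialize private/func unconditionally; the values are the
         * same on every loop iteration, so redundant stores are harmless.
         */
        wait->private = current;
        wait->func = autoremove_wake_function;

        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list)) {
                if (wait->flags & WQ_FLAG_EXCLUSIVE)
                        __add_wait_queue_tail(q, wait);
                else
                        __add_wait_queue(q, wait);
        }
        set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);

        return 0;
}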
Diffstat (limited to 'include/linux/wait.h')
-rw-r--r--  include/linux/wait.h | 24
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 04c0260..ec099b0 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -187,27 +187,30 @@ wait_queue_head_t *bit_waitqueue(void *, int);
         __cond || !__ret;                                              \
 })
 
-#define ___wait_signal_pending(state)                                  \
-        ((state == TASK_INTERRUPTIBLE && signal_pending(current)) ||   \
-         (state == TASK_KILLABLE && fatal_signal_pending(current)))
+#define ___wait_is_interruptible(state)                                \
+        (!__builtin_constant_p(state) ||                               \
+                state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
 
 #define ___wait_event(wq, condition, state, exclusive, ret, cmd)       \
 ({                                                                     \
         __label__ __out;                                               \
-        DEFINE_WAIT(__wait);                                           \
+        wait_queue_t __wait;                                           \
         long __ret = ret;                                              \
                                                                        \
+        INIT_LIST_HEAD(&__wait.task_list);                             \
+        if (exclusive)                                                 \
+                __wait.flags = WQ_FLAG_EXCLUSIVE;                      \
+        else                                                           \
+                __wait.flags = 0;                                      \
+                                                                       \
         for (;;) {                                                     \
-                if (exclusive)                                         \
-                        prepare_to_wait_exclusive(&wq, &__wait, state); \
-                else                                                   \
-                        prepare_to_wait(&wq, &__wait, state);          \
+                long __int = prepare_to_wait_event(&wq, &__wait, state);\
                                                                        \
                 if (condition)                                         \
                         break;                                         \
                                                                        \
-                if (___wait_signal_pending(state)) {                   \
-                        __ret = -ERESTARTSYS;                          \
+                if (___wait_is_interruptible(state) && __int) {        \
+                        __ret = __int;                                 \
                         if (exclusive) {                               \
                                 abort_exclusive_wait(&wq, &__wait,     \
                                                      state, NULL);     \
@@ -791,6 +794,7 @@ extern long interruptible_sleep_on_timeout(wait_queue_head_t *q, signed long tim
  */
 void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
 void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
+long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
 void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
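[Editor's note] As a usage illustration (a hypothetical driver-style snippet, not part of this patch), callers of the wait_event*() family need no change; a pending signal simply surfaces as the value prepare_to_wait_event() handed back through ___wait_event():

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);     /* hypothetical wait queue */
static int example_flag;

/* Sleeps in TASK_INTERRUPTIBLE; returns 0, or -ERESTARTSYS if interrupted. */
static int example_wait_for_flag(void)
{
        return wait_event_interruptible(example_wq, example_flag != 0);
}

static void example_set_flag(void)
{
        example_flag = 1;
        wake_up(&example_wq);   /* wakes a sleeper in example_wait_for_flag() */
}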