author | Davidlohr Bueso <dave@stgolabs.net> | 2018-08-21 21:58:23 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-08-22 10:52:49 -0700
commit | 679abf381a18e945457b01921f667cee9e656a7f (patch)
tree | a9b93d916c91e516f97d47e70237904021724377 /fs/eventpoll.c
parent | 514056d506e44084369f5ce1c8186e4253901a05 (diff)
download | op-kernel-dev-679abf381a18e945457b01921f667cee9e656a7f.zip op-kernel-dev-679abf381a18e945457b01921f667cee9e656a7f.tar.gz
fs/eventpoll.c: loosen irq safety in ep_poll()
Similar to other calls, ep_poll() is not called with interrupts disabled,
so we can avoid the irq save/restore dance and simply disable local irqs.
In fact, the function should never be called from irq context at all,
considering that its only call path is

    epoll_wait(2) -> do_epoll_wait() -> ep_poll()
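The patch swaps one locking idiom for the other; a minimal sketch of the
contrast (illustrative only, not the kernel's implementation of either
primitive):

    unsigned long flags;

    /* Safe from any context: snapshots the current irq state into
     * 'flags' before disabling, and restores exactly that state
     * afterwards. */
    spin_lock_irqsave(&ep->wq.lock, flags);
    /* ... critical section ... */
    spin_unlock_irqrestore(&ep->wq.lock, flags);

    /* Cheaper, but only correct when irqs are known to be enabled on
     * entry: disables irqs on lock, unconditionally re-enables them
     * on unlock. */
    spin_lock_irq(&ep->wq.lock);
    /* ... critical section ... */
    spin_unlock_irq(&ep->wq.lock);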
When running a common pipe-based epoll_wait(2) microbenchmark on a
2-socket, 40-core (HT) IvyBridge machine, the following performance
improvements are seen:
    # threads    vanilla      dirty
            1    1805587    2106412
            2    1854064    2090762
            4    1805484    2017436
            8    1751222    1974475
           16    1725299    1962104
           32    1378463    1571233
           64     787368     900784
This is a fairly consistent improvement of close to 15%.
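The benchmark source is not part of the patch; a minimal userspace sketch
of a pipe-based epoll_wait(2) loop of the kind described (illustrative
only, with an arbitrary iteration count) might look like:

    #include <sys/epoll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            struct epoll_event ev = { .events = EPOLLIN }, out;
            int pfd[2], epfd, i;
            char c;

            if (pipe(pfd) || (epfd = epoll_create1(0)) < 0)
                    return 1;
            ev.data.fd = pfd[0];
            if (epoll_ctl(epfd, EPOLL_CTL_ADD, pfd[0], &ev))
                    return 1;

            for (i = 0; i < 1000000; i++) {
                    write(pfd[1], "x", 1);         /* make the read end ready */
                    epoll_wait(epfd, &out, 1, -1); /* the path under test: ep_poll() */
                    read(pfd[0], &c, 1);           /* drain so the next wait blocks */
            }
            printf("%d epoll_wait() calls done\n", i);
            return 0;
    }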
Also add a lockdep check so we detect any mischief before deadlocking.
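To illustrate the kind of mischief meant, consider a hypothetical caller
(no such path exists in the tree) that enters ep_poll() with interrupts
already disabled:

    /* Hypothetical misuse, for illustration only. Without the
     * assertion, the first spin_unlock_irq() inside ep_poll() would
     * silently re-enable irqs behind this caller's back; with it,
     * lockdep warns loudly right at the function's entry. */
    local_irq_disable();
    ep_poll(ep, events, maxevents, timeout);  /* lockdep WARNs here */
    local_irq_enable();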
Link: http://lkml.kernel.org/r/20180727053432.16679-2-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Jason Baron <jbaron@akamai.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r-- | fs/eventpoll.c | 13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index b5e43e1..88473e6 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1746,11 +1746,12 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		   int maxevents, long timeout)
 {
 	int res = 0, eavail, timed_out = 0;
-	unsigned long flags;
 	u64 slack = 0;
 	wait_queue_entry_t wait;
 	ktime_t expires, *to = NULL;
 
+	lockdep_assert_irqs_enabled();
+
 	if (timeout > 0) {
 		struct timespec64 end_time = ep_set_mstimeout(timeout);
 
@@ -1763,7 +1764,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		 * caller specified a non blocking operation.
 		 */
 		timed_out = 1;
-		spin_lock_irqsave(&ep->wq.lock, flags);
+		spin_lock_irq(&ep->wq.lock);
 		goto check_events;
 	}
 
@@ -1772,7 +1773,7 @@ fetch_events:
 	if (!ep_events_available(ep))
 		ep_busy_loop(ep, timed_out);
 
-	spin_lock_irqsave(&ep->wq.lock, flags);
+	spin_lock_irq(&ep->wq.lock);
 
 	if (!ep_events_available(ep)) {
 		/*
@@ -1814,11 +1815,11 @@ fetch_events:
 			break;
 		}
 
-		spin_unlock_irqrestore(&ep->wq.lock, flags);
+		spin_unlock_irq(&ep->wq.lock);
 		if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
 			timed_out = 1;
 
-		spin_lock_irqsave(&ep->wq.lock, flags);
+		spin_lock_irq(&ep->wq.lock);
 	}
 
 	__remove_wait_queue(&ep->wq, &wait);
@@ -1828,7 +1829,7 @@ check_events:
 	/* Is it worth to try to dig for events ? */
 	eavail = ep_events_available(ep);
 
-	spin_unlock_irqrestore(&ep->wq.lock, flags);
+	spin_unlock_irq(&ep->wq.lock);
 
 	/*
 	 * Try to transfer events to user space. In case we get 0 events and