author	Tony Battersby <tonyb@cybernetics.com>	2009-03-31 15:24:15 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-01 08:59:19 -0700
commit	e057e15ff66a620eda4c407486cbb8f8fbb7d878 (patch)
tree	aa9e15c13ce5882ca0f0b442eec71d99fec8a4ff /fs/eventpoll.c
parent	d1bc90dd5d037079f96b3327f943eb6ae8ef7491 (diff)
epoll: clean up ep_modify
ep_modify() doesn't need to set event.data from within the ep->lock spinlock as the comment suggests.  The only place event.data is used is ep_send_events_proc(), and this is protected by ep->mtx instead of ep->lock.  Also update the comment for mutex_lock() at the top of ep_scan_ready_list(), which mentions epoll_ctl(EPOLL_CTL_DEL) but not epoll_ctl(EPOLL_CTL_MOD).

ep_modify() can also use spin_lock_irq() instead of spin_lock_irqsave().

Signed-off-by: Tony Battersby <tonyb@cybernetics.com>
Acked-by: Davide Libenzi <davidel@xmailserver.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
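For context, ep_modify() is the kernel-side handler for epoll_ctl(EPOLL_CTL_MOD). The sketch below is illustrative only (plain userspace C; the epfd/sockfd/cookie names are hypothetical) and shows the operation whose locking this patch simplifies: re-arming an already-registered descriptor with a new interest mask and a new data word.

/*
 * Minimal userspace sketch (not part of the patch): epoll_ctl(EPOLL_CTL_MOD)
 * is the syscall path that ends up in ep_modify().  Names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/epoll.h>

static void rearm(int epfd, int sockfd, uint64_t cookie)
{
	struct epoll_event ev;

	ev.events = EPOLLIN | EPOLLONESHOT;	/* new interest mask */
	ev.data.u64 = cookie;			/* new per-fd user data */

	/*
	 * ep_modify() copies ev.events and ev.data into the epitem; per the
	 * commit message, ev.data is only read by ep_send_events_proc()
	 * under ep->mtx, so no spinlock is needed for that copy.
	 */
	if (epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &ev) == -1) {
		perror("epoll_ctl(EPOLL_CTL_MOD)");
		exit(EXIT_FAILURE);
	}
}

Because the data word is only consumed by ep_send_events_proc() under ep->mtx, the kernel can copy it outside ep->lock, which is exactly what the diff below does.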
Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r--	fs/eventpoll.c	19
1 file changed, 7 insertions, 12 deletions
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 64c5503..744377c 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -435,7 +435,7 @@ static int ep_scan_ready_list(struct eventpoll *ep,
 
 	/*
 	 * We need to lock this because we could be hit by
-	 * eventpoll_release_file() and epoll_ctl(EPOLL_CTL_DEL).
+	 * eventpoll_release_file() and epoll_ctl().
 	 */
 	mutex_lock(&ep->mtx);
 
@@ -972,15 +972,14 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
 {
 	int pwake = 0;
 	unsigned int revents;
-	unsigned long flags;
 
 	/*
-	 * Set the new event interest mask before calling f_op->poll(), otherwise
-	 * a potential race might occur. In fact if we do this operation inside
-	 * the lock, an event might happen between the f_op->poll() call and the
-	 * new event set registering.
+	 * Set the new event interest mask before calling f_op->poll();
+	 * otherwise we might miss an event that happens between the
+	 * f_op->poll() call and the new event set registering.
 	 */
 	epi->event.events = event->events;
+	epi->event.data = event->data; /* protected by mtx */
 
 	/*
 	 * Get current event bits. We can safely use the file* here because
@@ -988,16 +987,12 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
 	 */
 	revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);
 
-	spin_lock_irqsave(&ep->lock, flags);
-
-	/* Copy the data member from inside the lock */
-	epi->event.data = event->data;
-
 	/*
 	 * If the item is "hot" and it is not registered inside the ready
 	 * list, push it inside.
	 */
 	if (revents & event->events) {
+		spin_lock_irq(&ep->lock);
 		if (!ep_is_linked(&epi->rdllink)) {
 			list_add_tail(&epi->rdllink, &ep->rdllist);
 
@@ -1007,8 +1002,8 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
 			if (waitqueue_active(&ep->poll_wait))
 				pwake++;
 		}
+		spin_unlock_irq(&ep->lock);
 	}
-	spin_unlock_irqrestore(&ep->lock, flags);
 
 	/* We have to call this outside the lock */
 	if (pwake)