summaryrefslogtreecommitdiffstats
path: root/sys/kern
diff options
context:
space:
mode:
authorkib <kib@FreeBSD.org>2011-02-01 13:33:49 +0000
committerkib <kib@FreeBSD.org>2011-02-01 13:33:49 +0000
commitd690434e20997eb77185336e6858f63acfc2f114 (patch)
treedddd226f3af786fcd43789e74fbbfd7c15ad17ba /sys/kern
parent8d21b8a169673629dd4d42ca14c9bf19531dbbd2 (diff)
downloadFreeBSD-src-d690434e20997eb77185336e6858f63acfc2f114.zip
FreeBSD-src-d690434e20997eb77185336e6858f63acfc2f114.tar.gz
The unp_gc() function drops and reacquires the lock between scan and
collect phases. The unp_discard() function executes unp_externalize_fp(), which might make the socket eligible for gc-ing, and then, later, taskqueue will close the socket. Since unp_gc() dropped the list lock to do the malloc, close might happen after the mark step but before the collection step, causing collection to not find the socket and miss one array element. I believe that the race was there before r216158, but the stated revision made the window much wider by postponing the close to taskqueue sometimes. Only process as many array elements as we find sockets during the second phase of gc [1]. Take linkage lock and recheck the eligibility of the socket for gc, as well as call fhold() under the linkage lock. Reported and tested by: jmallett Submitted by: jmallett [1] Reviewed by: rwatson, jeff (possibly) MFC after: 1 week
Diffstat (limited to 'sys/kern')
-rw-r--r--sys/kern/uipc_usrreq.c28
1 files changed, 16 insertions, 12 deletions
diff --git a/sys/kern/uipc_usrreq.c b/sys/kern/uipc_usrreq.c
index f158cc5..d049688 100644
--- a/sys/kern/uipc_usrreq.c
+++ b/sys/kern/uipc_usrreq.c
@@ -2153,9 +2153,9 @@ unp_gc(__unused void *arg, int pending)
struct unp_head *heads[] = { &unp_dhead, &unp_shead, &unp_sphead,
NULL };
struct unp_head **head;
- struct file **unref;
+ struct file *f, **unref;
struct unpcb *unp;
- int i;
+ int i, total;
unp_taskcount++;
UNP_LIST_LOCK();
@@ -2193,33 +2193,37 @@ unp_gc(__unused void *arg, int pending)
* Iterate looking for sockets which have been specifically marked
* as as unreachable and store them locally.
*/
+ UNP_LINK_RLOCK();
UNP_LIST_LOCK();
- for (i = 0, head = heads; *head != NULL; head++)
+ for (total = 0, head = heads; *head != NULL; head++)
LIST_FOREACH(unp, *head, unp_link)
- if (unp->unp_gcflag & UNPGC_DEAD) {
- unref[i++] = unp->unp_file;
- fhold(unp->unp_file);
- KASSERT(unp->unp_file != NULL,
- ("unp_gc: Invalid unpcb."));
- KASSERT(i <= unp_unreachable,
+ if ((unp->unp_gcflag & UNPGC_DEAD) != 0) {
+ f = unp->unp_file;
+ if (unp->unp_msgcount == 0 || f == NULL ||
+ f->f_count != unp->unp_msgcount)
+ continue;
+ unref[total++] = f;
+ fhold(f);
+ KASSERT(total <= unp_unreachable,
("unp_gc: incorrect unreachable count."));
}
UNP_LIST_UNLOCK();
+ UNP_LINK_RUNLOCK();
/*
* Now flush all sockets, free'ing rights. This will free the
* struct files associated with these sockets but leave each socket
* with one remaining ref.
*/
- for (i = 0; i < unp_unreachable; i++)
+ for (i = 0; i < total; i++)
sorflush(unref[i]->f_data);
/*
* And finally release the sockets so they can be reclaimed.
*/
- for (i = 0; i < unp_unreachable; i++)
+ for (i = 0; i < total; i++)
fdrop(unref[i], NULL);
- unp_recycled += unp_unreachable;
+ unp_recycled += total;
free(unref, M_TEMP);
}
OpenPOWER on IntegriCloud