author     alfred <alfred@FreeBSD.org>  2004-07-02 07:40:10 +0000
committer  alfred <alfred@FreeBSD.org>  2004-07-02 07:40:10 +0000
commit     f05df8a8816bd5a9d5ae811c1dc425c859f28c7f (patch)
tree       84190decb4436cacad5021b8e1fc2c484b0707f3
parent     a6f97943ad437b11dcb7fb2431f764ee816d7828 (diff)
We allocate an array of pointers to the global file table without holding the
filelist_lock, which means the file list can change size while the allocation
is in progress. Detect this race and retry the allocation.
-rw-r--r--  sys/kern/uipc_usrreq.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/sys/kern/uipc_usrreq.c b/sys/kern/uipc_usrreq.c
index 2a992bf..7474012 100644
--- a/sys/kern/uipc_usrreq.c
+++ b/sys/kern/uipc_usrreq.c
@@ -1395,6 +1395,8 @@ unp_gc()
 	register struct socket *so;
 	struct file **extra_ref, **fpp;
 	int nunref, i;
+	int nfiles_snap;
+	int nfiles_slack = 20;
 
 	UNP_LOCK_ASSERT();
 
@@ -1537,8 +1539,17 @@ unp_gc()
 	 *
 	 * 91/09/19, bsy@cs.cmu.edu
 	 */
-	extra_ref = malloc(nfiles * sizeof(struct file *), M_TEMP, M_WAITOK);
+again:
+	nfiles_snap = nfiles + nfiles_slack;	/* some slack */
+	extra_ref = malloc(nfiles_snap * sizeof(struct file *), M_TEMP,
+	    M_WAITOK);
 	sx_slock(&filelist_lock);
+	if (nfiles_snap < nfiles) {
+		sx_sunlock(&filelist_lock);
+		free(extra_ref, M_TEMP);
+		nfiles_slack += 20;
+		goto again;
+	}
 	for (nunref = 0, fp = LIST_FIRST(&filehead), fpp = extra_ref;
 	    fp != NULL; fp = nextfp) {
 		nextfp = LIST_NEXT(fp, f_list);
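
For reference, here is a minimal userland sketch of the same snapshot-and-retry
idiom the patch uses. This is not the kernel code: a pthread rwlock stands in
for the kernel sx lock, and the names (g_filelist_lock, g_nfiles,
snapshot_files) are hypothetical. Note also that userland malloc() can fail,
whereas the kernel's M_WAITOK allocation cannot.

	/*
	 * Sketch of the snapshot-with-slack allocation pattern: read the
	 * size without the lock, allocate with some padding, then re-check
	 * under the shared lock and retry if the list outgrew the snapshot.
	 */
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	static pthread_rwlock_t g_filelist_lock = PTHREAD_RWLOCK_INITIALIZER;
	static int g_nfiles = 100;	/* count protected by g_filelist_lock */

	static void **
	snapshot_files(int *nsnap)
	{
		void **extra_ref;
		int nfiles_snap;
		int nfiles_slack = 20;

	again:
		nfiles_snap = g_nfiles + nfiles_slack;	/* some slack */
		extra_ref = malloc(nfiles_snap * sizeof(void *));
		if (extra_ref == NULL)
			return (NULL);
		pthread_rwlock_rdlock(&g_filelist_lock);
		if (nfiles_snap < g_nfiles) {
			/* The list grew while we were allocating: retry. */
			pthread_rwlock_unlock(&g_filelist_lock);
			free(extra_ref);
			nfiles_slack += 20;
			goto again;
		}
		*nsnap = nfiles_snap;
		return (extra_ref);	/* caller walks the list, then unlocks */
	}

	int
	main(void)
	{
		int n;
		void **ref = snapshot_files(&n);

		if (ref != NULL) {
			printf("snapshot capacity: %d entries\n", n);
			pthread_rwlock_unlock(&g_filelist_lock);
			free(ref);
		}
		return (0);
	}

The slack keeps the common case to a single allocation while avoiding a
malloc() call with the lock held; growing the slack on each retry makes it
increasingly likely that the next allocation is large enough.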