diff options
author | kib <kib@FreeBSD.org> | 2008-12-29 12:07:18 +0000 |
---|---|---|
committer | kib <kib@FreeBSD.org> | 2008-12-29 12:07:18 +0000 |
commit | c3f2d023e878a1a1fcedd705677a7a94826a7555 (patch) | |
tree | 4c6f9041488b2c93ba41bdb9c454f876445240d5 /sys/fs | |
parent | 489c6b7af54b6bdfecf46c055927d2452f709060 (diff) | |
download | FreeBSD-src-c3f2d023e878a1a1fcedd705677a7a94826a7555.zip FreeBSD-src-c3f2d023e878a1a1fcedd705677a7a94826a7555.tar.gz |
After the pfs_vncache_mutex is dropped, another thread may attempt to
do pfs_vncache_alloc() for the same pfs_node and pid. In this case, we
could end up with two vnodes for the same pair. Recheck the cache under
the locked pfs_vncache_mutex after all sleeping operations are done [1].
This case mostly cannot happen now because pseudofs uses exclusive vnode
locking for lookup. But it does drop the vnode lock for dotdot lookups,
and Marcus' pseudofs_vptocnp implementation is vulnerable too.
Do not call free() on the struct pfs_vdata after insmntque() failure,
because vp->v_data points to the structure, and pseudofs_reclaim()
frees it by the call to pfs_vncache_free().
Tested by: pho [1]
Approved by: des
MFC after: 2 weeks
Diffstat (limited to 'sys/fs')
-rw-r--r-- | sys/fs/pseudofs/pseudofs_vncache.c | 39 |
1 file changed, 26 insertions, 13 deletions
diff --git a/sys/fs/pseudofs/pseudofs_vncache.c b/sys/fs/pseudofs/pseudofs_vncache.c index eac35eb..fd2bce1 100644 --- a/sys/fs/pseudofs/pseudofs_vncache.c +++ b/sys/fs/pseudofs/pseudofs_vncache.c @@ -111,7 +111,7 @@ int pfs_vncache_alloc(struct mount *mp, struct vnode **vpp, struct pfs_node *pn, pid_t pid) { - struct pfs_vdata *pvd; + struct pfs_vdata *pvd, *pvd2; struct vnode *vp; int error; @@ -146,19 +146,11 @@ retry: } } mtx_unlock(&pfs_vncache_mutex); - ++pfs_vncache_misses; /* nope, get a new one */ pvd = malloc(sizeof *pvd, M_PFSVNCACHE, M_WAITOK); - mtx_lock(&pfs_vncache_mutex); - if (++pfs_vncache_entries > pfs_vncache_maxentries) - pfs_vncache_maxentries = pfs_vncache_entries; - mtx_unlock(&pfs_vncache_mutex); error = getnewvnode("pseudofs", mp, &pfs_vnodeops, vpp); if (error) { - mtx_lock(&pfs_vncache_mutex); - --pfs_vncache_entries; - mtx_unlock(&pfs_vncache_mutex); free(pvd, M_PFSVNCACHE); return (error); } @@ -200,14 +192,35 @@ retry: vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY); error = insmntque(*vpp, mp); if (error != 0) { - mtx_lock(&pfs_vncache_mutex); - --pfs_vncache_entries; - mtx_unlock(&pfs_vncache_mutex); - free(pvd, M_PFSVNCACHE); *vpp = NULLVP; return (error); } +retry2: mtx_lock(&pfs_vncache_mutex); + /* + * Other thread may race with us, creating the entry we are + * going to insert into the cache. Recheck after + * pfs_vncache_mutex is reacquired. + */ + for (pvd2 = pfs_vncache; pvd2; pvd2 = pvd2->pvd_next) { + if (pvd2->pvd_pn == pn && pvd2->pvd_pid == pid && + pvd2->pvd_vnode->v_mount == mp) { + vp = pvd2->pvd_vnode; + VI_LOCK(vp); + mtx_unlock(&pfs_vncache_mutex); + if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) { + ++pfs_vncache_hits; + vgone(*vpp); + *vpp = vp; + cache_purge(vp); + return (0); + } + goto retry2; + } + } + ++pfs_vncache_misses; + if (++pfs_vncache_entries > pfs_vncache_maxentries) + pfs_vncache_maxentries = pfs_vncache_entries; pvd->pvd_prev = NULL; pvd->pvd_next = pfs_vncache; if (pvd->pvd_next) |