From c3f2d023e878a1a1fcedd705677a7a94826a7555 Mon Sep 17 00:00:00 2001
From: kib
Date: Mon, 29 Dec 2008 12:07:18 +0000
Subject: After the pfs_vncache_mutex is dropped, another thread may attempt
 to do pfs_vncache_alloc() for the same pfs_node and pid. In this case, we
 could end up with two vnodes for the pair. Recheck the cache under the
 locked pfs_vncache_mutex after all sleeping operations are done [1].

This case mostly cannot happen now because pseudofs uses exclusive vnode
locking for lookup. But it does drop the vnode lock for dotdot lookups, and
Marcus' pseudofs_vptocnp implementation is vulnerable too.

Do not call free() on the struct pfs_vdata after insmntque() failure,
because vp->v_data points to the structure, and pseudofs_reclaim() frees it
by the call to pfs_vncache_free().

Tested by:	pho [1]
Approved by:	des
MFC after:	2 weeks
---
 sys/fs/pseudofs/pseudofs_vncache.c | 39 ++++++++++++++++++++++++++-------------
 1 file changed, 26 insertions(+), 13 deletions(-)

diff --git a/sys/fs/pseudofs/pseudofs_vncache.c b/sys/fs/pseudofs/pseudofs_vncache.c
index eac35eb..fd2bce1 100644
--- a/sys/fs/pseudofs/pseudofs_vncache.c
+++ b/sys/fs/pseudofs/pseudofs_vncache.c
@@ -111,7 +111,7 @@ int
 pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
 		  struct pfs_node *pn, pid_t pid)
 {
-	struct pfs_vdata *pvd;
+	struct pfs_vdata *pvd, *pvd2;
 	struct vnode *vp;
 	int error;
 
@@ -146,19 +146,11 @@ retry:
 		}
 	}
 	mtx_unlock(&pfs_vncache_mutex);
-	++pfs_vncache_misses;
 
 	/* nope, get a new one */
 	pvd = malloc(sizeof *pvd, M_PFSVNCACHE, M_WAITOK);
-	mtx_lock(&pfs_vncache_mutex);
-	if (++pfs_vncache_entries > pfs_vncache_maxentries)
-		pfs_vncache_maxentries = pfs_vncache_entries;
-	mtx_unlock(&pfs_vncache_mutex);
 	error = getnewvnode("pseudofs", mp, &pfs_vnodeops, vpp);
 	if (error) {
-		mtx_lock(&pfs_vncache_mutex);
-		--pfs_vncache_entries;
-		mtx_unlock(&pfs_vncache_mutex);
 		free(pvd, M_PFSVNCACHE);
 		return (error);
 	}
@@ -200,14 +192,35 @@ retry:
 	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
 	error = insmntque(*vpp, mp);
 	if (error != 0) {
-		mtx_lock(&pfs_vncache_mutex);
-		--pfs_vncache_entries;
-		mtx_unlock(&pfs_vncache_mutex);
-		free(pvd, M_PFSVNCACHE);
 		*vpp = NULLVP;
 		return (error);
 	}
+retry2:
 	mtx_lock(&pfs_vncache_mutex);
+	/*
+	 * Other thread may race with us, creating the entry we are
+	 * going to insert into the cache. Recheck after
+	 * pfs_vncache_mutex is reacquired.
+	 */
+	for (pvd2 = pfs_vncache; pvd2; pvd2 = pvd2->pvd_next) {
+		if (pvd2->pvd_pn == pn && pvd2->pvd_pid == pid &&
+		    pvd2->pvd_vnode->v_mount == mp) {
+			vp = pvd2->pvd_vnode;
+			VI_LOCK(vp);
+			mtx_unlock(&pfs_vncache_mutex);
+			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
+				++pfs_vncache_hits;
+				vgone(*vpp);
+				*vpp = vp;
+				cache_purge(vp);
+				return (0);
+			}
+			goto retry2;
+		}
+	}
+	++pfs_vncache_misses;
+	if (++pfs_vncache_entries > pfs_vncache_maxentries)
+		pfs_vncache_maxentries = pfs_vncache_entries;
 	pvd->pvd_prev = NULL;
 	pvd->pvd_next = pfs_vncache;
 	if (pvd->pvd_next)
-- 
cgit v1.1
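
The fix above is an instance of the classic lookup/unlock/sleep/relock/recheck
pattern: any thread that drops the cache mutex around a sleeping operation
must assume a racing thread inserted the same entry in the meantime. Below is
a minimal userland sketch of that pattern using POSIX threads rather than the
kernel mtx(9) API; all names here (cache_entry, cache_lookup_locked,
cache_lookup_or_create) are hypothetical illustrations, not pseudofs code.

	/*
	 * Simplified sketch: a lookup-or-create cache where the lock
	 * must be dropped around a sleeping allocation, so the cache
	 * has to be rechecked after the lock is reacquired.
	 */
	#include <pthread.h>
	#include <stdlib.h>

	struct cache_entry {
		int			 key;
		struct cache_entry	*next;
	};

	static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;
	static struct cache_entry *cache_head;

	/* Caller must hold cache_mutex. */
	static struct cache_entry *
	cache_lookup_locked(int key)
	{
		struct cache_entry *e;

		for (e = cache_head; e != NULL; e = e->next)
			if (e->key == key)
				return (e);
		return (NULL);
	}

	struct cache_entry *
	cache_lookup_or_create(int key)
	{
		struct cache_entry *e, *e2;

		pthread_mutex_lock(&cache_mutex);
		e = cache_lookup_locked(key);
		pthread_mutex_unlock(&cache_mutex);
		if (e != NULL)
			return (e);

		/* Sleeping allocation; other threads may race with us here. */
		e = malloc(sizeof(*e));
		if (e == NULL)
			return (NULL);
		e->key = key;

		pthread_mutex_lock(&cache_mutex);
		/*
		 * Recheck: another thread may have inserted the same key
		 * while the mutex was dropped. Prefer the existing entry
		 * and discard ours rather than inserting a duplicate.
		 */
		e2 = cache_lookup_locked(key);
		if (e2 != NULL) {
			pthread_mutex_unlock(&cache_mutex);
			free(e);
			return (e2);
		}
		e->next = cache_head;
		cache_head = e;
		pthread_mutex_unlock(&cache_mutex);
		return (e);
	}

The patch does the same thing with two extra complications: losing the race
means discarding the freshly allocated vnode with vgone() and taking a
reference on the existing one with vget(), and a failed vget() forces a jump
back to retry2 because the cached vnode may have been reclaimed while the
mutex was dropped.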