author		jhb <jhb@FreeBSD.org>	2008-09-24 18:51:33 +0000
committer	jhb <jhb@FreeBSD.org>	2008-09-24 18:51:33 +0000
commit		1161006cb695195ef1ddc08bd587d10755925084 (patch)
tree		ee684698e82de9b4671dd17a420741a7518401dd
parent		8d01b3e526211bb7a97d75c934a16413164f2b81 (diff)
Part 1 of making shared lookups more resilient with respect to forced
unmounts.  When we upgrade a vnode lock from shared to exclusive during a
name cache lookup, fail the lookup with EBADF if the vnode is invalidated
while we are waiting for the exclusive lock.  Also, for correctness (though
I'm not sure it can occur in practice), downgrade an exclusively locked
vnode if it should be share locked.

Tested by:	pho
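As a quick illustration of the lock handling described above, here is a
minimal, self-contained sketch (the helper name example_relock_cached_vnode
and the include list are illustrative only; the committed change lives
inside cache_lookup(), see the sys/kern/vfs_cache.c hunk below):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/vnode.h>

/*
 * Re-lock a vnode returned by the name cache so it matches the lock type
 * the caller asked for.  If upgrading to an exclusive lock had to sleep
 * and a forced unmount doomed the vnode in the meantime, drop the
 * reference and report EBADF instead of handing back a dead vnode.
 * Returns -1 (cache hit) on success, mirroring cache_lookup().
 */
static int
example_relock_cached_vnode(struct vnode **vpp, int ltype)
{

	if (ltype != VOP_ISLOCKED(*vpp)) {
		if (ltype == LK_EXCLUSIVE) {
			vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
			if ((*vpp)->v_iflag & VI_DOOMED) {
				/* Forced unmount recycled the vnode. */
				vrele(*vpp);
				*vpp = NULL;
				return (EBADF);
			}
		} else
			vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY);
	}
	return (-1);
}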
-rw-r--r--	sys/kern/vfs_cache.c	26
-rw-r--r--	sys/nfsclient/nfs_vnops.c	5
2 files changed, 22 insertions, 9 deletions
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index ba97f38..33e8759 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -304,7 +304,9 @@ cache_zap(ncp)
  * succeeds, the vnode is returned in *vpp, and a status of -1 is
  * returned. If the lookup determines that the name does not exist
  * (negative cacheing), a status of ENOENT is returned. If the lookup
- * fails, a status of zero is returned.
+ * fails, a status of zero is returned. If the directory vnode is
+ * recycled out from under us due to a forced unmount, a status of
+ * EBADF is returned.
  *
  * vpp is locked and ref'd on return. If we're looking up DOTDOT, dvp is
  * unlocked. If we're looking up . an extra ref is taken, but the lock is
@@ -428,10 +430,18 @@ success:
 		 * differently...
 		 */
 		ltype = cnp->cn_lkflags & LK_TYPE_MASK;
-		if (ltype == VOP_ISLOCKED(*vpp))
-			return (-1);
-		else if (ltype == LK_EXCLUSIVE)
-			vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
+		if (ltype != VOP_ISLOCKED(*vpp)) {
+			if (ltype == LK_EXCLUSIVE) {
+				vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
+				if ((*vpp)->v_iflag & VI_DOOMED) {
+					/* forced unmount */
+					vrele(*vpp);
+					*vpp = NULL;
+					return (EBADF);
+				}
+			} else
+				vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY);
+		}
 		return (-1);
 	}
 	ltype = 0;	/* silence gcc warning */
@@ -671,9 +681,9 @@ vfs_cache_lookup(ap)
 	error = cache_lookup(dvp, vpp, cnp);
 	if (error == 0)
 		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
-	if (error == ENOENT)
-		return (error);
-	return (0);
+	if (error == -1)
+		return (0);
+	return (error);
 }
diff --git a/sys/nfsclient/nfs_vnops.c b/sys/nfsclient/nfs_vnops.c
index 04b3b34..bbf6810 100644
--- a/sys/nfsclient/nfs_vnops.c
+++ b/sys/nfsclient/nfs_vnops.c
@@ -888,7 +888,10 @@ nfs_lookup(struct vop_lookup_args *ap)
 		*vpp = NULLVP;
 		return (error);
 	}
-	if ((error = cache_lookup(dvp, vpp, cnp)) && error != ENOENT) {
+	error = cache_lookup(dvp, vpp, cnp);
+	if (error > 0 && error != ENOENT)
+		return (error);
+	if (error == -1) {
 		struct vattr vattr;
 		newvp = *vpp;
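For reference, the return-value contract of cache_lookup() after this
change, as a caller sees it.  This is a minimal sketch: the wrapper
example_cached_lookup() is hypothetical, but it follows the same shape as
the vfs_cache_lookup() hunk above and assumes the three-argument
cache_lookup() used by this code.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/vnode.h>

static int
example_cached_lookup(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp)
{
	int error;

	error = cache_lookup(dvp, vpp, cnp);
	if (error == 0)			/* cache miss: do the real lookup */
		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
	if (error == -1)		/* positive hit: *vpp locked and ref'd */
		return (0);
	/* ENOENT (negative hit) or EBADF (dvp recycled by forced unmount). */
	return (error);
}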