summary | refs | log | tree | commit | diff | stats
path: root/sys
diff options
context:
space:
mode:
authorjhb <jhb@FreeBSD.org>2010-08-20 19:46:50 +0000
committerjhb <jhb@FreeBSD.org>2010-08-20 19:46:50 +0000
commitd4890c88b0febe402398e0a0bb66efdc998dd9c9 (patch)
tree9899b196d40cbc0b664fe2238a340bf770b2bd81 /sys
parenta9738fb2b395d0c304c9a1fcf56a05f4143d9afb (diff)
downloadFreeBSD-src-d4890c88b0febe402398e0a0bb66efdc998dd9c9.zip
FreeBSD-src-d4890c88b0febe402398e0a0bb66efdc998dd9c9.tar.gz
Add dedicated routines to toggle lockmgr flags such as LK_NOSHARE and
LK_CANRECURSE after a lock is created.  Use them to implement macros that
otherwise manipulated the flags directly.  Assert that the associated
lockmgr lock is exclusively locked by the current thread when manipulating
these flags to ensure the flag updates are safe.  This last change required
some minor shuffling in a few filesystems to exclusively lock a brand new
vnode slightly earlier.

Reviewed by:	kib
MFC after:	3 days
Diffstat (limited to 'sys')
-rw-r--r--sys/fs/devfs/devfs_vnops.c2
-rw-r--r--sys/fs/nfsclient/nfs_clnode.c2
-rw-r--r--sys/fs/nfsclient/nfs_clport.c2
-rw-r--r--sys/fs/nwfs/nwfs_node.c2
-rw-r--r--sys/fs/pseudofs/pseudofs_vncache.c2
-rw-r--r--sys/fs/smbfs/smbfs_node.c2
-rw-r--r--sys/gnu/fs/xfs/FreeBSD/xfs_freebsd_iget.c2
-rw-r--r--sys/kern/kern_lock.c28
-rw-r--r--sys/kern/vfs_lookup.c7
-rw-r--r--sys/nfsclient/nfs_node.c2
-rw-r--r--sys/sys/lockmgr.h3
-rw-r--r--sys/sys/vnode.h6
-rw-r--r--sys/ufs/ffs/ffs_softdep.c4
-rw-r--r--sys/ufs/ffs/ffs_vfsops.c2
14 files changed, 47 insertions, 19 deletions
diff --git a/sys/fs/devfs/devfs_vnops.c b/sys/fs/devfs/devfs_vnops.c
index 0bcc093..abf1dfad 100644
--- a/sys/fs/devfs/devfs_vnops.c
+++ b/sys/fs/devfs/devfs_vnops.c
@@ -412,8 +412,8 @@ devfs_allocv(struct devfs_dirent *de, struct mount *mp, int lockmode,
} else {
vp->v_type = VBAD;
}
- VN_LOCK_ASHARE(vp);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWITNESS);
+ VN_LOCK_ASHARE(vp);
mtx_lock(&devfs_de_interlock);
vp->v_data = de;
de->de_vnode = vp;
diff --git a/sys/fs/nfsclient/nfs_clnode.c b/sys/fs/nfsclient/nfs_clnode.c
index e36431f..7d76be0 100644
--- a/sys/fs/nfsclient/nfs_clnode.c
+++ b/sys/fs/nfsclient/nfs_clnode.c
@@ -140,6 +140,7 @@ ncl_nget(struct mount *mntp, u_int8_t *fhp, int fhsize, struct nfsnode **npp)
/*
* NFS supports recursive and shared locking.
*/
+ lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
VN_LOCK_AREC(vp);
VN_LOCK_ASHARE(vp);
/*
@@ -157,7 +158,6 @@ ncl_nget(struct mount *mntp, u_int8_t *fhp, int fhsize, struct nfsnode **npp)
M_NFSFH, M_WAITOK);
bcopy(fhp, np->n_fhp->nfh_fh, fhsize);
np->n_fhp->nfh_len = fhsize;
- lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
error = insmntque(vp, mntp);
if (error != 0) {
*npp = NULL;
diff --git a/sys/fs/nfsclient/nfs_clport.c b/sys/fs/nfsclient/nfs_clport.c
index 4d610b4..595c908 100644
--- a/sys/fs/nfsclient/nfs_clport.c
+++ b/sys/fs/nfsclient/nfs_clport.c
@@ -230,9 +230,9 @@ nfscl_nget(struct mount *mntp, struct vnode *dvp, struct nfsfh *nfhp,
/*
* NFS supports recursive and shared locking.
*/
+ lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
VN_LOCK_AREC(vp);
VN_LOCK_ASHARE(vp);
- lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
error = insmntque(vp, mntp);
if (error != 0) {
*npp = NULL;
diff --git a/sys/fs/nwfs/nwfs_node.c b/sys/fs/nwfs/nwfs_node.c
index fc9b41b..cf6f6d9 100644
--- a/sys/fs/nwfs/nwfs_node.c
+++ b/sys/fs/nwfs/nwfs_node.c
@@ -185,7 +185,6 @@ rescan:
if (dvp) {
np->n_parent = VTONW(dvp)->n_fid;
}
- VN_LOCK_AREC(vp);
sx_xlock(&nwhashlock);
/*
* Another process can create vnode while we blocked in malloc() or
@@ -202,6 +201,7 @@ rescan:
nhpp = NWNOHASH(fid);
LIST_INSERT_HEAD(nhpp, np, n_hash);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
+ VN_LOCK_AREC(vp);
sx_xunlock(&nwhashlock);
ASSERT_VOP_LOCKED(dvp, "nwfs_allocvp");
diff --git a/sys/fs/pseudofs/pseudofs_vncache.c b/sys/fs/pseudofs/pseudofs_vncache.c
index 035f2c7..19cc70d 100644
--- a/sys/fs/pseudofs/pseudofs_vncache.c
+++ b/sys/fs/pseudofs/pseudofs_vncache.c
@@ -189,8 +189,8 @@ retry:
if ((pn->pn_flags & PFS_PROCDEP) != 0)
(*vpp)->v_vflag |= VV_PROCDEP;
pvd->pvd_vnode = *vpp;
- VN_LOCK_AREC(*vpp);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
+ VN_LOCK_AREC(*vpp);
error = insmntque(*vpp, mp);
if (error != 0) {
free(pvd, M_PFSVNCACHE);
diff --git a/sys/fs/smbfs/smbfs_node.c b/sys/fs/smbfs/smbfs_node.c
index a0543e2..661579c 100644
--- a/sys/fs/smbfs/smbfs_node.c
+++ b/sys/fs/smbfs/smbfs_node.c
@@ -253,8 +253,8 @@ loop:
} else if (vp->v_type == VREG)
SMBERROR("new vnode '%s' born without parent ?\n", np->n_name);
- VN_LOCK_AREC(vp);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
+ VN_LOCK_AREC(vp);
smbfs_hash_lock(smp);
LIST_FOREACH(np2, nhpp, n_hash) {
diff --git a/sys/gnu/fs/xfs/FreeBSD/xfs_freebsd_iget.c b/sys/gnu/fs/xfs/FreeBSD/xfs_freebsd_iget.c
index 49cfb5d..e4e1e8d 100644
--- a/sys/gnu/fs/xfs/FreeBSD/xfs_freebsd_iget.c
+++ b/sys/gnu/fs/xfs/FreeBSD/xfs_freebsd_iget.c
@@ -389,8 +389,8 @@ xfs_vn_allocate(xfs_mount_t *mp, xfs_inode_t *ip, struct xfs_vnode **vpp)
return (error);
}
- VN_LOCK_AREC(vp);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
+ VN_LOCK_AREC(vp);
error = insmntque(vp, XVFSTOMNT(XFS_MTOVFS(mp)));
if (error != 0) {
kmem_free(vdata, sizeof(*vdata));
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 18bbf9a..c9829cd 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -396,6 +396,34 @@ lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
STACK_ZERO(lk);
}
+/*
+ * XXX: Gross hacks to manipulate external lock flags after
+ * initialization. Used for certain vnode and buf locks.
+ */
+void
+lockallowshare(struct lock *lk)
+{
+
+ lockmgr_assert(lk, KA_XLOCKED);
+ lk->lock_object.lo_flags &= ~LK_NOSHARE;
+}
+
+void
+lockallowrecurse(struct lock *lk)
+{
+
+ lockmgr_assert(lk, KA_XLOCKED);
+ lk->lock_object.lo_flags |= LO_RECURSABLE;
+}
+
+void
+lockdisablerecurse(struct lock *lk)
+{
+
+ lockmgr_assert(lk, KA_XLOCKED);
+ lk->lock_object.lo_flags &= ~LO_RECURSABLE;
+}
+
void
lockdestroy(struct lock *lk)
{
diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c
index 5b6ccf6..e0684b1 100644
--- a/sys/kern/vfs_lookup.c
+++ b/sys/kern/vfs_lookup.c
@@ -84,14 +84,13 @@ static struct vnode *vp_crossmp;
static void
nameiinit(void *dummy __unused)
{
- int error;
namei_zone = uma_zcreate("NAMEI", MAXPATHLEN, NULL, NULL, NULL, NULL,
UMA_ALIGN_PTR, 0);
- error = getnewvnode("crossmp", NULL, &dead_vnodeops, &vp_crossmp);
- if (error != 0)
- panic("nameiinit: getnewvnode");
+ getnewvnode("crossmp", NULL, &dead_vnodeops, &vp_crossmp);
+ vn_lock(vp_crossmp, LK_EXCLUSIVE);
VN_LOCK_ASHARE(vp_crossmp);
+ VOP_UNLOCK(vp_crossmp, 0);
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nameiinit, NULL);
diff --git a/sys/nfsclient/nfs_node.c b/sys/nfsclient/nfs_node.c
index 947beed..9e558f4 100644
--- a/sys/nfsclient/nfs_node.c
+++ b/sys/nfsclient/nfs_node.c
@@ -150,6 +150,7 @@ nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp, int
/*
* NFS supports recursive and shared locking.
*/
+ lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
VN_LOCK_AREC(vp);
VN_LOCK_ASHARE(vp);
if (fhsize > NFS_SMALLFH) {
@@ -158,7 +159,6 @@ nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp, int
np->n_fhp = &np->n_fh;
bcopy((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize);
np->n_fhsize = fhsize;
- lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
error = insmntque(vp, mntp);
if (error != 0) {
*npp = NULL;
diff --git a/sys/sys/lockmgr.h b/sys/sys/lockmgr.h
index 545dfa2..4c3a272 100644
--- a/sys/sys/lockmgr.h
+++ b/sys/sys/lockmgr.h
@@ -73,7 +73,10 @@ void _lockmgr_assert(struct lock *lk, int what, const char *file, int line);
#endif
void _lockmgr_disown(struct lock *lk, const char *file, int line);
+void lockallowrecurse(struct lock *lk);
+void lockallowshare(struct lock *lk);
void lockdestroy(struct lock *lk);
+void lockdisablerecurse(struct lock *lk);
void lockinit(struct lock *lk, int prio, const char *wmesg, int timo,
int flags);
#ifdef DDB
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index c38d645..e82f8ea 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -419,10 +419,8 @@ extern struct vattr va_null; /* predefined null vattr structure */
#define VI_UNLOCK(vp) mtx_unlock(&(vp)->v_interlock)
#define VI_MTX(vp) (&(vp)->v_interlock)
-#define VN_LOCK_AREC(vp) \
- ((vp)->v_vnlock->lock_object.lo_flags |= LO_RECURSABLE)
-#define VN_LOCK_ASHARE(vp) \
- ((vp)->v_vnlock->lock_object.lo_flags &= ~LK_NOSHARE)
+#define VN_LOCK_AREC(vp) lockallowrecurse((vp)->v_vnlock)
+#define VN_LOCK_ASHARE(vp) lockallowshare((vp)->v_vnlock)
#endif /* _KERNEL */
diff --git a/sys/ufs/ffs/ffs_softdep.c b/sys/ufs/ffs/ffs_softdep.c
index 039e436..b666c0f 100644
--- a/sys/ufs/ffs/ffs_softdep.c
+++ b/sys/ufs/ffs/ffs_softdep.c
@@ -904,8 +904,8 @@ MTX_SYSINIT(softdep_lock, &lk, "Softdep Lock", MTX_DEF);
#define ACQUIRE_LOCK(lk) mtx_lock(lk)
#define FREE_LOCK(lk) mtx_unlock(lk)
-#define BUF_AREC(bp) ((bp)->b_lock.lock_object.lo_flags |= LO_RECURSABLE)
-#define BUF_NOREC(bp) ((bp)->b_lock.lock_object.lo_flags &= ~LO_RECURSABLE)
+#define BUF_AREC(bp) lockallowrecurse(&(bp)->b_lock)
+#define BUF_NOREC(bp) lockdisablerecurse(&(bp)->b_lock)
/*
* Worklist queue management.
diff --git a/sys/ufs/ffs/ffs_vfsops.c b/sys/ufs/ffs/ffs_vfsops.c
index c099732..ccf346d 100644
--- a/sys/ufs/ffs/ffs_vfsops.c
+++ b/sys/ufs/ffs/ffs_vfsops.c
@@ -1501,6 +1501,7 @@ ffs_vgetf(mp, ino, flags, vpp, ffs_flags)
/*
* FFS supports recursive locking.
*/
+ lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
VN_LOCK_AREC(vp);
vp->v_data = ip;
vp->v_bufobj.bo_bsize = fs->fs_bsize;
@@ -1518,7 +1519,6 @@ ffs_vgetf(mp, ino, flags, vpp, ffs_flags)
}
#endif
- lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
if (ffs_flags & FFSV_FORCEINSMQ)
vp->v_vflag |= VV_FORCEINSMQ;
error = insmntque(vp, mp);
OpenPOWER on IntegriCloud