author    attilio <attilio@FreeBSD.org>    2008-01-10 01:10:58 +0000
committer attilio <attilio@FreeBSD.org>    2008-01-10 01:10:58 +0000
commit    18d0a0dd51c7995ce9e549616f78ef724096b1bd (patch)
tree      c4e28d990eaa525916ab09f2bd1d9c6ddf2c8dea /sys/kern
parent    8df9c26bfdf7538c15226c902c07a0fe350ace36 (diff)
vn_lock() is currently only used with 'curthread' passed as its thread argument. Remove this argument and pass curthread directly to the underlying VOP_LOCK1() VFS method. This change makes the code cleaner and, in particular, removes an annoying dependency, which helps the upcoming lockmgr() cleanup. The KPI is, obviously, changed.

The manpage and FreeBSD_version will be updated in further commits.

As a side note, upcoming commits will address a similar cleanup of the VFS methods themselves, in particular vop_lock1 and vop_unlock.

Tested by: Diego Sardina <siarodx at gmail dot com>, Andrea Di Pasquale <whyx dot it at gmail dot com>
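For illustration, a minimal before/after sketch of the KPI change (hand-written, not part of the diff below; vn_lock() is the public interface wrapping _vn_lock(), which records file and line):

	/* Before this commit: every caller passed a thread pointer. */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);

	/* After: the thread argument is gone; curthread is supplied
	 * internally when _vn_lock() invokes VOP_LOCK1(). */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);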
Diffstat (limited to 'sys/kern')
-rw-r--r--sys/kern/imgact_aout.c5
-rw-r--r--sys/kern/imgact_elf.c4
-rw-r--r--sys/kern/imgact_gzip.c2
-rw-r--r--sys/kern/kern_alq.c2
-rw-r--r--sys/kern/kern_descrip.c4
-rw-r--r--sys/kern/kern_exec.c6
-rw-r--r--sys/kern/kern_jail.c2
-rw-r--r--sys/kern/kern_ktrace.c2
-rw-r--r--sys/kern/kern_proc.c3
-rw-r--r--sys/kern/kern_sig.c2
-rw-r--r--sys/kern/uipc_mqueue.c7
-rw-r--r--sys/kern/uipc_syscalls.c4
-rw-r--r--sys/kern/vfs_acl.c6
-rw-r--r--sys/kern/vfs_aio.c2
-rw-r--r--sys/kern/vfs_bio.c2
-rw-r--r--sys/kern/vfs_cache.c4
-rw-r--r--sys/kern/vfs_extattr.c8
-rw-r--r--sys/kern/vfs_lookup.c19
-rw-r--r--sys/kern/vfs_mount.c4
-rw-r--r--sys/kern/vfs_subr.c12
-rw-r--r--sys/kern/vfs_syscalls.c40
-rw-r--r--sys/kern/vfs_vnops.c33
22 files changed, 88 insertions(+), 85 deletions(-)
diff --git a/sys/kern/imgact_aout.c b/sys/kern/imgact_aout.c
index 45b39c2..2a0d7b0 100644
--- a/sys/kern/imgact_aout.c
+++ b/sys/kern/imgact_aout.c
@@ -98,7 +98,6 @@ exec_aout_imgact(imgp)
struct image_params *imgp;
{
const struct exec *a_out = (const struct exec *) imgp->image_header;
- struct thread *td = curthread;
struct vmspace *vmspace;
vm_map_t map;
vm_object_t object;
@@ -193,14 +192,14 @@ exec_aout_imgact(imgp)
* However, in cases where the vnode lock is external, such as nullfs,
* v_usecount may become zero.
*/
- VOP_UNLOCK(imgp->vp, 0, td);
+ VOP_UNLOCK(imgp->vp, 0, curthread);
/*
* Destroy old process VM and create a new one (with a new stack)
*/
error = exec_new_vmspace(imgp, &aout_sysvec);
- vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
if (error)
return (error);
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index 0742f63..5700189 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -680,7 +680,7 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
error = exec_new_vmspace(imgp, sv);
imgp->proc->p_sysent = sv;
- vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
if (error)
return (error);
@@ -824,7 +824,7 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
error = __elfN(load_file)(imgp->proc, interp, &addr,
&imgp->entry_addr, sv->sv_pagesize);
}
- vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
if (error != 0) {
uprintf("ELF interpreter %s not found\n", interp);
return (error);
diff --git a/sys/kern/imgact_gzip.c b/sys/kern/imgact_gzip.c
index 3cd1dab..5701894 100644
--- a/sys/kern/imgact_gzip.c
+++ b/sys/kern/imgact_gzip.c
@@ -241,7 +241,7 @@ do_aout_hdr(struct imgact_gzip * gz)
*/
error = exec_new_vmspace(gz->ip, &aout_sysvec);
- vn_lock(gz->ip->vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(gz->ip->vp, LK_EXCLUSIVE | LK_RETRY);
if (error) {
gz->where = __LINE__;
return (error);
diff --git a/sys/kern/kern_alq.c b/sys/kern/kern_alq.c
index 6d132a9..5ed883c 100644
--- a/sys/kern/kern_alq.c
+++ b/sys/kern/kern_alq.c
@@ -294,7 +294,7 @@ alq_doio(struct alq *alq)
*/
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
vn_start_write(vp, &mp, V_WAIT);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
VOP_LEASE(vp, td, alq->aq_cred, LEASE_WRITE);
/*
* XXX: VOP_WRITE error checks are ignored.
diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c
index 3b4c0f4..b68ac30 100644
--- a/sys/kern/kern_descrip.c
+++ b/sys/kern/kern_descrip.c
@@ -1183,7 +1183,7 @@ fpathconf(struct thread *td, struct fpathconf_args *uap)
if (vp != NULL) {
int vfslocked;
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_PATHCONF(vp, uap->name, td->td_retval);
VOP_UNLOCK(vp, 0, td);
VFS_UNLOCK_GIANT(vfslocked);
@@ -2579,7 +2579,7 @@ sysctl_kern_proc_filedesc(SYSCTL_HANDLER_ARGS)
fullpath = "-";
FILEDESC_SUNLOCK(fdp);
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
vn_fullpath(curthread, vp, &fullpath, &freepath);
vput(vp);
VFS_UNLOCK_GIANT(vfslocked);
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index b0a8dcc..97eda1d 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -493,7 +493,7 @@ interpret:
/* close files on exec */
VOP_UNLOCK(imgp->vp, 0, td);
fdcloseexec(td);
- vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
/* Get a reference to the vnode prior to locking the proc */
VREF(ndp->ni_vp);
@@ -593,7 +593,7 @@ interpret:
setugidsafety(td);
VOP_UNLOCK(imgp->vp, 0, td);
error = fdcheckstd(td);
- vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
if (error != 0)
goto done1;
PROC_LOCK(p);
@@ -749,7 +749,7 @@ done1:
if (tracecred != NULL)
crfree(tracecred);
#endif
- vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
if (oldargs != NULL)
pargs_drop(oldargs);
if (newargs != NULL)
diff --git a/sys/kern/kern_jail.c b/sys/kern/kern_jail.c
index 1bcc264..d77114f 100644
--- a/sys/kern/kern_jail.c
+++ b/sys/kern/kern_jail.c
@@ -254,7 +254,7 @@ jail_attach(struct thread *td, struct jail_attach_args *uap)
sx_sunlock(&allprison_lock);
vfslocked = VFS_LOCK_GIANT(pr->pr_root->v_mount);
- vn_lock(pr->pr_root, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(pr->pr_root, LK_EXCLUSIVE | LK_RETRY);
if ((error = change_dir(pr->pr_root, td)) != 0)
goto e_unlock;
#ifdef MAC
diff --git a/sys/kern/kern_ktrace.c b/sys/kern/kern_ktrace.c
index 96f3da4..00562bc 100644
--- a/sys/kern/kern_ktrace.c
+++ b/sys/kern/kern_ktrace.c
@@ -924,7 +924,7 @@ ktr_writerequest(struct thread *td, struct ktr_request *req)
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
vn_start_write(vp, &mp, V_WAIT);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
(void)VOP_LEASE(vp, td, cred, LEASE_WRITE);
#ifdef MAC
error = mac_vnode_check_write(cred, NOCRED, vp);
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index c2e33a7..be70eed 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -1397,8 +1397,7 @@ sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
VM_OBJECT_UNLOCK(obj);
if (vp != NULL) {
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY,
- curthread);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
vn_fullpath(curthread, vp, &fullpath,
&freepath);
vput(vp);
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index 24547a1..577bcac 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -3141,7 +3141,7 @@ restart:
vattr.va_size = 0;
if (set_core_nodump_flag)
vattr.va_flags = UF_NODUMP;
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
VOP_LEASE(vp, td, cred, LEASE_WRITE);
VOP_SETATTR(vp, &vattr, cred, td);
VOP_UNLOCK(vp, 0, td);
diff --git a/sys/kern/uipc_mqueue.c b/sys/kern/uipc_mqueue.c
index 2d2a477..4cfa21d 100644
--- a/sys/kern/uipc_mqueue.c
+++ b/sys/kern/uipc_mqueue.c
@@ -706,8 +706,7 @@ mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn)
if (vd != NULL) {
if (vget(vd->mv_vnode, 0, curthread) == 0) {
*vpp = vd->mv_vnode;
- vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE,
- curthread);
+ vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE);
return (0);
}
/* XXX if this can happen, we're in trouble */
@@ -716,7 +715,7 @@ mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn)
error = getnewvnode("mqueue", mp, &mqfs_vnodeops, vpp);
if (error)
return (error);
- vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, curthread);
+ vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
error = insmntque(*vpp, mp);
if (error != 0) {
*vpp = NULLVP;
@@ -824,7 +823,7 @@ mqfs_lookupx(struct vop_cachedlookup_args *ap)
KASSERT(pd->mn_parent, ("non-root directory has no parent"));
pn = pd->mn_parent;
error = mqfs_allocv(dvp->v_mount, vpp, pn);
- vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
return (error);
}
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index 616afa0..2eb5087 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -1777,7 +1777,7 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
if ((error = fgetvp_read(td, uap->fd, &vp)) != 0)
goto out;
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
obj = vp->v_object;
if (obj != NULL) {
/*
@@ -2024,7 +2024,7 @@ retry_space:
*/
bsize = vp->v_mount->mnt_stat.f_iosize;
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
- vn_lock(vp, LK_SHARED | LK_RETRY, td);
+ vn_lock(vp, LK_SHARED | LK_RETRY);
/*
* XXXMAC: Because we don't have fp->f_cred
diff --git a/sys/kern/vfs_acl.c b/sys/kern/vfs_acl.c
index ea33f66..0afe7ac 100644
--- a/sys/kern/vfs_acl.c
+++ b/sys/kern/vfs_acl.c
@@ -91,7 +91,7 @@ vacl_set_acl(struct thread *td, struct vnode *vp, acl_type_t type,
if (error != 0)
return (error);
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef MAC
error = mac_vnode_check_setacl(td->td_ucred, vp, type, &inkernacl);
if (error != 0)
@@ -117,7 +117,7 @@ vacl_get_acl(struct thread *td, struct vnode *vp, acl_type_t type,
int error;
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef MAC
error = mac_vnode_check_getacl(td->td_ucred, vp, type);
if (error != 0)
@@ -146,7 +146,7 @@ vacl_delete(struct thread *td, struct vnode *vp, acl_type_t type)
if (error)
return (error);
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef MAC
error = mac_vnode_check_deleteacl(td->td_ucred, vp, type);
if (error)
diff --git a/sys/kern/vfs_aio.c b/sys/kern/vfs_aio.c
index 264f291..ce5fd58 100644
--- a/sys/kern/vfs_aio.c
+++ b/sys/kern/vfs_aio.c
@@ -764,7 +764,7 @@ aio_fsync_vnode(struct thread *td, struct vnode *vp)
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
goto drop;
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (vp->v_object != NULL) {
VM_OBJECT_LOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index f4a2a63..e799d8d 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -2206,7 +2206,7 @@ flushbufqueues(int queue, int flushdeps)
BUF_UNLOCK(bp);
continue;
}
- if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) == 0) {
+ if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
mtx_unlock(&bqlock);
CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
bp, bp->b_vp, bp->b_flags);
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index eeda3b9..432e948 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -429,7 +429,7 @@ success:
if (ltype == VOP_ISLOCKED(*vpp, td))
return (-1);
else if (ltype == LK_EXCLUSIVE)
- vn_lock(*vpp, LK_UPGRADE | LK_RETRY, td);
+ vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
return (-1);
}
ltype = 0; /* silence gcc warning */
@@ -441,7 +441,7 @@ success:
CACHE_UNLOCK();
error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, td);
if (cnp->cn_flags & ISDOTDOT)
- vn_lock(dvp, ltype | LK_RETRY, td);
+ vn_lock(dvp, ltype | LK_RETRY);
if ((cnp->cn_flags & ISLASTCN) && (cnp->cn_lkflags & LK_EXCLUSIVE))
ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
if (error) {
diff --git a/sys/kern/vfs_extattr.c b/sys/kern/vfs_extattr.c
index 0d414f8..e73f312 100644
--- a/sys/kern/vfs_extattr.c
+++ b/sys/kern/vfs_extattr.c
@@ -162,7 +162,7 @@ extattr_set_vp(struct vnode *vp, int attrnamespace, const char *attrname,
if (error)
return (error);
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
aiov.iov_base = data;
aiov.iov_len = nbytes;
@@ -328,7 +328,7 @@ extattr_get_vp(struct vnode *vp, int attrnamespace, const char *attrname,
VFS_ASSERT_GIANT(vp->v_mount);
VOP_LEASE(vp, td, td->td_ucred, LEASE_READ);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
/*
* Slightly unusual semantics: if the user provides a NULL data
@@ -509,7 +509,7 @@ extattr_delete_vp(struct vnode *vp, int attrnamespace, const char *attrname,
if (error)
return (error);
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef MAC
error = mac_vnode_check_deleteextattr(td->td_ucred, vp, attrnamespace,
@@ -651,7 +651,7 @@ extattr_list_vp(struct vnode *vp, int attrnamespace, void *data,
VFS_ASSERT_GIANT(vp->v_mount);
VOP_LEASE(vp, td, td->td_ucred, LEASE_READ);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
auiop = NULL;
sizep = NULL;
diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c
index 0841fd3..55c496a 100644
--- a/sys/kern/vfs_lookup.c
+++ b/sys/kern/vfs_lookup.c
@@ -408,7 +408,8 @@ lookup(struct nameidata *ndp)
cnp->cn_lkflags = LK_EXCLUSIVE;
dp = ndp->ni_startdir;
ndp->ni_startdir = NULLVP;
- vn_lock(dp, compute_cn_lkflags(dp->v_mount, cnp->cn_lkflags | LK_RETRY), td);
+ vn_lock(dp,
+ compute_cn_lkflags(dp->v_mount, cnp->cn_lkflags | LK_RETRY));
dirloop:
/*
@@ -546,7 +547,9 @@ dirloop:
VREF(dp);
vput(tdp);
VFS_UNLOCK_GIANT(tvfslocked);
- vn_lock(dp, compute_cn_lkflags(dp->v_mount, cnp->cn_lkflags | LK_RETRY), td);
+ vn_lock(dp,
+ compute_cn_lkflags(dp->v_mount, cnp->cn_lkflags |
+ LK_RETRY));
}
}
@@ -572,7 +575,7 @@ unionlookup:
if (dp != vp_crossmp &&
VOP_ISLOCKED(dp, td) == LK_SHARED &&
(cnp->cn_flags & ISLASTCN) && (cnp->cn_flags & LOCKPARENT))
- vn_lock(dp, LK_UPGRADE|LK_RETRY, td);
+ vn_lock(dp, LK_UPGRADE|LK_RETRY);
/*
* If we're looking up the last component and we need an exclusive
* lock, adjust our lkflags.
@@ -601,7 +604,9 @@ unionlookup:
VREF(dp);
vput(tdp);
VFS_UNLOCK_GIANT(tvfslocked);
- vn_lock(dp, compute_cn_lkflags(dp->v_mount, cnp->cn_lkflags | LK_RETRY), td);
+ vn_lock(dp,
+ compute_cn_lkflags(dp->v_mount, cnp->cn_lkflags |
+ LK_RETRY));
goto unionlookup;
}
@@ -678,7 +683,7 @@ unionlookup:
ndp->ni_dvp = vp_crossmp;
error = VFS_ROOT(mp, compute_cn_lkflags(mp, cnp->cn_lkflags), &tdp, td);
vfs_unbusy(mp, td);
- if (vn_lock(vp_crossmp, LK_SHARED | LK_NOWAIT, td))
+ if (vn_lock(vp_crossmp, LK_SHARED | LK_NOWAIT))
panic("vp_crossmp exclusively locked or reclaimed");
if (error) {
dpunlocked = 1;
@@ -778,7 +783,7 @@ success:
*/
if ((cnp->cn_flags & (ISLASTCN | LOCKSHARED | LOCKLEAF)) ==
(ISLASTCN | LOCKLEAF) && VOP_ISLOCKED(dp, td) != LK_EXCLUSIVE) {
- vn_lock(dp, LK_UPGRADE | LK_RETRY, td);
+ vn_lock(dp, LK_UPGRADE | LK_RETRY);
}
if (vfslocked && dvfslocked)
VFS_UNLOCK_GIANT(dvfslocked); /* Only need one */
@@ -825,7 +830,7 @@ relookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
cnp->cn_flags &= ~ISSYMLINK;
dp = dvp;
cnp->cn_lkflags = LK_EXCLUSIVE;
- vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(dp, LK_EXCLUSIVE | LK_RETRY);
/*
* Search a new directory.
diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c
index a392f23..43b0cb0 100644
--- a/sys/kern/vfs_mount.c
+++ b/sys/kern/vfs_mount.c
@@ -1061,7 +1061,7 @@ vfs_domount(
else
mp->mnt_kern_flag &= ~MNTK_ASYNC;
MNT_IUNLOCK(mp);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
/*
* Put the new filesystem on the mount list after root.
*/
@@ -1204,7 +1204,7 @@ dounmount(mp, flags, td)
mnt_gen_r = mp->mnt_gen;
VI_LOCK(coveredvp);
vholdl(coveredvp);
- vn_lock(coveredvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY, td);
+ vn_lock(coveredvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
vdrop(coveredvp);
/*
* Check for mp being unmounted while waiting for the
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 3396544..ecaa158 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1004,7 +1004,7 @@ insmntque_stddtr(struct vnode *vp, void *dtr_arg)
/* XXX non mp-safe fs may still call insmntque with vnode
unlocked */
if (!VOP_ISLOCKED(vp, td))
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
vgone(vp);
vput(vp);
}
@@ -1662,7 +1662,7 @@ restart:
mtx_lock(&sync_mtx);
return (1);
}
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
(void) VOP_FSYNC(vp, MNT_LAZY, td);
VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
@@ -2059,7 +2059,7 @@ vget(struct vnode *vp, int flags, struct thread *td)
oweinact = 1;
}
vholdl(vp);
- if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
+ if ((error = vn_lock(vp, flags | LK_INTERLOCK)) != 0) {
vdrop(vp);
return (error);
}
@@ -2154,7 +2154,7 @@ vrele(struct vnode *vp)
* as VI_DOINGINACT to avoid recursion.
*/
vp->v_iflag |= VI_OWEINACT;
- if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) {
+ if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0) {
VI_LOCK(vp);
if (vp->v_usecount > 0)
vp->v_iflag &= ~VI_OWEINACT;
@@ -2359,7 +2359,7 @@ loop:
VI_LOCK(vp);
vholdl(vp);
MNT_IUNLOCK(mp);
- error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td);
+ error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE);
if (error) {
vdrop(vp);
MNT_ILOCK(mp);
@@ -3869,7 +3869,7 @@ vfs_knllock(void *arg)
{
struct vnode *vp = arg;
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}
static void
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 483651e..0ac4316 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -364,7 +364,7 @@ kern_fstatfs(struct thread *td, int fd, struct statfs *buf)
return (error);
vp = fp->f_vnode;
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef AUDIT
AUDIT_ARG(vnode, vp, ARG_VNODE1);
#endif
@@ -732,7 +732,7 @@ fchdir(td, uap)
VREF(vp);
fdrop(fp, td);
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
AUDIT_ARG(vnode, vp, ARG_VNODE1);
error = change_dir(vp, td);
while (!error && (mp = vp->v_mountedhere) != NULL) {
@@ -1103,7 +1103,7 @@ kern_open(struct thread *td, char *path, enum uio_seg pathseg, int flags,
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
VATTR_NULL(&vat);
vat.va_size = 0;
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef MAC
error = mac_vnode_check_write(td->td_ucred, fp->f_cred, vp);
if (error == 0)
@@ -1462,7 +1462,7 @@ kern_link(struct thread *td, char *path, char *link, enum uio_seg segflg)
vput(nd.ni_dvp);
vrele(nd.ni_vp);
error = EEXIST;
- } else if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td))
+ } else if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY))
== 0) {
VOP_LEASE(nd.ni_dvp, td, td->td_ucred, LEASE_WRITE);
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
@@ -1767,7 +1767,7 @@ lseek(td, uap)
offset += fp->f_offset;
break;
case L_XTND:
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_GETATTR(vp, &vattr, cred, td);
VOP_UNLOCK(vp, 0, td);
if (error)
@@ -2398,7 +2398,7 @@ setfflags(td, vp, flags)
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
VATTR_NULL(&vattr);
vattr.va_flags = flags;
#ifdef MAC
@@ -2500,7 +2500,7 @@ fchflags(td, uap)
return (error);
vfslocked = VFS_LOCK_GIANT(fp->f_vnode->v_mount);
#ifdef AUDIT
- vn_lock(fp->f_vnode, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(fp->f_vnode, LK_EXCLUSIVE | LK_RETRY);
AUDIT_ARG(vnode, fp->f_vnode, ARG_VNODE1);
VOP_UNLOCK(fp->f_vnode, 0, td);
#endif
@@ -2526,7 +2526,7 @@ setfmode(td, vp, mode)
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
VATTR_NULL(&vattr);
vattr.va_mode = mode & ALLPERMS;
#ifdef MAC
@@ -2640,7 +2640,7 @@ fchmod(td, uap)
return (error);
vfslocked = VFS_LOCK_GIANT(fp->f_vnode->v_mount);
#ifdef AUDIT
- vn_lock(fp->f_vnode, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(fp->f_vnode, LK_EXCLUSIVE | LK_RETRY);
AUDIT_ARG(vnode, fp->f_vnode, ARG_VNODE1);
VOP_UNLOCK(fp->f_vnode, 0, td);
#endif
@@ -2667,7 +2667,7 @@ setfown(td, vp, uid, gid)
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
VATTR_NULL(&vattr);
vattr.va_uid = uid;
vattr.va_gid = gid;
@@ -2797,7 +2797,7 @@ fchown(td, uap)
return (error);
vfslocked = VFS_LOCK_GIANT(fp->f_vnode->v_mount);
#ifdef AUDIT
- vn_lock(fp->f_vnode, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(fp->f_vnode, LK_EXCLUSIVE | LK_RETRY);
AUDIT_ARG(vnode, fp->f_vnode, ARG_VNODE1);
VOP_UNLOCK(fp->f_vnode, 0, td);
#endif
@@ -2860,7 +2860,7 @@ setutimes(td, vp, ts, numtimes, nullflag)
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
setbirthtime = 0;
if (numtimes < 3 && VOP_GETATTR(vp, &vattr, td->td_ucred, td) == 0 &&
timespeccmp(&ts[1], &vattr.va_birthtime, < ))
@@ -3010,7 +3010,7 @@ kern_futimes(struct thread *td, int fd, struct timeval *tptr,
return (error);
vfslocked = VFS_LOCK_GIANT(fp->f_vnode->v_mount);
#ifdef AUDIT
- vn_lock(fp->f_vnode, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(fp->f_vnode, LK_EXCLUSIVE | LK_RETRY);
AUDIT_ARG(vnode, fp->f_vnode, ARG_VNODE1);
VOP_UNLOCK(fp->f_vnode, 0, td);
#endif
@@ -3067,7 +3067,7 @@ kern_truncate(struct thread *td, char *path, enum uio_seg pathseg, off_t length)
}
NDFREE(&nd, NDF_ONLY_PNBUF);
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (vp->v_type == VDIR)
error = EISDIR;
#ifdef MAC
@@ -3165,7 +3165,7 @@ fsync(td, uap)
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
goto drop;
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
AUDIT_ARG(vnode, vp, ARG_VNODE1);
if (vp->v_object != NULL) {
VM_OBJECT_LOCK(vp->v_object);
@@ -3550,7 +3550,7 @@ unionread:
auio.uio_segflg = UIO_USERSPACE;
auio.uio_td = td;
auio.uio_resid = uap->count;
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
loff = auio.uio_offset = fp->f_offset;
#ifdef MAC
error = mac_vnode_check_readdir(td->td_ucred, vp);
@@ -3692,8 +3692,8 @@ unionread:
auio.uio_segflg = UIO_USERSPACE;
auio.uio_td = td;
auio.uio_resid = uap->count;
- /* vn_lock(vp, LK_SHARED | LK_RETRY, td); */
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ /* vn_lock(vp, LK_SHARED | LK_RETRY); */
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
AUDIT_ARG(vnode, vp, ARG_VNODE1);
loff = auio.uio_offset = fp->f_offset;
#ifdef MAC
@@ -4054,7 +4054,7 @@ fhopen(td, uap)
goto out;
}
VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); /* XXX */
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX */
#ifdef MAC
/*
* We don't yet have fp->f_cred, so use td->td_ucred, which
@@ -4120,7 +4120,7 @@ fhopen(td, uap)
fdrop(fp, td);
goto out;
}
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
atomic_set_int(&fp->f_flag, FHASLOCK);
}
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index 10898e5..181cea1 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -287,7 +287,7 @@ vn_close(vp, flags, file_cred, td)
VFS_ASSERT_GIANT(vp->v_mount);
vn_start_write(vp, &mp, V_WAIT);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (flags & FWRITE) {
VNASSERT(vp->v_writecount > 0, vp,
("vn_close: negative writecount"));
@@ -371,14 +371,14 @@ vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
(error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
!= 0)
return (error);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
} else {
/*
* XXX This should be LK_SHARED but I don't trust VFS
* enough to leave it like that until it has been
* reviewed further.
*/
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}
}
@@ -525,10 +525,10 @@ vn_read(fp, uio, active_cred, flags, td)
}
fp->f_vnread_flags |= FOFFSET_LOCKED;
mtx_unlock(mtxp);
- vn_lock(vp, LK_SHARED | LK_RETRY, td);
+ vn_lock(vp, LK_SHARED | LK_RETRY);
uio->uio_offset = fp->f_offset;
} else
- vn_lock(vp, LK_SHARED | LK_RETRY, td);
+ vn_lock(vp, LK_SHARED | LK_RETRY);
ioflag |= sequential_heuristic(uio, fp);
@@ -588,7 +588,7 @@ vn_write(fp, uio, active_cred, flags, td)
(error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
goto unlock;
VOP_LEASE(vp, td, fp->f_cred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if ((flags & FOF_OFFSET) == 0)
uio->uio_offset = fp->f_offset;
ioflag |= sequential_heuristic(uio, fp);
@@ -632,7 +632,7 @@ vn_truncate(fp, length, active_cred, td)
return (error);
}
VOP_LEASE(vp, td, active_cred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (vp->v_type == VDIR) {
error = EISDIR;
goto out;
@@ -670,7 +670,7 @@ vn_statfile(fp, sb, active_cred, td)
int error;
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
VOP_UNLOCK(vp, 0, td);
VFS_UNLOCK_GIANT(vfslocked);
@@ -805,7 +805,7 @@ vn_ioctl(fp, com, data, active_cred, td)
case VREG:
case VDIR:
if (com == FIONREAD) {
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_GETATTR(vp, &vattr, active_cred, td);
VOP_UNLOCK(vp, 0, td);
if (!error)
@@ -842,7 +842,7 @@ vn_poll(fp, events, active_cred, td)
vp = fp->f_vnode;
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
#ifdef MAC
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
VOP_UNLOCK(vp, 0, td);
if (!error)
@@ -858,7 +858,7 @@ vn_poll(fp, events, active_cred, td)
* acquire requested lock.
*/
int
-_vn_lock(struct vnode *vp, int flags, struct thread *td, char *file, int line)
+_vn_lock(struct vnode *vp, int flags, char *file, int line)
{
int error;
@@ -881,7 +881,8 @@ _vn_lock(struct vnode *vp, int flags, struct thread *td, char *file, int line)
* lockmgr drops interlock before it will return for
* any reason. So force the code above to relock it.
*/
- error = VOP_LOCK1(vp, flags | LK_INTERLOCK, td, file, line);
+ error = VOP_LOCK1(vp, flags | LK_INTERLOCK, curthread, file,
+ line);
flags &= ~LK_INTERLOCK;
KASSERT((flags & LK_RETRY) == 0 || error == 0,
("LK_RETRY set with incompatible flags %d\n", flags));
@@ -891,7 +892,7 @@ _vn_lock(struct vnode *vp, int flags, struct thread *td, char *file, int line)
*/
if (error == 0 && vp->v_iflag & VI_DOOMED &&
(flags & LK_RETRY) == 0) {
- VOP_UNLOCK(vp, 0, td);
+ VOP_UNLOCK(vp, 0, curthread);
error = ENOENT;
break;
}
@@ -1222,7 +1223,7 @@ vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
auio.uio_resid = *buflen;
if ((ioflg & IO_NODELOCKED) == 0)
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
@@ -1266,7 +1267,7 @@ vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
if ((ioflg & IO_NODELOCKED) == 0) {
if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
return (error);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}
ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
@@ -1292,7 +1293,7 @@ vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
if ((ioflg & IO_NODELOCKED) == 0) {
if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
return (error);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}
ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");