author		attilio <attilio@FreeBSD.org>	2008-02-25 18:45:57 +0000
committer	attilio <attilio@FreeBSD.org>	2008-02-25 18:45:57 +0000
commit		4014b558307253555f43f360be60f49ea39b7ceb (patch)
tree		d455fa541ca0d9b761f28e9c67c92fd959e44b2c /sys/kern
parent		49cb35343eeaa02f4e480228eb7148a3305d3b70 (diff)
Axe the 'thread' argument from VOP_ISLOCKED() and lockstatus() as it is
always curthread.

As the KPI is broken by this patch, manpages and __FreeBSD_version will be
updated by further commits.

Tested by:	Andrea Barberio <insomniac at slackware dot it>
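In practical terms, callers of VOP_ISLOCKED() and lockstatus() simply drop the
trailing thread argument; the lock status is always evaluated against
curthread. A minimal sketch of an updated caller follows (the helper name is
hypothetical and not part of this commit):

/*
 * Sketch only: check whether the current thread holds vp's lock
 * exclusively, using the new single-argument VOP_ISLOCKED() KPI.
 * Old KPI equivalent: VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE.
 */
static int
vp_locked_exclusively(struct vnode *vp)
{

	return (VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
}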
Diffstat (limited to 'sys/kern')
-rw-r--r--	sys/kern/kern_lock.c	7
-rw-r--r--	sys/kern/vfs_cache.c	8
-rw-r--r--	sys/kern/vfs_default.c	5
-rw-r--r--	sys/kern/vfs_lookup.c	4
-rw-r--r--	sys/kern/vfs_subr.c	24
-rw-r--r--	sys/kern/vnode_if.src	1
6 files changed, 19 insertions, 30 deletions
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 1b14183..d80a853 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -617,15 +617,12 @@ _lockmgr_disown(struct lock *lkp, const char *file, int line)
* Determine the status of a lock.
*/
int
-lockstatus(lkp, td)
+lockstatus(lkp)
struct lock *lkp;
- struct thread *td;
{
int lock_type = 0;
int interlocked;
- KASSERT(td == curthread,
- ("%s: thread passed argument (%p) is not valid", __func__, td));
KASSERT((lkp->lk_flags & LK_DESTROYED) == 0,
("%s: %p lockmgr is destroyed", __func__, lkp));
@@ -635,7 +632,7 @@ lockstatus(lkp, td)
} else
interlocked = 0;
if (lkp->lk_exclusivecount != 0) {
- if (lkp->lk_lockholder == td)
+ if (lkp->lk_lockholder == curthread)
lock_type = LK_EXCLUSIVE;
else
lock_type = LK_EXCLOTHER;
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index b115b08..dd8b97c 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -314,7 +314,6 @@ cache_lookup(dvp, vpp, cnp)
struct componentname *cnp;
{
struct namecache *ncp;
- struct thread *td;
u_int32_t hash;
int error, ltype;
@@ -322,7 +321,6 @@ cache_lookup(dvp, vpp, cnp)
cnp->cn_flags &= ~MAKEENTRY;
return (0);
}
- td = cnp->cn_thread;
retry:
CACHE_LOCK();
numcalls++;
@@ -426,7 +424,7 @@ success:
* differently...
*/
ltype = cnp->cn_lkflags & (LK_SHARED | LK_EXCLUSIVE);
- if (ltype == VOP_ISLOCKED(*vpp, td))
+ if (ltype == VOP_ISLOCKED(*vpp))
return (-1);
else if (ltype == LK_EXCLUSIVE)
vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
@@ -434,12 +432,12 @@ success:
}
ltype = 0; /* silence gcc warning */
if (cnp->cn_flags & ISDOTDOT) {
- ltype = VOP_ISLOCKED(dvp, td);
+ ltype = VOP_ISLOCKED(dvp);
VOP_UNLOCK(dvp, 0);
}
VI_LOCK(*vpp);
CACHE_UNLOCK();
- error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, td);
+ error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, cnp->cn_thread);
if (cnp->cn_flags & ISDOTDOT)
vn_lock(dvp, ltype | LK_RETRY);
if ((cnp->cn_flags & ISLASTCN) && (cnp->cn_lkflags & LK_EXCLUSIVE))
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index fe75ed1a..5422530 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -256,7 +256,6 @@ vop_stdlock(ap)
struct vop_lock1_args /* {
struct vnode *a_vp;
int a_flags;
- struct thread *a_td;
char *file;
int line;
} */ *ap;
@@ -274,7 +273,6 @@ vop_stdunlock(ap)
struct vop_unlock_args /* {
struct vnode *a_vp;
int a_flags;
- struct thread *a_td;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
@@ -287,11 +285,10 @@ int
vop_stdislocked(ap)
struct vop_islocked_args /* {
struct vnode *a_vp;
- struct thread *a_td;
} */ *ap;
{
- return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
+ return (lockstatus(ap->a_vp->v_vnlock));
}
/*
diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c
index 8c4c612..67ee6cb 100644
--- a/sys/kern/vfs_lookup.c
+++ b/sys/kern/vfs_lookup.c
@@ -573,7 +573,7 @@ unionlookup:
* last operation.
*/
if (dp != vp_crossmp &&
- VOP_ISLOCKED(dp, td) == LK_SHARED &&
+ VOP_ISLOCKED(dp) == LK_SHARED &&
(cnp->cn_flags & ISLASTCN) && (cnp->cn_flags & LOCKPARENT))
vn_lock(dp, LK_UPGRADE|LK_RETRY);
/*
@@ -782,7 +782,7 @@ success:
* the caller may want it to be exclusively locked.
*/
if ((cnp->cn_flags & (ISLASTCN | LOCKSHARED | LOCKLEAF)) ==
- (ISLASTCN | LOCKLEAF) && VOP_ISLOCKED(dp, td) != LK_EXCLUSIVE) {
+ (ISLASTCN | LOCKLEAF) && VOP_ISLOCKED(dp) != LK_EXCLUSIVE) {
vn_lock(dp, LK_UPGRADE | LK_RETRY);
}
if (vfslocked && dvfslocked)
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index d44510b..5bdd380 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -995,14 +995,12 @@ delmntque(struct vnode *vp)
static void
insmntque_stddtr(struct vnode *vp, void *dtr_arg)
{
- struct thread *td;
- td = curthread; /* XXX ? */
vp->v_data = NULL;
vp->v_op = &dead_vnodeops;
/* XXX non mp-safe fs may still call insmntque with vnode
unlocked */
- if (!VOP_ISLOCKED(vp, td))
+ if (!VOP_ISLOCKED(vp))
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
vgone(vp);
vput(vp);
@@ -1638,7 +1636,7 @@ restart:
VFS_UNLOCK_GIANT(vfslocked);
vfslocked = 0;
}
- if (VOP_ISLOCKED(vp, curthread) != 0) {
+ if (VOP_ISLOCKED(vp) != 0) {
VFS_UNLOCK_GIANT(vfslocked);
return (1);
}
@@ -2208,7 +2206,7 @@ vput(struct vnode *vp)
*/
v_decr_useonly(vp);
vp->v_iflag |= VI_OWEINACT;
- if (VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE) {
+ if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
error = VOP_LOCK(vp, LK_UPGRADE|LK_INTERLOCK|LK_NOWAIT);
VI_LOCK(vp);
if (error) {
@@ -2685,7 +2683,7 @@ DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
nmp = TAILQ_NEXT(mp, mnt_list);
TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
if (vp->v_type != VMARKER &&
- VOP_ISLOCKED(vp, curthread))
+ VOP_ISLOCKED(vp))
vprint("", vp);
}
nmp = TAILQ_NEXT(mp, mnt_list);
@@ -2972,7 +2970,7 @@ vfs_msync(struct mount *mp, int flags)
MNT_VNODE_FOREACH(vp, mp, mvp) {
VI_LOCK(vp);
if ((vp->v_iflag & VI_OBJDIRTY) &&
- (flags == MNT_WAIT || VOP_ISLOCKED(vp, curthread) == 0)) {
+ (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) {
MNT_IUNLOCK(mp);
if (!vget(vp,
LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
@@ -3479,7 +3477,7 @@ void
assert_vop_locked(struct vnode *vp, const char *str)
{
- if (vp && !IGNORE_LOCK(vp) && VOP_ISLOCKED(vp, curthread) == 0)
+ if (vp && !IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == 0)
vfs_badlock("is not locked but should be", str, vp);
}
@@ -3488,7 +3486,7 @@ assert_vop_unlocked(struct vnode *vp, const char *str)
{
if (vp && !IGNORE_LOCK(vp) &&
- VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE)
+ VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
vfs_badlock("is locked but should not be", str, vp);
}
@@ -3497,7 +3495,7 @@ assert_vop_elocked(struct vnode *vp, const char *str)
{
if (vp && !IGNORE_LOCK(vp) &&
- VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE)
+ VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
vfs_badlock("is not exclusive locked but should be", str, vp);
}
@@ -3507,7 +3505,7 @@ assert_vop_elocked_other(struct vnode *vp, const char *str)
{
if (vp && !IGNORE_LOCK(vp) &&
- VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER)
+ VOP_ISLOCKED(vp) != LK_EXCLOTHER)
vfs_badlock("is not exclusive locked by another thread",
str, vp);
}
@@ -3517,7 +3515,7 @@ assert_vop_slocked(struct vnode *vp, const char *str)
{
if (vp && !IGNORE_LOCK(vp) &&
- VOP_ISLOCKED(vp, curthread) != LK_SHARED)
+ VOP_ISLOCKED(vp) != LK_SHARED)
vfs_badlock("is not locked shared but should be", str, vp);
}
#endif /* 0 */
@@ -3885,7 +3883,7 @@ vfs_knllocked(void *arg)
{
struct vnode *vp = arg;
- return (VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE);
+ return (VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
}
int
diff --git a/sys/kern/vnode_if.src b/sys/kern/vnode_if.src
index 5c69255..754cbc3 100644
--- a/sys/kern/vnode_if.src
+++ b/sys/kern/vnode_if.src
@@ -61,7 +61,6 @@
vop_islocked {
IN struct vnode *vp;
- IN struct thread *td;
};
%% lookup dvp L ? ?