author		wollman <wollman@FreeBSD.org>	2003-10-23 18:17:36 +0000
committer	wollman <wollman@FreeBSD.org>	2003-10-23 18:17:36 +0000
commit		dabc1a332d9183829807d43facdf8e41c5005396 (patch)
tree		9f937d190fd0278bfcea1c5abf2664ab12c5a646 /sys/kern/vfs_subr.c
parent		04bc71c638975ba3f42bb01b3696aae715321ff9 (diff)
Add appropriate const poisoning to the assert_*locked() family so that I can
call ASSERT_VOP_LOCKED(vp, __func__) without a diagnostic.

Inspired by:	the evil and rude OpenAFS cache manager code
Diffstat (limited to 'sys/kern/vfs_subr.c')
-rw-r--r--	sys/kern/vfs_subr.c	16
1 files changed, 8 insertions, 8 deletions
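For context: __func__ has type "const char []", so before this change passing it
through ASSERT_VOP_LOCKED() to the assert_*locked() helpers drew a
discarded-qualifiers diagnostic. A minimal sketch of the intended call pattern,
assuming a kernel built with DEBUG_VFS_LOCKS; foo_fsync() is a hypothetical VOP
implementation used only for illustration:

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/vnode.h>

	/* Hypothetical VOP_FSYNC implementation, for illustration only. */
	static int
	foo_fsync(struct vop_fsync_args *ap)
	{
		struct vnode *vp = ap->a_vp;

		/*
		 * __func__ is "const char []"; with the const-poisoned
		 * assert_vop_locked() this call no longer provokes a
		 * warning about discarding the const qualifier.
		 */
		ASSERT_VOP_LOCKED(vp, __func__);

		/* ... flush the vnode's dirty buffers here ... */
		return (0);
	}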
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 4092506..1b48f08 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -257,7 +257,7 @@ int vfs_badlock_panic = 1;
 int vfs_badlock_mutex = 1;
 
 static void
-vfs_badlock(char *msg, char *str, struct vnode *vp)
+vfs_badlock(const char *msg, const char *str, struct vnode *vp)
 {
 	if (vfs_badlock_print)
 		printf("%s: %p %s\n", str, vp, msg);
@@ -266,28 +266,28 @@ vfs_badlock(char *msg, char *str, struct vnode *vp)
 }
 
 void
-assert_vi_unlocked(struct vnode *vp, char *str)
+assert_vi_unlocked(struct vnode *vp, const char *str)
 {
 	if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
 		vfs_badlock("interlock is locked but should not be", str, vp);
 }
 
 void
-assert_vi_locked(struct vnode *vp, char *str)
+assert_vi_locked(struct vnode *vp, const char *str)
 {
 	if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
 		vfs_badlock("interlock is not locked but should be", str, vp);
 }
 
 void
-assert_vop_locked(struct vnode *vp, char *str)
+assert_vop_locked(struct vnode *vp, const char *str)
 {
 	if (vp && !IGNORE_LOCK(vp) && !VOP_ISLOCKED(vp, NULL))
 		vfs_badlock("is not locked but should be", str, vp);
 }
 
 void
-assert_vop_unlocked(struct vnode *vp, char *str)
+assert_vop_unlocked(struct vnode *vp, const char *str)
 {
 	if (vp && !IGNORE_LOCK(vp) &&
 	    VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE)
@@ -295,7 +295,7 @@ assert_vop_unlocked(struct vnode *vp, char *str)
 }
 
 void
-assert_vop_elocked(struct vnode *vp, char *str)
+assert_vop_elocked(struct vnode *vp, const char *str)
 {
 	if (vp && !IGNORE_LOCK(vp) &&
 	    VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE)
@@ -303,7 +303,7 @@ assert_vop_elocked(struct vnode *vp, char *str)
 }
 
 void
-assert_vop_elocked_other(struct vnode *vp, char *str)
+assert_vop_elocked_other(struct vnode *vp, const char *str)
 {
 	if (vp && !IGNORE_LOCK(vp) &&
 	    VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER)
@@ -312,7 +312,7 @@ assert_vop_elocked_other(struct vnode *vp, char *str)
 }
 
 void
-assert_vop_slocked(struct vnode *vp, char *str)
+assert_vop_slocked(struct vnode *vp, const char *str)
 {
 	if (vp && !IGNORE_LOCK(vp) &&
 	    VOP_ISLOCKED(vp, curthread) != LK_SHARED)
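The diffstat above is limited to sys/kern/vfs_subr.c, so the matching prototype
changes are not shown here. As a sketch only (an assumption, not part of this
diff), the declarations in sys/sys/vnode.h would need the same const qualifiers
so the prototypes agree with the definitions above:

	/*
	 * Sketch (assumption): corresponding declarations with the
	 * const-qualified string parameter; the actual header change is
	 * outside this file-limited view of the commit.
	 */
	void	assert_vi_locked(struct vnode *vp, const char *str);
	void	assert_vi_unlocked(struct vnode *vp, const char *str);
	void	assert_vop_locked(struct vnode *vp, const char *str);
	void	assert_vop_unlocked(struct vnode *vp, const char *str);
	void	assert_vop_elocked(struct vnode *vp, const char *str);
	void	assert_vop_elocked_other(struct vnode *vp, const char *str);
	void	assert_vop_slocked(struct vnode *vp, const char *str);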