author     jasone <jasone@FreeBSD.org>    2001-01-24 12:35:55 +0000
committer  jasone <jasone@FreeBSD.org>    2001-01-24 12:35:55 +0000
commit     8d2ec1ebc4a9454e2936c6fcbe29a5f1fd83504f (patch)
tree       23bd3f0014237e1b861fed6a7c3b587948d149d5 /sys/kern
parent     c5cc2f8e2621f1d090434a5474a18fae384e1db6 (diff)
Convert all simplelocks to mutexes and remove the simplelock implementations.
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_lock.c       122
-rw-r--r--  sys/kern/kern_mutex.c        6
-rw-r--r--  sys/kern/kern_shutdown.c     2
-rw-r--r--  sys/kern/subr_rman.c        60
-rw-r--r--  sys/kern/subr_smp.c         62
-rw-r--r--  sys/kern/subr_turnstile.c    6
-rw-r--r--  sys/kern/subr_witness.c      6
-rw-r--r--  sys/kern/vfs_export.c      110
-rw-r--r--  sys/kern/vfs_subr.c        110
-rw-r--r--  sys/kern/vfs_vnops.c         8
10 files changed, 197 insertions(+), 295 deletions(-)
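
The conversion is largely mechanical, and the same substitutions recur in every file below. As a reading aid (an editor's sketch, not part of the commit; see sys/sys/mutex.h of this era for the authoritative interface), the mapping is roughly:

	struct mtx m;

	mtx_init(&m, "name", MTX_DEF);	/* was: simple_lock_init(&sl) */
	mtx_enter(&m, MTX_DEF);		/* was: simple_lock(&sl)      */
	mtx_exit(&m, MTX_DEF);		/* was: simple_unlock(&sl)    */
	mtx_destroy(&m);		/* no simplelock counterpart  */

	/*
	 * Spin-class locks (AP boot, IO APIC, com console, SMP
	 * rendezvous) pass MTX_SPIN instead of MTX_DEF to mtx_init()
	 * and to every mtx_enter()/mtx_exit() pair.
	 */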
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 8314a0e..afd59f2 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -54,12 +54,6 @@
* Locks provide shared/exclusive synchronization.
*/
-#ifdef SIMPLELOCK_DEBUG
-#define COUNT(p, x) if (p) (p)->p_locks += (x)
-#else
-#define COUNT(p, x)
-#endif
-
#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7
@@ -137,9 +131,7 @@ shareunlock(struct lock *lkp, int decr) {
}
/*
- * This is the waitloop optimization, and note for this to work
- * simple_lock and simple_unlock should be subroutines to avoid
- * optimization troubles.
+ * This is the waitloop optimization.
*/
static int
apause(struct lock *lkp, int flags)
@@ -280,7 +272,6 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
if (error)
break;
sharelock(lkp, 1);
- COUNT(p, 1);
break;
}
/*
@@ -288,7 +279,6 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
* An alternative would be to fail with EDEADLK.
*/
sharelock(lkp, 1);
- COUNT(p, 1);
/* fall into downgrade */
case LK_DOWNGRADE:
@@ -310,7 +300,6 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
*/
if (lkp->lk_flags & LK_WANT_UPGRADE) {
shareunlock(lkp, 1);
- COUNT(p, -1);
error = EBUSY;
break;
}
@@ -328,7 +317,6 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
if ((lkp->lk_lockholder == pid) || (lkp->lk_sharecount <= 0))
panic("lockmgr: upgrade exclusive lock");
shareunlock(lkp, 1);
- COUNT(p, -1);
/*
* If we are just polling, check to see if we will block.
*/
@@ -360,7 +348,6 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
lkp->lk_lineno = line;
lkp->lk_lockername = name;
#endif
- COUNT(p, 1);
break;
}
/*
@@ -382,7 +369,6 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
panic("lockmgr: locking against myself");
if ((extflags & LK_CANRECURSE) != 0) {
lkp->lk_exclusivecount++;
- COUNT(p, 1);
break;
}
}
@@ -418,7 +404,6 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
lkp->lk_lineno = line;
lkp->lk_lockername = name;
#endif
- COUNT(p, 1);
break;
case LK_RELEASE:
@@ -429,9 +414,6 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
pid, "exclusive lock holder",
lkp->lk_lockholder);
}
- if (lkp->lk_lockholder != LK_KERNPROC) {
- COUNT(p, -1);
- }
if (lkp->lk_exclusivecount == 1) {
lkp->lk_flags &= ~LK_HAVE_EXCL;
lkp->lk_lockholder = LK_NOPROC;
@@ -439,10 +421,8 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
} else {
lkp->lk_exclusivecount--;
}
- } else if (lkp->lk_flags & LK_SHARE_NONZERO) {
+ } else if (lkp->lk_flags & LK_SHARE_NONZERO)
shareunlock(lkp, 1);
- COUNT(p, -1);
- }
if (lkp->lk_flags & LK_WAIT_NONZERO)
wakeup((void *)lkp);
break;
@@ -468,7 +448,6 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
lkp->lk_lineno = line;
lkp->lk_lockername = name;
#endif
- COUNT(p, 1);
break;
default:
@@ -627,100 +606,3 @@ lockmgr_printinfo(lkp)
if (lkp->lk_waitcount > 0)
printf(" with %d pending", lkp->lk_waitcount);
}
-
-#if defined(SIMPLELOCK_DEBUG) && (MAXCPU == 1 || defined(COMPILING_LINT))
-#include <sys/kernel.h>
-#include <sys/sysctl.h>
-
-static int lockpausetime = 0;
-SYSCTL_INT(_debug, OID_AUTO, lockpausetime, CTLFLAG_RW, &lockpausetime, 0, "");
-
-static int simplelockrecurse;
-
-/*
- * Simple lock functions so that the debugger can see from whence
- * they are being called.
- */
-void
-simple_lock_init(alp)
- struct simplelock *alp;
-{
-
- alp->lock_data = 0;
-}
-
-void
-_simple_lock(alp, id, l)
- struct simplelock *alp;
- const char *id;
- int l;
-{
-
- if (simplelockrecurse)
- return;
- if (alp->lock_data == 1) {
- if (lockpausetime == -1)
- panic("%s:%d: simple_lock: lock held", id, l);
- printf("%s:%d: simple_lock: lock held\n", id, l);
- if (lockpausetime == 1) {
- Debugger("simple_lock");
- /*BACKTRACE(curproc); */
- } else if (lockpausetime > 1) {
- printf("%s:%d: simple_lock: lock held...", id, l);
- tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
- lockpausetime * hz);
- printf(" continuing\n");
- }
- }
- alp->lock_data = 1;
- if (curproc)
- curproc->p_simple_locks++;
-}
-
-int
-_simple_lock_try(alp, id, l)
- struct simplelock *alp;
- const char *id;
- int l;
-{
-
- if (alp->lock_data)
- return (0);
- if (simplelockrecurse)
- return (1);
- alp->lock_data = 1;
- if (curproc)
- curproc->p_simple_locks++;
- return (1);
-}
-
-void
-_simple_unlock(alp, id, l)
- struct simplelock *alp;
- const char *id;
- int l;
-{
-
- if (simplelockrecurse)
- return;
- if (alp->lock_data == 0) {
- if (lockpausetime == -1)
- panic("%s:%d: simple_unlock: lock not held", id, l);
- printf("%s:%d: simple_unlock: lock not held\n", id, l);
- if (lockpausetime == 1) {
- Debugger("simple_unlock");
- /* BACKTRACE(curproc); */
- } else if (lockpausetime > 1) {
- printf("%s:%d: simple_unlock: lock not held...", id, l);
- tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
- lockpausetime * hz);
- printf(" continuing\n");
- }
- }
- alp->lock_data = 0;
- if (curproc)
- curproc->p_simple_locks--;
-}
-#elif defined(SIMPLELOCK_DEBUG)
-#error "SIMPLELOCK_DEBUG is not compatible with SMP!"
-#endif /* SIMPLELOCK_DEBUG && MAXCPU == 1 */
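
The #error above is the point of the removal: the debug simplelock body manipulates lock_data with plain loads and stores, so on SMP two CPUs can both observe lock_data == 0 and both "acquire" the lock. A hedged userspace illustration of the hole, with an atomic fix in the spirit of the mutex code, compilable with C11:

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Racy, like the removed _simple_lock_try(): the test and the
	 * set are two separate memory operations. */
	static int lock_data;

	static bool
	try_lock_racy(void)
	{
		if (lock_data)		/* two CPUs may both read 0... */
			return (false);
		lock_data = 1;		/* ...and both store 1 */
		return (true);
	}

	/* SMP-safe: the read-modify-write is one atomic step. */
	static atomic_int lock_word;

	static bool
	try_lock_atomic(void)
	{
		int expected = 0;

		return (atomic_compare_exchange_strong(&lock_word,
		    &expected, 1));
	}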
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index d9afe0a..9844db4 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -1099,6 +1099,12 @@ static char *spin_order_list[] = {
/*
* leaf locks
*/
+#ifdef __i386__
+ "ap boot",
+ "imen",
+#endif
+ "com",
+ "smp rendezvous",
NULL
};
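
This hunk (repeated verbatim in subr_turnstile.c and subr_witness.c below, which share this file's history) registers the new spin mutexes with witness: spin_order_list is a NULL-terminated table of spin lock names in their permitted acquisition order, and the locks added by this commit go at the leaf end. A simplified, hypothetical model of the order check — not the actual witness code:

	#include <string.h>

	/* Only the names added by this commit are shown; the real
	 * table has earlier, non-leaf entries. */
	static const char *spin_order_model[] = {
		"ap boot",		/* i386-only in the real list */
		"imen",			/* i386-only in the real list */
		"com",
		"smp rendezvous",
		NULL
	};

	static int
	order_index(const char *name)
	{
		int i;

		for (i = 0; spin_order_model[i] != NULL; i++)
			if (strcmp(spin_order_model[i], name) == 0)
				return (i);
		return (-1);
	}

	/* While holding `held', acquiring `acquiring' is in order only
	 * if it appears later in the table. */
	static int
	order_ok(const char *held, const char *acquiring)
	{
		return (order_index(held) < order_index(acquiring));
	}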
diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c
index eaf8b94..560cd4b 100644
--- a/sys/kern/kern_shutdown.c
+++ b/sys/kern/kern_shutdown.c
@@ -540,7 +540,7 @@ panic(const char *fmt, ...)
#ifdef SMP
/* Only 1 CPU can panic at a time */
- s_lock(&panic_lock);
+ mtx_enter(&panic_mtx, MTX_DEF);
#endif
bootopt = RB_AUTOBOOT | RB_DUMP;
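
Since panic() does not return, the new panic_mtx is taken and never released: the first CPU to panic wins, and any other CPU that panics concurrently blocks instead of interleaving its output. A minimal sketch of that reading (assumed, not the full function):

	void
	panic_sketch(void)
	{
	#ifdef SMP
		/* First CPU in wins; later panicking CPUs block here. */
		mtx_enter(&panic_mtx, MTX_DEF);
	#endif
		/* ... print, dump, reboot: no mtx_exit() on this path,
		 * because panic() never returns. */
	}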
diff --git a/sys/kern/subr_rman.c b/sys/kern/subr_rman.c
index ccd1af1..f94bbeb 100644
--- a/sys/kern/subr_rman.c
+++ b/sys/kern/subr_rman.c
@@ -62,6 +62,7 @@
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
+#include <sys/mutex.h>
#include <sys/bus.h> /* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
@@ -75,9 +76,7 @@
static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");
struct rman_head rman_head;
-#ifndef NULL_SIMPLELOCKS
-static struct simplelock rman_lock; /* mutex to protect rman_head */
-#endif
+static struct mtx rman_mtx; /* mutex to protect rman_head */
static int int_rman_activate_resource(struct rman *rm, struct resource *r,
struct resource **whohas);
static int int_rman_deactivate_resource(struct resource *r);
@@ -91,7 +90,7 @@ rman_init(struct rman *rm)
if (once == 0) {
once = 1;
TAILQ_INIT(&rman_head);
- simple_lock_init(&rman_lock);
+ mtx_init(&rman_mtx, "rman head", MTX_DEF);
}
if (rm->rm_type == RMAN_UNINIT)
@@ -100,14 +99,14 @@ rman_init(struct rman *rm)
panic("implement RMAN_GAUGE");
TAILQ_INIT(&rm->rm_list);
- rm->rm_slock = malloc(sizeof *rm->rm_slock, M_RMAN, M_NOWAIT);
- if (rm->rm_slock == 0)
+ rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT);
+ if (rm->rm_mtx == 0)
return ENOMEM;
- simple_lock_init(rm->rm_slock);
+ mtx_init(rm->rm_mtx, "rman", MTX_DEF);
- simple_lock(&rman_lock);
+ mtx_enter(&rman_mtx, MTX_DEF);
TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
- simple_unlock(&rman_lock);
+ mtx_exit(&rman_mtx, MTX_DEF);
return 0;
}
@@ -130,7 +129,7 @@ rman_manage_region(struct rman *rm, u_long start, u_long end)
r->r_dev = 0;
r->r_rm = rm;
- simple_lock(rm->rm_slock);
+ mtx_enter(rm->rm_mtx, MTX_DEF);
for (s = TAILQ_FIRST(&rm->rm_list);
s && s->r_end < r->r_start;
s = TAILQ_NEXT(s, r_link))
@@ -142,7 +141,7 @@ rman_manage_region(struct rman *rm, u_long start, u_long end)
TAILQ_INSERT_BEFORE(s, r, r_link);
}
- simple_unlock(rm->rm_slock);
+ mtx_exit(rm->rm_mtx, MTX_DEF);
return 0;
}
@@ -151,10 +150,10 @@ rman_fini(struct rman *rm)
{
struct resource *r;
- simple_lock(rm->rm_slock);
+ mtx_enter(rm->rm_mtx, MTX_DEF);
TAILQ_FOREACH(r, &rm->rm_list, r_link) {
if (r->r_flags & RF_ALLOCATED) {
- simple_unlock(rm->rm_slock);
+ mtx_exit(rm->rm_mtx, MTX_DEF);
return EBUSY;
}
}
@@ -168,11 +167,12 @@ rman_fini(struct rman *rm)
TAILQ_REMOVE(&rm->rm_list, r, r_link);
free(r, M_RMAN);
}
- simple_unlock(rm->rm_slock);
- simple_lock(&rman_lock);
+ mtx_exit(rm->rm_mtx, MTX_DEF);
+ mtx_enter(&rman_mtx, MTX_DEF);
TAILQ_REMOVE(&rman_head, rm, rm_link);
- simple_unlock(&rman_lock);
- free(rm->rm_slock, M_RMAN);
+ mtx_exit(&rman_mtx, MTX_DEF);
+ mtx_destroy(rm->rm_mtx);
+ free(rm->rm_mtx, M_RMAN);
return 0;
}
@@ -193,7 +193,7 @@ rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
want_activate = (flags & RF_ACTIVE);
flags &= ~RF_ACTIVE;
- simple_lock(rm->rm_slock);
+ mtx_enter(rm->rm_mtx, MTX_DEF);
for (r = TAILQ_FIRST(&rm->rm_list);
r && r->r_end < start;
@@ -370,7 +370,7 @@ out:
}
}
- simple_unlock(rm->rm_slock);
+ mtx_exit(rm->rm_mtx, MTX_DEF);
return (rv);
}
@@ -417,9 +417,9 @@ rman_activate_resource(struct resource *r)
struct rman *rm;
rm = r->r_rm;
- simple_lock(rm->rm_slock);
+ mtx_enter(rm->rm_mtx, MTX_DEF);
rv = int_rman_activate_resource(rm, r, &whohas);
- simple_unlock(rm->rm_slock);
+ mtx_exit(rm->rm_mtx, MTX_DEF);
return rv;
}
@@ -432,28 +432,28 @@ rman_await_resource(struct resource *r, int pri, int timo)
rm = r->r_rm;
for (;;) {
- simple_lock(rm->rm_slock);
+ mtx_enter(rm->rm_mtx, MTX_DEF);
rv = int_rman_activate_resource(rm, r, &whohas);
if (rv != EBUSY)
- return (rv); /* returns with simplelock */
+ return (rv); /* returns with mutex held */
if (r->r_sharehead == 0)
panic("rman_await_resource");
/*
* splhigh hopefully will prevent a race between
- * simple_unlock and tsleep where a process
+ * mtx_exit and tsleep where a process
* could conceivably get in and release the resource
* before we have a chance to sleep on it.
*/
s = splhigh();
whohas->r_flags |= RF_WANTED;
- simple_unlock(rm->rm_slock);
+ mtx_exit(rm->rm_mtx, MTX_DEF);
rv = tsleep(r->r_sharehead, pri, "rmwait", timo);
if (rv) {
splx(s);
return rv;
}
- simple_lock(rm->rm_slock);
+ mtx_enter(rm->rm_mtx, MTX_DEF);
splx(s);
}
}
@@ -478,9 +478,9 @@ rman_deactivate_resource(struct resource *r)
struct rman *rm;
rm = r->r_rm;
- simple_lock(rm->rm_slock);
+ mtx_enter(rm->rm_mtx, MTX_DEF);
int_rman_deactivate_resource(r);
- simple_unlock(rm->rm_slock);
+ mtx_exit(rm->rm_mtx, MTX_DEF);
return 0;
}
@@ -576,9 +576,9 @@ rman_release_resource(struct resource *r)
int rv;
struct rman *rm = r->r_rm;
- simple_lock(rm->rm_slock);
+ mtx_enter(rm->rm_mtx, MTX_DEF);
rv = int_rman_release_resource(rm, r);
- simple_unlock(rm->rm_slock);
+ mtx_exit(rm->rm_mtx, MTX_DEF);
return (rv);
}
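
rman_init() and rman_fini() above show the lifecycle obligation mutexes add over simplelocks: initialize before first use and destroy before freeing the storage (simplelocks had no destructor). Condensed from the hunks, with error handling trimmed:

	struct mtx *m;

	m = malloc(sizeof(*m), M_RMAN, M_NOWAIT);
	if (m == NULL)
		return (ENOMEM);
	mtx_init(m, "rman", MTX_DEF);	/* before first use */

	mtx_enter(m, MTX_DEF);		/* bracket list manipulation */
	/* ... walk or modify the resource list ... */
	mtx_exit(m, MTX_DEF);

	mtx_destroy(m);			/* new obligation: destroy... */
	free(m, M_RMAN);		/* ...before freeing */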
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index 21118f2..598fbf2 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -238,7 +238,7 @@ typedef struct BASETABLE_ENTRY {
#define MP_ANNOUNCE_POST 0x19
/* used to hold the AP's until we are ready to release them */
-struct simplelock ap_boot_lock;
+struct mtx ap_boot_mtx;
/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int current_postcode;
@@ -318,6 +318,9 @@ SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
* Local data and functions.
*/
+/* Set to 1 once we're ready to let the APs out of the pen. */
+static volatile int aps_ready = 0;
+
static int mp_capable;
static u_int boot_address;
static u_int base_memory;
@@ -345,36 +348,40 @@ static void release_aps(void *dummy);
*/
/* critical region around IO APIC, apic_imen */
-struct simplelock imen_lock;
+struct mtx imen_mtx;
/* lock region used by kernel profiling */
-struct simplelock mcount_lock;
+struct mtx mcount_mtx;
#ifdef USE_COMLOCK
/* locks com (tty) data/hardware accesses: a FASTINTR() */
-struct simplelock com_lock;
+struct mtx com_mtx;
#endif /* USE_COMLOCK */
/* lock around the MP rendezvous */
-static struct simplelock smp_rv_lock;
+static struct mtx smp_rv_mtx;
/* only 1 CPU can panic at a time :) */
-struct simplelock panic_lock;
+struct mtx panic_mtx;
static void
init_locks(void)
{
- s_lock_init(&mcount_lock);
+ /*
+ * XXX The mcount mutex probably needs to be statically initialized,
+ * since it will be used even in the function calls that get us to this
+ * point.
+ */
+ mtx_init(&mcount_mtx, "mcount", MTX_DEF);
- s_lock_init(&imen_lock);
- s_lock_init(&smp_rv_lock);
- s_lock_init(&panic_lock);
+ mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN);
+ mtx_init(&panic_mtx, "panic", MTX_DEF);
#ifdef USE_COMLOCK
- s_lock_init(&com_lock);
+ mtx_init(&com_mtx, "com", MTX_SPIN);
#endif /* USE_COMLOCK */
- s_lock_init(&ap_boot_lock);
+ mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
}
/*
@@ -655,9 +662,6 @@ mp_enable(u_int boot_addr)
/* initialize all SMP locks */
init_locks();
- /* obtain the ap_boot_lock */
- s_lock(&ap_boot_lock);
-
/* start each Application Processor */
start_all_aps(boot_addr);
}
@@ -2247,8 +2251,12 @@ ap_init(void)
{
u_int apic_id;
+ /* spin until all the AP's are ready */
+ while (!aps_ready)
+ /* spin */ ;
+
/* lock against other AP's that are waking up */
- s_lock(&ap_boot_lock);
+ mtx_enter(&ap_boot_mtx, MTX_SPIN);
/* BSP may have changed PTD while we're waiting for the lock */
cpu_invltlb();
@@ -2297,7 +2305,7 @@ ap_init(void)
}
/* let other AP's wake up now */
- s_unlock(&ap_boot_lock);
+ mtx_exit(&ap_boot_mtx, MTX_SPIN);
/* wait until all the AP's are up */
while (smp_started == 0)
@@ -2851,10 +2859,9 @@ smp_rendezvous(void (* setup_func)(void *),
void (* teardown_func)(void *),
void *arg)
{
- u_int efl;
-
+
/* obtain rendezvous lock */
- s_lock(&smp_rv_lock); /* XXX sleep here? NOWAIT flag? */
+ mtx_enter(&smp_rv_mtx, MTX_SPIN);
/* set static function pointers */
smp_rv_setup_func = setup_func;
@@ -2864,27 +2871,22 @@ smp_rendezvous(void (* setup_func)(void *),
smp_rv_waiters[0] = 0;
smp_rv_waiters[1] = 0;
- /* disable interrupts on this CPU, save interrupt status */
- efl = read_eflags();
- write_eflags(efl & ~PSL_I);
-
- /* signal other processors, which will enter the IPI with interrupts off */
+ /*
+ * signal other processors, which will enter the IPI with interrupts off
+ */
all_but_self_ipi(XRENDEZVOUS_OFFSET);
/* call executor function */
smp_rendezvous_action();
- /* restore interrupt flag */
- write_eflags(efl);
-
/* release lock */
- s_unlock(&smp_rv_lock);
+ mtx_exit(&smp_rv_mtx, MTX_SPIN);
}
void
release_aps(void *dummy __unused)
{
- s_unlock(&ap_boot_lock);
+ atomic_store_rel_int(&aps_ready, 1);
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
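
Rather than have the BSP acquire ap_boot_lock in mp_enable() and hold it until the release_aps() SYSINIT unlocks it, the gate is now a flag: release_aps() publishes aps_ready with a release store, and each AP spins on it in ap_init() before taking ap_boot_mtx. A runnable C11 analogue of the handshake, assuming the usual acquire/release pairing:

	#include <stdatomic.h>

	static atomic_int aps_ready_flag;	/* analogue of aps_ready */

	/* AP side, as in ap_init(): spin until the BSP opens the gate.
	 * The acquire load pairs with the release store below. */
	static void
	ap_wait(void)
	{
		while (atomic_load_explicit(&aps_ready_flag,
		    memory_order_acquire) == 0)
			;	/* spin */
	}

	/* BSP side, as in release_aps(): the C11 counterpart of
	 * atomic_store_rel_int(&aps_ready, 1). */
	static void
	bsp_release(void)
	{
		atomic_store_explicit(&aps_ready_flag, 1,
		    memory_order_release);
	}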
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index d9afe0a..9844db4 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -1099,6 +1099,12 @@ static char *spin_order_list[] = {
/*
* leaf locks
*/
+#ifdef __i386__
+ "ap boot",
+ "imen",
+#endif
+ "com",
+ "smp rendezvous",
NULL
};
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index d9afe0a..9844db4 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -1099,6 +1099,12 @@ static char *spin_order_list[] = {
/*
* leaf locks
*/
+#ifdef __i386__
+ "ap boot",
+ "imen",
+#endif
+ "com",
+ "smp rendezvous",
NULL
};
diff --git a/sys/kern/vfs_export.c b/sys/kern/vfs_export.c
index fba809c..6e2afda 100644
--- a/sys/kern/vfs_export.c
+++ b/sys/kern/vfs_export.c
@@ -152,26 +152,25 @@ struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
struct mtx mountlist_mtx;
/* For any iteration/modification of mnt_vnodelist */
-struct simplelock mntvnode_slock;
+struct mtx mntvnode_mtx;
+
/*
* Cache for the mount type id assigned to NFS. This is used for
* special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
*/
int nfs_mount_type = -1;
-#ifndef NULL_SIMPLELOCKS
/* To keep more than one thread at a time from running vfs_getnewfsid */
-static struct simplelock mntid_slock;
+static struct mtx mntid_mtx;
/* For any iteration/modification of vnode_free_list */
-static struct simplelock vnode_free_list_slock;
+static struct mtx vnode_free_list_mtx;
/*
* For any iteration/modification of dev->si_hlist (linked through
* v_specnext)
*/
-static struct simplelock spechash_slock;
-#endif
+static struct mtx spechash_mtx;
/* Publicly exported FS */
struct nfs_public nfs_pub;
@@ -250,11 +249,11 @@ vntblinit(void *dummy __unused)
desiredvnodes = maxproc + cnt.v_page_count / 4;
mtx_init(&mountlist_mtx, "mountlist", MTX_DEF);
- simple_lock_init(&mntvnode_slock);
- simple_lock_init(&mntid_slock);
- simple_lock_init(&spechash_slock);
+ mtx_init(&mntvnode_mtx, "mntvnode", MTX_DEF);
+ mtx_init(&mntid_mtx, "mntid", MTX_DEF);
+ mtx_init(&spechash_mtx, "spechash", MTX_DEF);
TAILQ_INIT(&vnode_free_list);
- simple_lock_init(&vnode_free_list_slock);
+ mtx_init(&vnode_free_list_mtx, "vnode_free_list", MTX_DEF);
vnode_zone = zinit("VNODE", sizeof (struct vnode), 0, 0, 5);
/*
* Initialize the filesystem syncer.
@@ -423,7 +422,7 @@ vfs_getnewfsid(mp)
fsid_t tfsid;
int mtype;
- simple_lock(&mntid_slock);
+ mtx_enter(&mntid_mtx, MTX_DEF);
mtype = mp->mnt_vfc->vfc_typenum;
tfsid.val[1] = mtype;
mtype = (mtype & 0xFF) << 24;
@@ -436,7 +435,7 @@ vfs_getnewfsid(mp)
}
mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
- simple_unlock(&mntid_slock);
+ mtx_exit(&mntid_mtx, MTX_DEF);
}
/*
@@ -539,7 +538,7 @@ getnewvnode(tag, mp, vops, vpp)
*/
s = splbio();
- simple_lock(&vnode_free_list_slock);
+ mtx_enter(&vnode_free_list_mtx, MTX_DEF);
if (wantfreevnodes && freevnodes < wantfreevnodes) {
vp = NULL;
@@ -579,7 +578,7 @@ getnewvnode(tag, mp, vops, vpp)
vp->v_flag |= VDOOMED;
vp->v_flag &= ~VFREE;
freevnodes--;
- simple_unlock(&vnode_free_list_slock);
+ mtx_exit(&vnode_free_list_mtx, MTX_DEF);
cache_purge(vp);
vp->v_lease = NULL;
if (vp->v_type != VBAD) {
@@ -610,11 +609,12 @@ getnewvnode(tag, mp, vops, vpp)
vp->v_clen = 0;
vp->v_socket = 0;
} else {
- simple_unlock(&vnode_free_list_slock);
+ mtx_exit(&vnode_free_list_mtx, MTX_DEF);
vp = (struct vnode *) zalloc(vnode_zone);
bzero((char *) vp, sizeof *vp);
mtx_init(&vp->v_interlock, "vnode interlock", MTX_DEF);
vp->v_dd = vp;
+ mtx_init(&vp->v_pollinfo.vpi_lock, "vnode pollinfo", MTX_DEF);
cache_purge(vp);
LIST_INIT(&vp->v_cache_src);
TAILQ_INIT(&vp->v_cache_dst);
@@ -646,7 +646,7 @@ insmntque(vp, mp)
register struct mount *mp;
{
- simple_lock(&mntvnode_slock);
+ mtx_enter(&mntvnode_mtx, MTX_DEF);
/*
* Delete from old mount point vnode list, if on one.
*/
@@ -656,11 +656,11 @@ insmntque(vp, mp)
* Insert into list of vnodes for the new mount point, if available.
*/
if ((vp->v_mount = mp) == NULL) {
- simple_unlock(&mntvnode_slock);
+ mtx_exit(&mntvnode_mtx, MTX_DEF);
return;
}
LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
- simple_unlock(&mntvnode_slock);
+ mtx_exit(&mntvnode_mtx, MTX_DEF);
}
/*
@@ -1402,9 +1402,9 @@ addalias(nvp, dev)
KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
nvp->v_rdev = dev;
- simple_lock(&spechash_slock);
+ mtx_enter(&spechash_mtx, MTX_DEF);
SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
- simple_unlock(&spechash_slock);
+ mtx_exit(&spechash_mtx, MTX_DEF);
}
/*
@@ -1628,7 +1628,7 @@ vflush(mp, skipvp, flags)
struct vnode *vp, *nvp;
int busy = 0;
- simple_lock(&mntvnode_slock);
+ mtx_enter(&mntvnode_mtx, MTX_DEF);
loop:
for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
/*
@@ -1667,9 +1667,9 @@ loop:
* vnode data structures and we are done.
*/
if (vp->v_usecount == 0) {
- simple_unlock(&mntvnode_slock);
+ mtx_exit(&mntvnode_mtx, MTX_DEF);
vgonel(vp, p);
- simple_lock(&mntvnode_slock);
+ mtx_enter(&mntvnode_mtx, MTX_DEF);
continue;
}
@@ -1679,7 +1679,7 @@ loop:
* all other files, just kill them.
*/
if (flags & FORCECLOSE) {
- simple_unlock(&mntvnode_slock);
+ mtx_exit(&mntvnode_mtx, MTX_DEF);
if (vp->v_type != VCHR) {
vgonel(vp, p);
} else {
@@ -1687,7 +1687,7 @@ loop:
vp->v_op = spec_vnodeop_p;
insmntque(vp, (struct mount *) 0);
}
- simple_lock(&mntvnode_slock);
+ mtx_enter(&mntvnode_mtx, MTX_DEF);
continue;
}
#ifdef DIAGNOSTIC
@@ -1697,7 +1697,7 @@ loop:
mtx_exit(&vp->v_interlock, MTX_DEF);
busy++;
}
- simple_unlock(&mntvnode_slock);
+ mtx_exit(&mntvnode_mtx, MTX_DEF);
if (busy)
return (EBUSY);
return (0);
@@ -1842,9 +1842,9 @@ vop_revoke(ap)
}
dev = vp->v_rdev;
for (;;) {
- simple_lock(&spechash_slock);
+ mtx_enter(&spechash_mtx, MTX_DEF);
vq = SLIST_FIRST(&dev->si_hlist);
- simple_unlock(&spechash_slock);
+ mtx_exit(&spechash_mtx, MTX_DEF);
if (!vq)
break;
vgone(vq);
@@ -1859,14 +1859,14 @@ vop_revoke(ap)
int
vrecycle(vp, inter_lkp, p)
struct vnode *vp;
- struct simplelock *inter_lkp;
+ struct mtx *inter_lkp;
struct proc *p;
{
mtx_enter(&vp->v_interlock, MTX_DEF);
if (vp->v_usecount == 0) {
if (inter_lkp) {
- simple_unlock(inter_lkp);
+ mtx_exit(inter_lkp, MTX_DEF);
}
vgonel(vp, p);
return (1);
@@ -1926,10 +1926,10 @@ vgonel(vp, p)
* if it is on one.
*/
if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) {
- simple_lock(&spechash_slock);
+ mtx_enter(&spechash_mtx, MTX_DEF);
SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
freedev(vp->v_rdev);
- simple_unlock(&spechash_slock);
+ mtx_exit(&spechash_mtx, MTX_DEF);
vp->v_rdev = NULL;
}
@@ -1945,14 +1945,14 @@ vgonel(vp, p)
*/
if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
s = splbio();
- simple_lock(&vnode_free_list_slock);
+ mtx_enter(&vnode_free_list_mtx, MTX_DEF);
if (vp->v_flag & VFREE)
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
else
freevnodes++;
vp->v_flag |= VFREE;
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
- simple_unlock(&vnode_free_list_slock);
+ mtx_exit(&vnode_free_list_mtx, MTX_DEF);
splx(s);
}
@@ -1971,15 +1971,15 @@ vfinddev(dev, type, vpp)
{
struct vnode *vp;
- simple_lock(&spechash_slock);
+ mtx_enter(&spechash_mtx, MTX_DEF);
SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
if (type == vp->v_type) {
*vpp = vp;
- simple_unlock(&spechash_slock);
+ mtx_exit(&spechash_mtx, MTX_DEF);
return (1);
}
}
- simple_unlock(&spechash_slock);
+ mtx_exit(&spechash_mtx, MTX_DEF);
return (0);
}
@@ -1994,10 +1994,10 @@ vcount(vp)
int count;
count = 0;
- simple_lock(&spechash_slock);
+ mtx_enter(&spechash_mtx, MTX_DEF);
SLIST_FOREACH(vq, &vp->v_rdev->si_hlist, v_specnext)
count += vq->v_usecount;
- simple_unlock(&spechash_slock);
+ mtx_exit(&spechash_mtx, MTX_DEF);
return (count);
}
@@ -2204,7 +2204,7 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
continue;
}
again:
- simple_lock(&mntvnode_slock);
+ mtx_enter(&mntvnode_mtx, MTX_DEF);
for (vp = LIST_FIRST(&mp->mnt_vnodelist);
vp != NULL;
vp = nvp) {
@@ -2214,17 +2214,17 @@ again:
* recycled onto the same filesystem.
*/
if (vp->v_mount != mp) {
- simple_unlock(&mntvnode_slock);
+ mtx_exit(&mntvnode_mtx, MTX_DEF);
goto again;
}
nvp = LIST_NEXT(vp, v_mntvnodes);
- simple_unlock(&mntvnode_slock);
+ mtx_exit(&mntvnode_mtx, MTX_DEF);
if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
(error = SYSCTL_OUT(req, vp, VNODESZ)))
return (error);
- simple_lock(&mntvnode_slock);
+ mtx_enter(&mntvnode_mtx, MTX_DEF);
}
- simple_unlock(&mntvnode_slock);
+ mtx_exit(&mntvnode_mtx, MTX_DEF);
mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
@@ -2633,7 +2633,7 @@ vfree(vp)
int s;
s = splbio();
- simple_lock(&vnode_free_list_slock);
+ mtx_enter(&vnode_free_list_mtx, MTX_DEF);
KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free"));
if (vp->v_flag & VAGE) {
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
@@ -2641,7 +2641,7 @@ vfree(vp)
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
}
freevnodes++;
- simple_unlock(&vnode_free_list_slock);
+ mtx_exit(&vnode_free_list_mtx, MTX_DEF);
vp->v_flag &= ~VAGE;
vp->v_flag |= VFREE;
splx(s);
@@ -2657,11 +2657,11 @@ vbusy(vp)
int s;
s = splbio();
- simple_lock(&vnode_free_list_slock);
+ mtx_enter(&vnode_free_list_mtx, MTX_DEF);
KASSERT((vp->v_flag & VFREE) != 0, ("vnode not free"));
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
freevnodes--;
- simple_unlock(&vnode_free_list_slock);
+ mtx_exit(&vnode_free_list_mtx, MTX_DEF);
vp->v_flag &= ~(VFREE|VAGE);
splx(s);
}
@@ -2680,7 +2680,7 @@ vn_pollrecord(vp, p, events)
struct proc *p;
short events;
{
- simple_lock(&vp->v_pollinfo.vpi_lock);
+ mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
if (vp->v_pollinfo.vpi_revents & events) {
/*
* This leaves events we are not interested
@@ -2692,12 +2692,12 @@ vn_pollrecord(vp, p, events)
events &= vp->v_pollinfo.vpi_revents;
vp->v_pollinfo.vpi_revents &= ~events;
- simple_unlock(&vp->v_pollinfo.vpi_lock);
+ mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
return events;
}
vp->v_pollinfo.vpi_events |= events;
selrecord(p, &vp->v_pollinfo.vpi_selinfo);
- simple_unlock(&vp->v_pollinfo.vpi_lock);
+ mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
return 0;
}
@@ -2712,7 +2712,7 @@ vn_pollevent(vp, events)
struct vnode *vp;
short events;
{
- simple_lock(&vp->v_pollinfo.vpi_lock);
+ mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
if (vp->v_pollinfo.vpi_events & events) {
/*
* We clear vpi_events so that we don't
@@ -2729,7 +2729,7 @@ vn_pollevent(vp, events)
vp->v_pollinfo.vpi_revents |= events;
selwakeup(&vp->v_pollinfo.vpi_selinfo);
}
- simple_unlock(&vp->v_pollinfo.vpi_lock);
+ mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
}
/*
@@ -2741,12 +2741,12 @@ void
vn_pollgone(vp)
struct vnode *vp;
{
- simple_lock(&vp->v_pollinfo.vpi_lock);
+ mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
if (vp->v_pollinfo.vpi_events) {
vp->v_pollinfo.vpi_events = 0;
selwakeup(&vp->v_pollinfo.vpi_selinfo);
}
- simple_unlock(&vp->v_pollinfo.vpi_lock);
+ mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
}
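
A pattern recurs in vflush() and sysctl_vnode() above: capture the successor while mntvnode_mtx is held, drop the mutex around work that can block (vgonel(), SYSCTL_OUT()), reacquire, and revalidate — restarting the walk if the list changed underneath (the vp->v_mount != mp check). A hedged userspace analogue using pthreads; mutators are assumed to hold list_mtx and bump list_gen:

	#include <pthread.h>
	#include <stddef.h>

	struct node {
		struct node	*next;
	};

	static pthread_mutex_t	list_mtx = PTHREAD_MUTEX_INITIALIZER;
	static struct node	*head;
	static int		list_gen;	/* bumped on any relink */

	static void
	visit_all(void (*work)(struct node *))
	{
		struct node *n, *next;
		int gen;

		pthread_mutex_lock(&list_mtx);
	again:
		for (n = head; n != NULL; n = next) {
			next = n->next;	/* capture before unlocking */
			gen = list_gen;
			pthread_mutex_unlock(&list_mtx);
			work(n);	/* may block; list may change */
			pthread_mutex_lock(&list_mtx);
			if (gen != list_gen)	/* cf. vp->v_mount != mp */
				goto again;	/* stale: restart walk */
		}
		pthread_mutex_unlock(&list_mtx);
	}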
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index fba809c..6e2afda 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -152,26 +152,25 @@ struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
struct mtx mountlist_mtx;
/* For any iteration/modification of mnt_vnodelist */
-struct simplelock mntvnode_slock;
+struct mtx mntvnode_mtx;
+
/*
* Cache for the mount type id assigned to NFS. This is used for
* special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
*/
int nfs_mount_type = -1;
-#ifndef NULL_SIMPLELOCKS
/* To keep more than one thread at a time from running vfs_getnewfsid */
-static struct simplelock mntid_slock;
+static struct mtx mntid_mtx;
/* For any iteration/modification of vnode_free_list */
-static struct simplelock vnode_free_list_slock;
+static struct mtx vnode_free_list_mtx;
/*
* For any iteration/modification of dev->si_hlist (linked through
* v_specnext)
*/
-static struct simplelock spechash_slock;
-#endif
+static struct mtx spechash_mtx;
/* Publicly exported FS */
struct nfs_public nfs_pub;
@@ -250,11 +249,11 @@ vntblinit(void *dummy __unused)
desiredvnodes = maxproc + cnt.v_page_count / 4;
mtx_init(&mountlist_mtx, "mountlist", MTX_DEF);
- simple_lock_init(&mntvnode_slock);
- simple_lock_init(&mntid_slock);
- simple_lock_init(&spechash_slock);
+ mtx_init(&mntvnode_mtx, "mntvnode", MTX_DEF);
+ mtx_init(&mntid_mtx, "mntid", MTX_DEF);
+ mtx_init(&spechash_mtx, "spechash", MTX_DEF);
TAILQ_INIT(&vnode_free_list);
- simple_lock_init(&vnode_free_list_slock);
+ mtx_init(&vnode_free_list_mtx, "vnode_free_list", MTX_DEF);
vnode_zone = zinit("VNODE", sizeof (struct vnode), 0, 0, 5);
/*
* Initialize the filesystem syncer.
@@ -423,7 +422,7 @@ vfs_getnewfsid(mp)
fsid_t tfsid;
int mtype;
- simple_lock(&mntid_slock);
+ mtx_enter(&mntid_mtx, MTX_DEF);
mtype = mp->mnt_vfc->vfc_typenum;
tfsid.val[1] = mtype;
mtype = (mtype & 0xFF) << 24;
@@ -436,7 +435,7 @@ vfs_getnewfsid(mp)
}
mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
- simple_unlock(&mntid_slock);
+ mtx_exit(&mntid_mtx, MTX_DEF);
}
/*
@@ -539,7 +538,7 @@ getnewvnode(tag, mp, vops, vpp)
*/
s = splbio();
- simple_lock(&vnode_free_list_slock);
+ mtx_enter(&vnode_free_list_mtx, MTX_DEF);
if (wantfreevnodes && freevnodes < wantfreevnodes) {
vp = NULL;
@@ -579,7 +578,7 @@ getnewvnode(tag, mp, vops, vpp)
vp->v_flag |= VDOOMED;
vp->v_flag &= ~VFREE;
freevnodes--;
- simple_unlock(&vnode_free_list_slock);
+ mtx_exit(&vnode_free_list_mtx, MTX_DEF);
cache_purge(vp);
vp->v_lease = NULL;
if (vp->v_type != VBAD) {
@@ -610,11 +609,12 @@ getnewvnode(tag, mp, vops, vpp)
vp->v_clen = 0;
vp->v_socket = 0;
} else {
- simple_unlock(&vnode_free_list_slock);
+ mtx_exit(&vnode_free_list_mtx, MTX_DEF);
vp = (struct vnode *) zalloc(vnode_zone);
bzero((char *) vp, sizeof *vp);
mtx_init(&vp->v_interlock, "vnode interlock", MTX_DEF);
vp->v_dd = vp;
+ mtx_init(&vp->v_pollinfo.vpi_lock, "vnode pollinfo", MTX_DEF);
cache_purge(vp);
LIST_INIT(&vp->v_cache_src);
TAILQ_INIT(&vp->v_cache_dst);
@@ -646,7 +646,7 @@ insmntque(vp, mp)
register struct mount *mp;
{
- simple_lock(&mntvnode_slock);
+ mtx_enter(&mntvnode_mtx, MTX_DEF);
/*
* Delete from old mount point vnode list, if on one.
*/
@@ -656,11 +656,11 @@ insmntque(vp, mp)
* Insert into list of vnodes for the new mount point, if available.
*/
if ((vp->v_mount = mp) == NULL) {
- simple_unlock(&mntvnode_slock);
+ mtx_exit(&mntvnode_mtx, MTX_DEF);
return;
}
LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
- simple_unlock(&mntvnode_slock);
+ mtx_exit(&mntvnode_mtx, MTX_DEF);
}
/*
@@ -1402,9 +1402,9 @@ addalias(nvp, dev)
KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
nvp->v_rdev = dev;
- simple_lock(&spechash_slock);
+ mtx_enter(&spechash_mtx, MTX_DEF);
SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
- simple_unlock(&spechash_slock);
+ mtx_exit(&spechash_mtx, MTX_DEF);
}
/*
@@ -1628,7 +1628,7 @@ vflush(mp, skipvp, flags)
struct vnode *vp, *nvp;
int busy = 0;
- simple_lock(&mntvnode_slock);
+ mtx_enter(&mntvnode_mtx, MTX_DEF);
loop:
for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
/*
@@ -1667,9 +1667,9 @@ loop:
* vnode data structures and we are done.
*/
if (vp->v_usecount == 0) {
- simple_unlock(&mntvnode_slock);
+ mtx_exit(&mntvnode_mtx, MTX_DEF);
vgonel(vp, p);
- simple_lock(&mntvnode_slock);
+ mtx_enter(&mntvnode_mtx, MTX_DEF);
continue;
}
@@ -1679,7 +1679,7 @@ loop:
* all other files, just kill them.
*/
if (flags & FORCECLOSE) {
- simple_unlock(&mntvnode_slock);
+ mtx_exit(&mntvnode_mtx, MTX_DEF);
if (vp->v_type != VCHR) {
vgonel(vp, p);
} else {
@@ -1687,7 +1687,7 @@ loop:
vp->v_op = spec_vnodeop_p;
insmntque(vp, (struct mount *) 0);
}
- simple_lock(&mntvnode_slock);
+ mtx_enter(&mntvnode_mtx, MTX_DEF);
continue;
}
#ifdef DIAGNOSTIC
@@ -1697,7 +1697,7 @@ loop:
mtx_exit(&vp->v_interlock, MTX_DEF);
busy++;
}
- simple_unlock(&mntvnode_slock);
+ mtx_exit(&mntvnode_mtx, MTX_DEF);
if (busy)
return (EBUSY);
return (0);
@@ -1842,9 +1842,9 @@ vop_revoke(ap)
}
dev = vp->v_rdev;
for (;;) {
- simple_lock(&spechash_slock);
+ mtx_enter(&spechash_mtx, MTX_DEF);
vq = SLIST_FIRST(&dev->si_hlist);
- simple_unlock(&spechash_slock);
+ mtx_exit(&spechash_mtx, MTX_DEF);
if (!vq)
break;
vgone(vq);
@@ -1859,14 +1859,14 @@ vop_revoke(ap)
int
vrecycle(vp, inter_lkp, p)
struct vnode *vp;
- struct simplelock *inter_lkp;
+ struct mtx *inter_lkp;
struct proc *p;
{
mtx_enter(&vp->v_interlock, MTX_DEF);
if (vp->v_usecount == 0) {
if (inter_lkp) {
- simple_unlock(inter_lkp);
+ mtx_exit(inter_lkp, MTX_DEF);
}
vgonel(vp, p);
return (1);
@@ -1926,10 +1926,10 @@ vgonel(vp, p)
* if it is on one.
*/
if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) {
- simple_lock(&spechash_slock);
+ mtx_enter(&spechash_mtx, MTX_DEF);
SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
freedev(vp->v_rdev);
- simple_unlock(&spechash_slock);
+ mtx_exit(&spechash_mtx, MTX_DEF);
vp->v_rdev = NULL;
}
@@ -1945,14 +1945,14 @@ vgonel(vp, p)
*/
if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
s = splbio();
- simple_lock(&vnode_free_list_slock);
+ mtx_enter(&vnode_free_list_mtx, MTX_DEF);
if (vp->v_flag & VFREE)
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
else
freevnodes++;
vp->v_flag |= VFREE;
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
- simple_unlock(&vnode_free_list_slock);
+ mtx_exit(&vnode_free_list_mtx, MTX_DEF);
splx(s);
}
@@ -1971,15 +1971,15 @@ vfinddev(dev, type, vpp)
{
struct vnode *vp;
- simple_lock(&spechash_slock);
+ mtx_enter(&spechash_mtx, MTX_DEF);
SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
if (type == vp->v_type) {
*vpp = vp;
- simple_unlock(&spechash_slock);
+ mtx_exit(&spechash_mtx, MTX_DEF);
return (1);
}
}
- simple_unlock(&spechash_slock);
+ mtx_exit(&spechash_mtx, MTX_DEF);
return (0);
}
@@ -1994,10 +1994,10 @@ vcount(vp)
int count;
count = 0;
- simple_lock(&spechash_slock);
+ mtx_enter(&spechash_mtx, MTX_DEF);
SLIST_FOREACH(vq, &vp->v_rdev->si_hlist, v_specnext)
count += vq->v_usecount;
- simple_unlock(&spechash_slock);
+ mtx_exit(&spechash_mtx, MTX_DEF);
return (count);
}
@@ -2204,7 +2204,7 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
continue;
}
again:
- simple_lock(&mntvnode_slock);
+ mtx_enter(&mntvnode_mtx, MTX_DEF);
for (vp = LIST_FIRST(&mp->mnt_vnodelist);
vp != NULL;
vp = nvp) {
@@ -2214,17 +2214,17 @@ again:
* recycled onto the same filesystem.
*/
if (vp->v_mount != mp) {
- simple_unlock(&mntvnode_slock);
+ mtx_exit(&mntvnode_mtx, MTX_DEF);
goto again;
}
nvp = LIST_NEXT(vp, v_mntvnodes);
- simple_unlock(&mntvnode_slock);
+ mtx_exit(&mntvnode_mtx, MTX_DEF);
if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
(error = SYSCTL_OUT(req, vp, VNODESZ)))
return (error);
- simple_lock(&mntvnode_slock);
+ mtx_enter(&mntvnode_mtx, MTX_DEF);
}
- simple_unlock(&mntvnode_slock);
+ mtx_exit(&mntvnode_mtx, MTX_DEF);
mtx_enter(&mountlist_mtx, MTX_DEF);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, p);
@@ -2633,7 +2633,7 @@ vfree(vp)
int s;
s = splbio();
- simple_lock(&vnode_free_list_slock);
+ mtx_enter(&vnode_free_list_mtx, MTX_DEF);
KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free"));
if (vp->v_flag & VAGE) {
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
@@ -2641,7 +2641,7 @@ vfree(vp)
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
}
freevnodes++;
- simple_unlock(&vnode_free_list_slock);
+ mtx_exit(&vnode_free_list_mtx, MTX_DEF);
vp->v_flag &= ~VAGE;
vp->v_flag |= VFREE;
splx(s);
@@ -2657,11 +2657,11 @@ vbusy(vp)
int s;
s = splbio();
- simple_lock(&vnode_free_list_slock);
+ mtx_enter(&vnode_free_list_mtx, MTX_DEF);
KASSERT((vp->v_flag & VFREE) != 0, ("vnode not free"));
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
freevnodes--;
- simple_unlock(&vnode_free_list_slock);
+ mtx_exit(&vnode_free_list_mtx, MTX_DEF);
vp->v_flag &= ~(VFREE|VAGE);
splx(s);
}
@@ -2680,7 +2680,7 @@ vn_pollrecord(vp, p, events)
struct proc *p;
short events;
{
- simple_lock(&vp->v_pollinfo.vpi_lock);
+ mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
if (vp->v_pollinfo.vpi_revents & events) {
/*
* This leaves events we are not interested
@@ -2692,12 +2692,12 @@ vn_pollrecord(vp, p, events)
events &= vp->v_pollinfo.vpi_revents;
vp->v_pollinfo.vpi_revents &= ~events;
- simple_unlock(&vp->v_pollinfo.vpi_lock);
+ mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
return events;
}
vp->v_pollinfo.vpi_events |= events;
selrecord(p, &vp->v_pollinfo.vpi_selinfo);
- simple_unlock(&vp->v_pollinfo.vpi_lock);
+ mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
return 0;
}
@@ -2712,7 +2712,7 @@ vn_pollevent(vp, events)
struct vnode *vp;
short events;
{
- simple_lock(&vp->v_pollinfo.vpi_lock);
+ mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
if (vp->v_pollinfo.vpi_events & events) {
/*
* We clear vpi_events so that we don't
@@ -2729,7 +2729,7 @@ vn_pollevent(vp, events)
vp->v_pollinfo.vpi_revents |= events;
selwakeup(&vp->v_pollinfo.vpi_selinfo);
}
- simple_unlock(&vp->v_pollinfo.vpi_lock);
+ mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
}
/*
@@ -2741,12 +2741,12 @@ void
vn_pollgone(vp)
struct vnode *vp;
{
- simple_lock(&vp->v_pollinfo.vpi_lock);
+ mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
if (vp->v_pollinfo.vpi_events) {
vp->v_pollinfo.vpi_events = 0;
selwakeup(&vp->v_pollinfo.vpi_selinfo);
}
- simple_unlock(&vp->v_pollinfo.vpi_lock);
+ mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
}
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index b7cea77..221e9c0 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -833,9 +833,9 @@ filt_vnattach(struct knote *kn)
if ((vp)->v_tag != VT_UFS)
return (EOPNOTSUPP);
- simple_lock(&vp->v_pollinfo.vpi_lock);
+ mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
SLIST_INSERT_HEAD(&vp->v_pollinfo.vpi_selinfo.si_note, kn, kn_selnext);
- simple_unlock(&vp->v_pollinfo.vpi_lock);
+ mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
return (0);
}
@@ -845,10 +845,10 @@ filt_vndetach(struct knote *kn)
{
struct vnode *vp = (struct vnode *)kn->kn_fp->f_data;
- simple_lock(&vp->v_pollinfo.vpi_lock);
+ mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
SLIST_REMOVE(&vp->v_pollinfo.vpi_selinfo.si_note,
kn, knote, kn_selnext);
- simple_unlock(&vp->v_pollinfo.vpi_lock);
+ mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
}
static int