summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
authorjasone <jasone@FreeBSD.org>2001-03-05 19:59:41 +0000
committerjasone <jasone@FreeBSD.org>2001-03-05 19:59:41 +0000
commitcd006e6a5c321fb303d77f74ce0ee3c15ac868cd (patch)
tree6c8e2186964791ad357dd591460ffebdb46cee6d /sys
parent4c2aa2014d291e16581ae27306797b4adf550efa (diff)
downloadFreeBSD-src-cd006e6a5c321fb303d77f74ce0ee3c15ac868cd.zip
FreeBSD-src-cd006e6a5c321fb303d77f74ce0ee3c15ac868cd.tar.gz
Implement shared/exclusive locks.
Reviewed by: bmilekic, jake, jhb
Diffstat (limited to 'sys')
-rw-r--r--sys/conf/files1
-rw-r--r--sys/kern/kern_sx.c160
-rw-r--r--sys/sys/sx.h82
3 files changed, 243 insertions, 0 deletions
diff --git a/sys/conf/files b/sys/conf/files
index d01be41..f298038 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -681,6 +681,7 @@ kern/kern_shutdown.c standard
kern/kern_sig.c standard
kern/kern_subr.c standard
kern/kern_switch.c standard
+kern/kern_sx.c standard
kern/kern_synch.c standard
kern/kern_syscalls.c standard
kern/kern_sysctl.c standard
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
new file mode 100644
index 0000000..582ff04
--- /dev/null
+++ b/sys/kern/kern_sx.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2001 Jason Evans <jasone@freebsd.org>. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice(s), this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified other than the possible
+ * addition of one or more copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice(s), this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Shared/exclusive locks. This implementation assures deterministic lock
+ * granting behavior, so that slocks and xlocks are interleaved.
+ *
+ * Priority propagation will not generally raise the priority of lock holders,
+ * so should not be relied upon in combination with sx locks.
+ *
+ * The witness code cannot detect lock cycles such as the following:
+ *
+ * slock --> xlock (deadlock)
+ * slock --> slock (slock recursion, not fatal)
+ * xlock --> xlock (deadlock)
+ * xlock --> slock (deadlock)
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/ktr.h>
+#include <sys/condvar.h>
+#include <sys/mutex.h>
+#include <sys/sx.h>
+
+/*
+ * Initialize an sx lock before first use.  "description" names the lock
+ * for the internal protection mutex and both condition variables.  All
+ * counters start at zero: unlocked, no shared or exclusive waiters.
+ */
+void
+sx_init(struct sx *sx, const char *description)
+{
+
+	mtx_init(&sx->sx_lock, description, MTX_DEF);
+	sx->sx_cnt = 0;
+	cv_init(&sx->sx_shrd_cv, description);
+	sx->sx_shrd_wcnt = 0;
+	cv_init(&sx->sx_excl_cv, description);
+	sx->sx_excl_wcnt = 0;
+}
+
+/*
+ * Tear down an sx lock.  The lock must be completely idle: no shared or
+ * exclusive holders (sx_cnt == 0) and no threads blocked waiting on it,
+ * as asserted below.  The caller is responsible for ensuring no further
+ * use of the lock is possible.
+ */
+void
+sx_destroy(struct sx *sx)
+{
+
+	KASSERT((sx->sx_cnt == 0 && sx->sx_shrd_wcnt == 0 && sx->sx_excl_wcnt ==
+	    0), ("%s: holders or waiters\n", __FUNCTION__));
+
+	mtx_destroy(&sx->sx_lock);
+	cv_destroy(&sx->sx_shrd_cv);
+	cv_destroy(&sx->sx_excl_cv);
+}
+
+/*
+ * Acquire a shared (read) lock, blocking while an exclusive holder is
+ * present (sx_cnt < 0).  Multiple shared holders may coexist; each
+ * successful acquisition increments sx_cnt by one.
+ */
+void
+sx_slock(struct sx *sx)
+{
+
+	mtx_lock(&sx->sx_lock);
+
+	/*
+	 * Loop in case we lose the race for lock acquisition.
+	 * cv_wait() releases sx_lock while asleep and reacquires it on
+	 * wakeup, so the condition must be rechecked each iteration.
+	 */
+	while (sx->sx_cnt < 0) {
+		sx->sx_shrd_wcnt++;
+		cv_wait(&sx->sx_shrd_cv, &sx->sx_lock);
+		sx->sx_shrd_wcnt--;
+	}
+
+	/* Acquire a shared lock. */
+	sx->sx_cnt++;
+
+	mtx_unlock(&sx->sx_lock);
+}
+
+/*
+ * Acquire an exclusive (write) lock, blocking while any holder exists,
+ * shared or exclusive (sx_cnt != 0).  Exclusive ownership is encoded as
+ * sx_cnt == -1 (hence the decrement below).
+ */
+void
+sx_xlock(struct sx *sx)
+{
+
+	/* Loop in case we lose the race for lock acquisition; cv_wait()
+	 * drops and reacquires sx_lock, so recheck on every wakeup. */
+	mtx_lock(&sx->sx_lock);
+
+	/* Loop in case we lose the race for lock acquisition. */
+	while (sx->sx_cnt != 0) {
+		sx->sx_excl_wcnt++;
+		cv_wait(&sx->sx_excl_cv, &sx->sx_lock);
+		sx->sx_excl_wcnt--;
+	}
+
+	/* Acquire an exclusive lock. */
+	sx->sx_cnt--;
+
+	mtx_unlock(&sx->sx_lock);
+}
+
+/*
+ * Release one shared lock.  If this was the last shared holder, wake
+ * waiters, preferring exclusive waiters over shared ones so that writers
+ * are not starved by a continuous stream of readers.
+ */
+void
+sx_sunlock(struct sx *sx)
+{
+
+	mtx_lock(&sx->sx_lock);
+	KASSERT((sx->sx_cnt > 0), ("%s: lacking slock\n", __FUNCTION__));
+
+	/* Release. */
+	sx->sx_cnt--;
+
+	/*
+	 * If we just released the last shared lock, wake any waiters up, giving
+	 * exclusive lockers precedence. In order to make sure that exclusive
+	 * lockers won't be blocked forever, don't wake shared lock waiters if
+	 * there are exclusive lock waiters.
+	 */
+	if (sx->sx_excl_wcnt > 0) {
+		if (sx->sx_cnt == 0)
+			cv_signal(&sx->sx_excl_cv);
+	} else if (sx->sx_shrd_wcnt > 0)
+		cv_broadcast(&sx->sx_shrd_cv);
+
+	mtx_unlock(&sx->sx_lock);
+}
+
+/*
+ * Release an exclusive lock.  Precedence here is the opposite of
+ * sx_sunlock(): shared waiters are woken first.  Together the two
+ * policies alternate grants between readers and writers, giving the
+ * deterministic interleaving described in the file header.
+ */
+void
+sx_xunlock(struct sx *sx)
+{
+
+	mtx_lock(&sx->sx_lock);
+	KASSERT((sx->sx_cnt == -1), ("%s: lacking xlock\n", __FUNCTION__));
+
+	/* Release. */
+	sx->sx_cnt++;
+
+	/*
+	 * Wake up waiters if there are any. Give precedence to slock waiters.
+	 */
+	if (sx->sx_shrd_wcnt > 0)
+		cv_broadcast(&sx->sx_shrd_cv);
+	else if (sx->sx_excl_wcnt > 0)
+		cv_signal(&sx->sx_excl_cv);
+
+	mtx_unlock(&sx->sx_lock);
+}
diff --git a/sys/sys/sx.h b/sys/sys/sx.h
new file mode 100644
index 0000000..73ef97e
--- /dev/null
+++ b/sys/sys/sx.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2001 Jason Evans <jasone@freebsd.org>. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice(s), this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified other than the possible
+ * addition of one or more copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice(s), this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_SX_H_
+#define _SYS_SX_H_
+
+#ifndef LOCORE
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+
+/*
+ * Shared/exclusive lock.  All fields are protected by sx_lock; sx_cnt
+ * encodes the lock state (see below).
+ */
+struct sx {
+	struct mtx	sx_lock;	/* General protection lock and xlock. */
+	int		sx_cnt;		/* -1: xlock, 0: free, > 0: slock count. */
+	struct cv	sx_shrd_cv;	/* slock waiters. */
+	int		sx_shrd_wcnt;	/* Number of slock waiters. */
+	struct cv	sx_excl_cv;	/* xlock waiters. */
+	int		sx_excl_wcnt;	/* Number of xlock waiters. */
+};
+
+#ifdef _KERNEL
+void sx_init(struct sx *sx, const char *description);
+void sx_destroy(struct sx *sx);
+void sx_slock(struct sx *sx);
+void sx_xlock(struct sx *sx);
+void sx_sunlock(struct sx *sx);
+void sx_xunlock(struct sx *sx);
+
+#ifdef INVARIANTS
+/*
+ * SX_ASSERT_SLOCKED() can only detect that at least *some* thread owns an
+ * slock, but it cannot guarantee that *this* thread owns an slock.
+ */
+#define SX_ASSERT_SLOCKED(sx) do {					\
+	mtx_lock(&(sx)->sx_lock);					\
+	KASSERT(((sx)->sx_cnt > 0), ("%s: lacking slock\n",		\
+	    __FUNCTION__));						\
+	mtx_unlock(&(sx)->sx_lock);					\
+} while (0)
+/*
+ * SX_ASSERT_XLOCKED() can only detect that at least *some* thread owns an
+ * xlock, but it cannot guarantee that *this* thread owns an xlock.
+ */
+#define SX_ASSERT_XLOCKED(sx) do {					\
+	mtx_lock(&(sx)->sx_lock);					\
+	KASSERT(((sx)->sx_cnt == -1), ("%s: lacking xlock\n",		\
+	    __FUNCTION__));						\
+	mtx_unlock(&(sx)->sx_lock);					\
+} while (0)
+#else	/* INVARIANTS */
+#define SX_ASSERT_SLOCKED(sx)
+/* Fixed typo: was SX_ASSERT_XLOCKER, which left XLOCKED undefined here. */
+#define SX_ASSERT_XLOCKED(sx)
+#endif	/* INVARIANTS */
+
+#endif /* _KERNEL */
+#endif /* !LOCORE */
+#endif /* _SYS_SX_H_ */
OpenPOWER on IntegriCloud