author		kib <kib@FreeBSD.org>	2009-06-10 20:59:32 +0000
committer	kib <kib@FreeBSD.org>	2009-06-10 20:59:32 +0000
commit		e1cb2941d4424de90eb68716d6c4d95f4c0af0ba (patch)
tree		9c12d3a92805f512ad054e2b8fa5618e0bd9cc71 /sys/kern/kern_event.c
parent		a9806592196870f6605860715c0edac4deb6a55d (diff)
Adapt the vfs kqfilter to the shared vnode lock used by the zfs write vop.
Use the vnode interlock to protect the knote fields [1].  The locking
assumes that the shared vnode lock is held, thus we get exclusive access
to the knote either by exclusive vnode lock protection, or by the shared
vnode lock + vnode interlock.

Do not use the kl_locked() method to assert either lock ownership or the
fact that curthread does not own the lock.  For shared locks, ownership
is not recorded; e.g. VOP_ISLOCKED can return LK_SHARED for a shared lock
not owned by curthread, causing false positives in kqueue subsystem
assertions about the knlist lock.

Remove the kl_locked method from the knlist lock vector, and add two
separate assertion methods, kl_assert_locked and kl_assert_unlocked, that
are supposed to use proper asserts.  Change knlist_init accordingly.

Add a convenience function, knlist_init_mtx, to reduce the number of
arguments for typical knlist initialization.

Submitted by:	jhb [1]
Noted by:	jhb [2]
Reviewed by:	jhb
Tested by:	rnoland
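As background for the API change described above, the sketch below shows how a
subsystem whose knote list is guarded by something other than a plain mutex
could fill in the reworked lock vector.  This is not code from this commit:
the example_* names and struct example_softc are hypothetical, an sx lock
stands in for whatever lock the subsystem really uses, and only knlist_init()
with its new six-argument signature is taken from the diff that follows.  (The
vfs kqfilter case itself wires up the vnode lock and interlock; that change is
outside this file.)

/*
 * Hypothetical subsystem protecting its knote list with an sx lock.
 * Passing explicit assert callbacks replaces the old kl_locked()
 * predicate, which could not be implemented reliably for shared locks.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/event.h>

struct example_softc {
	struct sx	es_lock;
	struct knlist	es_note;
};

static void
example_knl_lock(void *arg)
{

	sx_xlock((struct sx *)arg);
}

static void
example_knl_unlock(void *arg)
{

	sx_xunlock((struct sx *)arg);
}

static void
example_knl_assert_locked(void *arg)
{

	sx_assert((struct sx *)arg, SA_XLOCKED);
}

static void
example_knl_assert_unlocked(void *arg)
{

	sx_assert((struct sx *)arg, SA_UNLOCKED);
}

static void
example_attach(struct example_softc *sc)
{

	sx_init(&sc->es_lock, "example knlist lock");
	knlist_init(&sc->es_note, &sc->es_lock, example_knl_lock,
	    example_knl_unlock, example_knl_assert_locked,
	    example_knl_assert_unlocked);
}

Any callback passed as NULL falls back to the mutex-based knlist_mtx_* helpers
added in the diff, which is what keeps the common case a one-liner via
knlist_init_mtx().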
Diffstat (limited to 'sys/kern/kern_event.c')
-rw-r--r--	sys/kern/kern_event.c	43
1 file changed, 29 insertions, 14 deletions
diff --git a/sys/kern/kern_event.c b/sys/kern/kern_event.c
index 4acfc49..1715741 100644
--- a/sys/kern/kern_event.c
+++ b/sys/kern/kern_event.c
@@ -208,12 +208,10 @@ SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
 } while (0)
 #ifdef INVARIANTS
 #define KNL_ASSERT_LOCKED(knl) do {					\
-	if (!knl->kl_locked((knl)->kl_lockarg))				\
-		panic("knlist not locked, but should be");		\
+	knl->kl_assert_locked((knl)->kl_lockarg);			\
 } while (0)
-#define KNL_ASSERT_UNLOCKED(knl) do {					\
-	if (knl->kl_locked((knl)->kl_lockarg))				\
-		panic("knlist locked, but should not be");		\
+#define KNL_ASSERT_UNLOCKED(knl) do {					\
+	knl->kl_assert_unlocked((knl)->kl_lockarg);			\
 } while (0)
 #else /* !INVARIANTS */
 #define KNL_ASSERT_LOCKED(knl) do {} while(0)
@@ -577,7 +575,7 @@ kqueue(struct thread *td, struct kqueue_args *uap)
 	mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK);
 	TAILQ_INIT(&kq->kq_head);
 	kq->kq_fdp = fdp;
-	knlist_init(&kq->kq_sel.si_note, &kq->kq_lock, NULL, NULL, NULL);
+	knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
 	TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
 
 	FILEDESC_XLOCK(fdp);
@@ -1723,7 +1721,6 @@ MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
 	MTX_DEF);
 static void	knlist_mtx_lock(void *arg);
 static void	knlist_mtx_unlock(void *arg);
-static int	knlist_mtx_locked(void *arg);
 
 static void
 knlist_mtx_lock(void *arg)
@@ -1737,15 +1734,22 @@ knlist_mtx_unlock(void *arg)
 	mtx_unlock((struct mtx *)arg);
 }
 
-static int
-knlist_mtx_locked(void *arg)
+static void
+knlist_mtx_assert_locked(void *arg)
+{
+	mtx_assert((struct mtx *)arg, MA_OWNED);
+}
+
+static void
+knlist_mtx_assert_unlocked(void *arg)
 {
-	return (mtx_owned((struct mtx *)arg));
+	mtx_assert((struct mtx *)arg, MA_NOTOWNED);
 }
 
 void
 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
-    void (*kl_unlock)(void *), int (*kl_locked)(void *))
+    void (*kl_unlock)(void *),
+    void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *))
 {
 
 	if (lock == NULL)
@@ -1761,15 +1765,26 @@ knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
 		knl->kl_unlock = knlist_mtx_unlock;
 	else
 		knl->kl_unlock = kl_unlock;
-	if (kl_locked == NULL)
-		knl->kl_locked = knlist_mtx_locked;
+	if (kl_assert_locked == NULL)
+		knl->kl_assert_locked = knlist_mtx_assert_locked;
 	else
-		knl->kl_locked = kl_locked;
+		knl->kl_assert_locked = kl_assert_locked;
+	if (kl_assert_unlocked == NULL)
+		knl->kl_assert_unlocked = knlist_mtx_assert_unlocked;
+	else
+		knl->kl_assert_unlocked = kl_assert_unlocked;
 
 	SLIST_INIT(&knl->kl_list);
 }
 
 void
+knlist_init_mtx(struct knlist *knl, struct mtx *lock)
+{
+
+	knlist_init(knl, lock, NULL, NULL, NULL, NULL);
+}
+
+void
 knlist_destroy(struct knlist *knl)
 {
 
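To close, here is a hedged sketch of the common mutex-backed case that the new
knlist_init_mtx() convenience function targets.  The foo_* driver and its
softc layout are made up for illustration; knlist_init_mtx(), struct selinfo,
KNOTE_LOCKED() and the mtx(9) calls are existing kernel interfaces.

/*
 * Hypothetical driver posting events on a knlist that is embedded in
 * a selinfo and protected by the driver mutex.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/selinfo.h>
#include <sys/event.h>

struct foo_softc {
	struct mtx	fs_mtx;
	struct selinfo	fs_rsel;
};

static void
foo_init(struct foo_softc *sc)
{

	mtx_init(&sc->fs_mtx, "foo softc", NULL, MTX_DEF);
	/* One call instead of knlist_init(..., NULL, NULL, NULL, NULL). */
	knlist_init_mtx(&sc->fs_rsel.si_note, &sc->fs_mtx);
}

static void
foo_data_ready(struct foo_softc *sc)
{

	mtx_lock(&sc->fs_mtx);
	/* The list lock is held, so use the locked form of KNOTE. */
	KNOTE_LOCKED(&sc->fs_rsel.si_note, 0);
	mtx_unlock(&sc->fs_mtx);
}

With this wiring, the KNL_ASSERT_LOCKED/KNL_ASSERT_UNLOCKED macros from the
first hunk resolve, under INVARIANTS, to mtx_assert() on fs_mtx through the
default knlist_mtx_assert_* callbacks.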