summaryrefslogtreecommitdiffstats
path: root/sys/kern/subr_kobj.c
diff options
context:
space:
mode:
authordfr <dfr@FreeBSD.org>2003-10-16 09:16:28 +0000
committerdfr <dfr@FreeBSD.org>2003-10-16 09:16:28 +0000
commit3dac505582c8dd13ac06b6571c0fab5ff2ab8a02 (patch)
tree182c2fd86e1bded02c1690346e608a28307ff754 /sys/kern/subr_kobj.c
parent4aea3a9433e80a27ebfdc96bbf34edc628c9749b (diff)
downloadFreeBSD-src-3dac505582c8dd13ac06b6571c0fab5ff2ab8a02.zip
FreeBSD-src-3dac505582c8dd13ac06b6571c0fab5ff2ab8a02.tar.gz
* Add multiple inheritance to kobj. Each class can have zero or more base
classes and if a method is not found in a given class, its base classes
are searched (in the order they were declared). This search is recursive,
i.e. a method may be defined in a base class of a base class.

* Change the kobj method lookup algorithm to one which is SMP-safe. This
relies only on the constraint that an observer of a sequence of writes of
pointer-sized values will see exactly one of those values, not a mixture
of two or more values. This assumption holds for all processors which
FreeBSD supports.

* Add locking to kobj class initialisation.

* Add a simpler form of 'inheritance' for devclasses. Each devclass can
have a parent devclass. Searches for drivers continue up the chain of
devclasses until either a matching driver is found or a devclass is
reached which has no parent. This can allow, for instance, pci drivers to
match cardbus devices (assuming that cardbus declares pci as its parent
devclass).

* Increment __FreeBSD_version.

This preserves the driver API entirely except for one minor feature used
by the ISA compatibility shims. A workaround for ISA compatibility will
be committed separately. The kobj and newbus ABI has changed - all
modules must be recompiled.
Diffstat (limited to 'sys/kern/subr_kobj.c')
-rw-r--r--sys/kern/subr_kobj.c188
1 files changed, 155 insertions, 33 deletions
diff --git a/sys/kern/subr_kobj.c b/sys/kern/subr_kobj.c
index a3fc230..1704a0f 100644
--- a/sys/kern/subr_kobj.c
+++ b/sys/kern/subr_kobj.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2000 Doug Rabson
+ * Copyright (c) 2000,2003 Doug Rabson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -28,16 +28,15 @@
__FBSDID("$FreeBSD$");
#include <sys/param.h>
-#include <sys/queue.h>
-#include <sys/malloc.h>
#include <sys/kernel.h>
-#include <sys/module.h>
-#include <sys/errno.h>
+#include <sys/kobj.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
#include <sys/sysctl.h>
#ifndef TEST
#include <sys/systm.h>
#endif
-#include <sys/kobj.h>
#ifdef TEST
#include "usertest.h"
@@ -57,22 +56,43 @@ SYSCTL_UINT(_kern, OID_AUTO, kobj_misses, CTLFLAG_RD,
#endif
+static struct mtx kobj_mtx;
static int kobj_next_id = 1;
SYSCTL_UINT(_kern, OID_AUTO, kobj_methodcount, CTLFLAG_RD,
&kobj_next_id, 0, "");
-static int
+static void
+kobj_init_mutex(void *arg)
+{
+
+ mtx_init(&kobj_mtx, "kobj", NULL, MTX_DEF);
+}
+
+SYSINIT(kobj, SI_SUB_LOCK, SI_ORDER_ANY, kobj_init_mutex, NULL);
+
+/*
+ * This method structure is used to initialise new caches. Since the
+ * desc pointer is NULL, it is guaranteed never to match any read
+ * descriptors.
+ */
+static struct kobj_method null_method = {
+ 0, 0,
+};
+
+int
kobj_error_method(void)
{
+
return ENXIO;
}
static void
kobj_register_method(struct kobjop_desc *desc)
{
+
+ mtx_assert(&kobj_mtx, MA_OWNED);
if (desc->id == 0) {
- KASSERT((kobj_next_id < KOBJ_CACHE_SIZE), ("kobj method table overflow"));
desc->id = kobj_next_id++;
}
}
@@ -88,6 +108,8 @@ kobj_class_compile_common(kobj_class_t cls, kobj_ops_t ops)
kobj_method_t *m;
int i;
+ mtx_assert(&kobj_mtx, MA_OWNED);
+
/*
* Don't do anything if we are already compiled.
*/
@@ -103,7 +125,8 @@ kobj_class_compile_common(kobj_class_t cls, kobj_ops_t ops)
/*
* Then initialise the ops table.
*/
- bzero(ops, sizeof(struct kobj_ops));
+ for (i = 0; i < KOBJ_CACHE_SIZE; i++)
+ ops->cache[i] = &null_method;
ops->cls = cls;
cls->ops = ops;
}
@@ -113,42 +136,106 @@ kobj_class_compile(kobj_class_t cls)
{
kobj_ops_t ops;
+ mtx_assert(&kobj_mtx, MA_NOTOWNED);
+
/*
* Allocate space for the compiled ops table.
*/
ops = malloc(sizeof(struct kobj_ops), M_KOBJ, M_NOWAIT);
if (!ops)
panic("kobj_compile_methods: out of memory");
+
+ mtx_lock(&kobj_mtx);
+
+ /*
+ * We may have lost a race for kobj_class_compile here - check
+ * to make sure someone else hasn't already compiled this
+ * class.
+ */
+ if (cls->ops) {
+ mtx_unlock(&kobj_mtx);
+ free(ops, M_KOBJ);
+ return;
+ }
+
kobj_class_compile_common(cls, ops);
+ mtx_unlock(&kobj_mtx);
}
void
kobj_class_compile_static(kobj_class_t cls, kobj_ops_t ops)
{
+
+ mtx_assert(&kobj_mtx, MA_NOTOWNED);
+
/*
* Increment refs to make sure that the ops table is not freed.
*/
+ mtx_lock(&kobj_mtx);
cls->refs++;
kobj_class_compile_common(cls, ops);
+ mtx_unlock(&kobj_mtx);
}
-void
-kobj_lookup_method(kobj_method_t *methods,
- kobj_method_t *ce,
- kobjop_desc_t desc)
+static kobj_method_t*
+kobj_lookup_method_class(kobj_class_t cls, kobjop_desc_t desc)
{
- ce->desc = desc;
- for (; methods && methods->desc; methods++) {
- if (methods->desc == desc) {
- ce->func = methods->func;
- return;
+ kobj_method_t *methods = cls->methods;
+ kobj_method_t *ce;
+
+ for (ce = methods; ce && ce->desc; ce++) {
+ if (ce->desc == desc) {
+ return ce;
}
}
- if (desc->deflt)
- ce->func = desc->deflt;
- else
- ce->func = kobj_error_method;
- return;
+
+ return 0;
+}
+
+static kobj_method_t*
+kobj_lookup_method_mi(kobj_class_t cls,
+ kobjop_desc_t desc)
+{
+ kobj_method_t *ce;
+ kobj_class_t *basep;
+
+ ce = kobj_lookup_method_class(cls, desc);
+ if (ce)
+ return ce;
+
+ basep = cls->baseclasses;
+ if (basep) {
+ for (; *basep; basep++) {
+ ce = kobj_lookup_method_mi(*basep, desc);
+ if (ce)
+ return ce;
+ }
+ }
+
+ return 0;
+}
+
+kobj_method_t*
+kobj_lookup_method(kobj_class_t cls,
+ kobj_method_t **cep,
+ kobjop_desc_t desc)
+{
+ kobj_method_t *ce;
+
+#ifdef KOBJ_STATS
+ /*
+ * Correct for the 'hit' assumption in KOBJOPLOOKUP and record
+ * a 'miss'.
+ */
+ kobj_lookup_hits--;
+ kobj_lookup_misses--;
+#endif
+
+ ce = kobj_lookup_method_mi(cls, desc);
+ if (!ce)
+ ce = desc->deflt;
+ *cep = ce;
+ return ce;
}
void
@@ -156,18 +243,33 @@ kobj_class_free(kobj_class_t cls)
{
int i;
kobj_method_t *m;
+ void* ops = 0;
- /*
- * Unregister any methods which are no longer used.
- */
- for (i = 0, m = cls->methods; m->desc; i++, m++)
- kobj_unregister_method(m->desc);
+ mtx_assert(&kobj_mtx, MA_NOTOWNED);
+ mtx_lock(&kobj_mtx);
/*
- * Free memory and clean up.
+ * Protect against a race between kobj_create and
+ * kobj_delete.
*/
- free(cls->ops, M_KOBJ);
- cls->ops = 0;
+ if (cls->refs == 0) {
+ /*
+ * Unregister any methods which are no longer used.
+ */
+ for (i = 0, m = cls->methods; m->desc; i++, m++)
+ kobj_unregister_method(m->desc);
+
+ /*
+ * Free memory and clean up.
+ */
+ ops = cls->ops;
+ cls->ops = 0;
+ }
+
+ mtx_unlock(&kobj_mtx);
+
+ if (ops)
+ free(ops, M_KOBJ);
}
kobj_t
@@ -191,28 +293,48 @@ kobj_create(kobj_class_t cls,
void
kobj_init(kobj_t obj, kobj_class_t cls)
{
+ mtx_assert(&kobj_mtx, MA_NOTOWNED);
+ retry:
+ mtx_lock(&kobj_mtx);
+
/*
* Consider compiling the class' method table.
*/
- if (!cls->ops)
+ if (!cls->ops) {
+ /*
+ * kobj_class_compile doesn't want the lock held
+ * because of the call to malloc - we drop the lock
+ * and re-try.
+ */
+ mtx_unlock(&kobj_mtx);
kobj_class_compile(cls);
+ goto retry;
+ }
obj->ops = cls->ops;
cls->refs++;
+
+ mtx_unlock(&kobj_mtx);
}
void
kobj_delete(kobj_t obj, struct malloc_type *mtype)
{
kobj_class_t cls = obj->ops->cls;
+ int refs;
/*
* Consider freeing the compiled method table for the class
* after its last instance is deleted. As an optimisation, we
* should defer this for a short while to avoid thrashing.
*/
+ mtx_assert(&kobj_mtx, MA_NOTOWNED);
+ mtx_lock(&kobj_mtx);
cls->refs--;
- if (!cls->refs)
+ refs = cls->refs;
+ mtx_unlock(&kobj_mtx);
+
+ if (!refs)
kobj_class_free(cls);
obj->ops = 0;
OpenPOWER on IntegriCloud