author     phk <phk@FreeBSD.org>  2004-12-01 23:16:38 +0000
committer  phk <phk@FreeBSD.org>  2004-12-01 23:16:38 +0000
commit     59f305606cbc120b44978581149ef1a3e62bf3b4 (patch)
tree       f548d86b8998e8d581602fc54079bbb1534e7c18  /sys/kern/vfs_init.c
parent     350be3accf1712e54ae2732ca42ad409bbf20df7 (diff)
download   FreeBSD-src-59f305606cbc120b44978581149ef1a3e62bf3b4.zip
FreeBSD-src-59f305606cbc120b44978581149ef1a3e62bf3b4.tar.gz
Back when VOP_* was introduced, we did not have new-style struct
initializations, but we did have lofty goals and big ideals.

Adjust to more contemporary circumstances and gain type checking.

Replace the entire vop_t frobbing thing with properly typed structures.
The only casualty is that we cannot add a new VOP_ method with a
loadable module.  History has not given us reason to believe this would
ever be feasible in the first place.

Eliminate in toto VOCALL(), vop_t, VNODEOP_SET() etc.

Give coda correct prototypes and function definitions for all vop_()s.

Generate a bit more data from the vnode_if.src file: a struct
vop_vector and prototype typedefs for all vop methods.

Add a new vop_bypass() and make vop_default be a pointer to another
struct vop_vector.

Remove a lot of vfs_init since vop_vector is ready to use from the
compiler.

Cast various vop_mumble() to void * with uppercase name, for instance
VOP_PANIC, VOP_NULL etc.

Implement VCALL() by making vdesc_offset the offsetof() of the relevant
function pointer in vop_vector.  This is disgusting, but since the code
is generated by a script it is comparatively safe.  The alternative for
nullfs etc. would be much worse.

Fix up all vnode method vectors to remove casts so they become
typesafe.  (The bulk of this is generated by scripts.)
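To make the dispatch scheme concrete, here is a minimal, self-contained
userland sketch of the idea described above: a vector whose slots are
typed function pointers, a vop_default pointer that chains vectors, a
vop_bypass catch-all, and a VCALL-style lookup that finds the slot by
byte offset (offsetof) into the vector.  The struct layout, the demo_*
names and the demo_vcall() helper below are illustrative stand-ins, not
the kernel definitions generated from vnode_if.src.

	/*
	 * Simplified userland sketch of offsetof()-based vop dispatch.
	 * Not the real kernel types; compile with any C99 compiler.
	 */
	#include <stddef.h>
	#include <stdio.h>

	struct demo_args { int n; };

	typedef int demo_op_t(struct demo_args *);

	struct vop_vector {
		struct vop_vector *vop_default;	/* fall back to this vector */
		demo_op_t *vop_bypass;		/* catch-all handler, may be NULL */
		demo_op_t *vop_read;		/* one typed method slot */
	};

	/* Default vector: handles everything through a bypass routine. */
	static int
	default_bypass(struct demo_args *ap)
	{
		return (-1);	/* stand-in for EOPNOTSUPP */
	}

	static struct vop_vector default_vector = {
		.vop_default = NULL,
		.vop_bypass = default_bypass,
	};

	/* A filesystem that implements only vop_read and inherits the rest. */
	static int
	myfs_read(struct demo_args *ap)
	{
		return (ap->n * 2);
	}

	static struct vop_vector myfs_vector = {
		.vop_default = &default_vector,
		.vop_read = myfs_read,
	};

	/*
	 * VCALL-style dispatch: 'off' is offsetof(struct vop_vector, vop_read),
	 * so the method pointer is located by offsetting into the vector.
	 * Walk vop_default until a filled slot or a bypass routine is found.
	 */
	static int
	demo_vcall(struct vop_vector *vop, size_t off, struct demo_args *ap)
	{
		demo_op_t **slot;

		for (;;) {
			slot = (demo_op_t **)((char *)vop + off);
			if (*slot == NULL && vop->vop_bypass == NULL) {
				vop = vop->vop_default;
				continue;
			}
			break;
		}
		return (*slot != NULL ? (*slot)(ap) : vop->vop_bypass(ap));
	}

	int
	main(void)
	{
		struct demo_args a = { 21 };

		/* Dispatches to myfs_read via the offset-based lookup: prints 42. */
		printf("%d\n", demo_vcall(&myfs_vector,
		    offsetof(struct vop_vector, vop_read), &a));
		return (0);
	}

A filesystem-specific vector fills in only the methods it implements;
everything else falls through vop_default to a vector that provides a
bypass, which is what the new vcall() in the diff below does in the
kernel.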
Diffstat (limited to 'sys/kern/vfs_init.c')
-rw-r--r--  sys/kern/vfs_init.c  284
1 files changed, 21 insertions, 263 deletions
diff --git a/sys/kern/vfs_init.c b/sys/kern/vfs_init.c
index bba6c52..343f5f7 100644
--- a/sys/kern/vfs_init.c
+++ b/sys/kern/vfs_init.c
@@ -83,278 +83,36 @@ struct vattr va_null;
* that is a(whole)nother story.) This is a feature.
*/
-/* Table of known vnodeop vectors (list of VFS vnode vectors) */
-static const struct vnodeopv_desc **vnodeopv_descs;
-static int vnodeopv_num;
-
-/* Table of known descs (list of vnode op handlers "vop_access_desc") */
-static struct vnodeop_desc **vfs_op_descs;
-/* Reference counts for vfs_op_descs */
-static int *vfs_op_desc_refs;
-/* Number of descriptions */
-static int num_op_descs;
-/* Number of entries in each description */
-static int vfs_opv_numops = 64;
-
-/* Allow this number to be tuned at boot */
-TUNABLE_INT("vfs.opv_numops", &vfs_opv_numops);
-SYSCTL_INT(_vfs, OID_AUTO, opv_numops, CTLFLAG_RDTUN, &vfs_opv_numops,
- 0, "Maximum number of operations in vop_t vector");
-
-static int int_cmp(const void *a, const void *b);
-
-static int
-int_cmp(const void *a, const void *b)
-{
- return(*(const int *)a - *(const int *)b);
-}
+/*
+ * Routines having to do with the management of the vnode table.
+ */
/*
- * Recalculate the operations vector/description (those parts of it that can
- * be recalculated, that is.)
- * Always allocate operations vector large enough to hold vfs_opv_numops
- * entries. The vector is never freed or deallocated once it is initialized,
- * so that vnodes might safely reference it through their v_op pointer without
- * vector changing suddenly from under them.
+ * XXX: hack alert
*/
-static void
-vfs_opv_recalc(void)
+int
+vcall(struct vnode *vp, u_int off, void *ap)
{
- int i, j, k;
- int *vfs_op_offsets;
- vop_t ***opv_desc_vector_p;
- vop_t **opv_desc_vector;
- struct vnodeopv_entry_desc *opve_descp;
- const struct vnodeopv_desc *opv;
-
- if (vfs_op_descs == NULL)
- panic("vfs_opv_recalc called with null vfs_op_descs");
-
- /*
- * Allocate and initialize temporary array to store
- * offsets. Sort it to put all uninitialized entries
- * first and to make holes in existing offset sequence
- * detectable.
- */
- MALLOC(vfs_op_offsets, int *,
- num_op_descs * sizeof(int), M_TEMP, M_WAITOK);
- if (vfs_op_offsets == NULL)
- panic("vfs_opv_recalc: no memory");
- for (i = 0; i < num_op_descs; i++)
- vfs_op_offsets[i] = vfs_op_descs[i]->vdesc_offset;
- qsort(vfs_op_offsets, num_op_descs, sizeof(int), int_cmp);
-
- /*
- * Run through and make sure all known descs have an offset.
- * Use vfs_op_offsets to locate holes in offset sequence and
- * reuse them.
- * vop_default_desc is hardwired at offset 1, and offset 0
- * is a panic sanity check.
- */
- j = 1; k = 1;
- for (i = 0; i < num_op_descs; i++) {
- if (vfs_op_descs[i]->vdesc_offset != 0)
+ struct vop_vector *vop = vp->v_op;
+ vop_bypass_t **bpt;
+ int rc;
+
+ for(;;) {
+ bpt = (void *)((u_char *)vop + off);
+ if (vop != NULL && *bpt == NULL && vop->vop_bypass == NULL) {
+ vop = vop->vop_default;
continue;
- /*
- * Look at two adjacent entries vfs_op_offsets[j - 1] and
- * vfs_op_offsets[j] and see if we can fit a new offset
- * number in between. If not, look at the next pair until
- * hole is found or the end of the vfs_op_offsets vector is
- * reached. j has been initialized to 1 above so that
- * referencing (j-1)-th element is safe and the loop will
- * never execute if num_op_descs is 1. For each new value s
- * of i the j loop pick up from where previous iteration has
- * left off. When the last hole has been consumed or if no
- * hole has been found, we will start allocating new numbers
- * starting from the biggest already available offset + 1.
- */
- for (; j < num_op_descs; j++) {
- if (vfs_op_offsets[j - 1] < k && vfs_op_offsets[j] > k)
- break;
- k = vfs_op_offsets[j] + 1;
- }
- vfs_op_descs[i]->vdesc_offset = k++;
- }
- FREE(vfs_op_offsets, M_TEMP);
-
- /* Panic if new vops will cause vector overflow */
- if (k > vfs_opv_numops)
- panic("VFS: Ran out of vop_t vector entries. %d entries required, only %d available.\n", k, vfs_opv_numops);
-
- /*
- * Allocate and fill in the vectors
- */
- for (i = 0; i < vnodeopv_num; i++) {
- opv = vnodeopv_descs[i];
- opv_desc_vector_p = opv->opv_desc_vector_p;
- if (*opv_desc_vector_p == NULL)
- MALLOC(*opv_desc_vector_p, vop_t **,
- vfs_opv_numops * sizeof(vop_t *), M_VNODE,
- M_WAITOK | M_ZERO);
-
- /* Fill in, with slot 0 being to return EOPNOTSUPP */
- opv_desc_vector = *opv_desc_vector_p;
- opv_desc_vector[0] = (vop_t *)vop_eopnotsupp;
- for (j = 0; opv->opv_desc_ops[j].opve_op; j++) {
- opve_descp = &(opv->opv_desc_ops[j]);
- opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
- opve_descp->opve_impl;
- }
-
- /* Replace unfilled routines with their default (slot 1). */
- opv_desc_vector = *(opv->opv_desc_vector_p);
- if (opv_desc_vector[1] == NULL)
- panic("vfs_opv_recalc: vector without a default.");
- for (j = 0; j < vfs_opv_numops; j++)
- if (opv_desc_vector[j] == NULL)
- opv_desc_vector[j] = opv_desc_vector[1];
- }
-}
-
-/* Add a set of vnode operations (a description) to the table above. */
-void
-vfs_add_vnodeops(const void *data)
-{
- const struct vnodeopv_desc *opv;
- const struct vnodeopv_desc **newopv;
- struct vnodeop_desc **newop;
- int *newref;
- struct vnodeop_desc *desc;
- int i, j;
-
- opv = (const struct vnodeopv_desc *)data;
- MALLOC(newopv, const struct vnodeopv_desc **,
- (vnodeopv_num + 1) * sizeof(*newopv), M_VNODE, M_WAITOK);
- if (vnodeopv_descs) {
- bcopy(vnodeopv_descs, newopv, vnodeopv_num * sizeof(*newopv));
- FREE(vnodeopv_descs, M_VNODE);
- }
- newopv[vnodeopv_num] = opv;
- vnodeopv_descs = newopv;
- vnodeopv_num++;
-
- /* See if we have turned up a new vnode op desc */
- for (i = 0; (desc = opv->opv_desc_ops[i].opve_op); i++) {
- for (j = 0; j < num_op_descs; j++) {
- if (desc == vfs_op_descs[j]) {
- /* found it, increase reference count */
- vfs_op_desc_refs[j]++;
- break;
- }
- }
- if (j == num_op_descs) {
- /* not found, new entry */
- MALLOC(newop, struct vnodeop_desc **,
- (num_op_descs + 1) * sizeof(*newop),
- M_VNODE, M_WAITOK);
- /* new reference count (for unload) */
- MALLOC(newref, int *,
- (num_op_descs + 1) * sizeof(*newref),
- M_VNODE, M_WAITOK);
- if (vfs_op_descs) {
- bcopy(vfs_op_descs, newop,
- num_op_descs * sizeof(*newop));
- FREE(vfs_op_descs, M_VNODE);
- }
- if (vfs_op_desc_refs) {
- bcopy(vfs_op_desc_refs, newref,
- num_op_descs * sizeof(*newref));
- FREE(vfs_op_desc_refs, M_VNODE);
- }
- newop[num_op_descs] = desc;
- newref[num_op_descs] = 1;
- vfs_op_descs = newop;
- vfs_op_desc_refs = newref;
- num_op_descs++;
- }
- }
- vfs_opv_recalc();
-}
-
-/* Remove a vnode type from the vnode description table above. */
-void
-vfs_rm_vnodeops(const void *data)
-{
- const struct vnodeopv_desc *opv;
- const struct vnodeopv_desc **newopv;
- struct vnodeop_desc **newop;
- int *newref;
- vop_t **opv_desc_vector;
- struct vnodeop_desc *desc;
- int i, j, k;
-
- opv = (const struct vnodeopv_desc *)data;
- /* Lower ref counts on descs in the table and release if zero */
- for (i = 0; (desc = opv->opv_desc_ops[i].opve_op); i++) {
- for (j = 0; j < num_op_descs; j++) {
- if (desc == vfs_op_descs[j]) {
- /* found it, decrease reference count */
- vfs_op_desc_refs[j]--;
- break;
- }
- }
- for (j = 0; j < num_op_descs; j++) {
- if (vfs_op_desc_refs[j] > 0)
- continue;
- if (vfs_op_desc_refs[j] < 0)
- panic("vfs_remove_vnodeops: negative refcnt");
- /* Entry is going away - replace it with defaultop */
- for (k = 0; k < vnodeopv_num; k++) {
- opv_desc_vector =
- *(vnodeopv_descs[k]->opv_desc_vector_p);
- if (opv_desc_vector != NULL)
- opv_desc_vector[desc->vdesc_offset] =
- opv_desc_vector[1];
- }
- MALLOC(newop, struct vnodeop_desc **,
- (num_op_descs - 1) * sizeof(*newop),
- M_VNODE, M_WAITOK);
- /* new reference count (for unload) */
- MALLOC(newref, int *,
- (num_op_descs - 1) * sizeof(*newref),
- M_VNODE, M_WAITOK);
- for (k = j; k < (num_op_descs - 1); k++) {
- vfs_op_descs[k] = vfs_op_descs[k + 1];
- vfs_op_desc_refs[k] = vfs_op_desc_refs[k + 1];
- }
- bcopy(vfs_op_descs, newop,
- (num_op_descs - 1) * sizeof(*newop));
- bcopy(vfs_op_desc_refs, newref,
- (num_op_descs - 1) * sizeof(*newref));
- FREE(vfs_op_descs, M_VNODE);
- FREE(vfs_op_desc_refs, M_VNODE);
- vfs_op_descs = newop;
- vfs_op_desc_refs = newref;
- num_op_descs--;
- }
- }
-
- for (i = 0; i < vnodeopv_num; i++) {
- if (vnodeopv_descs[i] == opv) {
- for (j = i; j < (vnodeopv_num - 1); j++)
- vnodeopv_descs[j] = vnodeopv_descs[j + 1];
- break;
}
+ break;
}
- if (i == vnodeopv_num)
- panic("vfs_remove_vnodeops: opv not found");
- opv_desc_vector = *(opv->opv_desc_vector_p);
- if (opv_desc_vector != NULL)
- FREE(opv_desc_vector, M_VNODE);
- MALLOC(newopv, const struct vnodeopv_desc **,
- (vnodeopv_num - 1) * sizeof(*newopv), M_VNODE, M_WAITOK);
- bcopy(vnodeopv_descs, newopv, (vnodeopv_num - 1) * sizeof(*newopv));
- FREE(vnodeopv_descs, M_VNODE);
- vnodeopv_descs = newopv;
- vnodeopv_num--;
-
- vfs_opv_recalc();
+ KASSERT(vop != NULL, ("No VCALL(%p...)", vp));
+ if (*bpt != NULL)
+ rc = (*bpt)(ap);
+ else
+ rc = vop->vop_bypass(ap);
+ return (rc);
}
-/*
- * Routines having to do with the management of the vnode table.
- */
-
struct vfsconf *
vfs_byname(const char *name)
{