author      bp <bp@FreeBSD.org>  2000-09-12 09:49:08 +0000
committer   bp <bp@FreeBSD.org>  2000-09-12 09:49:08 +0000
commit      a7bc78c86d1b25efbea8d9ad3e7354ef3a302e90 (patch)
tree        ae6a2a75d2b0d94e115a73fd536dca86dba92ac4
parent      7893328799a796677510cba9c7bbb8e52d8ce56c (diff)
download    FreeBSD-src-a7bc78c86d1b25efbea8d9ad3e7354ef3a302e90.zip
            FreeBSD-src-a7bc78c86d1b25efbea8d9ad3e7354ef3a302e90.tar.gz
Add three new VOPs: VOP_CREATEVOBJECT, VOP_DESTROYVOBJECT and VOP_GETVOBJECT.
They will be used by nullfs and other stacked filesystems to support full
cache coherency.

Reviewed in general by:	mckusick, dillon
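[Editor's note: the nullfs side of this change is not part of this diff. As a rough
illustration of how a stacked filesystem could use the new interface for cache
coherency, it might simply forward VOP_GETVOBJECT to the lower vnode so that both
layers share a single VM object. This is a sketch only; the null_getvobject name
and the NULLVPTOLOWERVP() accessor are assumed for illustration, not taken from
this commit.]

static int
null_getvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *vp;
		struct vm_object **objpp;
	} */ *ap;
{
	/* Assumed accessor: map the nullfs vnode to the underlying lower vnode. */
	struct vnode *lowervp = NULLVPTOLOWERVP(ap->a_vp);

	if (lowervp == NULL)
		return (EINVAL);
	/* Hand back the lower layer's VM object so both layers stay coherent. */
	return (VOP_GETVOBJECT(lowervp, ap->a_objpp));
}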
-rw-r--r--  sys/kern/imgact_aout.c    |   2
-rw-r--r--  sys/kern/imgact_elf.c     |   2
-rw-r--r--  sys/kern/kern_exec.c      |   2
-rw-r--r--  sys/kern/uipc_syscalls.c  |   3
-rw-r--r--  sys/kern/vfs_bio.c        |  24
-rw-r--r--  sys/kern/vfs_default.c    | 116
-rw-r--r--  sys/kern/vfs_export.c     |  82
-rw-r--r--  sys/kern/vfs_extattr.c    |   9
-rw-r--r--  sys/kern/vfs_subr.c       |  82
-rw-r--r--  sys/kern/vfs_syscalls.c   |   9
-rw-r--r--  sys/kern/vnode_if.src     |  24
-rw-r--r--  sys/sys/vnode.h           |   3
-rw-r--r--  sys/vm/vm_mmap.c          |   9
13 files changed, 201 insertions, 166 deletions
diff --git a/sys/kern/imgact_aout.c b/sys/kern/imgact_aout.c
index 6d93a72..80ecc0c 100644
--- a/sys/kern/imgact_aout.c
+++ b/sys/kern/imgact_aout.c
@@ -179,7 +179,7 @@ exec_aout_imgact(imgp)
vp = imgp->vp;
map = &vmspace->vm_map;
vm_map_lock(map);
- object = vp->v_object;
+ VOP_GETVOBJECT(vp, &object);
vm_object_reference(object);
text_end = virtual_offset + a_out->a_text;
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index f692c80..dac862a 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -187,7 +187,7 @@ elf_load_section(struct proc *p, struct vmspace *vmspace, struct vnode *vp, vm_o
vm_offset_t file_addr;
vm_offset_t data_buf = 0;
- object = vp->v_object;
+ VOP_GETVOBJECT(vp, &object);
error = 0;
/*
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 5f6d2be..453447d 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -401,7 +401,7 @@ exec_map_first_page(imgp)
exec_unmap_first_page(imgp);
}
- object = imgp->vp->v_object;
+ VOP_GETVOBJECT(imgp->vp, &object);
s = splvm();
ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index ed9d691..e231a83 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -1434,8 +1434,7 @@ sendfile(struct proc *p, struct sendfile_args *uap)
}
vp = (struct vnode *)fp->f_data;
vref(vp);
- obj = vp->v_object;
- if (vp->v_type != VREG || obj == NULL) {
+ if (vp->v_type != VREG || VOP_GETVOBJECT(vp, &obj) != 0) {
error = EINVAL;
goto done;
}
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 34cff17..11e9183 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -1027,7 +1027,7 @@ brelse(struct buf * bp)
vm_page_flag_clear(m, PG_ZERO);
if (m == bogus_page) {
- obj = (vm_object_t) vp->v_object;
+ VOP_GETVOBJECT(vp, &obj);
poff = OFF_TO_IDX(bp->b_offset);
for (j = i; j < bp->b_npages; j++) {
@@ -1905,10 +1905,9 @@ inmem(struct vnode * vp, daddr_t blkno)
return 1;
if (vp->v_mount == NULL)
return 0;
- if ((vp->v_object == NULL) || (vp->v_flag & VOBJBUF) == 0)
+ if (VOP_GETVOBJECT(vp, &obj) != 0 || (vp->v_flag & VOBJBUF) == 0)
return 0;
- obj = vp->v_object;
size = PAGE_SIZE;
if (size > vp->v_mount->mnt_stat.f_iosize)
size = vp->v_mount->mnt_stat.f_iosize;
@@ -2193,7 +2192,7 @@ loop:
bsize = size;
offset = (off_t)blkno * bsize;
- vmio = (vp->v_object != 0) && (vp->v_flag & VOBJBUF);
+ vmio = (VOP_GETVOBJECT(vp, NULL) == 0) && (vp->v_flag & VOBJBUF);
maxsize = vmio ? size + (offset & PAGE_MASK) : size;
maxsize = imax(maxsize, bsize);
@@ -2462,7 +2461,7 @@ allocbuf(struct buf *bp, int size)
*/
vp = bp->b_vp;
- obj = vp->v_object;
+ VOP_GETVOBJECT(vp, &obj);
while (bp->b_npages < desiredpages) {
vm_page_t m;
@@ -2641,7 +2640,7 @@ bufdonebio(struct bio *bp)
void
bufdone(struct buf *bp)
{
- int s;
+ int s, error;
void (*biodone) __P((struct buf *));
s = splbio();
@@ -2680,14 +2679,14 @@ bufdone(struct buf *bp)
int iosize;
struct vnode *vp = bp->b_vp;
- obj = vp->v_object;
+ error = VOP_GETVOBJECT(vp, &obj);
#if defined(VFS_BIO_DEBUG)
if (vp->v_usecount == 0) {
panic("biodone: zero vnode ref count");
}
- if (vp->v_object == NULL) {
+ if (error) {
panic("biodone: missing VM object");
}
@@ -2700,7 +2699,7 @@ bufdone(struct buf *bp)
KASSERT(bp->b_offset != NOOFFSET,
("biodone: no buffer offset"));
- if (!obj) {
+ if (error) {
panic("biodone: no object");
}
#if defined(VFS_BIO_DEBUG)
@@ -2821,7 +2820,9 @@ vfs_unbusy_pages(struct buf * bp)
if (bp->b_flags & B_VMIO) {
struct vnode *vp = bp->b_vp;
- vm_object_t obj = vp->v_object;
+ vm_object_t obj;
+
+ VOP_GETVOBJECT(vp, &obj);
for (i = 0; i < bp->b_npages; i++) {
vm_page_t m = bp->b_pages[i];
@@ -2898,9 +2899,10 @@ vfs_busy_pages(struct buf * bp, int clear_modify)
if (bp->b_flags & B_VMIO) {
struct vnode *vp = bp->b_vp;
- vm_object_t obj = vp->v_object;
+ vm_object_t obj;
vm_ooffset_t foff;
+ VOP_GETVOBJECT(vp, &obj);
foff = bp->b_offset;
KASSERT(bp->b_offset != NOOFFSET,
("vfs_busy_pages: no buffer offset"));
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index e95e7e2..2b651c8 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -43,6 +43,7 @@
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
+#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
@@ -51,6 +52,18 @@
#include <sys/vnode.h>
#include <sys/poll.h>
+#include <machine/limits.h>
+
+#include <vm/vm.h>
+#include <vm/vm_object.h>
+#include <vm/vm_extern.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pager.h>
+#include <vm/vnode_pager.h>
+#include <vm/vm_zone.h>
+
static int vop_nostrategy __P((struct vop_strategy_args *));
/*
@@ -67,7 +80,10 @@ static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
{ &vop_advlock_desc, (vop_t *) vop_einval },
{ &vop_bwrite_desc, (vop_t *) vop_stdbwrite },
{ &vop_close_desc, (vop_t *) vop_null },
+ { &vop_createvobject_desc, (vop_t *) vop_stdcreatevobject },
+ { &vop_destroyvobject_desc, (vop_t *) vop_stddestroyvobject },
{ &vop_fsync_desc, (vop_t *) vop_null },
+ { &vop_getvobject_desc, (vop_t *) vop_stdgetvobject },
{ &vop_inactive_desc, (vop_t *) vop_stdinactive },
{ &vop_ioctl_desc, (vop_t *) vop_enotty },
{ &vop_islocked_desc, (vop_t *) vop_noislocked },
@@ -522,6 +538,106 @@ vop_stdgetwritemount(ap)
return (0);
}
+int
+vop_stdcreatevobject(ap)
+ struct vop_createvobject_args /* {
+ struct vnode *vp;
+ struct ucred *cred;
+ struct proc *p;
+ } */ *ap;
+{
+ struct vnode *vp = ap->a_vp;
+ struct ucred *cred = ap->a_cred;
+ struct proc *p = ap->a_p;
+ struct vattr vat;
+ vm_object_t object;
+ int error = 0;
+
+ if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
+ return (0);
+
+retry:
+ if ((object = vp->v_object) == NULL) {
+ if (vp->v_type == VREG || vp->v_type == VDIR) {
+ if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
+ goto retn;
+ object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
+ } else if (devsw(vp->v_rdev) != NULL) {
+ /*
+ * This simply allocates the biggest object possible
+ * for a disk vnode. This should be fixed, but doesn't
+ * cause any problems (yet).
+ */
+ object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
+ } else {
+ goto retn;
+ }
+ /*
+ * Dereference the reference we just created. This assumes
+ * that the object is associated with the vp.
+ */
+ object->ref_count--;
+ vp->v_usecount--;
+ } else {
+ if (object->flags & OBJ_DEAD) {
+ VOP_UNLOCK(vp, 0, p);
+ tsleep(object, PVM, "vodead", 0);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ goto retry;
+ }
+ }
+
+ KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
+ vp->v_flag |= VOBJBUF;
+
+retn:
+ return (error);
+}
+
+int
+vop_stddestroyvobject(ap)
+ struct vop_destroyvobject_args /* {
+ struct vnode *vp;
+ } */ *ap;
+{
+ struct vnode *vp = ap->a_vp;
+ vm_object_t obj = vp->v_object;
+
+ if (vp->v_object == NULL)
+ return (0);
+
+ if (obj->ref_count == 0) {
+ /*
+ * vclean() may be called twice. The first time
+ * removes the primary reference to the object,
+ * the second time goes one further and is a
+ * special-case to terminate the object.
+ */
+ vm_object_terminate(obj);
+ } else {
+ /*
+ * Woe to the process that tries to page now :-).
+ */
+ vm_pager_deallocate(obj);
+ }
+ return (0);
+}
+
+int
+vop_stdgetvobject(ap)
+ struct vop_getvobject_args /* {
+ struct vnode *vp;
+ struct vm_object **objpp;
+ } */ *ap;
+{
+ struct vnode *vp = ap->a_vp;
+ struct vm_object **objpp = ap->a_objpp;
+
+ if (objpp)
+ *objpp = vp->v_object;
+ return (vp->v_object ? 0 : EINVAL);
+}
+
/*
* vfs default ops
* used to fill the vfs function table to get reasonable default return values.
diff --git a/sys/kern/vfs_export.c b/sys/kern/vfs_export.c
index 52ad0ef..bebc3c9 100644
--- a/sys/kern/vfs_export.c
+++ b/sys/kern/vfs_export.c
@@ -484,10 +484,9 @@ getnewvnode(tag, mp, vops, vpp)
* if it still has cached pages or we cannot get
* its interlock.
*/
- object = vp->v_object;
if (LIST_FIRST(&vp->v_cache_src) != NULL ||
- (object && (object->resident_page_count ||
- object->ref_count)) ||
+ (VOP_GETVOBJECT(vp, &object) == 0 &&
+ (object->resident_page_count || object->ref_count)) ||
!simple_lock_try(&vp->v_interlock)) {
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp = NULL;
@@ -711,8 +710,7 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
* Destroy the copy in the VM cache, too.
*/
simple_lock(&vp->v_interlock);
- object = vp->v_object;
- if (object != NULL) {
+ if (VOP_GETVOBJECT(vp, &object) == 0) {
vm_object_page_remove(object, 0, 0,
(flags & V_SAVE) ? TRUE : FALSE);
}
@@ -1649,7 +1647,6 @@ vclean(vp, flags, p)
struct proc *p;
{
int active;
- vm_object_t obj;
/*
* Check to see if the vnode is in use. If so we have to reference it
@@ -1686,22 +1683,7 @@ vclean(vp, flags, p)
vinvalbuf(vp, 0, NOCRED, p, 0, 0);
}
- if ((obj = vp->v_object) != NULL) {
- if (obj->ref_count == 0) {
- /*
- * vclean() may be called twice. The first time
- * removes the primary reference to the object,
- * the second time goes one further and is a
- * special-case to terminate the object.
- */
- vm_object_terminate(obj);
- } else {
- /*
- * Woe to the process that tries to page now :-).
- */
- vm_pager_deallocate(obj);
- }
- }
+ VOP_DESTROYVOBJECT(vp);
/*
* If purging an active vnode, it must be closed and
@@ -2523,20 +2505,20 @@ loop:
continue;
if (flags != MNT_WAIT) {
- obj = vp->v_object;
- if (obj == NULL || (obj->flags & OBJ_MIGHTBEDIRTY) == 0)
+ if (VOP_GETVOBJECT(vp, &obj) != 0 ||
+ (obj->flags & OBJ_MIGHTBEDIRTY) == 0)
continue;
if (VOP_ISLOCKED(vp, NULL))
continue;
}
simple_lock(&vp->v_interlock);
- if (vp->v_object &&
- (vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
+ if (VOP_GETVOBJECT(vp, &obj) == 0 &&
+ (obj->flags & OBJ_MIGHTBEDIRTY)) {
if (!vget(vp,
LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ, curproc)) {
- if (vp->v_object) {
- vm_object_page_clean(vp->v_object, 0, 0, flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
+ if (VOP_GETVOBJECT(vp, &obj) == 0) {
+ vm_object_page_clean(obj, 0, 0, flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
anyio = 1;
}
vput(vp);
@@ -2563,49 +2545,7 @@ vfs_object_create(vp, p, cred)
struct proc *p;
struct ucred *cred;
{
- struct vattr vat;
- vm_object_t object;
- int error = 0;
-
- if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
- return 0;
-
-retry:
- if ((object = vp->v_object) == NULL) {
- if (vp->v_type == VREG || vp->v_type == VDIR) {
- if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
- goto retn;
- object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
- } else if (devsw(vp->v_rdev) != NULL) {
- /*
- * This simply allocates the biggest object possible
- * for a disk vnode. This should be fixed, but doesn't
- * cause any problems (yet).
- */
- object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
- } else {
- goto retn;
- }
- /*
- * Dereference the reference we just created. This assumes
- * that the object is associated with the vp.
- */
- object->ref_count--;
- vp->v_usecount--;
- } else {
- if (object->flags & OBJ_DEAD) {
- VOP_UNLOCK(vp, 0, p);
- tsleep(object, PVM, "vodead", 0);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- goto retry;
- }
- }
-
- KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
- vp->v_flag |= VOBJBUF;
-
-retn:
- return error;
+ return (VOP_CREATEVOBJECT(vp, cred, p));
}
void
diff --git a/sys/kern/vfs_extattr.c b/sys/kern/vfs_extattr.c
index 7f4c6bc0..4064fa3 100644
--- a/sys/kern/vfs_extattr.c
+++ b/sys/kern/vfs_extattr.c
@@ -1068,7 +1068,7 @@ open(p, uap)
goto bad;
}
/* assert that vn_open created a backing object if one is needed */
- KASSERT(!vn_canvmio(vp) || vp->v_object != NULL,
+ KASSERT(!vn_canvmio(vp) || VOP_GETVOBJECT(vp, NULL) == 0,
("open: vmio vnode has no backing object after vn_open"));
p->p_retval[0] = indx;
return (0);
@@ -2637,6 +2637,7 @@ fsync(p, uap)
struct vnode *vp;
struct mount *mp;
struct file *fp;
+ vm_object_t obj;
int error;
if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
@@ -2645,8 +2646,8 @@ fsync(p, uap)
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- if (vp->v_object)
- vm_object_page_clean(vp->v_object, 0, 0, 0);
+ if (VOP_GETVOBJECT(vp, &obj) == 0)
+ vm_object_page_clean(obj, 0, 0, 0);
error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p);
#ifdef SOFTUPDATES
if (error == 0 && vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
@@ -3415,7 +3416,7 @@ fhopen(p, uap)
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
fp->f_flag |= FHASLOCK;
}
- if ((vp->v_type == VREG) && (vp->v_object == NULL))
+ if ((vp->v_type == VREG) && (VOP_GETVOBJECT(vp, NULL) != 0))
vfs_object_create(vp, p, p->p_ucred);
VOP_UNLOCK(vp, 0, p);
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 52ad0ef..bebc3c9 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -484,10 +484,9 @@ getnewvnode(tag, mp, vops, vpp)
* if it still has cached pages or we cannot get
* its interlock.
*/
- object = vp->v_object;
if (LIST_FIRST(&vp->v_cache_src) != NULL ||
- (object && (object->resident_page_count ||
- object->ref_count)) ||
+ (VOP_GETVOBJECT(vp, &object) == 0 &&
+ (object->resident_page_count || object->ref_count)) ||
!simple_lock_try(&vp->v_interlock)) {
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
vp = NULL;
@@ -711,8 +710,7 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
* Destroy the copy in the VM cache, too.
*/
simple_lock(&vp->v_interlock);
- object = vp->v_object;
- if (object != NULL) {
+ if (VOP_GETVOBJECT(vp, &object) == 0) {
vm_object_page_remove(object, 0, 0,
(flags & V_SAVE) ? TRUE : FALSE);
}
@@ -1649,7 +1647,6 @@ vclean(vp, flags, p)
struct proc *p;
{
int active;
- vm_object_t obj;
/*
* Check to see if the vnode is in use. If so we have to reference it
@@ -1686,22 +1683,7 @@ vclean(vp, flags, p)
vinvalbuf(vp, 0, NOCRED, p, 0, 0);
}
- if ((obj = vp->v_object) != NULL) {
- if (obj->ref_count == 0) {
- /*
- * vclean() may be called twice. The first time
- * removes the primary reference to the object,
- * the second time goes one further and is a
- * special-case to terminate the object.
- */
- vm_object_terminate(obj);
- } else {
- /*
- * Woe to the process that tries to page now :-).
- */
- vm_pager_deallocate(obj);
- }
- }
+ VOP_DESTROYVOBJECT(vp);
/*
* If purging an active vnode, it must be closed and
@@ -2523,20 +2505,20 @@ loop:
continue;
if (flags != MNT_WAIT) {
- obj = vp->v_object;
- if (obj == NULL || (obj->flags & OBJ_MIGHTBEDIRTY) == 0)
+ if (VOP_GETVOBJECT(vp, &obj) != 0 ||
+ (obj->flags & OBJ_MIGHTBEDIRTY) == 0)
continue;
if (VOP_ISLOCKED(vp, NULL))
continue;
}
simple_lock(&vp->v_interlock);
- if (vp->v_object &&
- (vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
+ if (VOP_GETVOBJECT(vp, &obj) == 0 &&
+ (obj->flags & OBJ_MIGHTBEDIRTY)) {
if (!vget(vp,
LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ, curproc)) {
- if (vp->v_object) {
- vm_object_page_clean(vp->v_object, 0, 0, flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
+ if (VOP_GETVOBJECT(vp, &obj) == 0) {
+ vm_object_page_clean(obj, 0, 0, flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
anyio = 1;
}
vput(vp);
@@ -2563,49 +2545,7 @@ vfs_object_create(vp, p, cred)
struct proc *p;
struct ucred *cred;
{
- struct vattr vat;
- vm_object_t object;
- int error = 0;
-
- if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
- return 0;
-
-retry:
- if ((object = vp->v_object) == NULL) {
- if (vp->v_type == VREG || vp->v_type == VDIR) {
- if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
- goto retn;
- object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
- } else if (devsw(vp->v_rdev) != NULL) {
- /*
- * This simply allocates the biggest object possible
- * for a disk vnode. This should be fixed, but doesn't
- * cause any problems (yet).
- */
- object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
- } else {
- goto retn;
- }
- /*
- * Dereference the reference we just created. This assumes
- * that the object is associated with the vp.
- */
- object->ref_count--;
- vp->v_usecount--;
- } else {
- if (object->flags & OBJ_DEAD) {
- VOP_UNLOCK(vp, 0, p);
- tsleep(object, PVM, "vodead", 0);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- goto retry;
- }
- }
-
- KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
- vp->v_flag |= VOBJBUF;
-
-retn:
- return error;
+ return (VOP_CREATEVOBJECT(vp, cred, p));
}
void
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 7f4c6bc0..4064fa3 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -1068,7 +1068,7 @@ open(p, uap)
goto bad;
}
/* assert that vn_open created a backing object if one is needed */
- KASSERT(!vn_canvmio(vp) || vp->v_object != NULL,
+ KASSERT(!vn_canvmio(vp) || VOP_GETVOBJECT(vp, NULL) == 0,
("open: vmio vnode has no backing object after vn_open"));
p->p_retval[0] = indx;
return (0);
@@ -2637,6 +2637,7 @@ fsync(p, uap)
struct vnode *vp;
struct mount *mp;
struct file *fp;
+ vm_object_t obj;
int error;
if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
@@ -2645,8 +2646,8 @@ fsync(p, uap)
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- if (vp->v_object)
- vm_object_page_clean(vp->v_object, 0, 0, 0);
+ if (VOP_GETVOBJECT(vp, &obj) == 0)
+ vm_object_page_clean(obj, 0, 0, 0);
error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p);
#ifdef SOFTUPDATES
if (error == 0 && vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
@@ -3415,7 +3416,7 @@ fhopen(p, uap)
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
fp->f_flag |= FHASLOCK;
}
- if ((vp->v_type == VREG) && (vp->v_object == NULL))
+ if ((vp->v_type == VREG) && (VOP_GETVOBJECT(vp, NULL) != 0))
vfs_object_create(vp, p, p->p_ucred);
VOP_UNLOCK(vp, 0, p);
diff --git a/sys/kern/vnode_if.src b/sys/kern/vnode_if.src
index 9ccba54..7a6d8ae 100644
--- a/sys/kern/vnode_if.src
+++ b/sys/kern/vnode_if.src
@@ -555,3 +555,27 @@ vop_setextattr {
IN struct ucred *cred;
IN struct proc *p;
};
+
+#
+#% createvobject vp L L L
+#
+vop_createvobject {
+ IN struct vnode *vp;
+ IN struct ucred *cred;
+ IN struct proc *p;
+};
+
+#
+#% destroyvobject vp L L L
+#
+vop_destroyvobject {
+ IN struct vnode *vp;
+};
+
+#
+#% getvobject vp L L L
+#
+vop_getvobject {
+ IN struct vnode *vp;
+ OUT struct vm_object **objpp;
+};
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index 08de778..41ca09b 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -629,6 +629,9 @@ int vop_enotty __P((struct vop_generic_args *ap));
int vop_defaultop __P((struct vop_generic_args *ap));
int vop_null __P((struct vop_generic_args *ap));
int vop_panic __P((struct vop_generic_args *ap));
+int vop_stdcreatevobject __P((struct vop_createvobject_args *ap));
+int vop_stddestroyvobject __P((struct vop_destroyvobject_args *ap));
+int vop_stdgetvobject __P((struct vop_getvobject_args *ap));
void vfree __P((struct vnode *));
void vput __P((struct vnode *vp));
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 79b6b72..43d75a5 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -198,6 +198,7 @@ mmap(p, uap)
int disablexworkaround;
off_t pos;
struct vmspace *vms = p->p_vmspace;
+ vm_object_t obj;
addr = (vm_offset_t) uap->addr;
size = uap->len;
@@ -295,6 +296,14 @@ mmap(p, uap)
vp = (struct vnode *) fp->f_data;
if (vp->v_type != VREG && vp->v_type != VCHR)
return (EINVAL);
+ if (vp->v_type == VREG) {
+ /*
+ * Get the proper underlying object
+ */
+ if (VOP_GETVOBJECT(vp, &obj) != 0)
+ return (EINVAL);
+ vp = (struct vnode*)obj->handle;
+ }
/*
* XXX hack to handle use of /dev/zero to map anon memory (ala
* SunOS).