author	jhb <jhb@FreeBSD.org>	2009-06-01 21:32:52 +0000
committer	jhb <jhb@FreeBSD.org>	2009-06-01 21:32:52 +0000
commit	fea04a3fd158d65e9ffd20e50e38364d82b85c44 (patch)
tree	3173f83adb95e5298ee26ee3bdedf7f6b4746d49 /sys/vm
parent	904747c9f98f0e11c35daca161c7253a12c080f6 (diff)
Add an extension to the character device interface that allows character
device drivers to use arbitrary VM objects to satisfy individual mmap()
requests.

- A new d_mmap_single(cdev, &foff, objsize, &object, prot) callback is
  added to cdevsw.  This function is called for each mmap() request.  If
  it returns ENODEV, then the mmap() request will fall back to using the
  device's device pager object and d_mmap().  Otherwise, the method can
  return a VM object to satisfy this entire mmap() request via *object.
  It can also modify the starting offset into this object via *foff.
  This allows device drivers to use the file offset as a cookie to
  identify specific VM objects.
- vm_mmap_vnode() has been changed to call vm_mmap_cdev() directly when
  mapping VCHR vnodes.  This avoids duplicating all the cdev mmap
  handling code and simplifies some of vm_mmap_vnode().
- D_VERSION has been bumped to D_VERSION_02.  Older device drivers using
  D_VERSION_01 are still supported.

MFC after:	1 month
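As an illustration of the new hook, a minimal driver-side sketch might look like the following.  The foo_* names, the foo_obj object, and the FOO_FALLBACK_OFFSET cutoff are hypothetical; only the d_mmap_single() signature, the cdevsw wiring, and the D_VERSION requirement come from the interface described above.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

/*
 * Hypothetical cutoff: offsets at or above it fall back to the classic
 * d_mmap()/device pager path by returning ENODEV.
 */
#define	FOO_FALLBACK_OFFSET	0x100000

/* Hypothetical VM object created elsewhere by the driver. */
static vm_object_t foo_obj;

static int
foo_mmap_single(struct cdev *cdev, vm_ooffset_t *foff, vm_size_t objsize,
    struct vm_object **objp, int nprot)
{

	if (*foff >= FOO_FALLBACK_OFFSET)
		return (ENODEV);	/* use d_mmap() and the device pager */

	/*
	 * The caller consumes a reference on the returned object, so
	 * acquire one before handing it back.
	 */
	vm_object_reference(foo_obj);
	*objp = foo_obj;
	*foff = 0;			/* map from the start of foo_obj */
	return (0);
}

static struct cdevsw foo_cdevsw = {
	.d_version =	D_VERSION,	/* D_VERSION_02 or newer */
	.d_name =	"foo",
	.d_mmap_single = foo_mmap_single,
};

The vm_object_reference() call reflects the contract spelled out in the vm_mmap_cdev() comment in the diff below: d_mmap_single() must hand back a referenced object.  Returning ENODEV instead routes the request to the device pager and d_mmap(), exactly as for D_VERSION_01 drivers.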
Diffstat (limited to 'sys/vm')
-rw-r--r--	sys/vm/vm_mmap.c	105
1 file changed, 47 insertions, 58 deletions
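From userland, the offset passed to mmap(2) is what the driver's d_mmap_single() sees via *foff, so it can serve as a cookie selecting a particular VM object.  A short sketch of such a consumer follows; the /dev/foo node and the 0x10000 offset are made up for illustration.

#include <sys/mman.h>

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	void *p;
	int fd;

	fd = open("/dev/foo", O_RDWR);	/* hypothetical device node */
	if (fd < 0) {
		perror("open");
		return (1);
	}

	/*
	 * The file offset is passed through to the driver, which can use
	 * it to pick the backing VM object for this mapping.
	 */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0x10000);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return (1);
	}

	/* ... use the mapping ... */

	munmap(p, 4096);
	close(fd);
	return (0);
}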
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 6cc0acc..c79a0f4 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -117,9 +117,9 @@ vmmapentry_rsrc_init(dummy)
}
static int vm_mmap_vnode(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
- int *, struct vnode *, vm_ooffset_t, vm_object_t *);
+ int *, struct vnode *, vm_ooffset_t *, vm_object_t *);
static int vm_mmap_cdev(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
- int *, struct cdev *, vm_ooffset_t, vm_object_t *);
+ int *, struct cdev *, vm_ooffset_t *, vm_object_t *);
static int vm_mmap_shm(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
int *, struct shmfd *, vm_ooffset_t, vm_object_t *);
@@ -1142,15 +1142,14 @@ munlock(td, uap)
int
vm_mmap_vnode(struct thread *td, vm_size_t objsize,
vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
- struct vnode *vp, vm_ooffset_t foff, vm_object_t *objp)
+ struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp)
{
struct vattr va;
- void *handle;
vm_object_t obj;
+ vm_offset_t foff;
struct mount *mp;
- struct cdevsw *dsw;
struct ucred *cred;
- int error, flags, type;
+ int error, flags;
int vfslocked;
mp = vp->v_mount;
@@ -1160,6 +1159,7 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
VFS_UNLOCK_GIANT(vfslocked);
return (error);
}
+ foff = *foffp;
flags = *flagsp;
obj = vp->v_object;
if (vp->v_type == VREG) {
@@ -1175,41 +1175,12 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
vp = (struct vnode*)obj->handle;
vget(vp, LK_SHARED, td);
}
- type = OBJT_VNODE;
- handle = vp;
} else if (vp->v_type == VCHR) {
- type = OBJT_DEVICE;
- handle = vp->v_rdev;
-
- dsw = dev_refthread(handle);
- if (dsw == NULL) {
- error = ENXIO;
- goto done;
- }
- if (dsw->d_flags & D_MMAP_ANON) {
- dev_relthread(handle);
- *maxprotp = VM_PROT_ALL;
- *flagsp |= MAP_ANON;
- error = 0;
- goto done;
- }
- dev_relthread(handle);
- /*
- * cdevs does not provide private mappings of any kind.
- */
- if ((*maxprotp & VM_PROT_WRITE) == 0 &&
- (prot & PROT_WRITE) != 0) {
- error = EACCES;
- goto done;
- }
- if (flags & (MAP_PRIVATE|MAP_COPY)) {
- error = EINVAL;
- goto done;
- }
- /*
- * Force device mappings to be shared.
- */
- flags |= MAP_SHARED;
+ error = vm_mmap_cdev(td, objsize, prot, maxprotp, flagsp,
+ vp->v_rdev, foffp, objp);
+ if (error == 0)
+ goto mark_atime;
+ goto done;
} else {
error = EINVAL;
goto done;
@@ -1235,18 +1206,18 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
* we do not need to sync it.
* Adjust object size to be the size of actual file.
*/
- if (vp->v_type == VREG) {
- objsize = round_page(va.va_size);
- if (va.va_nlink == 0)
- flags |= MAP_NOSYNC;
- }
- obj = vm_pager_allocate(type, handle, objsize, prot, foff);
+ objsize = round_page(va.va_size);
+ if (va.va_nlink == 0)
+ flags |= MAP_NOSYNC;
+ obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff);
if (obj == NULL) {
- error = (type == OBJT_DEVICE ? EINVAL : ENOMEM);
+ error = ENOMEM;
goto done;
}
*objp = obj;
*flagsp = flags;
+
+mark_atime:
vfs_mark_atime(vp, cred);
done:
@@ -1266,11 +1237,11 @@ done:
int
vm_mmap_cdev(struct thread *td, vm_size_t objsize,
vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
- struct cdev *cdev, vm_ooffset_t foff, vm_object_t *objp)
+ struct cdev *cdev, vm_ooffset_t *foff, vm_object_t *objp)
{
vm_object_t obj;
struct cdevsw *dsw;
- int flags;
+ int error, flags;
flags = *flagsp;
@@ -1283,25 +1254,43 @@ vm_mmap_cdev(struct thread *td, vm_size_t objsize,
*flagsp |= MAP_ANON;
return (0);
}
- dev_relthread(cdev);
/*
- * cdevs does not provide private mappings of any kind.
+ * cdevs do not provide private mappings of any kind.
*/
if ((*maxprotp & VM_PROT_WRITE) == 0 &&
- (prot & PROT_WRITE) != 0)
+ (prot & PROT_WRITE) != 0) {
+ dev_relthread(cdev);
return (EACCES);
- if (flags & (MAP_PRIVATE|MAP_COPY))
+ }
+ if (flags & (MAP_PRIVATE|MAP_COPY)) {
+ dev_relthread(cdev);
return (EINVAL);
+ }
/*
* Force device mappings to be shared.
*/
flags |= MAP_SHARED;
#ifdef MAC_XXX
- error = mac_check_cdev_mmap(td->td_ucred, cdev, prot);
- if (error != 0)
+ error = mac_cdev_check_mmap(td->td_ucred, cdev, prot);
+ if (error != 0) {
+ dev_relthread(cdev);
return (error);
+ }
#endif
- obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, foff);
+ /*
+ * First, try d_mmap_single(). If that is not implemented
+ * (returns ENODEV), fall back to using the device pager.
+ * Note that d_mmap_single() must return a reference to the
+ * object (it needs to bump the reference count of the object
+ * it returns somehow).
+ *
+ * XXX assumes VM_PROT_* == PROT_*
+ */
+ error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot);
+ dev_relthread(cdev);
+ if (error != ENODEV)
+ return (error);
+ obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff);
if (obj == NULL)
return (EINVAL);
*objp = obj;
@@ -1396,11 +1385,11 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
switch (handle_type) {
case OBJT_DEVICE:
error = vm_mmap_cdev(td, size, prot, &maxprot, &flags,
- handle, foff, &object);
+ handle, &foff, &object);
break;
case OBJT_VNODE:
error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
- handle, foff, &object);
+ handle, &foff, &object);
break;
case OBJT_SWAP:
error = vm_mmap_shm(td, size, prot, &maxprot, &flags,