author		dillon <dillon@FreeBSD.org>	1999-09-21 05:00:48 +0000
committer	dillon <dillon@FreeBSD.org>	1999-09-21 05:00:48 +0000
commit		37bee3bb3f0cedaa15e074e03de098469f37fe08 (patch)
tree		724796750696919d0118ee89b80a7938648019ed /sys/vm
parent		e286d87af176929d88bb2fbe5aee591986ad1b41 (diff)
download	FreeBSD-src-37bee3bb3f0cedaa15e074e03de098469f37fe08.zip
		FreeBSD-src-37bee3bb3f0cedaa15e074e03de098469f37fe08.tar.gz
cleanup madvise code, add a few more sanity checks.
Reviewed by: Alan Cox <alc@cs.rice.edu>, dg@root.com
Diffstat (limited to 'sys/vm')
-rw-r--r--	sys/vm/vm_map.c		118
-rw-r--r--	sys/vm/vm_map.h		2
-rw-r--r--	sys/vm/vm_mmap.c	19
3 files changed, 89 insertions, 50 deletions
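
For context before the diff: the cleanup separates the madvise behaviors into two classes. MADV_NORMAL, MADV_SEQUENTIAL and MADV_RANDOM change the vm_map_entry itself, while MADV_WILLNEED, MADV_DONTNEED and MADV_FREE act on the pages of the backing vm_object. A minimal userland sketch of issuing both kinds of advice through madvise(2); the anonymous mapping and its size are illustrative and not taken from the patch:

#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	size_t len = 16 * (size_t)getpagesize();
	char *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return (1);
	}

	/* Map-level advice: recorded in the vm_map_entry. */
	if (madvise(p, len, MADV_SEQUENTIAL) == -1)
		perror("madvise(MADV_SEQUENTIAL)");

	/* Object-level advice: applied to the pages backing the mapping. */
	if (madvise(p, len, MADV_WILLNEED) == -1)
		perror("madvise(MADV_WILLNEED)");

	munmap(p, len);
	return (0);
}
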
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 03633e9..e1a23b3 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1009,31 +1009,51 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
* the vm_map_entry structure, or those effecting the underlying
* objects.
*/
-void
+
+int
vm_map_madvise(map, start, end, behav)
vm_map_t map;
vm_offset_t start, end;
int behav;
{
vm_map_entry_t current, entry;
- int modify_map;
+ int modify_map = 0;
- modify_map = (behav == MADV_NORMAL || behav == MADV_SEQUENTIAL ||
- behav == MADV_RANDOM);
+ /*
+ * Some madvise calls directly modify the vm_map_entry, in which case
+ * we need to use an exclusive lock on the map and we need to perform
+ * various clipping operations. Otherwise we only need a read-lock
+ * on the map.
+ */
- if (modify_map) {
+ switch(behav) {
+ case MADV_NORMAL:
+ case MADV_SEQUENTIAL:
+ case MADV_RANDOM:
+ modify_map = 1;
vm_map_lock(map);
- }
- else
+ break;
+ case MADV_WILLNEED:
+ case MADV_DONTNEED:
+ case MADV_FREE:
vm_map_lock_read(map);
+ break;
+ default:
+ return (KERN_INVALID_ARGUMENT);
+ }
+
+ /*
+ * Locate starting entry and clip if necessary.
+ */
VM_MAP_RANGE_CHECK(map, start, end);
if (vm_map_lookup_entry(map, start, &entry)) {
if (modify_map)
vm_map_clip_start(map, entry, start);
- } else
+ } else {
entry = entry->next;
+ }
if (modify_map) {
/*
@@ -1044,8 +1064,8 @@ vm_map_madvise(map, start, end, behav)
*/
for (current = entry;
(current != &map->header) && (current->start < end);
- current = current->next) {
-
+ current = current->next
+ ) {
if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
continue;
@@ -1067,51 +1087,53 @@ vm_map_madvise(map, start, end, behav)
vm_map_simplify_entry(map, current);
}
vm_map_unlock(map);
- }
- else {
- if (behav == MADV_FREE || behav == MADV_DONTNEED ||
- behav == MADV_WILLNEED) {
- vm_pindex_t pindex;
- int count;
+ } else {
+ vm_pindex_t pindex;
+ int count;
- /*
- * madvise behaviors that are implemented in the underlying
- * vm_object.
- *
- * Since we don't clip the vm_map_entry, we have to clip
- * the vm_object pindex and count.
- */
- for (current = entry;
- (current != &map->header) && (current->start < end);
- current = current->next) {
+ /*
+ * madvise behaviors that are implemented in the underlying
+ * vm_object.
+ *
+ * Since we don't clip the vm_map_entry, we have to clip
+ * the vm_object pindex and count.
+ */
+ for (current = entry;
+ (current != &map->header) && (current->start < end);
+ current = current->next
+ ) {
+ if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
+ continue;
- if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
- continue;
+ pindex = OFF_TO_IDX(current->offset);
+ count = atop(current->end - current->start);
- pindex = OFF_TO_IDX(current->offset);
- count = atop(current->end - current->start);
+ if (current->start < start) {
+ pindex += atop(start - current->start);
+ count -= atop(start - current->start);
+ }
+ if (current->end > end)
+ count -= atop(current->end - end);
- if (current->start < start) {
- pindex += atop(start - current->start);
- count -= atop(start - current->start);
- }
- if (current->end > end)
- count -= atop(current->end - end);
-
- if (count <= 0)
- continue;
-
- vm_object_madvise(current->object.vm_object,
- pindex, count, behav);
- if (behav == MADV_WILLNEED)
- pmap_object_init_pt(map->pmap, current->start,
- current->object.vm_object,
- pindex, (count << PAGE_SHIFT),
- 0);
+ if (count <= 0)
+ continue;
+
+ vm_object_madvise(current->object.vm_object,
+ pindex, count, behav);
+ if (behav == MADV_WILLNEED) {
+ pmap_object_init_pt(
+ map->pmap,
+ current->start,
+ current->object.vm_object,
+ pindex,
+ (count << PAGE_SHIFT),
+ 0
+ );
}
}
vm_map_unlock_read(map);
}
+ return(0);
}
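
Since the object-level path does not clip the map entries, the patch clips the object's page index and page count instead. A standalone sketch of that arithmetic, using plain integer types and an illustrative PAGE_SIZE and helper macros in place of the kernel's OFF_TO_IDX()/atop():

#define PAGE_SIZE	4096UL		/* illustrative; the kernel uses the MD constant */
#define PAGE_INDEX(off)	((off) / PAGE_SIZE)	/* stands in for OFF_TO_IDX() */
#define PAGES(len)	((len) / PAGE_SIZE)	/* stands in for atop() */

/*
 * Given a map entry covering [e_start, e_end) that maps its object
 * starting at byte offset e_offset, compute the object page range
 * affected by advice on [start, end).  Returns the page count (0 if
 * the clipped range is empty); *pindexp receives the first page index.
 */
static long
clip_object_range(unsigned long e_start, unsigned long e_end,
    unsigned long e_offset, unsigned long start, unsigned long end,
    unsigned long *pindexp)
{
	unsigned long pindex = PAGE_INDEX(e_offset);
	long count = (long)PAGES(e_end - e_start);

	if (e_start < start) {
		/* Entry begins before the advised range: skip the lead-in pages. */
		pindex += PAGES(start - e_start);
		count -= (long)PAGES(start - e_start);
	}
	if (e_end > end)
		/* Entry runs past the advised range: drop the trailing pages. */
		count -= (long)PAGES(e_end - end);

	if (count <= 0)
		return (0);
	*pindexp = pindex;
	return (count);
}
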
@@ -2764,12 +2786,14 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
TAILQ_REMOVE(&oldobject->shadow_head,
first_object, shadow_list);
oldobject->shadow_count--;
+ /* XXX bump generation? */
vm_object_deallocate(oldobject);
}
TAILQ_INSERT_TAIL(&srcobject->shadow_head,
first_object, shadow_list);
srcobject->shadow_count++;
+ /* XXX bump generation? */
first_object->backing_object = srcobject;
}
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 9a823d4..b02f970 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -361,7 +361,7 @@ int vm_map_protect __P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t
int vm_map_remove __P((vm_map_t, vm_offset_t, vm_offset_t));
void vm_map_startup __P((void));
int vm_map_submap __P((vm_map_t, vm_offset_t, vm_offset_t, vm_map_t));
-void vm_map_madvise __P((vm_map_t, vm_offset_t, vm_offset_t, int));
+int vm_map_madvise __P((vm_map_t, vm_offset_t, vm_offset_t, int));
void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t));
void vm_init2 __P((void));
int vm_uiomove __P((vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *));
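
The prototype change above reflects the new contract: vm_map_madvise() now returns a status so that an unrecognized advice value can be rejected before any lock is taken. A rough userland analogue of that validate-then-lock pattern, using a pthread rwlock as a stand-in for the kernel map lock (the lock, the helper name and the error value are illustrative, not the kernel API):

#include <errno.h>
#include <pthread.h>
#include <sys/mman.h>		/* MADV_* behavior constants */

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Returns 0 on success, EINVAL for an advice value we do not know about. */
static int
apply_advice(int behav)
{
	int modify_map = 0;

	switch (behav) {
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
		/* These rewrite the map entries, so take the lock exclusively. */
		modify_map = 1;
		pthread_rwlock_wrlock(&map_lock);
		break;
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		/* These only touch the backing object; a shared lock suffices. */
		pthread_rwlock_rdlock(&map_lock);
		break;
	default:
		return (EINVAL);	/* KERN_INVALID_ARGUMENT in the kernel */
	}

	if (modify_map) {
		/* ... clip entries and update their behavior hints ... */
	} else {
		/* ... forward the advice to the backing object ... */
	}

	pthread_rwlock_unlock(&map_lock);
	return (0);
}
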
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 9fb8458..0151508 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -136,6 +136,15 @@ ogetpagesize(p, uap)
* modulo the PAGE_SIZE (POSIX 1003.1b). If the address is not
* page-aligned, the actual mapping starts at trunc_page(addr)
* and the return value is adjusted up by the page offset.
+ *
+ * Generally speaking, only character devices which are themselves
+ * memory-based, such as a video framebuffer, can be mmap'd. Otherwise
+ * there would be no cache coherency between a descriptor and a VM mapping
+ * both to the same character device.
+ *
+ * Block devices can be mmap'd no matter what they represent. Cache coherency
+ * is maintained as long as you do not write directly to the underlying
+ * character device.
*/
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
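
As the new comment describes, memory-backed character devices such as a video framebuffer are the usual candidates for device mmap(). A minimal sketch of mapping such a device; the device path and mapping length below are placeholders for whatever the driver actually exports:

#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	const char *dev = "/dev/fb0";	/* placeholder device path */
	size_t len = 4096;		/* placeholder; the driver defines the real size */
	unsigned char *fb;
	int fd;

	fd = open(dev, O_RDWR);
	if (fd == -1) {
		perror(dev);
		return (1);
	}

	/* The driver's mmap entry point decides which pages back this range. */
	fb = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (fb == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return (1);
	}

	fb[0] = 0xff;			/* write straight into device memory */

	munmap(fb, len);
	close(fd);
	return (0);
}
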
@@ -615,6 +624,12 @@ madvise(p, uap)
struct madvise_args *uap;
{
vm_offset_t start, end;
+
+ /*
+ * Check for illegal behavior
+ */
+ if (uap->behav < 0 || uap->behav > MADV_FREE)
+ return (EINVAL);
/*
* Check for illegal addresses. Watch out for address wrap... Note
* that VM_*_ADDRESS are not constants due to casts (argh).
@@ -636,8 +651,8 @@ madvise(p, uap)
start = trunc_page((vm_offset_t) uap->addr);
end = round_page((vm_offset_t) uap->addr + uap->len);
- vm_map_madvise(&p->p_vmspace->vm_map, start, end, uap->behav);
-
+ if (vm_map_madvise(&p->p_vmspace->vm_map, start, end, uap->behav))
+ return (EINVAL);
return (0);
}
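
With the new range check, an out-of-range advice value is reported as EINVAL instead of being silently accepted. A small illustration; the bogus value 1000 is arbitrary:

#include <sys/mman.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	size_t len = (size_t)getpagesize();
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return (1);
	}
	if (madvise(p, len, 1000) == -1)	/* not a valid MADV_* value */
		printf("madvise(1000): %s\n", strerror(errno));	/* expect EINVAL */
	munmap(p, len);
	return (0);
}
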