author     dillon <dillon@FreeBSD.org>    2001-07-04 16:20:28 +0000
committer  dillon <dillon@FreeBSD.org>    2001-07-04 16:20:28 +0000
commit     e028603b7e3e4fb35cdf00aab533f3965f4a13cc (patch)
tree       7420cce169451a74c5b87963467a4aeff668ed12 /sys/vm/vm_map.c
parent     0b028660051eb7abf4306d34e7fec0e7fde86a28 (diff)
With Alfred's permission, remove vm_mtx in favor of a fine-grained approach (this commit is just the first stage). Also add various GIANT_ macros to formalize the removal of Giant, making it easy to test in a more piecemeal fashion. These macros will allow us to test fine-grained locks to a degree before removing Giant, and also after, and to remove Giant piecemeal via sysctls on those subsystems which the authors believe can operate without Giant.
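Judging from the substitutions in the diff below (including the vm_map_clean() hunk, where GIANT_REQUIRED replaces an explicit mtx_assert(&Giant, MA_OWNED)), GIANT_REQUIRED stands in for an assertion that the calling thread holds the Giant mutex. A minimal sketch of such a macro, assuming the stock mtx_assert() interface from sys/sys/mutex.h (the committed definition may differ):

    /*
     * Sketch only, not the committed definition: assert that the
     * caller holds Giant.  mtx_assert() is compiled to a no-op
     * unless the kernel is built with INVARIANTS.
     */
    #include <sys/mutex.h>

    #define GIANT_REQUIRED  mtx_assert(&Giant, MA_OWNED)

Under this reading, each function below documents (and, with INVARIANTS, enforces) that it must be entered with Giant held, instead of asserting ownership of the per-subsystem vm_mtx.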
Diffstat (limited to 'sys/vm/vm_map.c')
-rw-r--r--  sys/vm/vm_map.c  113
1 file changed, 53 insertions(+), 60 deletions(-)
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 229a822..707f5e6 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -173,7 +173,7 @@ vmspace_alloc(min, max)
{
struct vmspace *vm;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
vm = zalloc(vmspace_zone);
CTR1(KTR_VM, "vmspace_alloc: %p", vm);
vm_map_init(&vm->vm_map, min, max);
@@ -201,8 +201,8 @@ void
vmspace_free(vm)
struct vmspace *vm;
{
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (vm->vm_refcnt == 0)
panic("vmspace_free: attempt to free already freed vmspace");
@@ -273,7 +273,8 @@ vm_map_create(pmap, min, max)
{
vm_map_t result;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
result = zalloc(mapzone);
CTR1(KTR_VM, "vm_map_create: %p", result);
vm_map_init(result, min, max);
@@ -291,8 +292,8 @@ vm_map_init(map, min, max)
struct vm_map *map;
vm_offset_t min, max;
{
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
map->header.next = map->header.prev = &map->header;
map->nentries = 0;
map->size = 0;
@@ -310,8 +311,7 @@ void
vm_map_destroy(map)
struct vm_map *map;
{
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
lockdestroy(&map->lock);
}
@@ -400,8 +400,6 @@ vm_map_entry_unlink(vm_map_t map,
* in the "entry" parameter. The boolean
* result indicates whether the address is
* actually contained in the map.
- *
- * Doesn't block.
*/
boolean_t
vm_map_lookup_entry(map, address, entry)
@@ -412,7 +410,7 @@ vm_map_lookup_entry(map, address, entry)
vm_map_entry_t cur;
vm_map_entry_t last;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
/*
* Start looking either from the head of the list, or from the hint.
*/
@@ -492,7 +490,8 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
vm_map_entry_t temp_entry;
vm_eflags_t protoeflags;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
/*
* Check that the start and end points are not bogus.
*/
@@ -654,7 +653,7 @@ vm_map_findspace(map, start, length, addr)
vm_map_entry_t entry, next;
vm_offset_t end;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (start < map->min_offset)
start = map->min_offset;
if (start > map->max_offset)
@@ -723,7 +722,8 @@ vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
vm_offset_t start;
int result, s = 0;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
start = *addr;
if (map == kmem_map)
@@ -769,7 +769,8 @@ vm_map_simplify_entry(map, entry)
vm_map_entry_t next, prev;
vm_size_t prevsize, esize;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
return;
@@ -988,7 +989,8 @@ vm_map_submap(map, start, end, submap)
vm_map_entry_t entry;
int result = KERN_INVALID_ARGUMENT;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
@@ -1027,7 +1029,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
vm_map_entry_t current;
vm_map_entry_t entry;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
@@ -1117,7 +1119,8 @@ vm_map_madvise(map, start, end, behav)
vm_map_entry_t current, entry;
int modify_map = 0;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
/*
* Some madvise calls directly modify the vm_map_entry, in which case
* we need to use an exclusive lock on the map and we need to perform
@@ -1271,7 +1274,8 @@ vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
vm_map_entry_t entry;
vm_map_entry_t temp_entry;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
switch (new_inheritance) {
case VM_INHERIT_NONE:
case VM_INHERIT_COPY:
@@ -1458,7 +1462,8 @@ vm_map_pageable(map, start, end, new_pageable)
vm_offset_t failed = 0;
int rv;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
@@ -1689,8 +1694,8 @@ vm_map_clean(map, start, end, syncio, invalidate)
vm_object_t object;
vm_ooffset_t offset;
- mtx_assert(&Giant, MA_OWNED);
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
vm_map_lock_read(map);
VM_MAP_RANGE_CHECK(map, start, end);
if (!vm_map_lookup_entry(map, start, &entry)) {
@@ -1769,9 +1774,7 @@ vm_map_clean(map, start, end, syncio, invalidate)
int flags;
vm_object_reference(object);
- mtx_unlock(&vm_mtx);
vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
- mtx_lock(&vm_mtx);
flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
flags |= invalidate ? OBJPC_INVAL : 0;
vm_object_page_clean(object,
@@ -1848,7 +1851,8 @@ vm_map_delete(map, start, end)
vm_map_entry_t entry;
vm_map_entry_t first_entry;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
/*
* Find the start of the region, and clip it
*/
@@ -1950,7 +1954,8 @@ vm_map_remove(map, start, end)
{
int result, s = 0;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (map == kmem_map)
s = splvm();
@@ -1979,7 +1984,8 @@ vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
vm_map_entry_t entry;
vm_map_entry_t tmp_entry;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
return (FALSE);
}
@@ -2027,7 +2033,8 @@ vm_map_split(entry)
vm_size_t size;
vm_ooffset_t offset;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
orig_object = entry->object.vm_object;
if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
return;
@@ -2194,7 +2201,8 @@ vmspace_fork(vm1)
vm_map_entry_t new_entry;
vm_object_t object;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
vm_map_lock(old_map);
old_map->infork = 1;
@@ -2304,7 +2312,8 @@ vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
vm_size_t init_ssize;
int rv;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
return (KERN_NO_SPACE);
@@ -2368,8 +2377,6 @@ vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
* the stack. Also returns KERN_SUCCESS if addr is outside the
* stack range (this is strange, but preserves compatibility with
* the grow function in vm_machdep.c).
- *
- * Will grab vm_mtx if needed
*/
int
vm_map_growstack (struct proc *p, vm_offset_t addr)
@@ -2383,16 +2390,8 @@ vm_map_growstack (struct proc *p, vm_offset_t addr)
int grow_amount;
int rv;
int is_procstack;
- int hadvmlock;
-
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
-#define myreturn(rval) do { \
- if (!hadvmlock) \
- mtx_unlock(&vm_mtx); \
- return (rval); \
-} while (0)
+
+ GIANT_REQUIRED;
Retry:
vm_map_lock_read(map);
@@ -2400,12 +2399,12 @@ Retry:
/* If addr is already in the entry range, no need to grow.*/
if (vm_map_lookup_entry(map, addr, &prev_entry)) {
vm_map_unlock_read(map);
- myreturn (KERN_SUCCESS);
+ return (KERN_SUCCESS);
}
if ((stack_entry = prev_entry->next) == &map->header) {
vm_map_unlock_read(map);
- myreturn (KERN_SUCCESS);
+ return (KERN_SUCCESS);
}
if (prev_entry == &map->header)
end = stack_entry->start - stack_entry->avail_ssize;
@@ -2423,14 +2422,14 @@ Retry:
addr >= stack_entry->start ||
addr < stack_entry->start - stack_entry->avail_ssize) {
vm_map_unlock_read(map);
- myreturn (KERN_SUCCESS);
+ return (KERN_SUCCESS);
}
/* Find the minimum grow amount */
grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
if (grow_amount > stack_entry->avail_ssize) {
vm_map_unlock_read(map);
- myreturn (KERN_NO_SPACE);
+ return (KERN_NO_SPACE);
}
/* If there is no longer enough space between the entries
@@ -2449,7 +2448,7 @@ Retry:
stack_entry->avail_ssize = stack_entry->start - end;
vm_map_unlock(map);
- myreturn (KERN_NO_SPACE);
+ return (KERN_NO_SPACE);
}
is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
@@ -2460,7 +2459,7 @@ Retry:
if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
vm_map_unlock_read(map);
- myreturn (KERN_NO_SPACE);
+ return (KERN_NO_SPACE);
}
/* Round up the grow amount modulo SGROWSIZ */
@@ -2512,8 +2511,7 @@ Retry:
}
vm_map_unlock(map);
- myreturn (rv);
-#undef myreturn
+ return (rv);
}
/*
@@ -2527,7 +2525,7 @@ vmspace_exec(struct proc *p) {
struct vmspace *newvmspace;
vm_map_t map = &p->p_vmspace->vm_map;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
(caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
@@ -2555,7 +2553,7 @@ vmspace_unshare(struct proc *p) {
struct vmspace *oldvmspace = p->p_vmspace;
struct vmspace *newvmspace;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (oldvmspace->vm_refcnt == 1)
return;
newvmspace = vmspace_fork(oldvmspace);
@@ -2588,9 +2586,6 @@ vmspace_unshare(struct proc *p) {
* specified, the map may be changed to perform virtual
* copying operations, although the data referenced will
* remain the same.
- *
- * Can block locking maps and while calling vm_object_shadow().
- * Will drop/reaquire the vm_mtx.
*/
int
vm_map_lookup(vm_map_t *var_map, /* IN/OUT */
@@ -2607,7 +2602,7 @@ vm_map_lookup(vm_map_t *var_map, /* IN/OUT */
vm_prot_t prot;
vm_prot_t fault_type = fault_typea;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
RetryLookup:;
/*
@@ -2779,8 +2774,7 @@ vm_map_lookup_done(map, entry)
/*
* Unlock the main-level map
*/
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
vm_map_unlock_read(map);
}
@@ -2809,7 +2803,8 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
off_t ooffset;
int cnt;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (npages)
*npages = 0;
@@ -3021,8 +3016,6 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
* Performs the copy_on_write operations necessary to allow the virtual copies
* into user space to work. This has to be called for write(2) system calls
* from other processes, file unlinking, and file size shrinkage.
- *
- * Requires that the vm_mtx is held
*/
void
vm_freeze_copyopts(object, froma, toa)
@@ -3033,7 +3026,7 @@ vm_freeze_copyopts(object, froma, toa)
vm_object_t robject;
vm_pindex_t idx;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if ((object == NULL) ||
((object->flags & OBJ_OPT) == 0))
return;
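The vm_map_growstack() hunk above shows the pattern this change retires: the old code took vm_mtx only if the caller did not already hold it, and routed every exit through a local myreturn macro so the conditional unlock was never missed. With Giant assumed held on entry, plain returns suffice. A condensed before/after sketch (bodies elided; the _old/_new suffixes are illustrative, not real kernel symbols):

    /* Before: conditionally acquire vm_mtx, unlock on every exit path. */
    int
    vm_map_growstack_old(struct proc *p, vm_offset_t addr)
    {
            int rv = KERN_SUCCESS;
            int hadvmlock = mtx_owned(&vm_mtx);

            if (!hadvmlock)
                    mtx_lock(&vm_mtx);
            /* ... grow the stack, setting rv ... */
            if (!hadvmlock)
                    mtx_unlock(&vm_mtx);
            return (rv);
    }

    /* After: caller must hold Giant; every exit is an ordinary return. */
    int
    vm_map_growstack_new(struct proc *p, vm_offset_t addr)
    {
            int rv = KERN_SUCCESS;

            GIANT_REQUIRED;
            /* ... grow the stack, setting rv ... */
            return (rv);
    }

Dropping the conditional-acquisition dance also removes a class of exit-path bugs: any early return that bypassed myreturn would have leaked vm_mtx.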