| author | dillon <dillon@FreeBSD.org> | 2001-07-04 16:20:28 +0000 |
|---|---|---|
| committer | dillon <dillon@FreeBSD.org> | 2001-07-04 16:20:28 +0000 |
| commit | e028603b7e3e4fb35cdf00aab533f3965f4a13cc (patch) | |
| tree | 7420cce169451a74c5b87963467a4aeff668ed12 /sys/vm/vm_kern.c | |
| parent | 0b028660051eb7abf4306d34e7fec0e7fde86a28 (diff) | |
With Alfred's permission, remove vm_mtx in favor of a fine-grained approach
(this commit is just the first stage). Also add various GIANT_ macros to
formalize the removal of Giant, making it easy to test in a more piecemeal
fashion. These macros will allow us to test fine-grained locks to a degree
before removing Giant, and also after, and to remove Giant in a piecemeal
fashion via sysctls on those subsystems which the authors believe can
operate without Giant.
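The pattern being retired is visible throughout the diff below: each kmem_* entry point recorded whether it already held vm_mtx, took the lock if not, and released it again on every exit path. The replacement simply asserts that the caller entered with Giant held. The following sketch is illustrative only, not code from this commit; it assumes GIANT_REQUIRED amounts to an assertion along the lines of mtx_assert(&Giant, MA_OWNED).

```c
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/*
 * Sketch of the idiom change; not code from the commit.
 * Old idiom (the "-" lines in the diff below): tolerate recursion on
 * vm_mtx by remembering whether the caller already held it.
 */
static void
old_style_entry(void)
{
	int hadvmlock;

	hadvmlock = mtx_owned(&vm_mtx);	/* vm_mtx is the lock this commit removes */
	if (!hadvmlock)
		mtx_lock(&vm_mtx);
	/* ... VM work ... */
	if (!hadvmlock)
		mtx_unlock(&vm_mtx);
}

/*
 * New idiom (the "+" lines in the diff below): the function no longer
 * locks anything itself; it only asserts that the caller holds Giant
 * (assuming GIANT_REQUIRED is an mtx_assert(&Giant, MA_OWNED)-style check).
 */
static void
new_style_entry(void)
{
	GIANT_REQUIRED;
	/* ... VM work ... */
}
```

As the commit message above describes, keeping only an assertion leaves Giant-based serialization intact for now while making the later hand-off to per-subsystem locks a local change rather than a sweep through every function.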
Diffstat (limited to 'sys/vm/vm_kern.c')
| -rw-r--r-- | sys/vm/vm_kern.c | 70 |

1 file changed, 12 insertions, 58 deletions
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 38f969e..96199cd 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -104,17 +104,13 @@ kmem_alloc_pageable(map, size)
 {
 	vm_offset_t addr;
 	int result;
-	int hadvmlock;
 
-	hadvmlock = mtx_owned(&vm_mtx);
-	if (!hadvmlock)
-		mtx_lock(&vm_mtx);
+	GIANT_REQUIRED;
+
 	size = round_page(size);
 	addr = vm_map_min(map);
 	result = vm_map_find(map, NULL, (vm_offset_t) 0,
 	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
-	if (!hadvmlock)
-		mtx_unlock(&vm_mtx);
 	if (result != KERN_SUCCESS) {
 		return (0);
 	}
@@ -135,17 +131,12 @@ kmem_alloc_nofault(map, size)
 {
 	vm_offset_t addr;
 	int result;
-	int hadvmlock;
+	GIANT_REQUIRED;
 
-	hadvmlock = mtx_owned(&vm_mtx);
-	if (!hadvmlock)
-		mtx_lock(&vm_mtx);
 	size = round_page(size);
 	addr = vm_map_min(map);
 	result = vm_map_find(map, NULL, (vm_offset_t) 0,
 	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
-	if (!hadvmlock)
-		mtx_unlock(&vm_mtx);
 	if (result != KERN_SUCCESS) {
 		return (0);
 	}
@@ -164,11 +155,9 @@ kmem_alloc(map, size)
 	vm_offset_t addr;
 	vm_offset_t offset;
 	vm_offset_t i;
-	int hadvmlock;
 
-	hadvmlock = mtx_owned(&vm_mtx);
-	if (!hadvmlock)
-		mtx_lock(&vm_mtx);
+	GIANT_REQUIRED;
+
 	size = round_page(size);
 
 	/*
@@ -184,8 +173,6 @@ kmem_alloc(map, size)
 	vm_map_lock(map);
 	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
 		vm_map_unlock(map);
-		if (!hadvmlock)
-			mtx_unlock(&vm_mtx);
 		return (0);
 	}
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
@@ -230,8 +217,6 @@ kmem_alloc(map, size)
 	(void) vm_map_pageable(map, (vm_offset_t) addr,
 	    addr + size, FALSE);
 
-	if (!hadvmlock)
-		mtx_unlock(&vm_mtx);
 	return (addr);
 }
 
@@ -250,16 +235,9 @@ kmem_free(map, addr, size)
 	vm_offset_t addr;
 	vm_size_t size;
 {
-	int hadvmlock;
-
-	hadvmlock = mtx_owned(&vm_mtx);
-	if (!hadvmlock)
-		mtx_lock(&vm_mtx);
+	GIANT_REQUIRED;
 
 	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
-
-	if (!hadvmlock)
-		mtx_unlock(&vm_mtx);
 }
 
 /*
@@ -282,11 +260,8 @@ kmem_suballoc(parent, min, max, size)
 {
 	int ret;
 	vm_map_t result;
-	int hadvmlock;
 
-	hadvmlock = mtx_owned(&vm_mtx);
-	if (!hadvmlock)
-		mtx_lock(&vm_mtx);
+	GIANT_REQUIRED;
 
 	size = round_page(size);
 
@@ -304,8 +279,6 @@ kmem_suballoc(parent, min, max, size)
 		panic("kmem_suballoc: cannot create submap");
 	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
 		panic("kmem_suballoc: unable to change range to submap");
-	if (!hadvmlock)
-		mtx_unlock(&vm_mtx);
 	return (result);
 }
 
@@ -343,12 +316,9 @@ kmem_malloc(map, size, flags)
 	vm_map_entry_t entry;
 	vm_offset_t addr;
 	vm_page_t m;
-	int hadvmlock;
 
-	hadvmlock = mtx_owned(&vm_mtx);
-	if (!hadvmlock)
-		mtx_lock(&vm_mtx);
-
+	GIANT_REQUIRED;
+
 	size = round_page(size);
 	addr = vm_map_min(map);
 
@@ -444,13 +414,9 @@ retry:
 	}
 	vm_map_unlock(map);
-	if (!hadvmlock)
-		mtx_unlock(&vm_mtx);
 	return (addr);
 
 bad:
-	if (!hadvmlock)
-		mtx_unlock(&vm_mtx);
 	return (0);
 }
 
@@ -469,11 +435,8 @@ kmem_alloc_wait(map, size)
 	vm_size_t size;
 {
 	vm_offset_t addr;
-	int hadvmlock;
 
-	hadvmlock = mtx_owned(&vm_mtx);
-	if (!hadvmlock)
-		mtx_lock(&vm_mtx);
+	GIANT_REQUIRED;
 
 	size = round_page(size);
 
@@ -488,17 +451,13 @@ kmem_alloc_wait(map, size)
 		/* no space now; see if we can ever get space */
 		if (vm_map_max(map) - vm_map_min(map) < size) {
 			vm_map_unlock(map);
-			if (!hadvmlock)
-				mtx_unlock(&vm_mtx);
 			return (0);
 		}
 		vm_map_unlock(map);
-		msleep(map, &vm_mtx, PVM, "kmaw", 0);
+		tsleep(map, PVM, "kmaw", 0);
 	}
 	vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size,
 	    VM_PROT_ALL, VM_PROT_ALL, 0);
 	vm_map_unlock(map);
-	if (!hadvmlock)
-		mtx_unlock(&vm_mtx);
 	return (addr);
 }
 
@@ -514,17 +473,12 @@ kmem_free_wakeup(map, addr, size)
 	vm_offset_t addr;
 	vm_size_t size;
 {
-	int hadvmlock;
+	GIANT_REQUIRED;
 
-	hadvmlock = mtx_owned(&vm_mtx);
-	if (!hadvmlock)
-		mtx_lock(&vm_mtx);
 	vm_map_lock(map);
 	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
 	wakeup(map);
 	vm_map_unlock(map);
-	if (!hadvmlock)
-		mtx_unlock(&vm_mtx);
 }
 
 /*
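A usage-level consequence of the change above, sketched under stated assumptions: callers of these kmem_* routines must now enter with Giant held, since the functions assert it rather than taking a VM lock themselves. The wrapper below is hypothetical; its name and the exact header list are illustrative, not code from the tree of that era.

```c
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

/*
 * Hypothetical caller, not code from this commit: after the change,
 * kmem_alloc() asserts Giant via GIANT_REQUIRED instead of taking
 * vm_mtx itself, so the caller provides Giant around the call.
 */
static vm_offset_t
example_wired_alloc(vm_size_t size)
{
	vm_offset_t addr;

	mtx_lock(&Giant);			/* satisfies GIANT_REQUIRED inside kmem_alloc() */
	addr = kmem_alloc(kernel_map, size);	/* kmem_alloc() rounds size to a page itself */
	mtx_unlock(&Giant);
	return (addr);				/* 0 on failure, as before the change */
}
```

Which call sites actually hold Giant is exactly the knob the commit message describes: the GIANT_ macros, combined with per-subsystem sysctls, are meant to let Giant be dropped piecemeal once a subsystem's own locking is trusted.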