author     dillon <dillon@FreeBSD.org>    2001-07-04 16:20:28 +0000
committer  dillon <dillon@FreeBSD.org>    2001-07-04 16:20:28 +0000
commit     e028603b7e3e4fb35cdf00aab533f3965f4a13cc (patch)
tree       7420cce169451a74c5b87963467a4aeff668ed12 /sys/kern
parent     0b028660051eb7abf4306d34e7fec0e7fde86a28 (diff)
With Alfred's permission, remove vm_mtx in favor of a fine-grained approach
(this commit is just the first stage). Also add various GIANT_ macros to formalize the removal of Giant, making it easy to test the transition in a more piecemeal fashion. These macros will allow us to test fine-grained locks to a degree both before and after removing Giant, and to remove Giant piecemeal via sysctls on those subsystems which the authors believe can operate without it.
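
Editor's note: throughout the patch below, mtx_lock(&vm_mtx)/mtx_unlock(&vm_mtx) pairs are replaced by a GIANT_REQUIRED assertion at the top of each function. The macro definition itself is not part of this sys/kern-only diffstat; the sketch below is a minimal guess at its shape, assuming it wraps the existing mtx_assert() interface, which is consistent with the vput() hunk later in the patch where an explicit mtx_assert(&Giant, MA_OWNED) is replaced by GIANT_REQUIRED.

	/*
	 * Hedged sketch of the GIANT_ assertion macro described in the
	 * commit message; the real definition (and any debug-option
	 * conditionals around it) lives in headers outside this diff.
	 */
	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>

	#ifndef GIANT_REQUIRED
	#define	GIANT_REQUIRED	mtx_assert(&Giant, MA_OWNED)
	#endif

Used at the top of a function, it documents and enforces (under INVARIANTS-style checking) that the caller still holds Giant, so the per-call vm_mtx lock/unlock pairs can be deleted without silently losing the locking requirement.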
Diffstat (limited to 'sys/kern')
-rw-r--r--   sys/kern/imgact_aout.c      10
-rw-r--r--   sys/kern/imgact_elf.c       17
-rw-r--r--   sys/kern/init_main.c         8
-rw-r--r--   sys/kern/kern_exec.c        10
-rw-r--r--   sys/kern/kern_exit.c         4
-rw-r--r--   sys/kern/kern_fork.c         5
-rw-r--r--   sys/kern/kern_resource.c     4
-rw-r--r--   sys/kern/link_elf.c         10
-rw-r--r--   sys/kern/link_elf_obj.c     10
-rw-r--r--   sys/kern/subr_blist.c        1
-rw-r--r--   sys/kern/sys_pipe.c         18
-rw-r--r--   sys/kern/sysv_shm.c         24
-rw-r--r--   sys/kern/uipc_syscalls.c    17
-rw-r--r--   sys/kern/vfs_bio.c         112
-rw-r--r--   sys/kern/vfs_cluster.c      16
-rw-r--r--   sys/kern/vfs_default.c      17
-rw-r--r--   sys/kern/vfs_extattr.c       4
-rw-r--r--   sys/kern/vfs_subr.c         16
-rw-r--r--   sys/kern/vfs_syscalls.c      4
19 files changed, 103 insertions, 204 deletions
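
Editor's note: the commit message also mentions removing Giant piecemeal via sysctls on subsystems believed safe without it. That machinery is not shown in this diff; the following is a purely hypothetical illustration of one way a subsystem could gate its Giant usage behind a run-time sysctl. All identifiers here are made up for the example and are not FreeBSD APIs from this patch.

	/*
	 * Hypothetical sketch: a per-subsystem knob that decides whether
	 * Giant is still acquired around (in this example) VM operations.
	 */
	#include <sys/param.h>
	#include <sys/kernel.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>
	#include <sys/sysctl.h>

	static int vm_use_giant = 1;	/* 1 = keep taking Giant for VM work */
	SYSCTL_INT(_debug, OID_AUTO, vm_use_giant, CTLFLAG_RW,
	    &vm_use_giant, 0, "Acquire Giant around VM operations");

	static __inline void
	vm_giant_lock(void)
	{
		if (vm_use_giant)
			mtx_lock(&Giant);
	}

	static __inline void
	vm_giant_unlock(void)
	{
		if (vm_use_giant)
			mtx_unlock(&Giant);
	}

Flipping the sysctl to 0 on a system whose fine-grained locks are believed sufficient lets the subsystem run without Giant, while leaving the default conservative.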
diff --git a/sys/kern/imgact_aout.c b/sys/kern/imgact_aout.c
index 8becda3..856d4ec 100644
--- a/sys/kern/imgact_aout.c
+++ b/sys/kern/imgact_aout.c
@@ -91,6 +91,8 @@ exec_aout_imgact(imgp)
unsigned long bss_size;
int error;
+ GIANT_REQUIRED;
+
/*
* Linux and *BSD binaries look very much alike,
* only the machine id is different:
@@ -171,7 +173,6 @@ exec_aout_imgact(imgp)
if (error)
return (error);
- mtx_lock(&vm_mtx);
/*
* Destroy old process VM and create a new one (with a new stack)
*/
@@ -185,9 +186,7 @@ exec_aout_imgact(imgp)
vp = imgp->vp;
map = &vmspace->vm_map;
vm_map_lock(map);
- mtx_unlock(&vm_mtx);
VOP_GETVOBJECT(vp, &object);
- mtx_lock(&vm_mtx);
vm_object_reference(object);
text_end = virtual_offset + a_out->a_text;
@@ -198,7 +197,6 @@ exec_aout_imgact(imgp)
MAP_COPY_ON_WRITE | MAP_PREFAULT);
if (error) {
vm_map_unlock(map);
- mtx_unlock(&vm_mtx);
return (error);
}
data_end = text_end + a_out->a_data;
@@ -211,7 +209,6 @@ exec_aout_imgact(imgp)
MAP_COPY_ON_WRITE | MAP_PREFAULT);
if (error) {
vm_map_unlock(map);
- mtx_unlock(&vm_mtx);
return (error);
}
}
@@ -222,7 +219,6 @@ exec_aout_imgact(imgp)
VM_PROT_ALL, VM_PROT_ALL, 0);
if (error) {
vm_map_unlock(map);
- mtx_unlock(&vm_mtx);
return (error);
}
}
@@ -235,8 +231,6 @@ exec_aout_imgact(imgp)
vmspace->vm_daddr = (caddr_t) (uintptr_t)
(virtual_offset + a_out->a_text);
- mtx_unlock(&vm_mtx);
-
/* Fill in image_params */
imgp->interpreted = 0;
imgp->entry_addr = a_out->a_entry;
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index fbb2a71..8cf2e24 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -198,6 +198,8 @@ elf_load_section(struct proc *p, struct vmspace *vmspace, struct vnode *vp, vm_o
vm_offset_t file_addr;
vm_offset_t data_buf = 0;
+ GIANT_REQUIRED;
+
VOP_GETVOBJECT(vp, &object);
error = 0;
@@ -230,7 +232,6 @@ elf_load_section(struct proc *p, struct vmspace *vmspace, struct vnode *vp, vm_o
else
map_len = round_page(offset+filsz) - file_addr;
- mtx_lock(&vm_mtx);
if (map_len != 0) {
vm_object_reference(object);
vm_map_lock(&vmspace->vm_map);
@@ -245,13 +246,11 @@ elf_load_section(struct proc *p, struct vmspace *vmspace, struct vnode *vp, vm_o
vm_map_unlock(&vmspace->vm_map);
if (rv != KERN_SUCCESS) {
vm_object_deallocate(object);
- mtx_unlock(&vm_mtx);
return EINVAL;
}
/* we can stop now if we've covered it all */
if (memsz == filsz) {
- mtx_unlock(&vm_mtx);
return 0;
}
}
@@ -275,7 +274,6 @@ elf_load_section(struct proc *p, struct vmspace *vmspace, struct vnode *vp, vm_o
VM_PROT_ALL, VM_PROT_ALL, 0);
vm_map_unlock(&vmspace->vm_map);
if (rv != KERN_SUCCESS) {
- mtx_unlock(&vm_mtx);
return EINVAL;
}
}
@@ -293,17 +291,13 @@ elf_load_section(struct proc *p, struct vmspace *vmspace, struct vnode *vp, vm_o
MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
if (rv != KERN_SUCCESS) {
vm_object_deallocate(object);
- mtx_unlock(&vm_mtx);
return EINVAL;
}
/* send the page fragment to user space */
- mtx_unlock(&vm_mtx);
error = copyout((caddr_t)data_buf, (caddr_t)map_addr, copy_len);
- mtx_lock(&vm_mtx);
vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
if (error) {
- mtx_unlock(&vm_mtx);
return (error);
}
}
@@ -314,7 +308,6 @@ elf_load_section(struct proc *p, struct vmspace *vmspace, struct vnode *vp, vm_o
vm_map_protect(&vmspace->vm_map, map_addr, map_addr + map_len, prot,
FALSE);
- mtx_unlock(&vm_mtx);
return error;
}
@@ -475,6 +468,8 @@ exec_elf_imgact(struct image_params *imgp)
Elf_Brandinfo *brand_info;
char path[MAXPATHLEN];
+ GIANT_REQUIRED;
+
/*
* Do we have a valid ELF header ?
*/
@@ -510,11 +505,9 @@ exec_elf_imgact(struct image_params *imgp)
if ((error = exec_extract_strings(imgp)) != 0)
goto fail;
- mtx_lock(&vm_mtx);
exec_new_vmspace(imgp);
vmspace = imgp->proc->p_vmspace;
- mtx_unlock(&vm_mtx);
for (i = 0; i < hdr->e_phnum; i++) {
switch(phdr[i].p_type) {
@@ -571,12 +564,10 @@ exec_elf_imgact(struct image_params *imgp)
}
}
- mtx_lock(&vm_mtx);
vmspace->vm_tsize = text_size >> PAGE_SHIFT;
vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
vmspace->vm_dsize = data_size >> PAGE_SHIFT;
vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
- mtx_unlock(&vm_mtx);
addr = ELF_RTLD_ADDR(vmspace);
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index aa1fa74..3859c5b 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -274,6 +274,8 @@ proc0_init(void *dummy __unused)
register struct filedesc0 *fdp;
register unsigned i;
+ GIANT_REQUIRED;
+
p = &proc0;
/*
@@ -373,14 +375,12 @@ proc0_init(void *dummy __unused)
limit0.p_refcnt = 1;
/* Allocate a prototype map so we have something to fork. */
- mtx_lock(&vm_mtx);
pmap_pinit0(vmspace_pmap(&vmspace0));
p->p_vmspace = &vmspace0;
vmspace0.vm_refcnt = 1;
vm_map_init(&vmspace0.vm_map, round_page(VM_MIN_ADDRESS),
trunc_page(VM_MAXUSER_ADDRESS));
vmspace0.vm_map.pmap = vmspace_pmap(&vmspace0);
- mtx_unlock(&vm_mtx);
p->p_addr = proc0paddr; /* XXX */
/*
@@ -471,6 +471,8 @@ start_init(void *dummy)
mtx_lock(&Giant);
+ GIANT_REQUIRED;
+
p = curproc;
/* Get the vnode for '/'. Set p->p_fd->fd_cdir to reference it. */
@@ -486,13 +488,11 @@ start_init(void *dummy)
* Need just enough stack to hold the faked-up "execve()" arguments.
*/
addr = trunc_page(USRSTACK - PAGE_SIZE);
- mtx_lock(&vm_mtx);
if (vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &addr, PAGE_SIZE,
FALSE, VM_PROT_ALL, VM_PROT_ALL, 0) != 0)
panic("init: couldn't allocate argument space");
p->p_vmspace->vm_maxsaddr = (caddr_t)addr;
p->p_vmspace->vm_ssize = 1;
- mtx_unlock(&vm_mtx);
if ((var = getenv("init_path")) != NULL) {
strncpy(init_path, var, sizeof init_path);
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index ecadfed..2e0b60c 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -445,13 +445,13 @@ exec_map_first_page(imgp)
vm_page_t ma[VM_INITIAL_PAGEIN];
vm_object_t object;
+ GIANT_REQUIRED;
if (imgp->firstpage) {
exec_unmap_first_page(imgp);
}
VOP_GETVOBJECT(imgp->vp, &object);
- mtx_lock(&vm_mtx);
ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
@@ -482,7 +482,6 @@ exec_map_first_page(imgp)
vm_page_protect(ma[0], VM_PROT_NONE);
vm_page_free(ma[0]);
}
- mtx_unlock(&vm_mtx);
return EIO;
}
}
@@ -493,7 +492,6 @@ exec_map_first_page(imgp)
pmap_kenter((vm_offset_t) imgp->image_header, VM_PAGE_TO_PHYS(ma[0]));
imgp->firstpage = ma[0];
- mtx_unlock(&vm_mtx);
return 0;
}
@@ -501,12 +499,11 @@ void
exec_unmap_first_page(imgp)
struct image_params *imgp;
{
+ GIANT_REQUIRED;
if (imgp->firstpage) {
- mtx_lock(&vm_mtx);
pmap_kremove((vm_offset_t) imgp->image_header);
vm_page_unwire(imgp->firstpage, 1);
- mtx_unlock(&vm_mtx);
imgp->firstpage = NULL;
}
}
@@ -525,7 +522,8 @@ exec_new_vmspace(imgp)
caddr_t stack_addr = (caddr_t) (USRSTACK - MAXSSIZ);
vm_map_t map = &vmspace->vm_map;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
imgp->vmspace_destroyed = 1;
/*
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index b1a2ee5..d73c70b 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -120,6 +120,8 @@ exit1(p, rv)
register struct vmspace *vm;
struct exitlist *ep;
+ GIANT_REQUIRED;
+
if (p->p_pid == 1) {
printf("init died (signal %d, exit %d)\n",
WTERMSIG(rv), WEXITSTATUS(rv));
@@ -213,7 +215,6 @@ exit1(p, rv)
* Can't free the entire vmspace as the kernel stack
* may be mapped within that space also.
*/
- mtx_lock(&vm_mtx);
if (vm->vm_refcnt == 1) {
if (vm->vm_shm)
shmexit(p);
@@ -222,7 +223,6 @@ exit1(p, rv)
(void) vm_map_remove(&vm->vm_map, VM_MIN_ADDRESS,
VM_MAXUSER_ADDRESS);
}
- mtx_unlock(&vm_mtx);
PROC_LOCK(p);
if (SESS_LEADER(p)) {
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index dd3eb04..9eecbc2 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -209,6 +209,8 @@ fork1(p1, flags, procp)
struct forklist *ep;
struct filedesc *fd;
+ GIANT_REQUIRED;
+
/* Can't copy and clear */
if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
return (EINVAL);
@@ -218,9 +220,7 @@ fork1(p1, flags, procp)
* certain parts of a process from itself.
*/
if ((flags & RFPROC) == 0) {
-
vm_fork(p1, 0, flags);
- mtx_assert(&vm_mtx, MA_NOTOWNED);
/*
* Close all file descriptors.
@@ -561,7 +561,6 @@ again:
* execution path later. (ie: directly into user mode)
*/
vm_fork(p1, p2, flags);
- mtx_assert(&vm_mtx, MA_NOTOWNED);
if (flags == (RFFDG | RFPROC)) {
cnt.v_forks++;
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index f46313c..f7503db 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -431,6 +431,8 @@ dosetrlimit(p, which, limp)
register struct rlimit *alimp;
int error;
+ GIANT_REQUIRED;
+
if (which >= RLIM_NLIMITS)
return (EINVAL);
alimp = &p->p_rlimit[which];
@@ -498,10 +500,8 @@ dosetrlimit(p, which, limp)
}
addr = trunc_page(addr);
size = round_page(size);
- mtx_lock(&vm_mtx);
(void) vm_map_protect(&p->p_vmspace->vm_map,
addr, addr+size, prot, FALSE);
- mtx_unlock(&vm_mtx);
}
break;
diff --git a/sys/kern/link_elf.c b/sys/kern/link_elf.c
index da7462a..439b9de 100644
--- a/sys/kern/link_elf.c
+++ b/sys/kern/link_elf.c
@@ -531,6 +531,8 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
int symcnt;
int strcnt;
+ GIANT_REQUIRED;
+
shdr = NULL;
lf = NULL;
@@ -657,10 +659,8 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
ef = (elf_file_t) lf;
#ifdef SPARSE_MAPPING
- mtx_lock(&vm_mtx);
ef->object = vm_object_allocate(OBJT_DEFAULT, mapsize >> PAGE_SHIFT);
if (ef->object == NULL) {
- mtx_unlock(&vm_mtx);
free(ef, M_LINKER);
error = ENOMEM;
goto out;
@@ -673,11 +673,9 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
VM_PROT_ALL, VM_PROT_ALL, 0);
if (error) {
vm_object_deallocate(ef->object);
- mtx_unlock(&vm_mtx);
ef->object = 0;
goto out;
}
- mtx_unlock(&vm_mtx);
#else
ef->address = malloc(mapsize, M_LINKER, M_WAITOK);
if (!ef->address) {
@@ -705,12 +703,10 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
/*
* Wire down the pages
*/
- mtx_lock(&vm_mtx);
vm_map_pageable(kernel_map,
(vm_offset_t) segbase,
(vm_offset_t) segbase + segs[i]->p_memsz,
FALSE);
- mtx_unlock(&vm_mtx);
#endif
}
@@ -834,12 +830,10 @@ link_elf_unload_file(linker_file_t file)
}
#ifdef SPARSE_MAPPING
if (ef->object) {
- mtx_lock(&vm_mtx);
vm_map_remove(kernel_map, (vm_offset_t) ef->address,
(vm_offset_t) ef->address
+ (ef->object->size << PAGE_SHIFT));
vm_object_deallocate(ef->object);
- mtx_unlock(&vm_mtx);
}
#else
if (ef->address)
diff --git a/sys/kern/link_elf_obj.c b/sys/kern/link_elf_obj.c
index da7462a..439b9de 100644
--- a/sys/kern/link_elf_obj.c
+++ b/sys/kern/link_elf_obj.c
@@ -531,6 +531,8 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
int symcnt;
int strcnt;
+ GIANT_REQUIRED;
+
shdr = NULL;
lf = NULL;
@@ -657,10 +659,8 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
ef = (elf_file_t) lf;
#ifdef SPARSE_MAPPING
- mtx_lock(&vm_mtx);
ef->object = vm_object_allocate(OBJT_DEFAULT, mapsize >> PAGE_SHIFT);
if (ef->object == NULL) {
- mtx_unlock(&vm_mtx);
free(ef, M_LINKER);
error = ENOMEM;
goto out;
@@ -673,11 +673,9 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
VM_PROT_ALL, VM_PROT_ALL, 0);
if (error) {
vm_object_deallocate(ef->object);
- mtx_unlock(&vm_mtx);
ef->object = 0;
goto out;
}
- mtx_unlock(&vm_mtx);
#else
ef->address = malloc(mapsize, M_LINKER, M_WAITOK);
if (!ef->address) {
@@ -705,12 +703,10 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
/*
* Wire down the pages
*/
- mtx_lock(&vm_mtx);
vm_map_pageable(kernel_map,
(vm_offset_t) segbase,
(vm_offset_t) segbase + segs[i]->p_memsz,
FALSE);
- mtx_unlock(&vm_mtx);
#endif
}
@@ -834,12 +830,10 @@ link_elf_unload_file(linker_file_t file)
}
#ifdef SPARSE_MAPPING
if (ef->object) {
- mtx_lock(&vm_mtx);
vm_map_remove(kernel_map, (vm_offset_t) ef->address,
(vm_offset_t) ef->address
+ (ef->object->size << PAGE_SHIFT));
vm_object_deallocate(ef->object);
- mtx_unlock(&vm_mtx);
}
#else
if (ef->address)
diff --git a/sys/kern/subr_blist.c b/sys/kern/subr_blist.c
index 061d151..6bb7ae7 100644
--- a/sys/kern/subr_blist.c
+++ b/sys/kern/subr_blist.c
@@ -71,6 +71,7 @@
#include <sys/kernel.h>
#include <sys/blist.h>
#include <sys/malloc.h>
+#include <sys/proc.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
diff --git a/sys/kern/sys_pipe.c b/sys/kern/sys_pipe.c
index 4136532..5ee4f5c 100644
--- a/sys/kern/sys_pipe.c
+++ b/sys/kern/sys_pipe.c
@@ -248,13 +248,14 @@ pipespace(cpipe, size)
caddr_t buffer;
int npages, error;
+ GIANT_REQUIRED;
+
npages = round_page(size)/PAGE_SIZE;
/*
* Create an object, I don't like the idea of paging to/from
* kernel_object.
* XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
*/
- mtx_lock(&vm_mtx);
object = vm_object_allocate(OBJT_DEFAULT, npages);
buffer = (caddr_t) vm_map_min(kernel_map);
@@ -269,13 +270,11 @@ pipespace(cpipe, size)
if (error != KERN_SUCCESS) {
vm_object_deallocate(object);
- mtx_unlock(&vm_mtx);
return (ENOMEM);
}
/* free old resources if we're resizing */
pipe_free_kmem(cpipe);
- mtx_unlock(&vm_mtx);
cpipe->pipe_buffer.object = object;
cpipe->pipe_buffer.buffer = buffer;
cpipe->pipe_buffer.size = size;
@@ -551,12 +550,13 @@ pipe_build_write_buffer(wpipe, uio)
int i;
vm_offset_t addr, endaddr, paddr;
+ GIANT_REQUIRED;
+
size = (u_int) uio->uio_iov->iov_len;
if (size > wpipe->pipe_buffer.size)
size = wpipe->pipe_buffer.size;
endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
- mtx_lock(&vm_mtx);
addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
vm_page_t m;
@@ -567,7 +567,6 @@ pipe_build_write_buffer(wpipe, uio)
for (j = 0; j < i; j++)
vm_page_unwire(wpipe->pipe_map.ms[j], 1);
- mtx_unlock(&vm_mtx);
return (EFAULT);
}
@@ -599,7 +598,6 @@ pipe_build_write_buffer(wpipe, uio)
pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
wpipe->pipe_map.npages);
- mtx_unlock(&vm_mtx);
/*
* and update the uio data
*/
@@ -622,7 +620,8 @@ pipe_destroy_write_buffer(wpipe)
{
int i;
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
+
if (wpipe->pipe_map.kva) {
pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);
@@ -636,7 +635,6 @@ pipe_destroy_write_buffer(wpipe)
}
for (i = 0; i < wpipe->pipe_map.npages; i++)
vm_page_unwire(wpipe->pipe_map.ms[i], 1);
- mtx_unlock(&vm_mtx);
}
/*
@@ -1167,8 +1165,8 @@ static void
pipe_free_kmem(cpipe)
struct pipe *cpipe;
{
+ GIANT_REQUIRED;
- mtx_assert(&vm_mtx, MA_OWNED);
if (cpipe->pipe_buffer.buffer != NULL) {
if (cpipe->pipe_buffer.size > PIPE_SIZE)
--nbigpipe;
@@ -1228,13 +1226,11 @@ pipeclose(cpipe)
/*
* free resources
*/
- mtx_lock(&vm_mtx);
pipe_free_kmem(cpipe);
/* XXX: erm, doesn't zalloc already have its own locks and
* not need the giant vm lock?
*/
zfree(pipe_zone, cpipe);
- mtx_unlock(&vm_mtx);
}
}
diff --git a/sys/kern/sysv_shm.c b/sys/kern/sysv_shm.c
index 1d96cff..96a4541 100644
--- a/sys/kern/sysv_shm.c
+++ b/sys/kern/sysv_shm.c
@@ -187,7 +187,8 @@ shm_deallocate_segment(shmseg)
struct shm_handle *shm_handle;
size_t size;
- mtx_assert(&vm_mtx, MA_OWNED); /* For vm_object_deallocate. */
+ GIANT_REQUIRED;
+
shm_handle = shmseg->shm_internal;
vm_object_deallocate(shm_handle->shm_object);
free((caddr_t)shm_handle, M_SHM);
@@ -207,8 +208,7 @@ shm_delete_mapping(p, shmmap_s)
int segnum, result;
size_t size;
- /* For vm_map_remove and shm_deallocate_segment. */
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
segnum = IPCID_TO_IX(shmmap_s->shmid);
shmseg = &shmsegs[segnum];
@@ -254,9 +254,7 @@ shmdt(p, uap)
break;
if (i == shminfo.shmseg)
return EINVAL;
- mtx_lock(&vm_mtx);
error = shm_delete_mapping(p, shmmap_s);
- mtx_unlock(&vm_mtx);
return error;
}
@@ -282,6 +280,8 @@ shmat(p, uap)
vm_size_t size;
int rv;
+ GIANT_REQUIRED;
+
if (!jail_sysvipc_allowed && jailed(p->p_ucred))
return (ENOSYS);
@@ -334,17 +334,14 @@ shmat(p, uap)
}
shm_handle = shmseg->shm_internal;
- mtx_lock(&vm_mtx);
vm_object_reference(shm_handle->shm_object);
rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0);
if (rv != KERN_SUCCESS) {
- mtx_unlock(&vm_mtx);
return ENOMEM;
}
vm_map_inherit(&p->p_vmspace->vm_map,
attach_va, attach_va + size, VM_INHERIT_SHARE);
- mtx_unlock(&vm_mtx);
shmmap_s->va = attach_va;
shmmap_s->shmid = uap->shmid;
@@ -434,6 +431,8 @@ shmctl(p, uap)
struct shmid_ds inbuf;
struct shmid_ds *shmseg;
+ GIANT_REQUIRED;
+
if (!jail_sysvipc_allowed && jailed(p->p_ucred))
return (ENOSYS);
@@ -470,9 +469,7 @@ shmctl(p, uap)
shmseg->shm_perm.key = IPC_PRIVATE;
shmseg->shm_perm.mode |= SHMSEG_REMOVED;
if (shmseg->shm_nattch <= 0) {
- mtx_lock(&vm_mtx);
shm_deallocate_segment(shmseg);
- mtx_unlock(&vm_mtx);
shm_last_free = IPCID_TO_IX(uap->shmid);
}
break;
@@ -539,6 +536,8 @@ shmget_allocate_segment(p, uap, mode)
struct shmid_ds *shmseg;
struct shm_handle *shm_handle;
+ GIANT_REQUIRED;
+
if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
return EINVAL;
if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
@@ -574,7 +573,6 @@ shmget_allocate_segment(p, uap, mode)
* We make sure that we have allocated a pager before we need
* to.
*/
- mtx_lock(&vm_mtx);
if (shm_use_phys) {
shm_handle->shm_object =
vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
@@ -584,7 +582,6 @@ shmget_allocate_segment(p, uap, mode)
}
vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);
- mtx_unlock(&vm_mtx);
shmseg->shm_internal = shm_handle;
shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
@@ -680,7 +677,8 @@ shmexit_myhook(p)
struct shmmap_state *shmmap_s;
int i;
- mtx_assert(&vm_mtx, MA_OWNED); /* For shm_delete_mapping. */
+ GIANT_REQUIRED;
+
shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
if (shmmap_s->shmid != -1)
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index 2718591..b86d17f 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -1489,8 +1489,9 @@ sf_buf_free(caddr_t addr, void *args)
struct sf_buf *sf;
struct vm_page *m;
+ GIANT_REQUIRED;
+
sf = dtosf(addr);
- mtx_lock(&vm_mtx);
pmap_qremove((vm_offset_t)addr, 1);
m = sf->m;
vm_page_unwire(m, 0);
@@ -1501,7 +1502,6 @@ sf_buf_free(caddr_t addr, void *args)
*/
if (m->wire_count == 0 && m->object == NULL)
vm_page_free(m);
- mtx_unlock(&vm_mtx);
sf->m = NULL;
mtx_lock(&sf_freelist.sf_lock);
SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
@@ -1536,6 +1536,8 @@ sendfile(struct proc *p, struct sendfile_args *uap)
off_t off, xfsize, sbytes = 0;
int error = 0, s;
+ GIANT_REQUIRED;
+
vp = NULL;
/*
* Do argument checking. Must be a regular file in, stream
@@ -1646,19 +1648,16 @@ retry_lookup:
*
* Wait and loop if busy.
*/
- mtx_lock(&vm_mtx);
pg = vm_page_lookup(obj, pindex);
if (pg == NULL) {
pg = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL);
if (pg == NULL) {
VM_WAIT;
- mtx_unlock(&vm_mtx);
goto retry_lookup;
}
vm_page_wakeup(pg);
} else if (vm_page_sleep_busy(pg, TRUE, "sfpbsy")) {
- mtx_unlock(&vm_mtx);
goto retry_lookup;
}
@@ -1683,7 +1682,6 @@ retry_lookup:
* completes.
*/
vm_page_io_start(pg);
- mtx_unlock(&vm_mtx);
/*
* Get the page from backing store.
@@ -1702,7 +1700,6 @@ retry_lookup:
error = VOP_READ(vp, &auio, IO_VMIO | ((MAXBSIZE / bsize) << 16),
p->p_ucred);
VOP_UNLOCK(vp, 0, p);
- mtx_lock(&vm_mtx);
vm_page_flag_clear(pg, PG_ZERO);
vm_page_io_finish(pg);
if (error) {
@@ -1717,7 +1714,6 @@ retry_lookup:
vm_page_busy(pg);
vm_page_free(pg);
}
- mtx_unlock(&vm_mtx);
sbunlock(&so->so_snd);
goto done;
}
@@ -1728,13 +1724,10 @@ retry_lookup:
* Get a sendfile buf. We usually wait as long as necessary,
* but this wait can be interrupted.
*/
- mtx_unlock(&vm_mtx);
if ((sf = sf_buf_alloc()) == NULL) {
- mtx_lock(&vm_mtx);
vm_page_unwire(pg, 0);
if (pg->wire_count == 0 && pg->object == NULL)
vm_page_free(pg);
- mtx_unlock(&vm_mtx);
sbunlock(&so->so_snd);
error = EINTR;
goto done;
@@ -1744,10 +1737,8 @@ retry_lookup:
* Allocate a kernel virtual page and insert the physical page
* into it.
*/
- mtx_lock(&vm_mtx);
sf->m = pg;
pmap_qenter(sf->kva, &pg, 1);
- mtx_unlock(&vm_mtx);
/*
* Get an mbuf header and set it up as having external storage.
*/
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 246fc4c..94baa5a 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -281,8 +281,6 @@ waitrunningbufspace(void)
* Called when a buffer is extended. This function clears the B_CACHE
* bit if the newly extended portion of the buffer does not contain
* valid data.
- *
- * must be called with vm_mtx held
*/
static __inline__
void
@@ -290,6 +288,8 @@ vfs_buf_test_cache(struct buf *bp,
vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
vm_page_t m)
{
+ GIANT_REQUIRED;
+
if (bp->b_flags & B_CACHE) {
int base = (foff + off) & PAGE_MASK;
if (vm_page_is_valid(m, base, size) == 0)
@@ -340,6 +340,8 @@ bufinit(void)
struct buf *bp;
int i;
+ GIANT_REQUIRED;
+
TAILQ_INIT(&bswlist);
LIST_INIT(&invalhash);
mtx_init(&buftimelock, "buftime lock", MTX_DEF);
@@ -428,14 +430,11 @@ bufinit(void)
* from buf_daemon.
*/
- mtx_lock(&vm_mtx);
bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
bogus_page = vm_page_alloc(kernel_object,
((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
VM_ALLOC_NORMAL);
cnt.v_wire_count++;
- mtx_unlock(&vm_mtx);
-
}
/*
@@ -445,23 +444,19 @@ bufinit(void)
* buffer_map.
*
* Since this call frees up buffer space, we call bufspacewakeup().
- *
- * Must be called without the vm_mtx.
*/
static void
bfreekva(struct buf * bp)
{
+ GIANT_REQUIRED;
- mtx_assert(&vm_mtx, MA_NOTOWNED);
if (bp->b_kvasize) {
++buffreekvacnt;
bufspace -= bp->b_kvasize;
- mtx_lock(&vm_mtx);
vm_map_delete(buffer_map,
(vm_offset_t) bp->b_kvabase,
(vm_offset_t) bp->b_kvabase + bp->b_kvasize
);
- mtx_unlock(&vm_mtx);
bp->b_kvasize = 0;
bufspacewakeup();
}
@@ -478,6 +473,8 @@ bremfree(struct buf * bp)
int s = splbio();
int old_qindex = bp->b_qindex;
+ GIANT_REQUIRED;
+
if (bp->b_qindex != QUEUE_NONE) {
KASSERT(BUF_REFCNT(bp) == 1, ("bremfree: bp %p not locked",bp));
TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
@@ -789,6 +786,8 @@ vfs_backgroundwritedone(bp)
void
bdwrite(struct buf * bp)
{
+ GIANT_REQUIRED;
+
if (BUF_REFCNT(bp) == 0)
panic("bdwrite: buffer is not busy");
@@ -817,7 +816,6 @@ bdwrite(struct buf * bp)
VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
}
- mtx_lock(&vm_mtx);
/*
* Set the *dirty* buffer range based upon the VM system dirty pages.
*/
@@ -831,7 +829,6 @@ bdwrite(struct buf * bp)
* out on the next sync, or perhaps the cluster will be completed.
*/
vfs_clean_pages(bp);
- mtx_unlock(&vm_mtx);
bqrelse(bp);
/*
@@ -985,15 +982,14 @@ buf_dirty_count_severe(void)
* Release a busy buffer and, if requested, free its resources. The
* buffer will be stashed in the appropriate bufqueue[] allowing it
* to be accessed later as a cache entity or reused for other purposes.
- *
- * vm_mtx must be not be held.
*/
void
brelse(struct buf * bp)
{
int s;
- mtx_assert(&vm_mtx, MA_NOTOWNED);
+ GIANT_REQUIRED;
+
KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
s = splbio();
@@ -1103,7 +1099,6 @@ brelse(struct buf * bp)
resid = bp->b_bufsize;
foff = bp->b_offset;
- mtx_lock(&vm_mtx);
for (i = 0; i < bp->b_npages; i++) {
int had_bogus = 0;
@@ -1115,12 +1110,10 @@ brelse(struct buf * bp)
* now.
*/
if (m == bogus_page) {
- mtx_unlock(&vm_mtx);
VOP_GETVOBJECT(vp, &obj);
poff = OFF_TO_IDX(bp->b_offset);
had_bogus = 1;
- mtx_lock(&vm_mtx);
for (j = i; j < bp->b_npages; j++) {
vm_page_t mtmp;
mtmp = bp->b_pages[j];
@@ -1154,14 +1147,11 @@ brelse(struct buf * bp)
if (bp->b_flags & (B_INVAL | B_RELBUF))
vfs_vmio_release(bp);
- mtx_unlock(&vm_mtx);
} else if (bp->b_flags & B_VMIO) {
if (bp->b_flags & (B_INVAL | B_RELBUF)) {
- mtx_lock(&vm_mtx);
vfs_vmio_release(bp);
- mtx_unlock(&vm_mtx);
}
}
@@ -1326,9 +1316,6 @@ bqrelse(struct buf * bp)
splx(s);
}
-/*
- * Must be called with vm_mtx held.
- */
static void
vfs_vmio_release(bp)
struct buf *bp;
@@ -1336,7 +1323,8 @@ vfs_vmio_release(bp)
int i;
vm_page_t m;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
bp->b_pages[i] = NULL;
@@ -1372,8 +1360,6 @@ vfs_vmio_release(bp)
}
}
pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
-
- /* could drop vm_mtx here */
if (bp->b_bufsize) {
bufspacewakeup();
@@ -1527,6 +1513,8 @@ getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
int nqindex;
static int flushingbufs;
+ GIANT_REQUIRED;
+
/*
* We can't afford to block since we might be holding a vnode lock,
* which may prevent system daemons from running. We deal with
@@ -1646,9 +1634,7 @@ restart:
if (qindex == QUEUE_CLEAN) {
if (bp->b_flags & B_VMIO) {
bp->b_flags &= ~B_ASYNC;
- mtx_lock(&vm_mtx);
vfs_vmio_release(bp);
- mtx_unlock(&vm_mtx);
}
if (bp->b_vp)
brelvp(bp);
@@ -1771,14 +1757,12 @@ restart:
bfreekva(bp);
- mtx_lock(&vm_mtx);
if (vm_map_findspace(buffer_map,
vm_map_min(buffer_map), maxsize, &addr)) {
/*
* Uh oh. Buffer map is to fragmented. We
* must defragment the map.
*/
- mtx_unlock(&vm_mtx);
++bufdefragcnt;
defrag = 1;
bp->b_flags |= B_INVAL;
@@ -1795,7 +1779,6 @@ restart:
bufspace += bp->b_kvasize;
++bufreusecnt;
}
- mtx_unlock(&vm_mtx);
}
bp->b_data = bp->b_kvabase;
}
@@ -1961,6 +1944,8 @@ inmem(struct vnode * vp, daddr_t blkno)
vm_page_t m;
vm_ooffset_t off;
+ GIANT_REQUIRED;
+
if (incore(vp, blkno))
return 1;
if (vp->v_mount == NULL)
@@ -1973,7 +1958,6 @@ inmem(struct vnode * vp, daddr_t blkno)
size = vp->v_mount->mnt_stat.f_iosize;
off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
- mtx_lock(&vm_mtx);
for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
if (!m)
@@ -1985,11 +1969,9 @@ inmem(struct vnode * vp, daddr_t blkno)
(vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
goto notinmem;
}
- mtx_unlock(&vm_mtx);
return 1;
notinmem:
- mtx_unlock(&vm_mtx);
return (0);
}
@@ -2003,8 +1985,6 @@ notinmem:
*
* This routine is primarily used by NFS, but is generalized for the
* B_VMIO case.
- *
- * Must be called with vm_mtx
*/
static void
vfs_setdirty(struct buf *bp)
@@ -2012,7 +1992,7 @@ vfs_setdirty(struct buf *bp)
int i;
vm_object_t object;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
/*
* Degenerate case - empty buffer
*/
@@ -2365,6 +2345,8 @@ allocbuf(struct buf *bp, int size)
int newbsize, mbsize;
int i;
+ GIANT_REQUIRED;
+
if (BUF_REFCNT(bp) == 0)
panic("allocbuf: buffer not busy");
@@ -2487,7 +2469,6 @@ allocbuf(struct buf *bp, int size)
* DEV_BSIZE aligned existing buffer size. Figure out
* if we have to remove any pages.
*/
- mtx_lock(&vm_mtx);
if (desiredpages < bp->b_npages) {
for (i = desiredpages; i < bp->b_npages; i++) {
/*
@@ -2508,7 +2489,6 @@ allocbuf(struct buf *bp, int size)
(desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
bp->b_npages = desiredpages;
}
- mtx_unlock(&vm_mtx);
} else if (size > bp->b_bcount) {
/*
* We are growing the buffer, possibly in a
@@ -2529,7 +2509,6 @@ allocbuf(struct buf *bp, int size)
vp = bp->b_vp;
VOP_GETVOBJECT(vp, &obj);
- mtx_lock(&vm_mtx);
while (bp->b_npages < desiredpages) {
vm_page_t m;
vm_pindex_t pi;
@@ -2639,8 +2618,6 @@ allocbuf(struct buf *bp, int size)
bp->b_npages
);
- mtx_unlock(&vm_mtx);
-
bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
(vm_offset_t)(bp->b_offset & PAGE_MASK));
}
@@ -2718,6 +2695,8 @@ bufdone(struct buf *bp)
int s, error;
void (*biodone) __P((struct buf *));
+ GIANT_REQUIRED;
+
s = splbio();
KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy %d", bp, BUF_REFCNT(bp)));
@@ -2778,7 +2757,6 @@ bufdone(struct buf *bp)
if (error) {
panic("biodone: no object");
}
- mtx_lock(&vm_mtx);
#if defined(VFS_BIO_DEBUG)
if (obj->paging_in_progress < bp->b_npages) {
printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
@@ -2867,7 +2845,6 @@ bufdone(struct buf *bp)
}
if (obj)
vm_object_pip_wakeupn(obj, 0);
- mtx_unlock(&vm_mtx);
}
/*
@@ -2891,15 +2868,14 @@ bufdone(struct buf *bp)
* This routine is called in lieu of iodone in the case of
* incomplete I/O. This keeps the busy status for pages
* consistant.
- *
- * vm_mtx should not be held
*/
void
vfs_unbusy_pages(struct buf * bp)
{
int i;
- mtx_assert(&vm_mtx, MA_NOTOWNED);
+ GIANT_REQUIRED;
+
runningbufwakeup(bp);
if (bp->b_flags & B_VMIO) {
struct vnode *vp = bp->b_vp;
@@ -2907,7 +2883,6 @@ vfs_unbusy_pages(struct buf * bp)
VOP_GETVOBJECT(vp, &obj);
- mtx_lock(&vm_mtx);
for (i = 0; i < bp->b_npages; i++) {
vm_page_t m = bp->b_pages[i];
@@ -2924,7 +2899,6 @@ vfs_unbusy_pages(struct buf * bp)
vm_page_io_finish(m);
}
vm_object_pip_wakeupn(obj, 0);
- mtx_unlock(&vm_mtx);
}
}
@@ -2935,15 +2909,13 @@ vfs_unbusy_pages(struct buf * bp)
* range is restricted to the buffer's size.
*
* This routine is typically called after a read completes.
- *
- * vm_mtx should be held
*/
static void
vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
{
vm_ooffset_t soff, eoff;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
/*
* Start and end offsets in buffer. eoff - soff may not cross a
* page boundry or cross the end of the buffer. The end of the
@@ -2979,15 +2951,14 @@ vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
* Since I/O has not been initiated yet, certain buffer flags
* such as BIO_ERROR or B_INVAL may be in an inconsistant state
* and should be ignored.
- *
- * vm_mtx should not be held
*/
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
int i, bogus;
- mtx_assert(&vm_mtx, MA_NOTOWNED);
+ GIANT_REQUIRED;
+
if (bp->b_flags & B_VMIO) {
struct vnode *vp = bp->b_vp;
vm_object_t obj;
@@ -2997,7 +2968,6 @@ vfs_busy_pages(struct buf * bp, int clear_modify)
foff = bp->b_offset;
KASSERT(bp->b_offset != NOOFFSET,
("vfs_busy_pages: no buffer offset"));
- mtx_lock(&vm_mtx);
vfs_setdirty(bp);
retry:
@@ -3045,7 +3015,6 @@ retry:
}
if (bogus)
pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
- mtx_unlock(&vm_mtx);
}
}
@@ -3056,15 +3025,14 @@ retry:
*
* Note that while we only really need to clean through to b_bcount, we
* just go ahead and clean through to b_bufsize.
- *
- * should be called with vm_mtx held
*/
static void
vfs_clean_pages(struct buf * bp)
{
int i;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (bp->b_flags & B_VMIO) {
vm_ooffset_t foff;
@@ -3132,9 +3100,6 @@ vfs_bio_set_validclean(struct buf *bp, int base, int size)
*
* Note that while we only theoretically need to clear through b_bcount,
* we go ahead and clear through b_bufsize.
- *
- * We'll get vm_mtx here for safety if processing a VMIO buffer.
- * I don't think vm_mtx is needed, but we're twiddling vm_page flags.
*/
void
@@ -3142,8 +3107,9 @@ vfs_bio_clrbuf(struct buf *bp) {
int i, mask = 0;
caddr_t sa, ea;
+ GIANT_REQUIRED;
+
if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
- mtx_lock(&vm_mtx);
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
@@ -3155,7 +3121,6 @@ vfs_bio_clrbuf(struct buf *bp) {
}
bp->b_pages[0]->valid |= mask;
bp->b_resid = 0;
- mtx_unlock(&vm_mtx);
return;
}
ea = sa = bp->b_data;
@@ -3183,7 +3148,6 @@ vfs_bio_clrbuf(struct buf *bp) {
vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
}
bp->b_resid = 0;
- mtx_unlock(&vm_mtx);
} else {
clrbuf(bp);
}
@@ -3193,8 +3157,6 @@ vfs_bio_clrbuf(struct buf *bp) {
* vm_hold_load_pages and vm_hold_free_pages get pages into
* a buffers address space. The pages are anonymous and are
* not associated with a file object.
- *
- * vm_mtx should not be held
*/
static void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
@@ -3203,16 +3165,14 @@ vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
vm_page_t p;
int index;
- mtx_assert(&vm_mtx, MA_NOTOWNED);
+ GIANT_REQUIRED;
+
to = round_page(to);
from = round_page(from);
index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
- mtx_lock(&vm_mtx);
for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
-
tryagain:
-
/*
* note: must allocate system pages since blocking here
* could intefere with paging I/O, no matter which
@@ -3234,7 +3194,6 @@ tryagain:
vm_page_wakeup(p);
}
bp->b_npages = index;
- mtx_unlock(&vm_mtx);
}
void
@@ -3244,12 +3203,12 @@ vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
vm_page_t p;
int index, newnpages;
- mtx_assert(&vm_mtx, MA_NOTOWNED);
+ GIANT_REQUIRED;
+
from = round_page(from);
to = round_page(to);
newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
- mtx_lock(&vm_mtx);
for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
p = bp->b_pages[index];
if (p && (index < bp->b_npages)) {
@@ -3265,7 +3224,6 @@ vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
}
}
bp->b_npages = newnpages;
- mtx_unlock(&vm_mtx);
}
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
index c9c09cb..b685740 100644
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -320,6 +320,8 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
daddr_t bn;
int i, inc, j;
+ GIANT_REQUIRED;
+
KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
("cluster_rbuild: size %ld != filesize %ld\n",
size, vp->v_mount->mnt_stat.f_iosize));
@@ -433,7 +435,6 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
BUF_KERNPROC(tbp);
TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
tbp, b_cluster.cluster_entry);
- mtx_lock(&vm_mtx);
for (j = 0; j < tbp->b_npages; j += 1) {
vm_page_t m;
m = tbp->b_pages[j];
@@ -447,12 +448,10 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
tbp->b_pages[j] = bogus_page;
}
- mtx_unlock(&vm_mtx);
bp->b_bcount += tbp->b_bcount;
bp->b_bufsize += tbp->b_bufsize;
}
- mtx_lock(&vm_mtx);
for(j=0;j<bp->b_npages;j++) {
if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) ==
VM_PAGE_BITS_ALL)
@@ -465,7 +464,6 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
(vm_page_t *)bp->b_pages, bp->b_npages);
- mtx_unlock(&vm_mtx);
return (bp);
}
@@ -482,15 +480,15 @@ cluster_callback(bp)
struct buf *nbp, *tbp;
int error = 0;
+ GIANT_REQUIRED;
+
/*
* Must propogate errors to all the components.
*/
if (bp->b_ioflags & BIO_ERROR)
error = bp->b_error;
- mtx_lock(&vm_mtx);
pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
- mtx_unlock(&vm_mtx);
/*
* Move memory from the large cluster buffer into the component
* buffers and mark IO as done on these.
@@ -724,6 +722,8 @@ cluster_wbuild(vp, size, start_lbn, len)
int totalwritten = 0;
int dbsize = btodb(size);
+ GIANT_REQUIRED;
+
while (len > 0) {
s = splbio();
/*
@@ -866,7 +866,6 @@ cluster_wbuild(vp, size, start_lbn, len)
}
}
- mtx_lock(&vm_mtx);
for (j = 0; j < tbp->b_npages; j += 1) {
m = tbp->b_pages[j];
vm_page_io_start(m);
@@ -877,7 +876,6 @@ cluster_wbuild(vp, size, start_lbn, len)
bp->b_npages++;
}
}
- mtx_unlock(&vm_mtx);
}
bp->b_bcount += size;
bp->b_bufsize += size;
@@ -896,10 +894,8 @@ cluster_wbuild(vp, size, start_lbn, len)
tbp, b_cluster.cluster_entry);
}
finishcluster:
- mtx_lock(&vm_mtx);
pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
(vm_page_t *) bp->b_pages, bp->b_npages);
- mtx_unlock(&vm_mtx);
if (bp->b_bufsize > bp->b_kvasize)
panic(
"cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index dc5b9c9..1a1f55b 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -527,6 +527,8 @@ vop_stdcreatevobject(ap)
vm_object_t object;
int error = 0;
+ GIANT_REQUIRED;
+
if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
return (0);
@@ -535,7 +537,6 @@ retry:
if (vp->v_type == VREG || vp->v_type == VDIR) {
if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
goto retn;
- mtx_lock(&vm_mtx);
object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
} else if (devsw(vp->v_rdev) != NULL) {
/*
@@ -543,7 +544,6 @@ retry:
* for a disk vnode. This should be fixed, but doesn't
* cause any problems (yet).
*/
- mtx_lock(&vm_mtx);
object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
} else {
goto retn;
@@ -553,21 +553,14 @@ retry:
* that the object is associated with the vp.
*/
object->ref_count--;
- mtx_unlock(&vm_mtx);
vp->v_usecount--;
} else {
- /*
- * XXX: safe to hold vm mutex through VOP_UNLOCK?
- */
- mtx_lock(&vm_mtx);
if (object->flags & OBJ_DEAD) {
VOP_UNLOCK(vp, 0, p);
- msleep(object, VM_OBJECT_MTX(object), PVM, "vodead", 0);
- mtx_unlock(&vm_mtx);
+ tsleep(object, PVM, "vodead", 0);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
goto retry;
}
- mtx_unlock(&vm_mtx);
}
KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
@@ -586,10 +579,11 @@ vop_stddestroyvobject(ap)
struct vnode *vp = ap->a_vp;
vm_object_t obj = vp->v_object;
+ GIANT_REQUIRED;
+
if (vp->v_object == NULL)
return (0);
- mtx_lock(&vm_mtx);
if (obj->ref_count == 0) {
/*
* vclean() may be called twice. The first time
@@ -604,7 +598,6 @@ vop_stddestroyvobject(ap)
*/
vm_pager_deallocate(obj);
}
- mtx_unlock(&vm_mtx);
return (0);
}
diff --git a/sys/kern/vfs_extattr.c b/sys/kern/vfs_extattr.c
index 850914c..c35a73d 100644
--- a/sys/kern/vfs_extattr.c
+++ b/sys/kern/vfs_extattr.c
@@ -2763,6 +2763,8 @@ fsync(p, uap)
vm_object_t obj;
int error;
+ GIANT_REQUIRED;
+
if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
vp = (struct vnode *)fp->f_data;
@@ -2770,9 +2772,7 @@ fsync(p, uap)
return (error);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
if (VOP_GETVOBJECT(vp, &obj) == 0) {
- mtx_lock(&vm_mtx);
vm_object_page_clean(obj, 0, 0, 0);
- mtx_unlock(&vm_mtx);
}
error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p);
#ifdef SOFTUPDATES
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 04941e3..b421902 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -711,7 +711,7 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
int s, error;
vm_object_t object;
- mtx_assert(&vm_mtx, MA_NOTOWNED);
+ GIANT_REQUIRED;
if (flags & V_SAVE) {
s = splbio();
@@ -799,10 +799,8 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
*/
mtx_lock(&vp->v_interlock);
if (VOP_GETVOBJECT(vp, &object) == 0) {
- mtx_lock(&vm_mtx);
vm_object_page_remove(object, 0, 0,
(flags & V_SAVE) ? TRUE : FALSE);
- mtx_unlock(&vm_mtx);
}
mtx_unlock(&vp->v_interlock);
@@ -1136,8 +1134,6 @@ speedup_syncer()
* Also sets B_PAGING flag to indicate that vnode is not fully associated
* with the buffer. i.e. the bp has not been linked into the vnode or
* ref-counted.
- *
- * Doesn't block, only vnode seems to need a lock.
*/
void
pbgetvp(vp, bp)
@@ -1560,7 +1556,8 @@ vput(vp)
{
struct proc *p = curproc; /* XXX */
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
+
KASSERT(vp != NULL, ("vput: null vp"));
mtx_lock(&vp->v_interlock);
/* Skip this v_writecount check if we're going to panic below. */
@@ -2363,6 +2360,8 @@ vfs_msync(struct mount *mp, int flags) {
struct vm_object *obj;
int anyio, tries;
+ GIANT_REQUIRED;
+
tries = 5;
loop:
anyio = 0;
@@ -2394,11 +2393,9 @@ loop:
if (!vget(vp,
LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ, curproc)) {
if (VOP_GETVOBJECT(vp, &obj) == 0) {
- mtx_lock(&vm_mtx);
vm_object_page_clean(obj, 0, 0,
flags == MNT_WAIT ?
OBJPC_SYNC : OBJPC_NOSYNC);
- mtx_unlock(&vm_mtx);
anyio = 1;
}
vput(vp);
@@ -2427,8 +2424,7 @@ vfs_object_create(vp, p, cred)
struct proc *p;
struct ucred *cred;
{
-
- mtx_assert(&vm_mtx, MA_NOTOWNED);
+ GIANT_REQUIRED;
return (VOP_CREATEVOBJECT(vp, cred, p));
}
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 850914c..c35a73d 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -2763,6 +2763,8 @@ fsync(p, uap)
vm_object_t obj;
int error;
+ GIANT_REQUIRED;
+
if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
vp = (struct vnode *)fp->f_data;
@@ -2770,9 +2772,7 @@ fsync(p, uap)
return (error);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
if (VOP_GETVOBJECT(vp, &obj) == 0) {
- mtx_lock(&vm_mtx);
vm_object_page_clean(obj, 0, 0, 0);
- mtx_unlock(&vm_mtx);
}
error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p);
#ifdef SOFTUPDATES