author     alfred <alfred@FreeBSD.org>     2001-05-19 01:28:09 +0000
committer  alfred <alfred@FreeBSD.org>     2001-05-19 01:28:09 +0000
commit     a3f0842419d98da211706f921fc626e160cd960b
tree       e86922a5639c32e1242d4f3088fc487f3be5b236 /sys/kern/sysv_shm.c
parent     9eda9187f024233436e6a743f13bd938b1a0f19c
Introduce a global lock for the vm subsystem (vm_mtx).
vm_mtx does not recurse and is required for most low-level
vm operations.
Faults cannot be taken without holding Giant.
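For context, a minimal sketch of how a global, non-recursing subsystem mutex
is declared and set up with the mutex(9) API of this era; the initialization
site, the example_ function name, and the exact mtx_init() arguments are
assumptions for illustration, not code taken from this commit:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/* Sketch only: a global lock covering the vm subsystem. */
struct mtx vm_mtx;

static void
example_vm_mtx_init(void)
{
        /*
         * MTX_DEF without MTX_RECURSE yields a non-recursing sleep
         * mutex, matching the "does not recurse" behavior described
         * above.
         */
        mtx_init(&vm_mtx, "vm", MTX_DEF);
}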
Memory subsystems can now call the base page allocators safely.
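As an illustration of what calling the base page allocators under the new
lock might look like, here is a hedged sketch; example_alloc_page(), its
parameters, and the VM_ALLOC_NORMAL request class are assumptions, not code
from this change:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

extern struct mtx vm_mtx;

/*
 * Sketch only: a memory subsystem asking the base page allocator
 * for a page while holding vm_mtx.
 */
static vm_page_t
example_alloc_page(vm_object_t object, vm_pindex_t pindex)
{
        vm_page_t m;

        mtx_lock(&vm_mtx);
        m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
        mtx_unlock(&vm_mtx);
        return (m);
}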
Almost all atomic ops were removed as they are covered under the
vm mutex.
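The removal of atomic ops follows the usual pattern: once a field is only
touched with vm_mtx held, a plain read-modify-write suffices. A hedged
before/after sketch with an illustrative counter (the field and function
names are not from this commit):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/atomic.h>

extern struct mtx vm_mtx;

static u_int example_wired_count;       /* illustrative counter */

/* Before: lock-free update via an atomic op. */
static void
example_wire_atomic(void)
{
        atomic_add_int(&example_wired_count, 1);
}

/* After: plain update, serialized by vm_mtx. */
static void
example_wire_locked(void)
{
        mtx_lock(&vm_mtx);
        example_wired_count++;
        mtx_unlock(&vm_mtx);
}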
Alpha and ia64 now need to catch up to i386's trap handlers.
FFS and NFS have been tested, other filesystems will need minor
changes (grabbing the vm lock when twiddling page properties).
Reviewed (partially) by: jake, jhb
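For other filesystems, the "minor changes" mentioned above amount to
bracketing direct page-property manipulation with the new lock. A hedged
sketch; the function name and the specific vm_page_flag_set()/PG_REFERENCED
usage are illustrative assumptions, not taken from this change:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

extern struct mtx vm_mtx;

/*
 * Sketch only: a filesystem twiddling page properties now takes
 * vm_mtx around the operation.
 */
static void
example_fs_touch_page(vm_page_t m)
{
        mtx_lock(&vm_mtx);
        vm_page_flag_set(m, PG_REFERENCED);
        mtx_unlock(&vm_mtx);
}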
Diffstat (limited to 'sys/kern/sysv_shm.c')
-rw-r--r--  sys/kern/sysv_shm.c  6
1 file changed, 6 insertions, 0 deletions
diff --git a/sys/kern/sysv_shm.c b/sys/kern/sysv_shm.c
index fab53a8..0a9abda 100644
--- a/sys/kern/sysv_shm.c
+++ b/sys/kern/sysv_shm.c
@@ -43,6 +43,7 @@
 #include <sys/shm.h>
 #include <sys/proc.h>
 #include <sys/malloc.h>
+#include <sys/mutex.h>
 #include <sys/mman.h>
 #include <sys/stat.h>
 #include <sys/syscall.h>
@@ -314,14 +315,17 @@ shmat(p, uap)
         }
 
         shm_handle = shmseg->shm_internal;
+        mtx_lock(&vm_mtx);
         vm_object_reference(shm_handle->shm_object);
         rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
                 0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0);
         if (rv != KERN_SUCCESS) {
+                mtx_unlock(&vm_mtx);
                 return ENOMEM;
         }
         vm_map_inherit(&p->p_vmspace->vm_map,
                 attach_va, attach_va + size, VM_INHERIT_SHARE);
+        mtx_unlock(&vm_mtx);
 
         shmmap_s->va = attach_va;
         shmmap_s->shmid = uap->shmid;
@@ -549,6 +553,7 @@ shmget_allocate_segment(p, uap, mode)
          * We make sure that we have allocated a pager before we need
          * to.
          */
+        mtx_lock(&vm_mtx);
         if (shm_use_phys) {
                 shm_handle->shm_object =
                     vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
@@ -558,6 +563,7 @@
         }
         vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
         vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);
+        mtx_unlock(&vm_mtx);
 
         shmseg->shm_internal = shm_handle;
         shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;