author     alfred <alfred@FreeBSD.org>	2001-05-19 01:28:09 +0000
committer  alfred <alfred@FreeBSD.org>	2001-05-19 01:28:09 +0000
commit     a3f0842419d98da211706f921fc626e160cd960b
tree       e86922a5639c32e1242d4f3088fc487f3be5b236 /sys/vm/vm_object.h
parent     9eda9187f024233436e6a743f13bd938b1a0f19c
Introduce a global lock for the vm subsystem (vm_mtx).
vm_mtx does not recurse and is required for most low-level vm operations.

Faults cannot be taken without holding Giant.

Memory subsystems can now call the base page allocators safely.

Almost all atomic ops were removed, as they are now covered by the vm mutex.

Alpha and ia64 now need to catch up to i386's trap handlers.

FFS and NFS have been tested; other filesystems will need minor changes (grabbing the vm lock when twiddling page properties).

Reviewed (partially) by: jake, jhb
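As a caller-side illustration of the new discipline (a minimal sketch, not part of this commit: the helper name, the exact headers, and the assumption that vm_mtx is visible through the vm headers are mine), code that twiddles object state now takes the global vm_mtx first, which is why the inline helpers changed below can use plain, non-atomic updates:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_object.h>

/* Hypothetical helper: account for one paging operation on an object. */
static void
example_pip_start(vm_object_t object)
{
	mtx_lock(&vm_mtx);		/* global vm lock; assumed declared extern in the vm headers */
	vm_object_pip_add(object, 1);	/* plain increment; its mtx_assert() is satisfied */
	mtx_unlock(&vm_mtx);
}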
Diffstat (limited to 'sys/vm/vm_object.h')
-rw-r--r--  sys/vm/vm_object.h | 35 ++++++++++++++++++++++++++++-------
1 file changed, 28 insertions(+), 7 deletions(-)
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index ba4c026..2b29baf 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -169,34 +169,49 @@ extern vm_object_t kmem_object;
#ifdef _KERNEL
+/*
+ * For now a global vm lock.
+ */
+#define VM_OBJECT_MTX(object) (&vm_mtx)
+
static __inline void
vm_object_set_flag(vm_object_t object, u_short bits)
{
- atomic_set_short(&object->flags, bits);
+
+ mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
+ object->flags |= bits;
}
static __inline void
vm_object_clear_flag(vm_object_t object, u_short bits)
{
- atomic_clear_short(&object->flags, bits);
+
+ mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
+ object->flags &= ~bits;
}
static __inline void
vm_object_pip_add(vm_object_t object, short i)
{
- atomic_add_short(&object->paging_in_progress, i);
+
+ mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
+ object->paging_in_progress += i;
}
static __inline void
vm_object_pip_subtract(vm_object_t object, short i)
{
- atomic_subtract_short(&object->paging_in_progress, i);
+
+ mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
+ object->paging_in_progress -= i;
}
static __inline void
vm_object_pip_wakeup(vm_object_t object)
{
- atomic_subtract_short(&object->paging_in_progress, 1);
+
+ mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
+ object->paging_in_progress--;
if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
vm_object_clear_flag(object, OBJ_PIPWNT);
wakeup(object);
@@ -206,8 +221,10 @@ vm_object_pip_wakeup(vm_object_t object)
static __inline void
vm_object_pip_wakeupn(vm_object_t object, short i)
{
+
+ mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
if (i)
- atomic_subtract_short(&object->paging_in_progress, i);
+ object->paging_in_progress -= i;
if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
vm_object_clear_flag(object, OBJ_PIPWNT);
wakeup(object);
@@ -217,11 +234,13 @@ vm_object_pip_wakeupn(vm_object_t object, short i)
static __inline void
vm_object_pip_sleep(vm_object_t object, char *waitid)
{
+
+ mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
if (object->paging_in_progress) {
int s = splvm();
if (object->paging_in_progress) {
vm_object_set_flag(object, OBJ_PIPWNT);
- tsleep(object, PVM, waitid, 0);
+ msleep(object, VM_OBJECT_MTX(object), PVM, waitid, 0);
}
splx(s);
}
@@ -230,6 +249,8 @@ vm_object_pip_sleep(vm_object_t object, char *waitid)
static __inline void
vm_object_pip_wait(vm_object_t object, char *waitid)
{
+
+ mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
while (object->paging_in_progress)
vm_object_pip_sleep(object, waitid);
}
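The tsleep() to msleep() change above is what makes sleeping with the lock held safe: msleep() releases the mutex it is given for the duration of the sleep and reacquires it before returning, so a waiter does not hold vm_mtx away from the thread that will issue the wakeup. A caller-side sketch (same headers as the earlier sketch; the function and the wait-message string are hypothetical, not from this commit):

/* Wait for all paging activity on an object to drain, e.g. before teardown. */
static void
example_pip_drain(vm_object_t object)
{
	mtx_lock(&vm_mtx);
	vm_object_pip_wait(object, "vmopip");	/* msleep() drops vm_mtx while asleep */
	/* paging_in_progress is 0 and vm_mtx is held again at this point */
	mtx_unlock(&vm_mtx);
}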