author    alfred <alfred@FreeBSD.org>  2001-05-19 01:28:09 +0000
committer alfred <alfred@FreeBSD.org>  2001-05-19 01:28:09 +0000
commit    a3f0842419d98da211706f921fc626e160cd960b (patch)
tree      e86922a5639c32e1242d4f3088fc487f3be5b236 /sys/vm/vm_page.h
parent    9eda9187f024233436e6a743f13bd938b1a0f19c (diff)
Introduce a global lock for the vm subsystem (vm_mtx).
vm_mtx does not recurse and is required for most low-level vm operations.

Faults cannot be taken without holding Giant. Memory subsystems can now
call the base page allocators safely.

Almost all atomic ops were removed, as they are covered under the vm
mutex. Alpha and ia64 now need to catch up to i386's trap handlers.

FFS and NFS have been tested; other filesystems will need minor changes
(grabbing the vm lock when twiddling page properties).

Reviewed (partially) by: jake, jhb
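The shape of the change is visible in the hunks below: per-page atomic ops
become plain read-modify-write under the single vm_mtx, with an mtx_assert()
in each helper to catch unlocked callers. As a hedged illustration of the
calling convention this implies (the function below is hypothetical;
vm_mtx, mtx_lock(), and the vm_page_* helpers are the names from this diff):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Hypothetical caller, for illustration only: with the global vm_mtx
 * held once around the whole sequence, the helpers can use plain
 * increments and flag writes instead of atomic ops.
 */
static void
example_page_io(vm_page_t m)
{
	mtx_lock(&vm_mtx);		/* one lock for the vm subsystem */
	vm_page_io_start(m);		/* plain m->busy++ under vm_mtx */
	vm_page_flag_set(m, PG_WRITEABLE); /* plain m->flags |= bits */
	/* ... set up the I/O ... */
	vm_page_io_finish(m);		/* m->busy--, wakes waiters at 0 */
	mtx_unlock(&vm_mtx);
}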
Diffstat (limited to 'sys/vm/vm_page.h')
-rw-r--r--  sys/vm/vm_page.h  31
1 file changed, 25 insertions(+), 6 deletions(-)
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index dc8290e..e1c1cc4 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -305,19 +305,28 @@ extern long first_page; /* first physical page number */
(&vm_page_array[atop(pa) - first_page ])
/*
+ * For now, a global vm lock
+ */
+#define VM_PAGE_MTX(m) (&vm_mtx)
+
+/*
* Functions implemented as macros
*/
static __inline void
vm_page_flag_set(vm_page_t m, unsigned short bits)
{
- atomic_set_short(&(m)->flags, bits);
+
+ mtx_assert(VM_PAGE_MTX(m), MA_OWNED);
+ m->flags |= bits;
}
static __inline void
vm_page_flag_clear(vm_page_t m, unsigned short bits)
{
- atomic_clear_short(&(m)->flags, bits);
+
+ mtx_assert(VM_PAGE_MTX(m), MA_OWNED);
+ m->flags &= ~bits;
}
#if 0
@@ -332,7 +341,9 @@ vm_page_assert_wait(vm_page_t m, int interruptible)
static __inline void
vm_page_busy(vm_page_t m)
{
- KASSERT((m->flags & PG_BUSY) == 0, ("vm_page_busy: page already busy!!!"));
+
+ KASSERT((m->flags & PG_BUSY) == 0,
+ ("vm_page_busy: page already busy!!!"));
vm_page_flag_set(m, PG_BUSY);
}
@@ -375,13 +386,17 @@ vm_page_wakeup(vm_page_t m)
static __inline void
vm_page_io_start(vm_page_t m)
{
- atomic_add_char(&(m)->busy, 1);
+
+ mtx_assert(VM_PAGE_MTX(m), MA_OWNED);
+ m->busy++;
}
static __inline void
vm_page_io_finish(vm_page_t m)
{
- atomic_subtract_char(&m->busy, 1);
+
+ mtx_assert(VM_PAGE_MTX(m), MA_OWNED);
+ m->busy--;
if (m->busy == 0)
vm_page_flash(m);
}
@@ -447,12 +462,16 @@ void vm_page_free_toq(vm_page_t m);
static __inline void
vm_page_hold(vm_page_t mem)
{
+
+ mtx_assert(VM_PAGE_MTX(mem), MA_OWNED);
mem->hold_count++;
}
static __inline void
vm_page_unhold(vm_page_t mem)
{
+
+ mtx_assert(VM_PAGE_MTX(mem), MA_OWNED);
--mem->hold_count;
KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
}
@@ -565,7 +584,7 @@ vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
* Page is busy. Wait and retry.
*/
vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
- tsleep(m, PVM, msg, 0);
+ msleep(m, VM_PAGE_MTX(m), PVM, msg, 0);
}
splx(s);
return(TRUE);
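The tsleep() -> msleep() switch in the last hunk is what makes this wait
loop safe under the new locking: msleep() atomically releases the mutex
passed to it while sleeping and reacquires it before returning, so no
wakeup can slip in between setting PG_WANTED and blocking. A minimal
sketch of the resulting pattern (the wrapper function and its wmesg
string are illustrative, not part of this commit):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/* Illustrative wrapper: block until a page sheds PG_BUSY, with vm_mtx
 * held across both the check and the sleep. */
static void
example_wait_unbusy(vm_page_t m)
{
	mtx_assert(VM_PAGE_MTX(m), MA_OWNED);
	while (m->flags & PG_BUSY) {
		vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
		/* msleep() drops vm_mtx while asleep and retakes it on
		 * wakeup, so re-checking PG_BUSY here is race-free. */
		msleep(m, VM_PAGE_MTX(m), PVM, "vmwbsy", 0);
	}
}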