path: root/sys/vm/vm_map.h
author    jasone <jasone@FreeBSD.org>    2000-10-12 22:37:28 +0000
committer jasone <jasone@FreeBSD.org>    2000-10-12 22:37:28 +0000
commit    db944ecf00d69d85077da0ef0c75e61dfd5f7bba (patch)
tree      b52d9423e5241af7749aef4cbdbca855606a8cd9 /sys/vm/vm_map.h
parent    9a7ce3cd1a23050362e81c4b0d67a50cd643fb69 (diff)
For lockmgr mutex protection, use an array of mutexes that are allocated
and initialized during boot. This avoids bloating sizeof(struct lock). As a
side effect, it is no longer necessary to enforce the assumption that
lockinit()/lockdestroy() calls are paired, so the LK_VALID flag has been
removed.

Idea taken from: BSD/OS.
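The commit message describes the scheme only in prose; below is a minimal C
sketch of the idea, not the actual sys/kern/kern_lock.c change. All
identifiers here (LOCKMGR_POOL_SIZE, lockmgr_pool, lockmgr_pool_init,
lockmgr_pool_select) are hypothetical, the header locations and the
three-argument mtx_init() form are assumptions about the era's API, and the
pool size and hash are arbitrary.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>			/* assumed location of the mtx API */

#define	LOCKMGR_POOL_SIZE	32	/* hypothetical pool size */

static struct mtx lockmgr_pool[LOCKMGR_POOL_SIZE];

/* Initialize the shared interlock pool once, during boot. */
static void
lockmgr_pool_init(void)
{
	int i;

	for (i = 0; i < LOCKMGR_POOL_SIZE; i++)
		mtx_init(&lockmgr_pool[i], "lockmgr interlock", MTX_DEF);
}

/*
 * Instead of embedding a struct mtx in every struct lock (which would grow
 * sizeof(struct lock)), hash the lock's address to pick one of the pool
 * mutexes; lockinit() would then store the returned pointer in lk_interlock.
 */
static struct mtx *
lockmgr_pool_select(void *lkp)
{

	return (&lockmgr_pool[((uintptr_t)lkp >> 8) % LOCKMGR_POOL_SIZE]);
}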
Diffstat (limited to 'sys/vm/vm_map.h')
-rw-r--r--  sys/vm/vm_map.h | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index d238488..ef48af2 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -291,15 +291,15 @@ _vm_map_lock_upgrade(vm_map_t map, struct proc *p) {
 #define vm_map_set_recursive(map) \
 	do { \
-		mtx_enter(&(map)->lock.lk_interlock, MTX_DEF); \
+		mtx_enter((map)->lock.lk_interlock, MTX_DEF); \
 		(map)->lock.lk_flags |= LK_CANRECURSE; \
-		mtx_exit(&(map)->lock.lk_interlock, MTX_DEF); \
+		mtx_exit((map)->lock.lk_interlock, MTX_DEF); \
 	} while(0)
 
 #define vm_map_clear_recursive(map) \
 	do { \
-		mtx_enter(&(map)->lock.lk_interlock, MTX_DEF); \
+		mtx_enter((map)->lock.lk_interlock, MTX_DEF); \
 		(map)->lock.lk_flags &= ~LK_CANRECURSE; \
-		mtx_exit(&(map)->lock.lk_interlock, MTX_DEF); \
+		mtx_exit((map)->lock.lk_interlock, MTX_DEF); \
 	} while(0)
 
 /*
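The address-of operators dropped in the hunk above follow directly from that
change: lk_interlock is no longer a mutex embedded in struct lock but a
pointer to one of the boot-allocated mutexes, so the macros now hand it to
mtx_enter()/mtx_exit() as-is. A hedged sketch of the field layout
(hypothetical struct name; the remaining lockmgr members are omitted):

struct mtx;				/* kernel mutex type, opaque for this sketch */

/*
 * Before this commit the field was an embedded mutex
 * ("struct mtx lk_interlock;"), which is why the old macros took its
 * address with '&'.  After the commit only a pointer into the boot-time
 * array is stored, so it is passed directly and sizeof(struct lock) no
 * longer pays for a whole mutex per lock.
 */
struct lock_sketch {
	struct mtx	*lk_interlock;	/* points at a shared, boot-allocated mutex */
	unsigned int	 lk_flags;	/* e.g. LK_CANRECURSE, as toggled in the hunk */
	/* ... remaining lockmgr fields ... */
};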