diff options
author | attilio <attilio@FreeBSD.org> | 2013-03-09 02:51:51 +0000 |
---|---|---|
committer | attilio <attilio@FreeBSD.org> | 2013-03-09 02:51:51 +0000 |
commit | 16a80466e5837ad617b6b144297fd6069188b9b3 (patch) | |
tree | 68f0fe73cda2b8a8692af0e8e2b09c7e6d1a7ed1 /sys/vm/vm_object.h | |
parent | 640e058da3cee2710fd66a67abbb548aad74104e (diff) | |
parent | 72f7f3e528d2e900546a7ccbe16525b12d64dd44 (diff) | |
download | FreeBSD-src-16a80466e5837ad617b6b144297fd6069188b9b3.zip FreeBSD-src-16a80466e5837ad617b6b144297fd6069188b9b3.tar.gz |
MFC
Diffstat (limited to 'sys/vm/vm_object.h')
-rw-r--r-- | sys/vm/vm_object.h | 44 |
1 file changed, 32 insertions, 12 deletions
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index bfef79c..d588ff6 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -70,6 +70,7 @@
 #include <sys/queue.h>
 #include <sys/_lock.h>
 #include <sys/_mutex.h>
+#include <sys/_rwlock.h>

 #include <vm/vm_radix.h>

@@ -78,9 +79,9 @@
 *
 *	vm_object_t		Virtual memory object.
 *
-*	The root of cached pages pool is protected by both the per-object mutex
+*	The root of cached pages pool is protected by both the per-object lock
 *	and the free pages queue mutex.
-*	On insert in the cache splay tree, the per-object mutex is expected
+*	On insert in the cache splay tree, the per-object lock is expected
 *	to be already held and the free pages queue mutex will be
 *	acquired during the operation too.
 *	On remove and lookup from the cache splay tree, only the free
@@ -91,13 +92,13 @@
 *
 * List of locks
 *	(c)	const until freed
-*	(o)	per-object mutex
+*	(o)	per-object lock
 *	(f)	free pages queue mutex
 *
 */

 struct vm_object {
-	struct mtx mtx;
+	struct rwlock lock;
 	TAILQ_ENTRY(vm_object) object_list;	/* list of all objects */
 	LIST_HEAD(, vm_object) shadow_head;	/* objects that this is a shadow for */
 	LIST_ENTRY(vm_object) shadow_list;	/* chain of shadow objects */
@@ -205,14 +206,26 @@ extern struct vm_object kmem_object_store;
 #define	kernel_object	(&kernel_object_store)
 #define	kmem_object	(&kmem_object_store)

-#define	VM_OBJECT_LOCK(object)	mtx_lock(&(object)->mtx)
-#define	VM_OBJECT_LOCK_ASSERT(object, type) \
-	mtx_assert(&(object)->mtx, (type))
-#define	VM_OBJECT_SLEEP(object, wchan, pri, wmesg, timo) \
-	msleep((wchan), &(object)->mtx, (pri), \
-	    (wmesg), (timo))
-#define	VM_OBJECT_TRYLOCK(object)	mtx_trylock(&(object)->mtx)
-#define	VM_OBJECT_UNLOCK(object)	mtx_unlock(&(object)->mtx)
+#define	VM_OBJECT_ASSERT_LOCKED(object) \
+	rw_assert(&(object)->lock, RA_LOCKED)
+#define	VM_OBJECT_ASSERT_RLOCKED(object) \
+	rw_assert(&(object)->lock, RA_RLOCKED)
+#define	VM_OBJECT_ASSERT_WLOCKED(object) \
+	rw_assert(&(object)->lock, RA_WLOCKED)
+#define	VM_OBJECT_RLOCK(object) \
+	rw_rlock(&(object)->lock)
+#define	VM_OBJECT_RUNLOCK(object) \
+	rw_runlock(&(object)->lock)
+#define	VM_OBJECT_SLEEP(object, wchan, pri, wmesg, timo) \
+	rw_sleep((wchan), &(object)->lock, (pri), (wmesg), (timo))
+#define	VM_OBJECT_TRYRLOCK(object) \
+	rw_try_rlock(&(object)->lock)
+#define	VM_OBJECT_TRYWLOCK(object) \
+	rw_try_wlock(&(object)->lock)
+#define	VM_OBJECT_WLOCK(object) \
+	rw_wlock(&(object)->lock)
+#define	VM_OBJECT_WUNLOCK(object) \
+	rw_wunlock(&(object)->lock)

 /*
  *	The object must be locked or thread private.
@@ -231,6 +244,13 @@ void vm_object_pip_wakeup(vm_object_t object);
 void vm_object_pip_wakeupn(vm_object_t object, short i);
 void vm_object_pip_wait(vm_object_t object, char *waitid);

+static __inline boolean_t
+vm_object_cache_is_empty(vm_object_t object)
+{
+
+	return (object->cache == NULL);
+}
+
 vm_object_t vm_object_allocate (objtype_t, vm_pindex_t);
 boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t,
    vm_size_t, boolean_t);