author		alc <alc@FreeBSD.org>	2004-07-22 19:44:49 +0000
committer	alc <alc@FreeBSD.org>	2004-07-22 19:44:49 +0000
commit		c7df7afd46a0399217b8e16f13ad0c1d8f92cbd8 (patch)
tree		08128b54a6a0c8278ebaf3d63342cb3d9922ac56 /sys
parent		861b3c44169cf75066afaaa69d2ec46483eac929 (diff)
- Change uma_zone_set_obj() to call kmem_alloc_nofault() instead of
  kmem_alloc_pageable().  The difference between these is that an errant
  memory access to the zone will be detected sooner with
  kmem_alloc_nofault().

The following changes serve to eliminate the following lock-order
reversal reported by witness:

 1st 0xc1a3c084 vm object (vm object) @ vm/swap_pager.c:1311
 2nd 0xc07acb00 swap_pager swhash (swap_pager swhash) @ vm/swap_pager.c:1797
 3rd 0xc1804bdc vm object (vm object) @ vm/uma_core.c:931

There is no potential deadlock in this case.  However, witness is unable
to recognize this because vm objects used by UMA have the same type as
ordinary vm objects.  To remedy this, we make the following changes:

- Add a mutex type argument to VM_OBJECT_LOCK_INIT().
- Use the mutex type argument to assign distinct types to special
  vm objects such as the kernel object, kmem object, and UMA objects.
- Define a static swap zone object for use by UMA.  (Only static
  objects are assigned a special mutex type.)
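
As an aside (not part of the commit): a minimal sketch of how the revised
two-argument VM_OBJECT_LOCK_INIT() passes a caller-supplied witness type to
mtx_init(), so that UMA's vm object locks form a lock class distinct from
ordinary vm object locks.  The struct, macro, and function names below are
hypothetical stand-ins; only mtx_init()'s name/type/flags arguments and the
lock-type strings come from the real code.

	/*
	 * Illustrative only: obj_sketch, SKETCH_LOCK_INIT, and sketch_init
	 * are hypothetical.  mtx_init(lock, name, type, opts) with
	 * MTX_DEF | MTX_DUPOK is the kern_mutex interface the real macro
	 * expands to; witness classifies locks by the type string.
	 */
	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>

	struct obj_sketch {
		struct mtx mtx;		/* stands in for vm_object's mtx */
	};

	/* Mirrors the revised macro: shared name, caller-supplied type. */
	#define	SKETCH_LOCK_INIT(obj, type)				\
		mtx_init(&(obj)->mtx, "vm object", (type),		\
		    MTX_DEF | MTX_DUPOK)

	static struct obj_sketch ordinary_obj;	/* "standard object" class */
	static struct obj_sketch uma_obj;	/* "uma object" class */

	static void
	sketch_init(void)
	{
		/*
		 * The two locks now belong to distinct witness types, so
		 * acquiring uma_obj.mtx while ordinary_obj.mtx is held is no
		 * longer reported as a "vm object" -> "vm object" reversal.
		 */
		SKETCH_LOCK_INIT(&ordinary_obj, "standard object");
		SKETCH_LOCK_INIT(&uma_obj, "uma object");
	}

Per the commit, only statically allocated objects (the kernel object, kmem
object, and the new swap zone object) receive these special types; objects
created through the zone allocator pass through vm_object_zinit() and share
the "standard object" type.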
Diffstat (limited to 'sys')
-rw-r--r--	sys/vm/swap_pager.c	3
-rw-r--r--	sys/vm/uma_core.c	4
-rw-r--r--	sys/vm/vm_object.c	11
-rw-r--r--	sys/vm/vm_object.h	5
4 files changed, 10 insertions, 13 deletions
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index f8d883c..3a41c33 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -219,6 +219,7 @@ SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
static struct mtx sw_alloc_mtx; /* protect list manipulation */
static struct pagerlst swap_pager_object_list[NOBJLISTS];
static uma_zone_t swap_zone;
+static struct vm_object swap_zone_obj;
/*
* pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure
@@ -419,7 +420,7 @@ swap_pager_swap_init(void)
swap_zone = uma_zcreate("SWAPMETA", sizeof(struct swblock), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
do {
- if (uma_zone_set_obj(swap_zone, NULL, n))
+ if (uma_zone_set_obj(swap_zone, &swap_zone_obj, n))
break;
/*
* if the allocation failed, try a zone two thirds the
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 6f6157f..25d00b4 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -2304,7 +2304,7 @@ uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
if (pages * keg->uk_ipers < count)
pages++;
- kva = kmem_alloc_pageable(kernel_map, pages * UMA_SLAB_SIZE);
+ kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
if (kva == 0)
return (0);
@@ -2312,7 +2312,7 @@ uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
obj = vm_object_allocate(OBJT_DEFAULT,
pages);
} else {
- VM_OBJECT_LOCK_INIT(obj);
+ VM_OBJECT_LOCK_INIT(obj, "uma object");
_vm_object_allocate(OBJT_DEFAULT,
pages, obj);
}
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 3701737..01540f9 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -182,7 +182,7 @@ vm_object_zinit(void *mem, int size)
object = (vm_object_t)mem;
bzero(&object->mtx, sizeof(object->mtx));
- VM_OBJECT_LOCK_INIT(object);
+ VM_OBJECT_LOCK_INIT(object, "standard object");
/* These are true for any object that has been freed */
object->paging_in_progress = 0;
@@ -234,16 +234,11 @@ vm_object_init(void)
TAILQ_INIT(&vm_object_list);
mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
- VM_OBJECT_LOCK_INIT(&kernel_object_store);
+ VM_OBJECT_LOCK_INIT(&kernel_object_store, "kernel object");
_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
kernel_object);
- /*
- * The kmem object's mutex is given a unique name, instead of
- * "vm object", to avoid false reports of lock-order reversal
- * with a system map mutex.
- */
- mtx_init(VM_OBJECT_MTX(kmem_object), "kmem object", NULL, MTX_DEF);
+ VM_OBJECT_LOCK_INIT(&kmem_object_store, "kmem object");
_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
kmem_object);
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 8d32762..52ba63c 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -172,8 +172,9 @@ extern struct vm_object kmem_object_store;
#define VM_OBJECT_LOCK(object) mtx_lock(&(object)->mtx)
#define VM_OBJECT_LOCK_ASSERT(object, type) \
mtx_assert(&(object)->mtx, (type))
-#define VM_OBJECT_LOCK_INIT(object) mtx_init(&(object)->mtx, "vm object", \
- NULL, MTX_DEF | MTX_DUPOK)
+#define VM_OBJECT_LOCK_INIT(object, type) \
+ mtx_init(&(object)->mtx, "vm object", \
+ (type), MTX_DEF | MTX_DUPOK)
#define VM_OBJECT_LOCKED(object) mtx_owned(&(object)->mtx)
#define VM_OBJECT_MTX(object) (&(object)->mtx)
#define VM_OBJECT_TRYLOCK(object) mtx_trylock(&(object)->mtx)