From dbdc40242102e22430f96a48b45a6e6722d5e48c Mon Sep 17 00:00:00 2001
From: alc
Date: Wed, 10 Mar 2004 04:44:43 +0000
Subject: - Make the acquisition of Giant in vm_fault_unwire() conditional on
 the pmap.  For the kernel pmap, Giant is not required.  In general, for
 other pmaps, Giant is required by i386's pmap_pte() implementation.
 Specifically, the use of PMAP2/PADDR2 is synchronized by Giant.  Note: In
 principle, updates to the kernel pmap's wired count could be lost without
 Giant.  However, in practice, we never use the kernel pmap's wired count.
 This will be resolved when pmap locking appears.
 - With the above change, cpu_thread_clean() and uma_large_free() need not
 acquire Giant.  (The first case is simply the revival of
 i386/i386/vm_machdep.c's revision 1.226 by peter.)

---
 sys/i386/i386/vm_machdep.c |  2 --
 sys/vm/uma_core.c          | 12 +-----------
 sys/vm/vm_fault.c          |  6 ++++--
 3 files changed, 5 insertions(+), 15 deletions(-)

diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index b09128d..e9e236f 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -320,10 +320,8 @@ cpu_thread_clean(struct thread *td)
 		 * XXX do we need to move the TSS off the allocated pages
 		 * before freeing them? (not done here)
 		 */
-		mtx_lock(&Giant);
 		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ext,
 		    ctob(IOPAGES + 1));
-		mtx_unlock(&Giant);
 		pcb->pcb_ext = 0;
 	}
 }
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index fb5f5fc..f693540 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -2052,17 +2052,7 @@ void
 uma_large_free(uma_slab_t slab)
 {
 	vsetobj((vm_offset_t)slab->us_data, kmem_object);
-	/*
-	 * XXX: We get a lock order reversal if we don't have Giant:
-	 * vm_map_remove (locks system map) -> vm_map_delete ->
-	 *    vm_map_entry_unwire -> vm_fault_unwire -> mtx_lock(&Giant)
-	 */
-	if (!mtx_owned(&Giant)) {
-		mtx_lock(&Giant);
-		page_free(slab->us_data, slab->us_size, slab->us_flags);
-		mtx_unlock(&Giant);
-	} else
-		page_free(slab->us_data, slab->us_size, slab->us_flags);
+	page_free(slab->us_data, slab->us_size, slab->us_flags);
 	uma_zfree_internal(slabzone, slab, NULL, 0);
 }
 
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 602b659..fded099 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1081,7 +1081,8 @@ vm_fault_unwire(map, start, end)
 
 	pmap = vm_map_pmap(map);
 
-	mtx_lock(&Giant);
+	if (pmap != kernel_pmap)
+		mtx_lock(&Giant);
 	/*
 	 * Since the pages are wired down, we must be able to get their
 	 * mappings from the physical map system.
@@ -1095,7 +1096,8 @@ vm_fault_unwire(map, start, end)
 			vm_page_unlock_queues();
 		}
 	}
-	mtx_unlock(&Giant);
+	if (pmap != kernel_pmap)
+		mtx_unlock(&Giant);
 }
 
 /*
-- 
cgit v1.1
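
For illustration only, a minimal userspace sketch of the locking pattern this commit introduces in vm_fault_unwire(): take the global lock only when operating on a pmap other than the kernel pmap. This is not FreeBSD kernel code; "giant_lock", "struct pmap", and "fake_unwire" are hypothetical stand-ins (pthread mutex in place of Giant), under the assumption that only the conditional-acquire shape matters here.

/*
 * Simplified model: acquire the global lock only for non-kernel pmaps,
 * mirroring "if (pmap != kernel_pmap) mtx_lock(&Giant);" in the patch.
 */
#include <pthread.h>
#include <stdio.h>

struct pmap { const char *name; };		/* stand-in for the kernel's pmap */

static struct pmap kernel_pmap_store = { "kernel" };
static struct pmap *kernel_pmap = &kernel_pmap_store;

static pthread_mutex_t giant_lock = PTHREAD_MUTEX_INITIALIZER;

static void
fake_unwire(struct pmap *pmap)
{
	/* Only non-kernel pmaps need the global lock in this model. */
	if (pmap != kernel_pmap)
		pthread_mutex_lock(&giant_lock);

	printf("unwiring pages in %s pmap (lock held: %s)\n",
	    pmap->name, pmap != kernel_pmap ? "yes" : "no");

	if (pmap != kernel_pmap)
		pthread_mutex_unlock(&giant_lock);
}

int
main(void)
{
	struct pmap user_pmap = { "user" };

	fake_unwire(kernel_pmap);	/* no lock taken */
	fake_unwire(&user_pmap);	/* lock taken and released */
	return (0);
}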