author     alc <alc@FreeBSD.org>    2004-10-24 07:12:13 +0000
committer  alc <alc@FreeBSD.org>    2004-10-24 07:12:13 +0000
commit     15e3b9f75096dfb3aec0ea09a7ffbd7cec2570a6
tree       c215113b08410ede4e5455bee7b35c58900f3c15 /sys/dev/agp
parent     f411c994819b6177809bb5558e332d43cd2ebb86
Avoid repeated acquisition and release of the vm object lock inside of
two loops in agp_generic_bind_memory(). As an intended side-effect, all
of the calls to vm_page_wakeup() are now performed with the containing
vm object lock held.
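The pattern being applied is plain lock hoisting. Below is a minimal, self-contained user-space sketch of the before/after shape, with a POSIX mutex standing in for the vm object lock; the names obj_lock, process_page, and NPAGES are illustrative and do not appear in agp.c.

#include <pthread.h>

#define NPAGES	16

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

static void
process_page(int i)
{
	(void)i;	/* stand-in for the per-page work */
}

/* Before: the lock is acquired and released once per iteration. */
static void
bind_pages_before(void)
{
	int i;

	for (i = 0; i < NPAGES; i++) {
		pthread_mutex_lock(&obj_lock);
		process_page(i);
		pthread_mutex_unlock(&obj_lock);
	}
}

/*
 * After: a single acquisition covers the whole loop, so every
 * per-page operation runs with the lock held.
 */
static void
bind_pages_after(void)
{
	int i;

	pthread_mutex_lock(&obj_lock);
	for (i = 0; i < NPAGES; i++)
		process_page(i);
	pthread_mutex_unlock(&obj_lock);
}

int
main(void)
{
	bind_pages_before();
	bind_pages_after();
	return (0);
}

The trade-off is a longer lock hold time in exchange for far fewer lock operations (two instead of two per page); here the loop bodies are short, and holding the lock across vm_page_wakeup() is the intended invariant.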
Diffstat (limited to 'sys/dev/agp')
 sys/dev/agp/agp.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/sys/dev/agp/agp.c b/sys/dev/agp/agp.c
index a7f2bfe..a31c4d0 100644
--- a/sys/dev/agp/agp.c
+++ b/sys/dev/agp/agp.c
@@ -501,6 +501,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
 	 * because vm_page_grab() used with VM_ALLOC_RETRY may
 	 * block and we can't hold a mutex while blocking.
 	 */
+	VM_OBJECT_LOCK(mem->am_obj);
 	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
 		/*
 		 * Find a page from the object and wire it
@@ -509,18 +510,18 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
 		 * AGP_PAGE_SIZE. If this is the first call to bind,
 		 * the pages will be allocated and zeroed.
 		 */
-		VM_OBJECT_LOCK(mem->am_obj);
 		m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
 		    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
-		VM_OBJECT_UNLOCK(mem->am_obj);
 		AGP_DPF("found page pa=%#x\n", VM_PAGE_TO_PHYS(m));
 	}
+	VM_OBJECT_UNLOCK(mem->am_obj);
 
 	mtx_lock(&sc->as_lock);
 
 	if (mem->am_is_bound) {
 		device_printf(dev, "memory already bound\n");
 		error = EINVAL;
+		VM_OBJECT_LOCK(mem->am_obj);
 		goto bad;
 	}
 
@@ -532,10 +533,9 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
 	 * (i.e. use alpha_XXX_dmamap()). I don't have access to any
 	 * alpha AGP hardware to check.
 	 */
+	VM_OBJECT_LOCK(mem->am_obj);
 	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
-		VM_OBJECT_LOCK(mem->am_obj);
 		m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));
-		VM_OBJECT_UNLOCK(mem->am_obj);
 
 		/*
 		 * Install entries in the GATT, making sure that if
@@ -566,6 +566,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
 		vm_page_wakeup(m);
 		vm_page_unlock_queues();
 	}
+	VM_OBJECT_UNLOCK(mem->am_obj);
 
 	/*
 	 * Flush the cpu cache since we are providing a new mapping
@@ -586,7 +587,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
 	return 0;
 bad:
 	mtx_unlock(&sc->as_lock);
-	VM_OBJECT_LOCK(mem->am_obj);
+	VM_OBJECT_LOCK_ASSERT(mem->am_obj, MA_OWNED);
 	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
 		m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));
 		vm_page_lock_queues();
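A note on the error path: with the locking hoisted, the cleanup code at the `bad:' label runs entirely under the object lock, so the unconditional VM_OBJECT_LOCK() there becomes VM_OBJECT_LOCK_ASSERT(mem->am_obj, MA_OWNED). The jump from the "memory already bound" check sits between the two locked regions and so does not hold the lock; it therefore reacquires it immediately before the goto, preserving the asserted invariant.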