diff options
author:    kib <kib@FreeBSD.org> | 2017-09-16 13:49:26 +0000
committer: Luiz Souza <luiz@netgate.com> | 2018-02-21 15:13:26 -0300
commit:    f0ea3d38dfcefcb14e12731990303f5a4ebb5fe4 (patch)
tree:      db2ebca078f0300b7f67011ae32d6943be444632
parent:    18ada5c5bdd7305e494b2af51ffa6d334ace2451 (diff)
download:  FreeBSD-src-f0ea3d38dfcefcb14e12731990303f5a4ebb5fe4.zip FreeBSD-src-f0ea3d38dfcefcb14e12731990303f5a4ebb5fe4.tar.gz
MFC r323368:
Add a vm_page_change_lock() helper.
(cherry picked from commit e44297aa7c8b20f74352986ad5c27fed648542cc)
-rw-r--r--  sys/vm/vm_object.c | 26
-rw-r--r--  sys/vm/vm_page.c   | 60
-rw-r--r--  sys/vm/vm_page.h   |  1
3 files changed, 34 insertions(+), 53 deletions(-)
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c index a06d043..ccac79a 100644 --- a/sys/vm/vm_object.c +++ b/sys/vm/vm_object.c @@ -1898,6 +1898,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int options) { vm_page_t p, next; + struct mtx *mtx; VM_OBJECT_ASSERT_WLOCKED(object); KASSERT((object->flags & OBJ_UNMANAGED) == 0 || @@ -1908,6 +1909,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, vm_object_pip_add(object, 1); again: p = vm_page_find_least(object, start); + mtx = NULL; /* * Here, the variable "p" is either (1) the page with the least pindex @@ -1924,7 +1926,7 @@ again: * however, be invalidated if the option OBJPR_CLEANONLY is * not specified. */ - vm_page_lock(p); + vm_page_change_lock(p, &mtx); if (vm_page_xbusied(p)) { VM_OBJECT_WUNLOCK(object); vm_page_busy_sleep(p, "vmopax", true); @@ -1938,7 +1940,7 @@ again: p->valid = 0; vm_page_undirty(p); } - goto next; + continue; } if (vm_page_busied(p)) { VM_OBJECT_WUNLOCK(object); @@ -1952,14 +1954,14 @@ again: if ((options & OBJPR_NOTMAPPED) == 0) pmap_remove_write(p); if (p->dirty) - goto next; + continue; } if ((options & OBJPR_NOTMAPPED) == 0) pmap_remove_all(p); vm_page_free(p); -next: - vm_page_unlock(p); } + if (mtx != NULL) + mtx_unlock(mtx); vm_object_pip_wakeup(object); } @@ -1982,7 +1984,7 @@ next: void vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end) { - struct mtx *mtx, *new_mtx; + struct mtx *mtx; vm_page_t p, next; VM_OBJECT_ASSERT_LOCKED(object); @@ -1999,17 +2001,7 @@ vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end) mtx = NULL; for (; p != NULL && (p->pindex < end || end == 0); p = next) { next = TAILQ_NEXT(p, listq); - - /* - * Avoid releasing and reacquiring the same page lock. 
- */ - new_mtx = vm_page_lockptr(p); - if (mtx != new_mtx) { - if (mtx != NULL) - mtx_unlock(mtx); - mtx = new_mtx; - mtx_lock(mtx); - } + vm_page_change_lock(p, &mtx); vm_page_deactivate_noreuse(p); } if (mtx != NULL) diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c index d8a9c21..3a5e84b 100644 --- a/sys/vm/vm_page.c +++ b/sys/vm/vm_page.c @@ -905,6 +905,23 @@ vm_page_flash(vm_page_t m) } /* + * Avoid releasing and reacquiring the same page lock. + */ +void +vm_page_change_lock(vm_page_t m, struct mtx **mtx) +{ + struct mtx *mtx1; + + mtx1 = vm_page_lockptr(m); + if (*mtx == mtx1) + return; + if (*mtx != NULL) + mtx_unlock(*mtx); + *mtx = mtx1; + mtx_lock(mtx1); +} + +/* * Keep page from being freed by the page daemon * much of the same effect as wiring, except much lower * overhead and should be used only for *very* temporary @@ -937,20 +954,11 @@ vm_page_unhold(vm_page_t mem) void vm_page_unhold_pages(vm_page_t *ma, int count) { - struct mtx *mtx, *new_mtx; + struct mtx *mtx; mtx = NULL; for (; count != 0; count--) { - /* - * Avoid releasing and reacquiring the same page lock. - */ - new_mtx = vm_page_lockptr(*ma); - if (mtx != new_mtx) { - if (mtx != NULL) - mtx_unlock(mtx); - mtx = new_mtx; - mtx_lock(mtx); - } + vm_page_change_lock(*ma, &mtx); vm_page_unhold(*ma); ma++; } @@ -1989,7 +1997,7 @@ vm_page_t vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end, u_long alignment, vm_paddr_t boundary, int options) { - struct mtx *m_mtx, *new_mtx; + struct mtx *m_mtx; vm_object_t object; vm_paddr_t pa; vm_page_t m, m_run; @@ -2032,16 +2040,7 @@ vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end, } else KASSERT(m_run != NULL, ("m_run == NULL")); - /* - * Avoid releasing and reacquiring the same page lock. 
- */ - new_mtx = vm_page_lockptr(m); - if (m_mtx != new_mtx) { - if (m_mtx != NULL) - mtx_unlock(m_mtx); - m_mtx = new_mtx; - mtx_lock(m_mtx); - } + vm_page_change_lock(m, &m_mtx); m_inc = 1; retry: if (m->wire_count != 0 || m->hold_count != 0) @@ -2191,7 +2190,7 @@ static int vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run, vm_paddr_t high) { - struct mtx *m_mtx, *new_mtx; + struct mtx *m_mtx; struct spglist free; vm_object_t object; vm_paddr_t pa; @@ -2212,13 +2211,7 @@ vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run, /* * Avoid releasing and reacquiring the same page lock. */ - new_mtx = vm_page_lockptr(m); - if (m_mtx != new_mtx) { - if (m_mtx != NULL) - mtx_unlock(m_mtx); - m_mtx = new_mtx; - mtx_lock(m_mtx); - } + vm_page_change_lock(m, &m_mtx); retry: if (m->wire_count != 0 || m->hold_count != 0) error = EBUSY; @@ -2331,12 +2324,7 @@ retry: * The new page must be deactivated * before the object is unlocked. */ - new_mtx = vm_page_lockptr(m_new); - if (m_mtx != new_mtx) { - mtx_unlock(m_mtx); - m_mtx = new_mtx; - mtx_lock(m_mtx); - } + vm_page_change_lock(m_new, &m_mtx); vm_page_deactivate(m_new); } else { m->flags &= ~PG_ZERO; diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h index 1ee8dde..00d04c2 100644 --- a/sys/vm/vm_page.h +++ b/sys/vm/vm_page.h @@ -448,6 +448,7 @@ vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr); vm_page_t vm_page_alloc_freelist(int, int); +void vm_page_change_lock(vm_page_t m, struct mtx **mtx); vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int); int vm_page_try_to_free (vm_page_t); void vm_page_deactivate (vm_page_t); |