Diffstat (limited to 'sys/vm')
-rw-r--r-- | sys/vm/device_pager.c |  19
-rw-r--r-- | sys/vm/phys_pager.c   |   8
-rw-r--r-- | sys/vm/sg_pager.c     |   9
-rw-r--r-- | sys/vm/swap_pager.c   |  42
-rw-r--r-- | sys/vm/uma_core.c     |  12
-rw-r--r-- | sys/vm/vm_contig.c    |  19
-rw-r--r-- | sys/vm/vm_fault.c     |  79
-rw-r--r-- | sys/vm/vm_glue.c      |  52
-rw-r--r-- | sys/vm/vm_kern.c      |   2
-rw-r--r-- | sys/vm/vm_map.c       |   6
-rw-r--r-- | sys/vm/vm_mmap.c      |   2
-rw-r--r-- | sys/vm/vm_object.c    | 104
-rw-r--r-- | sys/vm/vm_page.c      | 149
-rw-r--r-- | sys/vm/vm_page.h      |  46
-rw-r--r-- | sys/vm/vm_pageout.c   | 114
-rw-r--r-- | sys/vm/vm_param.h     |   8
-rw-r--r-- | sys/vm/vnode_pager.c  |  85
17 files changed, 567 insertions, 189 deletions
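The change repeated through every file below is a single locking discipline: operations that previously ran under the global vm_page_queue_mtx alone now take the per-page lock first (vm_page_lock(m)), then the page-queue lock, and release the two in reverse order. The following is a minimal userspace model of that ordering, with POSIX mutexes standing in for the kernel's mtx(9) locks; struct page, page_free(), and queue_lock are illustrative names, not kernel API, and the wire_count check mirrors the new swp_pager_free_nrpage() helper, which declines to free a wired page. Three further sketches, covering the physical-address lock array, the tryrelock protocol, and the scan idiom, follow the diff.

/*
 * Userspace model of the locking discipline introduced by this diff:
 * per-page lock first, then the global page-queue lock, released in
 * reverse order.  pthread mutexes stand in for mtx(9); struct page
 * and page_free() are illustrative only.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

struct page {
        pthread_mutex_t lock;           /* per-page lock ("vm_page_lock") */
        int wire_count;
};

/* Caller holds no locks; mirrors the reworked vm_page_free() call sites. */
static void
page_free(struct page *m)
{
        pthread_mutex_lock(&m->lock);           /* vm_page_lock(m) */
        pthread_mutex_lock(&queue_lock);        /* vm_page_lock_queues() */
        if (m->wire_count == 0)                 /* cf. swp_pager_free_nrpage() */
                printf("freeing page %p\n", (void *)m);
        pthread_mutex_unlock(&queue_lock);      /* vm_page_unlock_queues() */
        pthread_mutex_unlock(&m->lock);         /* vm_page_unlock(m) */
}

int
main(void)
{
        struct page m = { .lock = PTHREAD_MUTEX_INITIALIZER, .wire_count = 0 };

        page_free(&m);
        return (0);
}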
diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c index 9002e77..996f740 100644 --- a/sys/vm/device_pager.c +++ b/sys/vm/device_pager.c @@ -251,12 +251,16 @@ dev_pager_getpages(object, m, count, reqpage) VM_OBJECT_LOCK(object); dev_pager_updatefake(page, paddr, memattr); if (count > 1) { - vm_page_lock_queues(); + for (i = 0; i < count; i++) { - if (i != reqpage) + if (i != reqpage) { + vm_page_lock(m[i]); + vm_page_lock_queues(); vm_page_free(m[i]); + vm_page_unlock_queues(); + vm_page_unlock(m[i]); + } } - vm_page_unlock_queues(); } } else { /* @@ -266,10 +270,13 @@ dev_pager_getpages(object, m, count, reqpage) page = dev_pager_getfake(paddr, memattr); VM_OBJECT_LOCK(object); TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist, page, pageq); - vm_page_lock_queues(); - for (i = 0; i < count; i++) + for (i = 0; i < count; i++) { + vm_page_lock(m[i]); + vm_page_lock_queues(); vm_page_free(m[i]); - vm_page_unlock_queues(); + vm_page_unlock_queues(); + vm_page_unlock(m[i]); + } vm_page_insert(page, object, offset); m[reqpage] = page; } diff --git a/sys/vm/phys_pager.c b/sys/vm/phys_pager.c index 42cdab3..97674e3 100644 --- a/sys/vm/phys_pager.c +++ b/sys/vm/phys_pager.c @@ -152,10 +152,10 @@ phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage) KASSERT(m[i]->dirty == 0, ("phys_pager_getpages: dirty page %p", m[i])); /* The requested page must remain busy, the others not. */ - if (reqpage != i) { - m[i]->oflags &= ~VPO_BUSY; - m[i]->busy = 0; - } + if (i == reqpage) + vm_page_flash(m[i]); + else + vm_page_wakeup(m[i]); } return (VM_PAGER_OK); } diff --git a/sys/vm/sg_pager.c b/sys/vm/sg_pager.c index a17fe82..845401a 100644 --- a/sys/vm/sg_pager.c +++ b/sys/vm/sg_pager.c @@ -198,10 +198,13 @@ sg_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage) TAILQ_INSERT_TAIL(&object->un_pager.sgp.sgp_pglist, page, pageq); /* Free the original pages and insert this fake page into the object. 
*/ - vm_page_lock_queues(); - for (i = 0; i < count; i++) + for (i = 0; i < count; i++) { + vm_page_lock(m[i]); + vm_page_lock_queues(); vm_page_free(m[i]); - vm_page_unlock_queues(); + vm_page_unlock_queues(); + vm_page_unlock(m[i]); + } vm_page_insert(page, object, offset); m[reqpage] = page; page->valid = VM_PAGE_BITS_ALL; diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c index f1d89d5..6017a52 100644 --- a/sys/vm/swap_pager.c +++ b/sys/vm/swap_pager.c @@ -378,6 +378,14 @@ static void swp_pager_meta_free(vm_object_t, vm_pindex_t, daddr_t); static void swp_pager_meta_free_all(vm_object_t); static daddr_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int); +static void +swp_pager_free_nrpage(vm_page_t m) +{ + + if (m->wire_count == 0) + vm_page_free(m); +} + /* * SWP_SIZECHECK() - update swap_pager_full indication * @@ -1129,12 +1137,21 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage) if (0 < i || j < count) { int k; - vm_page_lock_queues(); - for (k = 0; k < i; ++k) - vm_page_free(m[k]); - for (k = j; k < count; ++k) - vm_page_free(m[k]); - vm_page_unlock_queues(); + + for (k = 0; k < i; ++k) { + vm_page_lock(m[k]); + vm_page_lock_queues(); + swp_pager_free_nrpage(m[k]); + vm_page_unlock_queues(); + vm_page_unlock(m[k]); + } + for (k = j; k < count; ++k) { + vm_page_lock(m[k]); + vm_page_lock_queues(); + swp_pager_free_nrpage(m[k]); + vm_page_unlock_queues(); + vm_page_unlock(m[k]); + } } /* @@ -1489,7 +1506,7 @@ swp_pager_async_iodone(struct buf *bp) object = bp->b_pages[0]->object; VM_OBJECT_LOCK(object); } - vm_page_lock_queues(); + /* * cleanup pages. If an error occurs writing to swap, we are in * very serious trouble. If it happens to be a disk error, though, @@ -1501,6 +1518,8 @@ swp_pager_async_iodone(struct buf *bp) for (i = 0; i < bp->b_npages; ++i) { vm_page_t m = bp->b_pages[i]; + vm_page_lock(m); + vm_page_lock_queues(); m->oflags &= ~VPO_SWAPINPROG; if (bp->b_ioflags & BIO_ERROR) { @@ -1529,7 +1548,7 @@ swp_pager_async_iodone(struct buf *bp) */ m->valid = 0; if (i != bp->b_pager.pg_reqpage) - vm_page_free(m); + swp_pager_free_nrpage(m); else vm_page_flash(m); /* @@ -1597,8 +1616,9 @@ swp_pager_async_iodone(struct buf *bp) if (vm_page_count_severe()) vm_page_try_to_cache(m); } + vm_page_unlock_queues(); + vm_page_unlock(m); } - vm_page_unlock_queues(); /* * adjust pip. 
NOTE: the original parent may still have its own @@ -1694,10 +1714,12 @@ swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex) m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL|VM_ALLOC_RETRY); if (m->valid == VM_PAGE_BITS_ALL) { vm_object_pip_subtract(object, 1); + vm_page_lock(m); vm_page_lock_queues(); vm_page_activate(m); vm_page_dirty(m); vm_page_unlock_queues(); + vm_page_unlock(m); vm_page_wakeup(m); vm_pager_page_unswapped(m); return; @@ -1706,10 +1728,12 @@ swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex) if (swap_pager_getpages(object, &m, 1, 0) != VM_PAGER_OK) panic("swap_pager_force_pagein: read from swap failed");/*XXX*/ vm_object_pip_subtract(object, 1); + vm_page_lock(m); vm_page_lock_queues(); vm_page_dirty(m); vm_page_dontneed(m); vm_page_unlock_queues(); + vm_page_unlock(m); vm_page_wakeup(m); vm_pager_page_unswapped(m); } diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c index a2d5633..e1b9a08 100644 --- a/sys/vm/uma_core.c +++ b/sys/vm/uma_core.c @@ -1022,10 +1022,8 @@ obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) while (pages != startpages) { pages--; p = TAILQ_LAST(&object->memq, pglist); - vm_page_lock_queues(); vm_page_unwire(p, 0); vm_page_free(p); - vm_page_unlock_queues(); } retkva = 0; goto done; @@ -2891,13 +2889,11 @@ uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count) if (kva == 0) return (0); - if (obj == NULL) { - obj = vm_object_allocate(OBJT_DEFAULT, - pages); - } else { + if (obj == NULL) + obj = vm_object_allocate(OBJT_PHYS, pages); + else { VM_OBJECT_LOCK_INIT(obj, "uma object"); - _vm_object_allocate(OBJT_DEFAULT, - pages, obj); + _vm_object_allocate(OBJT_PHYS, pages, obj); } ZONE_LOCK(zone); keg->uk_kva = kva; diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c index 78d7e28..c1c8618 100644 --- a/sys/vm/vm_contig.c +++ b/sys/vm/vm_contig.c @@ -99,9 +99,11 @@ vm_contig_launder_page(vm_page_t m, vm_page_t *next) int vfslocked; mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); object = m->object; if (!VM_OBJECT_TRYLOCK(object) && !vm_pageout_fallback_object_lock(m, next)) { + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); return (EAGAIN); } @@ -113,7 +115,8 @@ vm_contig_launder_page(vm_page_t m, vm_page_t *next) vm_page_test_dirty(m); if (m->dirty == 0 && m->hold_count == 0) pmap_remove_all(m); - if (m->dirty) { + if (m->dirty != 0) { + vm_page_unlock(m); if ((object->flags & OBJ_DEAD) != 0) { VM_OBJECT_UNLOCK(object); return (EAGAIN); @@ -137,13 +140,18 @@ vm_contig_launder_page(vm_page_t m, vm_page_t *next) return (0); } else if (object->type == OBJT_SWAP || object->type == OBJT_DEFAULT) { + vm_page_unlock_queues(); m_tmp = m; vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC); VM_OBJECT_UNLOCK(object); + vm_page_lock_queues(); return (0); } - } else if (m->hold_count == 0) - vm_page_cache(m); + } else { + if (m->hold_count == 0) + vm_page_cache(m); + vm_page_unlock(m); + } VM_OBJECT_UNLOCK(object); return (0); } @@ -160,9 +168,12 @@ vm_contig_launder(int queue) if ((m->flags & PG_MARKER) != 0) continue; + if (!vm_page_trylock(m)) + continue; KASSERT(VM_PAGE_INQUEUE2(m, queue), ("vm_contig_launder: page %p's queue is not %d", m, queue)); error = vm_contig_launder_page(m, &next); + vm_page_lock_assert(m, MA_NOTOWNED); if (error == 0) return (TRUE); if (error == EBUSY) @@ -257,9 +268,7 @@ retry: i -= PAGE_SIZE; m = vm_page_lookup(object, OFF_TO_IDX(offset + i)); - vm_page_lock_queues(); vm_page_free(m); - vm_page_unlock_queues(); } VM_OBJECT_UNLOCK(object); 
vm_map_delete(map, addr, addr + size); diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c index f409856..2978f7f 100644 --- a/sys/vm/vm_fault.c +++ b/sys/vm/vm_fault.c @@ -137,9 +137,11 @@ release_page(struct faultstate *fs) { vm_page_wakeup(fs->m); + vm_page_lock(fs->m); vm_page_lock_queues(); vm_page_deactivate(fs->m); vm_page_unlock_queues(); + vm_page_unlock(fs->m); fs->m = NULL; } @@ -161,9 +163,11 @@ unlock_and_deallocate(struct faultstate *fs) VM_OBJECT_UNLOCK(fs->object); if (fs->object != fs->first_object) { VM_OBJECT_LOCK(fs->first_object); + vm_page_lock(fs->first_m); vm_page_lock_queues(); vm_page_free(fs->first_m); vm_page_unlock_queues(); + vm_page_unlock(fs->first_m); vm_object_pip_wakeup(fs->first_object); VM_OBJECT_UNLOCK(fs->first_object); fs->first_m = NULL; @@ -211,7 +215,7 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, { vm_prot_t prot; int is_first_object_locked, result; - boolean_t are_queues_locked, growstack, wired; + boolean_t growstack, wired; int map_generation; vm_object_t next_object; vm_page_t marray[VM_FAULT_READ]; @@ -305,12 +309,12 @@ RetryFault:; * removes the page from the backing object, * which is not what we want. */ + vm_page_lock(fs.m); vm_page_lock_queues(); if ((fs.m->cow) && (fault_type & VM_PROT_WRITE) && (fs.object == fs.first_object)) { vm_page_cowfault(fs.m); - vm_page_unlock_queues(); unlock_and_deallocate(&fs); goto RetryFault; } @@ -332,13 +336,22 @@ RetryFault:; * to pmap it. */ if ((fs.m->oflags & VPO_BUSY) || fs.m->busy) { + /* + * Reference the page before unlocking and + * sleeping so that the page daemon is less + * likely to reclaim it. + */ + vm_page_flag_set(fs.m, PG_REFERENCED); vm_page_unlock_queues(); + vm_page_unlock(fs.m); VM_OBJECT_UNLOCK(fs.object); if (fs.object != fs.first_object) { VM_OBJECT_LOCK(fs.first_object); + vm_page_lock(fs.first_m); vm_page_lock_queues(); vm_page_free(fs.first_m); vm_page_unlock_queues(); + vm_page_unlock(fs.first_m); vm_object_pip_wakeup(fs.first_object); VM_OBJECT_UNLOCK(fs.first_object); fs.first_m = NULL; @@ -358,6 +371,7 @@ RetryFault:; } vm_pageq_remove(fs.m); vm_page_unlock_queues(); + vm_page_unlock(fs.m); /* * Mark page busy for other processes, and the @@ -462,7 +476,6 @@ readrest: else firstpindex = fs.first_pindex - 2 * VM_FAULT_READ; - are_queues_locked = FALSE; /* * note: partially valid pages cannot be * included in the lookahead - NFS piecemeal @@ -479,22 +492,23 @@ readrest: if (mt->busy || (mt->oflags & VPO_BUSY)) continue; - if (!are_queues_locked) { - are_queues_locked = TRUE; - vm_page_lock_queues(); - } + vm_page_lock(mt); + vm_page_lock_queues(); if (mt->hold_count || - mt->wire_count) + mt->wire_count) { + vm_page_unlock_queues(); + vm_page_unlock(mt); continue; + } pmap_remove_all(mt); if (mt->dirty) { vm_page_deactivate(mt); } else { vm_page_cache(mt); } - } - if (are_queues_locked) vm_page_unlock_queues(); + vm_page_unlock(mt); + } ahead += behind; behind = 0; } @@ -623,17 +637,21 @@ vnode_locked: */ if (((fs.map != kernel_map) && (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) { + vm_page_lock(fs.m); vm_page_lock_queues(); vm_page_free(fs.m); vm_page_unlock_queues(); + vm_page_unlock(fs.m); fs.m = NULL; unlock_and_deallocate(&fs); return ((rv == VM_PAGER_ERROR) ? 
KERN_FAILURE : KERN_PROTECTION_FAILURE); } if (fs.object != fs.first_object) { + vm_page_lock(fs.m); vm_page_lock_queues(); vm_page_free(fs.m); vm_page_unlock_queues(); + vm_page_unlock(fs.m); fs.m = NULL; /* * XXX - we cannot just fall out at this @@ -746,18 +764,22 @@ vnode_locked: * We don't chase down the shadow chain */ fs.object == fs.first_object->backing_object) { + vm_page_lock(fs.first_m); vm_page_lock_queues(); /* * get rid of the unnecessary page */ vm_page_free(fs.first_m); + vm_page_unlock_queues(); + vm_page_unlock(fs.first_m); /* * grab the page and put it into the * process'es object. The page is * automatically made dirty. */ + vm_page_lock(fs.m); vm_page_rename(fs.m, fs.first_object, fs.first_pindex); - vm_page_unlock_queues(); + vm_page_unlock(fs.m); vm_page_busy(fs.m); fs.first_m = fs.m; fs.m = NULL; @@ -770,10 +792,13 @@ vnode_locked: fs.first_m->valid = VM_PAGE_BITS_ALL; if (wired && (fault_flags & VM_FAULT_CHANGE_WIRING) == 0) { - vm_page_lock_queues(); + vm_page_lock(fs.first_m); vm_page_wire(fs.first_m); + vm_page_unlock(fs.first_m); + + vm_page_lock(fs.m); vm_page_unwire(fs.m, FALSE); - vm_page_unlock_queues(); + vm_page_unlock(fs.m); } /* * We no longer need the old page or object. @@ -923,8 +948,8 @@ vnode_locked: if ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 && wired == 0) vm_fault_prefault(fs.map->pmap, vaddr, fs.entry); VM_OBJECT_LOCK(fs.object); + vm_page_lock(fs.m); vm_page_lock_queues(); - vm_page_flag_set(fs.m, PG_REFERENCED); /* * If the page is not wired down, then put it where the pageout daemon @@ -939,6 +964,7 @@ vnode_locked: vm_page_activate(fs.m); } vm_page_unlock_queues(); + vm_page_unlock(fs.m); vm_page_wakeup(fs.m); /* @@ -1015,9 +1041,11 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry) } if (m->valid == VM_PAGE_BITS_ALL && (m->flags & PG_FICTITIOUS) == 0) { + vm_page_lock(m); vm_page_lock_queues(); pmap_enter_quick(pmap, addr, m, entry->protection); vm_page_unlock_queues(); + vm_page_unlock(m); } VM_OBJECT_UNLOCK(lobject); } @@ -1080,6 +1108,7 @@ vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, { vm_paddr_t pa; vm_offset_t va; + vm_page_t m; pmap_t pmap; pmap = vm_map_pmap(map); @@ -1093,9 +1122,10 @@ vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, if (pa != 0) { pmap_change_wiring(pmap, va, FALSE); if (!fictitious) { - vm_page_lock_queues(); - vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1); - vm_page_unlock_queues(); + m = PHYS_TO_VM_PAGE(pa); + vm_page_lock(m); + vm_page_unwire(m, TRUE); + vm_page_unlock(m); } } } @@ -1238,13 +1268,22 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map, * Mark it no longer busy, and put it on the active list. 
*/ VM_OBJECT_LOCK(dst_object); - vm_page_lock_queues(); + if (upgrade) { + vm_page_lock(src_m); vm_page_unwire(src_m, 0); + vm_page_unlock(src_m); + + vm_page_lock(dst_m); vm_page_wire(dst_m); - } else + vm_page_unlock(dst_m); + } else { + vm_page_lock(dst_m); + vm_page_lock_queues(); vm_page_activate(dst_m); - vm_page_unlock_queues(); + vm_page_unlock_queues(); + vm_page_unlock(dst_m); + } vm_page_wakeup(dst_m); } VM_OBJECT_UNLOCK(dst_object); diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c index dbd5065d8..f86212f 100644 --- a/sys/vm/vm_glue.c +++ b/sys/vm/vm_glue.c @@ -257,16 +257,18 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset) if (m == NULL) goto out; if (rv != VM_PAGER_OK) { + vm_page_lock(m); vm_page_lock_queues(); vm_page_free(m); vm_page_unlock_queues(); + vm_page_unlock(m); m = NULL; goto out; } } - vm_page_lock_queues(); + vm_page_lock(m); vm_page_hold(m); - vm_page_unlock_queues(); + vm_page_unlock(m); vm_page_wakeup(m); out: VM_OBJECT_UNLOCK(object); @@ -300,9 +302,9 @@ vm_imgact_unmap_page(struct sf_buf *sf) m = sf_buf_page(sf); sf_buf_free(sf); sched_unpin(); - vm_page_lock_queues(); + vm_page_lock(m); vm_page_unhold(m); - vm_page_unlock_queues(); + vm_page_unlock(m); } void @@ -434,10 +436,12 @@ vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages) m = vm_page_lookup(ksobj, i); if (m == NULL) panic("vm_thread_dispose: kstack already missing?"); + vm_page_lock(m); vm_page_lock_queues(); vm_page_unwire(m, 0); vm_page_free(m); vm_page_unlock_queues(); + vm_page_unlock(m); } VM_OBJECT_UNLOCK(ksobj); vm_object_deallocate(ksobj); @@ -524,9 +528,9 @@ vm_thread_swapout(struct thread *td) if (m == NULL) panic("vm_thread_swapout: kstack already missing?"); vm_page_dirty(m); - vm_page_lock_queues(); + vm_page_lock(m); vm_page_unwire(m, 0); - vm_page_unlock_queues(); + vm_page_unlock(m); } VM_OBJECT_UNLOCK(ksobj); } @@ -538,23 +542,37 @@ static void vm_thread_swapin(struct thread *td) { vm_object_t ksobj; - vm_page_t m, ma[KSTACK_MAX_PAGES]; - int i, pages, rv; + vm_page_t ma[KSTACK_MAX_PAGES]; + int i, j, k, pages, rv; pages = td->td_kstack_pages; ksobj = td->td_kstack_obj; VM_OBJECT_LOCK(ksobj); - for (i = 0; i < pages; i++) { - m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY | + for (i = 0; i < pages; i++) + ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED); - if (m->valid != VM_PAGE_BITS_ALL) { - rv = vm_pager_get_pages(ksobj, &m, 1, 0); + for (i = 0; i < pages; i++) { + if (ma[i]->valid != VM_PAGE_BITS_ALL) { + KASSERT(ma[i]->oflags & VPO_BUSY, + ("lost busy 1")); + vm_object_pip_add(ksobj, 1); + for (j = i + 1; j < pages; j++) { + KASSERT(ma[j]->valid == VM_PAGE_BITS_ALL || + (ma[j]->oflags & VPO_BUSY), + ("lost busy 2")); + if (ma[j]->valid == VM_PAGE_BITS_ALL) + break; + } + rv = vm_pager_get_pages(ksobj, ma + i, j - i, 0); if (rv != VM_PAGER_OK) - panic("vm_thread_swapin: cannot get kstack for proc: %d", td->td_proc->p_pid); - m = vm_page_lookup(ksobj, i); - } - ma[i] = m; - vm_page_wakeup(m); + panic("vm_thread_swapin: cannot get kstack for proc: %d", + td->td_proc->p_pid); + vm_object_pip_wakeup(ksobj); + for (k = i; k < j; k++) + ma[k] = vm_page_lookup(ksobj, k); + vm_page_wakeup(ma[i]); + } else if (ma[i]->oflags & VPO_BUSY) + vm_page_wakeup(ma[i]); } VM_OBJECT_UNLOCK(ksobj); pmap_qenter(td->td_kstack, ma, pages); diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c index 739d289..95a4e9d 100644 --- a/sys/vm/vm_kern.c +++ b/sys/vm/vm_kern.c @@ -380,10 +380,8 @@ retry: i -= PAGE_SIZE; m = 
vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i)); - vm_page_lock_queues(); vm_page_unwire(m, 0); vm_page_free(m); - vm_page_unlock_queues(); } VM_OBJECT_UNLOCK(kmem_object); vm_map_delete(map, addr, addr + size); diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c index 1d22fa6..2a57e33 100644 --- a/sys/vm/vm_map.c +++ b/sys/vm/vm_map.c @@ -1412,7 +1412,11 @@ vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, } result = vm_map_insert(map, object, offset, start, start + length, prot, max, cow); - } while (result == KERN_NO_SPACE && find_space == VMFS_ALIGNED_SPACE); + } while (result == KERN_NO_SPACE && (find_space == VMFS_ALIGNED_SPACE +#ifdef VMFS_TLB_ALIGNED_SPACE + || find_space == VMFS_TLB_ALIGNED_SPACE +#endif + )); vm_map_unlock(map); return (result); } diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c index 88ed3d5..f9b3db3 100644 --- a/sys/vm/vm_mmap.c +++ b/sys/vm/vm_mmap.c @@ -866,6 +866,7 @@ RestartScan: */ if (m != NULL && m->valid != 0) { mincoreinfo = MINCORE_INCORE; + vm_page_lock(m); vm_page_lock_queues(); if (m->dirty || pmap_is_modified(m)) @@ -874,6 +875,7 @@ RestartScan: pmap_is_referenced(m)) mincoreinfo |= MINCORE_REFERENCED_OTHER; vm_page_unlock_queues(); + vm_page_unlock(m); } VM_OBJECT_UNLOCK(current->object.vm_object); } diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c index 7e4dbc8..f8c50bc 100644 --- a/sys/vm/vm_object.c +++ b/sys/vm/vm_object.c @@ -717,19 +717,21 @@ vm_object_terminate(vm_object_t object) * removes them from paging queues. Don't free wired pages, just * remove them from the object. */ - vm_page_lock_queues(); while ((p = TAILQ_FIRST(&object->memq)) != NULL) { KASSERT(!p->busy && (p->oflags & VPO_BUSY) == 0, ("vm_object_terminate: freeing busy page %p " "p->busy = %d, p->oflags %x\n", p, p->busy, p->oflags)); + vm_page_lock(p); + vm_page_lock_queues(); if (p->wire_count == 0) { vm_page_free(p); cnt.v_pfree++; } else { vm_page_remove(p); } + vm_page_unlock_queues(); + vm_page_unlock(p); } - vm_page_unlock_queues(); #if VM_NRESERVLEVEL > 0 if (__predict_false(!LIST_EMPTY(&object->rvq))) @@ -772,6 +774,7 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int int pagerflags; int curgeneration; + mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED); VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); if ((object->flags & OBJ_MIGHTBEDIRTY) == 0) return; @@ -789,7 +792,6 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int tend = end; } - vm_page_lock_queues(); /* * If the caller is smart and only msync()s a range he knows is * dirty, we may be able to avoid an object scan. This results in @@ -818,13 +820,19 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int ++tscan; continue; } + vm_page_lock(p); + vm_page_lock_queues(); vm_page_test_dirty(p); if (p->dirty == 0) { + vm_page_unlock_queues(); + vm_page_unlock(p); if (--scanlimit == 0) break; ++tscan; continue; } + vm_page_unlock_queues(); + vm_page_unlock(p); /* * If we have been asked to skip nosync pages and * this is a nosync page, we can't continue. @@ -842,6 +850,7 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int * page (i.e. had to sleep). */ tscan += vm_object_page_collect_flush(object, p, curgeneration, pagerflags); + } /* @@ -851,7 +860,6 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int * return immediately. 
*/ if (tscan >= tend && (tstart || tend < object->size)) { - vm_page_unlock_queues(); vm_object_clear_flag(object, OBJ_CLEANING); return; } @@ -871,8 +879,13 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int p->oflags |= VPO_CLEANCHK; if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) clearobjflags = 0; - else + else { + vm_page_lock(p); + vm_page_lock_queues(); pmap_remove_write(p); + vm_page_unlock_queues(); + vm_page_unlock(p); + } } if (clearobjflags && (tstart == 0) && (tend == object->size)) @@ -895,12 +908,17 @@ again: continue; } + vm_page_lock(p); + vm_page_lock_queues(); vm_page_test_dirty(p); if (p->dirty == 0) { + vm_page_unlock_queues(); + vm_page_unlock(p); p->oflags &= ~VPO_CLEANCHK; continue; } - + vm_page_unlock_queues(); + vm_page_unlock(p); /* * If we have been asked to skip nosync pages and this is a * nosync page, skip it. Note that the object flags were @@ -923,12 +941,10 @@ again: * Try to optimize the next page. If we can't we pick up * our (random) scan where we left off. */ - if (msync_flush_flags & MSYNC_FLUSH_SOFTSEQ) { + if (msync_flush_flags & MSYNC_FLUSH_SOFTSEQ) if ((p = vm_page_lookup(object, pi + n)) != NULL) goto again; - } } - vm_page_unlock_queues(); #if 0 VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC)?MNT_WAIT:0, curproc); #endif @@ -950,10 +966,11 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, vm_page_t mab[vm_pageout_page_count]; vm_page_t ma[vm_pageout_page_count]; - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED); + vm_page_lock_assert(p, MA_NOTOWNED); + VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); pi = p->pindex; while (vm_page_sleep_if_busy(p, TRUE, "vpcwai")) { - vm_page_lock_queues(); if (object->generation != curgeneration) { return(0); } @@ -968,11 +985,17 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, (tp->oflags & VPO_CLEANCHK) == 0) || (tp->busy != 0)) break; + vm_page_lock(tp); + vm_page_lock_queues(); vm_page_test_dirty(tp); if (tp->dirty == 0) { + vm_page_unlock(tp); + vm_page_unlock_queues(); tp->oflags &= ~VPO_CLEANCHK; break; } + vm_page_unlock(tp); + vm_page_unlock_queues(); maf[ i - 1 ] = tp; maxf++; continue; @@ -992,11 +1015,17 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, (tp->oflags & VPO_CLEANCHK) == 0) || (tp->busy != 0)) break; + vm_page_lock(tp); + vm_page_lock_queues(); vm_page_test_dirty(tp); if (tp->dirty == 0) { + vm_page_unlock_queues(); + vm_page_unlock(tp); tp->oflags &= ~VPO_CLEANCHK; break; } + vm_page_unlock_queues(); + vm_page_unlock(tp); mab[ i - 1 ] = tp; maxb++; continue; @@ -1022,7 +1051,11 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, vm_pageout_flush(ma, runlen, pagerflags); for (i = 0; i < runlen; i++) { if (ma[i]->dirty) { + vm_page_lock(ma[i]); + vm_page_lock_queues(); pmap_remove_write(ma[i]); + vm_page_unlock_queues(); + vm_page_unlock(ma[i]); ma[i]->oflags |= VPO_CLEANCHK; /* @@ -1162,7 +1195,8 @@ shadowlookup: (tobject->flags & OBJ_ONEMAPPING) == 0) { goto unlock_tobject; } - } + } else if (tobject->type == OBJT_PHYS) + goto unlock_tobject; m = vm_page_lookup(tobject, tpindex); if (m == NULL && advise == MADV_WILLNEED) { /* @@ -1189,19 +1223,16 @@ shadowlookup: VM_OBJECT_UNLOCK(tobject); tobject = backing_object; goto shadowlookup; - } + } else if (m->valid != VM_PAGE_BITS_ALL) + goto unlock_tobject; /* - * If the page is busy or not in a normal active state, - * we skip it. 
If the page is not managed there are no - * page queues to mess with. Things can break if we mess - * with pages in any of the below states. + * If the page is not in a normal state, skip it. */ + vm_page_lock(m); vm_page_lock_queues(); - if (m->hold_count || - m->wire_count || - (m->flags & PG_UNMANAGED) || - m->valid != VM_PAGE_BITS_ALL) { + if (m->hold_count != 0 || m->wire_count != 0) { vm_page_unlock_queues(); + vm_page_unlock(m); goto unlock_tobject; } if ((m->oflags & VPO_BUSY) || m->busy) { @@ -1213,6 +1244,7 @@ shadowlookup: */ vm_page_flag_set(m, PG_REFERENCED); vm_page_unlock_queues(); + vm_page_unlock(m); if (object != tobject) VM_OBJECT_UNLOCK(object); m->oflags |= VPO_WANTED; @@ -1247,6 +1279,7 @@ shadowlookup: vm_page_dontneed(m); } vm_page_unlock_queues(); + vm_page_unlock(m); if (advise == MADV_FREE && tobject->type == OBJT_SWAP) swap_pager_freespace(tobject, tpindex, 1); unlock_tobject: @@ -1409,7 +1442,6 @@ retry: m = TAILQ_NEXT(m, listq); } } - vm_page_lock_queues(); for (; m != NULL && (idx = m->pindex - offidxstart) < size; m = m_next) { m_next = TAILQ_NEXT(m, listq); @@ -1422,18 +1454,18 @@ retry: * not be changed by this operation. */ if ((m->oflags & VPO_BUSY) || m->busy) { - vm_page_unlock_queues(); VM_OBJECT_UNLOCK(new_object); m->oflags |= VPO_WANTED; msleep(m, VM_OBJECT_MTX(orig_object), PVM, "spltwt", 0); VM_OBJECT_LOCK(new_object); goto retry; } + vm_page_lock(m); vm_page_rename(m, new_object, idx); + vm_page_unlock(m); /* page automatically made dirty by rename and cache handled */ vm_page_busy(m); } - vm_page_unlock_queues(); if (orig_object->type == OBJT_SWAP) { /* * swap_pager_copy() can sleep, in which case the orig_object's @@ -1601,6 +1633,7 @@ vm_object_backing_scan(vm_object_t object, int op) * Page is out of the parent object's range, we * can simply destroy it. */ + vm_page_lock(p); vm_page_lock_queues(); KASSERT(!pmap_page_is_mapped(p), ("freeing mapped page %p", p)); @@ -1609,6 +1642,7 @@ vm_object_backing_scan(vm_object_t object, int op) else vm_page_remove(p); vm_page_unlock_queues(); + vm_page_unlock(p); p = next; continue; } @@ -1625,6 +1659,7 @@ vm_object_backing_scan(vm_object_t object, int op) * * Leave the parent's page alone */ + vm_page_lock(p); vm_page_lock_queues(); KASSERT(!pmap_page_is_mapped(p), ("freeing mapped page %p", p)); @@ -1633,6 +1668,7 @@ vm_object_backing_scan(vm_object_t object, int op) else vm_page_remove(p); vm_page_unlock_queues(); + vm_page_unlock(p); p = next; continue; } @@ -1652,9 +1688,9 @@ vm_object_backing_scan(vm_object_t object, int op) * If the page was mapped to a process, it can remain * mapped through the rename. */ - vm_page_lock_queues(); + vm_page_lock(p); vm_page_rename(p, object, new_pindex); - vm_page_unlock_queues(); + vm_page_unlock(p); /* page automatically made dirty by rename */ } p = next; @@ -1919,7 +1955,7 @@ again: p = TAILQ_NEXT(p, listq); } } - vm_page_lock_queues(); + /* * Assert: the variable p is either (1) the page with the * least pindex greater than or equal to the parameter pindex @@ -1938,6 +1974,8 @@ again: * cannot be freed. They can, however, be invalidated * if "clean_only" is FALSE. */ + vm_page_lock(p); + vm_page_lock_queues(); if ((wirings = p->wire_count) != 0 && (wirings = pmap_page_wired_mappings(p)) != p->wire_count) { /* Fictitious pages do not have managed mappings. 
*/ @@ -1949,6 +1987,8 @@ again: p->valid = 0; vm_page_undirty(p); } + vm_page_unlock_queues(); + vm_page_unlock(p); continue; } if (vm_page_sleep_if_busy(p, TRUE, "vmopar")) @@ -1957,16 +1997,20 @@ again: ("vm_object_page_remove: page %p is fictitious", p)); if (clean_only && p->valid) { pmap_remove_write(p); - if (p->dirty) + if (p->dirty) { + vm_page_unlock_queues(); + vm_page_unlock(p); continue; + } } pmap_remove_all(p); /* Account for removal of managed, wired mappings. */ if (wirings != 0) p->wire_count -= wirings; vm_page_free(p); + vm_page_unlock_queues(); + vm_page_unlock(p); } - vm_page_unlock_queues(); vm_object_pip_wakeup(object); skipmemq: if (__predict_false(object->cache != NULL)) @@ -2001,9 +2045,11 @@ vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end) if (m == NULL) break; if (rv != VM_PAGER_OK) { + vm_page_lock(m); vm_page_lock_queues(); vm_page_free(m); vm_page_unlock_queues(); + vm_page_unlock(m); break; } } diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c index 7c149c9..cc34c1b 100644 --- a/sys/vm/vm_page.c +++ b/sys/vm/vm_page.c @@ -115,6 +115,7 @@ __FBSDID("$FreeBSD$"); #include <sys/vnode.h> #include <vm/vm.h> +#include <vm/pmap.h> #include <vm/vm_param.h> #include <vm/vm_kern.h> #include <vm/vm_object.h> @@ -129,6 +130,24 @@ __FBSDID("$FreeBSD$"); #include <machine/md_var.h> +#if defined(__amd64__) || defined (__i386__) +extern struct sysctl_oid_list sysctl__vm_pmap_children; +#else +SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters"); +#endif + +static uint64_t pmap_tryrelock_calls; +SYSCTL_QUAD(_vm_pmap, OID_AUTO, tryrelock_calls, CTLFLAG_RD, + &pmap_tryrelock_calls, 0, "Number of tryrelock calls"); + +static int pmap_tryrelock_restart; +SYSCTL_INT(_vm_pmap, OID_AUTO, tryrelock_restart, CTLFLAG_RD, + &pmap_tryrelock_restart, 0, "Number of tryrelock restarts"); + +static int pmap_tryrelock_race; +SYSCTL_INT(_vm_pmap, OID_AUTO, tryrelock_race, CTLFLAG_RD, + &pmap_tryrelock_race, 0, "Number of tryrelock pmap race cases"); + /* * Associated with page of user-allocatable memory is a * page structure. @@ -138,6 +157,8 @@ struct vpgqueues vm_page_queues[PQ_COUNT]; struct vpglocks vm_page_queue_lock; struct vpglocks vm_page_queue_free_lock; +struct vpglocks pa_lock[PA_LOCK_COUNT] __aligned(CACHE_LINE_SIZE); + vm_page_t vm_page_array = 0; int vm_page_array_size = 0; long first_page = 0; @@ -158,6 +179,43 @@ CTASSERT(sizeof(u_long) >= 8); #endif /* + * Try to acquire a physical address lock while a pmap is locked. If we + * fail to trylock we unlock and lock the pmap directly and cache the + * locked pa in *locked. The caller should then restart their loop in case + * the virtual to physical mapping has changed. 
+ */ +int +vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked) +{ + vm_paddr_t lockpa; + uint32_t gen_count; + + gen_count = pmap->pm_gen_count; + atomic_add_long((volatile long *)&pmap_tryrelock_calls, 1); + lockpa = *locked; + *locked = pa; + if (lockpa) { + PA_LOCK_ASSERT(lockpa, MA_OWNED); + if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa)) + return (0); + PA_UNLOCK(lockpa); + } + if (PA_TRYLOCK(pa)) + return (0); + PMAP_UNLOCK(pmap); + atomic_add_int((volatile int *)&pmap_tryrelock_restart, 1); + PA_LOCK(pa); + PMAP_LOCK(pmap); + + if (pmap->pm_gen_count != gen_count + 1) { + pmap->pm_retries++; + atomic_add_int((volatile int *)&pmap_tryrelock_race, 1); + return (EAGAIN); + } + return (0); +} + +/* * vm_set_page_size: * * Sets the page size, perhaps based upon the memory @@ -271,6 +329,11 @@ vm_page_startup(vm_offset_t vaddr) mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL, MTX_DEF); + /* Setup page locks. */ + for (i = 0; i < PA_LOCK_COUNT; i++) + mtx_init(&pa_lock[i].data, "page lock", NULL, + MTX_DEF | MTX_RECURSE | MTX_DUPOK); + /* * Initialize the queue headers for the hold queue, the active queue, * and the inactive queue. @@ -489,7 +552,7 @@ void vm_page_hold(vm_page_t mem) { - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(mem, MA_OWNED); mem->hold_count++; } @@ -497,11 +560,14 @@ void vm_page_unhold(vm_page_t mem) { - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(mem, MA_OWNED); --mem->hold_count; KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!")); - if (mem->hold_count == 0 && VM_PAGE_INQUEUE2(mem, PQ_HOLD)) + if (mem->hold_count == 0 && VM_PAGE_INQUEUE2(mem, PQ_HOLD)) { + vm_page_lock_queues(); vm_page_free_toq(mem); + vm_page_unlock_queues(); + } } /* @@ -533,7 +599,7 @@ vm_page_free_zero(vm_page_t m) /* * vm_page_sleep: * - * Sleep and release the page queues lock. + * Sleep and release the page and page queues locks. * * The object containing the given page must be locked. */ @@ -542,10 +608,10 @@ vm_page_sleep(vm_page_t m, const char *msg) { VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); - if (!mtx_owned(&vm_page_queue_mtx)) - vm_page_lock_queues(); - vm_page_flag_set(m, PG_REFERENCED); - vm_page_unlock_queues(); + if (mtx_owned(&vm_page_queue_mtx)) + vm_page_unlock_queues(); + if (mtx_owned(vm_page_lockptr(m))) + vm_page_unlock(m); /* * It's possible that while we sleep, the page will get @@ -724,6 +790,8 @@ vm_page_remove(vm_page_t m) vm_object_t object; vm_page_t root; + if ((m->flags & PG_UNMANAGED) == 0) + vm_page_lock_assert(m, MA_OWNED); if ((object = m->object) == NULL) return; VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); @@ -731,7 +799,6 @@ vm_page_remove(vm_page_t m) m->oflags &= ~VPO_BUSY; vm_page_flash(m); } - mtx_assert(&vm_page_queue_mtx, MA_OWNED); /* * Now remove from the object's list of backed pages. @@ -1318,6 +1385,7 @@ vm_page_activate(vm_page_t m) { mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); if (VM_PAGE_GETKNOWNQUEUE2(m) != PQ_ACTIVE) { vm_pageq_remove(m); if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) { @@ -1425,6 +1493,7 @@ vm_page_free_toq(vm_page_t m) panic("vm_page_free: freeing wired page"); } if (m->hold_count != 0) { + vm_page_lock_assert(m, MA_OWNED); m->flags &= ~PG_ZERO; vm_page_enqueue(PQ_HOLD, m); } else { @@ -1463,7 +1532,7 @@ vm_page_free_toq(vm_page_t m) * another map, removing it from paging queues * as necessary. * - * The page queues must be locked. + * The page must be locked. * This routine may not block. 
*/ void @@ -1475,12 +1544,15 @@ vm_page_wire(vm_page_t m) * and only unqueue the page if it is on some queue (if it is unmanaged * it is already off the queues). */ - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); if (m->flags & PG_FICTITIOUS) return; if (m->wire_count == 0) { - if ((m->flags & PG_UNMANAGED) == 0) + if ((m->flags & PG_UNMANAGED) == 0) { + vm_page_lock_queues(); vm_pageq_remove(m); + vm_page_unlock_queues(); + } atomic_add_int(&cnt.v_wire_count, 1); } m->wire_count++; @@ -1512,28 +1584,31 @@ vm_page_wire(vm_page_t m) * be placed in the cache - for example, just after dirtying a page. * dirty pages in the cache are not allowed. * - * The page queues must be locked. + * The page must be locked. * This routine may not block. */ void vm_page_unwire(vm_page_t m, int activate) { - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + if ((m->flags & PG_UNMANAGED) == 0) + vm_page_lock_assert(m, MA_OWNED); if (m->flags & PG_FICTITIOUS) return; if (m->wire_count > 0) { m->wire_count--; if (m->wire_count == 0) { atomic_subtract_int(&cnt.v_wire_count, 1); - if (m->flags & PG_UNMANAGED) { - ; - } else if (activate) + if ((m->flags & PG_UNMANAGED) != 0) + return; + vm_page_lock_queues(); + if (activate) vm_page_enqueue(PQ_ACTIVE, m); else { vm_page_flag_clear(m, PG_WINATCFLS); vm_page_enqueue(PQ_INACTIVE, m); } + vm_page_unlock_queues(); } } else { panic("vm_page_unwire: invalid wire count: %d", m->wire_count); @@ -1556,6 +1631,7 @@ _vm_page_deactivate(vm_page_t m, int athead) { mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); /* * Ignore if already inactive. @@ -1590,6 +1666,7 @@ vm_page_try_to_cache(vm_page_t m) { mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); if (m->dirty || m->hold_count || m->busy || m->wire_count || (m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED)) { @@ -1613,6 +1690,7 @@ vm_page_try_to_free(vm_page_t m) { mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); if (m->object != NULL) VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); if (m->dirty || m->hold_count || m->busy || m->wire_count || @@ -1640,6 +1718,7 @@ vm_page_cache(vm_page_t m) vm_page_t root; mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); object = m->object; VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); if ((m->flags & PG_UNMANAGED) || (m->oflags & VPO_BUSY) || m->busy || @@ -1772,6 +1851,8 @@ vm_page_dontneed(vm_page_t m) int head; mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); + VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); dnw = ++dnweight; /* @@ -1826,15 +1907,25 @@ vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags) VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); retrylookup: if ((m = vm_page_lookup(object, pindex)) != NULL) { - if (vm_page_sleep_if_busy(m, TRUE, "pgrbwt")) { + if ((m->oflags & VPO_BUSY) != 0 || m->busy != 0) { + if ((allocflags & VM_ALLOC_RETRY) != 0) { + /* + * Reference the page before unlocking and + * sleeping so that the page daemon is less + * likely to reclaim it. 
+ */ + vm_page_lock_queues(); + vm_page_flag_set(m, PG_REFERENCED); + } + vm_page_sleep(m, "pgrbwt"); if ((allocflags & VM_ALLOC_RETRY) == 0) return (NULL); goto retrylookup; } else { if ((allocflags & VM_ALLOC_WIRED) != 0) { - vm_page_lock_queues(); + vm_page_lock(m); vm_page_wire(m); - vm_page_unlock_queues(); + vm_page_unlock(m); } if ((allocflags & VM_ALLOC_NOBUSY) == 0) vm_page_busy(m); @@ -2133,6 +2224,7 @@ vm_page_cowfault(vm_page_t m) vm_object_t object; vm_pindex_t pindex; + vm_page_lock_assert(m, MA_OWNED); object = m->object; VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); KASSERT(object->paging_in_progress != 0, @@ -2142,22 +2234,23 @@ vm_page_cowfault(vm_page_t m) retry_alloc: pmap_remove_all(m); + vm_page_unlock_queues(); vm_page_remove(m); mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY); if (mnew == NULL) { vm_page_insert(m, object, pindex); - vm_page_unlock_queues(); + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); VM_WAIT; VM_OBJECT_LOCK(object); if (m == vm_page_lookup(object, pindex)) { + vm_page_lock(m); vm_page_lock_queues(); goto retry_alloc; } else { /* * Page disappeared during the wait. */ - vm_page_lock_queues(); return; } } @@ -2168,7 +2261,12 @@ vm_page_cowfault(vm_page_t m) * waiting to allocate a page. If so, put things back * the way they were */ + vm_page_unlock(m); + vm_page_lock(mnew); + vm_page_lock_queues(); vm_page_free(mnew); + vm_page_unlock_queues(); + vm_page_unlock(mnew); vm_page_insert(m, object, pindex); } else { /* clear COW & copy page */ if (!so_zerocp_fullpage) @@ -2177,6 +2275,7 @@ vm_page_cowfault(vm_page_t m) vm_page_dirty(mnew); mnew->wire_count = m->wire_count - m->cow; m->wire_count = m->cow; + vm_page_unlock(m); } } @@ -2184,7 +2283,7 @@ void vm_page_cowclear(vm_page_t m) { - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); if (m->cow) { m->cow--; /* @@ -2200,11 +2299,13 @@ int vm_page_cowsetup(vm_page_t m) { - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); if (m->cow == USHRT_MAX - 1) return (EBUSY); m->cow++; + vm_page_lock_queues(); pmap_remove_write(m); + vm_page_unlock_queues(); return (0); } diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h index 662af98..da4d42a 100644 --- a/sys/vm/vm_page.h +++ b/sys/vm/vm_page.h @@ -90,20 +90,21 @@ * and sundry status bits. * * Fields in this structure are locked either by the lock on the - * object that the page belongs to (O) or by the lock on the page - * queues (P). + * object that the page belongs to (O), its corresponding page lock (P), + * or by the lock on the page queues (Q). 
+ * */ TAILQ_HEAD(pglist, vm_page); struct vm_page { - TAILQ_ENTRY(vm_page) pageq; /* queue info for FIFO queue or free list (P) */ + TAILQ_ENTRY(vm_page) pageq; /* queue info for FIFO queue or free list (Q) */ TAILQ_ENTRY(vm_page) listq; /* pages in same object (O) */ struct vm_page *left; /* splay tree link (O) */ struct vm_page *right; /* splay tree link (O) */ vm_object_t object; /* which object am I in (O,P)*/ - vm_pindex_t pindex; /* offset into object (O,P) */ + vm_pindex_t pindex; /* offset into object (O,Q) */ vm_paddr_t phys_addr; /* physical address of page */ struct md_page md; /* machine dependant stuff */ uint8_t queue; /* page queue index */ @@ -111,11 +112,11 @@ struct vm_page { u_short flags; /* see below */ uint8_t order; /* index of the buddy queue */ uint8_t pool; - u_short cow; /* page cow mapping count */ - u_int wire_count; /* wired down maps refs (P) */ - short hold_count; /* page hold count */ + u_short cow; /* page cow mapping count (Q) */ + u_int wire_count; /* wired down maps refs (Q) */ + short hold_count; /* page hold count (P) */ u_short oflags; /* page flags (O) */ - u_char act_count; /* page usage count */ + u_char act_count; /* page usage count (Q) */ u_char busy; /* page busy count (O) */ /* NOTE that these must support one bit per DEV_BSIZE in a page!!! */ /* so, on normal X86 kernels, they must be at least 8 bits wide */ @@ -177,9 +178,35 @@ struct vpglocks { } __aligned(CACHE_LINE_SIZE); extern struct vpglocks vm_page_queue_free_lock; +extern struct vpglocks pa_lock[]; -#define vm_page_queue_free_mtx vm_page_queue_free_lock.data +#if defined(__arm__) +#define PDRSHIFT PDR_SHIFT +#elif !defined(PDRSHIFT) +#define PDRSHIFT 21 +#endif +#define pa_index(pa) ((pa) >> PDRSHIFT) +#define PA_LOCKPTR(pa) &pa_lock[pa_index((pa)) % PA_LOCK_COUNT].data +#define PA_LOCKOBJPTR(pa) ((struct lock_object *)PA_LOCKPTR((pa))) +#define PA_LOCK(pa) mtx_lock(PA_LOCKPTR(pa)) +#define PA_TRYLOCK(pa) mtx_trylock(PA_LOCKPTR(pa)) +#define PA_UNLOCK(pa) mtx_unlock(PA_LOCKPTR(pa)) +#define PA_UNLOCK_COND(pa) \ + do { \ + if (pa) \ + PA_UNLOCK(pa); \ + } while (0) + +#define PA_LOCK_ASSERT(pa, a) mtx_assert(PA_LOCKPTR(pa), (a)) + +#define vm_page_lockptr(m) (PA_LOCKPTR(VM_PAGE_TO_PHYS((m)))) +#define vm_page_lock(m) mtx_lock(vm_page_lockptr((m))) +#define vm_page_unlock(m) mtx_unlock(vm_page_lockptr((m))) +#define vm_page_trylock(m) mtx_trylock(vm_page_lockptr((m))) +#define vm_page_lock_assert(m, a) mtx_assert(vm_page_lockptr((m)), (a)) + +#define vm_page_queue_free_mtx vm_page_queue_free_lock.data /* * These are the flags defined for vm_page. * @@ -324,6 +351,7 @@ void vm_page_dontneed(vm_page_t); void vm_page_deactivate (vm_page_t); void vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t); vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t); +int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *); void vm_page_remove (vm_page_t); void vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t); void vm_page_requeue(vm_page_t m); diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c index 735beee..9921a84 100644 --- a/sys/vm/vm_pageout.c +++ b/sys/vm/vm_pageout.c @@ -252,7 +252,9 @@ vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next) TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl, m, &marker, pageq); vm_page_unlock_queues(); + vm_page_unlock(m); VM_OBJECT_LOCK(object); + vm_page_lock(m); vm_page_lock_queues(); /* Page queue might have changed. 
*/ @@ -275,8 +277,7 @@ vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next) * late and we cannot do anything that will mess with the page. */ static int -vm_pageout_clean(m) - vm_page_t m; +vm_pageout_clean(vm_page_t m) { vm_object_t object; vm_page_t mc[2*vm_pageout_page_count]; @@ -284,7 +285,8 @@ vm_pageout_clean(m) int ib, is, page_base; vm_pindex_t pindex = m->pindex; - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_NOTOWNED); + vm_page_lock(m); VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); /* @@ -301,6 +303,7 @@ vm_pageout_clean(m) */ if ((m->hold_count != 0) || ((m->busy != 0) || (m->oflags & VPO_BUSY))) { + vm_page_unlock(m); return 0; } @@ -347,13 +350,19 @@ more: ib = 0; break; } + vm_page_lock(p); + vm_page_lock_queues(); vm_page_test_dirty(p); if (p->dirty == 0 || p->queue != PQ_INACTIVE || p->hold_count != 0) { /* may be undergoing I/O */ + vm_page_unlock(p); + vm_page_unlock_queues(); ib = 0; break; } + vm_page_unlock_queues(); + vm_page_unlock(p); mc[--page_base] = p; ++pageout_count; ++ib; @@ -374,12 +383,18 @@ more: if ((p->oflags & VPO_BUSY) || p->busy) { break; } + vm_page_lock(p); + vm_page_lock_queues(); vm_page_test_dirty(p); if (p->dirty == 0 || p->queue != PQ_INACTIVE || p->hold_count != 0) { /* may be undergoing I/O */ + vm_page_unlock_queues(); + vm_page_unlock(p); break; } + vm_page_unlock_queues(); + vm_page_unlock(p); mc[page_base + pageout_count] = p; ++pageout_count; ++is; @@ -393,6 +408,7 @@ more: if (ib && pageout_count < vm_pageout_page_count) goto more; + vm_page_unlock(m); /* * we allow reads during pageouts... */ @@ -416,8 +432,9 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags) int numpagedout = 0; int i; - mtx_assert(&vm_page_queue_mtx, MA_OWNED); VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED); + /* * Initiate I/O. Bump the vm_page_t->busy counter and * mark the pages read-only. 
@@ -433,17 +450,21 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags) ("vm_pageout_flush: partially invalid page %p index %d/%d", mc[i], i, count)); vm_page_io_start(mc[i]); + vm_page_lock(mc[i]); + vm_page_lock_queues(); pmap_remove_write(mc[i]); + vm_page_unlock(mc[i]); + vm_page_unlock_queues(); } - vm_page_unlock_queues(); vm_object_pip_add(object, count); vm_pager_put_pages(object, mc, count, flags, pageout_status); - vm_page_lock_queues(); for (i = 0; i < count; i++) { vm_page_t mt = mc[i]; + vm_page_lock(mt); + vm_page_lock_queues(); KASSERT(pageout_status[i] == VM_PAGER_PEND || (mt->flags & PG_WRITEABLE) == 0, ("vm_pageout_flush: page %p is not write protected", mt)); @@ -485,6 +506,8 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags) if (vm_page_count_severe()) vm_page_try_to_cache(mt); } + vm_page_unlock_queues(); + vm_page_unlock(mt); } return numpagedout; } @@ -508,17 +531,17 @@ vm_pageout_object_deactivate_pages(pmap, first_object, desired) { vm_object_t backing_object, object; vm_page_t p, next; - int actcount, rcount, remove_mode; + int actcount, remove_mode; VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED); if (first_object->type == OBJT_DEVICE || - first_object->type == OBJT_SG || - first_object->type == OBJT_PHYS) + first_object->type == OBJT_SG) return; for (object = first_object;; object = backing_object) { if (pmap_resident_count(pmap) <= desired) goto unlock_return; - if (object->paging_in_progress) + VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + if (object->type == OBJT_PHYS || object->paging_in_progress) goto unlock_return; remove_mode = 0; @@ -527,22 +550,23 @@ vm_pageout_object_deactivate_pages(pmap, first_object, desired) /* * scan the objects entire memory queue */ - rcount = object->resident_page_count; p = TAILQ_FIRST(&object->memq); - vm_page_lock_queues(); - while (p && (rcount-- > 0)) { - if (pmap_resident_count(pmap) <= desired) { - vm_page_unlock_queues(); + while (p != NULL) { + if (pmap_resident_count(pmap) <= desired) goto unlock_return; - } next = TAILQ_NEXT(p, listq); + if ((p->oflags & VPO_BUSY) != 0 || p->busy != 0) { + p = next; + continue; + } + vm_page_lock(p); + vm_page_lock_queues(); cnt.v_pdpages++; if (p->wire_count != 0 || p->hold_count != 0 || - p->busy != 0 || - (p->oflags & VPO_BUSY) || - (p->flags & PG_UNMANAGED) || !pmap_page_exists_quick(pmap, p)) { + vm_page_unlock_queues(); + vm_page_unlock(p); p = next; continue; } @@ -576,9 +600,10 @@ vm_pageout_object_deactivate_pages(pmap, first_object, desired) } else if (p->queue == PQ_INACTIVE) { pmap_remove_all(p); } + vm_page_unlock_queues(); + vm_page_unlock(p); p = next; } - vm_page_unlock_queues(); if ((backing_object = object->backing_object) == NULL) goto unlock_return; VM_OBJECT_LOCK(backing_object); @@ -745,7 +770,6 @@ rescan0: } next = TAILQ_NEXT(m, pageq); - object = m->object; /* * skip marker pages @@ -753,26 +777,35 @@ rescan0: if (m->flags & PG_MARKER) continue; + if (!vm_page_trylock(m)) { + addl_page_shortage++; + continue; + } + /* * A held page may be undergoing I/O, so skip it. */ - if (m->hold_count) { + if (m->hold_count || (object = m->object) == NULL) { + vm_page_unlock(m); vm_page_requeue(m); addl_page_shortage++; continue; } + /* * Don't mess with busy pages, keep in the front of the * queue, most likely are being paged out. 
*/ if (!VM_OBJECT_TRYLOCK(object) && (!vm_pageout_fallback_object_lock(m, &next) || - m->hold_count != 0)) { + m->hold_count != 0)) { VM_OBJECT_UNLOCK(object); + vm_page_unlock(m); addl_page_shortage++; continue; } if (m->busy || (m->oflags & VPO_BUSY)) { + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); addl_page_shortage++; continue; @@ -801,6 +834,7 @@ rescan0: vm_page_activate(m); VM_OBJECT_UNLOCK(object); m->act_count += (actcount + ACT_ADVANCE); + vm_page_unlock(m); continue; } @@ -816,6 +850,7 @@ rescan0: vm_page_activate(m); VM_OBJECT_UNLOCK(object); m->act_count += (actcount + ACT_ADVANCE + 1); + vm_page_unlock(m); continue; } @@ -901,6 +936,7 @@ rescan0: * Those objects are in a "rundown" state. */ if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) { + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); vm_page_requeue(m); continue; @@ -940,6 +976,8 @@ rescan0: * of time. */ if (object->type == OBJT_VNODE) { + vm_page_unlock_queues(); + vm_page_unlock(m); vp = object->handle; if (vp->v_type == VREG && vn_start_write(vp, &mp, V_NOWAIT) != 0) { @@ -947,11 +985,11 @@ rescan0: ++pageout_lock_miss; if (object->flags & OBJ_MIGHTBEDIRTY) vnodes_skipped++; + vm_page_lock_queues(); goto unlock_and_continue; } KASSERT(mp != NULL, ("vp %p with NULL v_mount", vp)); - vm_page_unlock_queues(); vm_object_reference_locked(object); VM_OBJECT_UNLOCK(object); vfslocked = VFS_LOCK_GIANT(vp->v_mount); @@ -966,6 +1004,7 @@ rescan0: goto unlock_and_continue; } VM_OBJECT_LOCK(object); + vm_page_lock(m); vm_page_lock_queues(); /* * The page might have been moved to another @@ -976,6 +1015,7 @@ rescan0: if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE || m->object != object || TAILQ_NEXT(m, pageq) != &marker) { + vm_page_unlock(m); if (object->flags & OBJ_MIGHTBEDIRTY) vnodes_skipped++; goto unlock_and_continue; @@ -988,6 +1028,7 @@ rescan0: * statistics are more correct if we don't. */ if (m->busy || (m->oflags & VPO_BUSY)) { + vm_page_unlock(m); goto unlock_and_continue; } @@ -996,12 +1037,14 @@ rescan0: * be undergoing I/O, so skip it */ if (m->hold_count) { + vm_page_unlock(m); vm_page_requeue(m); if (object->flags & OBJ_MIGHTBEDIRTY) vnodes_skipped++; goto unlock_and_continue; } } + vm_page_unlock(m); /* * If a page is dirty, then it is either being washed @@ -1013,11 +1056,14 @@ rescan0: * the (future) cleaned page. Otherwise we could wind * up laundering or cleaning too many pages. 
*/ + vm_page_unlock_queues(); if (vm_pageout_clean(m) != 0) { --page_shortage; --maxlaunder; } + vm_page_lock_queues(); unlock_and_continue: + vm_page_lock_assert(m, MA_NOTOWNED); VM_OBJECT_UNLOCK(object); if (mp != NULL) { vm_page_unlock_queues(); @@ -1031,8 +1077,10 @@ unlock_and_continue: next = TAILQ_NEXT(&marker, pageq); TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq); + vm_page_lock_assert(m, MA_NOTOWNED); continue; } + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); } @@ -1051,6 +1099,7 @@ unlock_and_continue: */ pcount = cnt.v_active_count; m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl); + mtx_assert(&vm_page_queue_mtx, MA_OWNED); while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) { @@ -1063,9 +1112,14 @@ unlock_and_continue: m = next; continue; } + if (!vm_page_trylock(m) || (object = m->object) == NULL) { + m = next; + continue; + } if (!VM_OBJECT_TRYLOCK(object) && !vm_pageout_fallback_object_lock(m, &next)) { VM_OBJECT_UNLOCK(object); + vm_page_unlock(m); m = next; continue; } @@ -1076,6 +1130,7 @@ unlock_and_continue: if ((m->busy != 0) || (m->oflags & VPO_BUSY) || (m->hold_count != 0)) { + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); vm_page_requeue(m); m = next; @@ -1135,6 +1190,7 @@ unlock_and_continue: vm_page_requeue(m); } } + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); m = next; } @@ -1314,15 +1370,19 @@ vm_pageout_page_stats() ("vm_pageout_page_stats: page %p isn't active", m)); next = TAILQ_NEXT(m, pageq); - object = m->object; - if ((m->flags & PG_MARKER) != 0) { m = next; continue; } + vm_page_lock_assert(m, MA_NOTOWNED); + if (vm_page_trylock(m) == 0 || (object = m->object) == NULL) { + m = next; + continue; + } if (!VM_OBJECT_TRYLOCK(object) && !vm_pageout_fallback_object_lock(m, &next)) { VM_OBJECT_UNLOCK(object); + vm_page_unlock(m); m = next; continue; } @@ -1333,6 +1393,7 @@ vm_pageout_page_stats() if ((m->busy != 0) || (m->oflags & VPO_BUSY) || (m->hold_count != 0)) { + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); vm_page_requeue(m); m = next; @@ -1369,6 +1430,7 @@ vm_pageout_page_stats() vm_page_requeue(m); } } + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); m = next; } diff --git a/sys/vm/vm_param.h b/sys/vm/vm_param.h index 2ff2603..c404989 100644 --- a/sys/vm/vm_param.h +++ b/sys/vm/vm_param.h @@ -126,6 +126,14 @@ struct xswdev { #define KERN_NOT_RECEIVER 7 #define KERN_NO_ACCESS 8 +#ifndef PA_LOCK_COUNT +#ifdef SMP +#define PA_LOCK_COUNT 32 +#else +#define PA_LOCK_COUNT 1 +#endif /* !SMP */ +#endif /* !PA_LOCK_COUNT */ + #ifndef ASSEMBLER #ifdef _KERNEL #define num_pages(x) \ diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c index aedc794..eb21c60 100644 --- a/sys/vm/vnode_pager.c +++ b/sys/vm/vnode_pager.c @@ -429,9 +429,11 @@ vnode_pager_setsize(vp, nsize) * bits. This would prevent bogus_page * replacement from working properly. 
*/ + vm_page_lock(m); vm_page_lock_queues(); vm_page_clear_dirty(m, base, PAGE_SIZE - base); vm_page_unlock_queues(); + vm_page_unlock(m); } else if ((nsize & PAGE_MASK) && __predict_false(object->cache != NULL)) { vm_page_cache_free(object, OFF_TO_IDX(nsize), @@ -719,11 +721,15 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage) error = VOP_BMAP(vp, foff / bsize, &bo, &reqblock, NULL, NULL); if (error == EOPNOTSUPP) { VM_OBJECT_LOCK(object); - vm_page_lock_queues(); + for (i = 0; i < count; i++) - if (i != reqpage) + if (i != reqpage) { + vm_page_lock(m[i]); + vm_page_lock_queues(); vm_page_free(m[i]); - vm_page_unlock_queues(); + vm_page_unlock_queues(); + vm_page_unlock(m[i]); + } PCPU_INC(cnt.v_vnodein); PCPU_INC(cnt.v_vnodepgsin); error = vnode_pager_input_old(object, m[reqpage]); @@ -731,11 +737,14 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage) return (error); } else if (error != 0) { VM_OBJECT_LOCK(object); - vm_page_lock_queues(); for (i = 0; i < count; i++) - if (i != reqpage) + if (i != reqpage) { + vm_page_lock(m[i]); + vm_page_lock_queues(); vm_page_free(m[i]); - vm_page_unlock_queues(); + vm_page_unlock_queues(); + vm_page_unlock(m[i]); + } VM_OBJECT_UNLOCK(object); return (VM_PAGER_ERROR); @@ -747,11 +756,14 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage) } else if ((PAGE_SIZE / bsize) > 1 && (vp->v_mount->mnt_stat.f_type != nfs_mount_type)) { VM_OBJECT_LOCK(object); - vm_page_lock_queues(); for (i = 0; i < count; i++) - if (i != reqpage) + if (i != reqpage) { + vm_page_lock(m[i]); + vm_page_lock_queues(); vm_page_free(m[i]); - vm_page_unlock_queues(); + vm_page_unlock_queues(); + vm_page_unlock(m[i]); + } VM_OBJECT_UNLOCK(object); PCPU_INC(cnt.v_vnodein); PCPU_INC(cnt.v_vnodepgsin); @@ -765,11 +777,14 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage) */ VM_OBJECT_LOCK(object); if (m[reqpage]->valid == VM_PAGE_BITS_ALL) { - vm_page_lock_queues(); for (i = 0; i < count; i++) - if (i != reqpage) + if (i != reqpage) { + vm_page_lock(m[i]); + vm_page_lock_queues(); vm_page_free(m[i]); - vm_page_unlock_queues(); + vm_page_unlock_queues(); + vm_page_unlock(m[i]); + } VM_OBJECT_UNLOCK(object); return VM_PAGER_OK; } else if (reqblock == -1) { @@ -777,11 +792,14 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage) KASSERT(m[reqpage]->dirty == 0, ("vnode_pager_generic_getpages: page %p is dirty", m)); m[reqpage]->valid = VM_PAGE_BITS_ALL; - vm_page_lock_queues(); for (i = 0; i < count; i++) - if (i != reqpage) + if (i != reqpage) { + vm_page_lock(m[i]); + vm_page_lock_queues(); vm_page_free(m[i]); - vm_page_unlock_queues(); + vm_page_unlock_queues(); + vm_page_unlock(m[i]); + } VM_OBJECT_UNLOCK(object); return (VM_PAGER_OK); } @@ -800,11 +818,14 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage) if (vnode_pager_addr(vp, IDX_TO_OFF(m[i]->pindex), &firstaddr, &runpg) != 0) { VM_OBJECT_LOCK(object); - vm_page_lock_queues(); for (; i < count; i++) - if (i != reqpage) + if (i != reqpage) { + vm_page_lock(m[i]); + vm_page_lock_queues(); vm_page_free(m[i]); - vm_page_unlock_queues(); + vm_page_unlock_queues(); + vm_page_unlock(m[i]); + } VM_OBJECT_UNLOCK(object); return (VM_PAGER_ERROR); } @@ -818,9 +839,11 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage) (object->un_pager.vnp.vnp_size >> 32), (uintmax_t)object->un_pager.vnp.vnp_size); } + vm_page_lock(m[i]); vm_page_lock_queues(); vm_page_free(m[i]); vm_page_unlock_queues(); + vm_page_unlock(m[i]); VM_OBJECT_UNLOCK(object); runend = i + 1; first = runend; @@ -829,18 +852,24 @@ 
vnode_pager_generic_getpages(vp, m, bytecount, reqpage) runend = i + runpg; if (runend <= reqpage) { VM_OBJECT_LOCK(object); - vm_page_lock_queues(); - for (j = i; j < runend; j++) + for (j = i; j < runend; j++) { + vm_page_lock(m[j]); + vm_page_lock_queues(); vm_page_free(m[j]); - vm_page_unlock_queues(); + vm_page_unlock_queues(); + vm_page_unlock(m[j]); + } VM_OBJECT_UNLOCK(object); } else { if (runpg < (count - first)) { VM_OBJECT_LOCK(object); - vm_page_lock_queues(); - for (i = first + runpg; i < count; i++) + for (i = first + runpg; i < count; i++) { + vm_page_lock(m[i]); + vm_page_lock_queues(); vm_page_free(m[i]); - vm_page_unlock_queues(); + vm_page_unlock_queues(); + vm_page_unlock(m[i]); + } VM_OBJECT_UNLOCK(object); count = first + runpg; } @@ -931,13 +960,14 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage) relpbuf(bp, &vnode_pbuf_freecnt); VM_OBJECT_LOCK(object); - vm_page_lock_queues(); for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) { vm_page_t mt; nextoff = tfoff + PAGE_SIZE; mt = m[i]; + vm_page_lock(mt); + vm_page_lock_queues(); if (nextoff <= object->un_pager.vnp.vnp_size) { /* * Read filled up entire page. @@ -989,8 +1019,9 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage) vm_page_free(mt); } } + vm_page_unlock_queues(); + vm_page_unlock(mt); } - vm_page_unlock_queues(); VM_OBJECT_UNLOCK(object); if (error) { printf("vnode_pager_getpages: I/O read error\n"); @@ -1113,10 +1144,12 @@ vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals) maxsize = object->un_pager.vnp.vnp_size - poffset; ncount = btoc(maxsize); if ((pgoff = (int)maxsize & PAGE_MASK) != 0) { + vm_page_lock(m[ncount - 1]); vm_page_lock_queues(); vm_page_clear_dirty(m[ncount - 1], pgoff, PAGE_SIZE - pgoff); vm_page_unlock_queues(); + vm_page_unlock(m[ncount - 1]); } } else { maxsize = 0; |
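For reference, the vm_page.h hunk above derives each page's lock from its physical address: pa_index() discards the low PDRSHIFT bits, so all pages within one superpage-sized region (2 MB with the default shift of 21) share a lock, and the index is folded modulo PA_LOCK_COUNT (32 on SMP kernels, per the vm_param.h hunk) into the cache-line-aligned pa_lock[] array. A rough userspace model of that mapping, with the shift and lock count copied from the diff and everything else illustrative:

/*
 * Model of the PA_LOCKPTR() scheme from the vm_page.h hunk: a fixed
 * array of mutexes indexed by hashing the physical address.  Pages in
 * the same (1 << PDRSHIFT)-byte region share a lock.
 */
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>

typedef uint64_t vm_paddr_t;

#define PDRSHIFT        21              /* default from the diff */
#define PA_LOCK_COUNT   32              /* SMP value from vm_param.h */

static pthread_mutex_t pa_lock[PA_LOCK_COUNT];

#define pa_index(pa)    ((pa) >> PDRSHIFT)
#define PA_LOCKPTR(pa)  (&pa_lock[pa_index(pa) % PA_LOCK_COUNT])

int
main(void)
{
        vm_paddr_t pa = 0x40001000;

        for (int i = 0; i < PA_LOCK_COUNT; i++)
                pthread_mutex_init(&pa_lock[i], NULL);

        /* Two pages in the same 2 MB region map to the same lock. */
        printf("locks for %#" PRIx64 " and %#" PRIx64 " are %s\n",
            pa, pa + 4096,
            PA_LOCKPTR(pa) == PA_LOCKPTR(pa + 4096) ? "shared" : "distinct");

        pthread_mutex_lock(PA_LOCKPTR(pa));     /* PA_LOCK(pa) */
        pthread_mutex_unlock(PA_LOCKPTR(pa));   /* PA_UNLOCK(pa) */
        return (0);
}

Sharing one lock per superpage-sized region trades some false contention for a small, cache-friendly lock table.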
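vm_page_pa_tryrelock(), added in the vm_page.c hunk, handles the lock-order problem between the pmap lock and the new page locks: with a pmap lock held the code may only trylock a page lock, and on failure it must drop the pmap lock, block on the page lock, retake the pmap lock, and return EAGAIN when the pmap's generation count shows the mapping may have changed in the window, so the caller restarts its lookup. Below is a simplified, runnable model in which any pmap writer bumps gen_count under the pmap lock; the kernel version differs in that it compares against gen_count + 1 (PMAP_LOCK itself appears to advance the counter) and caches the previously locked address so repeated calls on the same lock bucket return immediately.

/*
 * Simplified model of vm_page_pa_tryrelock(): never sleep on a page
 * lock while a pmap lock is held.  On trylock failure, drop the pmap
 * lock, block on the page lock, retake the pmap lock, and let a
 * generation counter tell the caller whether the pmap changed in the
 * meantime (EAGAIN means "restart the lookup").  Names illustrative.
 */
#include <errno.h>
#include <pthread.h>
#include <stdint.h>

struct pmap {
        pthread_mutex_t lock;           /* PMAP_LOCK */
        uint32_t gen_count;             /* bumped by pmap writers */
};

/* Called with pmap->lock held; returns with it and *pagelock held. */
static int
pa_tryrelock(struct pmap *pmap, pthread_mutex_t *pagelock)
{
        uint32_t gen;

        gen = pmap->gen_count;
        if (pthread_mutex_trylock(pagelock) == 0)       /* PA_TRYLOCK */
                return (0);

        pthread_mutex_unlock(&pmap->lock);      /* PMAP_UNLOCK */
        pthread_mutex_lock(pagelock);           /* PA_LOCK: may sleep */
        pthread_mutex_lock(&pmap->lock);        /* PMAP_LOCK */

        /* The mapping may have changed while the pmap was unlocked. */
        if (pmap->gen_count != gen)
                return (EAGAIN);                /* caller restarts its loop */
        return (0);
}

int
main(void)
{
        struct pmap pm = { .lock = PTHREAD_MUTEX_INITIALIZER, .gen_count = 0 };
        pthread_mutex_t pagelock = PTHREAD_MUTEX_INITIALIZER;

        pthread_mutex_lock(&pm.lock);           /* caller holds the pmap lock */
        return (pa_tryrelock(&pm, &pagelock));  /* 0: both locks now held */
}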
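Lastly, the vm_pageout.c and vm_contig.c hunks adopt a trylock-and-skip idiom for the queue scans: when a page's lock is contended, the scan counts the page as skipped (or toward addl_page_shortage) and moves on, instead of sleeping while the scan holds the queue lock. A toy model of that scan, with illustrative names throughout:

/*
 * Model of the new page-scan idiom ("if (!vm_page_trylock(m))
 * continue;"): trylock each page and skip it on contention rather
 * than stalling the whole scan.  pthread mutexes stand in for mtx(9).
 */
#include <pthread.h>
#include <stdio.h>

#define NPAGES  4

static pthread_mutex_t page_locks[NPAGES];

int
main(void)
{
        int scanned = 0, skipped = 0;

        for (int i = 0; i < NPAGES; i++)
                pthread_mutex_init(&page_locks[i], NULL);
        pthread_mutex_lock(&page_locks[2]);     /* simulate contention */

        for (int i = 0; i < NPAGES; i++) {
                if (pthread_mutex_trylock(&page_locks[i]) != 0) {
                        skipped++;              /* busy: revisit next pass */
                        continue;
                }
                scanned++;                      /* ... examine the page ... */
                pthread_mutex_unlock(&page_locks[i]);
        }
        printf("scanned %d, skipped %d\n", scanned, skipped);
        return (0);
}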