author | alc <alc@FreeBSD.org> | 2002-11-10 07:12:04 +0000
---|---|---
committer | alc <alc@FreeBSD.org> | 2002-11-10 07:12:04 +0000
commit | fc8a5bc419be7f957bbf8bf9e3b3a1a081ec1af1 (patch) |
tree | c2353317d021437c05c64b489549bbc567f14a5c | /sys
parent | 55c878faa1969b792313405f42e5c61771d3f9d3 (diff) |
download | FreeBSD-src-fc8a5bc419be7f957bbf8bf9e3b3a1a081ec1af1.zip | FreeBSD-src-fc8a5bc419be7f957bbf8bf9e3b3a1a081ec1af1.tar.gz
When prot is VM_PROT_NONE, call pmap_page_protect() directly rather than
indirectly through vm_page_protect(). The one remaining page flag that
is updated by vm_page_protect() is already being updated by our various
pmap implementations.
Note: A later commit will similarly change the VM_PROT_READ case and
eliminate vm_page_protect().
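
For context, vm_page_protect() was at this point a thin wrapper that reached the pmap layer indirectly and also maintained one page flag. The sketch below is an approximation for illustration only, not the exact FreeBSD source; in particular, PG_WRITEABLE is one reading of "the one remaining page flag" mentioned above. It shows why, for VM_PROT_NONE, the callers changed in this commit can call pmap_page_protect() directly: the flag update in the wrapper duplicates work the pmap implementations already do.

    /*
     * Rough sketch (not verbatim) of the vm_page_protect() wrapper that the
     * VM_PROT_NONE callers are bypassing in this commit.
     */
    static __inline void
    vm_page_protect(vm_page_t mem, int prot)
    {
        if (prot == VM_PROT_NONE) {
            if (mem->flags & PG_WRITEABLE) {
                /*
                 * Per the commit message, the pmap implementations
                 * already update this flag, so the wrapper-level
                 * bookkeeping is redundant for VM_PROT_NONE.
                 */
                pmap_page_protect(mem, VM_PROT_NONE);
                vm_page_flag_clear(mem, PG_WRITEABLE);
            }
        } else if (prot == VM_PROT_READ && (mem->flags & PG_WRITEABLE)) {
            pmap_page_protect(mem, VM_PROT_READ);
            vm_page_flag_clear(mem, PG_WRITEABLE);
        }
    }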
Diffstat (limited to 'sys')
-rw-r--r-- | sys/kern/kern_exec.c | 2
-rw-r--r-- | sys/kern/vfs_bio.c | 4
-rw-r--r-- | sys/vm/vm_fault.c | 4
-rw-r--r-- | sys/vm/vm_object.c | 16
-rw-r--r-- | sys/vm/vm_page.c | 6
-rw-r--r-- | sys/vm/vm_pageout.c | 10
-rw-r--r-- | sys/vm/vnode_pager.c | 2
7 files changed, 22 insertions, 22 deletions
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 0241c4d..0dc89f3 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -771,7 +771,7 @@ exec_map_first_page(imgp)
         (ma[0]->valid == 0)) {
         if (ma[0]) {
             vm_page_lock_queues();
-            vm_page_protect(ma[0], VM_PROT_NONE);
+            pmap_page_protect(ma[0], VM_PROT_NONE);
             vm_page_free(ma[0]);
             vm_page_unlock_queues();
         }
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 4c25621..e4f41e4 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -1504,7 +1504,7 @@ vfs_vmio_release(bp)
         if ((bp->b_flags & B_ASYNC) == 0 && !m->valid &&
             m->hold_count == 0) {
             vm_page_busy(m);
-            vm_page_protect(m, VM_PROT_NONE);
+            pmap_page_protect(m, VM_PROT_NONE);
             vm_page_free(m);
         } else if (bp->b_flags & B_DIRECT) {
             vm_page_try_to_free(m);
@@ -3268,7 +3268,7 @@ retry:
              * It may not work properly with small-block devices.
              * We need to find a better way.
              */
-            vm_page_protect(m, VM_PROT_NONE);
+            pmap_page_protect(m, VM_PROT_NONE);
             if (clear_modify)
                 vfs_page_set_valid(bp, foff, i, m);
             else if (m->valid == VM_PAGE_BITS_ALL &&
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 058107e..9bfe847 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -474,7 +474,7 @@ readrest:
                 if (mt->dirty == 0)
                     vm_page_test_dirty(mt);
                 if (mt->dirty) {
-                    vm_page_protect(mt, VM_PROT_NONE);
+                    pmap_page_protect(mt, VM_PROT_NONE);
                     vm_page_deactivate(mt);
                 } else {
                     vm_page_cache(mt);
@@ -700,7 +700,7 @@ readrest:
              * get rid of the unnecessary page
              */
             vm_page_lock_queues();
-            vm_page_protect(fs.first_m, VM_PROT_NONE);
+            pmap_page_protect(fs.first_m, VM_PROT_NONE);
             vm_page_free(fs.first_m);
             vm_page_unlock_queues();
             fs.first_m = NULL;
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index bf34508..3a27442 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -990,7 +990,7 @@ vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
         return;
     TAILQ_FOREACH(p, &object->memq, listq) {
         if (p->pindex >= start && p->pindex < end)
-            vm_page_protect(p, VM_PROT_NONE);
+            pmap_page_protect(p, VM_PROT_NONE);
     }
     if ((start == 0) && (object->size == end))
         vm_object_clear_flag(object, OBJ_WRITEABLE);
@@ -1439,7 +1439,7 @@ vm_object_backing_scan(vm_object_t object, int op)
              * can simply destroy it.
              */
             vm_page_lock_queues();
-            vm_page_protect(p, VM_PROT_NONE);
+            pmap_page_protect(p, VM_PROT_NONE);
             vm_page_free(p);
             vm_page_unlock_queues();
             p = next;
@@ -1459,7 +1459,7 @@ vm_object_backing_scan(vm_object_t object, int op)
              * Leave the parent's page alone
              */
             vm_page_lock_queues();
-            vm_page_protect(p, VM_PROT_NONE);
+            pmap_page_protect(p, VM_PROT_NONE);
             vm_page_free(p);
             vm_page_unlock_queues();
             p = next;
@@ -1746,7 +1746,7 @@ again:
             next = TAILQ_NEXT(p, listq);
             if (all || ((start <= p->pindex) && (p->pindex < end))) {
                 if (p->wire_count != 0) {
-                    vm_page_protect(p, VM_PROT_NONE);
+                    pmap_page_protect(p, VM_PROT_NONE);
                     if (!clean_only)
                         p->valid = 0;
                     continue;
@@ -1765,7 +1765,7 @@ again:
                     continue;
                 }
                 vm_page_busy(p);
-                vm_page_protect(p, VM_PROT_NONE);
+                pmap_page_protect(p, VM_PROT_NONE);
                 vm_page_free(p);
             }
         }
@@ -1773,7 +1773,7 @@ again:
         while (size > 0) {
             if ((p = vm_page_lookup(object, start)) != NULL) {
                 if (p->wire_count != 0) {
-                    vm_page_protect(p, VM_PROT_NONE);
+                    pmap_page_protect(p, VM_PROT_NONE);
                     if (!clean_only)
                         p->valid = 0;
                     start += 1;
@@ -1797,7 +1797,7 @@ again:
                     }
                 }
                 vm_page_busy(p);
-                vm_page_protect(p, VM_PROT_NONE);
+                pmap_page_protect(p, VM_PROT_NONE);
                 vm_page_free(p);
             }
             start += 1;
@@ -1968,7 +1968,7 @@ vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa)
                 vm_page_unlock_queues();
             }
 
-            vm_page_protect(m_in, VM_PROT_NONE);
+            pmap_page_protect(m_in, VM_PROT_NONE);
             pmap_copy_page(m_in, m_out);
             m_out->valid = m_in->valid;
             vm_page_dirty(m_out);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index a8052f3..b882991 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -878,7 +878,7 @@ loop:
         }
         KASSERT(m->dirty == 0, ("Found dirty cache page %p", m));
         vm_page_busy(m);
-        vm_page_protect(m, VM_PROT_NONE);
+        pmap_page_protect(m, VM_PROT_NONE);
         vm_page_free(m);
         vm_page_unlock_queues();
         goto loop;
@@ -1384,7 +1384,7 @@ vm_page_try_to_free(vm_page_t m)
     if (m->dirty)
         return (0);
     vm_page_busy(m);
-    vm_page_protect(m, VM_PROT_NONE);
+    pmap_page_protect(m, VM_PROT_NONE);
     vm_page_free(m);
     return (1);
 }
@@ -1413,7 +1413,7 @@ vm_page_cache(vm_page_t m)
      * Remove all pmaps and indicate that the page is not
      * writeable or mapped.
      */
-    vm_page_protect(m, VM_PROT_NONE);
+    pmap_page_protect(m, VM_PROT_NONE);
     if (m->dirty != 0) {
         panic("vm_page_cache: caching a dirty page, pindex: %ld",
             (long)m->pindex);
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 14d058e..00569a0 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -512,7 +512,7 @@ vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
             if ((p->flags & PG_REFERENCED) == 0) {
                 p->act_count -= min(p->act_count, ACT_DECLINE);
                 if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
-                    vm_page_protect(p, VM_PROT_NONE);
+                    pmap_page_protect(p, VM_PROT_NONE);
                     vm_page_deactivate(p);
                 } else {
                     vm_pageq_requeue(p);
@@ -525,7 +525,7 @@ vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
                 vm_pageq_requeue(p);
             }
         } else if (p->queue == PQ_INACTIVE) {
-            vm_page_protect(p, VM_PROT_NONE);
+            pmap_page_protect(p, VM_PROT_NONE);
         }
         p = next;
     }
@@ -618,7 +618,7 @@ vm_pageout_page_free(vm_page_t m) {
     if (type == OBJT_SWAP || type == OBJT_DEFAULT)
         vm_object_reference(object);
     vm_page_busy(m);
-    vm_page_protect(m, VM_PROT_NONE);
+    pmap_page_protect(m, VM_PROT_NONE);
     vm_page_free(m);
     cnt.v_dfree++;
     if (type == OBJT_SWAP || type == OBJT_DEFAULT)
@@ -1043,7 +1043,7 @@ rescan0:
             m->act_count == 0) {
             page_shortage--;
             if (m->object->ref_count == 0) {
-                vm_page_protect(m, VM_PROT_NONE);
+                pmap_page_protect(m, VM_PROT_NONE);
                 if (m->dirty == 0)
                     vm_page_cache(m);
                 else
@@ -1278,7 +1278,7 @@ vm_pageout_page_stats()
                  * operations would be higher than the value
                  * of doing the operation.
                  */
-                vm_page_protect(m, VM_PROT_NONE);
+                pmap_page_protect(m, VM_PROT_NONE);
                 vm_page_deactivate(m);
             } else {
                 m->act_count -= min(m->act_count, ACT_DECLINE);
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 1583d4a..8782684 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -348,7 +348,7 @@ vnode_pager_setsize(vp, nsize)
              * XXX should vm_pager_unmap_page() have
              * dealt with this?
              */
-            vm_page_protect(m, VM_PROT_NONE);
+            pmap_page_protect(m, VM_PROT_NONE);
 
             /*
              * Clear out partial-page dirty bits.  This