diff options
author | peter <peter@FreeBSD.org> | 2002-04-28 00:15:48 +0000 |
---|---|---|
committer | peter <peter@FreeBSD.org> | 2002-04-28 00:15:48 +0000 |
commit | 0feba1c376c506a50823d2756c48550954655faf (patch) | |
tree | ed9386cd8bc5bf5db1a8d49e5119aef3aa591423 /sys/vm | |
parent | 91d576b0c5019ef4261a145263a2f6cb196c4e41 (diff) | |
download | FreeBSD-src-0feba1c376c506a50823d2756c48550954655faf.zip FreeBSD-src-0feba1c376c506a50823d2756c48550954655faf.tar.gz |
We do not necessarily need to map/unmap pages to zero parts of them.
On systems where physical memory is also direct-mapped (alpha, sparc,
ia64, etc.), this is slightly harmful.
Diffstat (limited to 'sys/vm')
-rw-r--r-- | sys/vm/vm_page.c | 12 | ||||
-rw-r--r-- | sys/vm/vm_page.h | 1 | ||||
-rw-r--r-- | sys/vm/vnode_pager.c | 5 |
3 files changed, 14 insertions(+), 4 deletions(-)
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c index 746c03a..431ac70 100644 --- a/sys/vm/vm_page.c +++ b/sys/vm/vm_page.c @@ -457,6 +457,18 @@ vm_page_zero_fill(vm_page_t m) } /* + * vm_page_zero_fill_area: + * + * Like vm_page_zero_fill but only fill the specified area. + */ +boolean_t +vm_page_zero_fill_area(vm_page_t m, int off, int size) +{ + pmap_zero_page_area(m, off, size); + return (TRUE); +} + +/* * vm_page_copy: * * Copy one page to another diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h index f7dfaa6..d4777ee 100644 --- a/sys/vm/vm_page.h +++ b/sys/vm/vm_page.h @@ -327,6 +327,7 @@ void vm_page_hold(vm_page_t mem); void vm_page_unhold(vm_page_t mem); void vm_page_protect(vm_page_t mem, int prot); boolean_t vm_page_zero_fill(vm_page_t m); +boolean_t vm_page_zero_fill_area(vm_page_t m, int off, int len); void vm_page_copy(vm_page_t src_m, vm_page_t dest_m); void vm_page_free(vm_page_t m); void vm_page_free_zero(vm_page_t m); diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c index f069b6a..46c7cef 100644 --- a/sys/vm/vnode_pager.c +++ b/sys/vm/vnode_pager.c @@ -305,7 +305,6 @@ vnode_pager_setsize(vp, nsize) * it can screw up NFS reads, so we don't allow the case. */ if (nsize & PAGE_MASK) { - vm_offset_t kva; vm_page_t m; m = vm_page_lookup(object, OFF_TO_IDX(nsize)); @@ -317,9 +316,7 @@ vnode_pager_setsize(vp, nsize) * Clear out partial-page garbage in case * the page has been mapped. */ - kva = vm_pager_map_page(m); - bzero((caddr_t)kva + base, size); - vm_pager_unmap_page(kva); + vm_page_zero_fill_area(m, base, size); /* * XXX work around SMP data integrity race |