author		alc <alc@FreeBSD.org>	2004-05-06 05:03:23 +0000
committer	alc <alc@FreeBSD.org>	2004-05-06 05:03:23 +0000
commit		b57e5e03fd70fac20a610e56ad7729e47f198a33 (patch)
tree		14ae6e4ab50ff0759faf5365c3a928333e1b9c90
parent		ed742a54c4d82222ed831fc8e7321cf7149ed8ed (diff)
Make vm_page's PG_ZERO flag immutable between the time of the page's
allocation and deallocation.  This flag's principal use is shortly after
allocation.  For such cases, clearing the flag is pointless.  The only
unusual use of PG_ZERO is in vfs_bio_clrbuf().  However, allocbuf() never
requests a prezeroed page.  So, vfs_bio_clrbuf() never sees a prezeroed
page.

Reviewed by:	tegge@
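For context, PG_ZERO is the hint the page allocator sets on a page it can
hand out already zeroed.  Below is a minimal sketch of the "shortly after
allocation" use described above, assuming the 5.x-era vm_page_alloc() and
pmap_zero_page() interfaces; this code is illustrative and is not part of
the commit itself.

	/*
	 * Hypothetical consumer pattern (sketch): request a prezeroed page
	 * and zero it by hand only when the allocator could not provide one.
	 */
	vm_page_t m;

	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
	if (m != NULL && (m->flags & PG_ZERO) == 0) {
		/* No prezeroed page was available; zero it ourselves. */
		pmap_zero_page(m);
	}

A consumer like this only inspects PG_ZERO right after allocation, which is
why the vm_page_flag_clear(..., PG_ZERO) calls removed in the diff below are
redundant.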
-rw-r--r--	sys/fs/nwfs/nwfs_io.c		2
-rw-r--r--	sys/fs/smbfs/smbfs_io.c		2
-rw-r--r--	sys/fs/specfs/spec_vnops.c	2
-rw-r--r--	sys/kern/uipc_syscalls.c	1
-rw-r--r--	sys/kern/vfs_bio.c		15
-rw-r--r--	sys/nfsclient/nfs_bio.c		2
-rw-r--r--	sys/vm/swap_pager.c		4
-rw-r--r--	sys/vm/vm_fault.c		1
-rw-r--r--	sys/vm/vnode_pager.c		3
9 files changed, 1 insertions, 31 deletions
diff --git a/sys/fs/nwfs/nwfs_io.c b/sys/fs/nwfs/nwfs_io.c
index c1d19ab..22f0e61 100644
--- a/sys/fs/nwfs/nwfs_io.c
+++ b/sys/fs/nwfs/nwfs_io.c
@@ -464,8 +464,6 @@ nwfs_getpages(ap)
nextoff = toff + PAGE_SIZE;
m = pages[i];
- m->flags &= ~PG_ZERO;
-
if (nextoff <= size) {
m->valid = VM_PAGE_BITS_ALL;
m->dirty = 0;
diff --git a/sys/fs/smbfs/smbfs_io.c b/sys/fs/smbfs/smbfs_io.c
index 5a0606c..6a1ea45 100644
--- a/sys/fs/smbfs/smbfs_io.c
+++ b/sys/fs/smbfs/smbfs_io.c
@@ -517,8 +517,6 @@ smbfs_getpages(ap)
nextoff = toff + PAGE_SIZE;
m = pages[i];
- m->flags &= ~PG_ZERO;
-
if (nextoff <= size) {
/*
* Read operation filled an entire page
diff --git a/sys/fs/specfs/spec_vnops.c b/sys/fs/specfs/spec_vnops.c
index 6d39307..9d53ca5 100644
--- a/sys/fs/specfs/spec_vnops.c
+++ b/sys/fs/specfs/spec_vnops.c
@@ -780,8 +780,6 @@ spec_getpages(ap)
nextoff = toff + PAGE_SIZE;
m = ap->a_m[i];
- m->flags &= ~PG_ZERO;
-
if (nextoff <= nread) {
m->valid = VM_PAGE_BITS_ALL;
vm_page_undirty(m);
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index b5ab1de..e8aa5fa 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -1893,7 +1893,6 @@ retry_lookup:
if (error)
VM_OBJECT_LOCK(obj);
vm_page_lock_queues();
- vm_page_flag_clear(pg, PG_ZERO);
vm_page_io_finish(pg);
mbstat.sf_iocnt++;
}
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 3b1f0d2..e4924c9 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -1336,9 +1336,6 @@ brelse(struct buf * bp)
int had_bogus = 0;
m = bp->b_pages[i];
- vm_page_lock_queues();
- vm_page_flag_clear(m, PG_ZERO);
- vm_page_unlock_queues();
/*
* If we hit a bogus page, fixup *all* the bogus pages
@@ -1582,7 +1579,6 @@ vfs_vmio_release(bp)
continue;
if (m->wire_count == 0) {
- vm_page_flag_clear(m, PG_ZERO);
/*
* Might as well free the page if we can and it has
* no valid data. We also free the page if the
@@ -2326,10 +2322,8 @@ vfs_setdirty(struct buf *bp)
* test the pages to see if they have been modified directly
* by users through the VM system.
*/
- for (i = 0; i < bp->b_npages; i++) {
- vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
+ for (i = 0; i < bp->b_npages; i++)
vm_page_test_dirty(bp->b_pages[i]);
- }
/*
* Calculate the encompassing dirty range, boffset and eoffset,
@@ -2919,7 +2913,6 @@ allocbuf(struct buf *bp, int size)
(cnt.v_free_min + cnt.v_cache_min))) {
pagedaemon_wakeup();
}
- vm_page_flag_clear(m, PG_ZERO);
vm_page_wire(m);
vm_page_unlock_queues();
bp->b_pages[bp->b_npages] = m;
@@ -3233,7 +3226,6 @@ bufdone(struct buf *bp)
if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
vfs_page_set_valid(bp, foff, i, m);
}
- vm_page_flag_clear(m, PG_ZERO);
/*
* when debugging new filesystems or buffer I/O methods, this
@@ -3316,7 +3308,6 @@ vfs_unbusy_pages(struct buf * bp)
pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
}
vm_object_pip_subtract(obj, 1);
- vm_page_flag_clear(m, PG_ZERO);
vm_page_io_finish(m);
}
vm_page_unlock_queues();
@@ -3402,7 +3393,6 @@ retry:
for (i = 0; i < bp->b_npages; i++) {
vm_page_t m = bp->b_pages[i];
- vm_page_flag_clear(m, PG_ZERO);
if ((bp->b_flags & B_CLUSTER) == 0) {
vm_object_pip_add(obj, 1);
vm_page_io_start(m);
@@ -3579,9 +3569,6 @@ vfs_bio_clrbuf(struct buf *bp)
}
}
bp->b_pages[i]->valid |= mask;
- vm_page_lock_queues();
- vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
- vm_page_unlock_queues();
}
unlock:
VM_OBJECT_UNLOCK(bp->b_object);
diff --git a/sys/nfsclient/nfs_bio.c b/sys/nfsclient/nfs_bio.c
index 7df0531..02fdd00 100644
--- a/sys/nfsclient/nfs_bio.c
+++ b/sys/nfsclient/nfs_bio.c
@@ -215,8 +215,6 @@ nfs_getpages(struct vop_getpages_args *ap)
nextoff = toff + PAGE_SIZE;
m = pages[i];
- m->flags &= ~PG_ZERO;
-
if (nextoff <= size) {
/*
* Read operation filled an entire page
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 196085e..258ee4d 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -1483,7 +1483,6 @@ swp_pager_async_iodone(struct buf *bp)
* interrupt.
*/
m->valid = 0;
- vm_page_flag_clear(m, PG_ZERO);
if (i != bp->b_pager.pg_reqpage)
vm_page_free(m);
else
@@ -1516,8 +1515,6 @@ swp_pager_async_iodone(struct buf *bp)
* that existed in the old swapper for a time before
* it got ripped out due to precisely this problem.
*
- * clear PG_ZERO in page.
- *
* If not the requested page then deactivate it.
*
* Note that the requested page, reqpage, is left
@@ -1529,7 +1526,6 @@ swp_pager_async_iodone(struct buf *bp)
pmap_clear_modify(m);
m->valid = VM_PAGE_BITS_ALL;
vm_page_undirty(m);
- vm_page_flag_clear(m, PG_ZERO);
/*
* We have to wake specifically requested pages
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index fded099..000e2a2 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -898,7 +898,6 @@ readrest:
}
mtx_unlock(&Giant);
vm_page_lock_queues();
- vm_page_flag_clear(fs.m, PG_ZERO);
vm_page_flag_set(fs.m, PG_REFERENCED);
/*
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index a85aee2..4f6ec45 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -513,7 +513,6 @@ vnode_pager_input_smlfs(object, m)
sf_buf_free(sf);
vm_page_lock_queues();
pmap_clear_modify(m);
- vm_page_flag_clear(m, PG_ZERO);
vm_page_unlock_queues();
if (error) {
return VM_PAGER_ERROR;
@@ -586,7 +585,6 @@ vnode_pager_input_old(object, m)
vm_page_lock_queues();
pmap_clear_modify(m);
vm_page_undirty(m);
- vm_page_flag_clear(m, PG_ZERO);
vm_page_unlock_queues();
if (!error)
m->valid = VM_PAGE_BITS_ALL;
@@ -884,7 +882,6 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
/* vm_page_zero_invalid(mt, FALSE); */
}
- vm_page_flag_clear(mt, PG_ZERO);
if (i != reqpage) {
/*