summary | refs | log | tree | commit | diff | stats
path: root/sys
diff options
context:
space:
mode:
authordillon <dillon@FreeBSD.org>1999-01-24 06:04:52 +0000
committerdillon <dillon@FreeBSD.org>1999-01-24 06:04:52 +0000
commita4c067a4598ea12fc0079892fe4b6695efdd39c0 (patch)
tree6384be4a4ca9965fe123e445cdfe4967e00c2c08 /sys
parent6f8753800cd94485b29ccd7a74ddc331cb3b9902 (diff)
downloadFreeBSD-src-a4c067a4598ea12fc0079892fe4b6695efdd39c0.zip
FreeBSD-src-a4c067a4598ea12fc0079892fe4b6695efdd39c0.tar.gz
Change all manual settings of vm_page_t->dirty = VM_PAGE_BITS_ALL
to use the vm_page_dirty() inline. The inline can thus do sanity checks (or not) over all cases.
Diffstat (limited to 'sys')
-rw-r--r--sys/alpha/alpha/pmap.c8
-rw-r--r--sys/amd64/amd64/pmap.c16
-rw-r--r--sys/i386/i386/pmap.c16
-rw-r--r--sys/vm/swap_pager.c6
-rw-r--r--sys/vm/vm_fault.c4
-rw-r--r--sys/vm/vm_map.c8
-rw-r--r--sys/vm/vm_pageout.c4
7 files changed, 30 insertions, 32 deletions
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index fe8741d..aec63d3 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -43,7 +43,7 @@
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* from: i386 Id: pmap.c,v 1.193 1998/04/19 15:22:48 bde Exp
* with some ideas from NetBSD's alpha pmap
- * $Id: pmap.c,v 1.12 1998/10/28 13:36:49 dg Exp $
+ * $Id: pmap.c,v 1.13 1999/01/21 08:29:02 dillon Exp $
*/
/*
@@ -1068,9 +1068,9 @@ pmap_swapout_proc(p)
for(i=0;i<UPAGES;i++) {
if ((m = vm_page_lookup(upobj, i)) == NULL)
panic("pmap_swapout_proc: upage already missing???");
- m->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(m);
vm_page_unwire(m, 0);
- pmap_kremove( (vm_offset_t) p->p_addr + PAGE_SIZE * i);
+ pmap_kremove((vm_offset_t)p->p_addr + PAGE_SIZE * i);
}
}
@@ -2980,7 +2980,7 @@ pmap_emulate_reference(struct proc *p, vm_offset_t v, int user, int write)
vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED);
if (write) {
ppv->pv_flags |= PV_TABLE_MOD;
- ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(ppv->pv_vm_page);
faultoff |= PG_FOW;
}
pmap_changebit(pa, faultoff, FALSE);
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 2a378d3..12c60da 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.219 1999/01/12 00:17:53 eivind Exp $
+ * $Id: pmap.c,v 1.220 1999/01/21 08:29:03 dillon Exp $
*/
/*
@@ -1070,7 +1070,7 @@ pmap_swapout_proc(p)
for(i=0;i<UPAGES;i++) {
if ((m = vm_page_lookup(upobj, i)) == NULL)
panic("pmap_swapout_proc: upage already missing???");
- m->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(m);
vm_page_unwire(m, 0);
pmap_kremove( (vm_offset_t) p->p_addr + PAGE_SIZE * i);
}
@@ -1787,7 +1787,7 @@ pmap_remove_pte(pmap, ptq, va)
}
#endif
if (pmap_track_modified(va))
- ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(ppv->pv_vm_page);
}
if (oldpte & PG_A)
vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED);
@@ -1989,7 +1989,7 @@ pmap_remove_all(pa)
}
#endif
if (pmap_track_modified(pv->pv_va))
- ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(ppv->pv_vm_page);
}
if (!update_needed &&
((!curproc || (&curproc->p_vmspace->vm_pmap == pv->pv_pmap)) ||
@@ -2087,7 +2087,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
if (pmap_track_modified(i386_ptob(sindex))) {
if (ppv == NULL)
ppv = pa_to_pvh(pbits);
- ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(ppv->pv_vm_page);
pbits &= ~PG_M;
}
}
@@ -2231,7 +2231,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
if ((origpte & PG_M) && pmap_track_modified(va)) {
pv_table_t *ppv;
ppv = pa_to_pvh(opa);
- ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(ppv->pv_vm_page);
}
pa |= PG_MANAGED;
}
@@ -3015,7 +3015,7 @@ pmap_remove_pages(pmap, sva, eva)
* Update the vm_page_t clean and reference bits.
*/
if (tpte & PG_M) {
- ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(ppv->pv_vm_page);
}
@@ -3145,7 +3145,7 @@ pmap_changebit(pa, bit, setem)
changed = 1;
if (bit == PG_RW) {
if (pbits & PG_M) {
- ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(ppv->pv_vm_page);
}
*(int *)pte = pbits & ~(PG_M|PG_RW);
} else {
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 2a378d3..12c60da 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.219 1999/01/12 00:17:53 eivind Exp $
+ * $Id: pmap.c,v 1.220 1999/01/21 08:29:03 dillon Exp $
*/
/*
@@ -1070,7 +1070,7 @@ pmap_swapout_proc(p)
for(i=0;i<UPAGES;i++) {
if ((m = vm_page_lookup(upobj, i)) == NULL)
panic("pmap_swapout_proc: upage already missing???");
- m->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(m);
vm_page_unwire(m, 0);
pmap_kremove( (vm_offset_t) p->p_addr + PAGE_SIZE * i);
}
@@ -1787,7 +1787,7 @@ pmap_remove_pte(pmap, ptq, va)
}
#endif
if (pmap_track_modified(va))
- ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(ppv->pv_vm_page);
}
if (oldpte & PG_A)
vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED);
@@ -1989,7 +1989,7 @@ pmap_remove_all(pa)
}
#endif
if (pmap_track_modified(pv->pv_va))
- ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(ppv->pv_vm_page);
}
if (!update_needed &&
((!curproc || (&curproc->p_vmspace->vm_pmap == pv->pv_pmap)) ||
@@ -2087,7 +2087,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
if (pmap_track_modified(i386_ptob(sindex))) {
if (ppv == NULL)
ppv = pa_to_pvh(pbits);
- ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(ppv->pv_vm_page);
pbits &= ~PG_M;
}
}
@@ -2231,7 +2231,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
if ((origpte & PG_M) && pmap_track_modified(va)) {
pv_table_t *ppv;
ppv = pa_to_pvh(opa);
- ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(ppv->pv_vm_page);
}
pa |= PG_MANAGED;
}
@@ -3015,7 +3015,7 @@ pmap_remove_pages(pmap, sva, eva)
* Update the vm_page_t clean and reference bits.
*/
if (tpte & PG_M) {
- ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(ppv->pv_vm_page);
}
@@ -3145,7 +3145,7 @@ pmap_changebit(pa, bit, setem)
changed = 1;
if (bit == PG_RW) {
if (pbits & PG_M) {
- ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(ppv->pv_vm_page);
}
*(int *)pte = pbits & ~(PG_M|PG_RW);
} else {
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 4a992d3..091a8c6 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -64,7 +64,7 @@
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
*
- * $Id: swap_pager.c,v 1.109 1999/01/21 09:33:07 dillon Exp $
+ * $Id: swap_pager.c,v 1.110 1999/01/24 02:32:14 dillon Exp $
*/
#include <sys/param.h>
@@ -1098,7 +1098,7 @@ swap_pager_putpages(object, m, count, sync, rtvals)
blk + j,
0
);
- mreq->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(mreq);
rtvals[i+j] = VM_PAGER_OK;
vm_page_flag_set(mreq, PG_SWAPINPROG);
@@ -1319,7 +1319,7 @@ swp_pager_async_iodone(bp)
* so it doesn't clog the inactive list,
* then finish the I/O.
*/
- m->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(m);
vm_page_activate(m);
vm_page_io_finish(m);
}
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index d44a248..8692fff 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_fault.c,v 1.96 1999/01/23 06:00:27 dillon Exp $
+ * $Id: vm_fault.c,v 1.97 1999/01/24 00:55:04 dillon Exp $
*/
/*
@@ -760,7 +760,7 @@ readrest:
* any swap backing since the page is now dirty.
*/
if (fault_flags & VM_FAULT_DIRTY) {
- fs.m->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(fs.m);
vm_pager_page_unswapped(fs.m);
}
}
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 6878dd3..6a287b8 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.140 1999/01/21 08:29:10 dillon Exp $
+ * $Id: vm_map.c,v 1.141 1999/01/21 09:40:48 dillon Exp $
*/
/*
@@ -2225,7 +2225,7 @@ vm_map_split(entry)
vm_page_busy(m);
vm_page_protect(m, VM_PROT_NONE);
vm_page_rename(m, new_object, idx);
- /* page automatically made dirty by rename */
+ /* page automatically made dirty by rename and cache handled */
vm_page_busy(m);
}
@@ -3036,10 +3036,8 @@ vm_freeze_copyopts(object, froma, toa)
vm_page_protect(m_in, VM_PROT_NONE);
pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
m_out->valid = m_in->valid;
- m_out->dirty = VM_PAGE_BITS_ALL;
-
+ vm_page_dirty(m_out);
vm_page_activate(m_out);
-
vm_page_wakeup(m_in);
}
vm_page_wakeup(m_out);
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index aa95a37..a220fd2 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.c,v 1.132 1999/01/24 01:06:31 dillon Exp $
+ * $Id: vm_pageout.c,v 1.133 1999/01/24 01:33:22 dillon Exp $
*/
/*
@@ -784,7 +784,7 @@ rescan0:
if (m->dirty == 0) {
vm_page_test_dirty(m);
} else {
- m->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(m);
}
/*
OpenPOWER on IntegriCloud