Diffstat (limited to 'sys/i386')

 sys/i386/i386/pmap.c | 46
 sys/i386/xen/pmap.c  | 40
 2 files changed, 43 insertions, 43 deletions
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index e6b8669..db5f995 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2207,7 +2207,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
 			KASSERT((tpte & PG_W) == 0,
 			    ("pmap_collect: wired pte %#jx", (uintmax_t)tpte));
 			if (tpte & PG_A)
-				vm_page_flag_set(m, PG_REFERENCED);
+				vm_page_aflag_set(m, PGA_REFERENCED);
 			if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 				vm_page_dirty(m);
 			free = NULL;
@@ -2221,7 +2221,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
 		}
 		if (TAILQ_EMPTY(&m->md.pv_list) &&
 		    TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list))
-			vm_page_flag_clear(m, PG_WRITEABLE);
+			vm_page_aflag_clear(m, PGA_WRITEABLE);
 	}
 	sched_unpin();
 }
@@ -2461,7 +2461,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
 	if (TAILQ_EMPTY(&m->md.pv_list)) {
 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 		if (TAILQ_EMPTY(&pvh->pv_list))
-			vm_page_flag_clear(m, PG_WRITEABLE);
+			vm_page_aflag_clear(m, PGA_WRITEABLE);
 	}
 }
 
@@ -2714,10 +2714,10 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
 			if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
 				vm_page_dirty(m);
 			if (oldpde & PG_A)
-				vm_page_flag_set(m, PG_REFERENCED);
+				vm_page_aflag_set(m, PGA_REFERENCED);
 			if (TAILQ_EMPTY(&m->md.pv_list) &&
 			    TAILQ_EMPTY(&pvh->pv_list))
-				vm_page_flag_clear(m, PG_WRITEABLE);
+				vm_page_aflag_clear(m, PGA_WRITEABLE);
 		}
 	}
 	if (pmap == kernel_pmap) {
@@ -2763,7 +2763,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free)
 		if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 			vm_page_dirty(m);
 		if (oldpte & PG_A)
-			vm_page_flag_set(m, PG_REFERENCED);
+			vm_page_aflag_set(m, PGA_REFERENCED);
 		pmap_remove_entry(pmap, m, va);
 	}
 	return (pmap_unuse_pt(pmap, va, free));
@@ -2953,7 +2953,7 @@ pmap_remove_all(vm_page_t m)
 		if (tpte & PG_W)
 			pmap->pm_stats.wired_count--;
 		if (tpte & PG_A)
-			vm_page_flag_set(m, PG_REFERENCED);
+			vm_page_aflag_set(m, PGA_REFERENCED);
 
 		/*
 		 * Update the vm_page_t clean and reference bits.
@@ -2966,7 +2966,7 @@ pmap_remove_all(vm_page_t m)
 		free_pv_entry(pmap, pv);
 		PMAP_UNLOCK(pmap);
 	}
-	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	sched_unpin();
 	vm_page_unlock_queues();
 	pmap_free_zero_pages(free);
@@ -3413,7 +3413,7 @@ validate:
 	if ((prot & VM_PROT_WRITE) != 0) {
 		newpte |= PG_RW;
 		if ((newpte & PG_MANAGED) != 0)
-			vm_page_flag_set(m, PG_WRITEABLE);
+			vm_page_aflag_set(m, PGA_WRITEABLE);
 	}
 #ifdef PAE
 	if ((prot & VM_PROT_EXECUTE) == 0)
@@ -3439,7 +3439,7 @@ validate:
 		origpte = pte_load_store(pte, newpte);
 		if (origpte & PG_A) {
 			if (origpte & PG_MANAGED)
-				vm_page_flag_set(om, PG_REFERENCED);
+				vm_page_aflag_set(om, PGA_REFERENCED);
 			if (opa != VM_PAGE_TO_PHYS(m))
 				invlva = TRUE;
 #ifdef PAE
@@ -3457,7 +3457,7 @@ validate:
 		if ((origpte & PG_MANAGED) != 0 &&
 		    TAILQ_EMPTY(&om->md.pv_list) &&
 		    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))
-			vm_page_flag_clear(om, PG_WRITEABLE);
+			vm_page_aflag_clear(om, PGA_WRITEABLE);
 		if (invlva)
 			pmap_invalidate_page(pmap, va);
 	} else
@@ -4287,7 +4287,7 @@ pmap_remove_pages(pmap_t pmap)
 					if (TAILQ_EMPTY(&pvh->pv_list)) {
 						for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 							if (TAILQ_EMPTY(&mt->md.pv_list))
-								vm_page_flag_clear(mt, PG_WRITEABLE);
+								vm_page_aflag_clear(mt, PGA_WRITEABLE);
 					}
 					mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
 					if (mpte != NULL) {
@@ -4305,7 +4305,7 @@ pmap_remove_pages(pmap_t pmap)
 					if (TAILQ_EMPTY(&m->md.pv_list)) {
 						pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 						if (TAILQ_EMPTY(&pvh->pv_list))
-							vm_page_flag_clear(m, PG_WRITEABLE);
+							vm_page_aflag_clear(m, PGA_WRITEABLE);
 					}
 					pmap_unuse_pt(pmap, pv->pv_va, &free);
 				}
@@ -4345,13 +4345,13 @@ pmap_is_modified(vm_page_t m)
 	    ("pmap_is_modified: page %p is not managed", m));
 
 	/*
-	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PG_WRITEABLE
+	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
 	 * is clear, no PTEs can have PG_M set.
 	 */
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if ((m->oflags & VPO_BUSY) == 0 &&
-	    (m->flags & PG_WRITEABLE) == 0)
+	    (m->aflags & PGA_WRITEABLE) == 0)
 		return (FALSE);
 	vm_page_lock_queues();
 	rv = pmap_is_modified_pvh(&m->md) ||
@@ -4478,13 +4478,13 @@ pmap_remove_write(vm_page_t m)
 	    ("pmap_remove_write: page %p is not managed", m));
 
 	/*
-	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
-	 * another thread while the object is locked. Thus, if PG_WRITEABLE
+	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+	 * another thread while the object is locked. Thus, if PGA_WRITEABLE
 	 * is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if ((m->oflags & VPO_BUSY) == 0 &&
-	    (m->flags & PG_WRITEABLE) == 0)
+	    (m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	vm_page_lock_queues();
 	sched_pin();
@@ -4522,7 +4522,7 @@ retry:
 		}
 		PMAP_UNLOCK(pmap);
 	}
-	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	sched_unpin();
 	vm_page_unlock_queues();
 }
@@ -4633,11 +4633,11 @@ pmap_clear_modify(vm_page_t m)
 	    ("pmap_clear_modify: page %p is busy", m));
 
 	/*
-	 * If the page is not PG_WRITEABLE, then no PTEs can have PG_M set.
+	 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
 	 * If the object containing the page is locked and the page is not
-	 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+	 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
 	 */
-	if ((m->flags & PG_WRITEABLE) == 0)
+	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	vm_page_lock_queues();
 	sched_pin();
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index ece1b6c..b19f75c 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -2037,7 +2037,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
 			KASSERT((tpte & PG_W) == 0,
 			    ("pmap_collect: wired pte %#jx", (uintmax_t)tpte));
 			if (tpte & PG_A)
-				vm_page_flag_set(m, PG_REFERENCED);
+				vm_page_aflag_set(m, PGA_REFERENCED);
 			if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 				vm_page_dirty(m);
 			free = NULL;
@@ -2050,7 +2050,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
 				PMAP_UNLOCK(pmap);
 		}
 		if (TAILQ_EMPTY(&m->md.pv_list))
-			vm_page_flag_clear(m, PG_WRITEABLE);
+			vm_page_aflag_clear(m, PGA_WRITEABLE);
 	}
 	sched_unpin();
 }
@@ -2222,7 +2222,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	pmap_pvh_free(&m->md, pmap, va);
 	if (TAILQ_EMPTY(&m->md.pv_list))
-		vm_page_flag_clear(m, PG_WRITEABLE);
+		vm_page_aflag_clear(m, PGA_WRITEABLE);
 }
 
 /*
@@ -2274,7 +2274,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free)
 		if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 			vm_page_dirty(m);
 		if (oldpte & PG_A)
-			vm_page_flag_set(m, PG_REFERENCED);
+			vm_page_aflag_set(m, PGA_REFERENCED);
 		pmap_remove_entry(pmap, m, va);
 	}
 	return (pmap_unuse_pt(pmap, va, free));
@@ -2446,7 +2446,7 @@ pmap_remove_all(vm_page_t m)
 		if (tpte & PG_W)
 			pmap->pm_stats.wired_count--;
 		if (tpte & PG_A)
-			vm_page_flag_set(m, PG_REFERENCED);
+			vm_page_aflag_set(m, PGA_REFERENCED);
 
 		/*
 		 * Update the vm_page_t clean and reference bits.
@@ -2459,7 +2459,7 @@ pmap_remove_all(vm_page_t m)
 		free_pv_entry(pmap, pv);
 		PMAP_UNLOCK(pmap);
 	}
-	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	PT_UPDATES_FLUSH();
 	if (*PMAP1)
 		PT_SET_MA(PADDR1, 0);
@@ -2739,7 +2739,7 @@ validate:
 	if ((prot & VM_PROT_WRITE) != 0) {
 		newpte |= PG_RW;
 		if ((newpte & PG_MANAGED) != 0)
-			vm_page_flag_set(m, PG_WRITEABLE);
+			vm_page_aflag_set(m, PGA_WRITEABLE);
 	}
 #ifdef PAE
 	if ((prot & VM_PROT_EXECUTE) == 0)
@@ -2764,7 +2764,7 @@ validate:
 		PT_SET_VA(pte, newpte | PG_A, FALSE);
 		if (origpte & PG_A) {
 			if (origpte & PG_MANAGED)
-				vm_page_flag_set(om, PG_REFERENCED);
+				vm_page_aflag_set(om, PGA_REFERENCED);
 			if (opa != VM_PAGE_TO_PHYS(m))
 				invlva = TRUE;
 #ifdef PAE
@@ -2781,7 +2781,7 @@ validate:
 		}
 		if ((origpte & PG_MANAGED) != 0 &&
 		    TAILQ_EMPTY(&om->md.pv_list))
-			vm_page_flag_clear(om, PG_WRITEABLE);
+			vm_page_aflag_clear(om, PGA_WRITEABLE);
 		if (invlva)
 			pmap_invalidate_page(pmap, va);
 	} else{
@@ -3549,7 +3549,7 @@ pmap_remove_pages(pmap_t pmap)
 			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 
 			if (TAILQ_EMPTY(&m->md.pv_list))
-				vm_page_flag_clear(m, PG_WRITEABLE);
+				vm_page_aflag_clear(m, PGA_WRITEABLE);
 
 			pmap_unuse_pt(pmap, pv->pv_va, &free);
 
@@ -3604,13 +3604,13 @@ pmap_is_modified(vm_page_t m)
 	rv = FALSE;
 
 	/*
-	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PG_WRITEABLE
+	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
+	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
 	 * is clear, no PTEs can have PG_M set.
 	 */
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if ((m->oflags & VPO_BUSY) == 0 &&
-	    (m->flags & PG_WRITEABLE) == 0)
+	    (m->aflags & PGA_WRITEABLE) == 0)
 		return (rv);
 	vm_page_lock_queues();
 	sched_pin();
@@ -3735,13 +3735,13 @@ pmap_remove_write(vm_page_t m)
 	    ("pmap_remove_write: page %p is not managed", m));
 
 	/*
-	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
-	 * another thread while the object is locked. Thus, if PG_WRITEABLE
+	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
+	 * another thread while the object is locked. Thus, if PGA_WRITEABLE
 	 * is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if ((m->oflags & VPO_BUSY) == 0 &&
-	    (m->flags & PG_WRITEABLE) == 0)
+	    (m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	vm_page_lock_queues();
 	sched_pin();
@@ -3769,7 +3769,7 @@ retry:
 		}
 		PMAP_UNLOCK(pmap);
 	}
-	vm_page_flag_clear(m, PG_WRITEABLE);
+	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	PT_UPDATES_FLUSH();
 	if (*PMAP1)
 		PT_SET_MA(PADDR1, 0);
@@ -3846,11 +3846,11 @@ pmap_clear_modify(vm_page_t m)
 	    ("pmap_clear_modify: page %p is busy", m));
 
 	/*
-	 * If the page is not PG_WRITEABLE, then no PTEs can have PG_M set.
+	 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
 	 * If the object containing the page is locked and the page is not
-	 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+	 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
 	 */
-	if ((m->flags & PG_WRITEABLE) == 0)
+	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	vm_page_lock_queues();
 	sched_pin();
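Note: the change above is mechanical. Every set or clear of the PG_REFERENCED and
PG_WRITEABLE bits in the lock-protected m->flags field becomes a
vm_page_aflag_set()/vm_page_aflag_clear() call on the PGA_REFERENCED/PGA_WRITEABLE
bits of m->aflags, and direct tests of m->flags become tests of m->aflags. The
sketch below is only an illustrative, standalone C11 analogue of that accessor
pattern; it is not FreeBSD's implementation (which lives in sys/vm/vm_page.h and
has its own locking rules), and the names, flag values, struct layout, and memory
ordering here are assumptions.

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical flag values for illustration; not copied from vm_page.h. */
#define PAGE_AFLAG_WRITEABLE	0x01	/* page may have writeable mappings */
#define PAGE_AFLAG_REFERENCED	0x02	/* page was recently referenced */

struct page {
	_Atomic uint8_t aflags;		/* stand-in for vm_page's aflags */
};

/* Set bits with an atomic OR, so no queue lock is needed for the update. */
static inline void
page_aflag_set(struct page *m, uint8_t bits)
{
	atomic_fetch_or_explicit(&m->aflags, bits, memory_order_acq_rel);
}

/* Clear bits with an atomic AND against the complement. */
static inline void
page_aflag_clear(struct page *m, uint8_t bits)
{
	atomic_fetch_and_explicit(&m->aflags, (uint8_t)~bits,
	    memory_order_acq_rel);
}

With atomic read-modify-write updates, a plain load suffices on the reader side,
which is why the diff rewrites checks such as (m->flags & PG_WRITEABLE) == 0 as
(m->aflags & PGA_WRITEABLE) == 0.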