-rw-r--r--  sys/amd64/amd64/pmap.c              |  19
-rw-r--r--  sys/arm/arm/pmap.c                  |  28
-rw-r--r--  sys/i386/i386/pmap.c                |  19
-rw-r--r--  sys/ia64/ia64/pmap.c                |  71
-rw-r--r--  sys/powerpc/aim/mmu_oea.c           | 121
-rw-r--r--  sys/powerpc/powerpc/mmu_if.m        |  26
-rw-r--r--  sys/powerpc/powerpc/mmu_oea.c       | 121
-rw-r--r--  sys/powerpc/powerpc/pmap_dispatch.c |  12
-rw-r--r--  sys/sparc64/sparc64/pmap.c          |  19
-rw-r--r--  sys/vm/pmap.h                       |   3
-rw-r--r--  sys/vm/vm_object.c                  |   6
-rw-r--r--  sys/vm/vm_page.c                    |   2
-rw-r--r--  sys/vm/vm_pageout.c                 |   2
13 files changed, 153 insertions(+), 296 deletions(-)
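The commit retires the per-page protection-downgrade entry point: every caller of pmap_page_protect() only ever asked it to revoke write access or to remove the page outright, so the machine-dependent pmap_clear_write() is renamed pmap_remove_write() and called directly. A minimal sketch of how the removed wrapper maps onto the surviving primitives, mirroring the bodies deleted from the amd64, i386 and sparc64 pmaps below; the _compat name and the include list are illustrative assumptions, not part of the change:

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

/*
 * Illustrative only: the old pmap_page_protect() behaviour expressed in
 * terms of the new interface.  This mirrors the wrapper bodies removed
 * from the amd64, i386 and sparc64 pmaps; the _compat name is made up.
 */
void
pmap_page_protect_compat(vm_page_t m, vm_prot_t prot)
{

	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ | VM_PROT_EXECUTE))
			pmap_remove_write(m);	/* drop write access, keep mappings */
		else
			pmap_remove_all(m);	/* no access left: remove all mappings */
	}
}
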
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c index bf24aec..1747d0b 100644 --- a/sys/amd64/amd64/pmap.c +++ b/sys/amd64/amd64/pmap.c @@ -2972,7 +2972,7 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) * Clear the write and modified bits in each of the given page's mappings. */ void -pmap_clear_write(vm_page_t m) +pmap_remove_write(vm_page_t m) { pv_entry_t pv; pmap_t pmap; @@ -3002,23 +3002,6 @@ retry: } /* - * pmap_page_protect: - * - * Lower the permission for all mappings to a given page. - */ -void -pmap_page_protect(vm_page_t m, vm_prot_t prot) -{ - if ((prot & VM_PROT_WRITE) == 0) { - if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) { - pmap_clear_write(m); - } else { - pmap_remove_all(m); - } - } -} - -/* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c index 9d8f4af..1e5be9c 100644 --- a/sys/arm/arm/pmap.c +++ b/sys/arm/arm/pmap.c @@ -2759,32 +2759,6 @@ pmap_growkernel(vm_offset_t addr) /* - * pmap_page_protect: - * - * Lower the permission for all mappings to a given page. - */ -void -pmap_page_protect(vm_page_t m, vm_prot_t prot) -{ - switch(prot) { - case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: - case VM_PROT_READ|VM_PROT_WRITE: - return; - - case VM_PROT_READ: - case VM_PROT_READ|VM_PROT_EXECUTE: - pmap_clearbit(m, PVF_WRITE); - break; - - default: - pmap_remove_all(m); - break; - } - -} - - -/* * Remove all pages from specified address space * this aids process exit speeds. Also, this code * is special cased for current process only, but @@ -4464,7 +4438,7 @@ pmap_clear_reference(vm_page_t m) * Clear the write and modified bits in each of the given page's mappings. */ void -pmap_clear_write(vm_page_t m) +pmap_remove_write(vm_page_t m) { if (m->md.pvh_attrs & PVF_WRITE) diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c index 8b8bcb6..bd32e54 100644 --- a/sys/i386/i386/pmap.c +++ b/sys/i386/i386/pmap.c @@ -3066,7 +3066,7 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) * Clear the write and modified bits in each of the given page's mappings. */ void -pmap_clear_write(vm_page_t m) +pmap_remove_write(vm_page_t m) { pv_entry_t pv; pmap_t pmap; @@ -3103,23 +3103,6 @@ retry: } /* - * pmap_page_protect: - * - * Lower the permission for all mappings to a given page. - */ -void -pmap_page_protect(vm_page_t m, vm_prot_t prot) -{ - if ((prot & VM_PROT_WRITE) == 0) { - if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) { - pmap_clear_write(m); - } else { - pmap_remove_all(m); - } - } -} - -/* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c index 18d9644..e6b09ad 100644 --- a/sys/ia64/ia64/pmap.c +++ b/sys/ia64/ia64/pmap.c @@ -1939,40 +1939,6 @@ pmap_remove_pages(pmap_t pmap) } /* - * pmap_page_protect: - * - * Lower the permission for all mappings to a given page. 
- */ -void -pmap_page_protect(vm_page_t m, vm_prot_t prot) -{ - struct ia64_lpte *pte; - pmap_t oldpmap, pmap; - pv_entry_t pv; - - if ((prot & VM_PROT_WRITE) != 0) - return; - if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) { - if ((m->flags & PG_WRITEABLE) == 0) - return; - TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { - pmap = pv->pv_pmap; - PMAP_LOCK(pmap); - oldpmap = pmap_install(pmap); - pte = pmap_find_vhpt(pv->pv_va); - KASSERT(pte != NULL, ("pte")); - pmap_pte_prot(pmap, pte, prot); - pmap_invalidate_page(pmap, pv->pv_va); - pmap_install(oldpmap); - PMAP_UNLOCK(pmap); - } - vm_page_flag_clear(m, PG_WRITEABLE); - } else { - pmap_remove_all(m); - } -} - -/* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. @@ -2119,6 +2085,43 @@ pmap_clear_reference(vm_page_t m) } /* + * Clear the write and modified bits in each of the given page's mappings. + */ +void +pmap_remove_write(vm_page_t m) +{ + struct ia64_lpte *pte; + pmap_t oldpmap, pmap; + pv_entry_t pv; + vm_prot_t prot; + + mtx_assert(&vm_page_queue_mtx, MA_OWNED); + if ((m->flags & PG_FICTITIOUS) != 0 || + (m->flags & PG_WRITEABLE) == 0) + return; + TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { + pmap = pv->pv_pmap; + PMAP_LOCK(pmap); + oldpmap = pmap_install(pmap); + pte = pmap_find_vhpt(pv->pv_va); + KASSERT(pte != NULL, ("pte")); + prot = pmap_prot(pte); + if ((prot & VM_PROT_WRITE) != 0) { + if (pmap_dirty(pte)) { + vm_page_dirty(m); + pmap_clear_dirty(pte); + } + prot &= ~VM_PROT_WRITE; + pmap_pte_prot(pmap, pte, prot); + pmap_invalidate_page(pmap, pv->pv_va); + } + pmap_install(oldpmap); + PMAP_UNLOCK(pmap); + } + vm_page_flag_clear(m, PG_WRITEABLE); +} + +/* * Map a set of physical memory pages into the kernel virtual * address space. Return a pointer to where it is mapped. 
This * routine is intended to be used for mapping device memory, diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c index 9729fa3..df5431d 100644 --- a/sys/powerpc/aim/mmu_oea.c +++ b/sys/powerpc/aim/mmu_oea.c @@ -322,7 +322,6 @@ boolean_t moea_is_modified(mmu_t, vm_page_t); boolean_t moea_ts_referenced(mmu_t, vm_page_t); vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int); boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t); -void moea_page_protect(mmu_t, vm_page_t, vm_prot_t); void moea_pinit(mmu_t, pmap_t); void moea_pinit0(mmu_t, pmap_t); void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); @@ -331,6 +330,7 @@ void moea_qremove(mmu_t, vm_offset_t, int); void moea_release(mmu_t, pmap_t); void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); void moea_remove_all(mmu_t, vm_page_t); +void moea_remove_write(mmu_t, vm_page_t); void moea_zero_page(mmu_t, vm_page_t); void moea_zero_page_area(mmu_t, vm_page_t, int, int); void moea_zero_page_idle(mmu_t, vm_page_t); @@ -358,7 +358,6 @@ static mmu_method_t moea_methods[] = { MMUMETHOD(mmu_ts_referenced, moea_ts_referenced), MMUMETHOD(mmu_map, moea_map), MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick), - MMUMETHOD(mmu_page_protect, moea_page_protect), MMUMETHOD(mmu_pinit, moea_pinit), MMUMETHOD(mmu_pinit0, moea_pinit0), MMUMETHOD(mmu_protect, moea_protect), @@ -367,6 +366,7 @@ static mmu_method_t moea_methods[] = { MMUMETHOD(mmu_release, moea_release), MMUMETHOD(mmu_remove, moea_remove), MMUMETHOD(mmu_remove_all, moea_remove_all), + MMUMETHOD(mmu_remove_write, moea_remove_write), MMUMETHOD(mmu_zero_page, moea_zero_page), MMUMETHOD(mmu_zero_page_area, moea_zero_page_area), MMUMETHOD(mmu_zero_page_idle, moea_zero_page_idle), @@ -1293,6 +1293,48 @@ moea_clear_modify(mmu_t mmu, vm_page_t m) } /* + * Clear the write and modified bits in each of the given page's mappings. + */ +void +moea_remove_write(mmu_t mmu, vm_page_t m) +{ + struct pvo_entry *pvo; + struct pte *pt; + pmap_t pmap; + u_int lo; + + mtx_assert(&vm_page_queue_mtx, MA_OWNED); + if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || + (m->flags & PG_WRITEABLE) == 0) + return; + lo = moea_attr_fetch(m); + SYNC(); + LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { + pmap = pvo->pvo_pmap; + PMAP_LOCK(pmap); + if ((pvo->pvo_pte.pte_lo & PTE_PP) != PTE_BR) { + pt = moea_pvo_to_pte(pvo, -1); + pvo->pvo_pte.pte_lo &= ~PTE_PP; + pvo->pvo_pte.pte_lo |= PTE_BR; + if (pt != NULL) { + moea_pte_synch(pt, &pvo->pvo_pte); + lo |= pvo->pvo_pte.pte_lo; + pvo->pvo_pte.pte_lo &= ~PTE_CHG; + moea_pte_change(pt, &pvo->pvo_pte, + pvo->pvo_vaddr); + mtx_unlock(&moea_table_mutex); + } + } + PMAP_UNLOCK(pmap); + } + if ((lo & PTE_CHG) != 0) { + moea_attr_clear(m, PTE_CHG); + vm_page_dirty(m); + } + vm_page_flag_clear(m, PG_WRITEABLE); +} + +/* * moea_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. @@ -1420,81 +1462,6 @@ moea_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, } /* - * Lower the permission for all mappings to a given page. - */ -void -moea_page_protect(mmu_t mmu, vm_page_t m, vm_prot_t prot) -{ - struct pvo_head *pvo_head; - struct pvo_entry *pvo, *next_pvo; - struct pte *pt; - pmap_t pmap; - - /* - * Since the routine only downgrades protection, if the - * maximal protection is desired, there isn't any change - * to be made. 
- */ - if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == - (VM_PROT_READ|VM_PROT_WRITE)) - return; - - pvo_head = vm_page_to_pvoh(m); - for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { - next_pvo = LIST_NEXT(pvo, pvo_vlink); - MOEA_PVO_CHECK(pvo); /* sanity check */ - pmap = pvo->pvo_pmap; - PMAP_LOCK(pmap); - - /* - * Downgrading to no mapping at all, we just remove the entry. - */ - if ((prot & VM_PROT_READ) == 0) { - moea_pvo_remove(pvo, -1); - PMAP_UNLOCK(pmap); - continue; - } - - /* - * If EXEC permission is being revoked, just clear the flag - * in the PVO. - */ - if ((prot & VM_PROT_EXECUTE) == 0) - pvo->pvo_vaddr &= ~PVO_EXECUTABLE; - - /* - * If this entry is already RO, don't diddle with the page - * table. - */ - if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) { - PMAP_UNLOCK(pmap); - MOEA_PVO_CHECK(pvo); - continue; - } - - /* - * Grab the PTE before we diddle the bits so pvo_to_pte can - * verify the pte contents are as expected. - */ - pt = moea_pvo_to_pte(pvo, -1); - pvo->pvo_pte.pte_lo &= ~PTE_PP; - pvo->pvo_pte.pte_lo |= PTE_BR; - if (pt != NULL) { - moea_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); - mtx_unlock(&moea_table_mutex); - } - PMAP_UNLOCK(pmap); - MOEA_PVO_CHECK(pvo); /* sanity check */ - } - - /* - * Downgrading from writeable: clear the VM page flag - */ - if ((prot & VM_PROT_WRITE) != VM_PROT_WRITE) - vm_page_flag_clear(m, PG_WRITEABLE); -} - -/* * Returns true if the pmap's pv is one of the first * 16 pvs linked to from this page. This count may * be changed upwards or downwards in the future; it diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m index 5fd0510..548660f 100644 --- a/sys/powerpc/powerpc/mmu_if.m +++ b/sys/powerpc/powerpc/mmu_if.m @@ -147,6 +147,18 @@ METHOD void clear_reference { /** + * @brief Clear the write and modified bits in each of the given + * physical page's mappings + * + * @param _pg physical page + */ +METHOD void remove_write { + mmu_t _mmu; + vm_page_t _pg; +}; + + +/** * @brief Copy the address range given by the source physical map, virtual * address and length to the destination physical map and virtual address. * This routine is optional (xxx default null implementation ?) @@ -419,20 +431,6 @@ METHOD void page_init { /** - * @brief Lower the protection to the given value for all mappings of the - * given physical page. 
- * - * @param _pg physical page - * @param _prot updated page protection - */ -METHOD void page_protect { - mmu_t _mmu; - vm_page_t _pg; - vm_prot_t _prot; -}; - - -/** * @brief Initialise a physical map data structure * * @param _pmap physical map diff --git a/sys/powerpc/powerpc/mmu_oea.c b/sys/powerpc/powerpc/mmu_oea.c index 9729fa3..df5431d 100644 --- a/sys/powerpc/powerpc/mmu_oea.c +++ b/sys/powerpc/powerpc/mmu_oea.c @@ -322,7 +322,6 @@ boolean_t moea_is_modified(mmu_t, vm_page_t); boolean_t moea_ts_referenced(mmu_t, vm_page_t); vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int); boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t); -void moea_page_protect(mmu_t, vm_page_t, vm_prot_t); void moea_pinit(mmu_t, pmap_t); void moea_pinit0(mmu_t, pmap_t); void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); @@ -331,6 +330,7 @@ void moea_qremove(mmu_t, vm_offset_t, int); void moea_release(mmu_t, pmap_t); void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); void moea_remove_all(mmu_t, vm_page_t); +void moea_remove_write(mmu_t, vm_page_t); void moea_zero_page(mmu_t, vm_page_t); void moea_zero_page_area(mmu_t, vm_page_t, int, int); void moea_zero_page_idle(mmu_t, vm_page_t); @@ -358,7 +358,6 @@ static mmu_method_t moea_methods[] = { MMUMETHOD(mmu_ts_referenced, moea_ts_referenced), MMUMETHOD(mmu_map, moea_map), MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick), - MMUMETHOD(mmu_page_protect, moea_page_protect), MMUMETHOD(mmu_pinit, moea_pinit), MMUMETHOD(mmu_pinit0, moea_pinit0), MMUMETHOD(mmu_protect, moea_protect), @@ -367,6 +366,7 @@ static mmu_method_t moea_methods[] = { MMUMETHOD(mmu_release, moea_release), MMUMETHOD(mmu_remove, moea_remove), MMUMETHOD(mmu_remove_all, moea_remove_all), + MMUMETHOD(mmu_remove_write, moea_remove_write), MMUMETHOD(mmu_zero_page, moea_zero_page), MMUMETHOD(mmu_zero_page_area, moea_zero_page_area), MMUMETHOD(mmu_zero_page_idle, moea_zero_page_idle), @@ -1293,6 +1293,48 @@ moea_clear_modify(mmu_t mmu, vm_page_t m) } /* + * Clear the write and modified bits in each of the given page's mappings. + */ +void +moea_remove_write(mmu_t mmu, vm_page_t m) +{ + struct pvo_entry *pvo; + struct pte *pt; + pmap_t pmap; + u_int lo; + + mtx_assert(&vm_page_queue_mtx, MA_OWNED); + if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || + (m->flags & PG_WRITEABLE) == 0) + return; + lo = moea_attr_fetch(m); + SYNC(); + LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { + pmap = pvo->pvo_pmap; + PMAP_LOCK(pmap); + if ((pvo->pvo_pte.pte_lo & PTE_PP) != PTE_BR) { + pt = moea_pvo_to_pte(pvo, -1); + pvo->pvo_pte.pte_lo &= ~PTE_PP; + pvo->pvo_pte.pte_lo |= PTE_BR; + if (pt != NULL) { + moea_pte_synch(pt, &pvo->pvo_pte); + lo |= pvo->pvo_pte.pte_lo; + pvo->pvo_pte.pte_lo &= ~PTE_CHG; + moea_pte_change(pt, &pvo->pvo_pte, + pvo->pvo_vaddr); + mtx_unlock(&moea_table_mutex); + } + } + PMAP_UNLOCK(pmap); + } + if ((lo & PTE_CHG) != 0) { + moea_attr_clear(m, PTE_CHG); + vm_page_dirty(m); + } + vm_page_flag_clear(m, PG_WRITEABLE); +} + +/* * moea_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. @@ -1420,81 +1462,6 @@ moea_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, } /* - * Lower the permission for all mappings to a given page. 
- */ -void -moea_page_protect(mmu_t mmu, vm_page_t m, vm_prot_t prot) -{ - struct pvo_head *pvo_head; - struct pvo_entry *pvo, *next_pvo; - struct pte *pt; - pmap_t pmap; - - /* - * Since the routine only downgrades protection, if the - * maximal protection is desired, there isn't any change - * to be made. - */ - if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == - (VM_PROT_READ|VM_PROT_WRITE)) - return; - - pvo_head = vm_page_to_pvoh(m); - for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { - next_pvo = LIST_NEXT(pvo, pvo_vlink); - MOEA_PVO_CHECK(pvo); /* sanity check */ - pmap = pvo->pvo_pmap; - PMAP_LOCK(pmap); - - /* - * Downgrading to no mapping at all, we just remove the entry. - */ - if ((prot & VM_PROT_READ) == 0) { - moea_pvo_remove(pvo, -1); - PMAP_UNLOCK(pmap); - continue; - } - - /* - * If EXEC permission is being revoked, just clear the flag - * in the PVO. - */ - if ((prot & VM_PROT_EXECUTE) == 0) - pvo->pvo_vaddr &= ~PVO_EXECUTABLE; - - /* - * If this entry is already RO, don't diddle with the page - * table. - */ - if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) { - PMAP_UNLOCK(pmap); - MOEA_PVO_CHECK(pvo); - continue; - } - - /* - * Grab the PTE before we diddle the bits so pvo_to_pte can - * verify the pte contents are as expected. - */ - pt = moea_pvo_to_pte(pvo, -1); - pvo->pvo_pte.pte_lo &= ~PTE_PP; - pvo->pvo_pte.pte_lo |= PTE_BR; - if (pt != NULL) { - moea_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); - mtx_unlock(&moea_table_mutex); - } - PMAP_UNLOCK(pmap); - MOEA_PVO_CHECK(pvo); /* sanity check */ - } - - /* - * Downgrading from writeable: clear the VM page flag - */ - if ((prot & VM_PROT_WRITE) != VM_PROT_WRITE) - vm_page_flag_clear(m, PG_WRITEABLE); -} - -/* * Returns true if the pmap's pv is one of the first * 16 pvs linked to from this page. This count may * be changed upwards or downwards in the future; it diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c index 77f8368..e970d48 100644 --- a/sys/powerpc/powerpc/pmap_dispatch.c +++ b/sys/powerpc/powerpc/pmap_dispatch.c @@ -196,12 +196,6 @@ pmap_page_init(vm_page_t m) } void -pmap_page_protect(vm_page_t m, vm_prot_t prot) -{ - MMU_PAGE_PROTECT(mmu_obj, m, prot); -} - -void pmap_pinit(pmap_t pmap) { MMU_PINIT(mmu_obj, pmap); @@ -256,6 +250,12 @@ pmap_remove_pages(pmap_t pmap) } void +pmap_remove_write(vm_page_t m) +{ + MMU_REMOVE_WRITE(mmu_obj, m); +} + +void pmap_zero_page(vm_page_t m) { MMU_ZERO_PAGE(mmu_obj, m); diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c index 200867b..39b0f88 100644 --- a/sys/sparc64/sparc64/pmap.c +++ b/sys/sparc64/sparc64/pmap.c @@ -1770,23 +1770,6 @@ pmap_page_is_mapped(vm_page_t m) } /* - * Lower the permission for all mappings to a given page. - */ -void -pmap_page_protect(vm_page_t m, vm_prot_t prot) -{ - - KASSERT((m->flags & PG_FICTITIOUS) == 0, - ("pmap_page_protect: fake page")); - if ((prot & VM_PROT_WRITE) == 0) { - if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) - pmap_clear_write(m); - else - pmap_remove_all(m); - } -} - -/* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. 
@@ -1895,7 +1878,7 @@ pmap_clear_reference(vm_page_t m) } void -pmap_clear_write(vm_page_t m) +pmap_remove_write(vm_page_t m) { struct tte *tp; u_long data; diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h index 8c1f9a3..44c6a82 100644 --- a/sys/vm/pmap.h +++ b/sys/vm/pmap.h @@ -93,7 +93,6 @@ extern vm_offset_t kernel_vm_end; void pmap_change_wiring(pmap_t, vm_offset_t, boolean_t); void pmap_clear_modify(vm_page_t m); void pmap_clear_reference(vm_page_t m); -void pmap_clear_write(vm_page_t m); void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t); void pmap_copy_page(vm_page_t, vm_page_t); void pmap_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, @@ -115,7 +114,6 @@ void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size); boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m); void pmap_page_init(vm_page_t m); -void pmap_page_protect(vm_page_t m, vm_prot_t prot); void pmap_pinit(pmap_t); void pmap_pinit0(pmap_t); void pmap_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); @@ -125,6 +123,7 @@ void pmap_release(pmap_t); void pmap_remove(pmap_t, vm_offset_t, vm_offset_t); void pmap_remove_all(vm_page_t m); void pmap_remove_pages(pmap_t); +void pmap_remove_write(vm_page_t m); void pmap_zero_page(vm_page_t); void pmap_zero_page_area(vm_page_t, int off, int size); void pmap_zero_page_idle(vm_page_t); diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c index 4f6696e..20cb80d 100644 --- a/sys/vm/vm_object.c +++ b/sys/vm/vm_object.c @@ -808,7 +808,7 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) clearobjflags = 0; else - pmap_page_protect(p, VM_PROT_READ); + pmap_remove_write(p); } if (clearobjflags && (tstart == 0) && (tend == object->size)) { @@ -977,7 +977,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, vm_pageout_flush(ma, runlen, pagerflags); for (i = 0; i < runlen; i++) { if (ma[i]->valid & ma[i]->dirty) { - pmap_page_protect(ma[i], VM_PROT_READ); + pmap_remove_write(ma[i]); vm_page_flag_set(ma[i], PG_CLEANCHK); /* @@ -1829,7 +1829,7 @@ again: if (vm_page_sleep_if_busy(p, TRUE, "vmopar")) goto again; if (clean_only && p->valid) { - pmap_page_protect(p, VM_PROT_READ | VM_PROT_EXECUTE); + pmap_remove_write(p); if (p->valid & p->dirty) continue; } diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c index b36b1fa..af77ada 100644 --- a/sys/vm/vm_page.c +++ b/sys/vm/vm_page.c @@ -1786,7 +1786,7 @@ vm_page_cowsetup(vm_page_t m) mtx_assert(&vm_page_queue_mtx, MA_OWNED); m->cow++; - pmap_page_protect(m, VM_PROT_READ); + pmap_remove_write(m); } #include "opt_ddb.h" diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c index e7611d2..115777d 100644 --- a/sys/vm/vm_pageout.c +++ b/sys/vm/vm_pageout.c @@ -431,7 +431,7 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags) ("vm_pageout_flush: partially invalid page %p index %d/%d", mc[i], i, count)); vm_page_io_start(mc[i]); - pmap_page_protect(mc[i], VM_PROT_READ); + pmap_remove_write(mc[i]); } vm_page_unlock_queues(); vm_object_pip_add(object, count); |
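On the caller side, VM code that used to downgrade a page with pmap_page_protect(p, VM_PROT_READ) before flushing it now calls the write-revoking primitive directly, as in the vm_object.c and vm_pageout.c hunks above. A hedged sketch of that pattern; the helper name is hypothetical, and the locking assumption follows vm_pageout_flush():

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

/*
 * Hypothetical helper: prepare a page for a clean/pageout pass under the
 * new interface.  The page queues lock is assumed held, as asserted by
 * pmap_remove_write() itself on several architectures.
 */
static void
vm_page_prepare_clean(vm_page_t m)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	vm_page_io_start(m);		/* mark the page busy for I/O */
	pmap_remove_write(m);		/* clear write and modified bits in every mapping */
}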