diff options
author | alc <alc@FreeBSD.org> | 2004-10-08 08:23:43 +0000 |
---|---|---|
committer | alc <alc@FreeBSD.org> | 2004-10-08 08:23:43 +0000 |
commit | 417a40f2bff86acecbf99e0fde8bd75b42920fa4 (patch) | |
tree | 7a5a0495f446462dd1a370313683c21899917462 /sys/amd64 | |
parent | 8b1be96ee507ff704291bae4e80e020485ada346 (diff) | |
download | FreeBSD-src-417a40f2bff86acecbf99e0fde8bd75b42920fa4.zip FreeBSD-src-417a40f2bff86acecbf99e0fde8bd75b42920fa4.tar.gz |
Make pte_load_store() an atomic operation in all cases, not just i386 PAE.
Restructure pmap_enter() to prevent the loss of a page modified (PG_M) bit
in a race between processors. (This restructuring assumes the newly atomic
pte_load_store() for correct operation.)
Reviewed by: tegge@
PR: i386/61852
Diffstat (limited to 'sys/amd64')
-rw-r--r-- | sys/amd64/amd64/pmap.c | 27 | ||||
-rw-r--r-- | sys/amd64/include/pmap.h | 18 |
2 files changed, 34 insertions(+), 11 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c index 4b39ce0..5f815ad 100644 --- a/sys/amd64/amd64/pmap.c +++ b/sys/amd64/amd64/pmap.c @@ -1839,7 +1839,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_paddr_t opa; pd_entry_t ptepde; pt_entry_t origpte, newpte; - vm_page_t mpte; + vm_page_t mpte, om; va = trunc_page(va); #ifdef PMAP_DIAGNOSTIC @@ -1881,6 +1881,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, panic("pmap_enter: invalid page directory va=%#lx\n", va); pa = VM_PAGE_TO_PHYS(m); + om = NULL; origpte = *pte; opa = origpte & PG_FRAME; @@ -1921,8 +1922,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, * so we go ahead and sense modify status. */ if (origpte & PG_MANAGED) { - if ((origpte & PG_M) && pmap_track_modified(va)) - vm_page_dirty(m); + om = m; pa |= PG_MANAGED; } goto validate; @@ -1933,10 +1933,17 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, */ if (opa) { int err; - err = pmap_remove_pte(pmap, pte, va, ptepde); + if (origpte & PG_W) + pmap->pm_stats.wired_count--; + if (origpte & PG_MANAGED) { + om = PHYS_TO_VM_PAGE(opa); + err = pmap_remove_entry(pmap, om, va, ptepde); + } else + err = pmap_unuse_pt(pmap, va, ptepde); if (err) panic("pmap_enter: pte vanished, va: 0x%lx", va); - } + } else + pmap->pm_stats.resident_count++; /* * Enter on the PV list if part of our managed memory. Note that we @@ -1952,7 +1959,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, /* * Increment counters */ - pmap->pm_stats.resident_count++; if (wired) pmap->pm_stats.wired_count++; @@ -1977,7 +1983,14 @@ validate: * to update the pte. 
*/ if ((origpte & ~(PG_M|PG_A)) != newpte) { - pte_store(pte, newpte | PG_A); + if (origpte & PG_MANAGED) { + origpte = pte_load_store(pte, newpte | PG_A); + if ((origpte & PG_M) && pmap_track_modified(va)) + vm_page_dirty(om); + if (origpte & PG_A) + vm_page_flag_set(om, PG_REFERENCED); + } else + pte_store(pte, newpte | PG_A); if (origpte) { pmap_invalidate_page(pmap, va); } diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h index 90da462..3069a66 100644 --- a/sys/amd64/include/pmap.h +++ b/sys/amd64/include/pmap.h @@ -194,15 +194,25 @@ pte_load_store(pt_entry_t *ptep, pt_entry_t pte) { pt_entry_t r; - r = *ptep; - *ptep = pte; + __asm __volatile( + "xchgq %0,%1" + : "=m" (*ptep), + "=r" (r) + : "1" (pte), + "m" (*ptep)); return (r); } #define pte_load_clear(pte) atomic_readandclear_long(pte) -#define pte_clear(ptep) pte_load_store((ptep), (pt_entry_t)0ULL) -#define pte_store(ptep, pte) pte_load_store((ptep), (pt_entry_t)pte) +static __inline void +pte_store(pt_entry_t *ptep, pt_entry_t pte) +{ + + *ptep = pte; +} + +#define pte_clear(ptep) pte_store((ptep), (pt_entry_t)0ULL) #define pde_store(pdep, pde) pte_store((pdep), (pde)) |