author     alc <alc@FreeBSD.org>	2004-10-08 08:23:43 +0000
committer  alc <alc@FreeBSD.org>	2004-10-08 08:23:43 +0000
commit     417a40f2bff86acecbf99e0fde8bd75b42920fa4
tree       7a5a0495f446462dd1a370313683c21899917462 /sys
parent     8b1be96ee507ff704291bae4e80e020485ada346
Make pte_load_store() an atomic operation in all cases, not just i386 PAE.

Restructure pmap_enter() to prevent the loss of a page modified (PG_M) bit
in a race between processors.  (This restructuring assumes the newly atomic
pte_load_store() for correct operation.)

Reviewed by:	tegge@
PR:		i386/61852
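To see what the atomicity buys, consider the window in the old non-PAE
primitive. The fragment below is an illustrative userland sketch written for
this page, not code from the tree; the type, the simplified PG_M value, and
the function name are assumptions modeled on the headers changed here:

	/*
	 * Illustrative sketch only -- not code from the commit.  Before
	 * this change, the non-PAE pte_load_store() was an ordinary
	 * load/store pair:
	 */
	#include <stdint.h>

	typedef uint64_t pt_entry_t;	/* amd64 layout assumed */
	#define PG_M	0x040		/* hardware-set dirty bit */

	static inline pt_entry_t
	pte_load_store_old(pt_entry_t *ptep, pt_entry_t pte)
	{
		pt_entry_t r;

		r = *ptep;	/* CPU 0 reads the old PTE ...            */
				/* ... CPU 1's MMU sets PG_M in *ptep ... */
		*ptep = pte;	/* ... and CPU 0's store discards it, so  */
		return (r);	/* vm_page_dirty() never sees that write. */
	}

The replacement is a single xchg against memory, which on x86 carries an
implicit LOCK prefix, so the returned value is exactly the entry the hardware
last wrote, and pmap_enter() can fold its PG_M and PG_A bits back into the
vm_page.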
Diffstat (limited to 'sys')
-rw-r--r--   sys/amd64/amd64/pmap.c    |  27
-rw-r--r--   sys/amd64/include/pmap.h  |  18
-rw-r--r--   sys/i386/i386/pmap.c      |  27
-rw-r--r--   sys/i386/include/pmap.h   |  20
4 files changed, 70 insertions(+), 22 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 4b39ce0..5f815ad 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -1839,7 +1839,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
vm_paddr_t opa;
pd_entry_t ptepde;
pt_entry_t origpte, newpte;
- vm_page_t mpte;
+ vm_page_t mpte, om;
va = trunc_page(va);
#ifdef PMAP_DIAGNOSTIC
@@ -1881,6 +1881,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
panic("pmap_enter: invalid page directory va=%#lx\n", va);
pa = VM_PAGE_TO_PHYS(m);
+ om = NULL;
origpte = *pte;
opa = origpte & PG_FRAME;
@@ -1921,8 +1922,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* so we go ahead and sense modify status.
*/
if (origpte & PG_MANAGED) {
- if ((origpte & PG_M) && pmap_track_modified(va))
- vm_page_dirty(m);
+ om = m;
pa |= PG_MANAGED;
}
goto validate;
@@ -1933,10 +1933,17 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
*/
if (opa) {
int err;
- err = pmap_remove_pte(pmap, pte, va, ptepde);
+ if (origpte & PG_W)
+ pmap->pm_stats.wired_count--;
+ if (origpte & PG_MANAGED) {
+ om = PHYS_TO_VM_PAGE(opa);
+ err = pmap_remove_entry(pmap, om, va, ptepde);
+ } else
+ err = pmap_unuse_pt(pmap, va, ptepde);
if (err)
panic("pmap_enter: pte vanished, va: 0x%lx", va);
- }
+ } else
+ pmap->pm_stats.resident_count++;
/*
* Enter on the PV list if part of our managed memory. Note that we
@@ -1952,7 +1959,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
/*
* Increment counters
*/
- pmap->pm_stats.resident_count++;
if (wired)
pmap->pm_stats.wired_count++;
@@ -1977,7 +1983,14 @@ validate:
* to update the pte.
*/
if ((origpte & ~(PG_M|PG_A)) != newpte) {
- pte_store(pte, newpte | PG_A);
+ if (origpte & PG_MANAGED) {
+ origpte = pte_load_store(pte, newpte | PG_A);
+ if ((origpte & PG_M) && pmap_track_modified(va))
+ vm_page_dirty(om);
+ if (origpte & PG_A)
+ vm_page_flag_set(om, PG_REFERENCED);
+ } else
+ pte_store(pte, newpte | PG_A);
if (origpte) {
pmap_invalidate_page(pmap, va);
}
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index 90da462..3069a66 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -194,15 +194,25 @@ pte_load_store(pt_entry_t *ptep, pt_entry_t pte)
{
pt_entry_t r;
- r = *ptep;
- *ptep = pte;
+ __asm __volatile(
+ "xchgq %0,%1"
+ : "=m" (*ptep),
+ "=r" (r)
+ : "1" (pte),
+ "m" (*ptep));
return (r);
}
#define pte_load_clear(pte) atomic_readandclear_long(pte)
-#define pte_clear(ptep) pte_load_store((ptep), (pt_entry_t)0ULL)
-#define pte_store(ptep, pte) pte_load_store((ptep), (pt_entry_t)pte)
+static __inline void
+pte_store(pt_entry_t *ptep, pt_entry_t pte)
+{
+
+ *ptep = pte;
+}
+
+#define pte_clear(ptep) pte_store((ptep), (pt_entry_t)0ULL)
#define pde_store(pdep, pde) pte_store((pdep), (pde))
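The new primitive is easy to exercise from userland. The program below is a
hypothetical stress test written for this page (none of it ships with
FreeBSD): one thread stands in for another CPU's MMU, setting PG_M with an
atomic OR, while the main thread swaps the entry out with the same xchgq
sequence the header now uses. Every observation of PG_M in a swapped-out
value accounts for at least one hardware-style update since the previous
swap; with the old load/store pair, updates landing in the window are simply
overwritten.

	/* Hypothetical userland stress test (not part of the commit).
	 * Compile with: cc -O2 race.c -lpthread */
	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t pt_entry_t;
	#define PG_M	0x040

	static pt_entry_t pte;
	static volatile int done;

	/* Same sequence as the new amd64 pte_load_store(). */
	static pt_entry_t
	pte_load_store(pt_entry_t *ptep, pt_entry_t v)
	{
		pt_entry_t r;

		__asm __volatile("xchgq %0,%1"
		    : "=m" (*ptep), "=r" (r)
		    : "1" (v), "m" (*ptep));
		return (r);
	}

	/* Stand-in for a remote CPU's MMU dirtying the page. */
	static void *
	mmu(void *arg)
	{
		for (int i = 0; i < 1000000; i++)
			__atomic_fetch_or(&pte, PG_M, __ATOMIC_RELAXED);
		done = 1;
		return (NULL);
	}

	int
	main(void)
	{
		pthread_t t;
		long dirty_seen = 0;

		pthread_create(&t, NULL, mmu, NULL);
		while (!done)
			if (pte_load_store(&pte, 0) & PG_M)
				dirty_seen++;	/* would feed vm_page_dirty() */
		pthread_join(t, NULL);
		if (pte_load_store(&pte, 0) & PG_M)
			dirty_seen++;		/* catch a final update */
		printf("PG_M observed %ld times\n", dirty_seen);
		return (0);
	}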
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index b270f72..b7e8565 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -1894,7 +1894,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
register pt_entry_t *pte;
vm_paddr_t opa;
pt_entry_t origpte, newpte;
- vm_page_t mpte;
+ vm_page_t mpte, om;
va &= PG_FRAME;
#ifdef PMAP_DIAGNOSTIC
@@ -1939,6 +1939,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
}
pa = VM_PAGE_TO_PHYS(m);
+ om = NULL;
origpte = *pte;
opa = origpte & PG_FRAME;
@@ -1986,8 +1987,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* so we go ahead and sense modify status.
*/
if (origpte & PG_MANAGED) {
- if ((origpte & PG_M) && pmap_track_modified(va))
- vm_page_dirty(m);
+ om = m;
pa |= PG_MANAGED;
}
goto validate;
@@ -1998,10 +1998,17 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
*/
if (opa) {
int err;
- err = pmap_remove_pte(pmap, pte, va);
+ if (origpte & PG_W)
+ pmap->pm_stats.wired_count--;
+ if (origpte & PG_MANAGED) {
+ om = PHYS_TO_VM_PAGE(opa);
+ err = pmap_remove_entry(pmap, om, va);
+ } else
+ err = pmap_unuse_pt(pmap, va);
if (err)
panic("pmap_enter: pte vanished, va: 0x%x", va);
- }
+ } else
+ pmap->pm_stats.resident_count++;
/*
* Enter on the PV list if part of our managed memory. Note that we
@@ -2017,7 +2024,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
/*
* Increment counters
*/
- pmap->pm_stats.resident_count++;
if (wired)
pmap->pm_stats.wired_count++;
@@ -2040,7 +2046,14 @@ validate:
* to update the pte.
*/
if ((origpte & ~(PG_M|PG_A)) != newpte) {
- pte_store(pte, newpte | PG_A);
+ if (origpte & PG_MANAGED) {
+ origpte = pte_load_store(pte, newpte | PG_A);
+ if ((origpte & PG_M) && pmap_track_modified(va))
+ vm_page_dirty(om);
+ if (origpte & PG_A)
+ vm_page_flag_set(om, PG_REFERENCED);
+ } else
+ pte_store(pte, newpte | PG_A);
if (origpte) {
pmap_invalidate_page(pmap, va);
}
diff --git a/sys/i386/include/pmap.h b/sys/i386/include/pmap.h
index eb7b53b..7c54006 100644
--- a/sys/i386/include/pmap.h
+++ b/sys/i386/include/pmap.h
@@ -236,6 +236,8 @@ pte_load_store(pt_entry_t *ptep, pt_entry_t v)
#define pte_load_clear(ptep) pte_load_store((ptep), (pt_entry_t)0ULL)
+#define pte_store(ptep, pte) pte_load_store((ptep), (pt_entry_t)pte)
+
#else /* PAE */
static __inline pt_entry_t
@@ -252,17 +254,27 @@ pte_load_store(pt_entry_t *ptep, pt_entry_t pte)
{
pt_entry_t r;
- r = *ptep;
- *ptep = pte;
+ __asm __volatile(
+ "xchgl %0,%1"
+ : "=m" (*ptep),
+ "=r" (r)
+ : "1" (pte),
+ "m" (*ptep));
return (r);
}
#define pte_load_clear(pte) atomic_readandclear_int(pte)
+static __inline void
+pte_store(pt_entry_t *ptep, pt_entry_t pte)
+{
+
+ *ptep = pte;
+}
+
#endif /* PAE */
-#define pte_clear(ptep) pte_load_store((ptep), (pt_entry_t)0ULL)
-#define pte_store(ptep, pte) pte_load_store((ptep), (pt_entry_t)pte)
+#define pte_clear(ptep) pte_store((ptep), (pt_entry_t)0ULL)
#define pde_store(pdep, pde) pte_store((pdep), (pde))
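The PAE side only gains a pte_store() alias here because its pte_load_store()
was already atomic, which is what "not just i386 PAE" in the commit message
alludes to. A 64-bit PTE cannot be swapped with xchgl on a 32-bit processor,
so the PAE primitive is built around an 8-byte compare-exchange (cmpxchg8b)
instead. The loop below sketches that shape using modern compiler builtins as
a stand-in for the kernel's hand-written assembly; it is an illustration, not
the kernel's code.

	/* Sketch of a cmpxchg8b-style atomic exchange for 64-bit PAE PTEs
	 * on i386, with GCC __sync builtins in place of inline asm. */
	static __inline pt_entry_t
	pte_load_store(pt_entry_t *ptep, pt_entry_t v)
	{
		pt_entry_t old, prev;

		old = *ptep;
		/* Retry until the 8-byte compare-exchange installs v;
		 * each failure returns fresh contents to compare against. */
		while ((prev = __sync_val_compare_and_swap(ptep, old, v)) != old)
			old = prev;
		return (old);
	}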