author     alc <alc@FreeBSD.org>  2007-11-17 22:52:29 +0000
committer  alc <alc@FreeBSD.org>  2007-11-17 22:52:29 +0000
commit     d1ab859bdce52e381ae74e98a1e951e839b6726f (patch)
tree       8c219244ed89f7561e53ea8b24266c64a0c5d008 /sys
parent     4e6e975846d9ebf395f824bc88086133d51086b1 (diff)
Prevent the leakage of wired pages in the following circumstances:

First, a file is mmap(2)ed and then mlock(2)ed.  Later, it is truncated.
Under "normal" circumstances, i.e., when the file is not mlock(2)ed, the
pages beyond the EOF are unmapped and freed.  However, when the file is
mlock(2)ed, the pages beyond the EOF are unmapped but not freed because
they have a non-zero wire count.  This can be a mistake.  Specifically,
it is a mistake if the sole reason why the pages are wired is the
existence of wired, managed mappings.  Previously, unmapping the pages
destroyed these wired, managed mappings but did not reduce the pages'
wire count.  Consequently, when the file was unmapped, the pages were
not unwired because the wired mappings had already been destroyed.
Moreover, when the vm object was finally destroyed, the pages were
leaked because they were still wired.

The fix is to reduce the pages' wire count by the number of wired,
managed mappings destroyed.  To do this, I introduce a new pmap
function, pmap_page_wired_mappings(), that returns the number of
managed mappings to the given physical page that are wired, and I use
this function in vm_object_page_remove().

Reviewed by:	tegge
MFC after:	6 weeks
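For illustration, the leaking sequence could be triggered from userspace
roughly as in the following minimal sketch; the file path, the 4 KB page
size, and the omission of error handling are illustrative assumptions,
not part of the commit:

	#include <sys/types.h>
	#include <sys/mman.h>

	#include <fcntl.h>
	#include <unistd.h>

	int
	main(void)
	{
		/* Hypothetical scratch file; sizes assume 4 KB pages. */
		int fd = open("/tmp/scratch", O_RDWR | O_CREAT, 0644);
		char *p;

		ftruncate(fd, 2 * 4096);	/* two pages of backing store */
		p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, 0);
		mlock(p, 2 * 4096);		/* fault in and wire both pages */
		ftruncate(fd, 4096);		/* second page is now beyond EOF */
		/*
		 * vm_object_page_remove() unmaps the page beyond the EOF.
		 * Before this commit, destroying its wired, managed mapping
		 * left the wire count unchanged, so the later munmap() could
		 * not unwire the page and it leaked with the vm object.
		 */
		munmap(p, 2 * 4096);
		close(fd);
		return (0);
	}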
Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/amd64/pmap.c               | 29
-rw-r--r--  sys/arm/arm/pmap.c                   | 21
-rw-r--r--  sys/i386/i386/pmap.c                 | 31
-rw-r--r--  sys/ia64/ia64/pmap.c                 | 32
-rw-r--r--  sys/powerpc/aim/mmu_oea.c            | 22
-rw-r--r--  sys/powerpc/powerpc/mmu_if.m         | 15
-rw-r--r--  sys/powerpc/powerpc/mmu_oea.c        | 22
-rw-r--r--  sys/powerpc/powerpc/pmap_dispatch.c  |  7
-rw-r--r--  sys/sparc64/sparc64/pmap.c           | 20
-rw-r--r--  sys/sun4v/sun4v/pmap.c               | 28
-rw-r--r--  sys/vm/pmap.h                        |  1
-rw-r--r--  sys/vm/vm_object.c                   | 14
12 files changed, 241 insertions(+), 1 deletion(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 9d3cd4d..e3afa4f 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2925,6 +2925,35 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
}
/*
+ * pmap_page_wired_mappings:
+ *
+ * Return the number of managed mappings to the given physical page
+ * that are wired.
+ */
+int
+pmap_page_wired_mappings(vm_page_t m)
+{
+ pv_entry_t pv;
+ pt_entry_t *pte;
+ pmap_t pmap;
+ int count;
+
+ count = 0;
+ if ((m->flags & PG_FICTITIOUS) != 0)
+ return (count);
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pte = pmap_pte(pmap, pv->pv_va);
+ if ((*pte & PG_W) != 0)
+ count++;
+ PMAP_UNLOCK(pmap);
+ }
+ return (count);
+}
+
+/*
* Remove all pages from specified address space
* this aids process exit speeds. Also, this code
* is special cased for current process only, but
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 4978460..71d7429 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -4490,6 +4490,27 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
return (FALSE);
}
+/*
+ * pmap_page_wired_mappings:
+ *
+ * Return the number of managed mappings to the given physical page
+ * that are wired.
+ */
+int
+pmap_page_wired_mappings(vm_page_t m)
+{
+ pv_entry_t pv;
+ int count;
+
+ count = 0;
+ if ((m->flags & PG_FICTITIOUS) != 0)
+ return (count);
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
+ if ((pv->pv_flags & PVF_WIRED) != 0)
+ count++;
+ return (count);
+}
/*
* pmap_ts_referenced:
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index d37d07a..f3c2dc4 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -3030,6 +3030,37 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
}
/*
+ * pmap_page_wired_mappings:
+ *
+ * Return the number of managed mappings to the given physical page
+ * that are wired.
+ */
+int
+pmap_page_wired_mappings(vm_page_t m)
+{
+ pv_entry_t pv;
+ pt_entry_t *pte;
+ pmap_t pmap;
+ int count;
+
+ count = 0;
+ if ((m->flags & PG_FICTITIOUS) != 0)
+ return (count);
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ sched_pin();
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pte = pmap_pte_quick(pmap, pv->pv_va);
+ if ((*pte & PG_W) != 0)
+ count++;
+ PMAP_UNLOCK(pmap);
+ }
+ sched_unpin();
+ return (count);
+}
+
+/*
* Remove all pages from specified address space
* this aids process exit speeds. Also, this code
* is special cased for current process only, but
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index 253211c..59160c8 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -1915,6 +1915,38 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
}
/*
+ * pmap_page_wired_mappings:
+ *
+ * Return the number of managed mappings to the given physical page
+ * that are wired.
+ */
+int
+pmap_page_wired_mappings(vm_page_t m)
+{
+ struct ia64_lpte *pte;
+ pmap_t oldpmap, pmap;
+ pv_entry_t pv;
+ int count;
+
+ count = 0;
+ if ((m->flags & PG_FICTITIOUS) != 0)
+ return (count);
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+ pmap = pv->pv_pmap;
+ PMAP_LOCK(pmap);
+ oldpmap = pmap_switch(pmap);
+ pte = pmap_find_vhpt(pv->pv_va);
+ KASSERT(pte != NULL, ("pte"));
+ if (pmap_wired(pte))
+ count++;
+ pmap_switch(oldpmap);
+ PMAP_UNLOCK(pmap);
+ }
+ return (count);
+}
+
+/*
* Remove all pages from specified address space
* this aids process exit speeds. Also, this code
* is special cased for current process only, but
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index d1a0faa..db2c3bb 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -322,6 +322,7 @@ boolean_t moea_is_modified(mmu_t, vm_page_t);
boolean_t moea_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
+int moea_page_wired_mappings(mmu_t, vm_page_t);
void moea_pinit(mmu_t, pmap_t);
void moea_pinit0(mmu_t, pmap_t);
void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
@@ -359,6 +360,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_ts_referenced, moea_ts_referenced),
MMUMETHOD(mmu_map, moea_map),
MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
+ MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings),
MMUMETHOD(mmu_pinit, moea_pinit),
MMUMETHOD(mmu_pinit0, moea_pinit0),
MMUMETHOD(mmu_protect, moea_protect),
@@ -1492,6 +1494,26 @@ moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
return (FALSE);
}
+/*
+ * Return the number of managed mappings to the given physical page
+ * that are wired.
+ */
+int
+moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
+{
+ struct pvo_entry *pvo;
+ int count;
+
+ count = 0;
+ if (!moea_initialized || (m->flags & PG_FICTITIOUS) != 0)
+ return (count);
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
+ if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
+ count++;
+ return (count);
+}
+
static u_int moea_vsidcontext;
void
diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m
index 0fc546f..0903e3d 100644
--- a/sys/powerpc/powerpc/mmu_if.m
+++ b/sys/powerpc/powerpc/mmu_if.m
@@ -431,6 +431,21 @@ METHOD void page_init {
/**
+ * @brief Count the number of managed mappings to the given physical
+ * page that are wired.
+ *
+ * @param _pg physical page
+ *
+ * @retval int the number of wired, managed mappings to the
+ * given physical page
+ */
+METHOD int page_wired_mappings {
+ mmu_t _mmu;
+ vm_page_t _pg;
+};
+
+
+/**
* @brief Initialise a physical map data structure
*
* @param _pmap physical map
diff --git a/sys/powerpc/powerpc/mmu_oea.c b/sys/powerpc/powerpc/mmu_oea.c
index d1a0faa..db2c3bb 100644
--- a/sys/powerpc/powerpc/mmu_oea.c
+++ b/sys/powerpc/powerpc/mmu_oea.c
@@ -322,6 +322,7 @@ boolean_t moea_is_modified(mmu_t, vm_page_t);
boolean_t moea_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
+int moea_page_wired_mappings(mmu_t, vm_page_t);
void moea_pinit(mmu_t, pmap_t);
void moea_pinit0(mmu_t, pmap_t);
void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
@@ -359,6 +360,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_ts_referenced, moea_ts_referenced),
MMUMETHOD(mmu_map, moea_map),
MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
+ MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings),
MMUMETHOD(mmu_pinit, moea_pinit),
MMUMETHOD(mmu_pinit0, moea_pinit0),
MMUMETHOD(mmu_protect, moea_protect),
@@ -1492,6 +1494,26 @@ moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
return (FALSE);
}
+/*
+ * Return the number of managed mappings to the given physical page
+ * that are wired.
+ */
+int
+moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
+{
+ struct pvo_entry *pvo;
+ int count;
+
+ count = 0;
+ if (!moea_initialized || (m->flags & PG_FICTITIOUS) != 0)
+ return (count);
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
+ if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
+ count++;
+ return (count);
+}
+
static u_int moea_vsidcontext;
void
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index 9a9c1f5..c102b17 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -194,6 +194,13 @@ pmap_page_init(vm_page_t m)
}
int
+pmap_page_wired_mappings(vm_page_t m)
+{
+
+ return (MMU_PAGE_WIRED_MAPPINGS(mmu_obj, m));
+}
+
+int
pmap_pinit(pmap_t pmap)
{
MMU_PINIT(mmu_obj, pmap);
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 1c71c5f..fa645d5 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1753,6 +1753,26 @@ pmap_page_exists_quick(pmap_t pm, vm_page_t m)
}
/*
+ * Return the number of managed mappings to the given physical page
+ * that are wired.
+ */
+int
+pmap_page_wired_mappings(vm_page_t m)
+{
+ struct tte *tp;
+ int count;
+
+ count = 0;
+ if ((m->flags & PG_FICTITIOUS) != 0)
+ return (count);
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
+ if ((tp->tte_data & (TD_PV | TD_WIRED)) == (TD_PV | TD_WIRED))
+ count++;
+ return (count);
+}
+
+/*
* Remove all pages from specified address space, this aids process exit
* speeds. This is much faster than pmap_remove in the case of running down
* an entire address space. Only works for the current pmap.
diff --git a/sys/sun4v/sun4v/pmap.c b/sys/sun4v/sun4v/pmap.c
index ecfe3db..1b88115 100644
--- a/sys/sun4v/sun4v/pmap.c
+++ b/sys/sun4v/sun4v/pmap.c
@@ -1669,6 +1669,34 @@ pmap_page_init(vm_page_t m)
TAILQ_INIT(&m->md.pv_list);
m->md.pv_list_count = 0;
}
+
+/*
+ * Return the number of managed mappings to the given physical page
+ * that are wired.
+ */
+int
+pmap_page_wired_mappings(vm_page_t m)
+{
+ pmap_t pmap;
+ pv_entry_t pv;
+ uint64_t tte_data;
+ int count;
+
+ count = 0;
+ if ((m->flags & PG_FICTITIOUS) != 0)
+ return (count);
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+ pmap = pv->pv_pmap;
+ PMAP_LOCK(pmap);
+ tte_data = tte_hash_lookup(pmap->pm_hash, pv->pv_va);
+ if ((tte_data & VTD_WIRED) != 0)
+ count++;
+ PMAP_UNLOCK(pmap);
+ }
+ return (count);
+}
+
/*
* Lower the permission for all mappings to a given page.
*/
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 3d9045f..61c5fa6 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -114,6 +114,7 @@ void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size);
boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m);
void pmap_page_init(vm_page_t m);
+int pmap_page_wired_mappings(vm_page_t m);
int pmap_pinit(pmap_t);
void pmap_pinit0(pmap_t);
void pmap_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 0426bb9..4c433d3 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -1797,6 +1797,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
boolean_t clean_only)
{
vm_page_t p, next;
+ int wirings;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
if (object->resident_page_count == 0)
@@ -1831,8 +1832,16 @@ again:
p = next) {
next = TAILQ_NEXT(p, listq);
- if (p->wire_count != 0) {
+ /*
+ * If the page is wired for any reason besides the
+ * existence of managed, wired mappings, then it cannot
+ * be freed.
+ */
+ if ((wirings = p->wire_count) != 0 &&
+ (wirings = pmap_page_wired_mappings(p)) != p->wire_count) {
pmap_remove_all(p);
+ /* Account for removal of managed, wired mappings. */
+ p->wire_count -= wirings;
if (!clean_only)
p->valid = 0;
continue;
@@ -1845,6 +1854,9 @@ again:
continue;
}
pmap_remove_all(p);
+ /* Account for removal of managed, wired mappings. */
+ if (wirings != 0)
+ p->wire_count -= wirings;
vm_page_free(p);
}
vm_page_unlock_queues();
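The two-step test in the vm_object_page_remove() hunk above is subtle:
wirings is first set to the page's total wire count and then, only if
that is non-zero, to the number of wired, managed mappings.  The page
may be freed only when the two values coincide (or the page was never
wired).  A standalone model of that decision, using hypothetical names
rather than the kernel code itself, is:

	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * Model of the accounting added to vm_object_page_remove().
	 * "wirings" is the page's total wire count; "mapped" is the subset
	 * reported by pmap_page_wired_mappings().  Returns true if the page
	 * may be freed after all of its mappings are removed, and stores
	 * the wire count left once the managed wirings are subtracted.
	 */
	static bool
	may_free_after_unmap(int wirings, int mapped, int *remaining)
	{

		if (wirings != 0 && mapped != wirings) {
			/*
			 * Wired for another reason as well (e.g., in-flight
			 * I/O): drop only the managed wirings; do not free.
			 */
			*remaining = wirings - mapped;
			return (false);
		}
		/* Unwired, or wired solely by managed mappings. */
		*remaining = 0;
		return (true);
	}

	int
	main(void)
	{
		int w;

		/* All three wirings come from managed mappings: freeable. */
		printf("%d %d\n", may_free_after_unmap(3, 3, &w), w);	/* 1 0 */
		/* One extra wiring remains: the page must stay allocated. */
		printf("%d %d\n", may_free_after_unmap(3, 2, &w), w);	/* 0 1 */
		return (0);
	}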