summaryrefslogtreecommitdiffstats
path: root/sys/powerpc
diff options
context:
space:
mode:
authoralc <alc@FreeBSD.org>2007-11-17 22:52:29 +0000
committeralc <alc@FreeBSD.org>2007-11-17 22:52:29 +0000
commitd1ab859bdce52e381ae74e98a1e951e839b6726f (patch)
tree8c219244ed89f7561e53ea8b24266c64a0c5d008 /sys/powerpc
parent4e6e975846d9ebf395f824bc88086133d51086b1 (diff)
downloadFreeBSD-src-d1ab859bdce52e381ae74e98a1e951e839b6726f.zip
FreeBSD-src-d1ab859bdce52e381ae74e98a1e951e839b6726f.tar.gz
Prevent the leakage of wired pages in the following circumstances:
First, a file is mmap(2)ed and then mlock(2)ed. Later, it is truncated. Under "normal" circumstances, i.e., when the file is not mlock(2)ed, the pages beyond the EOF are unmapped and freed. However, when the file is mlock(2)ed, the pages beyond the EOF are unmapped but not freed because they have a non-zero wire count. This can be a mistake. Specifically, it is a mistake if the sole reason why the pages are wired is because of wired, managed mappings. Previously, unmapping the pages destroyed these wired, managed mappings, but did not reduce the pages' wire count. Consequently, when the file was unmapped, the pages were not unwired because the wired mapping had been destroyed. Moreover, when the vm object was finally destroyed, the pages were leaked because they were still wired. The fix is to reduce the pages' wired count by the number of wired, managed mappings destroyed. To do this, I introduce a new pmap function pmap_page_wired_mappings() that returns the number of managed mappings to the given physical page that are wired, and I use this function in vm_object_page_remove().
Reviewed by: tegge
MFC after: 6 weeks
Diffstat (limited to 'sys/powerpc')
-rw-r--r--sys/powerpc/aim/mmu_oea.c22
-rw-r--r--sys/powerpc/powerpc/mmu_if.m15
-rw-r--r--sys/powerpc/powerpc/mmu_oea.c22
-rw-r--r--sys/powerpc/powerpc/pmap_dispatch.c7
4 files changed, 66 insertions, 0 deletions
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index d1a0faa..db2c3bb 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -322,6 +322,7 @@ boolean_t moea_is_modified(mmu_t, vm_page_t);
boolean_t moea_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
+int moea_page_wired_mappings(mmu_t, vm_page_t);
void moea_pinit(mmu_t, pmap_t);
void moea_pinit0(mmu_t, pmap_t);
void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
@@ -359,6 +360,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_ts_referenced, moea_ts_referenced),
MMUMETHOD(mmu_map, moea_map),
MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
+ MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings),
MMUMETHOD(mmu_pinit, moea_pinit),
MMUMETHOD(mmu_pinit0, moea_pinit0),
MMUMETHOD(mmu_protect, moea_protect),
@@ -1492,6 +1494,26 @@ moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
return (FALSE);
}
+/*
+ * Return the number of managed mappings to the given physical page
+ * that are wired.
+ */
+int
+moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
+{
+ struct pvo_entry *pvo;
+ int count;
+
+ count = 0;
+ if (!moea_initialized || (m->flags & PG_FICTITIOUS) != 0)
+ return (count);
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
+ if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
+ count++;
+ return (count);
+}
+
static u_int moea_vsidcontext;
void
diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m
index 0fc546f..0903e3d 100644
--- a/sys/powerpc/powerpc/mmu_if.m
+++ b/sys/powerpc/powerpc/mmu_if.m
@@ -431,6 +431,21 @@ METHOD void page_init {
/**
+ * @brief Count the number of managed mappings to the given physical
+ * page that are wired.
+ *
+ * @param _pg physical page
+ *
+ * @retval int the number of wired, managed mappings to the
+ * given physical page
+ */
+METHOD int page_wired_mappings {
+ mmu_t _mmu;
+ vm_page_t _pg;
+};
+
+
+/**
* @brief Initialise a physical map data structure
*
* @param _pmap physical map
diff --git a/sys/powerpc/powerpc/mmu_oea.c b/sys/powerpc/powerpc/mmu_oea.c
index d1a0faa..db2c3bb 100644
--- a/sys/powerpc/powerpc/mmu_oea.c
+++ b/sys/powerpc/powerpc/mmu_oea.c
@@ -322,6 +322,7 @@ boolean_t moea_is_modified(mmu_t, vm_page_t);
boolean_t moea_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
+int moea_page_wired_mappings(mmu_t, vm_page_t);
void moea_pinit(mmu_t, pmap_t);
void moea_pinit0(mmu_t, pmap_t);
void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
@@ -359,6 +360,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_ts_referenced, moea_ts_referenced),
MMUMETHOD(mmu_map, moea_map),
MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
+ MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings),
MMUMETHOD(mmu_pinit, moea_pinit),
MMUMETHOD(mmu_pinit0, moea_pinit0),
MMUMETHOD(mmu_protect, moea_protect),
@@ -1492,6 +1494,26 @@ moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
return (FALSE);
}
+/*
+ * Return the number of managed mappings to the given physical page
+ * that are wired.
+ */
+int
+moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
+{
+ struct pvo_entry *pvo;
+ int count;
+
+ count = 0;
+ if (!moea_initialized || (m->flags & PG_FICTITIOUS) != 0)
+ return (count);
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
+ if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
+ count++;
+ return (count);
+}
+
static u_int moea_vsidcontext;
void
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index 9a9c1f5..c102b17 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -194,6 +194,13 @@ pmap_page_init(vm_page_t m)
}
int
+pmap_page_wired_mappings(vm_page_t m)
+{
+
+ return (MMU_PAGE_WIRED_MAPPINGS(mmu_obj, m));
+}
+
+int
pmap_pinit(pmap_t pmap)
{
MMU_PINIT(mmu_obj, pmap);
OpenPOWER on IntegriCloud