Diffstat (limited to 'sys/amd64')

-rw-r--r--  sys/amd64/amd64/pmap.c    | 99
-rw-r--r--  sys/amd64/include/pmap.h  | 14

2 files changed, 93 insertions(+), 20 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 532bd36..e0ccc8e 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -306,6 +306,7 @@ static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
pd_entry_t newpde);
static void pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde);
+static vm_page_t pmap_vmpage_splay(vm_pindex_t pindex, vm_page_t root);
static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex,
struct rwlock **lockp);
@@ -1498,7 +1499,8 @@ pmap_free_zero_pages(vm_page_t free)
while (free != NULL) {
m = free;
- free = m->right;
+ free = (void *)m->object;
+ m->object = NULL;
/* Preserve the page's PG_ZERO setting. */
vm_page_free_toq(m);
}
@@ -1517,7 +1519,7 @@ pmap_add_delayed_free_list(vm_page_t m, vm_page_t *free, boolean_t set_PG_ZERO)
m->flags |= PG_ZERO;
else
m->flags &= ~PG_ZERO;
- m->right = *free;
+ m->object = (void *)*free;
*free = m;
}
@@ -1535,20 +1537,20 @@ pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
root = pmap->pm_root;
if (root == NULL) {
- mpte->left = NULL;
- mpte->right = NULL;
+ mpte->md.pv_left = NULL;
+ mpte->md.pv_right = NULL;
} else {
- root = vm_page_splay(mpte->pindex, root);
+ root = pmap_vmpage_splay(mpte->pindex, root);
if (mpte->pindex < root->pindex) {
- mpte->left = root->left;
- mpte->right = root;
- root->left = NULL;
+ mpte->md.pv_left = root->md.pv_left;
+ mpte->md.pv_right = root;
+ root->md.pv_left = NULL;
} else if (mpte->pindex == root->pindex)
panic("pmap_insert_pt_page: pindex already inserted");
else {
- mpte->right = root->right;
- mpte->left = root;
- root->right = NULL;
+ mpte->md.pv_right = root->md.pv_right;
+ mpte->md.pv_left = root;
+ root->md.pv_right = NULL;
}
}
pmap->pm_root = mpte;
@@ -1567,7 +1569,7 @@ pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
if ((mpte = pmap->pm_root) != NULL && mpte->pindex != pindex) {
- mpte = vm_page_splay(pindex, mpte);
+ mpte = pmap_vmpage_splay(pindex, mpte);
if ((pmap->pm_root = mpte)->pindex != pindex)
mpte = NULL;
}
@@ -1586,18 +1588,24 @@ pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
if (mpte != pmap->pm_root) {
- root = vm_page_splay(mpte->pindex, pmap->pm_root);
+ root = pmap_vmpage_splay(mpte->pindex, pmap->pm_root);
KASSERT(mpte == root,
("pmap_remove_pt_page: mpte %p is missing from pmap %p",
mpte, pmap));
}
- if (mpte->left == NULL)
- root = mpte->right;
+ if (mpte->md.pv_left == NULL)
+ root = mpte->md.pv_right;
else {
- root = vm_page_splay(mpte->pindex, mpte->left);
- root->right = mpte->right;
+ root = pmap_vmpage_splay(mpte->pindex, mpte->md.pv_left);
+ root->md.pv_right = mpte->md.pv_right;
}
pmap->pm_root = root;
+
+ /*
+ * Reinitialize the pv_list, which could be dirty now because it
+ * shares storage with the splay links that were just manipulated.
+ */
+ TAILQ_INIT(&mpte->md.pv_list);
}
/*
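
The TAILQ_INIT added above is the price of the union introduced in the
pmap.h hunk at the end of this diff: the pv_list head and the
pv_left/pv_right splay links occupy the same bytes, so every splay
operation scribbles over the list head. A hedged standalone sketch of
that aliasing, using simplified stand-in types rather than the real
kernel definitions:

#include <assert.h>
#include <stddef.h>
#include <sys/queue.h>

struct pv_entry {
        TAILQ_ENTRY(pv_entry) pv_next;
};

struct vm_page_stub;                    /* stand-in for vm_page_t */

struct md_page_stub {
        union {
                TAILQ_HEAD(, pv_entry) pvi_list;
                struct {
                        struct vm_page_stub *pii_left;
                        struct vm_page_stub *pii_right;
                } pvi_siters;
        } pv_structs;
};

int
main(void)
{
        struct md_page_stub md;

        TAILQ_INIT(&md.pv_structs.pvi_list);
        assert(TAILQ_EMPTY(&md.pv_structs.pvi_list));

        /* A splay-style write lands on the same bytes as the head... */
        md.pv_structs.pvi_siters.pii_left = (struct vm_page_stub *)&md;
        /* ...so the list now looks corrupted (non-empty)... */
        assert(!TAILQ_EMPTY(&md.pv_structs.pvi_list));
        /* ...and must be reinitialized before it is trusted again. */
        TAILQ_INIT(&md.pv_structs.pvi_list);
        assert(TAILQ_EMPTY(&md.pv_structs.pvi_list));
        return (0);
}
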
@@ -1673,6 +1681,61 @@ _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
}
/*
+ * Implements Sleator and Tarjan's top-down splay algorithm. Returns
+ * the vm_page containing the given pindex. If, however, that
+ * pindex is not found in the pmap, returns a vm_page that is
+ * adjacent to the pindex, coming before or after it.
+ */
+static vm_page_t
+pmap_vmpage_splay(vm_pindex_t pindex, vm_page_t root)
+{
+ struct vm_page dummy;
+ vm_page_t lefttreemax, righttreemin, y;
+
+ if (root == NULL)
+ return (root);
+ lefttreemax = righttreemin = &dummy;
+ for (;; root = y) {
+ if (pindex < root->pindex) {
+ if ((y = root->md.pv_left) == NULL)
+ break;
+ if (pindex < y->pindex) {
+ /* Rotate right. */
+ root->md.pv_left = y->md.pv_right;
+ y->md.pv_right = root;
+ root = y;
+ if ((y = root->md.pv_left) == NULL)
+ break;
+ }
+ /* Link into the new root's right tree. */
+ righttreemin->md.pv_left = root;
+ righttreemin = root;
+ } else if (pindex > root->pindex) {
+ if ((y = root->md.pv_right) == NULL)
+ break;
+ if (pindex > y->pindex) {
+ /* Rotate left. */
+ root->md.pv_right = y->md.pv_left;
+ y->md.pv_left = root;
+ root = y;
+ if ((y = root->md.pv_right) == NULL)
+ break;
+ }
+ /* Link into the new root's left tree. */
+ lefttreemax->md.pv_right = root;
+ lefttreemax = root;
+ } else
+ break;
+ }
+ /* Assemble the new root. */
+ lefttreemax->md.pv_right = root->md.pv_left;
+ righttreemin->md.pv_left = root->md.pv_right;
+ root->md.pv_left = dummy.md.pv_right;
+ root->md.pv_right = dummy.md.pv_left;
+ return (root);
+}
+
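
Because pmap_vmpage_splay() is a textbook instance of Sleator and
Tarjan's top-down splay, it is easy to exercise outside the kernel.
Below is a self-contained miniature keyed on plain ints; the types and
names are invented for illustration, not FreeBSD code. The test shows
the property the pmap code relies on: the looked-up key (or its
in-order neighbor) is rotated to the root, which is exactly how
pmap_lookup_pt_page() above turns a splay into a lookup.

#include <assert.h>
#include <stddef.h>

struct node {
        int key;
        struct node *left, *right;
};

static struct node *
splay(int key, struct node *root)
{
        struct node dummy = { 0, NULL, NULL };
        struct node *ltreemax, *rtreemin, *y;

        if (root == NULL)
                return (NULL);
        ltreemax = rtreemin = &dummy;
        for (;; root = y) {
                if (key < root->key) {
                        if ((y = root->left) == NULL)
                                break;
                        if (key < y->key) {     /* rotate right */
                                root->left = y->right;
                                y->right = root;
                                root = y;
                                if ((y = root->left) == NULL)
                                        break;
                        }
                        rtreemin->left = root;  /* link right */
                        rtreemin = root;
                } else if (key > root->key) {
                        if ((y = root->right) == NULL)
                                break;
                        if (key > y->key) {     /* rotate left */
                                root->right = y->left;
                                y->left = root;
                                root = y;
                                if ((y = root->right) == NULL)
                                        break;
                        }
                        ltreemax->right = root; /* link left */
                        ltreemax = root;
                } else
                        break;
        }
        /* Assemble the new root from the two side trees. */
        ltreemax->right = root->left;
        rtreemin->left = root->right;
        root->left = dummy.right;
        root->right = dummy.left;
        return (root);
}

int
main(void)
{
        struct node n1 = { 1, NULL, NULL }, n2 = { 2, NULL, NULL },
            n3 = { 3, NULL, NULL };
        struct node *r;

        /* Degenerate right spine 1 -> 2 -> 3, then splay for 3. */
        n1.right = &n2;
        n2.right = &n3;
        r = splay(3, &n1);
        assert(r == &n3 && r->left == &n2 && n2.left == &n1);
        return (0);
}
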
+/*
* After removing a page table entry, this routine is used to
* conditionally free the page, and manage the hold/wire counts.
*/
@@ -2274,7 +2337,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
}
if (m_pc == NULL && free != NULL) {
m_pc = free;
- free = m_pc->right;
+ free = (void *)m_pc->object;
/* Recycle a freed page table page. */
m_pc->wire_count = 1;
atomic_add_int(&cnt.v_wire_count, 1);
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index 0fc8867..24fd2bc 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -235,10 +235,20 @@ struct pv_entry;
struct pv_chunk;
struct md_page {
- TAILQ_HEAD(,pv_entry) pv_list;
- int pat_mode;
+ union {
+ TAILQ_HEAD(,pv_entry) pvi_list;
+ struct {
+ vm_page_t pii_left;
+ vm_page_t pii_right;
+ } pvi_siters;
+ } pv_structs;
+ int pat_mode;
};
+#define pv_list pv_structs.pvi_list
+#define pv_left pv_structs.pvi_siters.pii_left
+#define pv_right pv_structs.pvi_siters.pii_right
+
/*
* The kernel virtual address (KVA) of the level 4 page table page is always
* within the direct map (DMAP) region.
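
One closing observation on the union above: a TAILQ_HEAD is two
pointers, the same size as the pair of splay links, so overlaying them
should add no space to struct md_page on common ABIs. A compile-time
check of that claim, with hypothetical stand-in declarations rather
than the real headers:

#include <sys/queue.h>

struct pv_entry;
struct vm_page;

union pv_overlay {                      /* hypothetical name */
        TAILQ_HEAD(, pv_entry) pvi_list;        /* two pointers */
        struct {
                struct vm_page *pii_left;       /* two pointers, too */
                struct vm_page *pii_right;
        } pvi_siters;
};

/* Holds on common ABIs, where all object pointers are one size. */
_Static_assert(sizeof(union pv_overlay) == 2 * sizeof(struct vm_page *),
    "md_page union expected to stay two pointers wide");

int
main(void)
{
        return (0);
}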