summaryrefslogtreecommitdiffstats
path: root/sys/i386
diff options
context:
space:
mode:
authorkib <kib@FreeBSD.org>2012-03-19 09:34:22 +0000
committerkib <kib@FreeBSD.org>2012-03-19 09:34:22 +0000
commit151deea148c8f17b6f8e9d0e163522812a441406 (patch)
tree0fe56f02b12f17508a7eadb901b9c7506ef067df /sys/i386
parent7544a6bbeae69feddb05e4884135fdd7bbd6cf08 (diff)
downloadFreeBSD-src-151deea148c8f17b6f8e9d0e163522812a441406.zip
FreeBSD-src-151deea148c8f17b6f8e9d0e163522812a441406.tar.gz
If we ever allow for managed fictitious pages, the pages shall be
excluded from superpage promotions. At least one of the reasons is that pv_table is sized for non-fictitious pages only. Consistently check for the page to be non-fictitious before accessing superpage pv list. Sponsored by: The FreeBSD Foundation Reviewed by: alc MFC after: 2 weeks
Diffstat (limited to 'sys/i386')
-rw-r--r--sys/i386/i386/pmap.c49
1 files changed, 37 insertions, 12 deletions
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index cdafc43..dee48a1 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2483,7 +2483,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
pmap_pvh_free(&m->md, pmap, va);
- if (TAILQ_EMPTY(&m->md.pv_list)) {
+ if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) {
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
if (TAILQ_EMPTY(&pvh->pv_list))
vm_page_aflag_clear(m, PGA_WRITEABLE);
@@ -2957,6 +2957,8 @@ pmap_remove_all(vm_page_t m)
free = NULL;
vm_page_lock_queues();
sched_pin();
+ if ((m->flags & PG_FICTITIOUS) != 0)
+ goto small_mappings;
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
va = pv->pv_va;
@@ -2966,6 +2968,7 @@ pmap_remove_all(vm_page_t m)
(void)pmap_demote_pde(pmap, pde, va);
PMAP_UNLOCK(pmap);
}
+small_mappings:
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
@@ -3481,7 +3484,8 @@ validate:
}
if ((origpte & PG_MANAGED) != 0 &&
TAILQ_EMPTY(&om->md.pv_list) &&
- TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))
+ ((om->flags & PG_FICTITIOUS) != 0 ||
+ TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
vm_page_aflag_clear(om, PGA_WRITEABLE);
if (invlva)
pmap_invalidate_page(pmap, va);
@@ -3494,7 +3498,8 @@ validate:
* populated, then attempt promotion.
*/
if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
- pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0)
+ pg_ps_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
+ vm_reserv_level_iffullpop(m) == 0)
pmap_promote_pde(pmap, pde, va);
sched_unpin();
@@ -4134,7 +4139,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
if (loops >= 16)
break;
}
- if (!rv && loops < 16) {
+ if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
if (PV_PMAP(pv) == pmap) {
@@ -4166,7 +4171,10 @@ pmap_page_wired_mappings(vm_page_t m)
return (count);
vm_page_lock_queues();
count = pmap_pvh_wired_mappings(&m->md, count);
- count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count);
+ if ((m->flags & PG_FICTITIOUS) == 0) {
+ count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)),
+ count);
+ }
vm_page_unlock_queues();
return (count);
}
@@ -4210,7 +4218,8 @@ pmap_page_is_mapped(vm_page_t m)
return (FALSE);
vm_page_lock_queues();
rv = !TAILQ_EMPTY(&m->md.pv_list) ||
- !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list);
+ ((m->flags & PG_FICTITIOUS) == 0 &&
+ !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
vm_page_unlock_queues();
return (rv);
}
@@ -4283,9 +4292,10 @@ pmap_remove_pages(pmap_t pmap)
m, (uintmax_t)m->phys_addr,
(uintmax_t)tpte));
- KASSERT(m < &vm_page_array[vm_page_array_size],
- ("pmap_remove_pages: bad tpte %#jx",
- (uintmax_t)tpte));
+ KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
+ m < &vm_page_array[vm_page_array_size],
+ ("pmap_remove_pages: bad tpte %#jx",
+ (uintmax_t)tpte));
pte_clear(pte);
@@ -4327,7 +4337,8 @@ pmap_remove_pages(pmap_t pmap)
} else {
pmap->pm_stats.resident_count--;
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
- if (TAILQ_EMPTY(&m->md.pv_list)) {
+ if (TAILQ_EMPTY(&m->md.pv_list) &&
+ (m->flags & PG_FICTITIOUS) == 0) {
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
if (TAILQ_EMPTY(&pvh->pv_list))
vm_page_aflag_clear(m, PGA_WRITEABLE);
@@ -4380,7 +4391,8 @@ pmap_is_modified(vm_page_t m)
return (FALSE);
vm_page_lock_queues();
rv = pmap_is_modified_pvh(&m->md) ||
- pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
+ ((m->flags & PG_FICTITIOUS) == 0 &&
+ pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
vm_page_unlock_queues();
return (rv);
}
@@ -4453,7 +4465,8 @@ pmap_is_referenced(vm_page_t m)
("pmap_is_referenced: page %p is not managed", m));
vm_page_lock_queues();
rv = pmap_is_referenced_pvh(&m->md) ||
- pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
+ ((m->flags & PG_FICTITIOUS) == 0 &&
+ pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
vm_page_unlock_queues();
return (rv);
}
@@ -4513,6 +4526,8 @@ pmap_remove_write(vm_page_t m)
return;
vm_page_lock_queues();
sched_pin();
+ if ((m->flags & PG_FICTITIOUS) != 0)
+ goto small_mappings;
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
va = pv->pv_va;
@@ -4523,6 +4538,7 @@ pmap_remove_write(vm_page_t m)
(void)pmap_demote_pde(pmap, pde, va);
PMAP_UNLOCK(pmap);
}
+small_mappings:
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
@@ -4580,6 +4596,8 @@ pmap_ts_referenced(vm_page_t m)
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
vm_page_lock_queues();
sched_pin();
+ if ((m->flags & PG_FICTITIOUS) != 0)
+ goto small_mappings;
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
va = pv->pv_va;
pmap = PV_PMAP(pv);
@@ -4610,6 +4628,7 @@ pmap_ts_referenced(vm_page_t m)
}
PMAP_UNLOCK(pmap);
}
+small_mappings:
if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
pvf = pv;
do {
@@ -4666,6 +4685,8 @@ pmap_clear_modify(vm_page_t m)
return;
vm_page_lock_queues();
sched_pin();
+ if ((m->flags & PG_FICTITIOUS) != 0)
+ goto small_mappings;
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
va = pv->pv_va;
@@ -4703,6 +4724,7 @@ pmap_clear_modify(vm_page_t m)
}
PMAP_UNLOCK(pmap);
}
+small_mappings:
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
@@ -4744,6 +4766,8 @@ pmap_clear_reference(vm_page_t m)
("pmap_clear_reference: page %p is not managed", m));
vm_page_lock_queues();
sched_pin();
+ if ((m->flags & PG_FICTITIOUS) != 0)
+ goto small_mappings;
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
va = pv->pv_va;
@@ -4767,6 +4791,7 @@ pmap_clear_reference(vm_page_t m)
}
PMAP_UNLOCK(pmap);
}
+small_mappings:
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
OpenPOWER on IntegriCloud