author		gber <gber@FreeBSD.org>	2013-05-23 12:24:46 +0000
committer	gber <gber@FreeBSD.org>	2013-05-23 12:24:46 +0000
commit		b44da0fb82647f2cfb06f65a6695c7e36c98828c (patch)
tree		88d32fbca758e9f6ebc8d49d75d926e06c1e3ef6 /sys/arm
parent		5145ecb15400880dc6aaea09f0343ec3a0e221a0 (diff)
Rework and organize pmap_enter_locked() function.
The pmap_enter_locked() implementation was very ambiguous and confusing.
Rearrange it so that each part of the mapping creation is separated.
Avoid walking through redundant conditions. Extract the vector_page
specific PTE setup from the normal PTE setup.

Submitted by:	Zbigniew Bodek <zbb@semihalf.com>
Sponsored by:	The FreeBSD Foundation, Semihalf
Diffstat (limited to 'sys/arm')
-rw-r--r--	sys/arm/arm/pmap-v6.c	158
1 file changed, 72 insertions, 86 deletions
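For orientation, the sketch below models the control flow this patch introduces: attribute-only changes to an existing mapping jump straight to PTE validation, PV-list insertion happens in exactly one place, and m == NULL (the vector_page case) skips the page-specific PTE tuning. This is a condensed, self-contained model, not the kernel code itself; the types and helper names (enter_locked_model, modify_pv, remove_pv, enter_pv) are invented stand-ins for the real pmap routines visible in the diff below.

/*
 * A compilable model of the reworked pmap_enter_locked() flow.
 * All types and helpers are illustrative stubs, not the kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

struct vm_page { bool unmanaged; };

static void modify_pv(void) { puts("update attrs of existing PV entry"); }
static void remove_pv(void) { puts("remove old PV entry"); }
static void enter_pv(void)  { puts("enter new PV entry"); }

static void
enter_locked_model(unsigned opte, unsigned opa, unsigned pa, struct vm_page *m)
{
	if (opte) {
		if (opa == pa) {
			/* Same backing page: only attributes change. */
			if (m != NULL)
				modify_pv();
			goto validate;	/* skip PV-list insertion */
		}
		/* Different backing page: drop the old PV entry. */
		remove_pv();
	} else {
		/* Brand new mapping: account for it. */
		puts("bump l2b_occupancy and resident_count");
	}

	/* The single place where managed mappings join the PV list. */
	if (m != NULL && !m->unmanaged)
		enter_pv();

validate:
	/* PTE construction; m == NULL here models the vector_page. */
	if (m != NULL)
		puts("tune referenced/write/execute/cache bits");
	puts("write the new PTE");
}

int
main(void)
{
	struct vm_page page = { .unmanaged = false };

	enter_locked_model(1, 0x1000, 0x1000, &page);	/* attrs-only change */
	enter_locked_model(0, 0, 0x2000, &page);	/* new mapping */
	return (0);
}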
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index cd98395..56acb3a 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -2723,38 +2723,54 @@ do_l2b_alloc:
 	is_exec = is_refd = 0;
 	if (opte) {
-		/*
-		 * There is already a mapping at this address.
-		 * If the physical address is different, lookup the
-		 * vm_page.
-		 */
-		if (l2pte_pa(opte) != pa)
-			om = PHYS_TO_VM_PAGE(l2pte_pa(opte));
-		else
-			om = m;
-	} else
-		om = NULL;
-
-	if ((prot & (VM_PROT_ALL)) || !m) {
-		/*
-		 * - The access type indicates that we don't need
-		 *   to do referenced emulation.
-		 * OR
-		 * - The physical page has already been referenced
-		 *   so no need to re-do referenced emulation here.
-		 */
-		npte |= L2_S_REF;
+		if (l2pte_pa(opte) == pa) {
+			/*
+			 * We're changing the attrs of an existing mapping.
+			 */
+			if (m != NULL)
+				pmap_modify_pv(m, pmap, va,
+				    PVF_WRITE | PVF_WIRED, nflags);
+			is_exec |= PTE_BEEN_EXECD(opte);
+			is_refd |= PTE_BEEN_REFD(opte);
+			goto validate;
+		}
+		if ((om = PHYS_TO_VM_PAGE(l2pte_pa(opte)))) {
+			/*
+			 * Replacing an existing mapping with a new one.
+			 * It is part of our managed memory so we
+			 * must remove it from the PV list
+			 */
+			if ((pve = pmap_remove_pv(om, pmap, va))) {
+				is_exec |= PTE_BEEN_EXECD(opte);
+				is_refd |= PTE_BEEN_REFD(opte);
+
+				if (m && ((m->oflags & VPO_UNMANAGED)))
+					pmap_free_pv_entry(pmap, pve);
+			}
+		}
-		if (m != NULL &&
-		    (m->oflags & VPO_UNMANAGED) == 0)
-			vm_page_aflag_set(m, PGA_REFERENCED);
 	} else {
 		/*
-		 * Need to do page referenced emulation.
+		 * Keep the stats up to date
 		 */
-		npte &= ~L2_S_REF;
+		l2b->l2b_occupancy++;
+		pmap->pm_stats.resident_count++;
 	}
+	/*
+	 * Enter on the PV list if part of our managed memory.
+	 */
+	if ((m && !(m->oflags & VPO_UNMANAGED))) {
+		if ((!pve) && (pve = pmap_get_pv_entry(pmap, FALSE)) == NULL)
+			panic("pmap_enter: no pv entries");
+
+		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
+		    ("pmap_enter: managed mapping within the clean submap"));
+		KASSERT(pve != NULL, ("No pv"));
+		pmap_enter_pv(m, pve, pmap, va, nflags);
+	}
+
+validate:
 	/* Make the new PTE valid */
 	npte |= L2_S_PROTO;
 #ifdef SMP
@@ -2763,78 +2779,48 @@ do_l2b_alloc:
 	/* Set defaults first - kernel read access */
 	npte |= L2_APX;
 	npte |= L2_S_PROT_R;
+	/* Set "referenced" flag */
+	npte |= L2_S_REF;
 	/* Now tune APs as desired */
 	if (user)
 		npte |= L2_S_PROT_U;
-
-	if (prot & VM_PROT_WRITE) {
-		npte &= ~(L2_APX);
-
-		if (m != NULL && (m->oflags & VPO_UNMANAGED) == 0) {
-			vm_page_aflag_set(m, PGA_WRITEABLE);
+	/*
+	 * If this is not a vector_page
+	 * then continue setting mapping parameters
+	 */
+	if (m != NULL) {
+		if (prot & (VM_PROT_ALL)) {
+			if ((m->oflags & VPO_UNMANAGED) == 0)
+				vm_page_aflag_set(m, PGA_REFERENCED);
+		} else {
 			/*
-			 * The access type and permissions indicate
-			 * that the page will be written as soon as returned
-			 * from fault service.
-			 * Mark it dirty from the outset.
+			 * Need to do page referenced emulation.
 			 */
-			if ((access & VM_PROT_WRITE) != 0)
-				vm_page_dirty(m);
+			npte &= ~L2_S_REF;
 		}
-	}
-
-	if (!(prot & VM_PROT_EXECUTE) && m)
-		npte |= L2_XN;
-	if (m && (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE))
-		npte |= pte_l2_s_cache_mode;
-
-	if (m && m == om) {
-		/*
-		 * We're changing the attrs of an existing mapping.
-		 */
-		pmap_modify_pv(m, pmap, va, PVF_WRITE | PVF_WIRED, nflags);
-		is_exec |= PTE_BEEN_EXECD(opte);
-		is_refd |= PTE_BEEN_REFD(opte);
-	} else {
-		/*
-		 * New mapping, or changing the backing page
-		 * of an existing mapping.
-		 */
-		if (om) {
-			/*
-			 * Replacing an existing mapping with a new one.
-			 * It is part of our managed memory so we
-			 * must remove it from the PV list
-			 */
-			if ((pve = pmap_remove_pv(om, pmap, va))) {
-				is_exec |= PTE_BEEN_EXECD(opte);
-				is_refd |= PTE_BEEN_REFD(opte);
+		if (prot & VM_PROT_WRITE) {
+			/* Write enable */
+			npte &= ~(L2_APX);
-				if (m && ((m->oflags & VPO_UNMANAGED)))
-					pmap_free_pv_entry(pmap, pve);
+			if ((m->oflags & VPO_UNMANAGED) == 0) {
+				vm_page_aflag_set(m, PGA_WRITEABLE);
+				/*
+				 * The access type and permissions indicate
+				 * that the page will be written as soon as
+				 * returned from fault service.
+				 * Mark it dirty from the outset.
+				 */
+				if ((access & VM_PROT_WRITE) != 0)
+					vm_page_dirty(m);
 			}
 		}
+		if (!(prot & VM_PROT_EXECUTE))
+			npte |= L2_XN;
-		if ((m && !(m->oflags & VPO_UNMANAGED))) {
-			if ((!pve) &&
-			    (pve = pmap_get_pv_entry(pmap, FALSE)) == NULL)
-				panic("pmap_enter: no pv entries");
-
-			KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
-			    ("pmap_enter: managed mapping within the clean submap"));
-			KASSERT(pve != NULL, ("No pv"));
-			pmap_enter_pv(m, pve, pmap, va, nflags);
-		}
-	}
-
-	/*
-	 * Keep the stats up to date
-	 */
-	if (opte == 0) {
-		l2b->l2b_occupancy++;
-		pmap->pm_stats.resident_count++;
+		if (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
+			npte |= pte_l2_s_cache_mode;
 	}
 	CTR5(KTR_PMAP,"enter: pmap:%p va:%x prot:%x pte:%x->%x",