author		cognet <cognet@FreeBSD.org>	2012-12-19 00:24:31 +0000
committer	cognet <cognet@FreeBSD.org>	2012-12-19 00:24:31 +0000
commit		58faac84ca39267f2f9368819febdac421e89a6a (patch)
tree		14bcf41da925fb77b6562d71e57e5a6e413b3470 /sys/arm
parent		99451e9e7417c523343c14e3808924954ae700da (diff)
Properly implement pmap_[get|set]_memattr
Submitted by: Ian Lepore <freebsd@damnhippie.dyndns.org>
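
The patch threads a per-page memory attribute through the ARM pmap: pmap_page_init() seeds m->md.pv_memattr to VM_MEMATTR_DEFAULT, pmap_page_set_memattr() records the caller's choice, and every path that builds or repairs a PTE ORs in pte_l2_s_cache_mode only when the page is not marked uncacheable. A minimal userland sketch of that gating predicate (the typedef and the L2_CACHE_MODE value are simplified stand-ins, not the kernel's definitions; only the test mirrors the patch):

#include <stdint.h>
#include <stdio.h>

typedef int vm_memattr_t;		/* simplified; not the kernel typedef */
#define	VM_MEMATTR_DEFAULT	0
#define	VM_MEMATTR_UNCACHEABLE	1
#define	L2_CACHE_MODE		0x0c	/* stand-in for pte_l2_s_cache_mode */

/* Mirrors the test added in pmap_enter_locked() and pmap_clearbit(). */
static uint32_t
pte_cache_bits(uint32_t npte, vm_memattr_t memattr)
{
	if (!(memattr & VM_MEMATTR_UNCACHEABLE))
		npte |= L2_CACHE_MODE;
	return (npte);
}

int
main(void)
{
	printf("default:     %#x\n", pte_cache_bits(0, VM_MEMATTR_DEFAULT));
	printf("uncacheable: %#x\n", pte_cache_bits(0, VM_MEMATTR_UNCACHEABLE));
	return (0);
}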
Diffstat (limited to 'sys/arm')
-rw-r--r--	sys/arm/arm/pmap-v6.c	23
-rw-r--r--	sys/arm/arm/pmap.c	52
-rw-r--r--	sys/arm/include/pmap.h	5
-rw-r--r--	sys/arm/include/vm.h	3
4 files changed, 67 insertions(+), 16 deletions(-)
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index a44bdbf..e40a938 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -1131,6 +1131,7 @@ pmap_page_init(vm_page_t m)
 {

 	TAILQ_INIT(&m->md.pv_list);
+	m->md.pv_memattr = VM_MEMATTR_DEFAULT;
 }

 /*
@@ -2662,7 +2663,8 @@ do_l2b_alloc:
 		if (!(prot & VM_PROT_EXECUTE) && m)
 			npte |= L2_XN;

-		npte |= pte_l2_s_cache_mode;
+		if (!(m->md.pv_memattr & VM_MEMATTR_UNCACHEABLE))
+			npte |= pte_l2_s_cache_mode;

 		if (m && m == opg) {
 			/*
@@ -3817,3 +3819,22 @@ pmap_dmap_iscurrent(pmap_t pmap)
 	return(pmap_is_current(pmap));
 }

+void
+pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
+{
+	/*
+	 * Remember the memattr in a field that gets used to set the appropriate
+	 * bits in the PTEs as mappings are established.
+	 */
+	m->md.pv_memattr = ma;
+
+	/*
+	 * It appears that this function can only be called before any mappings
+	 * for the page are established on ARM.  If this ever changes, this code
+	 * will need to walk the pv_list and make each of the existing mappings
+	 * uncacheable, being careful to sync caches and PTEs (and maybe
+	 * invalidate TLB?) for any current mapping it modifies.
+	 */
+	if (m->md.pv_kva != 0 || TAILQ_FIRST(&m->md.pv_list) != NULL)
+		panic("Can't change memattr on page with existing mappings");
+}
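
Because pmap_page_set_memattr() panics once any mapping exists, callers must record the attribute while the page is still unmapped. An ordering sketch under that constraint (vm_page_alloc(), pmap_enter(), and pmap_kernel() are the era's real KPIs; the virtual address va, the allocation flags, and the protection bits are illustrative assumptions):

	vm_page_t m;

	/* A freshly allocated page has no mappings: pv_kva is 0 and the
	 * pv_list is empty, so setting the attribute is legal here. */
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
	if (m == NULL)
		return (ENOMEM);
	pmap_page_set_memattr(m, VM_MEMATTR_UNCACHEABLE);

	/* Mappings established afterwards pick up the attribute, because
	 * pmap_enter_locked() now consults m->md.pv_memattr. */
	pmap_enter(pmap_kernel(), va, VM_PROT_READ | VM_PROT_WRITE, m,
	    VM_PROT_READ | VM_PROT_WRITE, TRUE);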
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 6a794b7..549583e 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -1366,7 +1366,8 @@ pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va)
 		    (pv->pv_flags & PVF_NC)) {

 			pv->pv_flags &= ~PVF_NC;
-			pmap_set_cache_entry(pv, pm, va, 1);
+			if (!(pg->md.pv_memattr & VM_MEMATTR_UNCACHEABLE))
+				pmap_set_cache_entry(pv, pm, va, 1);
 			continue;
 		}
 		/* user is no longer sharable and writable */
@@ -1375,7 +1376,8 @@ pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va)
 		    !pmwc && (pv->pv_flags & PVF_NC)) {

 			pv->pv_flags &= ~(PVF_NC | PVF_MWC);
-			pmap_set_cache_entry(pv, pm, va, 1);
+			if (!(pg->md.pv_memattr & VM_MEMATTR_UNCACHEABLE))
+				pmap_set_cache_entry(pv, pm, va, 1);
 		}
 	}
@@ -1426,15 +1428,16 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)

 		if (!(oflags & maskbits)) {
 			if ((maskbits & PVF_WRITE) && (pv->pv_flags & PVF_NC)) {
-				/* It is safe to re-enable cacheing here. */
-				PMAP_LOCK(pm);
-				l2b = pmap_get_l2_bucket(pm, va);
-				ptep = &l2b->l2b_kva[l2pte_index(va)];
-				*ptep |= pte_l2_s_cache_mode;
-				PTE_SYNC(ptep);
-				PMAP_UNLOCK(pm);
+				if (!(pg->md.pv_memattr &
+				    VM_MEMATTR_UNCACHEABLE)) {
+					PMAP_LOCK(pm);
+					l2b = pmap_get_l2_bucket(pm, va);
+					ptep = &l2b->l2b_kva[l2pte_index(va)];
+					*ptep |= pte_l2_s_cache_mode;
+					PTE_SYNC(ptep);
+					PMAP_UNLOCK(pm);
+				}
 				pv->pv_flags &= ~(PVF_NC | PVF_MWC);
-
 			}
 			continue;
 		}
@@ -1463,7 +1466,9 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
 				 * permission.
 				 */
 				if (maskbits & PVF_WRITE) {
-					npte |= pte_l2_s_cache_mode;
+					if (!(pg->md.pv_memattr &
+					    VM_MEMATTR_UNCACHEABLE))
+						npte |= pte_l2_s_cache_mode;
 					pv->pv_flags &= ~(PVF_NC | PVF_MWC);
 				}
 			} else
@@ -1794,6 +1799,7 @@ pmap_page_init(vm_page_t m)
 {

 	TAILQ_INIT(&m->md.pv_list);
+	m->md.pv_memattr = VM_MEMATTR_DEFAULT;
 }

 /*
@@ -3393,7 +3399,8 @@ do_l2b_alloc:
 			    (m->oflags & VPO_UNMANAGED) == 0)
 				vm_page_aflag_set(m, PGA_WRITEABLE);
 		}
-		npte |= pte_l2_s_cache_mode;
+		if (!(m->md.pv_memattr & VM_MEMATTR_UNCACHEABLE))
+			npte |= pte_l2_s_cache_mode;
 		if (m && m == opg) {
 			/*
 			 * We're changing the attrs of an existing mapping.
@@ -4929,3 +4936,24 @@ pmap_devmap_find_va(vm_offset_t va, vm_size_t size)
 	return (NULL);
 }

+void
+pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
+{
+	/*
+	 * Remember the memattr in a field that gets used to set the appropriate
+	 * bits in the PTEs as mappings are established.
+	 */
+	m->md.pv_memattr = ma;
+
+	/*
+	 * It appears that this function can only be called before any mappings
+	 * for the page are established on ARM.  If this ever changes, this code
+	 * will need to walk the pv_list and make each of the existing mappings
+	 * uncacheable, being careful to sync caches and PTEs (and maybe
+	 * invalidate TLB?) for any current mapping it modifies.
+	 */
+	if (m->md.pv_kva != 0 || TAILQ_FIRST(&m->md.pv_list) != NULL)
+		panic("Can't change memattr on page with existing mappings");
+}
+
+
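
The pmap_fix_cache() and pmap_clearbit() hunks are the other half of the story: a mapping that was forced non-cacheable to break an alias (PVF_NC) may have caching restored when conditions clear, but only for pages whose memattr permits it. A hypothetical helper condensing that repeated test (the real code inlines it at each site):

/* Caching may be restored for a mapping made non-cacheable by aliasing,
 * never for a page whose memattr marks it uncacheable. */
static int
can_recache(const struct md_page *md, u_int pv_flags)
{
	return ((pv_flags & PVF_NC) != 0 &&
	    (md->pv_memattr & VM_MEMATTR_UNCACHEABLE) == 0);
}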
diff --git a/sys/arm/include/pmap.h b/sys/arm/include/pmap.h
index e20bf18..e58ba9f 100644
--- a/sys/arm/include/pmap.h
+++ b/sys/arm/include/pmap.h
@@ -96,10 +96,10 @@ enum mem_type {
 #endif

-#define	pmap_page_get_memattr(m)	VM_MEMATTR_DEFAULT
+#define	pmap_page_get_memattr(m)	((m)->md.pv_memattr)
 #define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
 #define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
-#define	pmap_page_set_memattr(m, ma)	(void)0
+void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);

 /*
  * Pmap stuff
@@ -119,6 +119,7 @@ struct pv_entry;

 struct md_page {
 	int pvh_attrs;
+	vm_memattr_t	 pv_memattr;
 	vm_offset_t pv_kva;		/* first kernel VA mapping */
 	TAILQ_HEAD(,pv_entry)	pv_list;
 };
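
With these header changes, pmap_page_get_memattr() reads back whatever pmap_page_set_memattr() stored, and pmap_page_is_mapped() inspects the same pv_list the panic check uses. A round-trip sketch (kernel-style KASSERT; m is assumed to be an unmapped vm_page_t):

	if (!pmap_page_is_mapped(m) && m->md.pv_kva == 0) {
		pmap_page_set_memattr(m, VM_MEMATTR_UNCACHEABLE);
		KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_UNCACHEABLE,
		    ("pv_memattr not recorded"));
	}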
diff --git a/sys/arm/include/vm.h b/sys/arm/include/vm.h
index 7ec2d9e..6f27276 100644
--- a/sys/arm/include/vm.h
+++ b/sys/arm/include/vm.h
@@ -29,7 +29,8 @@
 #ifndef _MACHINE_VM_H_
 #define _MACHINE_VM_H_

-/* Memory attribute configuration is not (yet) implemented. */
+/* Memory attribute configuration. */
 #define	VM_MEMATTR_DEFAULT	0
+#define	VM_MEMATTR_UNCACHEABLE	1

 #endif	/* !_MACHINE_VM_H_ */
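
Note that the pmap code tests the new constant as a bit flag (pv_memattr & VM_MEMATTR_UNCACHEABLE) rather than comparing for equality, so with these two values bit 0 alone decides cacheability:

	if (!(ma & VM_MEMATTR_UNCACHEABLE))	/* bit test, not ma == VM_MEMATTR_UNCACHEABLE */
		npte |= pte_l2_s_cache_mode;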