Diffstat (limited to 'sys/i386')
 -rw-r--r--  sys/i386/i386/pmap.c     | 65
 -rw-r--r--  sys/i386/include/pmap.h  |  3
 -rw-r--r--  sys/i386/include/vm.h    | 16
 -rw-r--r--  sys/i386/xen/pmap.c      |  3
 4 files changed, 62 insertions(+), 25 deletions(-)
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index d16632a..adcbd83 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -559,6 +559,7 @@ pmap_page_init(vm_page_t m)
{
TAILQ_INIT(&m->md.pv_list);
+ m->md.pat_mode = PAT_WRITE_BACK;
}
#ifdef PAE
@@ -569,7 +570,7 @@ pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
/* Inform UMA that this allocator uses kernel_map/object. */
*flags = UMA_SLAB_KERNEL;
return ((void *)kmem_alloc_contig(kernel_map, bytes, wait, 0x0ULL,
- 0xffffffffULL, 1, 0, VM_CACHE_DEFAULT));
+ 0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
}
#endif
@@ -1210,7 +1211,8 @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
endpte = pte + count;
while (pte < endpte) {
oldpte |= *pte;
- pte_store(pte, VM_PAGE_TO_PHYS(*ma) | pgeflag | PG_RW | PG_V);
+ pte_store(pte, VM_PAGE_TO_PHYS(*ma) | pgeflag |
+ pmap_cache_bits((*ma)->md.pat_mode, 0) | PG_RW | PG_V);
pte++;
ma++;
}
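[Editorial note: this hunk and several below fold the page's PAT mode into the hardware PTE/PDE through pmap_cache_bits(), which lives elsewhere in pmap.c and is not part of this diff. As a rough sketch only -- assuming the PAT MSR has been reprogrammed so that table slot N holds the memory type whose PAT_* encoding is N, which is not how a stock BIOS leaves it -- the translation amounts to mapping the three low bits of the mode onto the PAT, PCD, and PWT page-table bits:

/*
 * Hypothetical sketch, NOT the committed pmap_cache_bits(): bit
 * positions follow the i386 architecture (PWT = bit 3, PCD = bit 4,
 * PAT = bit 7 in a 4KB PTE but bit 12 in a 2/4MB PDE).
 */
#define PG_NC_PWT	0x008		/* page write-through */
#define PG_NC_PCD	0x010		/* page cache-disable */
#define PG_PTE_PAT	0x080		/* PAT bit, 4KB PTE */
#define PG_PDE_PAT	0x1000		/* PAT bit, 2/4MB PDE */

static int
cache_bits_sketch(int mode, int is_pde)
{
	int pat_bit = is_pde ? PG_PDE_PAT : PG_PTE_PAT;

	/* (PAT, PCD, PWT) together index an 8-entry PAT table. */
	return (((mode & 4) ? pat_bit : 0) |
	    ((mode & 2) ? PG_NC_PCD : 0) |
	    ((mode & 1) ? PG_NC_PWT : 0));
}
]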
@@ -3132,7 +3134,7 @@ validate:
/*
* Now validate mapping with desired protection/wiring.
*/
- newpte = (pt_entry_t)(pa | PG_V);
+ newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
if ((prot & VM_PROT_WRITE) != 0) {
newpte |= PG_RW;
vm_page_flag_set(m, PG_WRITEABLE);
@@ -3214,7 +3216,8 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
" in pmap %p", va, pmap);
return (FALSE);
}
- newpde = VM_PAGE_TO_PHYS(m) | PG_PS | PG_V;
+ newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
+ PG_PS | PG_V;
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
newpde |= PG_MANAGED;
@@ -3399,7 +3402,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
*/
pmap->pm_stats.resident_count++;
- pa = VM_PAGE_TO_PHYS(m);
+ pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
#ifdef PAE
if ((prot & VM_PROT_EXECUTE) == 0)
pa |= pg_nx;
@@ -3442,6 +3445,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
pd_entry_t *pde;
vm_paddr_t pa, ptepa;
vm_page_t p;
+ int pat_mode;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
KASSERT(object->type == OBJT_DEVICE,
@@ -3453,6 +3457,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
p = vm_page_lookup(object, pindex);
KASSERT(p->valid == VM_PAGE_BITS_ALL,
("pmap_object_init_pt: invalid page %p", p));
+ pat_mode = p->md.pat_mode;
/*
* Abort the mapping if the first page is not physically
@@ -3464,21 +3469,28 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
/*
* Skip the first page. Abort the mapping if the rest of
- * the pages are not physically contiguous.
+ * the pages are not physically contiguous or have differing
+ * memory attributes.
*/
p = TAILQ_NEXT(p, listq);
for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
pa += PAGE_SIZE) {
KASSERT(p->valid == VM_PAGE_BITS_ALL,
("pmap_object_init_pt: invalid page %p", p));
- if (pa != VM_PAGE_TO_PHYS(p))
+ if (pa != VM_PAGE_TO_PHYS(p) ||
+ pat_mode != p->md.pat_mode)
return;
p = TAILQ_NEXT(p, listq);
}
- /* Map using 2/4MB pages. */
+ /*
+ * Map using 2/4MB pages. Since "ptepa" is 2/4M aligned and
+ * "size" is a multiple of 2/4M, adding the PAT setting to
+ * "pa" will not affect the termination of this loop.
+ */
PMAP_LOCK(pmap);
- for (pa = ptepa; pa < ptepa + size; pa += NBPDR) {
+ for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa +
+ size; pa += NBPDR) {
pde = pmap_pde(pmap, addr);
if (*pde == 0) {
pde_store(pde, pa | PG_PS | PG_M | PG_A |
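[Editorial note: the termination claim in the comment above can be checked with concrete numbers. For non-PAE i386, NBPDR is 4MB (0x400000) and the PDE cache-control bits occupy bits 3, 4, and 12, so pmap_cache_bits(pat_mode, 1) is at most 0x1018 -- far smaller than one superpage. A standalone userland illustration of the loop shape, with the constants written out by hand:

#include <assert.h>

#define NBPDR		0x400000u	/* bytes mapped by one 4MB PDE */
#define PDE_CACHE_MAX	0x1018u		/* PG_NC_PWT|PG_NC_PCD|PG_PDE_PAT */

int
main(void)
{
	unsigned int ptepa = 0x800000u;		/* 4MB-aligned */
	unsigned int size = 3 * NBPDR;		/* multiple of 4MB */
	unsigned int pa, n = 0;

	/* ORing cache bits below NBPDR into a NBPDR-aligned start
	   address does not change the iteration count. */
	for (pa = ptepa | PDE_CACHE_MAX; pa < ptepa + size; pa += NBPDR)
		n++;
	assert(n == size / NBPDR);		/* still 3 iterations */
	return (0);
}
]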
@@ -3696,7 +3708,8 @@ pmap_zero_page(vm_page_t m)
if (*sysmaps->CMAP2)
panic("pmap_zero_page: CMAP2 busy");
sched_pin();
- *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
+ *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
+ pmap_cache_bits(m->md.pat_mode, 0);
invlcaddr(sysmaps->CADDR2);
pagezero(sysmaps->CADDR2);
*sysmaps->CMAP2 = 0;
@@ -3718,9 +3731,10 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
mtx_lock(&sysmaps->lock);
if (*sysmaps->CMAP2)
- panic("pmap_zero_page: CMAP2 busy");
+ panic("pmap_zero_page_area: CMAP2 busy");
sched_pin();
- *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
+ *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
+ pmap_cache_bits(m->md.pat_mode, 0);
invlcaddr(sysmaps->CADDR2);
if (off == 0 && size == PAGE_SIZE)
pagezero(sysmaps->CADDR2);
@@ -3742,9 +3756,10 @@ pmap_zero_page_idle(vm_page_t m)
{
if (*CMAP3)
- panic("pmap_zero_page: CMAP3 busy");
+ panic("pmap_zero_page_idle: CMAP3 busy");
sched_pin();
- *CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
+ *CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
+ pmap_cache_bits(m->md.pat_mode, 0);
invlcaddr(CADDR3);
pagezero(CADDR3);
*CMAP3 = 0;
@@ -3771,8 +3786,10 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
sched_pin();
invlpg((u_int)sysmaps->CADDR1);
invlpg((u_int)sysmaps->CADDR2);
- *sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A;
- *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M;
+ *sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A |
+ pmap_cache_bits(src->md.pat_mode, 0);
+ *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M |
+ pmap_cache_bits(dst->md.pat_mode, 0);
bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE);
*sysmaps->CMAP1 = 0;
*sysmaps->CMAP2 = 0;
@@ -4437,6 +4454,22 @@ pmap_unmapdev(vm_offset_t va, vm_size_t size)
kmem_free(kernel_map, base, size);
}
+/*
+ * Sets the memory attribute for the specified page.
+ */
+void
+pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
+{
+
+ m->md.pat_mode = ma;
+
+ /*
+ * Flush CPU caches so that no stale data remains cached under the
+ * old attribute.
+ */
+ pmap_invalidate_cache();
+}
+
int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{
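[Editorial note: taken together with the pmap_page_get_memattr() macro added in the header below, the new function gives machine-independent code a per-page attribute knob. A hypothetical caller -- the helper name and the write-combining choice are illustrative, not part of this commit -- might look like:

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

/* Hypothetical driver helper: mark a page write-combining. */
static void
make_page_write_combining(vm_page_t m)
{
	if (pmap_page_get_memattr(m) != VM_MEMATTR_WRITE_COMBINING)
		pmap_page_set_memattr(m, VM_MEMATTR_WRITE_COMBINING);
	/*
	 * Only mappings created after this point (pmap_enter(),
	 * pmap_qenter(), the CMAP zero/copy paths above) pick up the
	 * new attribute; pmap_page_set_memattr() flushes the caches
	 * but does not rewrite existing PTEs.
	 */
}
]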
diff --git a/sys/i386/include/pmap.h b/sys/i386/include/pmap.h
index d19c3a9f..42c5d2e 100644
--- a/sys/i386/include/pmap.h
+++ b/sys/i386/include/pmap.h
@@ -389,6 +389,7 @@ struct pv_chunk;
struct md_page {
TAILQ_HEAD(,pv_entry) pv_list;
+ int pat_mode;
};
struct pmap {
@@ -458,6 +459,7 @@ extern char *ptvmmap; /* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
+#define pmap_page_get_memattr(m) ((vm_memattr_t)(m)->md.pat_mode)
#define pmap_unmapbios(va, sz) pmap_unmapdev((va), (sz))
void pmap_bootstrap(vm_paddr_t);
@@ -470,6 +472,7 @@ void *pmap_mapbios(vm_paddr_t, vm_size_t);
void *pmap_mapdev(vm_paddr_t, vm_size_t);
void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t m);
+void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void pmap_unmapdev(vm_offset_t, vm_size_t);
pt_entry_t *pmap_pte(pmap_t, vm_offset_t) __pure2;
void pmap_set_pg(void);
diff --git a/sys/i386/include/vm.h b/sys/i386/include/vm.h
index 4796dbc..4065a36 100644
--- a/sys/i386/include/vm.h
+++ b/sys/i386/include/vm.h
@@ -32,14 +32,14 @@
#include <machine/specialreg.h>
-/* Cache control options. */
-#define VM_CACHE_UNCACHEABLE ((vm_cache_mode_t)PAT_UNCACHEABLE)
-#define VM_CACHE_WRITE_COMBINING ((vm_cache_mode_t)PAT_WRITE_COMBINING)
-#define VM_CACHE_WRITE_THROUGH ((vm_cache_mode_t)PAT_WRITE_THROUGH)
-#define VM_CACHE_WRITE_PROTECTED ((vm_cache_mode_t)PAT_WRITE_PROTECTED)
-#define VM_CACHE_WRITE_BACK ((vm_cache_mode_t)PAT_WRITE_BACK)
-#define VM_CACHE_UNCACHED ((vm_cache_mode_t)PAT_UNCACHED)
+/* Memory attributes. */
+#define VM_MEMATTR_UNCACHEABLE ((vm_memattr_t)PAT_UNCACHEABLE)
+#define VM_MEMATTR_WRITE_COMBINING ((vm_memattr_t)PAT_WRITE_COMBINING)
+#define VM_MEMATTR_WRITE_THROUGH ((vm_memattr_t)PAT_WRITE_THROUGH)
+#define VM_MEMATTR_WRITE_PROTECTED ((vm_memattr_t)PAT_WRITE_PROTECTED)
+#define VM_MEMATTR_WRITE_BACK ((vm_memattr_t)PAT_WRITE_BACK)
+#define VM_MEMATTR_UNCACHED ((vm_memattr_t)PAT_UNCACHED)
-#define VM_CACHE_DEFAULT VM_CACHE_WRITE_BACK
+#define VM_MEMATTR_DEFAULT VM_MEMATTR_WRITE_BACK
#endif /* !_MACHINE_VM_H_ */
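[Editorial note: the VM_MEMATTR_* names are thin casts over the x86 PAT memory-type encodings from <machine/specialreg.h>. For reference -- values quoted from that header as I recall it, so verify against your tree -- they are:

#define PAT_UNCACHEABLE		0x00	/* UC  */
#define PAT_WRITE_COMBINING	0x01	/* WC  */
#define PAT_WRITE_THROUGH	0x04	/* WT  */
#define PAT_WRITE_PROTECTED	0x05	/* WP  */
#define PAT_WRITE_BACK		0x06	/* WB, the default set in
					   pmap_page_init() above */
#define PAT_UNCACHED		0x07	/* UC- */
]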
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index ed4300f..9a9a1b4 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -605,6 +605,7 @@ pmap_page_init(vm_page_t m)
{
TAILQ_INIT(&m->md.pv_list);
+ m->md.pat_mode = PAT_WRITE_BACK;
}
#if defined(PAE) && !defined(XEN)
@@ -615,7 +616,7 @@ pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
/* Inform UMA that this allocator uses kernel_map/object. */
*flags = UMA_SLAB_KERNEL;
return ((void *)kmem_alloc_contig(kernel_map, bytes, wait, 0x0ULL,
- 0xffffffffULL, 1, 0, VM_CACHE_DEFAULT));
+ 0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
}
#endif
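[Editorial note: both pmap_pdpt_allocf() variants pass the new memory attribute as the final argument of kmem_alloc_contig(). Following the same parameter order, a hypothetical wrapper for a below-4GB, uncacheable allocation -- the function name and the PAGE_SIZE alignment are illustrative choices, not from this commit -- would be:

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

/* Hypothetical: contiguous memory below 4GB, mapped uncacheable. */
static void *
alloc_dma32_uc(vm_size_t bytes, int wait)
{
	return ((void *)kmem_alloc_contig(kernel_map, bytes, wait,
	    0x0ULL,		/* low physical address */
	    0xffffffffULL,	/* high: stay below 4GB */
	    PAGE_SIZE,		/* alignment */
	    0,			/* no boundary restriction */
	    VM_MEMATTR_UNCACHEABLE));
}
]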