author     alc <alc@FreeBSD.org>    2008-08-09 05:46:13 +0000
committer  alc <alc@FreeBSD.org>    2008-08-09 05:46:13 +0000
commit     d53364aaabf956c53fb21accf05741dc91b4d975 (patch)
tree       6b2432e4944be12c133f438bfdb0e562a299bb0c /sys/amd64
parent     38a3b8ee43fac3f029495ef12f790b4300865e08 (diff)
Intel describes the behavior of their processors as "undefined" if two or
more mappings to the same physical page have different memory types, i.e.,
PAT settings.  Consequently, if pmap_change_attr() is applied to a virtual
address range within the kernel map, then the corresponding ranges of the
direct map also need to be changed.  Enhance pmap_change_attr() to handle
this case automatically.

Add a comment describing what pmap_change_attr() does.

Discussed with:	jhb
Diffstat (limited to 'sys/amd64')
-rw-r--r--	sys/amd64/amd64/pmap.c	86
1 file changed, 84 insertions, 2 deletions
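As a rough illustration of the interface this change documents, a caller in
kernel code might look like the sketch below.  The wrapper name
example_make_uncacheable() and the choice of PAT_UNCACHEABLE as the mode are
assumptions made for the example; only pmap_change_attr()'s prototype and its
error contract come from the patch that follows.

/*
 * Hypothetical caller (not part of the commit): switch a kernel-map
 * range to the uncacheable memory type.
 */
static int
example_make_uncacheable(vm_offset_t va, vm_size_t size)
{
	int error;

	error = pmap_change_attr(va, size, PAT_UNCACHEABLE);
	if (error == EINVAL) {
		/* Some part of [va, va + size) was not mapped. */
	} else if (error == ENOMEM) {
		/*
		 * A page-table page could not be allocated while demoting
		 * a large mapping; part of the range, or of the direct
		 * map, may already carry the new memory type.
		 */
	}
	return (error);
}

Note that with this change the caller no longer has to adjust the direct map
itself; the recursion on PHYS_TO_DMAP() in the patch below takes care of it.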
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index cde54ef..f2705ba 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -4417,6 +4417,25 @@ pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va)
return (TRUE);
}
+/*
+ * Changes the specified virtual address range's memory type to that given by
+ * the parameter "mode". The specified virtual address range must be
+ * completely contained within either the direct map or the kernel map. If
+ * the virtual address range is contained within the kernel map, then the
+ * memory type for each of the corresponding ranges of the direct map is also
+ * changed. (The corresponding ranges of the direct map are those ranges that
+ * map the same physical pages as the specified virtual address range.) These
+ * changes to the direct map are necessary because Intel describes the
+ * behavior of their processors as "undefined" if two or more mappings to the
+ * same physical page have different memory types.
+ *
+ * Returns zero if the change completed successfully, and either EINVAL or
+ * ENOMEM if the change failed. Specifically, EINVAL is returned if some part
+ * of the virtual address range was not mapped, and ENOMEM is returned if
+ * there was insufficient memory available to complete the change. In the
+ * latter case, the memory type may have been changed on some part of the
+ * virtual address range or the direct map.
+ */
int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{
@@ -4432,10 +4451,11 @@ static int
pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
{
vm_offset_t base, offset, tmpva;
+ vm_paddr_t pa_start, pa_end;
pdp_entry_t *pdpe;
pd_entry_t *pde;
pt_entry_t *pte;
- int cache_bits_pte, cache_bits_pde;
+ int cache_bits_pte, cache_bits_pde, error;
boolean_t changed;
PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
@@ -4521,11 +4541,13 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
return (EINVAL);
tmpva += PAGE_SIZE;
}
+ error = 0;
/*
* Ok, all the pages exist, so run through them updating their
* cache mode if required.
*/
+ pa_start = pa_end = 0;
for (tmpva = base; tmpva < base + size; ) {
pdpe = pmap_pdpe(kernel_pmap, tmpva);
if (*pdpe & PG_PS) {
@@ -4536,6 +4558,25 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
if (!changed)
changed = TRUE;
}
+ if (tmpva >= VM_MIN_KERNEL_ADDRESS) {
+ if (pa_start == pa_end) {
+ /* Start physical address run. */
+ pa_start = *pdpe & PG_PS_FRAME;
+ pa_end = pa_start + NBPDP;
+ } else if (pa_end == (*pdpe & PG_PS_FRAME))
+ pa_end += NBPDP;
+ else {
+ /* Run ended, update direct map. */
+ error = pmap_change_attr_locked(
+ PHYS_TO_DMAP(pa_start),
+ pa_end - pa_start, mode);
+ if (error != 0)
+ break;
+ /* Start physical address run. */
+ pa_start = *pdpe & PG_PS_FRAME;
+ pa_end = pa_start + NBPDP;
+ }
+ }
tmpva = trunc_1gpage(tmpva) + NBPDP;
continue;
}
@@ -4548,6 +4589,25 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
if (!changed)
changed = TRUE;
}
+ if (tmpva >= VM_MIN_KERNEL_ADDRESS) {
+ if (pa_start == pa_end) {
+ /* Start physical address run. */
+ pa_start = *pde & PG_PS_FRAME;
+ pa_end = pa_start + NBPDR;
+ } else if (pa_end == (*pde & PG_PS_FRAME))
+ pa_end += NBPDR;
+ else {
+ /* Run ended, update direct map. */
+ error = pmap_change_attr_locked(
+ PHYS_TO_DMAP(pa_start),
+ pa_end - pa_start, mode);
+ if (error != 0)
+ break;
+ /* Start physical address run. */
+ pa_start = *pde & PG_PS_FRAME;
+ pa_end = pa_start + NBPDR;
+ }
+ }
tmpva = trunc_2mpage(tmpva) + NBPDR;
} else {
if (cache_bits_pte < 0)
@@ -4558,9 +4618,31 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
if (!changed)
changed = TRUE;
}
+ if (tmpva >= VM_MIN_KERNEL_ADDRESS) {
+ if (pa_start == pa_end) {
+ /* Start physical address run. */
+ pa_start = *pte & PG_FRAME;
+ pa_end = pa_start + PAGE_SIZE;
+ } else if (pa_end == (*pte & PG_FRAME))
+ pa_end += PAGE_SIZE;
+ else {
+ /* Run ended, update direct map. */
+ error = pmap_change_attr_locked(
+ PHYS_TO_DMAP(pa_start),
+ pa_end - pa_start, mode);
+ if (error != 0)
+ break;
+ /* Start physical address run. */
+ pa_start = *pte & PG_FRAME;
+ pa_end = pa_start + PAGE_SIZE;
+ }
+ }
tmpva += PAGE_SIZE;
}
}
+ if (error == 0 && pa_start != pa_end)
+ error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start),
+ pa_end - pa_start, mode);
/*
* Flush CPU caches if required to make sure any data isn't cached that
@@ -4570,7 +4652,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
pmap_invalidate_range(kernel_pmap, base, tmpva);
pmap_invalidate_cache();
}
- return (0);
+ return (error);
}
/*
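The pa_start/pa_end bookkeeping repeated in the three loops above coalesces
physically contiguous pages into runs before recursing on the direct map, and
the check after the loop flushes any run still open when the walk ends.  Below
is a minimal, self-contained user-space analogue of that pattern; the frame
values, the apply_run() stand-in, and the single 4 KB granularity are invented
for illustration (the real code also handles 2 MB and 1 GB mappings).

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Stand-in for the recursive pmap_change_attr_locked() call on the DMAP. */
static void
apply_run(unsigned long start, unsigned long end)
{
	printf("change attributes for physical run [%#lx, %#lx)\n", start, end);
}

int
main(void)
{
	/* Physical frames backing consecutive virtual pages (illustrative). */
	unsigned long frames[] = {
		0x100000, 0x101000, 0x102000,	/* one contiguous run */
		0x250000, 0x251000		/* a second, separate run */
	};
	unsigned long pa_start = 0, pa_end = 0;
	size_t i;

	for (i = 0; i < sizeof(frames) / sizeof(frames[0]); i++) {
		if (pa_start == pa_end) {
			/* Start a physical address run. */
			pa_start = frames[i];
			pa_end = pa_start + PAGE_SIZE;
		} else if (pa_end == frames[i]) {
			/* Frame extends the current run. */
			pa_end += PAGE_SIZE;
		} else {
			/* Run ended: apply the change, start a new run. */
			apply_run(pa_start, pa_end);
			pa_start = frames[i];
			pa_end = pa_start + PAGE_SIZE;
		}
	}
	/* Apply whatever run was left open when the loop finished. */
	if (pa_start != pa_end)
		apply_run(pa_start, pa_end);
	return (0);
}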