author:    jhb <jhb@FreeBSD.org>  2006-08-11 19:22:57 +0000
committer: jhb <jhb@FreeBSD.org>  2006-08-11 19:22:57 +0000
commit:    ce9f8963fd6c7fcb782d8b9eebf51fb108d847f8 (patch)
tree:      1ef0dbab4a5d1ad1c224a7eec5af2c8597bfef7d
parent:    11b5f41beac4542db8456ad35e02e9a1d89d2291 (diff)
First pass at allowing memory to be mapped using cache modes other than
WB (write-back) on x86 via control bits in PTEs and PDEs (including
making use of the PAT MSR).  Changes include:
- A new pmap_mapdev_attr() function for amd64 and i386 which takes an
  additional parameter (relative to pmap_mapdev()) specifying the cache
  mode for this mapping.  Note that on amd64 only WB mappings are done
  with the direct map; all other modes result in a private mapping.
- pmap_mapdev() on i386 and amd64 now defaults to using UC (uncached)
  mappings rather than WB.  Previously we relied on the BIOS setting up
  MTRRs to force memio regions to be treated as UC.  For example, this
  may make hw.cbb_start_memory unnecessary in some cases.
- A new pmap_mapbios()/pmap_unmapbios() API has been added to allow
  places that used pmap_mapdev() to map non-device memory (such as ACPI
  tables) to keep doing so using WB as before.
- A new pmap_change_attr() function for amd64 and i386 that changes the
  caching mode for a range of KVA.

Reviewed by:	alc
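
For illustration only (this sketch is not part of the commit), here is
how a driver might consume the new API.  The example_attach() function,
the device addresses, and the sizes are all hypothetical; the pmap_*
signatures match the headers changed below, and the PAT_* constants come
from <machine/specialreg.h>.

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/specialreg.h>

static void
example_attach(void)
{
	void *regs, *fb;

	/* Register window: pmap_mapdev() now defaults to UC mappings. */
	regs = pmap_mapdev(0xfebf0000, PAGE_SIZE);

	/* Hypothetical 8MB framebuffer: request write-combining. */
	fb = pmap_mapdev_attr(0xd0000000, 8 * 1024 * 1024,
	    PAT_WRITE_COMBINING);

	/* ... device setup would go here ... */

	pmap_unmapdev((vm_offset_t)fb, 8 * 1024 * 1024);
	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
}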
-rw-r--r--sys/amd64/acpica/madt.c10
-rw-r--r--sys/amd64/amd64/pmap.c224
-rw-r--r--sys/amd64/include/pmap.h5
-rw-r--r--sys/dev/acpica/Osd/OsdMemory.c4
-rw-r--r--sys/i386/acpica/acpi_machdep.c4
-rw-r--r--sys/i386/acpica/madt.c10
-rw-r--r--sys/i386/i386/pmap.c171
-rw-r--r--sys/i386/include/pmap.h5
-rw-r--r--sys/ia64/include/pmap.h2
9 files changed, 415 insertions, 20 deletions
diff --git a/sys/amd64/acpica/madt.c b/sys/amd64/acpica/madt.c
index bfbeaac..ba3c0c2 100644
--- a/sys/amd64/acpica/madt.c
+++ b/sys/amd64/acpica/madt.c
@@ -203,15 +203,15 @@ madt_probe(void)
/*
* Map in the RSDP. Since ACPI uses AcpiOsMapMemory() which in turn
- * calls pmap_mapdev() to find the RSDP, we assume that we can use
- * pmap_mapdev() to map the RSDP.
+ * calls pmap_mapbios() to find the RSDP, we assume that we can use
+ * pmap_mapbios() to map the RSDP.
*/
if (AcpiOsGetRootPointer(ACPI_LOGICAL_ADDRESSING, &rsdp_ptr) != AE_OK)
return (ENXIO);
#ifdef __i386__
KASSERT(rsdp_ptr.Pointer.Physical < KERNLOAD, ("RSDP too high"));
#endif
- rsdp = pmap_mapdev(rsdp_ptr.Pointer.Physical, sizeof(RSDP_DESCRIPTOR));
+ rsdp = pmap_mapbios(rsdp_ptr.Pointer.Physical, sizeof(RSDP_DESCRIPTOR));
if (rsdp == NULL) {
if (bootverbose)
printf("MADT: Failed to map RSDP\n");
@@ -261,7 +261,7 @@ madt_probe(void)
break;
madt_unmap_table(rsdt);
}
- pmap_unmapdev((vm_offset_t)rsdp, sizeof(RSDP_DESCRIPTOR));
+ pmap_unmapbios((vm_offset_t)rsdp, sizeof(RSDP_DESCRIPTOR));
if (madt_physaddr == 0) {
if (bootverbose)
printf("MADT: No MADT table found\n");
@@ -335,7 +335,7 @@ static int
madt_setup_local(void)
{
- madt = pmap_mapdev(madt_physaddr, madt_length);
+ madt = pmap_mapbios(madt_physaddr, madt_length);
lapic_init((uintptr_t)madt->LocalApicAddress);
printf("ACPI APIC Table: <%.*s %.*s>\n",
(int)sizeof(madt->OemId), madt->OemId,
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 5ceb497..47921a5 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -665,6 +665,84 @@ SYSCTL_PROC(_vm_pmap, OID_AUTO, shpgperproc, CTLTYPE_INT|CTLFLAG_RW,
* Low level helper routines.....
***************************************************/
+/*
+ * Determine the appropriate bits to set in a PTE or PDE for a specified
+ * caching mode.
+ */
+static int
+pmap_cache_bits(int mode, boolean_t is_pde)
+{
+ int pat_flag, pat_index, cache_bits;
+
+ /* The PAT bit is different for PTE's and PDE's. */
+ pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
+
+ /* If we don't support PAT, map extended modes to older ones. */
+ if (!(cpu_feature & CPUID_PAT)) {
+ switch (mode) {
+ case PAT_UNCACHEABLE:
+ case PAT_WRITE_THROUGH:
+ case PAT_WRITE_BACK:
+ break;
+ case PAT_UNCACHED:
+ case PAT_WRITE_COMBINING:
+ case PAT_WRITE_PROTECTED:
+ mode = PAT_UNCACHEABLE;
+ break;
+ }
+ }
+
+ /* Map the caching mode to a PAT index. */
+ switch (mode) {
+#ifdef PAT_WORKS
+ case PAT_UNCACHEABLE:
+ pat_index = 3;
+ break;
+ case PAT_WRITE_THROUGH:
+ pat_index = 1;
+ break;
+ case PAT_WRITE_BACK:
+ pat_index = 0;
+ break;
+ case PAT_UNCACHED:
+ pat_index = 2;
+ break;
+ case PAT_WRITE_COMBINING:
+ pat_index = 5;
+ break;
+ case PAT_WRITE_PROTECTED:
+ pat_index = 4;
+ break;
+#else
+ case PAT_UNCACHED:
+ case PAT_UNCACHEABLE:
+ case PAT_WRITE_PROTECTED:
+ pat_index = 3;
+ break;
+ case PAT_WRITE_THROUGH:
+ pat_index = 1;
+ break;
+ case PAT_WRITE_BACK:
+ pat_index = 0;
+ break;
+ case PAT_WRITE_COMBINING:
+ pat_index = 2;
+ break;
+#endif
+ default:
+ panic("Unknown caching mode %d\n", mode);
+ }
+
+ /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
+ cache_bits = 0;
+ if (pat_index & 0x4)
+ cache_bits |= pat_flag;
+ if (pat_index & 0x2)
+ cache_bits |= PG_NC_PCD;
+ if (pat_index & 0x1)
+ cache_bits |= PG_NC_PWT;
+ return (cache_bits);
+}
#ifdef SMP
/*
* For SMP, these functions have to use the IPI mechanism for coherence.
@@ -962,6 +1040,15 @@ pmap_kenter(vm_offset_t va, vm_paddr_t pa)
pte_store(pte, pa | PG_RW | PG_V | PG_G);
}
+PMAP_INLINE void
+pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
+{
+ pt_entry_t *pte;
+
+ pte = vtopte(va);
+ pte_store(pte, pa | PG_RW | PG_V | PG_G | pmap_cache_bits(mode, 0));
+}
+
/*
* Remove a page from the kernel pagetables.
* Note: not SMP coherent.
@@ -2281,6 +2368,10 @@ validate:
if (pmap == kernel_pmap)
newpte |= PG_G;
+ /* Preserve any caching attributes. */
+ /* XXX: Should this be conditional on something? */
+ newpte |= (origpte & (PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT));
+
/*
* if the mapping or permission bits are different, we need
* to update the pte.
@@ -3101,6 +3192,46 @@ pmap_clear_reference(vm_page_t m)
* Miscellaneous support routines follow
*/
+/* Adjust the cache mode for a 4KB page mapped via a PTE. */
+static __inline void
+pmap_pte_attr(vm_offset_t va, int mode)
+{
+ pt_entry_t *pte;
+ u_int opte, npte;
+
+ pte = vtopte(va);
+
+ /*
+ * The cache mode bits are all in the low 32-bits of the
+ * PTE, so we can just spin on updating the low 32-bits.
+ */
+ do {
+ opte = *(u_int *)pte;
+ npte = opte & ~(PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT);
+ npte |= pmap_cache_bits(mode, 0);
+ } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
+}
+
+/* Adjust the cache mode for a 2MB page mapped via a PDE. */
+static __inline void
+pmap_pde_attr(vm_offset_t va, int mode)
+{
+ pd_entry_t *pde;
+ u_int opde, npde;
+
+ pde = pmap_pde(kernel_pmap, va);
+
+ /*
+ * The cache mode bits are all in the low 32-bits of the
+ * PDE, so we can just spin on updating the low 32-bits.
+ */
+ do {
+ opde = *(u_int *)pde;
+ npde = opde & ~(PG_PDE_PAT | PG_NC_PCD | PG_NC_PWT);
+ npde |= pmap_cache_bits(mode, 1);
+ } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde));
+}
+
/*
* Map a set of physical memory pages into the kernel virtual
* address space. Return a pointer to where it is mapped. This
@@ -3108,12 +3239,15 @@ pmap_clear_reference(vm_page_t m)
* NOT real memory.
*/
void *
-pmap_mapdev(vm_paddr_t pa, vm_size_t size)
+pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
{
vm_offset_t va, tmpva, offset;
- /* If this fits within the direct map window, use it */
- if (pa < dmaplimit && (pa + size) < dmaplimit)
+ /*
+ * If this fits within the direct map window and uses the WB
+ * caching mode, use the direct map.
+ */
+ if (pa < dmaplimit && (pa + size) < dmaplimit && mode == PAT_WRITE_BACK)
return ((void *)PHYS_TO_DMAP(pa));
offset = pa & PAGE_MASK;
size = roundup(offset + size, PAGE_SIZE);
@@ -3122,15 +3256,30 @@ pmap_mapdev(vm_paddr_t pa, vm_size_t size)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
pa = trunc_page(pa);
for (tmpva = va; size > 0; ) {
- pmap_kenter(tmpva, pa);
+ pmap_kenter_attr(tmpva, pa, mode);
size -= PAGE_SIZE;
tmpva += PAGE_SIZE;
pa += PAGE_SIZE;
}
pmap_invalidate_range(kernel_pmap, va, tmpva);
+ pmap_invalidate_cache();
return ((void *)(va + offset));
}
+void *
+pmap_mapdev(vm_paddr_t pa, vm_size_t size)
+{
+
+ return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
+}
+
+void *
+pmap_mapbios(vm_paddr_t pa, vm_size_t size)
+{
+
+ return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
+}
+
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
@@ -3148,6 +3297,73 @@ pmap_unmapdev(vm_offset_t va, vm_size_t size)
kmem_free(kernel_map, base, size);
}
+int
+pmap_change_attr(va, size, mode)
+ vm_offset_t va;
+ vm_size_t size;
+ int mode;
+{
+ vm_offset_t base, offset, tmpva;
+ pd_entry_t *pde;
+ pt_entry_t *pte;
+
+ base = va & PG_FRAME;
+ offset = va & PAGE_MASK;
+ size = roundup(offset + size, PAGE_SIZE);
+
+ /* Only supported on kernel virtual addresses. */
+ if (base <= VM_MAXUSER_ADDRESS)
+ return (EINVAL);
+
+ /*
+ * XXX: We have to support tearing 2MB pages down into 4k pages if
+ * needed here.
+ */
+ /* Pages that aren't mapped aren't supported. */
+ for (tmpva = base; tmpva < (base + size); ) {
+ pde = pmap_pde(kernel_pmap, tmpva);
+ if (*pde == 0)
+ return (EINVAL);
+ if (*pde & PG_PS) {
+ /* Handle 2MB pages that are completely contained. */
+ if (size >= NBPDR) {
+ tmpva += NBPDR;
+ continue;
+ }
+ return (EINVAL);
+ }
+ pte = vtopte(tmpva);
+ if (*pte == 0)
+ return (EINVAL);
+ tmpva += PAGE_SIZE;
+ }
+
+ /*
+ * Ok, all the pages exist, so run through them updating their
+ * cache mode.
+ */
+ for (tmpva = base; size > 0; ) {
+ pde = pmap_pde(kernel_pmap, tmpva);
+ if (*pde & PG_PS) {
+ pmap_pde_attr(tmpva, mode);
+ tmpva += NBPDR;
+ size -= NBPDR;
+ } else {
+ pmap_pte_attr(tmpva, mode);
+ tmpva += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ }
+
+ /*
+ * Flush CPU caches to make sure any data isn't cached that shouldn't
+ * be, etc.
+ */
+ pmap_invalidate_range(kernel_pmap, base, tmpva);
+ pmap_invalidate_cache();
+ return (0);
+}
+
/*
* perform the pmap work for mincore
*/
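
A worked example of the encoding in the new pmap_cache_bits() above
(illustrative and standalone, not part of the commit): the 3-bit PAT
index is scattered across an entry as the PAT flag (index bit 2),
PG_NC_PCD (index bit 1), and PG_NC_PWT (index bit 0).  With the x86 bit
values hardcoded for demonstration:

#include <stdio.h>

#define PG_NC_PWT	0x008	/* page write-through */
#define PG_NC_PCD	0x010	/* page cache disable */
#define PG_PTE_PAT	0x080	/* PAT bit in a 4KB PTE */

static unsigned
cache_bits_from_index(int pat_index)
{
	unsigned bits = 0;

	if (pat_index & 0x4)
		bits |= PG_PTE_PAT;
	if (pat_index & 0x2)
		bits |= PG_NC_PCD;
	if (pat_index & 0x1)
		bits |= PG_NC_PWT;
	return (bits);
}

int
main(void)
{
	/* WC without PAT_WORKS uses index 2: PCD only (0x10). */
	printf("index 2 -> %#x\n", cache_bits_from_index(2));
	/* WC with PAT_WORKS uses index 5: PAT|PWT (0x88). */
	printf("index 5 -> %#x\n", cache_bits_from_index(5));
	return (0);
}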
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index 007b545..9475626 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -302,14 +302,19 @@ extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
+#define pmap_unmapbios(va, sz) pmap_unmapdev((va), (sz))
void pmap_bootstrap(vm_paddr_t *);
+int pmap_change_attr(vm_offset_t, vm_size_t, int);
void pmap_init_pat(void);
void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
+void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
void *pmap_kenter_temporary(vm_paddr_t pa, int i);
vm_paddr_t pmap_kextract(vm_offset_t);
void pmap_kremove(vm_offset_t);
+void *pmap_mapbios(vm_paddr_t, vm_size_t);
void *pmap_mapdev(vm_paddr_t, vm_size_t);
+void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
void pmap_unmapdev(vm_offset_t, vm_size_t);
void pmap_invalidate_page(pmap_t, vm_offset_t);
void pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
diff --git a/sys/dev/acpica/Osd/OsdMemory.c b/sys/dev/acpica/Osd/OsdMemory.c
index 8e496b3..b99bd62 100644
--- a/sys/dev/acpica/Osd/OsdMemory.c
+++ b/sys/dev/acpica/Osd/OsdMemory.c
@@ -58,7 +58,7 @@ ACPI_STATUS
AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Length,
void **LogicalAddress)
{
- *LogicalAddress = pmap_mapdev((vm_offset_t)PhysicalAddress, Length);
+ *LogicalAddress = pmap_mapbios((vm_offset_t)PhysicalAddress, Length);
if (*LogicalAddress == NULL)
return (AE_BAD_ADDRESS);
return (AE_OK);
@@ -67,7 +67,7 @@ AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Length,
void
AcpiOsUnmapMemory(void *LogicalAddress, ACPI_SIZE Length)
{
- pmap_unmapdev((vm_offset_t)LogicalAddress, Length);
+ pmap_unmapbios((vm_offset_t)LogicalAddress, Length);
}
ACPI_STATUS
diff --git a/sys/i386/acpica/acpi_machdep.c b/sys/i386/acpica/acpi_machdep.c
index 725f43f..151439c 100644
--- a/sys/i386/acpica/acpi_machdep.c
+++ b/sys/i386/acpica/acpi_machdep.c
@@ -347,9 +347,9 @@ acpi_machdep_quirks(int *quirks)
int year;
/* BIOS address 0xffff5 contains the date in the format mm/dd/yy. */
- va = pmap_mapdev(0xffff0, 16);
+ va = pmap_mapbios(0xffff0, 16);
sscanf(va + 11, "%2d", &year);
- pmap_unmapdev((vm_offset_t)va, 16);
+ pmap_unmapbios((vm_offset_t)va, 16);
/*
* Date must be >= 1/1/1999 or we don't trust ACPI. Note that this
diff --git a/sys/i386/acpica/madt.c b/sys/i386/acpica/madt.c
index f6b09a4..abe525e 100644
--- a/sys/i386/acpica/madt.c
+++ b/sys/i386/acpica/madt.c
@@ -203,15 +203,15 @@ madt_probe(void)
/*
* Map in the RSDP. Since ACPI uses AcpiOsMapMemory() which in turn
- * calls pmap_mapdev() to find the RSDP, we assume that we can use
- * pmap_mapdev() to map the RSDP.
+ * calls pmap_mapbios() to find the RSDP, we assume that we can use
+ * pmap_mapbios() to map the RSDP.
*/
if (AcpiOsGetRootPointer(ACPI_LOGICAL_ADDRESSING, &rsdp_ptr) != AE_OK)
return (ENXIO);
#ifdef __i386__
KASSERT(rsdp_ptr.Pointer.Physical < KERNLOAD, ("RSDP too high"));
#endif
- rsdp = pmap_mapdev(rsdp_ptr.Pointer.Physical, sizeof(RSDP_DESCRIPTOR));
+ rsdp = pmap_mapbios(rsdp_ptr.Pointer.Physical, sizeof(RSDP_DESCRIPTOR));
if (rsdp == NULL) {
if (bootverbose)
printf("MADT: Failed to map RSDP\n");
@@ -261,7 +261,7 @@ madt_probe(void)
break;
madt_unmap_table(rsdt);
}
- pmap_unmapdev((vm_offset_t)rsdp, sizeof(RSDP_DESCRIPTOR));
+ pmap_unmapbios((vm_offset_t)rsdp, sizeof(RSDP_DESCRIPTOR));
if (madt_physaddr == 0) {
if (bootverbose)
printf("MADT: No MADT table found\n");
@@ -335,7 +335,7 @@ static int
madt_setup_local(void)
{
- madt = pmap_mapdev(madt_physaddr, madt_length);
+ madt = pmap_mapbios(madt_physaddr, madt_length);
lapic_init((uintptr_t)madt->LocalApicAddress);
printf("ACPI APIC Table: <%.*s %.*s>\n",
(int)sizeof(madt->OemId), madt->OemId,
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 111b4f7..6311478 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -630,6 +630,84 @@ SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
* Low level helper routines.....
***************************************************/
+/*
+ * Determine the appropriate bits to set in a PTE or PDE for a specified
+ * caching mode.
+ */
+static int
+pmap_cache_bits(int mode, boolean_t is_pde)
+{
+ int pat_flag, pat_index, cache_bits;
+
+ /* The PAT bit is different for PTE's and PDE's. */
+ pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
+
+ /* If we don't support PAT, map extended modes to older ones. */
+ if (!(cpu_feature & CPUID_PAT)) {
+ switch (mode) {
+ case PAT_UNCACHEABLE:
+ case PAT_WRITE_THROUGH:
+ case PAT_WRITE_BACK:
+ break;
+ case PAT_UNCACHED:
+ case PAT_WRITE_COMBINING:
+ case PAT_WRITE_PROTECTED:
+ mode = PAT_UNCACHEABLE;
+ break;
+ }
+ }
+
+ /* Map the caching mode to a PAT index. */
+ switch (mode) {
+#ifdef PAT_WORKS
+ case PAT_UNCACHEABLE:
+ pat_index = 3;
+ break;
+ case PAT_WRITE_THROUGH:
+ pat_index = 1;
+ break;
+ case PAT_WRITE_BACK:
+ pat_index = 0;
+ break;
+ case PAT_UNCACHED:
+ pat_index = 2;
+ break;
+ case PAT_WRITE_COMBINING:
+ pat_index = 5;
+ break;
+ case PAT_WRITE_PROTECTED:
+ pat_index = 4;
+ break;
+#else
+ case PAT_UNCACHED:
+ case PAT_UNCACHEABLE:
+ case PAT_WRITE_PROTECTED:
+ pat_index = 3;
+ break;
+ case PAT_WRITE_THROUGH:
+ pat_index = 1;
+ break;
+ case PAT_WRITE_BACK:
+ pat_index = 0;
+ break;
+ case PAT_WRITE_COMBINING:
+ pat_index = 2;
+ break;
+#endif
+ default:
+ panic("Unknown caching mode %d\n", mode);
+ }
+
+ /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
+ cache_bits = 0;
+ if (pat_index & 0x4)
+ cache_bits |= pat_flag;
+ if (pat_index & 0x2)
+ cache_bits |= PG_NC_PCD;
+ if (pat_index & 0x1)
+ cache_bits |= PG_NC_PWT;
+ return (cache_bits);
+}
#ifdef SMP
/*
* For SMP, these functions have to use the IPI mechanism for coherence.
@@ -1002,6 +1080,15 @@ pmap_kenter(vm_offset_t va, vm_paddr_t pa)
pte_store(pte, pa | PG_RW | PG_V | pgeflag);
}
+PMAP_INLINE void
+pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
+{
+ pt_entry_t *pte;
+
+ pte = vtopte(va);
+ pte_store(pte, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0));
+}
+
/*
* Remove a page from the kernel pagetables.
* Note: not SMP coherent.
@@ -2356,6 +2443,10 @@ validate:
if (pmap == kernel_pmap)
newpte |= pgeflag;
+ /* Preserve any caching attributes. */
+ /* XXX: Should this be conditional on something? */
+ newpte |= (origpte & (PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT));
+
/*
* if the mapping or permission bits are different, we need
* to update the pte.
@@ -3225,7 +3316,7 @@ pmap_clear_reference(vm_page_t m)
* NOT real memory.
*/
void *
-pmap_mapdev(vm_paddr_t pa, vm_size_t size)
+pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
{
vm_offset_t va, tmpva, offset;
@@ -3241,15 +3332,30 @@ pmap_mapdev(vm_paddr_t pa, vm_size_t size)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
for (tmpva = va; size > 0; ) {
- pmap_kenter(tmpva, pa);
+ pmap_kenter_attr(tmpva, pa, mode);
size -= PAGE_SIZE;
tmpva += PAGE_SIZE;
pa += PAGE_SIZE;
}
pmap_invalidate_range(kernel_pmap, va, tmpva);
+ pmap_invalidate_cache();
return ((void *)(va + offset));
}
+void *
+pmap_mapdev(vm_paddr_t pa, vm_size_t size)
+{
+
+ return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
+}
+
+void *
+pmap_mapbios(vm_paddr_t pa, vm_size_t size)
+{
+
+ return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
+}
+
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
@@ -3266,6 +3372,67 @@ pmap_unmapdev(vm_offset_t va, vm_size_t size)
kmem_free(kernel_map, base, size);
}
+int
+pmap_change_attr(va, size, mode)
+ vm_offset_t va;
+ vm_size_t size;
+ int mode;
+{
+ vm_offset_t base, offset, tmpva;
+ pt_entry_t *pte;
+ u_int opte, npte;
+ pd_entry_t *pde;
+
+ base = va & PG_FRAME;
+ offset = va & PAGE_MASK;
+ size = roundup(offset + size, PAGE_SIZE);
+
+ /* Only supported on kernel virtual addresses. */
+ if (base <= VM_MAXUSER_ADDRESS)
+ return (EINVAL);
+
+ /* 4MB pages and pages that aren't mapped aren't supported. */
+ for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) {
+ pde = pmap_pde(kernel_pmap, tmpva);
+ if (*pde & PG_PS)
+ return (EINVAL);
+ if (*pde == 0)
+ return (EINVAL);
+ pte = vtopte(tmpva);
+ if (*pte == 0)
+ return (EINVAL);
+ }
+
+ /*
+ * Ok, all the pages exist and are 4k, so run through them updating
+ * their cache mode.
+ */
+ for (tmpva = base; size > 0; ) {
+ pte = vtopte(tmpva);
+
+ /*
+ * The cache mode bits are all in the low 32-bits of the
+ * PTE, so we can just spin on updating the low 32-bits.
+ */
+ do {
+ opte = *(u_int *)pte;
+ npte = opte & ~(PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT);
+ npte |= pmap_cache_bits(mode, 0);
+ } while (npte != opte &&
+ !atomic_cmpset_int((u_int *)pte, opte, npte));
+ tmpva += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+
+ /*
+ * Flush CPU caches to make sure any data isn't cached that shouldn't
+ * be, etc.
+ */
+ pmap_invalidate_range(kernel_pmap, base, tmpva);
+ pmap_invalidate_cache();
+ return (0);
+}
+
/*
* perform the pmap work for mincore
*/
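
A hypothetical call sequence for the new pmap_change_attr()
(illustrative only; "sc" and its fields are invented driver state).
Note that the i386 version rejects ranges backed by 4MB pages, so
callers must be prepared for EINVAL:

	int error;

	/* Switch an existing 4KB-mapped kernel VA range to WC. */
	error = pmap_change_attr(sc->sc_vaddr, sc->sc_len,
	    PAT_WRITE_COMBINING);
	if (error != 0)
		device_printf(sc->sc_dev,
		    "could not set write-combining: %d\n", error);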
diff --git a/sys/i386/include/pmap.h b/sys/i386/include/pmap.h
index 584bc43..e04506c 100644
--- a/sys/i386/include/pmap.h
+++ b/sys/i386/include/pmap.h
@@ -368,13 +368,18 @@ extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
+#define pmap_unmapbios(va, sz) pmap_unmapdev((va), (sz))
void pmap_bootstrap(vm_paddr_t, vm_paddr_t);
+int pmap_change_attr(vm_offset_t, vm_size_t, int);
void pmap_init_pat(void);
void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
+void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
void *pmap_kenter_temporary(vm_paddr_t pa, int i);
void pmap_kremove(vm_offset_t);
+void *pmap_mapbios(vm_paddr_t, vm_size_t);
void *pmap_mapdev(vm_paddr_t, vm_size_t);
+void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
void pmap_unmapdev(vm_offset_t, vm_size_t);
pt_entry_t *pmap_pte(pmap_t, vm_offset_t) __pure2;
void pmap_set_pg(void);
diff --git a/sys/ia64/include/pmap.h b/sys/ia64/include/pmap.h
index 978917d..b025aef 100644
--- a/sys/ia64/include/pmap.h
+++ b/sys/ia64/include/pmap.h
@@ -119,6 +119,8 @@ extern uint64_t pmap_vhpt_base[];
extern int pmap_vhpt_log2size;
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
+#define pmap_mapbios(pa, sz) pmap_mapdev(pa, sz)
+#define pmap_unmapbios(va, sz) pmap_unmapdev(va, sz)
vm_offset_t pmap_steal_memory(vm_size_t);
void pmap_bootstrap(void);