summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	alc <alc@FreeBSD.org>	2009-07-18 01:50:05 +0000
committer	alc <alc@FreeBSD.org>	2009-07-18 01:50:05 +0000
commit	40432bac3b537e4cec296ffff5f03ccc6e206725 (patch)
tree	3735260e5ee08da379a2641b39b41827e7347d4d
parent	524e45ea991626d48d957b1617e055ac1daa1a75 (diff)
download	FreeBSD-src-40432bac3b537e4cec296ffff5f03ccc6e206725.zip
download	FreeBSD-src-40432bac3b537e4cec296ffff5f03ccc6e206725.tar.gz
An addendum to r195649, "Add support to the virtual memory system for
configuring machine-dependent memory attributes...": Don't set the memory attribute for a "real" page that is allocated to a device object in vm_page_alloc(). It is a pointless act, because the device pager replaces this "real" page with a "fake" page and sets the memory attribute on that "fake" page. Eliminate pointless code from pmap_cache_bits() on amd64. Employ the "Self Snoop" feature supported by some x86 processors to avoid cache flushes in the pmap. Approved by: re (kib)
-rw-r--r--sys/amd64/amd64/pmap.c23
-rw-r--r--sys/i386/i386/pmap.c12
-rw-r--r--sys/vm/vm_page.c4
3 files changed, 18 insertions, 21 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index fce2818..99f55cb 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -752,21 +752,6 @@ pmap_cache_bits(int mode, boolean_t is_pde)
/* The PAT bit is different for PTE's and PDE's. */
pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
- /* If we don't support PAT, map extended modes to older ones. */
- if (!(cpu_feature & CPUID_PAT)) {
- switch (mode) {
- case PAT_UNCACHEABLE:
- case PAT_WRITE_THROUGH:
- case PAT_WRITE_BACK:
- break;
- case PAT_UNCACHED:
- case PAT_WRITE_COMBINING:
- case PAT_WRITE_PROTECTED:
- mode = PAT_UNCACHEABLE;
- break;
- }
- }
-
/* Map the caching mode to a PAT index. */
switch (mode) {
case PAT_UNCACHEABLE:
@@ -4295,7 +4280,9 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
pa += PAGE_SIZE;
}
pmap_invalidate_range(kernel_pmap, va, tmpva);
- pmap_invalidate_cache();
+ /* If "Self Snoop" is supported, do nothing. */
+ if (!(cpu_feature & CPUID_SS))
+ pmap_invalidate_cache();
return ((void *)(va + offset));
}
@@ -4634,7 +4621,9 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
*/
if (changed) {
pmap_invalidate_range(kernel_pmap, base, tmpva);
- pmap_invalidate_cache();
+ /* If "Self Snoop" is supported, do nothing. */
+ if (!(cpu_feature & CPUID_SS))
+ pmap_invalidate_cache();
}
return (error);
}
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index adcbd83..6103fd5 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -4420,7 +4420,9 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
pa += PAGE_SIZE;
}
pmap_invalidate_range(kernel_pmap, va, tmpva);
- pmap_invalidate_cache();
+ /* If "Self Snoop" is supported, do nothing. */
+ if (!(cpu_feature & CPUID_SS))
+ pmap_invalidate_cache();
return ((void *)(va + offset));
}
@@ -4467,7 +4469,9 @@ pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
* Flush CPU caches to make sure any data isn't cached that shouldn't
* be, etc.
*/
- pmap_invalidate_cache();
+ /* If "Self Snoop" is supported, do nothing. */
+ if (!(cpu_feature & CPUID_SS))
+ pmap_invalidate_cache();
}
int
@@ -4526,7 +4530,9 @@ pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
* be, etc.
*/
pmap_invalidate_range(kernel_pmap, base, tmpva);
- pmap_invalidate_cache();
+ /* If "Self Snoop" is supported, do nothing. */
+ if (!(cpu_feature & CPUID_SS))
+ pmap_invalidate_cache();
return (0);
}
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index b9c4ebc..d8d74c0 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1161,7 +1161,9 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
mtx_unlock(&vm_page_queue_free_mtx);
if (object != NULL) {
- if (object->memattr != VM_MEMATTR_DEFAULT)
+ /* Ignore device objects; the pager sets "memattr" for them. */
+ if (object->memattr != VM_MEMATTR_DEFAULT &&
+ object->type != OBJT_DEVICE)
pmap_page_set_memattr(m, object->memattr);
vm_page_insert(m, object, pindex);
} else
OpenPOWER on IntegriCloud