author    kib <kib@FreeBSD.org>    2014-10-15 14:07:24 +0000
committer kib <kib@FreeBSD.org>    2014-10-15 14:07:24 +0000
commit    92c5a45b89b79dd541cdd42a298bd9ef50c0bd23 (patch)
tree      f9b50e42c297a49f05d1d59a12a960b704595ab6
parent    c4d948e4452aa678c9052326a2ca21297e209b9f (diff)
MFC r272761:
Add an argument to the x86 pmap_invalidate_cache_range() to request
forced invalidation of the cache range regardless of the presence of
self-snoop feature.

MFC r272943:
MFi386 r272761.
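
As a usage sketch of the new interface (hypothetical caller: the function
name and the mapped address below are illustrative; only the
pmap_invalidate_cache_range() signature and its boolean_t force argument
come from this change), a consumer that must push dirty lines out to
memory even on CPUs advertising self-snoop would pass TRUE:

/*
 * Hypothetical caller: flush one freshly written kernel page so that a
 * device reading behind the CPU caches observes the data.  TRUE skips
 * the CPUID_SS (self-snoop) shortcut and forces the flush; FALSE keeps
 * the pre-change behavior.
 */
static void
example_force_flush(vm_offset_t kva)
{

	pmap_invalidate_cache_range(kva, kva + PAGE_SIZE, TRUE);
}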
-rw-r--r--  sys/amd64/amd64/pmap.c                22
-rw-r--r--  sys/amd64/include/pmap.h               3
-rw-r--r--  sys/dev/drm2/i915/intel_ringbuffer.c   4
-rw-r--r--  sys/i386/i386/pmap.c                  22
-rw-r--r--  sys/i386/i386/vm_machdep.c             2
-rw-r--r--  sys/i386/include/pmap.h                3
-rw-r--r--  sys/i386/xen/pmap.c                   20
7 files changed, 45 insertions(+), 31 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 8bd6f93..5ee64da 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -1710,16 +1710,20 @@ pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
#define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024)
void
-pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
+pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
{
- KASSERT((sva & PAGE_MASK) == 0,
- ("pmap_invalidate_cache_range: sva not page-aligned"));
- KASSERT((eva & PAGE_MASK) == 0,
- ("pmap_invalidate_cache_range: eva not page-aligned"));
+ if (force) {
+ sva &= ~(vm_offset_t)cpu_clflush_line_size;
+ } else {
+ KASSERT((sva & PAGE_MASK) == 0,
+ ("pmap_invalidate_cache_range: sva not page-aligned"));
+ KASSERT((eva & PAGE_MASK) == 0,
+ ("pmap_invalidate_cache_range: eva not page-aligned"));
+ }
- if (cpu_feature & CPUID_SS)
- ; /* If "Self Snoop" is supported, do nothing. */
+ if ((cpu_feature & CPUID_SS) != 0 && !force)
+ ; /* If "Self Snoop" is supported and allowed, do nothing. */
else if ((cpu_feature & CPUID_CLFSH) != 0 &&
eva - sva < PMAP_CLFLUSH_THRESHOLD) {
@@ -6222,7 +6226,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
- pmap_invalidate_cache_range(va, va + tmpsize);
+ pmap_invalidate_cache_range(va, va + tmpsize, FALSE);
return ((void *)(va + offset));
}
@@ -6558,7 +6562,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
*/
if (changed) {
pmap_invalidate_range(kernel_pmap, base, tmpva);
- pmap_invalidate_cache_range(base, tmpva);
+ pmap_invalidate_cache_range(base, tmpva, FALSE);
}
return (error);
}
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index e83e07e..ebf32c6 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -394,7 +394,8 @@ void pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void pmap_invalidate_all(pmap_t);
void pmap_invalidate_cache(void);
void pmap_invalidate_cache_pages(vm_page_t *pages, int count);
-void pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
+void pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva,
+ boolean_t force);
void pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num);
#endif /* _KERNEL */
diff --git a/sys/dev/drm2/i915/intel_ringbuffer.c b/sys/dev/drm2/i915/intel_ringbuffer.c
index 26bc695..89a5c94 100644
--- a/sys/dev/drm2/i915/intel_ringbuffer.c
+++ b/sys/dev/drm2/i915/intel_ringbuffer.c
@@ -366,7 +366,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
goto err_unpin;
pmap_qenter((uintptr_t)pc->cpu_page, &obj->pages[0], 1);
pmap_invalidate_cache_range((vm_offset_t)pc->cpu_page,
- (vm_offset_t)pc->cpu_page + PAGE_SIZE);
+ (vm_offset_t)pc->cpu_page + PAGE_SIZE, FALSE);
pc->obj = obj;
ring->private = pc;
@@ -1014,7 +1014,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
pmap_qenter((vm_offset_t)ring->status_page.page_addr, &obj->pages[0],
1);
pmap_invalidate_cache_range((vm_offset_t)ring->status_page.page_addr,
- (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE);
+ (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE, FALSE);
ring->status_page.obj = obj;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 9e8f2ee..c9bff6b 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -1172,16 +1172,20 @@ pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
#define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024)
void
-pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
+pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
{
- KASSERT((sva & PAGE_MASK) == 0,
- ("pmap_invalidate_cache_range: sva not page-aligned"));
- KASSERT((eva & PAGE_MASK) == 0,
- ("pmap_invalidate_cache_range: eva not page-aligned"));
+ if (force) {
+ sva &= ~(vm_offset_t)cpu_clflush_line_size;
+ } else {
+ KASSERT((sva & PAGE_MASK) == 0,
+ ("pmap_invalidate_cache_range: sva not page-aligned"));
+ KASSERT((eva & PAGE_MASK) == 0,
+ ("pmap_invalidate_cache_range: eva not page-aligned"));
+ }
- if (cpu_feature & CPUID_SS)
- ; /* If "Self Snoop" is supported, do nothing. */
+ if ((cpu_feature & CPUID_SS) != 0 && !force)
+ ; /* If "Self Snoop" is supported and allowed, do nothing. */
else if ((cpu_feature & CPUID_CLFSH) != 0 &&
eva - sva < PMAP_CLFLUSH_THRESHOLD) {
@@ -5164,7 +5168,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
- pmap_invalidate_cache_range(va, va + size);
+ pmap_invalidate_cache_range(va, va + size, FALSE);
return ((void *)(va + offset));
}
@@ -5370,7 +5374,7 @@ pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
*/
if (changed) {
pmap_invalidate_range(kernel_pmap, base, tmpva);
- pmap_invalidate_cache_range(base, tmpva);
+ pmap_invalidate_cache_range(base, tmpva, FALSE);
}
return (0);
}
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index 50c6433..664ad70 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -799,7 +799,7 @@ sf_buf_invalidate_cache(vm_page_t m)
*/
pmap_qenter(sf->kva, &m, 1);
pmap_invalidate_cache_range(sf->kva, sf->kva +
- PAGE_SIZE);
+ PAGE_SIZE, FALSE);
ret = TRUE;
break;
}
diff --git a/sys/i386/include/pmap.h b/sys/i386/include/pmap.h
index 100475c..05656cd 100644
--- a/sys/i386/include/pmap.h
+++ b/sys/i386/include/pmap.h
@@ -458,7 +458,8 @@ void pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void pmap_invalidate_all(pmap_t);
void pmap_invalidate_cache(void);
void pmap_invalidate_cache_pages(vm_page_t *pages, int count);
-void pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
+void pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva,
+ boolean_t force);
#endif /* _KERNEL */
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index 48139a1..4d15115 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -888,15 +888,19 @@ pmap_invalidate_cache(void)
#define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024)
void
-pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
+pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
{
- KASSERT((sva & PAGE_MASK) == 0,
- ("pmap_invalidate_cache_range: sva not page-aligned"));
- KASSERT((eva & PAGE_MASK) == 0,
- ("pmap_invalidate_cache_range: eva not page-aligned"));
+ if (force) {
+ sva &= ~(vm_offset_t)cpu_clflush_line_size;
+ } else {
+ KASSERT((sva & PAGE_MASK) == 0,
+ ("pmap_invalidate_cache_range: sva not page-aligned"));
+ KASSERT((eva & PAGE_MASK) == 0,
+ ("pmap_invalidate_cache_range: eva not page-aligned"));
+ }
- if (cpu_feature & CPUID_SS)
+ if ((cpu_feature & CPUID_SS) != 0 && !force)
; /* If "Self Snoop" is supported, do nothing. */
else if ((cpu_feature & CPUID_CLFSH) != 0 &&
eva - sva < PMAP_CLFLUSH_THRESHOLD) {
@@ -4073,7 +4077,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
- pmap_invalidate_cache_range(va, va + size);
+ pmap_invalidate_cache_range(va, va + size, FALSE);
return ((void *)(va + offset));
}
@@ -4241,7 +4245,7 @@ pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
*/
if (changed) {
pmap_invalidate_range(kernel_pmap, base, tmpva);
- pmap_invalidate_cache_range(base, tmpva);
+ pmap_invalidate_cache_range(base, tmpva, FALSE);
}
return (0);
}
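
For reference, after this change the flush-selection logic in all three
pmap implementations reduces to the simplified model below (a sketch:
the first two branches appear in the hunks above; the final whole-cache
fallback lies outside the shown diff context and is assumed here):

	if ((cpu_feature & CPUID_SS) != 0 && !force)
		;	/* Self-snoop keeps caches coherent; do nothing. */
	else if ((cpu_feature & CPUID_CLFSH) != 0 &&
	    eva - sva < PMAP_CLFLUSH_THRESHOLD) {
		/* Flush the range one cache line at a time (clflush). */
	} else {
		/* Assumed fallback: invalidate the entire CPU cache. */
	}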