author     ups <ups@FreeBSD.org>  2005-10-12 01:41:48 +0000
committer  ups <ups@FreeBSD.org>  2005-10-12 01:41:48 +0000
commit     06cb0fb4c0030897b32f55bdf4777f7a2f643690 (patch)
tree       70348d183447cd0d933197f8377e81ec9bc8ea2f /sys
parent     1bf7a6828731d94e7268fed4d3a2c0bf70f9d9fa (diff)
Ensure that a thread stays on the same CPU when calculating per-CPU
TLB shootdown requirements.  Otherwise a CPU may not get the needed
TLB invalidation.

The PTE valid and accessed flags cannot be used here to avoid TLB
shootdowns unless sf->cpumask == all_cpus; otherwise some CPUs may
still hold an even older entry in the TLB.  Since sf_buf_alloc()
mappings are normally always used, that optimization is of little
value anyway, and presetting the accessed and modified bits lets the
CPU load the entry into the TLB speculatively.

Both bugs can cause random data corruption.

MFC after:	3 days
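
For readers who want to see the hazard in isolation, the sketch below shows
the pattern the patch adopts around PCPU_GET(cpumask).  It is a simplified,
hypothetical fragment that assumes the headers already included by
vm_machdep.c (sys/sched.h, sys/sf_buf.h, sys/smp.h and the pcpu/pmap
headers); the helper name sf_mark_cpu_mapped() and the u_int type of the
local are assumptions made for illustration, not part of the commit.

/*
 * Hypothetical helper, a minimal sketch of the pattern the patch adopts:
 * record that the current CPU has a valid translation for sf->kva,
 * flushing any stale TLB entry first.
 *
 * Without the sched_pin()/sched_unpin() bracket the thread could be
 * migrated between reading PCPU_GET(cpumask) and updating sf->cpumask.
 * The bit would then describe a CPU the thread no longer runs on, while
 * the CPU it migrated to keeps its stale translation -- the missed TLB
 * invalidation described above.
 */
static void
sf_mark_cpu_mapped(struct sf_buf *sf)
{
	u_int cpumask;			/* assumed type of the per-CPU mask */

	sched_pin();			/* stay on this CPU ... */
	cpumask = PCPU_GET(cpumask);	/* ... while its mask is read */
	if ((sf->cpumask & cpumask) == 0) {
		sf->cpumask |= cpumask;
		invlpg(sf->kva);	/* flush this CPU's stale entry */
	}
	sched_unpin();			/* migration is safe again */
}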
Diffstat (limited to 'sys')
 sys/i386/i386/vm_machdep.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index 1cec91b..ce2ea83 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -63,6 +63,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
+#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
@@ -672,6 +673,7 @@ sf_buf_alloc(struct vm_page *m, int flags)
nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
}
#ifdef SMP
+ sched_pin();
cpumask = PCPU_GET(cpumask);
if ((sf->cpumask & cpumask) == 0) {
sf->cpumask |= cpumask;
@@ -686,6 +688,8 @@ sf_buf_alloc(struct vm_page *m, int flags)
mtx_unlock_spin(&smp_ipi_mtx);
}
}
+ sched_unpin();
+
#endif
goto done;
}
@@ -716,28 +720,24 @@ sf_buf_alloc(struct vm_page *m, int flags)
/*
* Update the sf_buf's virtual-to-physical mapping, flushing the
- * virtual address from the TLB only if the PTE implies that the old
- * mapping has been used. Since the reference count for the sf_buf's
- * old mapping was zero, that mapping is not currently in use.
- * Consequently, there is no need to exchange the old and new PTEs
- * atomically, even under PAE.
+ * virtual address from the TLB. Since the reference count for
+ * the sf_buf's old mapping was zero, that mapping is not
+ * currently in use. Consequently, there is no need to exchange
+ * the old and new PTEs atomically, even under PAE.
*/
ptep = vtopte(sf->kva);
opte = *ptep;
- *ptep = VM_PAGE_TO_PHYS(m) | pgeflag | PG_RW | PG_V;
+ *ptep = VM_PAGE_TO_PHYS(m) | pgeflag | PG_RW | PG_V | PG_A | PG_M;
#ifdef SMP
if (flags & SFB_CPUPRIVATE) {
- if ((opte & (PG_A | PG_V)) == (PG_A | PG_V)) {
- sf->cpumask = PCPU_GET(cpumask);
- invlpg(sf->kva);
- } else
- sf->cpumask = all_cpus;
+ sf->cpumask = PCPU_GET(cpumask);
+ invlpg(sf->kva);
goto done;
- } else
- sf->cpumask = all_cpus;
+ }
+
+ sf->cpumask = all_cpus;
#endif
- if ((opte & (PG_A | PG_V)) == (PG_A | PG_V))
- pmap_invalidate_page(kernel_pmap, sf->kva);
+ pmap_invalidate_page(kernel_pmap, sf->kva);
done:
mtx_unlock(&sf_buf_lock);
return (sf);
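
For readability, here is how the last hunk reads once applied.  It is
reassembled from the context and '+' lines above rather than copied from
the tree, so treat it as a sketch of the resulting code path; the comments
on the preset flags and the unconditional shootdown are editorial glosses,
not part of the commit.

	/*
	 * Update the sf_buf's virtual-to-physical mapping, flushing the
	 * virtual address from the TLB.  Since the reference count for
	 * the sf_buf's old mapping was zero, that mapping is not
	 * currently in use.  Consequently, there is no need to exchange
	 * the old and new PTEs atomically, even under PAE.
	 */
	ptep = vtopte(sf->kva);
	opte = *ptep;
	/* PG_A and PG_M are preset so the CPU never has to set them. */
	*ptep = VM_PAGE_TO_PHYS(m) | pgeflag | PG_RW | PG_V | PG_A | PG_M;
#ifdef SMP
	if (flags & SFB_CPUPRIVATE) {
		/* Only the calling CPU will use this mapping. */
		sf->cpumask = PCPU_GET(cpumask);
		invlpg(sf->kva);
		goto done;
	}

	sf->cpumask = all_cpus;
#endif
	/* Shoot down the old translation on every CPU, unconditionally. */
	pmap_invalidate_page(kernel_pmap, sf->kva);
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);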