author     nwhitehorn <nwhitehorn@FreeBSD.org>  2010-07-31 21:35:15 +0000
committer  nwhitehorn <nwhitehorn@FreeBSD.org>  2010-07-31 21:35:15 +0000
commit     1a14bf4648a73ec1bef9d8b27aefcd912d6a1f2f (patch)
tree       90743fa4b480cb7ba17438a347ff6c673f1e4505 /sys/powerpc
parent     b6078715a19dff6c6a77090896b8a42d4b38b1eb (diff)
Improve hash coverage for kernel page table entries by modifying the kernel
ESID -> VSID map function. This makes ZFS run stably on PowerPC under heavy
loads (repeated simultaneous SVN checkouts and updates).
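
Illustrative sketch (not part of the commit): the program below mirrors the
old and new KERNEL_VSID macros from the sys/powerpc/include/slb.h hunk and
prints the VSIDs produced for a few consecutive kernel ESIDs. Under the old
identity map, consecutive kernel segments receive consecutive VSIDs and so
cluster in neighboring page-table hash groups; the rotate-and-multiply map
spreads them across the VSID space.

#include <stdint.h>
#include <stdio.h>

#define	KERNEL_VSID_BIT	0x0000001000000000UL

/* Old map (pre-commit, non-large case): the VSID is just the ESID. */
#define	OLD_KERNEL_VSID(esid)	((uint64_t)(esid) | KERNEL_VSID_BIT)

/* New map (this commit): rotate the 36-bit ESID left by 8 bits and
 * multiply by the prime 0x13bb before masking in the kernel bit. */
#define	NEW_KERNEL_VSID(esid)	((((((uint64_t)(esid) << 8) |		\
	((uint64_t)(esid) >> 28)) * 0x13bbUL) &				\
	(KERNEL_VSID_BIT - 1)) | KERNEL_VSID_BIT)

int
main(void)
{
	uint64_t esid;

	/* Old VSIDs differ only in their low bits; new ones diverge. */
	for (esid = 0; esid < 8; esid++)
		printf("esid %llu: old 0x%016llx  new 0x%016llx\n",
		    (unsigned long long)esid,
		    (unsigned long long)OLD_KERNEL_VSID(esid),
		    (unsigned long long)NEW_KERNEL_VSID(esid));
	return (0);
}
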
Diffstat (limited to 'sys/powerpc')
-rw-r--r--  sys/powerpc/aim/mmu_oea64.c |  2
-rw-r--r--  sys/powerpc/aim/slb.c       | 11
-rw-r--r--  sys/powerpc/include/slb.h   | 13
3 files changed, 10 insertions, 16 deletions
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 33b3ed9..962e8ec 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -838,7 +838,7 @@ moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
}
entry.slbe = slbe;
- entry.slbv = KERNEL_VSID(esid, large) << SLBV_VSID_SHIFT;
+ entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
if (large)
entry.slbv |= SLBV_L;
diff --git a/sys/powerpc/aim/slb.c b/sys/powerpc/aim/slb.c
index 0990f94..7ea4593 100644
--- a/sys/powerpc/aim/slb.c
+++ b/sys/powerpc/aim/slb.c
@@ -104,17 +104,10 @@ uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
struct slb entry;
- int large;
/* Shortcut kernel case */
- if (pm == kernel_pmap) {
- large = 0;
- if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS &&
- mem_valid(va, 0) == 0)
- large = 1;
-
- return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT, large));
- }
+ if (pm == kernel_pmap)
+ return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT));
/*
* If there is no vsid for this VA, we need to add a new entry
diff --git a/sys/powerpc/include/slb.h b/sys/powerpc/include/slb.h
index a189987..0d650b3 100644
--- a/sys/powerpc/include/slb.h
+++ b/sys/powerpc/include/slb.h
@@ -47,14 +47,15 @@
#define SLBV_VSID_MASK 0xfffffffffffff000UL /* Virtual segment ID mask */
#define SLBV_VSID_SHIFT 12
-#define KERNEL_VSID_BIT 0x0000001000000000UL /* Bit set in all kernel VSIDs */
-
/*
- * Shift large-page VSIDs one place left. At present, they are only used in the
- * kernel direct map, and we already assume in the placement of KVA that the
- * CPU cannot address more than 63 bits of memory.
+ * Make a predictable 1:1 map from ESIDs to VSIDs for the kernel. Hash table
+ * coverage is increased by swizzling the ESID and multiplying by a prime
+ * number (0x13bb).
*/
-#define KERNEL_VSID(esid, large) (((uint64_t)(esid) << (large ? 1 : 0)) | KERNEL_VSID_BIT)
+#define KERNEL_VSID_BIT 0x0000001000000000UL /* Bit set in all kernel VSIDs */
+#define KERNEL_VSID(esid) ((((((uint64_t)esid << 8) | ((uint64_t)esid >> 28)) \
+ * 0x13bbUL) & (KERNEL_VSID_BIT - 1)) | \
+ KERNEL_VSID_BIT)
#define SLBE_VALID 0x0000000008000000UL /* SLB entry valid */
#define SLBE_INDEX_MASK 0x0000000000000fffUL /* SLB index mask*/
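
Companion sketch (not part of the commit; the ADDR_SR_SHFT value of 28 is
an assumption matching the 256 MB segments this code targets): with the
single-argument macro, the kernel fast path of va_to_vsid() from the
sys/powerpc/aim/slb.c hunk reduces to one invocation with no 'large' flag.

#include <stdint.h>

#define	KERNEL_VSID_BIT	0x0000001000000000UL
#define	KERNEL_VSID(esid) ((((((uint64_t)esid << 8) | ((uint64_t)esid >> 28)) \
				* 0x13bbUL) & (KERNEL_VSID_BIT - 1)) | \
				KERNEL_VSID_BIT)

/* Assumed: FreeBSD defines ADDR_SR_SHFT as 28, so va >> 28 yields the
 * ESID of a 256 MB segment. */
#define	ADDR_SR_SHFT	28

/* Kernel fast path of va_to_vsid() after this commit: large pages no
 * longer need a shifted VSID, so the 'large' bookkeeping is gone. */
uint64_t
kernel_va_to_vsid(uintptr_t va)
{
	return (KERNEL_VSID(va >> ADDR_SR_SHFT));
}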