path: root/sys/powerpc/aim/slb.c
author    nwhitehorn <nwhitehorn@FreeBSD.org>  2012-01-15 00:08:14 +0000
committer nwhitehorn <nwhitehorn@FreeBSD.org>  2012-01-15 00:08:14 +0000
commit    19c997ffb187fa972ee676f70314969915c35584 (patch)
tree      1fedd675c12139ce0991a5a3ad286a5f769b575e /sys/powerpc/aim/slb.c
parent    e19a997d4a9c059fd70e3096ca9be23cf2f7a7c3 (diff)
Rework SLB trap handling so that double faults into an SLB trap handler are
possible, and double faults within an SLB trap handler are not. The result
is that it is possible to take an SLB fault at any time, on any address, for
any reason, at any point in the kernel.

This lets us do two important things. First, it removes the (soft) 16 GB RAM
ceiling on PPC64, as well as any architectural limitations on KVA space.
Second, it lets the kernel tolerate poorly designed hypervisors that have a
tendency to fail to restore the SLB properly after a hypervisor context
switch.

MFC after:	6 weeks
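Below is a minimal, self-contained sketch of the replacement policy the new
code adopts: derive a pseudo-random slot index from the timebase and wrap
past the reserved user slot. It is an illustration, not the committed code
(which follows in the diff); N_SLBS, USER_SLB_SLOT, and mftb_stub() are
stand-ins invented for this sketch, whereas in the kernel n_slbs is probed
from the CPU at boot and mftb() reads the PowerPC timebase register.

#include <stdint.h>
#include <stdio.h>

#define N_SLBS		64	/* assumed slot count for this sketch */
#define USER_SLB_SLOT	0	/* assumed index of the reserved user slot */

/*
 * Stand-in for mftb(): the kernel reads the PowerPC timebase register
 * as a cheap source of pseudo-randomness; a strided counter keeps this
 * sketch portable to any host.
 */
static uint64_t
mftb_stub(void)
{
	static uint64_t tb = 0;

	return (tb += 0x9e3779b97f4a7c15ULL);
}

/*
 * Victim selection after the patch: since double faults are now
 * survivable, any kernel SLB slot may be evicted, so no search for a
 * "spillable" slot is needed; only the user slot must be skipped.
 */
static int
pick_victim_slot(void)
{
	int i;

	i = mftb_stub() % N_SLBS;
	if (i == USER_SLB_SLOT)
		i = (i + 1) % N_SLBS;
	return (i);
}

int
main(void)
{
	int n;

	for (n = 0; n < 8; n++)
		printf("victim slot: %d\n", pick_victim_slot());
	return (0);
}

Evicting an arbitrary slot, rather than scanning for one marked spillable,
is what lets the patch drop the SLB_SPILLABLE() restriction that had kept
kernel-text and KVA entries pinned (and, with them, the RAM and KVA
ceilings the commit message describes).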
Diffstat (limited to 'sys/powerpc/aim/slb.c')
-rw-r--r--	sys/powerpc/aim/slb.c	18
1 file changed, 4 insertions, 14 deletions
diff --git a/sys/powerpc/aim/slb.c b/sys/powerpc/aim/slb.c
index 1a5ce65..7f4b2ef 100644
--- a/sys/powerpc/aim/slb.c
+++ b/sys/powerpc/aim/slb.c
@@ -409,15 +409,11 @@ slb_alloc_tree(void)
 /* Lock entries mapping kernel text and stacks */
-#define SLB_SPILLABLE(slbe) \
-	(((slbe & SLBE_ESID_MASK) < VM_MIN_KERNEL_ADDRESS && \
-	    (slbe & SLBE_ESID_MASK) > 16*SEGMENT_LENGTH) || \
-	    (slbe & SLBE_ESID_MASK) > VM_MAX_KERNEL_ADDRESS)
 void
 slb_insert_kernel(uint64_t slbe, uint64_t slbv)
 {
 	struct slb *slbcache;
-	int i, j;
+	int i;
 
 	/* We don't want to be preempted while modifying the kernel map */
 	critical_enter();
@@ -437,15 +433,9 @@ slb_insert_kernel(uint64_t slbe, uint64_t slbv)
 		slbcache[USER_SLB_SLOT].slbe = 1;
 	}
 
-	for (i = mftb() % n_slbs, j = 0; j < n_slbs; j++, i = (i+1) % n_slbs) {
-		if (i == USER_SLB_SLOT)
-			continue;
-
-		if (SLB_SPILLABLE(slbcache[i].slbe))
-			break;
-	}
-
-	KASSERT(j < n_slbs, ("All kernel SLB slots locked!"));
+	i = mftb() % n_slbs;
+	if (i == USER_SLB_SLOT)
+		i = (i+1) % n_slbs;
 
 fillkernslb:
 	KASSERT(i != USER_SLB_SLOT,