diff options
author     nwhitehorn <nwhitehorn@FreeBSD.org>    2010-11-03 15:15:48 +0000
committer  nwhitehorn <nwhitehorn@FreeBSD.org>    2010-11-03 15:15:48 +0000
commit     88576008938a9baeffeba03f558f4d409d97ebae (patch)
tree       032a65aa5d2c19a39eca9e450a60b598c6ead21f
parent     54aa1adf6cb449f4e79a7a602aaabc668b524e65 (diff)
download   FreeBSD-src-88576008938a9baeffeba03f558f4d409d97ebae.zip
           FreeBSD-src-88576008938a9baeffeba03f558f4d409d97ebae.tar.gz
Clean up the user segment handling code a little more. Now that
set_user_sr() itself caches the user segment VSID, there is no need for
cpu_switch() to do it again. This change also unifies the 32 and 64-bit
code paths for kernel faults on user pages and remaps the user SLB slot
on 64-bit systems when taking a syscall to avoid some unnecessary segment
exception traps.
 sys/powerpc/aim/copyinout.c |  7 +++++--
 sys/powerpc/aim/swtch32.S   |  2 --
 sys/powerpc/aim/swtch64.S   |  5 -----
 sys/powerpc/aim/trap.c      | 13 +++++++------
 4 files changed, 12 insertions(+), 15 deletions(-)
diff --git a/sys/powerpc/aim/copyinout.c b/sys/powerpc/aim/copyinout.c
index 15623ed..84476cd 100644
--- a/sys/powerpc/aim/copyinout.c
+++ b/sys/powerpc/aim/copyinout.c
@@ -102,11 +102,12 @@ set_user_sr(pmap_t pm, const void *addr)
 	if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == slbv)
 		return;
 
-	__asm __volatile ("isync; slbie %0; slbmte %1, %2; isync" ::
-	    "r"(USER_ADDR), "r"(slbv), "r"(USER_SLB_SLBE));
+	__asm __volatile("isync");
 	curthread->td_pcb->pcb_cpu.aim.usr_segm =
 	    (uintptr_t)addr >> ADDR_SR_SHFT;
 	curthread->td_pcb->pcb_cpu.aim.usr_vsid = slbv;
+	__asm __volatile ("slbie %0; slbmte %1, %2; isync" ::
+	    "r"(USER_ADDR), "r"(slbv), "r"(USER_SLB_SLBE));
 }
 #else
 static __inline void
@@ -124,6 +125,8 @@ set_user_sr(pmap_t pm, const void *addr)
 		vsid |= SR_N;
 
 	__asm __volatile("isync");
+	curthread->td_pcb->pcb_cpu.aim.usr_segm =
+	    (uintptr_t)addr >> ADDR_SR_SHFT;
 	curthread->td_pcb->pcb_cpu.aim.usr_vsid = vsid;
 	__asm __volatile("mtsr %0,%1; isync" :: "n"(USER_SR), "r"(vsid));
 }
diff --git a/sys/powerpc/aim/swtch32.S b/sys/powerpc/aim/swtch32.S
index 3b608f8..cd141aa 100644
--- a/sys/powerpc/aim/swtch32.S
+++ b/sys/powerpc/aim/swtch32.S
@@ -88,8 +88,6 @@ ENTRY(cpu_switch)
 	stw	%r16,PCB_CR(%r6)
 	mflr	%r16			/* Save the link register */
 	stw	%r16,PCB_LR(%r6)
-	mfsr	%r16,USER_SR		/* Save USER_SR for copyin/out */
-	stw	%r16,PCB_AIM_USR_VSID(%r6)
 	stw	%r1,PCB_SP(%r6)		/* Save the stack pointer */
 	stw	%r2,PCB_TOC(%r6)	/* Save the TOC pointer */
 
diff --git a/sys/powerpc/aim/swtch64.S b/sys/powerpc/aim/swtch64.S
index f1af24e..dd76e0a 100644
--- a/sys/powerpc/aim/swtch64.S
+++ b/sys/powerpc/aim/swtch64.S
@@ -110,11 +110,6 @@ ENTRY(cpu_switch)
 	std	%r1,PCB_SP(%r6)		/* Save the stack pointer */
 	std	%r2,PCB_TOC(%r6)	/* Save the TOC pointer */
 
-	li	%r15,0			/* Save user segment for copyin/out */
-	li	%r16,USER_SLB_SLOT
-	slbmfev	%r15, %r16
-	std	%r15,PCB_AIM_USR_VSID(%r6)
-
 	mr	%r14,%r3		/* Copy the old thread ptr... */
 	mr	%r15,%r4		/* and the new thread ptr in scratch */
 	mr	%r16,%r5		/* and the new lock */
diff --git a/sys/powerpc/aim/trap.c b/sys/powerpc/aim/trap.c
index 22f55a7..de4c6d8 100644
--- a/sys/powerpc/aim/trap.c
+++ b/sys/powerpc/aim/trap.c
@@ -455,6 +455,13 @@ syscall(struct trapframe *frame)
 	td = PCPU_GET(curthread);
 	td->td_frame = frame;
 
+	/*
+	 * Speculatively restore last user SLB segment, which we know is
+	 * invalid already, since we are likely to do copyin()/copyout().
+	 */
+	__asm __volatile ("slbmte %0, %1; isync" ::
+	    "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
+
 	error = syscallenter(td, &sa);
 	syscallret(td, error, &sa);
 }
@@ -532,13 +539,7 @@ trap_pfault(struct trapframe *frame, int user)
 
 		map = &p->p_vmspace->vm_map;
 
-#ifdef __powerpc64__
 		user_sr = td->td_pcb->pcb_cpu.aim.usr_segm;
-#else
-		__asm ("mfsr %0, %1"
-		    : "=r"(user_sr)
-		    : "K"(USER_SR));
-#endif
 		eva &= ADDR_PIDX | ADDR_POFF;
 		eva |= user_sr << ADDR_SR_SHFT;
 	} else {