diff options
author | jake <jake@FreeBSD.org> | 2002-03-07 05:15:43 +0000 |
---|---|---|
committer | jake <jake@FreeBSD.org> | 2002-03-07 05:15:43 +0000 |
commit | 04926795beeb25b72ce20663b539e0ef79aa909b (patch) | |
tree | 4d7af075c44114ef81b46d024f54929c2f96bb44 /sys | |
parent | e3a68020c787177e586910ce3b34c34c62fc5ff6 (diff) | |
download | FreeBSD-src-04926795beeb25b72ce20663b539e0ef79aa909b.zip FreeBSD-src-04926795beeb25b72ce20663b539e0ef79aa909b.tar.gz |
Implement kthread context stealing. This is a bit of a misnomer because
the context is not actually stolen, as it would be for i386. Instead of
deactivating a user vmspace immediately when switching out, and recycling
its tlb context, wait until the next context switch to a different user
vmspace. In this way we can switch from a user process to any number of
kernel threads and back to the same user process again, without losing any
of its mappings in the tlb that would not already be knocked out by the automatic
replacement algorithm. This is not expected to have a measurable performance
improvement on the machines we currently run on, but it sounds cool and makes
the sparc64 port SMPng buzz word compliant.
Diffstat (limited to 'sys')
-rw-r--r-- | sys/sparc64/sparc64/pmap.c | 1 | ||||
-rw-r--r-- | sys/sparc64/sparc64/swtch.S | 53 | ||||
-rw-r--r-- | sys/sparc64/sparc64/swtch.s | 53 |
3 files changed, 53 insertions, 54 deletions
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c index 19409e9..69d7f88 100644 --- a/sys/sparc64/sparc64/pmap.c +++ b/sys/sparc64/sparc64/pmap.c @@ -1819,6 +1819,7 @@ pmap_activate(struct thread *td) context = pmap_context_alloc(); pm->pm_context[PCPU_GET(cpuid)] = context; pm->pm_active |= PCPU_GET(cpumask); + PCPU_SET(vmspace, vm); stxa(AA_DMMU_PCXR, ASI_DMMU, context); membar(Sync); mtx_unlock_spin(&sched_lock); diff --git a/sys/sparc64/sparc64/swtch.S b/sys/sparc64/sparc64/swtch.S index 70d95ec..429e961 100644 --- a/sys/sparc64/sparc64/swtch.S +++ b/sys/sparc64/sparc64/swtch.S @@ -119,11 +119,11 @@ ENTRY(cpu_switch) wrpr %g0, PSTATE_KERNEL, %pstate /* - * Point to the vmspaces of the new and old processes. + * Point to the vmspaces of the new process, and of the last non-kernel + * process to run. */ -2: ldx [%l0 + TD_PROC], %l2 ldx [%o0 + TD_PROC], %o2 - ldx [%l2 + P_VMSPACE], %l2 + ldx [PCPU(VMSPACE)], %l2 ldx [%o2 + P_VMSPACE], %o2 #if KTR_COMPILE & KTR_PROC @@ -141,24 +141,33 @@ ENTRY(cpu_switch) be,a,pn %xcc, 4f nop + /* + * If the new process has nucleus context we are done. + */ lduw [PCPU(CPUID)], %o3 sllx %o3, INT_SHIFT, %o3 add %o2, VM_PMAP + PM_CONTEXT, %o4 + lduw [%o3 + %o4], %o5 - lduw [PCPU(CPUID)], %l3 - sllx %l3, INT_SHIFT, %l3 - add %l2, VM_PMAP + PM_CONTEXT, %l4 +#if KTR_COMPILE & KTR_PROC + CATR(KTR_PROC, "cpu_switch: ctx=%#lx" + , %g1, %g2, %g3, 7, 8, 9) + stx %o5, [%g1 + KTR_PARM1] +9: +#endif + + brz,a,pn %o5, 4f + nop /* - * If the old process has nucleus context we don't want to deactivate - * its pmap on this cpu. + * If there was no non-kernel vmspace, don't try to deactivate it. */ - lduw [%l3 + %l4], %l5 - brz,a %l5, 2f + brz,a,pn %l2, 2f nop /* - * Mark the pmap no longer active on this cpu. + * Mark the pmap of the last non-kernel vmspace to run as no longer + * active on this cpu. 
*/ lduw [%l2 + VM_PMAP + PM_ACTIVE], %l3 lduw [PCPU(CPUMASK)], %l4 @@ -175,25 +184,10 @@ ENTRY(cpu_switch) stw %l5, [%l3 + %l4] /* - * If the new process has nucleus context we are done. - */ -2: lduw [%o3 + %o4], %o5 - -#if KTR_COMPILE & KTR_PROC - CATR(KTR_PROC, "cpu_switch: ctx=%#lx" - , %g1, %g2, %g3, 7, 8, 9) - stx %o5, [%g1 + KTR_PARM1] -9: -#endif - - brz,a,pn %o5, 4f - nop - - /* * Find the current free tlb context for this cpu and install it as * the new primary context. */ - lduw [PCPU(TLB_CTX)], %o5 +2: lduw [PCPU(TLB_CTX)], %o5 stw %o5, [%o3 + %o4] mov AA_DMMU_PCXR, %o4 stxa %o5, [%o4] ASI_DMMU @@ -244,6 +238,11 @@ ENTRY(cpu_switch) stw %o3, [%o2 + VM_PMAP + PM_ACTIVE] /* + * Make note of the change in vmspace. + */ + stx %o2, [PCPU(VMSPACE)] + + /* * Load the address of the tsb, switch to mmu globals, and install * the preloaded tsb pointer. */ diff --git a/sys/sparc64/sparc64/swtch.s b/sys/sparc64/sparc64/swtch.s index 70d95ec..429e961 100644 --- a/sys/sparc64/sparc64/swtch.s +++ b/sys/sparc64/sparc64/swtch.s @@ -119,11 +119,11 @@ ENTRY(cpu_switch) wrpr %g0, PSTATE_KERNEL, %pstate /* - * Point to the vmspaces of the new and old processes. + * Point to the vmspaces of the new process, and of the last non-kernel + * process to run. */ -2: ldx [%l0 + TD_PROC], %l2 ldx [%o0 + TD_PROC], %o2 - ldx [%l2 + P_VMSPACE], %l2 + ldx [PCPU(VMSPACE)], %l2 ldx [%o2 + P_VMSPACE], %o2 #if KTR_COMPILE & KTR_PROC @@ -141,24 +141,33 @@ ENTRY(cpu_switch) be,a,pn %xcc, 4f nop + /* + * If the new process has nucleus context we are done. 
+ */ lduw [PCPU(CPUID)], %o3 sllx %o3, INT_SHIFT, %o3 add %o2, VM_PMAP + PM_CONTEXT, %o4 + lduw [%o3 + %o4], %o5 - lduw [PCPU(CPUID)], %l3 - sllx %l3, INT_SHIFT, %l3 - add %l2, VM_PMAP + PM_CONTEXT, %l4 +#if KTR_COMPILE & KTR_PROC + CATR(KTR_PROC, "cpu_switch: ctx=%#lx" + , %g1, %g2, %g3, 7, 8, 9) + stx %o5, [%g1 + KTR_PARM1] +9: +#endif + + brz,a,pn %o5, 4f + nop /* - * If the old process has nucleus context we don't want to deactivate - * its pmap on this cpu. + * If there was no non-kernel vmspace, don't try to deactivate it. */ - lduw [%l3 + %l4], %l5 - brz,a %l5, 2f + brz,a,pn %l2, 2f nop /* - * Mark the pmap no longer active on this cpu. + * Mark the pmap of the last non-kernel vmspace to run as no longer + * active on this cpu. */ lduw [%l2 + VM_PMAP + PM_ACTIVE], %l3 lduw [PCPU(CPUMASK)], %l4 @@ -175,25 +184,10 @@ ENTRY(cpu_switch) stw %l5, [%l3 + %l4] /* - * If the new process has nucleus context we are done. - */ -2: lduw [%o3 + %o4], %o5 - -#if KTR_COMPILE & KTR_PROC - CATR(KTR_PROC, "cpu_switch: ctx=%#lx" - , %g1, %g2, %g3, 7, 8, 9) - stx %o5, [%g1 + KTR_PARM1] -9: -#endif - - brz,a,pn %o5, 4f - nop - - /* * Find the current free tlb context for this cpu and install it as * the new primary context. */ - lduw [PCPU(TLB_CTX)], %o5 +2: lduw [PCPU(TLB_CTX)], %o5 stw %o5, [%o3 + %o4] mov AA_DMMU_PCXR, %o4 stxa %o5, [%o4] ASI_DMMU @@ -244,6 +238,11 @@ ENTRY(cpu_switch) stw %o3, [%o2 + VM_PMAP + PM_ACTIVE] /* + * Make note of the change in vmspace. + */ + stx %o2, [PCPU(VMSPACE)] + + /* * Load the address of the tsb, switch to mmu globals, and install * the preloaded tsb pointer. */ |