summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
authorjhb <jhb@FreeBSD.org>2003-08-12 19:33:36 +0000
committerjhb <jhb@FreeBSD.org>2003-08-12 19:33:36 +0000
commit1c016824f155ae85ec1c6dd307a32cabdc234ddd (patch)
treef34ee929d8203238056fb038fb006518180967b2 /sys
parent4e0802ee6605a319fd2ddcbc847b184f1b5e9680 (diff)
downloadFreeBSD-src-1c016824f155ae85ec1c6dd307a32cabdc234ddd.zip
FreeBSD-src-1c016824f155ae85ec1c6dd307a32cabdc234ddd.tar.gz
- Convert Alpha over to the new calling conventions for cpu_throw() and
cpu_switch() where both the old and new threads are passed in as arguments. Only powerpc uses the old conventions now. - Update comments in the Alpha swtch.s to reflect KSE changes. Tested by: obrien, marcel
Diffstat (limited to 'sys')
-rw-r--r--sys/alpha/alpha/mp_machdep.c2
-rw-r--r--sys/alpha/alpha/swtch.s101
-rw-r--r--sys/kern/kern_kse.c2
-rw-r--r--sys/kern/kern_synch.c4
-rw-r--r--sys/kern/kern_thr.c2
-rw-r--r--sys/kern/kern_thread.c2
-rw-r--r--sys/sys/proc.h2
7 files changed, 50 insertions, 65 deletions
diff --git a/sys/alpha/alpha/mp_machdep.c b/sys/alpha/alpha/mp_machdep.c
index 78f9c86..8d39559 100644
--- a/sys/alpha/alpha/mp_machdep.c
+++ b/sys/alpha/alpha/mp_machdep.c
@@ -208,7 +208,7 @@ smp_init_secondary(void)
/* ok, now grab sched_lock and enter the scheduler */
mtx_lock_spin(&sched_lock);
- cpu_throw(); /* doesn't return */
+ cpu_throw(NULL, choosethread()); /* doesn't return */
panic("scheduler returned us to %s", __func__);
}
diff --git a/sys/alpha/alpha/swtch.s b/sys/alpha/alpha/swtch.s
index 34f3453..8523a24 100644
--- a/sys/alpha/alpha/swtch.s
+++ b/sys/alpha/alpha/swtch.s
@@ -49,7 +49,7 @@
/*
* savectx: save process context, i.e. callee-saved registers
*
- * Note that savectx() only works for processes other than curthread,
+ * Note that savectx() only works for threads other than curthread,
* since cpu_switch will copy over the info saved here. (It _can_
* sanely be used for curthread iff cpu_switch won't be called again, e.g.
if called from boot().)
@@ -88,72 +88,49 @@ IMPORT(Lev1map, 8)
/*
* cpu_throw()
- * Switch to a new task discarding our current state.
+ * Switch to a new thread discarding our current state.
+ *
+ * Arguments:
+ * a0 'struct thread *' of the old thread
+ * a1 'struct thread *' of the new thread
*/
LEAF(cpu_throw, 0)
LDGP(pv)
- mov zero, s0 /* ensure newproc != oldproc */
CALL(Lcs1)
END(cpu_throw)
/*
* cpu_switch()
- * Find the highest priority process and resume it.
+ * Switch to a new thread saving the current state in the old thread.
+ *
+ * Arguments:
+ * a0 'struct thread *' of the old thread
+ * a1 'struct thread *' of the new thread
*/
LEAF(cpu_switch, 1)
LDGP(pv)
/* do an inline savectx(), to save old context */
- ldq a0, PC_CURTHREAD(pcpup)
- ldq a1, TD_PCB(a0)
+ ldq a2, TD_PCB(a0)
/* NOTE: ksp is stored by the swpctx */
- stq s0, PCB_CONTEXT+(0 * 8)(a1) /* store s0 - s6 */
- stq s1, PCB_CONTEXT+(1 * 8)(a1)
- stq s2, PCB_CONTEXT+(2 * 8)(a1)
- stq s3, PCB_CONTEXT+(3 * 8)(a1)
- stq s4, PCB_CONTEXT+(4 * 8)(a1)
- stq s5, PCB_CONTEXT+(5 * 8)(a1)
- stq s6, PCB_CONTEXT+(6 * 8)(a1)
- stq ra, PCB_CONTEXT+(7 * 8)(a1) /* store ra */
+ stq s0, PCB_CONTEXT+(0 * 8)(a2) /* store s0 - s6 */
+ stq s1, PCB_CONTEXT+(1 * 8)(a2)
+ stq s2, PCB_CONTEXT+(2 * 8)(a2)
+ stq s3, PCB_CONTEXT+(3 * 8)(a2)
+ stq s4, PCB_CONTEXT+(4 * 8)(a2)
+ stq s5, PCB_CONTEXT+(5 * 8)(a2)
+ stq s6, PCB_CONTEXT+(6 * 8)(a2)
+ stq ra, PCB_CONTEXT+(7 * 8)(a2) /* store ra */
call_pal PAL_OSF1_rdps /* NOTE: doesn't kill a0 */
- stq v0, PCB_CONTEXT+(8 * 8)(a1) /* store ps, for ipl */
+ stq v0, PCB_CONTEXT+(8 * 8)(a2) /* store ps, for ipl */
mov a0, s0 /* s0 = old curthread */
- mov a1, s1 /* s1 = old pcb */
-
-sw1:
- br pv, Lcs1
-Lcs1: LDGP(pv)
- CALL(choosethread) /* can't return NULL */
- mov v0, s2 /* s2 = new thread */
- ldq s3, TD_MD_PCBPADDR(s2) /* s3 = new pcbpaddr */
-
- /*
- * Check to see if we're switching to ourself. If we are,
- * don't bother loading the new context.
- *
- * Note that even if we re-enter cpu_switch() from idle(),
- * s0 will still contain the old curthread value because any
- * users of that register between then and now must have
- * saved it. Also note that switch_exit() ensures that
- * s0 is clear before jumping here to find a new process.
- */
- cmpeq s0, s2, t0 /* oldthread == newthread? */
- bne t0, Lcs7 /* Yes! Skip! */
-
-#ifdef SMP
- /*
- * Save fp state if we have some.
- */
- mov s0, a0 /* curthread */
- ldiq a1, 1 /* clear fpcurthread */
- CALL(alpha_fpstate_save)
-#endif
+ mov a2, s1 /* s1 = old pcb */
/*
* Deactivate the old address space before activating the
* new one. We need to do this before activating the
- * new process's address space in the event that new
- * process is using the same vmspace as the old. If we
+ * new thread's address space in the event that the new
+ * thread is using the same vmspace as the old. If we
* do this after we activate, then we might end up
* incorrectly marking the pmap inactive!
*
@@ -163,14 +140,26 @@ Lcs1: LDGP(pv)
* taken care of calling pmap_deactivate() in cpu_exit(),
* before the vmspace went away.
*/
- beq s0, Lcs6
+ beq a0, sw1
+ CALL(pmap_deactivate) /* pmap_deactivate(oldthread) */
+
+sw1:
+ br pv, Lcs1
+Lcs1: LDGP(pv)
+ mov a1, s2 /* s2 = new thread */
+ ldq s3, TD_MD_PCBPADDR(s2) /* s3 = new pcbpaddr */
- mov s0, a0 /* pmap_deactivate(oldthread) */
- CALL(pmap_deactivate) /* XXXKSE */
+#ifdef SMP
+ /*
+ * Save fp state if we have some.
+ */
+ mov s0, a0 /* curthread */
+ ldiq a1, 1 /* clear fpcurthread */
+ CALL(alpha_fpstate_save)
+#endif
-Lcs6:
/*
- * Activate the new process's address space and perform
+ * Activate the new thread's address space and perform
* the actual context swap.
*/
@@ -180,18 +169,14 @@ Lcs6:
mov s3, a0 /* swap the context */
SWITCH_CONTEXT
-Lcs7:
-
/*
* Now that the switch is done, update curthread and other
- * globals. We must do this even if switching to ourselves
- * because we might have re-entered cpu_switch() from idle(),
- * in which case curthread would be NULL.
+ * globals.
*/
stq s2, PC_CURTHREAD(pcpup) /* curthread = p */
/*
- * Now running on the new u struct.
+ * Now running on the new pcb.
* Restore registers and return.
*/
ldq t0, TD_PCB(s2)
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 40a2923..a5a294d 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -1289,7 +1289,7 @@ thread_exit(void)
}
/* XXX Shouldn't cpu_throw() here. */
mtx_assert(&sched_lock, MA_OWNED);
-#if !defined(__alpha__) && !defined(__powerpc__)
+#if !defined(__powerpc__)
cpu_throw(td, choosethread());
#else
cpu_throw();
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 384feea..7525f3a 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -459,7 +459,7 @@ mi_switch(void)
{
struct bintime new_switchtime;
struct thread *td;
-#if !defined(__alpha__) && !defined(__powerpc__)
+#if !defined(__powerpc__)
struct thread *newtd;
#endif
struct proc *p;
@@ -517,7 +517,7 @@ mi_switch(void)
thread_switchout(td);
sched_switchout(td);
-#if !defined(__alpha__) && !defined(__powerpc__)
+#if !defined(__powerpc__)
newtd = choosethread();
if (td != newtd)
cpu_switch(td, newtd); /* SHAZAM!! */
diff --git a/sys/kern/kern_thr.c b/sys/kern/kern_thr.c
index 9f24057..4cb4df4 100644
--- a/sys/kern/kern_thr.c
+++ b/sys/kern/kern_thr.c
@@ -111,7 +111,7 @@ thr_exit1(void)
sched_exit_thread(TAILQ_NEXT(td, td_kglist), td);
thread_stash(td);
-#if !defined(__alpha__) && !defined(__powerpc__)
+#if !defined(__powerpc__)
cpu_throw(td, choosethread());
#else
cpu_throw();
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 40a2923..a5a294d 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -1289,7 +1289,7 @@ thread_exit(void)
}
/* XXX Shouldn't cpu_throw() here. */
mtx_assert(&sched_lock, MA_OWNED);
-#if !defined(__alpha__) && !defined(__powerpc__)
+#if !defined(__powerpc__)
cpu_throw(td, choosethread());
#else
cpu_throw();
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 97d0699..21b5e33 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -865,7 +865,7 @@ int sigonstack(size_t sp);
void sleepinit(void);
void stopevent(struct proc *, u_int, u_int);
void cpu_idle(void);
-#if !defined(__alpha__) && !defined(__powerpc__)
+#if !defined(__powerpc__)
void cpu_switch(struct thread *old, struct thread *new);
void cpu_throw(struct thread *old, struct thread *new) __dead2;
#else
OpenPOWER on IntegriCloud