summaryrefslogtreecommitdiffstats
path: root/sys/alpha
diff options
context:
space:
mode:
authorjhb <jhb@FreeBSD.org>2001-04-27 19:28:25 +0000
committerjhb <jhb@FreeBSD.org>2001-04-27 19:28:25 +0000
commit8bfdafc9349392c2fe02f548d6ffe6db56626575 (patch)
treeb44641a14ad9f8eb3b338e429775d3298b8946e2 /sys/alpha
parent95c17411607d3bc160b63f97e94912bf27b24274 (diff)
downloadFreeBSD-src-8bfdafc9349392c2fe02f548d6ffe6db56626575.zip
FreeBSD-src-8bfdafc9349392c2fe02f548d6ffe6db56626575.tar.gz
Overhaul of the SMP code. Several portions of the SMP kernel support have
been made machine independent and various other adjustments have been made to support Alpha SMP. - It splits the per-process portions of hardclock() and statclock() off into hardclock_process() and statclock_process() respectively. hardclock() and statclock() call the *_process() functions for the current process so that UP systems will run as before. For SMP systems, it is simply necessary to ensure that all other processors execute the *_process() functions when the main clock functions are triggered on one CPU by an interrupt. For the alpha 4100, clock interrupts are delivered in a staggered broadcast fashion, so we simply call hardclock/statclock on the boot CPU and call the *_process() functions on the secondaries. For x86, we call statclock and hardclock as usual and then call forward_hardclock/statclock in the MD code to send an IPI to cause the AP's to execute forwarded_hardclock/statclock which then call the *_process() functions. - forward_signal() and forward_roundrobin() have been reworked to be MI and to involve less hackery. Now the cpu doing the forward sets any flags, etc. and sends a very simple IPI_AST to the other cpu(s). AST IPIs now just basically return so that they can execute ast() and don't bother with setting the astpending or needresched flags themselves. This also removes the loop in forward_signal() as sched_lock closes the race condition that the loop worked around. - need_resched(), resched_wanted() and clear_resched() have been changed to take a process to act on rather than assuming curproc so that they can be used to implement forward_roundrobin() as described above. - Various other SMP variables have been moved to a MI subr_smp.c and a new header sys/smp.h declares MI SMP variables and API's. The IPI API's from machine/ipl.h have moved to machine/smp.h which is included by sys/smp.h. - The globaldata_register() and globaldata_find() functions as well as the SLIST of globaldata structures have become MI and moved into subr_smp.c. 
Also, the globaldata list is only available if SMP support is compiled in. Reviewed by: jake, peter Looked over by: eivind
Diffstat (limited to 'sys/alpha')
-rw-r--r--sys/alpha/alpha/db_interface.c65
-rw-r--r--sys/alpha/alpha/interrupt.c31
-rw-r--r--sys/alpha/alpha/machdep.c18
-rw-r--r--sys/alpha/alpha/mp_machdep.c683
-rw-r--r--sys/alpha/alpha/pmap.c2
-rw-r--r--sys/alpha/alpha/trap.c6
-rw-r--r--sys/alpha/include/cpu.h1
-rw-r--r--sys/alpha/include/globaldata.h7
-rw-r--r--sys/alpha/include/ipl.h19
-rw-r--r--sys/alpha/include/pcpu.h7
-rw-r--r--sys/alpha/include/smp.h45
11 files changed, 115 insertions, 769 deletions
diff --git a/sys/alpha/alpha/db_interface.c b/sys/alpha/alpha/db_interface.c
index d6b1297..e94a490 100644
--- a/sys/alpha/alpha/db_interface.c
+++ b/sys/alpha/alpha/db_interface.c
@@ -60,13 +60,13 @@
#include <sys/cons.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
+#include <sys/smp.h>
#include <vm/vm.h>
#include <machine/db_machdep.h>
#include <machine/pal.h>
#include <machine/prom.h>
-#include <machine/smp.h>
#include <alpha/alpha/db_instruction.h>
@@ -573,45 +573,46 @@ db_branch_taken(ins, pc, regs)
DB_SHOW_COMMAND(pcpu, db_show_pcpu)
{
struct globaldata *gd;
+#ifdef SMP
int id;
if (have_addr)
id = ((addr >> 4) % 16) * 10 + (addr % 16);
else
id = PCPU_GET(cpuid);
- SLIST_FOREACH(gd, &cpuhead, gd_allcpu) {
- if (gd->gd_cpuid == id)
- break;
- }
- if (gd == NULL)
+ gd = globaldata_find(id);
+ if (gd == NULL) {
db_printf("CPU %d not found\n", id);
- else {
- db_printf("cpuid = %d\n", gd->gd_cpuid);
- db_printf("ipis = %lx\n", gd->gd_pending_ipis);
- db_printf("next ASN = %d\n", gd->gd_next_asn);
- db_printf("curproc = ");
- if (gd->gd_curproc != NULL)
- db_printf("%p: pid %d \"%s\"\n", gd->gd_curproc,
- gd->gd_curproc->p_pid, gd->gd_curproc->p_comm);
- else
- db_printf("none\n");
- db_printf("curpcb = %p\n", gd->gd_curpcb);
- db_printf("fpcurproc = ");
- if (gd->gd_fpcurproc != NULL)
- db_printf("%p: pid %d \"%s\"\n", gd->gd_fpcurproc,
- gd->gd_fpcurproc->p_pid, gd->gd_fpcurproc->p_comm);
- else
- db_printf("none\n");
- db_printf("idleproc = ");
- if (gd->gd_idleproc != NULL)
- db_printf("%p: pid %d \"%s\"\n", gd->gd_idleproc,
- gd->gd_idleproc->p_pid, gd->gd_idleproc->p_comm);
- else
- db_printf("none\n");
+ return;
+ }
+#else
+ gd = GLOBALP;
+#endif
+ db_printf("cpuid = %d\n", gd->gd_cpuid);
+ db_printf("ipis = %lx\n", gd->gd_pending_ipis);
+ db_printf("next ASN = %d\n", gd->gd_next_asn);
+ db_printf("curproc = ");
+ if (gd->gd_curproc != NULL)
+ db_printf("%p: pid %d \"%s\"\n", gd->gd_curproc,
+ gd->gd_curproc->p_pid, gd->gd_curproc->p_comm);
+ else
+ db_printf("none\n");
+ db_printf("curpcb = %p\n", gd->gd_curpcb);
+ db_printf("fpcurproc = ");
+ if (gd->gd_fpcurproc != NULL)
+ db_printf("%p: pid %d \"%s\"\n", gd->gd_fpcurproc,
+ gd->gd_fpcurproc->p_pid, gd->gd_fpcurproc->p_comm);
+ else
+ db_printf("none\n");
+ db_printf("idleproc = ");
+ if (gd->gd_idleproc != NULL)
+ db_printf("%p: pid %d \"%s\"\n", gd->gd_idleproc,
+ gd->gd_idleproc->p_pid, gd->gd_idleproc->p_comm);
+ else
+ db_printf("none\n");
#ifdef WITNESS
- db_printf("spin locks held:\n");
- witness_list_locks(&gd->gd_spinlocks);
+ db_printf("spin locks held:\n");
+ witness_list_locks(&gd->gd_spinlocks);
#endif
- }
}
diff --git a/sys/alpha/alpha/interrupt.c b/sys/alpha/alpha/interrupt.c
index 346b21c..b28b53d 100644
--- a/sys/alpha/alpha/interrupt.c
+++ b/sys/alpha/alpha/interrupt.c
@@ -52,6 +52,7 @@
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
+#include <sys/smp.h>
#include <sys/unistd.h>
#include <machine/reg.h>
@@ -83,7 +84,7 @@ void (*perf_irq)(unsigned long, struct trapframe *) = dummy_perf;
static u_int schedclk2;
-static driver_intr_t alpha_clock_interrupt;
+static void alpha_clock_interrupt(struct trapframe *framep);
void
interrupt(a0, a1, a2, framep)
@@ -453,7 +454,7 @@ alpha_dispatch_intr(void *frame, unsigned long vector)
}
static void
-alpha_clock_interrupt(void *framep)
+alpha_clock_interrupt(struct trapframe *framep)
{
cnt.v_intr++;
@@ -462,10 +463,26 @@ alpha_clock_interrupt(void *framep)
#else
intrcnt[INTRCNT_CLOCK]++;
#endif
- if (platform.clockintr){
- (*platform.clockintr)((struct trapframe *)framep);
- /* divide hz (1024) by 8 to get stathz (128) */
- if((++schedclk2 & 0x7) == 0)
- statclock((struct clockframe *)framep);
+ if (platform.clockintr) {
+#ifdef SMP
+ /*
+ * Only one processor drives the actual timer.
+ */
+ if (PCPU_GET(cpuid) == boot_cpu_id) {
+#endif
+ (*platform.clockintr)(framep);
+ /* divide hz (1024) by 8 to get stathz (128) */
+ if ((++schedclk2 & 0x7) == 0)
+ statclock((struct clockframe *)framep);
+#ifdef SMP
+ } else {
+ mtx_lock_spin(&sched_lock);
+ hardclock_process(curproc, TRAPF_USERMODE(framep));
+ if ((schedclk2 & 0x7) == 0)
+ statclock_process(curproc, TRAPF_PC(framep),
+ TRAPF_USERMODE(framep));
+ mtx_unlock_spin(&sched_lock);
+ }
+#endif
}
}
diff --git a/sys/alpha/alpha/machdep.c b/sys/alpha/alpha/machdep.c
index f623669..177fe8e 100644
--- a/sys/alpha/alpha/machdep.c
+++ b/sys/alpha/alpha/machdep.c
@@ -112,6 +112,7 @@
#include <sys/vmmeter.h>
#include <sys/msgbuf.h>
#include <sys/exec.h>
+#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/linker.h>
@@ -130,7 +131,6 @@
#include <machine/reg.h>
#include <machine/fpu.h>
#include <machine/pal.h>
-#include <machine/smp.h>
#include <machine/globaldata.h>
#include <machine/cpuconf.h>
#include <machine/bootinfo.h>
@@ -152,8 +152,6 @@ struct platform platform;
alpha_chipset_t chipset;
struct bootinfo_kernel bootinfo;
-struct cpuhead cpuhead;
-
struct mtx sched_lock;
struct mtx Giant;
@@ -415,14 +413,6 @@ again:
vm_pager_bufferinit();
EVENTHANDLER_REGISTER(shutdown_final, alpha_srm_shutdown, 0,
SHUTDOWN_PRI_LAST);
-
-#ifdef SMP
- /*
- * OK, enough kmem_alloc/malloc state should be up, lets get on with it!
- */
- mp_start(); /* fire up the secondaries */
- mp_announce();
-#endif /* SMP */
}
/*
@@ -1002,12 +992,6 @@ alpha_init(pfn, ptb, bim, bip, biv)
*/
globalp->gd_idlepcb.apcb_ptbr = proc0.p_addr->u_pcb.pcb_hw.apcb_ptbr;
- /*
- * Record all cpus in a list.
- */
- SLIST_INIT(&cpuhead);
- SLIST_INSERT_HEAD(&cpuhead, GLOBALP, gd_allcpu);
-
/* Setup curproc so that mutexes work */
PCPU_SET(curproc, &proc0);
PCPU_SET(spinlocks, NULL);
diff --git a/sys/alpha/alpha/mp_machdep.c b/sys/alpha/alpha/mp_machdep.c
index 3715c2f..3024102 100644
--- a/sys/alpha/alpha/mp_machdep.c
+++ b/sys/alpha/alpha/mp_machdep.c
@@ -35,6 +35,7 @@
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
+#include <sys/smp.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
@@ -43,55 +44,18 @@
#include <sys/user.h>
#include <sys/dkstat.h>
-#include <machine/smp.h>
#include <machine/atomic.h>
#include <machine/globaldata.h>
#include <machine/pmap.h>
#include <machine/rpb.h>
#include <machine/clock.h>
-#define CHECKSTATE_USER 0
-#define CHECKSTATE_SYS 1
-#define CHECKSTATE_INTR 2
-
-volatile u_int stopped_cpus;
-volatile u_int started_cpus;
-volatile u_int checkstate_probed_cpus;
-volatile u_int checkstate_need_ast;
-
/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;
static struct mtx ap_boot_mtx;
-struct proc* checkstate_curproc[MAXCPU];
-int checkstate_cpustate[MAXCPU];
-u_long checkstate_pc[MAXCPU];
-volatile u_int resched_cpus;
-void (*cpustop_restartfunc) __P((void));
-int mp_ncpus;
-
-volatile int smp_started;
-int boot_cpu_id;
-u_int32_t all_cpus;
-
-static struct globaldata *cpuid_to_globaldata[MAXCPU];
-
-int smp_active = 0; /* are the APs allowed to run? */
-SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, "");
-
-static int smp_cpus = 1; /* how many cpu's running */
-SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, "");
-
-/* Enable forwarding of a signal to a process running on a different CPU */
-static int forward_signal_enabled = 1;
-SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
- &forward_signal_enabled, 0, "");
-
-/* Enable forwarding of roundrobin to all other cpus */
-static int forward_roundrobin_enabled = 1;
-SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
- &forward_roundrobin_enabled, 0, "");
+u_int boot_cpu_id;
/*
* Communicate with a console running on a secondary processor.
@@ -225,7 +189,6 @@ smp_init_secondary(void)
if (smp_cpus == mp_ncpus) {
smp_started = 1;
smp_active = 1;
- ipi_all(0);
}
mtx_unlock_spin(&ap_boot_mtx);
@@ -250,7 +213,7 @@ static int
smp_start_secondary(int cpuid)
{
struct pcs *cpu = LOCATE_PCS(hwrpb, cpuid);
- struct pcs *bootcpu = LOCATE_PCS(hwrpb, hwrpb->rpb_primary_cpu_id);
+ struct pcs *bootcpu = LOCATE_PCS(hwrpb, boot_cpu_id);
struct alpha_pcb *pcb = (struct alpha_pcb *) cpu->pcs_hwpcb;
struct globaldata *globaldata;
int i;
@@ -272,7 +235,6 @@ smp_start_secondary(int cpuid)
}
globaldata_init(globaldata, cpuid, sz);
- SLIST_INSERT_HEAD(&cpuhead, globaldata, gd_allcpu);
/*
* Copy the idle pcb and setup the address to start executing.
@@ -329,48 +291,53 @@ smp_start_secondary(int cpuid)
return 1;
}
-/*
- * Register a struct globaldata.
- */
-void
-globaldata_register(struct globaldata *globaldata)
-{
- cpuid_to_globaldata[globaldata->gd_cpuid] = globaldata;
-}
+/* Other stuff */
-struct globaldata *
-globaldata_find(int cpuid)
+int
+cpu_mp_probe(void)
{
- return cpuid_to_globaldata[cpuid];
-}
-
-/* Other stuff */
+ struct pcs *pcsp;
+ int i, cpus;
-/* lock around the MP rendezvous */
-static struct mtx smp_rv_mtx;
+ /* XXX: Need to check for valid platforms here. */
-static void
-init_locks(void)
-{
- mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN);
- mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
+ /* Make sure we have at least one secondary CPU. */
+ cpus = 0;
+ for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) {
+ if (i == PCPU_GET(cpuid))
+ continue;
+ pcsp = (struct pcs *)((char *)hwrpb + hwrpb->rpb_pcs_off +
+ (i * hwrpb->rpb_pcs_size));
+ if ((pcsp->pcs_flags & PCS_PP) == 0)
+ continue;
+ if ((pcsp->pcs_flags & PCS_PA) == 0)
+ continue;
+ if ((pcsp->pcs_flags & PCS_PV) == 0)
+ continue;
+ if (i > MAXCPU)
+ continue;
+ cpus++;
+ }
+ return (cpus);
}
void
-mp_start()
+cpu_mp_start()
{
int i;
- int cpuid = PCPU_GET(cpuid);
- init_locks();
+ mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
mp_ncpus = 1;
- all_cpus = 1 << cpuid;
+ boot_cpu_id = PCPU_GET(cpuid);
+ KASSERT(boot_cpu_id == hwrpb->rpb_primary_cpu_id,
+ ("mp_start() called on non-primary CPU"));
+ all_cpus = 1 << boot_cpu_id;
for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) {
struct pcs *pcsp;
- if (i == cpuid)
+ if (i == boot_cpu_id)
continue;
pcsp = (struct pcs *)((char *)hwrpb + hwrpb->rpb_pcs_off +
(i * hwrpb->rpb_pcs_size));
@@ -397,10 +364,10 @@ mp_start()
all_cpus |= 1 << i;
mp_ncpus++;
}
- PCPU_SET(other_cpus, all_cpus & ~(1<<cpuid));
+ PCPU_SET(other_cpus, all_cpus & ~(1 << boot_cpu_id));
for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) {
- if (i == cpuid)
+ if (i == boot_cpu_id)
continue;
if (all_cpus & 1 << i)
smp_start_secondary(i);
@@ -408,561 +375,8 @@ mp_start()
}
void
-mp_announce()
-{
-
- printf("SMP System Detected: %d usable CPUs\n", mp_ncpus);
-}
-
-void
-smp_invltlb()
-{
-}
-
-#if 0
-#define GD_TO_INDEX(pc, prof) \
- ((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
- (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
-
-static void
-addupc_intr_forwarded(struct proc *p, int id, int *astmap)
-{
- int i;
- struct uprof *prof;
- u_long pc;
-
- pc = checkstate_pc[id];
- prof = &p->p_stats->p_prof;
- if (pc >= prof->pr_off &&
- (i = GD_TO_INDEX(pc, prof)) < prof->pr_size) {
- if ((p->p_sflag & PS_OWEUPC) == 0) {
- prof->pr_addr = pc;
- prof->pr_ticks = 1;
- p->p_sflag |= PS_OWEUPC;
- }
- *astmap |= (1 << id);
- }
-}
-
-static void
-forwarded_statclock(int id, int pscnt, int *astmap)
-{
- struct pstats *pstats;
- long rss;
- struct rusage *ru;
- struct vmspace *vm;
- int cpustate;
- struct proc *p;
-#ifdef GPROF
- register struct gmonparam *g;
- int i;
-#endif
-
- p = checkstate_curproc[id];
- cpustate = checkstate_cpustate[id];
-
- /* XXX */
- if (p->p_ithd)
- cpustate = CHECKSTATE_INTR;
- else if (p == cpuid_to_globaldata[id]->gd_idleproc)
- cpustate = CHECKSTATE_SYS;
-
- switch (cpustate) {
- case CHECKSTATE_USER:
- if (p->p_sflag & PS_PROFIL)
- addupc_intr_forwarded(p, id, astmap);
- if (pscnt > 1)
- return;
- p->p_uticks++;
- if (p->p_nice > NZERO)
- cp_time[CP_NICE]++;
- else
- cp_time[CP_USER]++;
- break;
- case CHECKSTATE_SYS:
-#ifdef GPROF
- /*
- * Kernel statistics are just like addupc_intr, only easier.
- */
- g = &_gmonparam;
- if (g->state == GMON_PROF_ON) {
- i = checkstate_pc[id] - g->lowpc;
- if (i < g->textsize) {
- i /= HISTFRACTION * sizeof(*g->kcount);
- g->kcount[i]++;
- }
- }
-#endif
- if (pscnt > 1)
- return;
-
- p->p_sticks++;
- if (p == cpuid_to_globaldata[id]->gd_idleproc)
- cp_time[CP_IDLE]++;
- else
- cp_time[CP_SYS]++;
- break;
- case CHECKSTATE_INTR:
- default:
-#ifdef GPROF
- /*
- * Kernel statistics are just like addupc_intr, only easier.
- */
- g = &_gmonparam;
- if (g->state == GMON_PROF_ON) {
- i = checkstate_pc[id] - g->lowpc;
- if (i < g->textsize) {
- i /= HISTFRACTION * sizeof(*g->kcount);
- g->kcount[i]++;
- }
- }
-#endif
- if (pscnt > 1)
- return;
- KASSERT(p != NULL, ("NULL process in interrupt state"));
- p->p_iticks++;
- cp_time[CP_INTR]++;
- }
-
- schedclock(p);
-
- /* Update resource usage integrals and maximums. */
- if ((pstats = p->p_stats) != NULL &&
- (ru = &pstats->p_ru) != NULL &&
- (vm = p->p_vmspace) != NULL) {
- ru->ru_ixrss += pgtok(vm->vm_tsize);
- ru->ru_idrss += pgtok(vm->vm_dsize);
- ru->ru_isrss += pgtok(vm->vm_ssize);
- rss = pgtok(vmspace_resident_count(vm));
- if (ru->ru_maxrss < rss)
- ru->ru_maxrss = rss;
- }
-}
-
-void
-forward_statclock(int pscnt)
-{
- int map;
- int id;
- int i;
-
- /* Kludge. We don't yet have separate locks for the interrupts
- * and the kernel. This means that we cannot let the other processors
- * handle complex interrupts while inhibiting them from entering
- * the kernel in a non-interrupt context.
- *
- * What we can do, without changing the locking mechanisms yet,
- * is letting the other processors handle a very simple interrupt
- * (wich determines the processor states), and do the main
- * work ourself.
- */
-
- CTR1(KTR_SMP, "forward_statclock(%d)", pscnt);
-
- if (!smp_started || cold || panicstr)
- return;
-
- /* Step 1: Probe state (user, cpu, interrupt, spinlock, idle ) */
-
- mtx_lock_spin(&smp_rv_mtx);
- map = PCPU_GET(other_cpus) & ~stopped_cpus ;
- checkstate_probed_cpus = 0;
- if (map != 0)
- ipi_selected(map, IPI_CHECKSTATE);
-
- i = 0;
- while (checkstate_probed_cpus != map) {
- alpha_mb();
- /* spin */
- i++;
- if (i == 100000) {
-#ifdef DIAGNOSTIC
- printf("forward_statclock: checkstate %x\n",
- checkstate_probed_cpus);
-#endif
- break;
- }
- }
- mtx_unlock_spin(&smp_rv_mtx);
-
- /*
- * Step 2: walk through other processors processes, update ticks and
- * profiling info.
- */
-
- map = 0;
- /* XXX: wrong, should walk bits in all_cpus */
- for (id = 0; id < mp_ncpus; id++) {
- if (id == PCPU_GET(cpuid))
- continue;
- if (((1 << id) & checkstate_probed_cpus) == 0)
- continue;
- forwarded_statclock(id, pscnt, &map);
- }
- if (map != 0) {
- mtx_lock_spin(&smp_rv_mtx);
- checkstate_need_ast |= map;
- ipi_selected(map, IPI_AST);
- i = 0;
- while ((checkstate_need_ast & map) != 0) {
- alpha_mb();
- /* spin */
- i++;
- if (i > 100000) {
-#ifdef DIAGNOSTIC
- printf("forward_statclock: dropped ast 0x%x\n",
- checkstate_need_ast & map);
-#endif
- break;
- }
- }
- mtx_unlock_spin(&smp_rv_mtx);
- }
-}
-
-void
-forward_hardclock(int pscnt)
-{
- int map;
- int id;
- struct proc *p;
- struct pstats *pstats;
- int i;
-
- /* Kludge. We don't yet have separate locks for the interrupts
- * and the kernel. This means that we cannot let the other processors
- * handle complex interrupts while inhibiting them from entering
- * the kernel in a non-interrupt context.
- *
- * What we can do, without changing the locking mechanisms yet,
- * is letting the other processors handle a very simple interrupt
- * (wich determines the processor states), and do the main
- * work ourself.
- */
-
- CTR1(KTR_SMP, "forward_hardclock(%d)", pscnt);
-
- if (!smp_started || cold || panicstr)
- return;
-
- /* Step 1: Probe state (user, cpu, interrupt, spinlock, idle) */
-
- mtx_lock_spin(&smp_rv_mtx);
- map = PCPU_GET(other_cpus) & ~stopped_cpus ;
- checkstate_probed_cpus = 0;
- if (map != 0)
- ipi_selected(map, IPI_CHECKSTATE);
-
- i = 0;
- while (checkstate_probed_cpus != map) {
- alpha_mb();
- /* spin */
- i++;
- if (i == 100000) {
-#ifdef DIAGNOSTIC
- printf("forward_hardclock: checkstate %x\n",
- checkstate_probed_cpus);
-#endif
- breakpoint();
- break;
- }
- }
- mtx_unlock_spin(&smp_rv_mtx);
-
- /*
- * Step 2: walk through other processors processes, update virtual
- * timer and profiling timer. If stathz == 0, also update ticks and
- * profiling info.
- */
-
- map = 0;
- /* XXX: wrong, should walk bits in all_cpus */
- for (id = 0; id < mp_ncpus; id++) {
- if (id == PCPU_GET(cpuid))
- continue;
- if (((1 << id) & checkstate_probed_cpus) == 0)
- continue;
- p = checkstate_curproc[id];
- if (p) {
- pstats = p->p_stats;
- if (checkstate_cpustate[id] == CHECKSTATE_USER &&
- timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
- itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
- p->p_sflag |= PS_ALRMPEND;
- map |= (1 << id);
- }
- if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
- itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
- p->p_sflag |= PS_PROFPEND;
- map |= (1 << id);
- }
- }
- if (stathz == 0) {
- forwarded_statclock( id, pscnt, &map);
- }
- }
- if (map != 0) {
- mtx_lock_spin(&smp_rv_mtx);
- checkstate_need_ast |= map;
- ipi_selected(map, IPI_AST);
- i = 0;
- while ((checkstate_need_ast & map) != 0) {
- alpha_mb();
- /* spin */
- i++;
- if (i > 100000) {
-#ifdef DIAGNOSTIC
- printf("forward_hardclock: dropped ast 0x%x\n",
- checkstate_need_ast & map);
-#endif
- break;
- }
- }
- mtx_unlock_spin(&smp_rv_mtx);
- }
-}
-#endif /* 0 */
-
-void
-forward_signal(struct proc *p)
-{
- int map;
- int id;
- int i;
-
- /* Kludge. We don't yet have separate locks for the interrupts
- * and the kernel. This means that we cannot let the other processors
- * handle complex interrupts while inhibiting them from entering
- * the kernel in a non-interrupt context.
- *
- * What we can do, without changing the locking mechanisms yet,
- * is letting the other processors handle a very simple interrupt
- * (wich determines the processor states), and do the main
- * work ourself.
- */
-
- CTR1(KTR_SMP, "forward_signal(%p)", p);
-
- if (!smp_started || cold || panicstr)
- return;
- if (!forward_signal_enabled)
- return;
- mtx_lock_spin(&sched_lock);
- while (1) {
- if (p->p_stat != SRUN) {
- mtx_unlock_spin(&sched_lock);
- return;
- }
- id = p->p_oncpu;
- mtx_unlock_spin(&sched_lock);
- if (id == 0xff)
- return;
- map = (1<<id);
- checkstate_need_ast |= map;
- ipi_selected(map, IPI_AST);
- i = 0;
- while ((checkstate_need_ast & map) != 0) {
- alpha_mb();
- /* spin */
- i++;
- if (i > 100000) {
-#if DIAGNOSTIC
- printf("forward_signal: dropped ast 0x%x\n",
- checkstate_need_ast & map);
-#endif
- break;
- }
- }
- mtx_lock_spin(&sched_lock);
- if (id == p->p_oncpu) {
- mtx_unlock_spin(&sched_lock);
- return;
- }
- }
-}
-
-void
-forward_roundrobin(void)
-{
- u_int map;
- int i;
-
- CTR0(KTR_SMP, "forward_roundrobin()");
-
- if (!smp_started || cold || panicstr)
- return;
- if (!forward_roundrobin_enabled)
- return;
- resched_cpus |= PCPU_GET(other_cpus);
- map = PCPU_GET(other_cpus) & ~stopped_cpus ;
- ipi_selected(map, IPI_AST);
- i = 0;
- while ((checkstate_need_ast & map) != 0) {
- alpha_mb();
- /* spin */
- i++;
- if (i > 100000) {
-#if DIAGNOSTIC
- printf("forward_roundrobin: dropped ast 0x%x\n",
- checkstate_need_ast & map);
-#endif
- break;
- }
- }
-}
-
-/*
- * When called the executing CPU will send an IPI to all other CPUs
- * requesting that they halt execution.
- *
- * Usually (but not necessarily) called with 'other_cpus' as its arg.
- *
- * - Signals all CPUs in map to stop.
- * - Waits for each to stop.
- *
- * Returns:
- * -1: error
- * 0: NA
- * 1: ok
- *
- * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
- * from executing at same time.
- */
-int
-stop_cpus(u_int map)
-{
- int i;
-
- if (!smp_started)
- return 0;
-
- CTR1(KTR_SMP, "stop_cpus(%x)", map);
-
- /* send the stop IPI to all CPUs in map */
- ipi_selected(map, IPI_STOP);
-
- i = 0;
- while ((stopped_cpus & map) != map) {
- /* spin */
- i++;
-#ifdef DIAGNOSTIC
- if (i == 100000) {
- printf("timeout stopping cpus\n");
- break;
- }
-#endif
- alpha_mb();
- }
-
- return 1;
-}
-
-
-/*
- * Called by a CPU to restart stopped CPUs.
- *
- * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
- *
- * - Signals all CPUs in map to restart.
- * - Waits for each to restart.
- *
- * Returns:
- * -1: error
- * 0: NA
- * 1: ok
- */
-int
-restart_cpus(u_int map)
-{
- if (!smp_started)
- return 0;
-
- CTR1(KTR_SMP, "restart_cpus(%x)", map);
-
- started_cpus = map; /* signal other cpus to restart */
- alpha_mb();
-
- while ((stopped_cpus & map) != 0) /* wait for each to clear its bit */
- alpha_mb();
-
- return 1;
-}
-
-/*
- * All-CPU rendezvous. CPUs are signalled, all execute the setup function
- * (if specified), rendezvous, execute the action function (if specified),
- * rendezvous again, execute the teardown function (if specified), and then
- * resume.
- *
- * Note that the supplied external functions _must_ be reentrant and aware
- * that they are running in parallel and in an unknown lock context.
- */
-static void (*smp_rv_setup_func)(void *arg);
-static void (*smp_rv_action_func)(void *arg);
-static void (*smp_rv_teardown_func)(void *arg);
-static void *smp_rv_func_arg;
-static volatile int smp_rv_waiters[2];
-
-void
-smp_rendezvous_action(void)
+cpu_mp_announce()
{
- /* setup function */
- if (smp_rv_setup_func != NULL)
- smp_rv_setup_func(smp_rv_func_arg);
- /* spin on entry rendezvous */
- atomic_add_int(&smp_rv_waiters[0], 1);
- while (smp_rv_waiters[0] < mp_ncpus)
- alpha_mb();
- /* action function */
- if (smp_rv_action_func != NULL)
- smp_rv_action_func(smp_rv_func_arg);
- /* spin on exit rendezvous */
- atomic_add_int(&smp_rv_waiters[1], 1);
- while (smp_rv_waiters[1] < mp_ncpus)
- alpha_mb();
- /* teardown function */
- if (smp_rv_teardown_func != NULL)
- smp_rv_teardown_func(smp_rv_func_arg);
-}
-
-void
-smp_rendezvous(void (* setup_func)(void *),
- void (* action_func)(void *),
- void (* teardown_func)(void *),
- void *arg)
-{
-
- if (!smp_started) {
- if (setup_func != NULL)
- setup_func(arg);
- if (action_func != NULL)
- action_func(arg);
- if (teardown_func != NULL)
- teardown_func(arg);
- return;
- }
-
- /* obtain rendezvous lock */
- mtx_lock_spin(&smp_rv_mtx);
-
- /* set static function pointers */
- smp_rv_setup_func = setup_func;
- smp_rv_action_func = action_func;
- smp_rv_teardown_func = teardown_func;
- smp_rv_func_arg = arg;
- smp_rv_waiters[0] = 0;
- smp_rv_waiters[1] = 0;
-
- /* signal other processors, which will enter the IPI with interrupts off */
- ipi_all_but_self(IPI_RENDEZVOUS);
-
- /* call executor function */
- CTR2(KTR_SMP, "smp_rv: calling action = %p, arg = %p\n", action_func,
- arg);
- smp_rendezvous_action();
-
- /* release lock */
- mtx_unlock_spin(&smp_rv_mtx);
}
/*
@@ -979,7 +393,7 @@ ipi_selected(u_int32_t cpus, u_int64_t ipi)
int cpuid = ffs(cpus) - 1;
cpus &= ~(1 << cpuid);
- globaldata = cpuid_to_globaldata[cpuid];
+ globaldata = globaldata_find(cpuid);
if (globaldata) {
atomic_set_64(&globaldata->gd_pending_ipis, ipi);
alpha_mb();
@@ -1048,25 +462,6 @@ smp_handle_ipi(struct trapframe *frame)
case IPI_AST:
CTR0(KTR_SMP, "IPI_AST");
- atomic_clear_int(&checkstate_need_ast, cpumask);
- mtx_lock_spin(&sched_lock);
- curproc->p_sflag |= PS_ASTPENDING;
- mtx_unlock_spin(&sched_lock);
- break;
-
- case IPI_CHECKSTATE:
- CTR0(KTR_SMP, "IPI_CHECKSTATE");
- if (frame->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE)
- checkstate_cpustate[PCPU_GET(cpuid)] =
- CHECKSTATE_USER;
- else if (curproc->p_intr_nesting_level == 1)
- checkstate_cpustate[PCPU_GET(cpuid)] =
- CHECKSTATE_SYS;
- else
- checkstate_cpustate[PCPU_GET(cpuid)] =
- CHECKSTATE_INTR;
- checkstate_curproc[PCPU_GET(cpuid)] = curproc;
- atomic_set_int(&checkstate_probed_cpus, cpumask);
break;
case IPI_STOP:
@@ -1085,7 +480,7 @@ smp_handle_ipi(struct trapframe *frame)
* requests to provide PALcode to secondaries and to start up new
* secondaries that are added to the system on the fly.
*/
- if (PCPU_GET(cpuid) == hwrpb->rpb_primary_cpu_id) {
+ if (PCPU_GET(cpuid) == boot_cpu_id) {
u_int cpuid;
u_int64_t txrdy;
#ifdef DIAGNOSTIC
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index d7fe3df..f70bd00 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -154,6 +154,7 @@
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
+#include <sys/smp.h>
#include <sys/sx.h>
#include <vm/vm.h>
@@ -172,7 +173,6 @@
#include <machine/md_var.h>
#include <machine/rpb.h>
-#include <machine/smp.h>
#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
diff --git a/sys/alpha/alpha/trap.c b/sys/alpha/alpha/trap.c
index 228c50b..7238e7a 100644
--- a/sys/alpha/alpha/trap.c
+++ b/sys/alpha/alpha/trap.c
@@ -42,6 +42,7 @@
#include <sys/exec.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/smp.h>
#include <sys/vmmeter.h>
#include <sys/sysent.h>
#include <sys/syscall.h>
@@ -59,7 +60,6 @@
#include <machine/reg.h>
#include <machine/pal.h>
#include <machine/fpu.h>
-#include <machine/smp.h>
#ifdef KTRACE
#include <sys/uio.h>
@@ -150,7 +150,7 @@ userret(p, frame, oticks)
mtx_lock_spin(&sched_lock);
p->p_pri.pri_level = p->p_pri.pri_user;
- if (resched_wanted()) {
+ if (resched_wanted(p)) {
/*
* Since we are curproc, a clock interrupt could
* change our priority without changing run queues
@@ -877,7 +877,7 @@ ast(framep)
* acquiring and releasing mutexes in assembly is not fun.
*/
mtx_lock_spin(&sched_lock);
- if (!(astpending(p) || resched_wanted())) {
+ if (!(astpending(p) || resched_wanted(p))) {
mtx_unlock_spin(&sched_lock);
return;
}
diff --git a/sys/alpha/include/cpu.h b/sys/alpha/include/cpu.h
index d8ff1cc..f3bd8d2 100644
--- a/sys/alpha/include/cpu.h
+++ b/sys/alpha/include/cpu.h
@@ -68,7 +68,6 @@ struct clockframe {
#define CLKF_USERMODE(framep) TRAPF_USERMODE(&(framep)->cf_tf)
#define CLKF_PC(framep) TRAPF_PC(&(framep)->cf_tf)
-#define CLKF_INTR(framep) (curproc->p_intr_nesting_level >= 2)
/*
* Arrange to handle pending profiling ticks before returning to user mode.
diff --git a/sys/alpha/include/globaldata.h b/sys/alpha/include/globaldata.h
index 15bd8c41a..4d6d02f 100644
--- a/sys/alpha/include/globaldata.h
+++ b/sys/alpha/include/globaldata.h
@@ -66,14 +66,7 @@ struct globaldata {
#endif
};
-SLIST_HEAD(cpuhead, globaldata);
-extern struct cpuhead cpuhead;
-
void globaldata_init(struct globaldata *pcpu, int cpuid, size_t sz);
-struct globaldata *globaldata_find(int cpuid);
-#ifdef SMP
-void globaldata_register(struct globaldata *pcpu);
-#endif
#endif /* _KERNEL */
diff --git a/sys/alpha/include/ipl.h b/sys/alpha/include/ipl.h
index 357dc7a..9b94bf6 100644
--- a/sys/alpha/include/ipl.h
+++ b/sys/alpha/include/ipl.h
@@ -30,23 +30,4 @@
#define _MACHINE_IPL_H_
-#include <machine/cpu.h> /* for pal inlines */
-
-#ifdef SMP
-/*
- * Interprocessor interrupts for SMP.
- */
-#define IPI_INVLTLB 0x0001
-#define IPI_RENDEZVOUS 0x0002
-#define IPI_AST 0x0004
-#define IPI_CHECKSTATE 0x0008
-#define IPI_STOP 0x0010
-
-void ipi_selected(u_int32_t cpus, u_int64_t ipi);
-void ipi_all(u_int64_t ipi);
-void ipi_all_but_self(u_int64_t ipi);
-void ipi_self(u_int64_t ipi);
-void smp_handle_ipi(struct trapframe *frame);
-#endif
-
#endif /* !_MACHINE_IPL_H_ */
diff --git a/sys/alpha/include/pcpu.h b/sys/alpha/include/pcpu.h
index 15bd8c41a..4d6d02f 100644
--- a/sys/alpha/include/pcpu.h
+++ b/sys/alpha/include/pcpu.h
@@ -66,14 +66,7 @@ struct globaldata {
#endif
};
-SLIST_HEAD(cpuhead, globaldata);
-extern struct cpuhead cpuhead;
-
void globaldata_init(struct globaldata *pcpu, int cpuid, size_t sz);
-struct globaldata *globaldata_find(int cpuid);
-#ifdef SMP
-void globaldata_register(struct globaldata *pcpu);
-#endif
#endif /* _KERNEL */
diff --git a/sys/alpha/include/smp.h b/sys/alpha/include/smp.h
index 2c208c8..05af7f4 100644
--- a/sys/alpha/include/smp.h
+++ b/sys/alpha/include/smp.h
@@ -15,41 +15,24 @@
#ifdef _KERNEL
-#include <sys/ipl.h>
-#include <sys/ktr.h>
-#include <sys/mutex.h>
+/*
+ * Interprocessor interrupts for SMP.
+ */
+#define IPI_INVLTLB 0x0001
+#define IPI_RENDEZVOUS 0x0002
+#define IPI_AST 0x0004
+#define IPI_CHECKSTATE 0x0008
+#define IPI_STOP 0x0010
#ifndef LOCORE
-#define BETTER_CLOCK /* unconditional on alpha */
-
-/* global data in mp_machdep.c */
-extern volatile u_int checkstate_probed_cpus;
-extern volatile u_int checkstate_need_ast;
-extern volatile u_int resched_cpus;
-extern void (*cpustop_restartfunc) __P((void));
-
-extern int smp_active;
-extern int mp_ncpus;
-extern u_int all_cpus;
-extern u_int started_cpus;
-extern u_int stopped_cpus;
+extern u_int boot_cpu_id;
-/* functions in mp_machdep.c */
-void mp_start(void);
-void mp_announce(void);
-void smp_invltlb(void);
-void forward_statclock(int pscnt);
-void forward_hardclock(int pscnt);
-void forward_signal(struct proc *);
-void forward_roundrobin(void);
-int stop_cpus(u_int);
-int restart_cpus(u_int);
-void smp_rendezvous_action(void);
-void smp_rendezvous(void (*)(void *),
- void (*)(void *),
- void (*)(void *),
- void *arg);
+void ipi_selected(u_int cpus, u_int64_t ipi);
+void ipi_all(u_int64_t ipi);
+void ipi_all_but_self(u_int64_t ipi);
+void ipi_self(u_int64_t ipi);
+void smp_handle_ipi(struct trapframe *frame);
void smp_init_secondary(void);
#endif /* !LOCORE */
OpenPOWER on IntegriCloud