author:    jhb <jhb@FreeBSD.org>  2001-04-27 19:28:25 +0000
committer: jhb <jhb@FreeBSD.org>  2001-04-27 19:28:25 +0000
commit:    8bfdafc9349392c2fe02f548d6ffe6db56626575 (patch)
tree:      b44641a14ad9f8eb3b338e429775d3298b8946e2 /sys/amd64
parent:    95c17411607d3bc160b63f97e94912bf27b24274 (diff)
Overhaul of the SMP code. Several portions of the SMP kernel support have
been made machine independent and various other adjustments have been made to
support Alpha SMP.

- It splits the per-process portions of hardclock() and statclock() off into
  hardclock_process() and statclock_process() respectively.  hardclock() and
  statclock() call the *_process() functions for the current process so that
  UP systems will run as before.  For SMP systems, it is simply necessary to
  ensure that all other processors execute the *_process() functions when the
  main clock functions are triggered on one CPU by an interrupt.  For the
  Alpha 4100, clock interrupts are delivered in a staggered broadcast fashion,
  so we simply call hardclock/statclock on the boot CPU and call the
  *_process() functions on the secondaries.  For x86, we call statclock and
  hardclock as usual and then call forward_hardclock/statclock in the MD code
  to send an IPI that causes the APs to execute forwarded_hardclock/statclock,
  which then call the *_process() functions.
- forward_signal() and forward_roundrobin() have been reworked to be MI and to
  involve less hackery.  Now the CPU doing the forward sets any flags, etc.
  and sends a very simple IPI_AST to the other CPU(s).  AST IPIs now basically
  just return so that they can execute ast() and don't bother with setting the
  astpending or needresched flags themselves.  This also removes the loop in
  forward_signal() as sched_lock closes the race condition that the loop
  worked around.
- need_resched(), resched_wanted() and clear_resched() have been changed to
  take a process to act on rather than assuming curproc, so that they can be
  used to implement forward_roundrobin() as described above.
- Various other SMP variables have been moved to an MI subr_smp.c, and a new
  header, sys/smp.h, declares MI SMP variables and APIs.  The IPI APIs from
  machine/ipl.h have moved to machine/smp.h, which is included by sys/smp.h.
- The globaldata_register() and globaldata_find() functions, as well as the
  SLIST of globaldata structures, have become MI and moved into subr_smp.c.
  Also, the globaldata list is only available if SMP support is compiled in.

Reviewed by:	jake, peter
Looked over by:	eivind
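To make the new x86 clock-forwarding path concrete, the flow reads roughly as
follows.  This is an illustrative sketch only: clock_interrupt() is a stand-in
for the real i8254 handler (clkintr() in tsc.c below), while the other two
functions are condensed from the mp_machdep.c hunks in this diff.

/*
 * Sketch of the x86 hardclock forwarding introduced by this commit.
 */
void
clock_interrupt(struct clockframe *frame)	/* stand-in for clkintr() */
{
	hardclock(frame);	/* MI work + hardclock_process(curproc) */
#ifdef SMP
	forward_hardclock();	/* poke the other CPUs */
#endif
}

void
forward_hardclock(void)
{
	u_int map;

	if (!smp_started || !invltlb_ok || cold || panicstr)
		return;
	/* every other CPU that is not currently stopped */
	map = PCPU_GET(other_cpus) & ~stopped_cpus;
	if (map != 0)
		ipi_selected(map, IPI_HARDCLOCK);   /* vectors to Xhardclock */
}

/* Each AP's Xhardclock stub pushes a trapframe and calls this. */
void
forwarded_hardclock(struct trapframe frame)
{
	mtx_lock_spin(&sched_lock);
	hardclock_process(curproc, TRAPF_USERMODE(&frame));
	mtx_unlock_spin(&sched_lock);
}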
Diffstat (limited to 'sys/amd64')
-rw-r--r--sys/amd64/amd64/amd64_mem.c5
-rw-r--r--sys/amd64/amd64/apic_vector.S155
-rw-r--r--sys/amd64/amd64/db_interface.c61
-rw-r--r--sys/amd64/amd64/machdep.c21
-rw-r--r--sys/amd64/amd64/mp_machdep.c596
-rw-r--r--sys/amd64/amd64/mptable.c596
-rw-r--r--sys/amd64/amd64/sys_machdep.c14
-rw-r--r--sys/amd64/amd64/trap.c4
-rw-r--r--sys/amd64/amd64/tsc.c13
-rw-r--r--sys/amd64/amd64/vm_machdep.c4
-rw-r--r--sys/amd64/include/cpu.h2
-rw-r--r--sys/amd64/include/mptable.h596
-rw-r--r--sys/amd64/include/pcpu.h3
-rw-r--r--sys/amd64/include/smp.h50
-rw-r--r--sys/amd64/isa/clock.c13
-rw-r--r--sys/amd64/isa/intr_machdep.h8
16 files changed, 302 insertions, 1839 deletions
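For orientation before the per-file hunks: the machine-independent surface
this change introduces can be reconstructed from the callers below and from
the commit message.  The sketch is approximate; the authoritative declarations
live in the new sys/smp.h, which falls outside this (sys/amd64-limited)
diffstat.

/* Approximate shape of the new MI <sys/smp.h> interface (not verbatim). */
extern int	smp_started;		/* has the system started? */
extern int	mp_ncpus;		/* # of CPUs, including the BSP */
extern u_int	all_cpus;		/* bitmap of all available CPUs */
extern u_int	stopped_cpus, started_cpus;

void	globaldata_register(struct globaldata *gd);
struct globaldata *globaldata_find(int cpuid);

int	cpu_mp_probe(void);	/* MD: is this an MP system? */
void	cpu_mp_start(void);	/* MD: start the APs */
void	cpu_mp_announce(void);	/* MD: describe the CPUs found */

void	forward_signal(struct proc *p);
void	forward_roundrobin(void);
int	stop_cpus(u_int map);
int	restart_cpus(u_int map);
void	smp_rendezvous(void (*setup_func)(void *), void (*action_func)(void *),
	    void (*teardown_func)(void *), void *arg);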
diff --git a/sys/amd64/amd64/amd64_mem.c b/sys/amd64/amd64/amd64_mem.c
index 7c3ae75..2d687af 100644
--- a/sys/amd64/amd64/amd64_mem.c
+++ b/sys/amd64/amd64/amd64_mem.c
@@ -31,14 +31,11 @@
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
+#include <sys/smp.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
-#ifdef SMP
-#include <machine/smp.h>
-#endif
-
/*
* i686 memory range operations
*
diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S
index 3f0521f..5c68f81 100644
--- a/sys/amd64/amd64/apic_vector.S
+++ b/sys/amd64/amd64/apic_vector.S
@@ -182,7 +182,6 @@ Xspuriousint:
iret
-
/*
* Handle TLB shootdowns.
*/
@@ -211,71 +210,61 @@ Xinvltlb:
popl %eax
iret
-
/*
- * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
- *
- * - Stores current cpu state in checkstate_cpustate[cpuid]
- * 0 == user, 1 == sys, 2 == intr
- * - Stores current process in checkstate_curproc[cpuid]
- *
- * - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
- *
- * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
+ * Forward hardclock to another CPU. Pushes a trapframe and calls
+ * forwarded_hardclock().
*/
-
.text
SUPERALIGN_TEXT
- .globl Xcpucheckstate
- .globl checkstate_cpustate
- .globl checkstate_curproc
- .globl checkstate_pc
-Xcpucheckstate:
- pushl %eax
- pushl %ebx
- pushl %ds /* save current data segment */
- pushl %fs
-
- movl $KDSEL, %eax
- mov %ax, %ds /* use KERNEL data segment */
+ .globl Xhardclock
+Xhardclock:
+ PUSH_FRAME
+ movl $KDSEL, %eax /* reload with kernel's data segment */
+ mov %ax, %ds
+ mov %ax, %es
movl $KPSEL, %eax
mov %ax, %fs
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
- movl $0, %ebx
- movl 20(%esp), %eax
- andl $3, %eax
- cmpl $3, %eax
- je 1f
- testl $PSL_VM, 24(%esp)
- jne 1f
- incl %ebx /* system or interrupt */
-1:
- movl PCPU(CPUID), %eax
- movl %ebx, checkstate_cpustate(,%eax,4)
- movl PCPU(CURPROC), %ebx
- movl %ebx, checkstate_curproc(,%eax,4)
-
- movl 16(%esp), %ebx
- movl %ebx, checkstate_pc(,%eax,4)
+ movl PCPU(CURPROC),%ebx
+ incl P_INTR_NESTING_LEVEL(%ebx)
+ call forwarded_hardclock
+ decl P_INTR_NESTING_LEVEL(%ebx)
+ MEXITCOUNT
+ jmp doreti
- lock /* checkstate_probed_cpus |= (1<<id) */
- btsl %eax, checkstate_probed_cpus
+/*
+ * Forward statclock to another CPU. Pushes a trapframe and calls
+ * forwarded_statclock().
+ */
+ .text
+ SUPERALIGN_TEXT
+ .globl Xstatclock
+Xstatclock:
+ PUSH_FRAME
+ movl $KDSEL, %eax /* reload with kernel's data segment */
+ mov %ax, %ds
+ mov %ax, %es
+ movl $KPSEL, %eax
+ mov %ax, %fs
- popl %fs
- popl %ds /* restore previous data segment */
- popl %ebx
- popl %eax
- iret
+ movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
+ FAKE_MCOUNT(13*4(%esp))
+ movl PCPU(CURPROC),%ebx
+ incl P_INTR_NESTING_LEVEL(%ebx)
+ call forwarded_statclock
+ decl P_INTR_NESTING_LEVEL(%ebx)
+ MEXITCOUNT
+ jmp doreti
/*
* Executed by a CPU when it receives an Xcpuast IPI from another CPU,
*
- * - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
- *
- * - We need a better method of triggering asts on other cpus.
+ * The other CPU has already executed aston() or need_resched() on our
+ * current process, so we simply need to ack the interrupt and return
+ * via doreti to run ast().
*/
.text
@@ -289,40 +278,12 @@ Xcpuast:
movl $KPSEL, %eax
mov %ax, %fs
- movl PCPU(CPUID), %eax
- lock /* checkstate_need_ast &= ~(1<<id) */
- btrl %eax, checkstate_need_ast
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
- lock
- btsl %eax, checkstate_pending_ast
- jc 1f
-
FAKE_MCOUNT(13*4(%esp))
- MTX_LOCK_SPIN(sched_lock, 0)
- movl PCPU(CURPROC),%ebx
- orl $PS_ASTPENDING, P_SFLAG(%ebx)
-
- movl PCPU(CPUID), %eax
- lock
- btrl %eax, checkstate_pending_ast
- lock
- btrl %eax, CNAME(resched_cpus)
- jnc 2f
- orl $PS_NEEDRESCHED, P_SFLAG(%ebx)
- lock
- incl CNAME(want_resched_cnt)
-2:
- MTX_UNLOCK_SPIN(sched_lock)
- lock
- incl CNAME(cpuast_cnt)
MEXITCOUNT
jmp doreti
-1:
- /* We are already in the process of delivering an ast for this CPU */
- POP_FRAME
- iret
/*
* Executed by a CPU when it receives an Xcpustop IPI from another CPU,
@@ -331,7 +292,6 @@ Xcpuast:
* - Waits for permission to restart.
* - Signals its restart.
*/
-
.text
SUPERALIGN_TEXT
.globl Xcpustop
@@ -357,20 +317,19 @@ Xcpustop:
pushl %eax
call CNAME(savectx) /* Save process context */
addl $4, %esp
-
movl PCPU(CPUID), %eax
lock
- btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
+ btsl %eax, CNAME(stopped_cpus) /* stopped_cpus |= (1<<id) */
1:
- btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
+ btl %eax, CNAME(started_cpus) /* while (!(started_cpus & (1<<id))) */
jnc 1b
lock
- btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
+ btrl %eax, CNAME(started_cpus) /* started_cpus &= ~(1<<id) */
lock
- btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
+ btrl %eax, CNAME(stopped_cpus) /* stopped_cpus &= ~(1<<id) */
test %eax, %eax
jnz 2f
@@ -492,34 +451,6 @@ _xhits:
.space (NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */
-/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
- .globl stopped_cpus, started_cpus
-stopped_cpus:
- .long 0
-started_cpus:
- .long 0
-
- .globl checkstate_probed_cpus
-checkstate_probed_cpus:
- .long 0
- .globl checkstate_need_ast
-checkstate_need_ast:
- .long 0
-checkstate_pending_ast:
- .long 0
- .globl CNAME(resched_cpus)
- .globl CNAME(want_resched_cnt)
- .globl CNAME(cpuast_cnt)
- .globl CNAME(cpustop_restartfunc)
-CNAME(resched_cpus):
- .long 0
-CNAME(want_resched_cnt):
- .long 0
-CNAME(cpuast_cnt):
- .long 0
-CNAME(cpustop_restartfunc):
- .long 0
-
.globl apic_pin_trigger
apic_pin_trigger:
.long 0
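The Xcpustop handshake above boils down to two shared bitmasks manipulated
with locked bit instructions; the stopped/started globals merely move behind
CNAME() now that they are defined in MI C code rather than here.  A
self-contained userspace model of the same protocol, with function names
invented purely for illustration:

#include <stdatomic.h>

static _Atomic unsigned int stopped_cpus, started_cpus;

/* What a CPU does on receiving the stop IPI (cf. Xcpustop above). */
void
cpu_stop_self(int id)
{
	atomic_fetch_or(&stopped_cpus, 1U << id);	/* lock btsl */
	while ((atomic_load(&started_cpus) & (1U << id)) == 0)
		;					/* btl; jnc 1b */
	atomic_fetch_and(&started_cpus, ~(1U << id));	/* lock btrl */
	atomic_fetch_and(&stopped_cpus, ~(1U << id));	/* lock btrl */
}

/* What restart_cpus() does on the driving CPU. */
void
cpu_restart(unsigned int map)
{
	atomic_store(&started_cpus, map);	/* signal targets to resume */
	while ((atomic_load(&stopped_cpus) & map) != 0)
		;		/* wait for each to clear its stopped bit */
}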
diff --git a/sys/amd64/amd64/db_interface.c b/sys/amd64/amd64/db_interface.c
index 8c7dc0b..cdaefe4 100644
--- a/sys/amd64/amd64/db_interface.c
+++ b/sys/amd64/amd64/db_interface.c
@@ -37,10 +37,10 @@
#include <sys/linker_set.h>
#include <sys/lock.h>
#include <sys/proc.h>
+#include <sys/smp.h>
#include <machine/cpu.h>
#ifdef SMP
-#include <machine/smp.h>
#include <machine/smptests.h> /** CPUSTOP_ON_DDBBREAK */
#endif
@@ -334,43 +334,44 @@ Debugger(msg)
DB_SHOW_COMMAND(pcpu, db_show_pcpu)
{
struct globaldata *gd;
+#ifdef SMP
int id;
if (have_addr)
id = ((addr >> 4) % 16) * 10 + (addr % 16);
else
id = PCPU_GET(cpuid);
- SLIST_FOREACH(gd, &cpuhead, gd_allcpu) {
- if (gd->gd_cpuid == id)
- break;
- }
- if (gd == NULL)
+ gd = globaldata_find(id);
+ if (gd == NULL) {
db_printf("CPU %d not found\n", id);
- else {
- db_printf("cpuid = %d\n", gd->gd_cpuid);
- db_printf("curproc = ");
- if (gd->gd_curproc != NULL)
- db_printf("%p: pid %d \"%s\"\n", gd->gd_curproc,
- gd->gd_curproc->p_pid, gd->gd_curproc->p_comm);
- else
- db_printf("none\n");
- db_printf("curpcb = %p\n", gd->gd_curpcb);
- db_printf("npxproc = ");
- if (gd->gd_npxproc != NULL)
- db_printf("%p: pid %d \"%s\"\n", gd->gd_npxproc,
- gd->gd_npxproc->p_pid, gd->gd_npxproc->p_comm);
- else
- db_printf("none\n");
- db_printf("idleproc = ");
- if (gd->gd_idleproc != NULL)
- db_printf("%p: pid %d \"%s\"\n", gd->gd_idleproc,
- gd->gd_idleproc->p_pid, gd->gd_idleproc->p_comm);
- else
- db_printf("none\n");
+ return;
+ }
+#else
+ gd = GLOBALDATA;
+#endif
+ db_printf("cpuid = %d\n", gd->gd_cpuid);
+ db_printf("curproc = ");
+ if (gd->gd_curproc != NULL)
+ db_printf("%p: pid %d \"%s\"\n", gd->gd_curproc,
+ gd->gd_curproc->p_pid, gd->gd_curproc->p_comm);
+ else
+ db_printf("none\n");
+ db_printf("curpcb = %p\n", gd->gd_curpcb);
+ db_printf("npxproc = ");
+ if (gd->gd_npxproc != NULL)
+ db_printf("%p: pid %d \"%s\"\n", gd->gd_npxproc,
+ gd->gd_npxproc->p_pid, gd->gd_npxproc->p_comm);
+ else
+ db_printf("none\n");
+ db_printf("idleproc = ");
+ if (gd->gd_idleproc != NULL)
+ db_printf("%p: pid %d \"%s\"\n", gd->gd_idleproc,
+ gd->gd_idleproc->p_pid, gd->gd_idleproc->p_comm);
+ else
+ db_printf("none\n");
#ifdef WITNESS
- db_printf("spin locks held:\n");
- witness_list_locks(&gd->gd_spinlocks);
+ db_printf("spin locks held:\n");
+ witness_list_locks(&gd->gd_spinlocks);
#endif
- }
}
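db_show_pcpu now goes through globaldata_find() rather than walking cpuhead by
hand.  Per the commit message, those helpers and the SLIST itself moved into
the MI kern/subr_smp.c; a plausible sketch of what they do there, assumed from
the code removed above rather than copied from that file:

#include <sys/queue.h>

static SLIST_HEAD(, globaldata) cpuhead = SLIST_HEAD_INITIALIZER(cpuhead);

void
globaldata_register(struct globaldata *gd)
{
	SLIST_INSERT_HEAD(&cpuhead, gd, gd_allcpu);
}

struct globaldata *
globaldata_find(int cpuid)
{
	struct globaldata *gd;

	SLIST_FOREACH(gd, &cpuhead, gd_allcpu)
		if (gd->gd_cpuid == cpuid)
			return (gd);
	return (NULL);
}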
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index dbdc61a..c89852a 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -66,6 +66,7 @@
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/reboot.h>
+#include <sys/smp.h>
#include <sys/callout.h>
#include <sys/msgbuf.h>
#include <sys/sysent.h>
@@ -104,9 +105,6 @@
#include <machine/globaldata.h>
#include <machine/globals.h>
#include <machine/intrcnt.h>
-#ifdef SMP
-#include <machine/smp.h>
-#endif
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
@@ -249,8 +247,6 @@ static struct trapframe proc0_tf;
static struct globaldata __globaldata;
#endif
-struct cpuhead cpuhead;
-
struct mtx sched_lock;
struct mtx Giant;
@@ -441,17 +437,12 @@ again:
bufinit();
vm_pager_bufferinit();
- SLIST_INIT(&cpuhead);
- SLIST_INSERT_HEAD(&cpuhead, GLOBALDATA, gd_allcpu);
-
#ifdef SMP
- /*
- * OK, enough kmem_alloc/malloc state should be up, lets get on with it!
- */
- mp_start(); /* fire up the APs and APICs */
- mp_announce();
-#endif /* SMP */
+ globaldata_register(GLOBALDATA);
+#else
+ /* For SMP, we delay the cpu_setregs() until after SMP startup. */
cpu_setregs();
+#endif
}
/*
@@ -1631,7 +1622,7 @@ physmap_done:
physmap[1] = mp_bootaddress(physmap[1] / 1024);
/* look for the MP hardware - needed for apic addresses */
- mp_probe();
+ i386_mp_probe();
#endif
/*
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 28a5c72..21e6b6e 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -42,6 +42,7 @@
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
+#include <sys/smp.h>
#include <sys/dkstat.h>
#include <sys/cons.h> /* cngetc() */
@@ -57,9 +58,9 @@
#include <sys/gmon.h>
#endif
-#include <machine/smp.h>
#include <machine/apic.h>
#include <machine/atomic.h>
+#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/ipl.h>
#include <machine/mpapic.h>
@@ -243,7 +244,6 @@ int current_postcode;
extern struct region_descriptor r_gdt, r_idt;
int bsp_apic_ready = 0; /* flags useability of BSP apic */
-int mp_ncpus; /* # of CPUs, including BSP */
int mp_naps; /* # of Applications processors */
int mp_nbusses; /* # of busses */
int mp_napics; /* # of IO APICs */
@@ -273,9 +273,6 @@ int io_num_to_apic_id[NAPICID];
int apic_id_to_logical[NAPICID];
-/* Bitmap of all available CPUs */
-u_int all_cpus;
-
/* AP uses this during bootstrap. Do not staticize. */
char *bootSTK;
static int bootAP;
@@ -288,28 +285,9 @@ extern pt_entry_t *SMPpt;
struct pcb stoppcbs[MAXCPU];
-int smp_started; /* has the system started? */
-int smp_active = 0; /* are the APs allowed to run? */
-SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, "");
-
-/* XXX maybe should be hw.ncpu */
-static int smp_cpus = 1; /* how many cpu's running */
-SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, "");
-
int invltlb_ok = 0; /* throttle smp_invltlb() till safe */
SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
-/* Enable forwarding of a signal to a process running on a different CPU */
-static int forward_signal_enabled = 1;
-SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
- &forward_signal_enabled, 0, "");
-
-/* Enable forwarding of roundrobin to all other cpus */
-static int forward_roundrobin_enabled = 1;
-SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
- &forward_roundrobin_enabled, 0, "");
-
-
/*
* Local data and functions.
*/
@@ -354,9 +332,6 @@ struct mtx mcount_mtx;
struct mtx com_mtx;
#endif /* USE_COMLOCK */
-/* lock around the MP rendezvous */
-static struct mtx smp_rv_mtx;
-
static void
init_locks(void)
{
@@ -367,13 +342,9 @@ init_locks(void)
*/
mtx_init(&mcount_mtx, "mcount", MTX_DEF);
- mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN);
-
#ifdef USE_COMLOCK
mtx_init(&com_mtx, "com", MTX_SPIN);
#endif /* USE_COMLOCK */
-
- mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
}
/*
@@ -397,8 +368,8 @@ mp_bootaddress(u_int basemem)
/*
* Look for an Intel MP spec table (ie, SMP capable hardware).
*/
-int
-mp_probe(void)
+void
+i386_mp_probe(void)
{
int x;
u_long segment;
@@ -427,7 +398,7 @@ mp_probe(void)
/* nothing found */
mpfps = (mpfps_t)0;
mp_capable = 0;
- return 0;
+ return;
found:
/* calculate needed resources */
@@ -436,15 +407,19 @@ found:
/* flag fact that we are running multiple processors */
mp_capable = 1;
- return 1;
}
+int
+cpu_mp_probe(void)
+{
+ return (mp_capable);
+}
/*
* Initialize the SMP hardware and the APIC and start up the AP's.
*/
void
-mp_start(void)
+cpu_mp_start(void)
{
POSTCODE(MP_START_POST);
@@ -453,6 +428,8 @@ mp_start(void)
mp_enable(boot_address);
else
panic("MP hardware not found!");
+
+ cpu_setregs();
}
@@ -460,13 +437,12 @@ mp_start(void)
* Print various information about the SMP system hardware and setup.
*/
void
-mp_announce(void)
+cpu_mp_announce(void)
{
int x;
POSTCODE(MP_ANNOUNCE_POST);
- printf("FreeBSD/SMP: Multiprocessor motherboard\n");
printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
printf(", version: 0x%08x", cpu_apic_versions[0]);
printf(", at 0x%08x\n", cpu_apic_address);
@@ -623,8 +599,12 @@ mp_enable(u_int boot_addr)
setidt(XINVLTLB_OFFSET, Xinvltlb,
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- /* install an inter-CPU IPI for reading processor state */
- setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate,
+ /* install an inter-CPU IPI for forwarding hardclock() */
+ setidt(XHARDCLOCK_OFFSET, Xhardclock,
+ SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+
+ /* install an inter-CPU IPI for forwarding statclock() */
+ setidt(XSTATCLOCK_OFFSET, Xstatclock,
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
/* install an inter-CPU IPI for all-CPU rendezvous */
@@ -1938,6 +1918,8 @@ start_all_aps(u_int boot_addr)
POSTCODE(START_ALL_APS_POST);
+ mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
+
/* initialize BSP's local APIC */
apic_initialize();
bsp_apic_ready = 1;
@@ -1985,8 +1967,8 @@ start_all_aps(u_int boot_addr)
(PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
/* prime data page for it to use */
- SLIST_INSERT_HEAD(&cpuhead, gd, gd_allcpu);
gd->gd_cpuid = x;
+ globaldata_register(gd);
/* setup a vector to our boot code */
*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
@@ -2328,472 +2310,65 @@ ap_init(void)
panic("scheduler returned us to ap_init");
}
-#define CHECKSTATE_USER 0
-#define CHECKSTATE_SYS 1
-#define CHECKSTATE_INTR 2
-
-/* Do not staticize. Used from apic_vector.s */
-struct proc* checkstate_curproc[MAXCPU];
-int checkstate_cpustate[MAXCPU];
-u_long checkstate_pc[MAXCPU];
-
-#define PC_TO_INDEX(pc, prof) \
- ((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
- (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
-
-static void
-addupc_intr_forwarded(struct proc *p, int id, int *astmap)
-{
- int i;
- struct uprof *prof;
- u_long pc;
-
- pc = checkstate_pc[id];
- prof = &p->p_stats->p_prof;
- if (pc >= prof->pr_off &&
- (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) {
- mtx_assert(&sched_lock, MA_OWNED);
- if ((p->p_sflag & PS_OWEUPC) == 0) {
- prof->pr_addr = pc;
- prof->pr_ticks = 1;
- p->p_sflag |= PS_OWEUPC;
- }
- *astmap |= (1 << id);
- }
-}
-
-static void
-forwarded_statclock(int id, int pscnt, int *astmap)
-{
- struct pstats *pstats;
- long rss;
- struct rusage *ru;
- struct vmspace *vm;
- int cpustate;
- struct proc *p;
-#ifdef GPROF
- register struct gmonparam *g;
- int i;
-#endif
-
- mtx_assert(&sched_lock, MA_OWNED);
- p = checkstate_curproc[id];
- cpustate = checkstate_cpustate[id];
-
- /* XXX */
- if (p->p_ithd)
- cpustate = CHECKSTATE_INTR;
- else if (p == SMP_prvspace[id].globaldata.gd_idleproc)
- cpustate = CHECKSTATE_SYS;
-
- switch (cpustate) {
- case CHECKSTATE_USER:
- if (p->p_sflag & PS_PROFIL)
- addupc_intr_forwarded(p, id, astmap);
- if (pscnt > 1)
- return;
- p->p_uticks++;
- if (p->p_nice > NZERO)
- cp_time[CP_NICE]++;
- else
- cp_time[CP_USER]++;
- break;
- case CHECKSTATE_SYS:
-#ifdef GPROF
- /*
- * Kernel statistics are just like addupc_intr, only easier.
- */
- g = &_gmonparam;
- if (g->state == GMON_PROF_ON) {
- i = checkstate_pc[id] - g->lowpc;
- if (i < g->textsize) {
- i /= HISTFRACTION * sizeof(*g->kcount);
- g->kcount[i]++;
- }
- }
-#endif
- if (pscnt > 1)
- return;
-
- p->p_sticks++;
- if (p == SMP_prvspace[id].globaldata.gd_idleproc)
- cp_time[CP_IDLE]++;
- else
- cp_time[CP_SYS]++;
- break;
- case CHECKSTATE_INTR:
- default:
-#ifdef GPROF
- /*
- * Kernel statistics are just like addupc_intr, only easier.
- */
- g = &_gmonparam;
- if (g->state == GMON_PROF_ON) {
- i = checkstate_pc[id] - g->lowpc;
- if (i < g->textsize) {
- i /= HISTFRACTION * sizeof(*g->kcount);
- g->kcount[i]++;
- }
- }
-#endif
- if (pscnt > 1)
- return;
- KASSERT(p != NULL, ("NULL process in interrupt state"));
- p->p_iticks++;
- cp_time[CP_INTR]++;
- }
-
- schedclock(p);
-
- /* Update resource usage integrals and maximums. */
- if ((pstats = p->p_stats) != NULL &&
- (ru = &pstats->p_ru) != NULL &&
- (vm = p->p_vmspace) != NULL) {
- ru->ru_ixrss += pgtok(vm->vm_tsize);
- ru->ru_idrss += pgtok(vm->vm_dsize);
- ru->ru_isrss += pgtok(vm->vm_ssize);
- rss = pgtok(vmspace_resident_count(vm));
- if (ru->ru_maxrss < rss)
- ru->ru_maxrss = rss;
- }
-}
-
+/*
+ * For statclock, we send an IPI to all CPU's to have them call this
+ * function.
+ */
void
-forward_statclock(int pscnt)
+forwarded_statclock(struct trapframe frame)
{
- int map;
- int id;
- int i;
- /* Kludge. We don't yet have separate locks for the interrupts
- * and the kernel. This means that we cannot let the other processors
- * handle complex interrupts while inhibiting them from entering
- * the kernel in a non-interrupt context.
- *
- * What we can do, without changing the locking mechanisms yet,
- * is letting the other processors handle a very simple interrupt
- * (wich determines the processor states), and do the main
- * work ourself.
- */
-
- CTR1(KTR_SMP, "forward_statclock(%d)", pscnt);
-
- if (!smp_started || !invltlb_ok || cold || panicstr)
- return;
-
- /* Step 1: Probe state (user, cpu, interrupt, spinlock, idle ) */
-
- map = PCPU_GET(other_cpus) & ~stopped_cpus ;
- checkstate_probed_cpus = 0;
- if (map != 0)
- ipi_selected(map, IPI_CHECKSTATE);
-
- i = 0;
- while (checkstate_probed_cpus != map) {
- /* spin */
- i++;
- if (i == 100000) {
-#ifdef DIAGNOSTIC
- printf("forward_statclock: checkstate %x\n",
- checkstate_probed_cpus);
-#endif
- break;
- }
- }
-
- /*
- * Step 2: walk through other processors processes, update ticks and
- * profiling info.
- */
-
- map = 0;
- for (id = 0; id < mp_ncpus; id++) {
- if (id == PCPU_GET(cpuid))
- continue;
- if (((1 << id) & checkstate_probed_cpus) == 0)
- continue;
- forwarded_statclock(id, pscnt, &map);
- }
- if (map != 0) {
- checkstate_need_ast |= map;
- ipi_selected(map, IPI_AST);
- i = 0;
- while ((checkstate_need_ast & map) != 0) {
- /* spin */
- i++;
- if (i > 100000) {
-#ifdef DIAGNOSTIC
- printf("forward_statclock: dropped ast 0x%x\n",
- checkstate_need_ast & map);
-#endif
- break;
- }
- }
- }
+ mtx_lock_spin(&sched_lock);
+ statclock_process(curproc, TRAPF_PC(&frame), TRAPF_USERMODE(&frame));
+ mtx_unlock_spin(&sched_lock);
}
-void
-forward_hardclock(int pscnt)
+void
+forward_statclock(void)
{
int map;
- int id;
- struct proc *p;
- struct pstats *pstats;
- int i;
-
- /* Kludge. We don't yet have separate locks for the interrupts
- * and the kernel. This means that we cannot let the other processors
- * handle complex interrupts while inhibiting them from entering
- * the kernel in a non-interrupt context.
- *
- * What we can do, without changing the locking mechanisms yet,
- * is letting the other processors handle a very simple interrupt
- * (wich determines the processor states), and do the main
- * work ourself.
- */
- CTR1(KTR_SMP, "forward_hardclock(%d)", pscnt);
+ CTR0(KTR_SMP, "forward_statclock");
if (!smp_started || !invltlb_ok || cold || panicstr)
return;
- /* Step 1: Probe state (user, cpu, interrupt, spinlock, idle) */
-
map = PCPU_GET(other_cpus) & ~stopped_cpus ;
- checkstate_probed_cpus = 0;
if (map != 0)
- ipi_selected(map, IPI_CHECKSTATE);
-
- i = 0;
- while (checkstate_probed_cpus != map) {
- /* spin */
- i++;
- if (i == 100000) {
-#ifdef DIAGNOSTIC
- printf("forward_hardclock: checkstate %x\n",
- checkstate_probed_cpus);
-#endif
- break;
- }
- }
-
- /*
- * Step 2: walk through other processors processes, update virtual
- * timer and profiling timer. If stathz == 0, also update ticks and
- * profiling info.
- */
-
- map = 0;
- for (id = 0; id < mp_ncpus; id++) {
- if (id == PCPU_GET(cpuid))
- continue;
- if (((1 << id) & checkstate_probed_cpus) == 0)
- continue;
- p = checkstate_curproc[id];
- if (p) {
- pstats = p->p_stats;
- if (checkstate_cpustate[id] == CHECKSTATE_USER &&
- timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
- itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
- p->p_sflag |= PS_ALRMPEND;
- map |= (1 << id);
- }
- if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
- itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
- p->p_sflag |= PS_PROFPEND;
- map |= (1 << id);
- }
- }
- if (stathz == 0) {
- forwarded_statclock( id, pscnt, &map);
- }
- }
- if (map != 0) {
- checkstate_need_ast |= map;
- ipi_selected(map, IPI_AST);
- i = 0;
- while ((checkstate_need_ast & map) != 0) {
- /* spin */
- i++;
- if (i > 100000) {
-#ifdef DIAGNOSTIC
- printf("forward_hardclock: dropped ast 0x%x\n",
- checkstate_need_ast & map);
-#endif
- break;
- }
- }
- }
+ ipi_selected(map, IPI_STATCLOCK);
}
-void
-forward_signal(struct proc *p)
+/*
+ * For each hardclock(), we send an IPI to all other CPU's to have them
+ * execute this function. It would be nice to reduce contention on
+ * sched_lock if we could simply peek at the CPU to determine the user/kernel
+ * state and call hardclock_process() on the CPU receiving the clock interrupt
+ * and then just use a simple IPI to handle any ast's if needed.
+ */
+void
+forwarded_hardclock(struct trapframe frame)
{
- int map;
- int id;
- int i;
-
- /* Kludge. We don't yet have separate locks for the interrupts
- * and the kernel. This means that we cannot let the other processors
- * handle complex interrupts while inhibiting them from entering
- * the kernel in a non-interrupt context.
- *
- * What we can do, without changing the locking mechanisms yet,
- * is letting the other processors handle a very simple interrupt
- * (wich determines the processor states), and do the main
- * work ourself.
- */
-
- CTR1(KTR_SMP, "forward_signal(%p)", p);
- if (!smp_started || !invltlb_ok || cold || panicstr)
- return;
- if (!forward_signal_enabled)
- return;
mtx_lock_spin(&sched_lock);
- while (1) {
- if (p->p_stat != SRUN) {
- mtx_unlock_spin(&sched_lock);
- return;
- }
- id = p->p_oncpu;
- mtx_unlock_spin(&sched_lock);
- if (id == 0xff)
- return;
- map = (1<<id);
- checkstate_need_ast |= map;
- ipi_selected(map, IPI_AST);
- i = 0;
- while ((checkstate_need_ast & map) != 0) {
- /* spin */
- i++;
- if (i > 100000) {
-#if 0
- printf("forward_signal: dropped ast 0x%x\n",
- checkstate_need_ast & map);
-#endif
- break;
- }
- }
- mtx_lock_spin(&sched_lock);
- if (id == p->p_oncpu) {
- mtx_unlock_spin(&sched_lock);
- return;
- }
- }
+ hardclock_process(curproc, TRAPF_USERMODE(&frame));
+ mtx_unlock_spin(&sched_lock);
}
-void
-forward_roundrobin(void)
+void
+forward_hardclock(void)
{
u_int map;
- int i;
- CTR0(KTR_SMP, "forward_roundrobin()");
+ CTR0(KTR_SMP, "forward_hardclock");
if (!smp_started || !invltlb_ok || cold || panicstr)
return;
- if (!forward_roundrobin_enabled)
- return;
- resched_cpus |= PCPU_GET(other_cpus);
- map = PCPU_GET(other_cpus) & ~stopped_cpus ;
-#if 1
- ipi_selected(map, IPI_AST);
-#else
- ipi_all_but_self(IPI_AST);
-#endif
- i = 0;
- while ((checkstate_need_ast & map) != 0) {
- /* spin */
- i++;
- if (i > 100000) {
-#if 0
- printf("forward_roundrobin: dropped ast 0x%x\n",
- checkstate_need_ast & map);
-#endif
- break;
- }
- }
-}
-
-/*
- * When called the executing CPU will send an IPI to all other CPUs
- * requesting that they halt execution.
- *
- * Usually (but not necessarily) called with 'other_cpus' as its arg.
- *
- * - Signals all CPUs in map to stop.
- * - Waits for each to stop.
- *
- * Returns:
- * -1: error
- * 0: NA
- * 1: ok
- *
- * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
- * from executing at same time.
- */
-int
-stop_cpus(u_int map)
-{
- int count = 0;
-
- if (!smp_started)
- return 0;
-
- /* send the Xcpustop IPI to all CPUs in map */
- ipi_selected(map, IPI_STOP);
-
- while (count++ < 100000 && (stopped_cpus & map) != map)
- /* spin */ ;
-
-#ifdef DIAGNOSTIC
- if ((stopped_cpus & map) != map)
- printf("Warning: CPUs 0x%x did not stop!\n",
- (~(stopped_cpus & map)) & map);
-#endif
-
- return 1;
-}
-
-
-/*
- * Called by a CPU to restart stopped CPUs.
- *
- * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
- *
- * - Signals all CPUs in map to restart.
- * - Waits for each to restart.
- *
- * Returns:
- * -1: error
- * 0: NA
- * 1: ok
- */
-int
-restart_cpus(u_int map)
-{
- int count = 0;
-
- if (!smp_started)
- return 0;
-
- started_cpus = map; /* signal other cpus to restart */
-
- /* wait for each to clear its bit */
- while (count++ < 100000 && (stopped_cpus & map) != 0)
- /* spin */ ;
-#ifdef DIAGNOSTIC
- if ((stopped_cpus & map) != 0)
- printf("Warning: CPUs 0x%x did not restart!\n",
- (~(stopped_cpus & map)) & map);
-#endif
-
- return 1;
+ map = PCPU_GET(other_cpus) & ~stopped_cpus ;
+ if (map != 0)
+ ipi_selected(map, IPI_HARDCLOCK);
}
-
#ifdef APIC_INTR_REORDER
/*
* Maintain mapping from softintr vector to isr bit in local apic.
@@ -2811,73 +2386,6 @@ set_lapic_isrloc(int intr, int vector)
#endif
/*
- * All-CPU rendezvous. CPUs are signalled, all execute the setup function
- * (if specified), rendezvous, execute the action function (if specified),
- * rendezvous again, execute the teardown function (if specified), and then
- * resume.
- *
- * Note that the supplied external functions _must_ be reentrant and aware
- * that they are running in parallel and in an unknown lock context.
- */
-static void (*smp_rv_setup_func)(void *arg);
-static void (*smp_rv_action_func)(void *arg);
-static void (*smp_rv_teardown_func)(void *arg);
-static void *smp_rv_func_arg;
-static volatile int smp_rv_waiters[2];
-
-void
-smp_rendezvous_action(void)
-{
- /* setup function */
- if (smp_rv_setup_func != NULL)
- smp_rv_setup_func(smp_rv_func_arg);
- /* spin on entry rendezvous */
- atomic_add_int(&smp_rv_waiters[0], 1);
- while (smp_rv_waiters[0] < mp_ncpus)
- ;
- /* action function */
- if (smp_rv_action_func != NULL)
- smp_rv_action_func(smp_rv_func_arg);
- /* spin on exit rendezvous */
- atomic_add_int(&smp_rv_waiters[1], 1);
- while (smp_rv_waiters[1] < mp_ncpus)
- ;
- /* teardown function */
- if (smp_rv_teardown_func != NULL)
- smp_rv_teardown_func(smp_rv_func_arg);
-}
-
-void
-smp_rendezvous(void (* setup_func)(void *),
- void (* action_func)(void *),
- void (* teardown_func)(void *),
- void *arg)
-{
-
- /* obtain rendezvous lock */
- mtx_lock_spin(&smp_rv_mtx);
-
- /* set static function pointers */
- smp_rv_setup_func = setup_func;
- smp_rv_action_func = action_func;
- smp_rv_teardown_func = teardown_func;
- smp_rv_func_arg = arg;
- smp_rv_waiters[0] = 0;
- smp_rv_waiters[1] = 0;
-
- /*
- * signal other processors, which will enter the IPI with interrupts off
- */
- ipi_all_but_self(IPI_RENDEZVOUS);
-
- /* call executor function */
- smp_rendezvous_action();
-
- /* release lock */
- mtx_unlock_spin(&smp_rv_mtx);
-}
-
-/*
* send an IPI to a set of cpus.
*/
void
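One note before the repo-copies (mptable.c and include/mptable.h) repeat the
hunks above: forwarded_statclock() and forwarded_hardclock() take the
trapframe by value because the IPI stubs push it directly on the stack before
the call, and they classify the frame with two accessors.  By analogy with the
CLKF_USERMODE() macro in the cpu.h hunk below, the i386 definitions are
presumably along these lines (an assumption, not quoted from machine/cpu.h):

/* Assumed i386 definitions, by analogy with CLKF_USERMODE()/CLKF_PC(). */
#define	TRAPF_USERMODE(framep) \
	((ISPL((framep)->tf_cs) == SEL_UPL) || ((framep)->tf_eflags & PSL_VM))
#define	TRAPF_PC(framep)	((framep)->tf_eip)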
diff --git a/sys/amd64/amd64/mptable.c b/sys/amd64/amd64/mptable.c
index 28a5c72..21e6b6e 100644
--- a/sys/amd64/amd64/mptable.c
+++ b/sys/amd64/amd64/mptable.c
@@ -42,6 +42,7 @@
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
+#include <sys/smp.h>
#include <sys/dkstat.h>
#include <sys/cons.h> /* cngetc() */
@@ -57,9 +58,9 @@
#include <sys/gmon.h>
#endif
-#include <machine/smp.h>
#include <machine/apic.h>
#include <machine/atomic.h>
+#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/ipl.h>
#include <machine/mpapic.h>
@@ -243,7 +244,6 @@ int current_postcode;
extern struct region_descriptor r_gdt, r_idt;
int bsp_apic_ready = 0; /* flags useability of BSP apic */
-int mp_ncpus; /* # of CPUs, including BSP */
int mp_naps; /* # of Applications processors */
int mp_nbusses; /* # of busses */
int mp_napics; /* # of IO APICs */
@@ -273,9 +273,6 @@ int io_num_to_apic_id[NAPICID];
int apic_id_to_logical[NAPICID];
-/* Bitmap of all available CPUs */
-u_int all_cpus;
-
/* AP uses this during bootstrap. Do not staticize. */
char *bootSTK;
static int bootAP;
@@ -288,28 +285,9 @@ extern pt_entry_t *SMPpt;
struct pcb stoppcbs[MAXCPU];
-int smp_started; /* has the system started? */
-int smp_active = 0; /* are the APs allowed to run? */
-SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, "");
-
-/* XXX maybe should be hw.ncpu */
-static int smp_cpus = 1; /* how many cpu's running */
-SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, "");
-
int invltlb_ok = 0; /* throttle smp_invltlb() till safe */
SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
-/* Enable forwarding of a signal to a process running on a different CPU */
-static int forward_signal_enabled = 1;
-SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
- &forward_signal_enabled, 0, "");
-
-/* Enable forwarding of roundrobin to all other cpus */
-static int forward_roundrobin_enabled = 1;
-SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
- &forward_roundrobin_enabled, 0, "");
-
-
/*
* Local data and functions.
*/
@@ -354,9 +332,6 @@ struct mtx mcount_mtx;
struct mtx com_mtx;
#endif /* USE_COMLOCK */
-/* lock around the MP rendezvous */
-static struct mtx smp_rv_mtx;
-
static void
init_locks(void)
{
@@ -367,13 +342,9 @@ init_locks(void)
*/
mtx_init(&mcount_mtx, "mcount", MTX_DEF);
- mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN);
-
#ifdef USE_COMLOCK
mtx_init(&com_mtx, "com", MTX_SPIN);
#endif /* USE_COMLOCK */
-
- mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
}
/*
@@ -397,8 +368,8 @@ mp_bootaddress(u_int basemem)
/*
* Look for an Intel MP spec table (ie, SMP capable hardware).
*/
-int
-mp_probe(void)
+void
+i386_mp_probe(void)
{
int x;
u_long segment;
@@ -427,7 +398,7 @@ mp_probe(void)
/* nothing found */
mpfps = (mpfps_t)0;
mp_capable = 0;
- return 0;
+ return;
found:
/* calculate needed resources */
@@ -436,15 +407,19 @@ found:
/* flag fact that we are running multiple processors */
mp_capable = 1;
- return 1;
}
+int
+cpu_mp_probe(void)
+{
+ return (mp_capable);
+}
/*
* Initialize the SMP hardware and the APIC and start up the AP's.
*/
void
-mp_start(void)
+cpu_mp_start(void)
{
POSTCODE(MP_START_POST);
@@ -453,6 +428,8 @@ mp_start(void)
mp_enable(boot_address);
else
panic("MP hardware not found!");
+
+ cpu_setregs();
}
@@ -460,13 +437,12 @@ mp_start(void)
* Print various information about the SMP system hardware and setup.
*/
void
-mp_announce(void)
+cpu_mp_announce(void)
{
int x;
POSTCODE(MP_ANNOUNCE_POST);
- printf("FreeBSD/SMP: Multiprocessor motherboard\n");
printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
printf(", version: 0x%08x", cpu_apic_versions[0]);
printf(", at 0x%08x\n", cpu_apic_address);
@@ -623,8 +599,12 @@ mp_enable(u_int boot_addr)
setidt(XINVLTLB_OFFSET, Xinvltlb,
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- /* install an inter-CPU IPI for reading processor state */
- setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate,
+ /* install an inter-CPU IPI for forwarding hardclock() */
+ setidt(XHARDCLOCK_OFFSET, Xhardclock,
+ SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+
+ /* install an inter-CPU IPI for forwarding statclock() */
+ setidt(XSTATCLOCK_OFFSET, Xstatclock,
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
/* install an inter-CPU IPI for all-CPU rendezvous */
@@ -1938,6 +1918,8 @@ start_all_aps(u_int boot_addr)
POSTCODE(START_ALL_APS_POST);
+ mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
+
/* initialize BSP's local APIC */
apic_initialize();
bsp_apic_ready = 1;
@@ -1985,8 +1967,8 @@ start_all_aps(u_int boot_addr)
(PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
/* prime data page for it to use */
- SLIST_INSERT_HEAD(&cpuhead, gd, gd_allcpu);
gd->gd_cpuid = x;
+ globaldata_register(gd);
/* setup a vector to our boot code */
*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
@@ -2328,472 +2310,65 @@ ap_init(void)
panic("scheduler returned us to ap_init");
}
-#define CHECKSTATE_USER 0
-#define CHECKSTATE_SYS 1
-#define CHECKSTATE_INTR 2
-
-/* Do not staticize. Used from apic_vector.s */
-struct proc* checkstate_curproc[MAXCPU];
-int checkstate_cpustate[MAXCPU];
-u_long checkstate_pc[MAXCPU];
-
-#define PC_TO_INDEX(pc, prof) \
- ((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
- (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
-
-static void
-addupc_intr_forwarded(struct proc *p, int id, int *astmap)
-{
- int i;
- struct uprof *prof;
- u_long pc;
-
- pc = checkstate_pc[id];
- prof = &p->p_stats->p_prof;
- if (pc >= prof->pr_off &&
- (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) {
- mtx_assert(&sched_lock, MA_OWNED);
- if ((p->p_sflag & PS_OWEUPC) == 0) {
- prof->pr_addr = pc;
- prof->pr_ticks = 1;
- p->p_sflag |= PS_OWEUPC;
- }
- *astmap |= (1 << id);
- }
-}
-
-static void
-forwarded_statclock(int id, int pscnt, int *astmap)
-{
- struct pstats *pstats;
- long rss;
- struct rusage *ru;
- struct vmspace *vm;
- int cpustate;
- struct proc *p;
-#ifdef GPROF
- register struct gmonparam *g;
- int i;
-#endif
-
- mtx_assert(&sched_lock, MA_OWNED);
- p = checkstate_curproc[id];
- cpustate = checkstate_cpustate[id];
-
- /* XXX */
- if (p->p_ithd)
- cpustate = CHECKSTATE_INTR;
- else if (p == SMP_prvspace[id].globaldata.gd_idleproc)
- cpustate = CHECKSTATE_SYS;
-
- switch (cpustate) {
- case CHECKSTATE_USER:
- if (p->p_sflag & PS_PROFIL)
- addupc_intr_forwarded(p, id, astmap);
- if (pscnt > 1)
- return;
- p->p_uticks++;
- if (p->p_nice > NZERO)
- cp_time[CP_NICE]++;
- else
- cp_time[CP_USER]++;
- break;
- case CHECKSTATE_SYS:
-#ifdef GPROF
- /*
- * Kernel statistics are just like addupc_intr, only easier.
- */
- g = &_gmonparam;
- if (g->state == GMON_PROF_ON) {
- i = checkstate_pc[id] - g->lowpc;
- if (i < g->textsize) {
- i /= HISTFRACTION * sizeof(*g->kcount);
- g->kcount[i]++;
- }
- }
-#endif
- if (pscnt > 1)
- return;
-
- p->p_sticks++;
- if (p == SMP_prvspace[id].globaldata.gd_idleproc)
- cp_time[CP_IDLE]++;
- else
- cp_time[CP_SYS]++;
- break;
- case CHECKSTATE_INTR:
- default:
-#ifdef GPROF
- /*
- * Kernel statistics are just like addupc_intr, only easier.
- */
- g = &_gmonparam;
- if (g->state == GMON_PROF_ON) {
- i = checkstate_pc[id] - g->lowpc;
- if (i < g->textsize) {
- i /= HISTFRACTION * sizeof(*g->kcount);
- g->kcount[i]++;
- }
- }
-#endif
- if (pscnt > 1)
- return;
- KASSERT(p != NULL, ("NULL process in interrupt state"));
- p->p_iticks++;
- cp_time[CP_INTR]++;
- }
-
- schedclock(p);
-
- /* Update resource usage integrals and maximums. */
- if ((pstats = p->p_stats) != NULL &&
- (ru = &pstats->p_ru) != NULL &&
- (vm = p->p_vmspace) != NULL) {
- ru->ru_ixrss += pgtok(vm->vm_tsize);
- ru->ru_idrss += pgtok(vm->vm_dsize);
- ru->ru_isrss += pgtok(vm->vm_ssize);
- rss = pgtok(vmspace_resident_count(vm));
- if (ru->ru_maxrss < rss)
- ru->ru_maxrss = rss;
- }
-}
-
+/*
+ * For statclock, we send an IPI to all CPU's to have them call this
+ * function.
+ */
void
-forward_statclock(int pscnt)
+forwarded_statclock(struct trapframe frame)
{
- int map;
- int id;
- int i;
- /* Kludge. We don't yet have separate locks for the interrupts
- * and the kernel. This means that we cannot let the other processors
- * handle complex interrupts while inhibiting them from entering
- * the kernel in a non-interrupt context.
- *
- * What we can do, without changing the locking mechanisms yet,
- * is letting the other processors handle a very simple interrupt
- * (wich determines the processor states), and do the main
- * work ourself.
- */
-
- CTR1(KTR_SMP, "forward_statclock(%d)", pscnt);
-
- if (!smp_started || !invltlb_ok || cold || panicstr)
- return;
-
- /* Step 1: Probe state (user, cpu, interrupt, spinlock, idle ) */
-
- map = PCPU_GET(other_cpus) & ~stopped_cpus ;
- checkstate_probed_cpus = 0;
- if (map != 0)
- ipi_selected(map, IPI_CHECKSTATE);
-
- i = 0;
- while (checkstate_probed_cpus != map) {
- /* spin */
- i++;
- if (i == 100000) {
-#ifdef DIAGNOSTIC
- printf("forward_statclock: checkstate %x\n",
- checkstate_probed_cpus);
-#endif
- break;
- }
- }
-
- /*
- * Step 2: walk through other processors processes, update ticks and
- * profiling info.
- */
-
- map = 0;
- for (id = 0; id < mp_ncpus; id++) {
- if (id == PCPU_GET(cpuid))
- continue;
- if (((1 << id) & checkstate_probed_cpus) == 0)
- continue;
- forwarded_statclock(id, pscnt, &map);
- }
- if (map != 0) {
- checkstate_need_ast |= map;
- ipi_selected(map, IPI_AST);
- i = 0;
- while ((checkstate_need_ast & map) != 0) {
- /* spin */
- i++;
- if (i > 100000) {
-#ifdef DIAGNOSTIC
- printf("forward_statclock: dropped ast 0x%x\n",
- checkstate_need_ast & map);
-#endif
- break;
- }
- }
- }
+ mtx_lock_spin(&sched_lock);
+ statclock_process(curproc, TRAPF_PC(&frame), TRAPF_USERMODE(&frame));
+ mtx_unlock_spin(&sched_lock);
}
-void
-forward_hardclock(int pscnt)
+void
+forward_statclock(void)
{
int map;
- int id;
- struct proc *p;
- struct pstats *pstats;
- int i;
-
- /* Kludge. We don't yet have separate locks for the interrupts
- * and the kernel. This means that we cannot let the other processors
- * handle complex interrupts while inhibiting them from entering
- * the kernel in a non-interrupt context.
- *
- * What we can do, without changing the locking mechanisms yet,
- * is letting the other processors handle a very simple interrupt
- * (wich determines the processor states), and do the main
- * work ourself.
- */
- CTR1(KTR_SMP, "forward_hardclock(%d)", pscnt);
+ CTR0(KTR_SMP, "forward_statclock");
if (!smp_started || !invltlb_ok || cold || panicstr)
return;
- /* Step 1: Probe state (user, cpu, interrupt, spinlock, idle) */
-
map = PCPU_GET(other_cpus) & ~stopped_cpus ;
- checkstate_probed_cpus = 0;
if (map != 0)
- ipi_selected(map, IPI_CHECKSTATE);
-
- i = 0;
- while (checkstate_probed_cpus != map) {
- /* spin */
- i++;
- if (i == 100000) {
-#ifdef DIAGNOSTIC
- printf("forward_hardclock: checkstate %x\n",
- checkstate_probed_cpus);
-#endif
- break;
- }
- }
-
- /*
- * Step 2: walk through other processors processes, update virtual
- * timer and profiling timer. If stathz == 0, also update ticks and
- * profiling info.
- */
-
- map = 0;
- for (id = 0; id < mp_ncpus; id++) {
- if (id == PCPU_GET(cpuid))
- continue;
- if (((1 << id) & checkstate_probed_cpus) == 0)
- continue;
- p = checkstate_curproc[id];
- if (p) {
- pstats = p->p_stats;
- if (checkstate_cpustate[id] == CHECKSTATE_USER &&
- timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
- itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
- p->p_sflag |= PS_ALRMPEND;
- map |= (1 << id);
- }
- if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
- itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
- p->p_sflag |= PS_PROFPEND;
- map |= (1 << id);
- }
- }
- if (stathz == 0) {
- forwarded_statclock( id, pscnt, &map);
- }
- }
- if (map != 0) {
- checkstate_need_ast |= map;
- ipi_selected(map, IPI_AST);
- i = 0;
- while ((checkstate_need_ast & map) != 0) {
- /* spin */
- i++;
- if (i > 100000) {
-#ifdef DIAGNOSTIC
- printf("forward_hardclock: dropped ast 0x%x\n",
- checkstate_need_ast & map);
-#endif
- break;
- }
- }
- }
+ ipi_selected(map, IPI_STATCLOCK);
}
-void
-forward_signal(struct proc *p)
+/*
+ * For each hardclock(), we send an IPI to all other CPU's to have them
+ * execute this function. It would be nice to reduce contention on
+ * sched_lock if we could simply peek at the CPU to determine the user/kernel
+ * state and call hardclock_process() on the CPU receiving the clock interrupt
+ * and then just use a simple IPI to handle any ast's if needed.
+ */
+void
+forwarded_hardclock(struct trapframe frame)
{
- int map;
- int id;
- int i;
-
- /* Kludge. We don't yet have separate locks for the interrupts
- * and the kernel. This means that we cannot let the other processors
- * handle complex interrupts while inhibiting them from entering
- * the kernel in a non-interrupt context.
- *
- * What we can do, without changing the locking mechanisms yet,
- * is letting the other processors handle a very simple interrupt
- * (wich determines the processor states), and do the main
- * work ourself.
- */
-
- CTR1(KTR_SMP, "forward_signal(%p)", p);
- if (!smp_started || !invltlb_ok || cold || panicstr)
- return;
- if (!forward_signal_enabled)
- return;
mtx_lock_spin(&sched_lock);
- while (1) {
- if (p->p_stat != SRUN) {
- mtx_unlock_spin(&sched_lock);
- return;
- }
- id = p->p_oncpu;
- mtx_unlock_spin(&sched_lock);
- if (id == 0xff)
- return;
- map = (1<<id);
- checkstate_need_ast |= map;
- ipi_selected(map, IPI_AST);
- i = 0;
- while ((checkstate_need_ast & map) != 0) {
- /* spin */
- i++;
- if (i > 100000) {
-#if 0
- printf("forward_signal: dropped ast 0x%x\n",
- checkstate_need_ast & map);
-#endif
- break;
- }
- }
- mtx_lock_spin(&sched_lock);
- if (id == p->p_oncpu) {
- mtx_unlock_spin(&sched_lock);
- return;
- }
- }
+ hardclock_process(curproc, TRAPF_USERMODE(&frame));
+ mtx_unlock_spin(&sched_lock);
}
-void
-forward_roundrobin(void)
+void
+forward_hardclock(void)
{
u_int map;
- int i;
- CTR0(KTR_SMP, "forward_roundrobin()");
+ CTR0(KTR_SMP, "forward_hardclock");
if (!smp_started || !invltlb_ok || cold || panicstr)
return;
- if (!forward_roundrobin_enabled)
- return;
- resched_cpus |= PCPU_GET(other_cpus);
- map = PCPU_GET(other_cpus) & ~stopped_cpus ;
-#if 1
- ipi_selected(map, IPI_AST);
-#else
- ipi_all_but_self(IPI_AST);
-#endif
- i = 0;
- while ((checkstate_need_ast & map) != 0) {
- /* spin */
- i++;
- if (i > 100000) {
-#if 0
- printf("forward_roundrobin: dropped ast 0x%x\n",
- checkstate_need_ast & map);
-#endif
- break;
- }
- }
-}
-
-/*
- * When called the executing CPU will send an IPI to all other CPUs
- * requesting that they halt execution.
- *
- * Usually (but not necessarily) called with 'other_cpus' as its arg.
- *
- * - Signals all CPUs in map to stop.
- * - Waits for each to stop.
- *
- * Returns:
- * -1: error
- * 0: NA
- * 1: ok
- *
- * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
- * from executing at same time.
- */
-int
-stop_cpus(u_int map)
-{
- int count = 0;
-
- if (!smp_started)
- return 0;
-
- /* send the Xcpustop IPI to all CPUs in map */
- ipi_selected(map, IPI_STOP);
-
- while (count++ < 100000 && (stopped_cpus & map) != map)
- /* spin */ ;
-
-#ifdef DIAGNOSTIC
- if ((stopped_cpus & map) != map)
- printf("Warning: CPUs 0x%x did not stop!\n",
- (~(stopped_cpus & map)) & map);
-#endif
-
- return 1;
-}
-
-
-/*
- * Called by a CPU to restart stopped CPUs.
- *
- * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
- *
- * - Signals all CPUs in map to restart.
- * - Waits for each to restart.
- *
- * Returns:
- * -1: error
- * 0: NA
- * 1: ok
- */
-int
-restart_cpus(u_int map)
-{
- int count = 0;
-
- if (!smp_started)
- return 0;
-
- started_cpus = map; /* signal other cpus to restart */
-
- /* wait for each to clear its bit */
- while (count++ < 100000 && (stopped_cpus & map) != 0)
- /* spin */ ;
-#ifdef DIAGNOSTIC
- if ((stopped_cpus & map) != 0)
- printf("Warning: CPUs 0x%x did not restart!\n",
- (~(stopped_cpus & map)) & map);
-#endif
-
- return 1;
+ map = PCPU_GET(other_cpus) & ~stopped_cpus ;
+ if (map != 0)
+ ipi_selected(map, IPI_HARDCLOCK);
}
-
#ifdef APIC_INTR_REORDER
/*
* Maintain mapping from softintr vector to isr bit in local apic.
@@ -2811,73 +2386,6 @@ set_lapic_isrloc(int intr, int vector)
#endif
/*
- * All-CPU rendezvous. CPUs are signalled, all execute the setup function
- * (if specified), rendezvous, execute the action function (if specified),
- * rendezvous again, execute the teardown function (if specified), and then
- * resume.
- *
- * Note that the supplied external functions _must_ be reentrant and aware
- * that they are running in parallel and in an unknown lock context.
- */
-static void (*smp_rv_setup_func)(void *arg);
-static void (*smp_rv_action_func)(void *arg);
-static void (*smp_rv_teardown_func)(void *arg);
-static void *smp_rv_func_arg;
-static volatile int smp_rv_waiters[2];
-
-void
-smp_rendezvous_action(void)
-{
- /* setup function */
- if (smp_rv_setup_func != NULL)
- smp_rv_setup_func(smp_rv_func_arg);
- /* spin on entry rendezvous */
- atomic_add_int(&smp_rv_waiters[0], 1);
- while (smp_rv_waiters[0] < mp_ncpus)
- ;
- /* action function */
- if (smp_rv_action_func != NULL)
- smp_rv_action_func(smp_rv_func_arg);
- /* spin on exit rendezvous */
- atomic_add_int(&smp_rv_waiters[1], 1);
- while (smp_rv_waiters[1] < mp_ncpus)
- ;
- /* teardown function */
- if (smp_rv_teardown_func != NULL)
- smp_rv_teardown_func(smp_rv_func_arg);
-}
-
-void
-smp_rendezvous(void (* setup_func)(void *),
- void (* action_func)(void *),
- void (* teardown_func)(void *),
- void *arg)
-{
-
- /* obtain rendezvous lock */
- mtx_lock_spin(&smp_rv_mtx);
-
- /* set static function pointers */
- smp_rv_setup_func = setup_func;
- smp_rv_action_func = action_func;
- smp_rv_teardown_func = teardown_func;
- smp_rv_func_arg = arg;
- smp_rv_waiters[0] = 0;
- smp_rv_waiters[1] = 0;
-
- /*
- * signal other processors, which will enter the IPI with interrupts off
- */
- ipi_all_but_self(IPI_RENDEZVOUS);
-
- /* call executor function */
- smp_rendezvous_action();
-
- /* release lock */
- mtx_unlock_spin(&smp_rv_mtx);
-}
-
-/*
* send an IPI to a set of cpus.
*/
void
diff --git a/sys/amd64/amd64/sys_machdep.c b/sys/amd64/amd64/sys_machdep.c
index cf05d73..5ed16ab 100644
--- a/sys/amd64/amd64/sys_machdep.c
+++ b/sys/amd64/amd64/sys_machdep.c
@@ -41,6 +41,7 @@
#include <sys/ipl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
+#include <sys/smp.h>
#include <vm/vm.h>
#include <sys/lock.h>
@@ -53,9 +54,6 @@
#include <machine/cpu.h>
#include <machine/pcb_ext.h> /* pcb.h included by sys/user.h */
#include <machine/sysarch.h>
-#ifdef SMP
-#include <machine/smp.h>
-#endif
#include <vm/vm_kern.h> /* for kernel_map */
@@ -154,7 +152,15 @@ i386_extend_pcb(struct proc *p)
ssdtosd(&ssd, &ext->ext_tssd);
/* switch to the new TSS after syscall completes */
- need_resched();
+ /*
+ * XXX: The sched_lock here needs to be over a slightly larger area.
+ * I have patches to more properly lock accesses to process ldt's
+ * and tss's that still need to be reviewed, but this keeps us from
+ * panic'ing on the mtx_assert() in need_resched() for the time being.
+ */
+ mtx_lock_spin(&sched_lock);
+ need_resched(p);
+ mtx_unlock_spin(&sched_lock);
return 0;
}
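The sys_machdep.c hunk above shows the new calling convention in miniature:
need_resched() and friends now take an explicit process and expect sched_lock
to be held, hence the mtx_assert() the XXX comment mentions.  A minimal caller
under the new API looks like this (kick_process() is a hypothetical name for
illustration):

static void
kick_process(struct proc *p)
{
	mtx_lock_spin(&sched_lock);
	if (!resched_wanted(p))		/* formerly resched_wanted(), curproc */
		need_resched(p);	/* formerly need_resched(void) */
	mtx_unlock_spin(&sched_lock);
}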
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index 7088d0c..d9597df 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -177,7 +177,7 @@ userret(p, frame, oticks)
mtx_lock_spin(&sched_lock);
p->p_pri.pri_level = p->p_pri.pri_user;
- if (resched_wanted()) {
+ if (resched_wanted(p)) {
/*
* Since we are curproc, clock will normally just change
* our priority without moving us from one queue to another
@@ -1277,7 +1277,7 @@ ast(framep)
* acquiring and releasing mutexes in assembly is not fun.
*/
mtx_lock_spin(&sched_lock);
- if (!(astpending(p) || resched_wanted())) {
+ if (!(astpending(p) || resched_wanted(p))) {
mtx_unlock_spin(&sched_lock);
return;
}
diff --git a/sys/amd64/amd64/tsc.c b/sys/amd64/amd64/tsc.c
index 9034f81..ce67390 100644
--- a/sys/amd64/amd64/tsc.c
+++ b/sys/amd64/amd64/tsc.c
@@ -211,6 +211,10 @@ clkintr(struct clockframe frame)
mtx_unlock_spin(&clock_lock);
}
timer_func(&frame);
+#ifdef SMP
+ if (timer_func == hardclock)
+ forward_hardclock();
+#endif
switch (timer0_state) {
case RELEASED:
@@ -253,6 +257,9 @@ clkintr(struct clockframe frame)
timer_func = hardclock;
timer0_state = RELEASED;
hardclock(&frame);
+#ifdef SMP
+ forward_hardclock();
+#endif
}
break;
}
@@ -374,8 +381,12 @@ release_timer2()
static void
rtcintr(struct clockframe frame)
{
- while (rtcin(RTC_INTR) & RTCIR_PERIOD)
+ while (rtcin(RTC_INTR) & RTCIR_PERIOD) {
statclock(&frame);
+#ifdef SMP
+ forward_statclock();
+#endif
+ }
}
#include "opt_ddb.h"
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index 0a15103..f8fa5bb 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -59,14 +59,12 @@
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
+#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
-#ifdef SMP
-#include <machine/smp.h>
-#endif
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/vm86.h>
diff --git a/sys/amd64/include/cpu.h b/sys/amd64/include/cpu.h
index 74f21b9..29b34ee 100644
--- a/sys/amd64/include/cpu.h
+++ b/sys/amd64/include/cpu.h
@@ -65,8 +65,6 @@
#define CLKF_USERMODE(framep) \
((ISPL((framep)->cf_cs) == SEL_UPL) || ((framep)->cf_eflags & PSL_VM))
-
-#define CLKF_INTR(framep) (curproc->p_intr_nesting_level >= 2)
#define CLKF_PC(framep) ((framep)->cf_eip)
/*
diff --git a/sys/amd64/include/mptable.h b/sys/amd64/include/mptable.h
index 28a5c72..21e6b6e 100644
--- a/sys/amd64/include/mptable.h
+++ b/sys/amd64/include/mptable.h
@@ -42,6 +42,7 @@
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
+#include <sys/smp.h>
#include <sys/dkstat.h>
#include <sys/cons.h> /* cngetc() */
@@ -57,9 +58,9 @@
#include <sys/gmon.h>
#endif
-#include <machine/smp.h>
#include <machine/apic.h>
#include <machine/atomic.h>
+#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/ipl.h>
#include <machine/mpapic.h>
@@ -243,7 +244,6 @@ int current_postcode;
extern struct region_descriptor r_gdt, r_idt;
int bsp_apic_ready = 0; /* flags useability of BSP apic */
-int mp_ncpus; /* # of CPUs, including BSP */
int mp_naps; /* # of Applications processors */
int mp_nbusses; /* # of busses */
int mp_napics; /* # of IO APICs */
@@ -273,9 +273,6 @@ int io_num_to_apic_id[NAPICID];
int apic_id_to_logical[NAPICID];
-/* Bitmap of all available CPUs */
-u_int all_cpus;
-
/* AP uses this during bootstrap. Do not staticize. */
char *bootSTK;
static int bootAP;
@@ -288,28 +285,9 @@ extern pt_entry_t *SMPpt;
struct pcb stoppcbs[MAXCPU];
-int smp_started; /* has the system started? */
-int smp_active = 0; /* are the APs allowed to run? */
-SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, "");
-
-/* XXX maybe should be hw.ncpu */
-static int smp_cpus = 1; /* how many cpu's running */
-SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, "");
-
int invltlb_ok = 0; /* throttle smp_invltlb() till safe */
SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
-/* Enable forwarding of a signal to a process running on a different CPU */
-static int forward_signal_enabled = 1;
-SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
- &forward_signal_enabled, 0, "");
-
-/* Enable forwarding of roundrobin to all other cpus */
-static int forward_roundrobin_enabled = 1;
-SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
- &forward_roundrobin_enabled, 0, "");
-
-
/*
* Local data and functions.
*/
@@ -354,9 +332,6 @@ struct mtx mcount_mtx;
struct mtx com_mtx;
#endif /* USE_COMLOCK */
-/* lock around the MP rendezvous */
-static struct mtx smp_rv_mtx;
-
static void
init_locks(void)
{
@@ -367,13 +342,9 @@ init_locks(void)
*/
mtx_init(&mcount_mtx, "mcount", MTX_DEF);
- mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN);
-
#ifdef USE_COMLOCK
mtx_init(&com_mtx, "com", MTX_SPIN);
#endif /* USE_COMLOCK */
-
- mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
}
/*
@@ -397,8 +368,8 @@ mp_bootaddress(u_int basemem)
/*
* Look for an Intel MP spec table (ie, SMP capable hardware).
*/
-int
-mp_probe(void)
+void
+i386_mp_probe(void)
{
int x;
u_long segment;
@@ -427,7 +398,7 @@ mp_probe(void)
/* nothing found */
mpfps = (mpfps_t)0;
mp_capable = 0;
- return 0;
+ return;
found:
/* calculate needed resources */
@@ -436,15 +407,19 @@ found:
/* flag fact that we are running multiple processors */
mp_capable = 1;
- return 1;
}
+int
+cpu_mp_probe(void)
+{
+ return (mp_capable);
+}
/*
* Initialize the SMP hardware and the APIC and start up the AP's.
*/
void
-mp_start(void)
+cpu_mp_start(void)
{
POSTCODE(MP_START_POST);
@@ -453,6 +428,8 @@ mp_start(void)
mp_enable(boot_address);
else
panic("MP hardware not found!");
+
+ cpu_setregs();
}
@@ -460,13 +437,12 @@ mp_start(void)
* Print various information about the SMP system hardware and setup.
*/
void
-mp_announce(void)
+cpu_mp_announce(void)
{
int x;
POSTCODE(MP_ANNOUNCE_POST);
- printf("FreeBSD/SMP: Multiprocessor motherboard\n");
printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
printf(", version: 0x%08x", cpu_apic_versions[0]);
printf(", at 0x%08x\n", cpu_apic_address);
@@ -623,8 +599,12 @@ mp_enable(u_int boot_addr)
setidt(XINVLTLB_OFFSET, Xinvltlb,
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
- /* install an inter-CPU IPI for reading processor state */
- setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate,
+ /* install an inter-CPU IPI for forwarding hardclock() */
+ setidt(XHARDCLOCK_OFFSET, Xhardclock,
+ SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
+
+ /* install an inter-CPU IPI for forwarding statclock() */
+ setidt(XSTATCLOCK_OFFSET, Xstatclock,
SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
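
For reference, the setidt() calls above install each IPI stub into an IDT slot; the prototype assumed here matches the contemporary i386 machdep.c:

	void	setidt(int idx, inthand_t *func, int typ, int dpl, int selec);
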
/* install an inter-CPU IPI for all-CPU rendezvous */
@@ -1938,6 +1918,8 @@ start_all_aps(u_int boot_addr)
POSTCODE(START_ALL_APS_POST);
+ mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
+
/* initialize BSP's local APIC */
apic_initialize();
bsp_apic_ready = 1;
@@ -1985,8 +1967,8 @@ start_all_aps(u_int boot_addr)
(PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
/* prime data page for it to use */
- SLIST_INSERT_HEAD(&cpuhead, gd, gd_allcpu);
gd->gd_cpuid = x;
+ globaldata_register(gd);
/* setup a vector to our boot code */
*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
@@ -2328,472 +2310,65 @@ ap_init(void)
panic("scheduler returned us to ap_init");
}
-#define CHECKSTATE_USER 0
-#define CHECKSTATE_SYS 1
-#define CHECKSTATE_INTR 2
-
-/* Do not staticize. Used from apic_vector.s */
-struct proc* checkstate_curproc[MAXCPU];
-int checkstate_cpustate[MAXCPU];
-u_long checkstate_pc[MAXCPU];
-
-#define PC_TO_INDEX(pc, prof) \
- ((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
- (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
-
-static void
-addupc_intr_forwarded(struct proc *p, int id, int *astmap)
-{
- int i;
- struct uprof *prof;
- u_long pc;
-
- pc = checkstate_pc[id];
- prof = &p->p_stats->p_prof;
- if (pc >= prof->pr_off &&
- (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) {
- mtx_assert(&sched_lock, MA_OWNED);
- if ((p->p_sflag & PS_OWEUPC) == 0) {
- prof->pr_addr = pc;
- prof->pr_ticks = 1;
- p->p_sflag |= PS_OWEUPC;
- }
- *astmap |= (1 << id);
- }
-}
-
-static void
-forwarded_statclock(int id, int pscnt, int *astmap)
-{
- struct pstats *pstats;
- long rss;
- struct rusage *ru;
- struct vmspace *vm;
- int cpustate;
- struct proc *p;
-#ifdef GPROF
- register struct gmonparam *g;
- int i;
-#endif
-
- mtx_assert(&sched_lock, MA_OWNED);
- p = checkstate_curproc[id];
- cpustate = checkstate_cpustate[id];
-
- /* XXX */
- if (p->p_ithd)
- cpustate = CHECKSTATE_INTR;
- else if (p == SMP_prvspace[id].globaldata.gd_idleproc)
- cpustate = CHECKSTATE_SYS;
-
- switch (cpustate) {
- case CHECKSTATE_USER:
- if (p->p_sflag & PS_PROFIL)
- addupc_intr_forwarded(p, id, astmap);
- if (pscnt > 1)
- return;
- p->p_uticks++;
- if (p->p_nice > NZERO)
- cp_time[CP_NICE]++;
- else
- cp_time[CP_USER]++;
- break;
- case CHECKSTATE_SYS:
-#ifdef GPROF
- /*
- * Kernel statistics are just like addupc_intr, only easier.
- */
- g = &_gmonparam;
- if (g->state == GMON_PROF_ON) {
- i = checkstate_pc[id] - g->lowpc;
- if (i < g->textsize) {
- i /= HISTFRACTION * sizeof(*g->kcount);
- g->kcount[i]++;
- }
- }
-#endif
- if (pscnt > 1)
- return;
-
- p->p_sticks++;
- if (p == SMP_prvspace[id].globaldata.gd_idleproc)
- cp_time[CP_IDLE]++;
- else
- cp_time[CP_SYS]++;
- break;
- case CHECKSTATE_INTR:
- default:
-#ifdef GPROF
- /*
- * Kernel statistics are just like addupc_intr, only easier.
- */
- g = &_gmonparam;
- if (g->state == GMON_PROF_ON) {
- i = checkstate_pc[id] - g->lowpc;
- if (i < g->textsize) {
- i /= HISTFRACTION * sizeof(*g->kcount);
- g->kcount[i]++;
- }
- }
-#endif
- if (pscnt > 1)
- return;
- KASSERT(p != NULL, ("NULL process in interrupt state"));
- p->p_iticks++;
- cp_time[CP_INTR]++;
- }
-
- schedclock(p);
-
- /* Update resource usage integrals and maximums. */
- if ((pstats = p->p_stats) != NULL &&
- (ru = &pstats->p_ru) != NULL &&
- (vm = p->p_vmspace) != NULL) {
- ru->ru_ixrss += pgtok(vm->vm_tsize);
- ru->ru_idrss += pgtok(vm->vm_dsize);
- ru->ru_isrss += pgtok(vm->vm_ssize);
- rss = pgtok(vmspace_resident_count(vm));
- if (ru->ru_maxrss < rss)
- ru->ru_maxrss = rss;
- }
-}
-
+/*
+ * For statclock, we send an IPI to all CPUs to have them call this
+ * function.
+ */
void
-forward_statclock(int pscnt)
+forwarded_statclock(struct trapframe frame)
{
- int map;
- int id;
- int i;
- /* Kludge. We don't yet have separate locks for the interrupts
- * and the kernel. This means that we cannot let the other processors
- * handle complex interrupts while inhibiting them from entering
- * the kernel in a non-interrupt context.
- *
- * What we can do, without changing the locking mechanisms yet,
- * is letting the other processors handle a very simple interrupt
- * (which determines the processor states), and do the main
- * work ourself.
- */
-
- CTR1(KTR_SMP, "forward_statclock(%d)", pscnt);
-
- if (!smp_started || !invltlb_ok || cold || panicstr)
- return;
-
- /* Step 1: Probe state (user, cpu, interrupt, spinlock, idle ) */
-
- map = PCPU_GET(other_cpus) & ~stopped_cpus ;
- checkstate_probed_cpus = 0;
- if (map != 0)
- ipi_selected(map, IPI_CHECKSTATE);
-
- i = 0;
- while (checkstate_probed_cpus != map) {
- /* spin */
- i++;
- if (i == 100000) {
-#ifdef DIAGNOSTIC
- printf("forward_statclock: checkstate %x\n",
- checkstate_probed_cpus);
-#endif
- break;
- }
- }
-
- /*
- * Step 2: walk through other processors processes, update ticks and
- * profiling info.
- */
-
- map = 0;
- for (id = 0; id < mp_ncpus; id++) {
- if (id == PCPU_GET(cpuid))
- continue;
- if (((1 << id) & checkstate_probed_cpus) == 0)
- continue;
- forwarded_statclock(id, pscnt, &map);
- }
- if (map != 0) {
- checkstate_need_ast |= map;
- ipi_selected(map, IPI_AST);
- i = 0;
- while ((checkstate_need_ast & map) != 0) {
- /* spin */
- i++;
- if (i > 100000) {
-#ifdef DIAGNOSTIC
- printf("forward_statclock: dropped ast 0x%x\n",
- checkstate_need_ast & map);
-#endif
- break;
- }
- }
- }
+ mtx_lock_spin(&sched_lock);
+ statclock_process(curproc, TRAPF_PC(&frame), TRAPF_USERMODE(&frame));
+ mtx_unlock_spin(&sched_lock);
}
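
For context, the trapframe accessors used above conventionally reduce to the following on i386 (assumed from the machine/cpu.h of this era; shown only to clarify how the interrupted PC and user/kernel mode are recovered):

	#define	TRAPF_USERMODE(framep) \
		((ISPL((framep)->tf_cs) == SEL_UPL) || \
		 ((framep)->tf_eflags & PSL_VM))
	#define	TRAPF_PC(framep)	((framep)->tf_eip)
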
-void
-forward_hardclock(int pscnt)
+void
+forward_statclock(void)
{
int map;
- int id;
- struct proc *p;
- struct pstats *pstats;
- int i;
-
- /* Kludge. We don't yet have separate locks for the interrupts
- * and the kernel. This means that we cannot let the other processors
- * handle complex interrupts while inhibiting them from entering
- * the kernel in a non-interrupt context.
- *
- * What we can do, without changing the locking mechanisms yet,
- * is letting the other processors handle a very simple interrupt
- * (which determines the processor states), and do the main
- * work ourself.
- */
- CTR1(KTR_SMP, "forward_hardclock(%d)", pscnt);
+ CTR0(KTR_SMP, "forward_statclock");
if (!smp_started || !invltlb_ok || cold || panicstr)
return;
- /* Step 1: Probe state (user, cpu, interrupt, spinlock, idle) */
-
map = PCPU_GET(other_cpus) & ~stopped_cpus;
- checkstate_probed_cpus = 0;
if (map != 0)
- ipi_selected(map, IPI_CHECKSTATE);
-
- i = 0;
- while (checkstate_probed_cpus != map) {
- /* spin */
- i++;
- if (i == 100000) {
-#ifdef DIAGNOSTIC
- printf("forward_hardclock: checkstate %x\n",
- checkstate_probed_cpus);
-#endif
- break;
- }
- }
-
- /*
- * Step 2: walk through other processors processes, update virtual
- * timer and profiling timer. If stathz == 0, also update ticks and
- * profiling info.
- */
-
- map = 0;
- for (id = 0; id < mp_ncpus; id++) {
- if (id == PCPU_GET(cpuid))
- continue;
- if (((1 << id) & checkstate_probed_cpus) == 0)
- continue;
- p = checkstate_curproc[id];
- if (p) {
- pstats = p->p_stats;
- if (checkstate_cpustate[id] == CHECKSTATE_USER &&
- timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
- itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
- p->p_sflag |= PS_ALRMPEND;
- map |= (1 << id);
- }
- if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
- itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
- p->p_sflag |= PS_PROFPEND;
- map |= (1 << id);
- }
- }
- if (stathz == 0) {
- forwarded_statclock( id, pscnt, &map);
- }
- }
- if (map != 0) {
- checkstate_need_ast |= map;
- ipi_selected(map, IPI_AST);
- i = 0;
- while ((checkstate_need_ast & map) != 0) {
- /* spin */
- i++;
- if (i > 100000) {
-#ifdef DIAGNOSTIC
- printf("forward_hardclock: dropped ast 0x%x\n",
- checkstate_need_ast & map);
-#endif
- break;
- }
- }
- }
+ ipi_selected(map, IPI_STATCLOCK);
}
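
Taken together, the statclock forwarding round trip now looks like this (a simplified sketch; the Xstatclock stub lives in apic_vector.s and is assumed to push a trapframe before entering the C handler):

	/*
	 *  CPU taking the RTC interrupt       other CPUs in `map'
	 *  -----------------------------      -------------------------------
	 *  statclock(&frame);                 Xstatclock:  (IPI stub, asm)
	 *  forward_statclock();                   push trapframe
	 *    ipi_selected(map,                    call forwarded_statclock
	 *        IPI_STATCLOCK);              forwarded_statclock(frame):
	 *                                         mtx_lock_spin(&sched_lock);
	 *                                         statclock_process(curproc,
	 *                                             TRAPF_PC(&frame),
	 *                                             TRAPF_USERMODE(&frame));
	 *                                         mtx_unlock_spin(&sched_lock);
	 */
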
-void
-forward_signal(struct proc *p)
+/*
+ * For each hardclock(), we send an IPI to all other CPUs to have them
+ * execute this function. It would be nice to reduce contention on
+ * sched_lock if we could simply peek at the CPU to determine the user/kernel
+ * state and call hardclock_process() on the CPU receiving the clock interrupt
+ * and then just use a simple IPI to handle any ast's if needed.
+ */
+void
+forwarded_hardclock(struct trapframe frame)
{
- int map;
- int id;
- int i;
-
- /* Kludge. We don't yet have separate locks for the interrupts
- * and the kernel. This means that we cannot let the other processors
- * handle complex interrupts while inhibiting them from entering
- * the kernel in a non-interrupt context.
- *
- * What we can do, without changing the locking mechanisms yet,
- * is letting the other processors handle a very simple interrupt
- * (which determines the processor states), and do the main
- * work ourself.
- */
-
- CTR1(KTR_SMP, "forward_signal(%p)", p);
- if (!smp_started || !invltlb_ok || cold || panicstr)
- return;
- if (!forward_signal_enabled)
- return;
mtx_lock_spin(&sched_lock);
- while (1) {
- if (p->p_stat != SRUN) {
- mtx_unlock_spin(&sched_lock);
- return;
- }
- id = p->p_oncpu;
- mtx_unlock_spin(&sched_lock);
- if (id == 0xff)
- return;
- map = (1<<id);
- checkstate_need_ast |= map;
- ipi_selected(map, IPI_AST);
- i = 0;
- while ((checkstate_need_ast & map) != 0) {
- /* spin */
- i++;
- if (i > 100000) {
-#if 0
- printf("forward_signal: dropped ast 0x%x\n",
- checkstate_need_ast & map);
-#endif
- break;
- }
- }
- mtx_lock_spin(&sched_lock);
- if (id == p->p_oncpu) {
- mtx_unlock_spin(&sched_lock);
- return;
- }
- }
+ hardclock_process(curproc, TRAPF_USERMODE(&frame));
+ mtx_unlock_spin(&sched_lock);
}
-void
-forward_roundrobin(void)
+void
+forward_hardclock(void)
{
u_int map;
- int i;
- CTR0(KTR_SMP, "forward_roundrobin()");
+ CTR0(KTR_SMP, "forward_hardclock");
if (!smp_started || !invltlb_ok || cold || panicstr)
return;
- if (!forward_roundrobin_enabled)
- return;
- resched_cpus |= PCPU_GET(other_cpus);
- map = PCPU_GET(other_cpus) & ~stopped_cpus ;
-#if 1
- ipi_selected(map, IPI_AST);
-#else
- ipi_all_but_self(IPI_AST);
-#endif
- i = 0;
- while ((checkstate_need_ast & map) != 0) {
- /* spin */
- i++;
- if (i > 100000) {
-#if 0
- printf("forward_roundrobin: dropped ast 0x%x\n",
- checkstate_need_ast & map);
-#endif
- break;
- }
- }
-}
-
-/*
- * When called the executing CPU will send an IPI to all other CPUs
- * requesting that they halt execution.
- *
- * Usually (but not necessarily) called with 'other_cpus' as its arg.
- *
- * - Signals all CPUs in map to stop.
- * - Waits for each to stop.
- *
- * Returns:
- * -1: error
- * 0: NA
- * 1: ok
- *
- * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
- * from executing at same time.
- */
-int
-stop_cpus(u_int map)
-{
- int count = 0;
-
- if (!smp_started)
- return 0;
-
- /* send the Xcpustop IPI to all CPUs in map */
- ipi_selected(map, IPI_STOP);
-
- while (count++ < 100000 && (stopped_cpus & map) != map)
- /* spin */ ;
-
-#ifdef DIAGNOSTIC
- if ((stopped_cpus & map) != map)
- printf("Warning: CPUs 0x%x did not stop!\n",
- (~(stopped_cpus & map)) & map);
-#endif
-
- return 1;
-}
-
-
-/*
- * Called by a CPU to restart stopped CPUs.
- *
- * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
- *
- * - Signals all CPUs in map to restart.
- * - Waits for each to restart.
- *
- * Returns:
- * -1: error
- * 0: NA
- * 1: ok
- */
-int
-restart_cpus(u_int map)
-{
- int count = 0;
-
- if (!smp_started)
- return 0;
-
- started_cpus = map; /* signal other cpus to restart */
-
- /* wait for each to clear its bit */
- while (count++ < 100000 && (stopped_cpus & map) != 0)
- /* spin */ ;
-#ifdef DIAGNOSTIC
- if ((stopped_cpus & map) != 0)
- printf("Warning: CPUs 0x%x did not restart!\n",
- (~(stopped_cpus & map)) & map);
-#endif
-
- return 1;
+ map = PCPU_GET(other_cpus) & ~stopped_cpus;
+ if (map != 0)
+ ipi_selected(map, IPI_HARDCLOCK);
}
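
A worked example of the target-mask computation used by both forwarding functions, with hypothetical values for a four-CPU machine:

	/*
	 *  all_cpus     = 0x0f   CPUs 0-3 present
	 *  cpuid        = 1      this CPU took the clock interrupt
	 *  other_cpus   = 0x0d   i.e. all_cpus & ~(1 << cpuid)
	 *  stopped_cpus = 0x04   CPU 2 halted (e.g. stopped by the debugger)
	 *
	 *  map = other_cpus & ~stopped_cpus = 0x09  ->  IPI CPUs 0 and 3
	 */
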
-
#ifdef APIC_INTR_REORDER
/*
* Maintain mapping from softintr vector to isr bit in local apic.
@@ -2811,73 +2386,6 @@ set_lapic_isrloc(int intr, int vector)
#endif
/*
- * All-CPU rendezvous. CPUs are signalled, all execute the setup function
- * (if specified), rendezvous, execute the action function (if specified),
- * rendezvous again, execute the teardown function (if specified), and then
- * resume.
- *
- * Note that the supplied external functions _must_ be reentrant and aware
- * that they are running in parallel and in an unknown lock context.
- */
-static void (*smp_rv_setup_func)(void *arg);
-static void (*smp_rv_action_func)(void *arg);
-static void (*smp_rv_teardown_func)(void *arg);
-static void *smp_rv_func_arg;
-static volatile int smp_rv_waiters[2];
-
-void
-smp_rendezvous_action(void)
-{
- /* setup function */
- if (smp_rv_setup_func != NULL)
- smp_rv_setup_func(smp_rv_func_arg);
- /* spin on entry rendezvous */
- atomic_add_int(&smp_rv_waiters[0], 1);
- while (smp_rv_waiters[0] < mp_ncpus)
- ;
- /* action function */
- if (smp_rv_action_func != NULL)
- smp_rv_action_func(smp_rv_func_arg);
- /* spin on exit rendezvous */
- atomic_add_int(&smp_rv_waiters[1], 1);
- while (smp_rv_waiters[1] < mp_ncpus)
- ;
- /* teardown function */
- if (smp_rv_teardown_func != NULL)
- smp_rv_teardown_func(smp_rv_func_arg);
-}
-
-void
-smp_rendezvous(void (* setup_func)(void *),
- void (* action_func)(void *),
- void (* teardown_func)(void *),
- void *arg)
-{
-
- /* obtain rendezvous lock */
- mtx_lock_spin(&smp_rv_mtx);
-
- /* set static function pointers */
- smp_rv_setup_func = setup_func;
- smp_rv_action_func = action_func;
- smp_rv_teardown_func = teardown_func;
- smp_rv_func_arg = arg;
- smp_rv_waiters[0] = 0;
- smp_rv_waiters[1] = 0;
-
- /*
- * signal other processors, which will enter the IPI with interrupts off
- */
- ipi_all_but_self(IPI_RENDEZVOUS);
-
- /* call executor function */
- smp_rendezvous_action();
-
- /* release lock */
- mtx_unlock_spin(&smp_rv_mtx);
-}
-
-/*
* send an IPI to a set of cpus.
*/
void
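
The rendezvous machinery removed above now lives in the MI subr_smp.c. A minimal usage sketch, assuming the MI version keeps the removed semantics; do_flush() is a hypothetical action function and must be reentrant, since every CPU runs it concurrently:

	static void
	do_flush(void *arg)
	{
		invltlb();		/* flush this CPU's TLB */
	}

	/* no setup or teardown step needed for this consumer */
	smp_rendezvous(NULL, do_flush, NULL, NULL);
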
diff --git a/sys/amd64/include/pcpu.h b/sys/amd64/include/pcpu.h
index e713227..ef82a66 100644
--- a/sys/amd64/include/pcpu.h
+++ b/sys/amd64/include/pcpu.h
@@ -74,9 +74,6 @@ struct globaldata {
#endif
};
-SLIST_HEAD(cpuhead, globaldata);
-extern struct cpuhead cpuhead;
-
#ifdef SMP
/*
* This is the upper (0xff800000) address space layout that is per-cpu.
diff --git a/sys/amd64/include/smp.h b/sys/amd64/include/smp.h
index 9f9149e..f699e1f 100644
--- a/sys/amd64/include/smp.h
+++ b/sys/amd64/include/smp.h
@@ -41,7 +41,21 @@ extern int current_postcode; /** XXX currently in mp_machdep.c */
outb(0x80, current_postcode)
+#include <sys/bus.h> /* XXX */
#include <machine/apic.h>
+#include <machine/frame.h>
+#include <i386/isa/icu.h>
+#include <i386/isa/intr_machdep.h>
+
+/*
+ * Interprocessor interrupts for SMP.
+ */
+#define IPI_INVLTLB XINVLTLB_OFFSET
+#define IPI_RENDEZVOUS XRENDEZVOUS_OFFSET
+#define IPI_AST XCPUAST_OFFSET
+#define IPI_STOP XCPUSTOP_OFFSET
+#define IPI_HARDCLOCK XHARDCLOCK_OFFSET
+#define IPI_STATCLOCK XSTATCLOCK_OFFSET
/* global data in mpboot.s */
extern int bootMP_size;
@@ -49,18 +63,8 @@ extern int bootMP_size;
/* functions in mpboot.s */
void bootMP __P((void));
-/* global data in apic_vector.s */
-extern volatile u_int stopped_cpus;
-extern volatile u_int started_cpus;
-
-extern volatile u_int checkstate_probed_cpus;
-extern volatile u_int checkstate_need_ast;
-extern volatile u_int resched_cpus;
-extern void (*cpustop_restartfunc) __P((void));
-
/* global data in mp_machdep.c */
extern int bsp_apic_ready;
-extern int mp_ncpus;
extern int mp_naps;
extern int mp_nbusses;
extern int mp_napics;
@@ -81,14 +85,11 @@ struct apic_intmapinfo {
int redirindex;
};
extern struct apic_intmapinfo int_to_apicintpin[];
-extern u_int all_cpus;
extern struct pcb stoppcbs[];
/* functions in mp_machdep.c */
+void i386_mp_probe __P((void));
u_int mp_bootaddress __P((u_int));
-int mp_probe __P((void));
-void mp_start __P((void));
-void mp_announce __P((void));
u_int isa_apic_mask __P((u_int));
int isa_apic_irq __P((int));
int pci_apic_irq __P((int, int, int));
@@ -107,20 +108,17 @@ void revoke_apic_irq __P((int irq));
void bsp_apic_configure __P((void));
void init_secondary __P((void));
void smp_invltlb __P((void));
-int stop_cpus __P((u_int));
-int restart_cpus __P((u_int));
-void forward_statclock __P((int pscnt));
-void forward_hardclock __P((int pscnt));
-void forward_signal __P((struct proc *));
-void forward_roundrobin __P((void));
+void forward_statclock __P((void));
+void forwarded_statclock __P((struct trapframe frame));
+void forward_hardclock __P((void));
+void forwarded_hardclock __P((struct trapframe frame));
+void ipi_selected __P((u_int cpus, u_int ipi));
+void ipi_all __P((u_int ipi));
+void ipi_all_but_self __P((u_int ipi));
+void ipi_self __P((u_int ipi));
#ifdef APIC_INTR_REORDER
void set_lapic_isrloc __P((int, int));
#endif /* APIC_INTR_REORDER */
-void smp_rendezvous_action __P((void));
-void smp_rendezvous __P((void (*)(void *),
- void (*)(void *),
- void (*)(void *),
- void *arg));
/* global data in mpapic.c */
extern volatile lapic_t lapic;
@@ -147,8 +145,6 @@ void io_apic_write __P((int, int, u_int));
/* global data in init_smp.c */
extern int invltlb_ok;
-extern int smp_active;
-extern int smp_started;
extern volatile int smp_idle_loops;
#endif /* !LOCORE */
diff --git a/sys/amd64/isa/clock.c b/sys/amd64/isa/clock.c
index 9034f81..ce67390 100644
--- a/sys/amd64/isa/clock.c
+++ b/sys/amd64/isa/clock.c
@@ -211,6 +211,10 @@ clkintr(struct clockframe frame)
mtx_unlock_spin(&clock_lock);
}
timer_func(&frame);
+#ifdef SMP
+ if (timer_func == hardclock)
+ forward_hardclock();
+#endif
switch (timer0_state) {
case RELEASED:
@@ -253,6 +257,9 @@ clkintr(struct clockframe frame)
timer_func = hardclock;
timer0_state = RELEASED;
hardclock(&frame);
+#ifdef SMP
+ forward_hardclock();
+#endif
}
break;
}
@@ -374,8 +381,12 @@ release_timer2()
static void
rtcintr(struct clockframe frame)
{
- while (rtcin(RTC_INTR) & RTCIR_PERIOD)
+ while (rtcin(RTC_INTR) & RTCIR_PERIOD) {
statclock(&frame);
+#ifdef SMP
+ forward_statclock();
+#endif
+ }
}
#include "opt_ddb.h"
diff --git a/sys/amd64/isa/intr_machdep.h b/sys/amd64/isa/intr_machdep.h
index ed58437..6350524 100644
--- a/sys/amd64/isa/intr_machdep.h
+++ b/sys/amd64/isa/intr_machdep.h
@@ -107,10 +107,11 @@
#define XINVLTLB_OFFSET (ICU_OFFSET + 112)
/* inter-cpu clock handling */
-#define XCPUCHECKSTATE_OFFSET (ICU_OFFSET + 113)
+#define XHARDCLOCK_OFFSET (ICU_OFFSET + 113)
+#define XSTATCLOCK_OFFSET (ICU_OFFSET + 114)
/* inter-CPU rendezvous */
-#define XRENDEZVOUS_OFFSET (ICU_OFFSET + 114)
+#define XRENDEZVOUS_OFFSET (ICU_OFFSET + 115)
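
The net effect of this renumbering, summarized (offsets relative to ICU_OFFSET):

	offset   before            after
	------   ---------------   -----------
	112      XINVLTLB          XINVLTLB
	113      XCPUCHECKSTATE    XHARDCLOCK
	114      XRENDEZVOUS       XSTATCLOCK
	115      (unused)          XRENDEZVOUS
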
/* IPI to generate an additional software trap at the target CPU */
#define XCPUAST_OFFSET (ICU_OFFSET + 48)
@@ -173,7 +174,8 @@ inthand_t
inthand_t
Xinvltlb, /* TLB shootdowns */
- Xcpucheckstate, /* Check cpu state */
+ Xhardclock, /* Forward hardclock() */
+ Xstatclock, /* Forward statclock() */
Xcpuast, /* Additional software trap on other cpu */
Xcpustop, /* CPU stops & waits for another CPU to restart it */
Xspuriousint, /* handle APIC "spurious INTs" */