path: root/sys/i386/isa/apic_vector.s
author		jhb <jhb@FreeBSD.org>	2001-04-27 19:28:25 +0000
committer	jhb <jhb@FreeBSD.org>	2001-04-27 19:28:25 +0000
commit		8bfdafc9349392c2fe02f548d6ffe6db56626575 (patch)
tree		b44641a14ad9f8eb3b338e429775d3298b8946e2	/sys/i386/isa/apic_vector.s
parent		95c17411607d3bc160b63f97e94912bf27b24274 (diff)
download	FreeBSD-src-8bfdafc9349392c2fe02f548d6ffe6db56626575.zip
		FreeBSD-src-8bfdafc9349392c2fe02f548d6ffe6db56626575.tar.gz
Overhaul of the SMP code.  Several portions of the SMP kernel support have
been made machine independent and various other adjustments have been made
to support Alpha SMP.

- It splits the per-process portions of hardclock() and statclock() off
  into hardclock_process() and statclock_process() respectively.
  hardclock() and statclock() call the *_process() functions for the
  current process so that UP systems will run as before.  For SMP systems,
  it is simply necessary to ensure that all other processors execute the
  *_process() functions when the main clock functions are triggered on one
  CPU by an interrupt.  For the Alpha 4100, clock interrupts are delivered
  in a staggered broadcast fashion, so we simply call hardclock/statclock
  on the boot CPU and call the *_process() functions on the secondaries.
  For x86, we call statclock and hardclock as usual and then call
  forward_hardclock/statclock in the MD code to send an IPI that causes
  the APs to execute forwarded_hardclock/statclock, which then call the
  *_process() functions (see the first sketch below).
- forward_signal() and forward_roundrobin() have been reworked to be MI
  and to involve less hackery.  Now the CPU doing the forward sets any
  flags, etc. and sends a very simple IPI_AST to the other CPU(s).  AST
  IPIs now simply return so that they can execute ast() and don't bother
  with setting the astpending or needresched flags themselves.  This also
  removes the loop in forward_signal(), as sched_lock closes the race
  condition that the loop worked around (see the second sketch below).
- need_resched(), resched_wanted() and clear_resched() have been changed
  to take a process to act on rather than assuming curproc, so that they
  can be used to implement forward_roundrobin() as described above.
- Various other SMP variables have been moved to an MI subr_smp.c, and a
  new header sys/smp.h declares MI SMP variables and APIs.  The IPI APIs
  from machine/ipl.h have moved to machine/smp.h, which is included by
  sys/smp.h.
- The globaldata_register() and globaldata_find() functions as well as
  the SLIST of globaldata structures have become MI and moved into
  subr_smp.c.  Also, the globaldata list is only available if SMP support
  is compiled in.

Reviewed by:	jake, peter
Looked over by:	eivind
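
As a reading aid for the first bullet, here is a minimal, compile-only C
sketch of the new call flow.  It is not the committed code: the clock/trap
frame argument is simplified to a plain usermode flag and curproc stands in
for the per-CPU accessor.  It only shows the shape of how hardclock(),
hardclock_process(), the MD forward_hardclock() and the forwarded_hardclock()
entry reached from the Xhardclock IPI stub in this diff fit together.

    /*
     * Minimal, compile-only sketch; not the committed code.  The frame
     * argument and the curproc access are deliberately simplified.
     */
    struct proc;

    extern struct proc *curproc;        /* process running on this CPU (simplified) */

    void hardclock_process(struct proc *p, int usermode); /* per-process tick work */
    void forward_hardclock(void);       /* MD (x86): IPI the application processors */

    void
    hardclock(int usermode)
    {
            /* Per-process accounting for whatever is running on this CPU. */
            hardclock_process(curproc, usermode);

            /* ... global timekeeping and callout processing stay here ... */

    #ifdef SMP
            /*
             * x86: ask the APs to run their own per-process work; each AP
             * enters through the Xhardclock IPI handler added in this diff.
             */
            forward_hardclock();
    #endif
    }

    /* Reached from the Xhardclock IPI stub on a secondary CPU. */
    void
    forwarded_hardclock(int usermode)
    {
            hardclock_process(curproc, usermode);
    }

The statclock()/statclock_process()/forwarded_statclock() path follows the
same pattern, and on the Alpha 4100 the staggered broadcast clock interrupt
lets the secondaries call the *_process() functions directly, with no IPI.

The reworked forwarding from the second and third bullets can be pictured
with an equally rough second sketch.  Only the facts that need_resched() now
takes an explicit process and that a bare IPI_AST is sent come from the
commit message; remote_curproc(), other_cpus_mask() and ipi_selected() are
hypothetical stand-ins for the per-CPU accessors and the MD IPI primitive,
and the IPI_AST value is a placeholder.

    /* Compile-only sketch; the names flagged above are hypothetical. */
    struct proc;

    extern int ncpus;                           /* number of CPUs in the system */

    void need_resched(struct proc *p);          /* per the commit: acts on 'p' */
    struct proc *remote_curproc(int cpu);       /* hypothetical per-CPU accessor */
    unsigned int other_cpus_mask(void);         /* hypothetical: every CPU but us */
    void ipi_selected(unsigned int mask, int ipi); /* hypothetical IPI primitive */

    #define IPI_AST 0                           /* placeholder vector number */

    void
    forward_roundrobin(void)
    {
            int cpu;

            /* Mark each CPU's current process as needing a reschedule... */
            for (cpu = 0; cpu < ncpus; cpu++)
                    need_resched(remote_curproc(cpu));  /* real code skips itself */

            /*
             * ...then send a bare IPI_AST.  The trimmed-down Xcpuast handler
             * in this diff only acknowledges the interrupt and jumps to
             * doreti, which notices the flag and runs ast().
             */
            ipi_selected(other_cpus_mask(), IPI_AST);
    }

forward_signal() takes the same route: the sender sets the flags under
sched_lock and fires the IPI, which is why the old retry loop is no longer
needed.
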
Diffstat (limited to 'sys/i386/isa/apic_vector.s')
-rw-r--r--	sys/i386/isa/apic_vector.s	155
1 file changed, 43 insertions(+), 112 deletions(-)
diff --git a/sys/i386/isa/apic_vector.s b/sys/i386/isa/apic_vector.s
index 3f0521f..5c68f81 100644
--- a/sys/i386/isa/apic_vector.s
+++ b/sys/i386/isa/apic_vector.s
@@ -182,7 +182,6 @@ Xspuriousint:
iret
-
/*
* Handle TLB shootdowns.
*/
@@ -211,71 +210,61 @@ Xinvltlb:
popl %eax
iret
-
/*
- * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
- *
- * - Stores current cpu state in checkstate_cpustate[cpuid]
- * 0 == user, 1 == sys, 2 == intr
- * - Stores current process in checkstate_curproc[cpuid]
- *
- * - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
- *
- * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
+ * Forward hardclock to another CPU. Pushes a trapframe and calls
+ * forwarded_hardclock().
*/
-
.text
SUPERALIGN_TEXT
- .globl Xcpucheckstate
- .globl checkstate_cpustate
- .globl checkstate_curproc
- .globl checkstate_pc
-Xcpucheckstate:
- pushl %eax
- pushl %ebx
- pushl %ds /* save current data segment */
- pushl %fs
-
- movl $KDSEL, %eax
- mov %ax, %ds /* use KERNEL data segment */
+ .globl Xhardclock
+Xhardclock:
+ PUSH_FRAME
+ movl $KDSEL, %eax /* reload with kernel's data segment */
+ mov %ax, %ds
+ mov %ax, %es
movl $KPSEL, %eax
mov %ax, %fs
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
- movl $0, %ebx
- movl 20(%esp), %eax
- andl $3, %eax
- cmpl $3, %eax
- je 1f
- testl $PSL_VM, 24(%esp)
- jne 1f
- incl %ebx /* system or interrupt */
-1:
- movl PCPU(CPUID), %eax
- movl %ebx, checkstate_cpustate(,%eax,4)
- movl PCPU(CURPROC), %ebx
- movl %ebx, checkstate_curproc(,%eax,4)
-
- movl 16(%esp), %ebx
- movl %ebx, checkstate_pc(,%eax,4)
+ movl PCPU(CURPROC),%ebx
+ incl P_INTR_NESTING_LEVEL(%ebx)
+ call forwarded_hardclock
+ decl P_INTR_NESTING_LEVEL(%ebx)
+ MEXITCOUNT
+ jmp doreti
- lock /* checkstate_probed_cpus |= (1<<id) */
- btsl %eax, checkstate_probed_cpus
+/*
+ * Forward statclock to another CPU. Pushes a trapframe and calls
+ * forwarded_statclock().
+ */
+ .text
+ SUPERALIGN_TEXT
+ .globl Xstatclock
+Xstatclock:
+ PUSH_FRAME
+ movl $KDSEL, %eax /* reload with kernel's data segment */
+ mov %ax, %ds
+ mov %ax, %es
+ movl $KPSEL, %eax
+ mov %ax, %fs
- popl %fs
- popl %ds /* restore previous data segment */
- popl %ebx
- popl %eax
- iret
+ movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
+ FAKE_MCOUNT(13*4(%esp))
+ movl PCPU(CURPROC),%ebx
+ incl P_INTR_NESTING_LEVEL(%ebx)
+ call forwarded_statclock
+ decl P_INTR_NESTING_LEVEL(%ebx)
+ MEXITCOUNT
+ jmp doreti
/*
* Executed by a CPU when it receives an Xcpuast IPI from another CPU,
*
- * - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
- *
- * - We need a better method of triggering asts on other cpus.
+ * The other CPU has already executed aston() or need_resched() on our
+ * current process, so we simply need to ack the interrupt and return
+ * via doreti to run ast().
*/
.text
@@ -289,40 +278,12 @@ Xcpuast:
movl $KPSEL, %eax
mov %ax, %fs
- movl PCPU(CPUID), %eax
- lock /* checkstate_need_ast &= ~(1<<id) */
- btrl %eax, checkstate_need_ast
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
- lock
- btsl %eax, checkstate_pending_ast
- jc 1f
-
FAKE_MCOUNT(13*4(%esp))
- MTX_LOCK_SPIN(sched_lock, 0)
- movl PCPU(CURPROC),%ebx
- orl $PS_ASTPENDING, P_SFLAG(%ebx)
-
- movl PCPU(CPUID), %eax
- lock
- btrl %eax, checkstate_pending_ast
- lock
- btrl %eax, CNAME(resched_cpus)
- jnc 2f
- orl $PS_NEEDRESCHED, P_SFLAG(%ebx)
- lock
- incl CNAME(want_resched_cnt)
-2:
- MTX_UNLOCK_SPIN(sched_lock)
- lock
- incl CNAME(cpuast_cnt)
MEXITCOUNT
jmp doreti
-1:
- /* We are already in the process of delivering an ast for this CPU */
- POP_FRAME
- iret
/*
* Executed by a CPU when it receives an Xcpustop IPI from another CPU,
@@ -331,7 +292,6 @@ Xcpuast:
* - Waits for permission to restart.
* - Signals its restart.
*/
-
.text
SUPERALIGN_TEXT
.globl Xcpustop
@@ -357,20 +317,19 @@ Xcpustop:
pushl %eax
call CNAME(savectx) /* Save process context */
addl $4, %esp
-
movl PCPU(CPUID), %eax
lock
- btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
+ btsl %eax, CNAME(stopped_cpus) /* stopped_cpus |= (1<<id) */
1:
- btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
+ btl %eax, CNAME(started_cpus) /* while (!(started_cpus & (1<<id))) */
jnc 1b
lock
- btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
+ btrl %eax, CNAME(started_cpus) /* started_cpus &= ~(1<<id) */
lock
- btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
+ btrl %eax, CNAME(stopped_cpus) /* stopped_cpus &= ~(1<<id) */
test %eax, %eax
jnz 2f
@@ -492,34 +451,6 @@ _xhits:
.space (NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */
-/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
- .globl stopped_cpus, started_cpus
-stopped_cpus:
- .long 0
-started_cpus:
- .long 0
-
- .globl checkstate_probed_cpus
-checkstate_probed_cpus:
- .long 0
- .globl checkstate_need_ast
-checkstate_need_ast:
- .long 0
-checkstate_pending_ast:
- .long 0
- .globl CNAME(resched_cpus)
- .globl CNAME(want_resched_cnt)
- .globl CNAME(cpuast_cnt)
- .globl CNAME(cpustop_restartfunc)
-CNAME(resched_cpus):
- .long 0
-CNAME(want_resched_cnt):
- .long 0
-CNAME(cpuast_cnt):
- .long 0
-CNAME(cpustop_restartfunc):
- .long 0
-
.globl apic_pin_trigger
apic_pin_trigger:
.long 0