author		jake <jake@FreeBSD.org>		2000-12-13 09:23:53 +0000
committer	jake <jake@FreeBSD.org>		2000-12-13 09:23:53 +0000
commit		90d90d0c248fbcde27a7d443098c2bf514aaa199 (patch)
tree		48852dbfa3490f6dfe3145145122f26ee2991cd5 /sys/amd64
parent		46b93fb7883b21c43eeac8eb6f58b1288edb2518 (diff)
Introduce a new potentially cleaner interface for accessing per-cpu
variables from i386 assembly language. The syntax is PCPU(member), where
member is the capitalized name of the per-cpu variable without the gd_
prefix. Example: movl %eax,PCPU(CURPROC). The capitalization is due to
using the offsets generated by genassym rather than the symbols provided
by linking with globals.o.

asmacros.h is the wrong place for this, but it seemed as good a place as
any for now. The old implementation in asnames.h has not been removed
because it is still used to de-mangle the symbols used by the C variables
for the UP case.
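For reference, a rough sketch of what the new interface amounts to. The
macro bodies are copied from the asmacros.h hunk below; the expansion
comments are illustrative only, with GD_CURPROC standing for the
genassym-generated offset of the gd_curproc member:

	#ifdef SMP
	#define PCPU(member)	%fs:GD_ ## member		/* per-cpu area reached via %fs */
	#else
	#define PCPU(member)	CNAME(globaldata) + GD_ ## member	/* single global area on UP */
	#endif

		movl	%eax,PCPU(CURPROC)	/* SMP: movl %eax,%fs:GD_CURPROC */
						/* UP:  movl %eax,CNAME(globaldata)+GD_CURPROC */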
Diffstat (limited to 'sys/amd64')
-rw-r--r--	sys/amd64/amd64/apic_vector.S	31
-rw-r--r--	sys/amd64/amd64/cpu_switch.S	38
-rw-r--r--	sys/amd64/amd64/exception.S	20
-rw-r--r--	sys/amd64/amd64/exception.s	20
-rw-r--r--	sys/amd64/amd64/support.S	74
-rw-r--r--	sys/amd64/amd64/support.s	74
-rw-r--r--	sys/amd64/amd64/swtch.s	38
-rw-r--r--	sys/amd64/include/asmacros.h	6
-rw-r--r--	sys/amd64/isa/atpic_vector.S	4
-rw-r--r--	sys/amd64/isa/icu_vector.S	4
-rw-r--r--	sys/amd64/isa/icu_vector.s	4
11 files changed, 160 insertions(+), 153 deletions(-)
diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S
index 995cf21..a23ccff 100644
--- a/sys/amd64/amd64/apic_vector.S
+++ b/sys/amd64/amd64/apic_vector.S
@@ -48,7 +48,7 @@ IDTVEC(vec_name) ; \
movl $KPSEL,%eax ; \
mov %ax,%fs ; \
FAKE_MCOUNT(13*4(%esp)) ; \
- incb _intr_nesting_level ; \
+ incb PCPU(INTR_NESTING_LEVEL) ; \
pushl _intr_unit + (irq_num) * 4 ; \
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
addl $4, %esp ; \
@@ -136,7 +136,7 @@ log_intr_event:
addl $4, %esp
movl CNAME(apic_itrace_debugbuffer_idx), %ecx
andl $32767, %ecx
- movl _cpuid, %eax
+ movl PCPU(CPUID), %eax
shll $8, %eax
orl 8(%esp), %eax
movw %ax, CNAME(apic_itrace_debugbuffer)(,%ecx,2)
@@ -218,7 +218,7 @@ IDTVEC(vec_name) ; \
MASK_LEVEL_IRQ(irq_num) ; \
EOI_IRQ(irq_num) ; \
0: ; \
- incb _intr_nesting_level ; \
+ incb PCPU(INTR_NESTING_LEVEL) ; \
; \
/* entry point used by doreti_unpend for HWIs. */ \
__CONCAT(Xresume,irq_num): ; \
@@ -263,7 +263,7 @@ _Xinvltlb:
pushl %fs
movl $KPSEL, %eax
mov %ax, %fs
- movl _cpuid, %eax
+ movl PCPU(CPUID), %eax
popl %fs
ss
incl _xhits(,%eax,4)
@@ -321,10 +321,11 @@ _Xcpucheckstate:
jne 1f
incl %ebx /* system or interrupt */
1:
- movl _cpuid, %eax
+ movl PCPU(CPUID), %eax
movl %ebx, _checkstate_cpustate(,%eax,4)
- movl _curproc, %ebx
+ movl PCPU(CURPROC), %ebx
movl %ebx, _checkstate_curproc(,%eax,4)
+
movl 16(%esp), %ebx
movl %ebx, _checkstate_pc(,%eax,4)
@@ -358,7 +359,7 @@ _Xcpuast:
movl $KPSEL, %eax
mov %ax, %fs
- movl _cpuid, %eax
+ movl PCPU(CPUID), %eax
lock /* checkstate_need_ast &= ~(1<<id) */
btrl %eax, _checkstate_need_ast
movl $0, lapic_eoi /* End Of Interrupt to APIC */
@@ -369,17 +370,17 @@ _Xcpuast:
FAKE_MCOUNT(13*4(%esp))
- orl $AST_PENDING, _astpending /* XXX */
- incb _intr_nesting_level
+ orl $AST_PENDING, PCPU(ASTPENDING) /* XXX */
+ incb PCPU(INTR_NESTING_LEVEL)
sti
- movl _cpuid, %eax
+ movl PCPU(CPUID), %eax
lock
btrl %eax, _checkstate_pending_ast
lock
btrl %eax, CNAME(resched_cpus)
jnc 2f
- orl $AST_PENDING+AST_RESCHED, _astpending
+ orl $AST_PENDING+AST_RESCHED, PCPU(ASTPENDING)
lock
incl CNAME(want_resched_cnt)
2:
@@ -414,10 +415,10 @@ _Xforward_irq:
lock
incl CNAME(forward_irq_hitcnt)
- cmpb $4, _intr_nesting_level
+ cmpb $4, PCPU(INTR_NESTING_LEVEL)
jae 1f
- incb _intr_nesting_level
+ incb PCPU(INTR_NESTING_LEVEL)
sti
MEXITCOUNT
@@ -499,7 +500,7 @@ _Xcpustop:
movl $0, lapic_eoi /* End Of Interrupt to APIC */
- movl _cpuid, %eax
+ movl PCPU(CPUID), %eax
imull $PCB_SIZE, %eax
leal CNAME(stoppcbs)(%eax), %eax
pushl %eax
@@ -507,7 +508,7 @@ _Xcpustop:
addl $4, %esp
- movl _cpuid, %eax
+ movl PCPU(CPUID), %eax
lock
btsl %eax, _stopped_cpus /* stopped_cpus |= (1<<id) */
diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
index 2d315fc..c2cfa5e 100644
--- a/sys/amd64/amd64/cpu_switch.S
+++ b/sys/amd64/amd64/cpu_switch.S
@@ -82,7 +82,7 @@ ENTRY(cpu_throw)
ENTRY(cpu_switch)
/* switch to new process. first, save context as needed */
- movl _curproc,%ecx
+ movl PCPU(CURPROC),%ecx
/* if no process to save, don't bother */
testl %ecx,%ecx
@@ -95,7 +95,7 @@ ENTRY(cpu_switch)
#endif /* SMP */
movl P_VMSPACE(%ecx), %edx
#ifdef SMP
- movl _cpuid, %eax
+ movl PCPU(CPUID), %eax
#else
xorl %eax, %eax
#endif /* SMP */
@@ -142,7 +142,7 @@ ENTRY(cpu_switch)
#if NNPX > 0
/* have we used fp, and need a save? */
- cmpl %ecx,_npxproc
+ cmpl %ecx,PCPU(NPXPROC)
jne 1f
addl $PCB_SAVEFPU,%edx /* h/w bugs make saving complicated */
pushl %edx
@@ -158,10 +158,10 @@ sw1:
/* Stop scheduling if smp_active goes zero and we are not BSP */
cmpl $0,_smp_active
jne 1f
- cmpl $0,_cpuid
+ cmpl $0,PCPU(CPUID)
je 1f
- movl _idleproc, %eax
+ movl PCPU(IDLEPROC), %eax
jmp sw1b
1:
#endif
@@ -181,7 +181,7 @@ sw1b:
movl %eax,%ecx
xorl %eax,%eax
- andl $~AST_RESCHED,_astpending
+ andl $~AST_RESCHED,PCPU(ASTPENDING)
#ifdef INVARIANTS
cmpb $SRUN,P_STAT(%ecx)
@@ -206,7 +206,7 @@ sw1b:
4:
#ifdef SMP
- movl _cpuid, %esi
+ movl PCPU(CPUID), %esi
#else
xorl %esi, %esi
#endif
@@ -220,19 +220,19 @@ sw1b:
/* update common_tss.tss_esp0 pointer */
movl %edx, %ebx /* pcb */
addl $(UPAGES * PAGE_SIZE - 16), %ebx
- movl %ebx, _common_tss + TSS_ESP0
+ movl %ebx, PCPU(COMMON_TSS) + TSS_ESP0
btrl %esi, _private_tss
jae 3f
#ifdef SMP
- movl $gd_common_tssd, %edi
+ movl $GD_COMMON_TSSD, %edi
addl %fs:0, %edi
#else
- movl $_common_tssd, %edi
+ movl $PCPU(COMMON_TSSD), %edi
#endif
2:
/* move correct tss descriptor into GDT slot, then reload tr */
- movl _tss_gdt, %ebx /* entry in GDT */
+ movl PCPU(TSS_GDT), %ebx /* entry in GDT */
movl 0(%edi), %eax
movl %eax, 0(%ebx)
movl 4(%edi), %eax
@@ -242,7 +242,7 @@ sw1b:
3:
movl P_VMSPACE(%ecx), %ebx
#ifdef SMP
- movl _cpuid, %eax
+ movl PCPU(CPUID), %eax
#else
xorl %eax, %eax
#endif
@@ -265,11 +265,11 @@ sw1b:
andl $~APIC_TPR_PRIO, lapic_tpr
#endif /** CHEAP_TPR */
#endif /** GRAB_LOPRIO */
- movl _cpuid,%eax
+ movl PCPU(CPUID),%eax
movb %al, P_ONCPU(%ecx)
#endif /* SMP */
- movl %edx, _curpcb
- movl %ecx, _curproc /* into next process */
+ movl %edx, PCPU(CURPCB)
+ movl %ecx, PCPU(CURPROC) /* into next process */
#ifdef SMP
/* XXX FIXME: we should be restoring the local APIC TPR */
@@ -279,10 +279,10 @@ sw1b:
cmpl $0, PCB_USERLDT(%edx)
jnz 1f
movl __default_ldt,%eax
- cmpl _currentldt,%eax
+ cmpl PCPU(CURRENTLDT),%eax
je 2f
lldt __default_ldt
- movl %eax,_currentldt
+ movl %eax,PCPU(CURRENTLDT)
jmp 2f
1: pushl %edx
call _set_user_ldt
@@ -320,7 +320,7 @@ cpu_switch_load_gs:
movl PCB_SCHEDNEST(%edx),%eax
movl %eax,_sched_lock+MTX_RECURSE
- movl _curproc,%eax
+ movl PCPU(CURPROC),%eax
movl %eax,_sched_lock+MTX_LOCK
ret
@@ -376,7 +376,7 @@ ENTRY(savectx)
* have to handle h/w bugs for reloading. We used to lose the
* parent's npx state for forks by forgetting to reload.
*/
- movl _npxproc,%eax
+ movl PCPU(NPXPROC),%eax
testl %eax,%eax
je 1f
diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S
index 99584a6..6ba916f 100644
--- a/sys/amd64/amd64/exception.S
+++ b/sys/amd64/amd64/exception.S
@@ -184,7 +184,7 @@ IDTVEC(fpu)
call __mtx_exit_giant_def
addl $4,%esp
- incb _intr_nesting_level
+ incb PCPU(INTR_NESTING_LEVEL)
MEXITCOUNT
jmp _doreti
#else /* NNPX > 0 */
@@ -223,7 +223,7 @@ calltrap:
/*
* Return via _doreti to handle ASTs.
*/
- incb _intr_nesting_level
+ incb PCPU(INTR_NESTING_LEVEL)
MEXITCOUNT
jmp _doreti
@@ -262,9 +262,9 @@ IDTVEC(syscall)
call _syscall2
MEXITCOUNT
cli /* atomic astpending access */
- cmpl $0,_astpending /* AST pending? */
+ cmpl $0,PCPU(ASTPENDING) /* AST pending? */
je doreti_syscall_ret /* no, get out of here */
- movb $1,_intr_nesting_level
+ movb $1,PCPU(INTR_NESTING_LEVEL)
jmp _doreti
/*
@@ -294,9 +294,9 @@ IDTVEC(int0x80_syscall)
call _syscall2
MEXITCOUNT
cli /* atomic astpending access */
- cmpl $0,_astpending /* AST pending? */
+ cmpl $0,PCPU(ASTPENDING) /* AST pending? */
je doreti_syscall_ret /* no, get out of here */
- movb $1,_intr_nesting_level
+ movb $1,PCPU(INTR_NESTING_LEVEL)
jmp _doreti
ENTRY(fork_trampoline)
@@ -306,15 +306,15 @@ ENTRY(fork_trampoline)
are enabled */
#ifdef SMP
- cmpl $0,_switchtime
+ cmpl $0,PCPU(SWITCHTIME)
jne 1f
- movl $gd_switchtime,%eax
+ movl $GD_SWITCHTIME,%eax
addl %fs:0,%eax
pushl %eax
call _microuptime
popl %edx
movl _ticks,%eax
- movl %eax,_switchticks
+ movl %eax,PCPU(SWITCHTICKS)
1:
#endif
@@ -331,7 +331,7 @@ ENTRY(fork_trampoline)
/*
* Return via _doreti to handle ASTs.
*/
- movb $1,_intr_nesting_level
+ movb $1,PCPU(INTR_NESTING_LEVEL)
MEXITCOUNT
jmp _doreti
diff --git a/sys/amd64/amd64/exception.s b/sys/amd64/amd64/exception.s
index 99584a6..6ba916f 100644
--- a/sys/amd64/amd64/exception.s
+++ b/sys/amd64/amd64/exception.s
@@ -184,7 +184,7 @@ IDTVEC(fpu)
call __mtx_exit_giant_def
addl $4,%esp
- incb _intr_nesting_level
+ incb PCPU(INTR_NESTING_LEVEL)
MEXITCOUNT
jmp _doreti
#else /* NNPX > 0 */
@@ -223,7 +223,7 @@ calltrap:
/*
* Return via _doreti to handle ASTs.
*/
- incb _intr_nesting_level
+ incb PCPU(INTR_NESTING_LEVEL)
MEXITCOUNT
jmp _doreti
@@ -262,9 +262,9 @@ IDTVEC(syscall)
call _syscall2
MEXITCOUNT
cli /* atomic astpending access */
- cmpl $0,_astpending /* AST pending? */
+ cmpl $0,PCPU(ASTPENDING) /* AST pending? */
je doreti_syscall_ret /* no, get out of here */
- movb $1,_intr_nesting_level
+ movb $1,PCPU(INTR_NESTING_LEVEL)
jmp _doreti
/*
@@ -294,9 +294,9 @@ IDTVEC(int0x80_syscall)
call _syscall2
MEXITCOUNT
cli /* atomic astpending access */
- cmpl $0,_astpending /* AST pending? */
+ cmpl $0,PCPU(ASTPENDING) /* AST pending? */
je doreti_syscall_ret /* no, get out of here */
- movb $1,_intr_nesting_level
+ movb $1,PCPU(INTR_NESTING_LEVEL)
jmp _doreti
ENTRY(fork_trampoline)
@@ -306,15 +306,15 @@ ENTRY(fork_trampoline)
are enabled */
#ifdef SMP
- cmpl $0,_switchtime
+ cmpl $0,PCPU(SWITCHTIME)
jne 1f
- movl $gd_switchtime,%eax
+ movl $GD_SWITCHTIME,%eax
addl %fs:0,%eax
pushl %eax
call _microuptime
popl %edx
movl _ticks,%eax
- movl %eax,_switchticks
+ movl %eax,PCPU(SWITCHTICKS)
1:
#endif
@@ -331,7 +331,7 @@ ENTRY(fork_trampoline)
/*
* Return via _doreti to handle ASTs.
*/
- movb $1,_intr_nesting_level
+ movb $1,PCPU(INTR_NESTING_LEVEL)
MEXITCOUNT
jmp _doreti
diff --git a/sys/amd64/amd64/support.S b/sys/amd64/amd64/support.S
index cb3a144..e240de4 100644
--- a/sys/amd64/amd64/support.S
+++ b/sys/amd64/amd64/support.S
@@ -241,7 +241,7 @@ ENTRY(i586_bzero)
* method. CR0_TS must be preserved although it is very likely to
* always end up as clear.
*/
- cmpl $0,_npxproc
+ cmpl $0,PCPU(NPXPROC)
je i586_bz1
cmpl $256+184,%ecx /* empirical; not quite 2*108 more */
jb intreg_i586_bzero
@@ -293,7 +293,7 @@ fpureg_i586_bzero_loop:
cmpl $8,%ecx
jae fpureg_i586_bzero_loop
- cmpl $0,_npxproc
+ cmpl $0,PCPU(NPXPROC)
je i586_bz3
frstor 0(%esp)
addl $108,%esp
@@ -501,7 +501,7 @@ ENTRY(i586_bcopy)
sarb $1,kernel_fpu_lock
jc small_i586_bcopy
- cmpl $0,_npxproc
+ cmpl $0,PCPU(NPXPROC)
je i586_bc1
smsw %dx
clts
@@ -572,7 +572,7 @@ large_i586_bcopy_loop:
cmpl $64,%ecx
jae 4b
- cmpl $0,_npxproc
+ cmpl $0,PCPU(NPXPROC)
je i586_bc2
frstor 0(%esp)
addl $108,%esp
@@ -670,7 +670,7 @@ ENTRY(copyout)
jmp *_copyout_vector
ENTRY(generic_copyout)
- movl _curpcb,%eax
+ movl PCPU(CURPCB),%eax
movl $copyout_fault,PCB_ONFAULT(%eax)
pushl %esi
pushl %edi
@@ -781,7 +781,7 @@ done_copyout:
popl %edi
popl %esi
xorl %eax,%eax
- movl _curpcb,%edx
+ movl PCPU(CURPCB),%edx
movl %eax,PCB_ONFAULT(%edx)
ret
@@ -790,7 +790,7 @@ copyout_fault:
popl %ebx
popl %edi
popl %esi
- movl _curpcb,%edx
+ movl PCPU(CURPCB),%edx
movl $0,PCB_ONFAULT(%edx)
movl $EFAULT,%eax
ret
@@ -800,7 +800,7 @@ ENTRY(i586_copyout)
/*
* Duplicated from generic_copyout. Could be done a bit better.
*/
- movl _curpcb,%eax
+ movl PCPU(CURPCB),%eax
movl $copyout_fault,PCB_ONFAULT(%eax)
pushl %esi
pushl %edi
@@ -857,7 +857,7 @@ ENTRY(copyin)
jmp *_copyin_vector
ENTRY(generic_copyin)
- movl _curpcb,%eax
+ movl PCPU(CURPCB),%eax
movl $copyin_fault,PCB_ONFAULT(%eax)
pushl %esi
pushl %edi
@@ -895,7 +895,7 @@ done_copyin:
popl %edi
popl %esi
xorl %eax,%eax
- movl _curpcb,%edx
+ movl PCPU(CURPCB),%edx
movl %eax,PCB_ONFAULT(%edx)
ret
@@ -903,7 +903,7 @@ done_copyin:
copyin_fault:
popl %edi
popl %esi
- movl _curpcb,%edx
+ movl PCPU(CURPCB),%edx
movl $0,PCB_ONFAULT(%edx)
movl $EFAULT,%eax
ret
@@ -913,7 +913,7 @@ ENTRY(i586_copyin)
/*
* Duplicated from generic_copyin. Could be done a bit better.
*/
- movl _curpcb,%eax
+ movl PCPU(CURPCB),%eax
movl $copyin_fault,PCB_ONFAULT(%eax)
pushl %esi
pushl %edi
@@ -967,13 +967,13 @@ ENTRY(fastmove)
jnz fastmove_tail
/* if (npxproc != NULL) { */
- cmpl $0,_npxproc
+ cmpl $0,PCPU(NPXPROC)
je 6f
/* fnsave(&curpcb->pcb_savefpu); */
- movl _curpcb,%eax
+ movl PCPU(CURPCB),%eax
fnsave PCB_SAVEFPU(%eax)
/* npxproc = NULL; */
- movl $0,_npxproc
+ movl $0,PCPU(NPXPROC)
/* } */
6:
/* now we own the FPU. */
@@ -990,7 +990,7 @@ ENTRY(fastmove)
movl %esi,-8(%ebp)
movl %edi,-4(%ebp)
movl %esp,%edi
- movl _curpcb,%esi
+ movl PCPU(CURPCB),%esi
addl $PCB_SAVEFPU,%esi
cld
movl $PCB_SAVEFPU_SIZE>>2,%ecx
@@ -1002,9 +1002,9 @@ ENTRY(fastmove)
/* stop_emulating(); */
clts
/* npxproc = curproc; */
- movl _curproc,%eax
- movl %eax,_npxproc
- movl _curpcb,%eax
+ movl PCPU(CURPROC),%eax
+ movl %eax,PCPU(NPXPROC)
+ movl PCPU(CURPCB),%eax
movl $fastmove_fault,PCB_ONFAULT(%eax)
4:
movl %ecx,-12(%ebp)
@@ -1066,7 +1066,7 @@ fastmove_loop:
movl %ecx,-12(%ebp)
movl %esi,-8(%ebp)
movl %edi,-4(%ebp)
- movl _curpcb,%edi
+ movl PCPU(CURPCB),%edi
addl $PCB_SAVEFPU,%edi
movl %esp,%esi
cld
@@ -1082,11 +1082,11 @@ fastmove_loop:
orb $CR0_TS,%al
lmsw %ax
/* npxproc = NULL; */
- movl $0,_npxproc
+ movl $0,PCPU(NPXPROC)
ALIGN_TEXT
fastmove_tail:
- movl _curpcb,%eax
+ movl PCPU(CURPCB),%eax
movl $fastmove_tail_fault,PCB_ONFAULT(%eax)
movb %cl,%al
@@ -1105,7 +1105,7 @@ fastmove_tail:
ALIGN_TEXT
fastmove_fault:
- movl _curpcb,%edi
+ movl PCPU(CURPCB),%edi
addl $PCB_SAVEFPU,%edi
movl %esp,%esi
cld
@@ -1116,7 +1116,7 @@ fastmove_fault:
smsw %ax
orb $CR0_TS,%al
lmsw %ax
- movl $0,_npxproc
+ movl $0,PCPU(NPXPROC)
fastmove_tail_fault:
movl %ebp,%esp
@@ -1125,7 +1125,7 @@ fastmove_tail_fault:
popl %ebx
popl %edi
popl %esi
- movl _curpcb,%edx
+ movl PCPU(CURPCB),%edx
movl $0,PCB_ONFAULT(%edx)
movl $EFAULT,%eax
ret
@@ -1137,7 +1137,7 @@ fastmove_tail_fault:
* Fetch a byte (sword, word) from user memory
*/
ENTRY(fuword)
- movl _curpcb,%ecx
+ movl PCPU(CURPCB),%ecx
movl $fusufault,PCB_ONFAULT(%ecx)
movl 4(%esp),%edx /* from */
@@ -1163,7 +1163,7 @@ ENTRY(fuswintr)
* fusword - MP SAFE
*/
ENTRY(fusword)
- movl _curpcb,%ecx
+ movl PCPU(CURPCB),%ecx
movl $fusufault,PCB_ONFAULT(%ecx)
movl 4(%esp),%edx
@@ -1178,7 +1178,7 @@ ENTRY(fusword)
* fubyte - MP SAFE
*/
ENTRY(fubyte)
- movl _curpcb,%ecx
+ movl PCPU(CURPCB),%ecx
movl $fusufault,PCB_ONFAULT(%ecx)
movl 4(%esp),%edx
@@ -1191,7 +1191,7 @@ ENTRY(fubyte)
ALIGN_TEXT
fusufault:
- movl _curpcb,%ecx
+ movl PCPU(CURPCB),%ecx
xorl %eax,%eax
movl %eax,PCB_ONFAULT(%ecx)
decl %eax
@@ -1203,7 +1203,7 @@ fusufault:
* Write a byte (word, longword) to user memory
*/
ENTRY(suword)
- movl _curpcb,%ecx
+ movl PCPU(CURPCB),%ecx
movl $fusufault,PCB_ONFAULT(%ecx)
movl 4(%esp),%edx
@@ -1247,7 +1247,7 @@ ENTRY(suword)
movl 8(%esp),%eax
movl %eax,(%edx)
xorl %eax,%eax
- movl _curpcb,%ecx
+ movl PCPU(CURPCB),%ecx
movl %eax,PCB_ONFAULT(%ecx)
ret
@@ -1255,7 +1255,7 @@ ENTRY(suword)
* susword - MP SAFE (if not I386_CPU)
*/
ENTRY(susword)
- movl _curpcb,%ecx
+ movl PCPU(CURPCB),%ecx
movl $fusufault,PCB_ONFAULT(%ecx)
movl 4(%esp),%edx
@@ -1299,7 +1299,7 @@ ENTRY(susword)
movw 8(%esp),%ax
movw %ax,(%edx)
xorl %eax,%eax
- movl _curpcb,%ecx /* restore trashed register */
+ movl PCPU(CURPCB),%ecx /* restore trashed register */
movl %eax,PCB_ONFAULT(%ecx)
ret
@@ -1308,7 +1308,7 @@ ENTRY(susword)
*/
ALTENTRY(suibyte)
ENTRY(subyte)
- movl _curpcb,%ecx
+ movl PCPU(CURPCB),%ecx
movl $fusufault,PCB_ONFAULT(%ecx)
movl 4(%esp),%edx
@@ -1351,7 +1351,7 @@ ENTRY(subyte)
movb 8(%esp),%al
movb %al,(%edx)
xorl %eax,%eax
- movl _curpcb,%ecx /* restore trashed register */
+ movl PCPU(CURPCB),%ecx /* restore trashed register */
movl %eax,PCB_ONFAULT(%ecx)
ret
@@ -1366,7 +1366,7 @@ ENTRY(subyte)
ENTRY(copyinstr)
pushl %esi
pushl %edi
- movl _curpcb,%ecx
+ movl PCPU(CURPCB),%ecx
movl $cpystrflt,PCB_ONFAULT(%ecx)
movl 12(%esp),%esi /* %esi = from */
@@ -1414,7 +1414,7 @@ cpystrflt:
cpystrflt_x:
/* set *lencopied and return %eax */
- movl _curpcb,%ecx
+ movl PCPU(CURPCB),%ecx
movl $0,PCB_ONFAULT(%ecx)
movl 20(%esp),%ecx
subl %edx,%ecx
diff --git a/sys/amd64/amd64/support.s b/sys/amd64/amd64/support.s
index cb3a144..e240de4 100644
--- a/sys/amd64/amd64/support.s
+++ b/sys/amd64/amd64/support.s
@@ -241,7 +241,7 @@ ENTRY(i586_bzero)
* method. CR0_TS must be preserved although it is very likely to
* always end up as clear.
*/
- cmpl $0,_npxproc
+ cmpl $0,PCPU(NPXPROC)
je i586_bz1
cmpl $256+184,%ecx /* empirical; not quite 2*108 more */
jb intreg_i586_bzero
@@ -293,7 +293,7 @@ fpureg_i586_bzero_loop:
cmpl $8,%ecx
jae fpureg_i586_bzero_loop
- cmpl $0,_npxproc
+ cmpl $0,PCPU(NPXPROC)
je i586_bz3
frstor 0(%esp)
addl $108,%esp
@@ -501,7 +501,7 @@ ENTRY(i586_bcopy)
sarb $1,kernel_fpu_lock
jc small_i586_bcopy
- cmpl $0,_npxproc
+ cmpl $0,PCPU(NPXPROC)
je i586_bc1
smsw %dx
clts
@@ -572,7 +572,7 @@ large_i586_bcopy_loop:
cmpl $64,%ecx
jae 4b
- cmpl $0,_npxproc
+ cmpl $0,PCPU(NPXPROC)
je i586_bc2
frstor 0(%esp)
addl $108,%esp
@@ -670,7 +670,7 @@ ENTRY(copyout)
jmp *_copyout_vector
ENTRY(generic_copyout)
- movl _curpcb,%eax
+ movl PCPU(CURPCB),%eax
movl $copyout_fault,PCB_ONFAULT(%eax)
pushl %esi
pushl %edi
@@ -781,7 +781,7 @@ done_copyout:
popl %edi
popl %esi
xorl %eax,%eax
- movl _curpcb,%edx
+ movl PCPU(CURPCB),%edx
movl %eax,PCB_ONFAULT(%edx)
ret
@@ -790,7 +790,7 @@ copyout_fault:
popl %ebx
popl %edi
popl %esi
- movl _curpcb,%edx
+ movl PCPU(CURPCB),%edx
movl $0,PCB_ONFAULT(%edx)
movl $EFAULT,%eax
ret
@@ -800,7 +800,7 @@ ENTRY(i586_copyout)
/*
* Duplicated from generic_copyout. Could be done a bit better.
*/
- movl _curpcb,%eax
+ movl PCPU(CURPCB),%eax
movl $copyout_fault,PCB_ONFAULT(%eax)
pushl %esi
pushl %edi
@@ -857,7 +857,7 @@ ENTRY(copyin)
jmp *_copyin_vector
ENTRY(generic_copyin)
- movl _curpcb,%eax
+ movl PCPU(CURPCB),%eax
movl $copyin_fault,PCB_ONFAULT(%eax)
pushl %esi
pushl %edi
@@ -895,7 +895,7 @@ done_copyin:
popl %edi
popl %esi
xorl %eax,%eax
- movl _curpcb,%edx
+ movl PCPU(CURPCB),%edx
movl %eax,PCB_ONFAULT(%edx)
ret
@@ -903,7 +903,7 @@ done_copyin:
copyin_fault:
popl %edi
popl %esi
- movl _curpcb,%edx
+ movl PCPU(CURPCB),%edx
movl $0,PCB_ONFAULT(%edx)
movl $EFAULT,%eax
ret
@@ -913,7 +913,7 @@ ENTRY(i586_copyin)
/*
* Duplicated from generic_copyin. Could be done a bit better.
*/
- movl _curpcb,%eax
+ movl PCPU(CURPCB),%eax
movl $copyin_fault,PCB_ONFAULT(%eax)
pushl %esi
pushl %edi
@@ -967,13 +967,13 @@ ENTRY(fastmove)
jnz fastmove_tail
/* if (npxproc != NULL) { */
- cmpl $0,_npxproc
+ cmpl $0,PCPU(NPXPROC)
je 6f
/* fnsave(&curpcb->pcb_savefpu); */
- movl _curpcb,%eax
+ movl PCPU(CURPCB),%eax
fnsave PCB_SAVEFPU(%eax)
/* npxproc = NULL; */
- movl $0,_npxproc
+ movl $0,PCPU(NPXPROC)
/* } */
6:
/* now we own the FPU. */
@@ -990,7 +990,7 @@ ENTRY(fastmove)
movl %esi,-8(%ebp)
movl %edi,-4(%ebp)
movl %esp,%edi
- movl _curpcb,%esi
+ movl PCPU(CURPCB),%esi
addl $PCB_SAVEFPU,%esi
cld
movl $PCB_SAVEFPU_SIZE>>2,%ecx
@@ -1002,9 +1002,9 @@ ENTRY(fastmove)
/* stop_emulating(); */
clts
/* npxproc = curproc; */
- movl _curproc,%eax
- movl %eax,_npxproc
- movl _curpcb,%eax
+ movl PCPU(CURPROC),%eax
+ movl %eax,PCPU(NPXPROC)
+ movl PCPU(CURPCB),%eax
movl $fastmove_fault,PCB_ONFAULT(%eax)
4:
movl %ecx,-12(%ebp)
@@ -1066,7 +1066,7 @@ fastmove_loop:
movl %ecx,-12(%ebp)
movl %esi,-8(%ebp)
movl %edi,-4(%ebp)
- movl _curpcb,%edi
+ movl PCPU(CURPCB),%edi
addl $PCB_SAVEFPU,%edi
movl %esp,%esi
cld
@@ -1082,11 +1082,11 @@ fastmove_loop:
orb $CR0_TS,%al
lmsw %ax
/* npxproc = NULL; */
- movl $0,_npxproc
+ movl $0,PCPU(NPXPROC)
ALIGN_TEXT
fastmove_tail:
- movl _curpcb,%eax
+ movl PCPU(CURPCB),%eax
movl $fastmove_tail_fault,PCB_ONFAULT(%eax)
movb %cl,%al
@@ -1105,7 +1105,7 @@ fastmove_tail:
ALIGN_TEXT
fastmove_fault:
- movl _curpcb,%edi
+ movl PCPU(CURPCB),%edi
addl $PCB_SAVEFPU,%edi
movl %esp,%esi
cld
@@ -1116,7 +1116,7 @@ fastmove_fault:
smsw %ax
orb $CR0_TS,%al
lmsw %ax
- movl $0,_npxproc
+ movl $0,PCPU(NPXPROC)
fastmove_tail_fault:
movl %ebp,%esp
@@ -1125,7 +1125,7 @@ fastmove_tail_fault:
popl %ebx
popl %edi
popl %esi
- movl _curpcb,%edx
+ movl PCPU(CURPCB),%edx
movl $0,PCB_ONFAULT(%edx)
movl $EFAULT,%eax
ret
@@ -1137,7 +1137,7 @@ fastmove_tail_fault:
* Fetch a byte (sword, word) from user memory
*/
ENTRY(fuword)
- movl _curpcb,%ecx
+ movl PCPU(CURPCB),%ecx
movl $fusufault,PCB_ONFAULT(%ecx)
movl 4(%esp),%edx /* from */
@@ -1163,7 +1163,7 @@ ENTRY(fuswintr)
* fusword - MP SAFE
*/
ENTRY(fusword)
- movl _curpcb,%ecx
+ movl PCPU(CURPCB),%ecx
movl $fusufault,PCB_ONFAULT(%ecx)
movl 4(%esp),%edx
@@ -1178,7 +1178,7 @@ ENTRY(fusword)
* fubyte - MP SAFE
*/
ENTRY(fubyte)
- movl _curpcb,%ecx
+ movl PCPU(CURPCB),%ecx
movl $fusufault,PCB_ONFAULT(%ecx)
movl 4(%esp),%edx
@@ -1191,7 +1191,7 @@ ENTRY(fubyte)
ALIGN_TEXT
fusufault:
- movl _curpcb,%ecx
+ movl PCPU(CURPCB),%ecx
xorl %eax,%eax
movl %eax,PCB_ONFAULT(%ecx)
decl %eax
@@ -1203,7 +1203,7 @@ fusufault:
* Write a byte (word, longword) to user memory
*/
ENTRY(suword)
- movl _curpcb,%ecx
+ movl PCPU(CURPCB),%ecx
movl $fusufault,PCB_ONFAULT(%ecx)
movl 4(%esp),%edx
@@ -1247,7 +1247,7 @@ ENTRY(suword)
movl 8(%esp),%eax
movl %eax,(%edx)
xorl %eax,%eax
- movl _curpcb,%ecx
+ movl PCPU(CURPCB),%ecx
movl %eax,PCB_ONFAULT(%ecx)
ret
@@ -1255,7 +1255,7 @@ ENTRY(suword)
* susword - MP SAFE (if not I386_CPU)
*/
ENTRY(susword)
- movl _curpcb,%ecx
+ movl PCPU(CURPCB),%ecx
movl $fusufault,PCB_ONFAULT(%ecx)
movl 4(%esp),%edx
@@ -1299,7 +1299,7 @@ ENTRY(susword)
movw 8(%esp),%ax
movw %ax,(%edx)
xorl %eax,%eax
- movl _curpcb,%ecx /* restore trashed register */
+ movl PCPU(CURPCB),%ecx /* restore trashed register */
movl %eax,PCB_ONFAULT(%ecx)
ret
@@ -1308,7 +1308,7 @@ ENTRY(susword)
*/
ALTENTRY(suibyte)
ENTRY(subyte)
- movl _curpcb,%ecx
+ movl PCPU(CURPCB),%ecx
movl $fusufault,PCB_ONFAULT(%ecx)
movl 4(%esp),%edx
@@ -1351,7 +1351,7 @@ ENTRY(subyte)
movb 8(%esp),%al
movb %al,(%edx)
xorl %eax,%eax
- movl _curpcb,%ecx /* restore trashed register */
+ movl PCPU(CURPCB),%ecx /* restore trashed register */
movl %eax,PCB_ONFAULT(%ecx)
ret
@@ -1366,7 +1366,7 @@ ENTRY(subyte)
ENTRY(copyinstr)
pushl %esi
pushl %edi
- movl _curpcb,%ecx
+ movl PCPU(CURPCB),%ecx
movl $cpystrflt,PCB_ONFAULT(%ecx)
movl 12(%esp),%esi /* %esi = from */
@@ -1414,7 +1414,7 @@ cpystrflt:
cpystrflt_x:
/* set *lencopied and return %eax */
- movl _curpcb,%ecx
+ movl PCPU(CURPCB),%ecx
movl $0,PCB_ONFAULT(%ecx)
movl 20(%esp),%ecx
subl %edx,%ecx
diff --git a/sys/amd64/amd64/swtch.s b/sys/amd64/amd64/swtch.s
index 2d315fc..c2cfa5e 100644
--- a/sys/amd64/amd64/swtch.s
+++ b/sys/amd64/amd64/swtch.s
@@ -82,7 +82,7 @@ ENTRY(cpu_throw)
ENTRY(cpu_switch)
/* switch to new process. first, save context as needed */
- movl _curproc,%ecx
+ movl PCPU(CURPROC),%ecx
/* if no process to save, don't bother */
testl %ecx,%ecx
@@ -95,7 +95,7 @@ ENTRY(cpu_switch)
#endif /* SMP */
movl P_VMSPACE(%ecx), %edx
#ifdef SMP
- movl _cpuid, %eax
+ movl PCPU(CPUID), %eax
#else
xorl %eax, %eax
#endif /* SMP */
@@ -142,7 +142,7 @@ ENTRY(cpu_switch)
#if NNPX > 0
/* have we used fp, and need a save? */
- cmpl %ecx,_npxproc
+ cmpl %ecx,PCPU(NPXPROC)
jne 1f
addl $PCB_SAVEFPU,%edx /* h/w bugs make saving complicated */
pushl %edx
@@ -158,10 +158,10 @@ sw1:
/* Stop scheduling if smp_active goes zero and we are not BSP */
cmpl $0,_smp_active
jne 1f
- cmpl $0,_cpuid
+ cmpl $0,PCPU(CPUID)
je 1f
- movl _idleproc, %eax
+ movl PCPU(IDLEPROC), %eax
jmp sw1b
1:
#endif
@@ -181,7 +181,7 @@ sw1b:
movl %eax,%ecx
xorl %eax,%eax
- andl $~AST_RESCHED,_astpending
+ andl $~AST_RESCHED,PCPU(ASTPENDING)
#ifdef INVARIANTS
cmpb $SRUN,P_STAT(%ecx)
@@ -206,7 +206,7 @@ sw1b:
4:
#ifdef SMP
- movl _cpuid, %esi
+ movl PCPU(CPUID), %esi
#else
xorl %esi, %esi
#endif
@@ -220,19 +220,19 @@ sw1b:
/* update common_tss.tss_esp0 pointer */
movl %edx, %ebx /* pcb */
addl $(UPAGES * PAGE_SIZE - 16), %ebx
- movl %ebx, _common_tss + TSS_ESP0
+ movl %ebx, PCPU(COMMON_TSS) + TSS_ESP0
btrl %esi, _private_tss
jae 3f
#ifdef SMP
- movl $gd_common_tssd, %edi
+ movl $GD_COMMON_TSSD, %edi
addl %fs:0, %edi
#else
- movl $_common_tssd, %edi
+ movl $PCPU(COMMON_TSSD), %edi
#endif
2:
/* move correct tss descriptor into GDT slot, then reload tr */
- movl _tss_gdt, %ebx /* entry in GDT */
+ movl PCPU(TSS_GDT), %ebx /* entry in GDT */
movl 0(%edi), %eax
movl %eax, 0(%ebx)
movl 4(%edi), %eax
@@ -242,7 +242,7 @@ sw1b:
3:
movl P_VMSPACE(%ecx), %ebx
#ifdef SMP
- movl _cpuid, %eax
+ movl PCPU(CPUID), %eax
#else
xorl %eax, %eax
#endif
@@ -265,11 +265,11 @@ sw1b:
andl $~APIC_TPR_PRIO, lapic_tpr
#endif /** CHEAP_TPR */
#endif /** GRAB_LOPRIO */
- movl _cpuid,%eax
+ movl PCPU(CPUID),%eax
movb %al, P_ONCPU(%ecx)
#endif /* SMP */
- movl %edx, _curpcb
- movl %ecx, _curproc /* into next process */
+ movl %edx, PCPU(CURPCB)
+ movl %ecx, PCPU(CURPROC) /* into next process */
#ifdef SMP
/* XXX FIXME: we should be restoring the local APIC TPR */
@@ -279,10 +279,10 @@ sw1b:
cmpl $0, PCB_USERLDT(%edx)
jnz 1f
movl __default_ldt,%eax
- cmpl _currentldt,%eax
+ cmpl PCPU(CURRENTLDT),%eax
je 2f
lldt __default_ldt
- movl %eax,_currentldt
+ movl %eax,PCPU(CURRENTLDT)
jmp 2f
1: pushl %edx
call _set_user_ldt
@@ -320,7 +320,7 @@ cpu_switch_load_gs:
movl PCB_SCHEDNEST(%edx),%eax
movl %eax,_sched_lock+MTX_RECURSE
- movl _curproc,%eax
+ movl PCPU(CURPROC),%eax
movl %eax,_sched_lock+MTX_LOCK
ret
@@ -376,7 +376,7 @@ ENTRY(savectx)
* have to handle h/w bugs for reloading. We used to lose the
* parent's npx state for forks by forgetting to reload.
*/
- movl _npxproc,%eax
+ movl PCPU(NPXPROC),%eax
testl %eax,%eax
je 1f
diff --git a/sys/amd64/include/asmacros.h b/sys/amd64/include/asmacros.h
index 92b9e1a..c2d89b8 100644
--- a/sys/amd64/include/asmacros.h
+++ b/sys/amd64/include/asmacros.h
@@ -69,6 +69,12 @@
#define NON_GPROF_ENTRY(name) GEN_ENTRY(name)
#define NON_GPROF_RET .byte 0xc3 /* opcode for `ret' */
+#ifdef SMP
+#define PCPU(member) %fs:GD_ ## member
+#else
+#define PCPU(member) CNAME(globaldata) + GD_ ## member
+#endif
+
#ifdef GPROF
/*
* __mcount is like [.]mcount except that doesn't require its caller to set
diff --git a/sys/amd64/isa/atpic_vector.S b/sys/amd64/isa/atpic_vector.S
index f810401..e28c375 100644
--- a/sys/amd64/isa/atpic_vector.S
+++ b/sys/amd64/isa/atpic_vector.S
@@ -60,7 +60,7 @@ IDTVEC(vec_name) ; \
mov %ax,%es ; \
mov %ax,%fs ; \
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
- incb _intr_nesting_level ; \
+ incb PCPU(INTR_NESTING_LEVEL) ; \
pushl _intr_unit + (irq_num) * 4 ; \
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
@@ -100,7 +100,7 @@ IDTVEC(vec_name) ; \
movb %al,_imen + IRQ_BYTE(irq_num) ; \
outb %al,$icu+ICU_IMR_OFFSET ; \
enable_icus ; \
- incb _intr_nesting_level ; \
+ incb PCPU(INTR_NESTING_LEVEL) ; \
__CONCAT(Xresume,irq_num): ; \
FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
pushl $irq_num; /* pass the IRQ */ \
diff --git a/sys/amd64/isa/icu_vector.S b/sys/amd64/isa/icu_vector.S
index f810401..e28c375 100644
--- a/sys/amd64/isa/icu_vector.S
+++ b/sys/amd64/isa/icu_vector.S
@@ -60,7 +60,7 @@ IDTVEC(vec_name) ; \
mov %ax,%es ; \
mov %ax,%fs ; \
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
- incb _intr_nesting_level ; \
+ incb PCPU(INTR_NESTING_LEVEL) ; \
pushl _intr_unit + (irq_num) * 4 ; \
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
@@ -100,7 +100,7 @@ IDTVEC(vec_name) ; \
movb %al,_imen + IRQ_BYTE(irq_num) ; \
outb %al,$icu+ICU_IMR_OFFSET ; \
enable_icus ; \
- incb _intr_nesting_level ; \
+ incb PCPU(INTR_NESTING_LEVEL) ; \
__CONCAT(Xresume,irq_num): ; \
FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
pushl $irq_num; /* pass the IRQ */ \
diff --git a/sys/amd64/isa/icu_vector.s b/sys/amd64/isa/icu_vector.s
index f810401..e28c375 100644
--- a/sys/amd64/isa/icu_vector.s
+++ b/sys/amd64/isa/icu_vector.s
@@ -60,7 +60,7 @@ IDTVEC(vec_name) ; \
mov %ax,%es ; \
mov %ax,%fs ; \
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
- incb _intr_nesting_level ; \
+ incb PCPU(INTR_NESTING_LEVEL) ; \
pushl _intr_unit + (irq_num) * 4 ; \
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
@@ -100,7 +100,7 @@ IDTVEC(vec_name) ; \
movb %al,_imen + IRQ_BYTE(irq_num) ; \
outb %al,$icu+ICU_IMR_OFFSET ; \
enable_icus ; \
- incb _intr_nesting_level ; \
+ incb PCPU(INTR_NESTING_LEVEL) ; \
__CONCAT(Xresume,irq_num): ; \
FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
pushl $irq_num; /* pass the IRQ */ \