author     obrien <obrien@FreeBSD.org>  2000-05-10 01:24:23 +0000
committer  obrien <obrien@FreeBSD.org>  2000-05-10 01:24:23 +0000
commit     3407cffd5743913e8139db168932d47d674ba338 (patch)
tree       0db668362109b11150d567dc43bb984661835595
parent     5840ec82fcfbe54eec6513ea52cc0c6b30620f5c (diff)
1. `movl' is for use with 32-bit operands.  Do NOT use it with 16-bit
   operands.  `movw' could be used, but instead let the assembler decide
   the right instruction to use.
2. AT&T asm syntax requires a leading '*' in front of the operand for
   indirect calls and jumps.
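Both points condense to the pattern below (an illustrative snippet, not part of
the patch itself; it reuses the KDSEL selector and the indirect-call sites that
appear in the hunks that follow):

	/* 1. Segment registers take 16-bit operands: don't force movl. */
	movl	$KDSEL,%eax		/* old: 32-bit mnemonic for a 16-bit move */
	movl	%ax,%ds
	mov	$KDSEL,%ax		/* new: plain mov, assembler picks the size */
	mov	%ax,%ds

	/* 2. AT&T syntax needs a leading '*' on indirect calls and jumps. */
	call	%eax			/* old: missing the required '*' */
	call	*%eax			/* new: indirect call through %eax */
	jmp	*jtab(,%ecx,4)		/* indirect jump through a jump table */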
-rw-r--r--  sys/amd64/amd64/apic_vector.S   40
-rw-r--r--  sys/amd64/amd64/exception.S     34
-rw-r--r--  sys/amd64/amd64/exception.s     34
-rw-r--r--  sys/amd64/amd64/locore.S        12
-rw-r--r--  sys/amd64/amd64/locore.s        12
-rw-r--r--  sys/amd64/amd64/support.S       14
-rw-r--r--  sys/amd64/amd64/support.s       14
-rw-r--r--  sys/amd64/isa/atpic_vector.S    18
-rw-r--r--  sys/amd64/isa/icu_vector.S      18
-rw-r--r--  sys/amd64/isa/icu_vector.s      18
-rw-r--r--  sys/i386/i386/apic_vector.s     40
-rw-r--r--  sys/i386/i386/exception.s       34
-rw-r--r--  sys/i386/i386/locore.s          12
-rw-r--r--  sys/i386/i386/support.s         14
-rw-r--r--  sys/i386/isa/apic_vector.s      40
-rw-r--r--  sys/i386/isa/atpic_vector.s     18
-rw-r--r--  sys/i386/isa/icu_vector.s       18
17 files changed, 195 insertions(+), 195 deletions(-)
diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S
index 602b4ce..6e4ba67 100644
--- a/sys/amd64/amd64/apic_vector.S
+++ b/sys/amd64/amd64/apic_vector.S
@@ -42,10 +42,10 @@ IDTVEC(vec_name) ; \
MAYBE_PUSHL_ES ; \
pushl %fs ; \
movl $KDSEL,%eax ; \
- movl %ax,%ds ; \
+ mov %ax,%ds ; \
MAYBE_MOVW_AX_ES ; \
movl $KPSEL,%eax ; \
- movl %ax,%fs ; \
+ mov %ax,%fs ; \
FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ; \
pushl _intr_unit + (irq_num) * 4 ; \
GET_FAST_INTR_LOCK ; \
@@ -221,10 +221,10 @@ log_intr_event:
IDTVEC(vec_name) ; \
PUSH_FRAME ; \
movl $KDSEL, %eax ; /* reload with kernel's data segment */ \
- movl %ax, %ds ; \
- movl %ax, %es ; \
+ mov %ax, %ds ; \
+ mov %ax, %es ; \
movl $KPSEL, %eax ; \
- movl %ax, %fs ; \
+ mov %ax, %fs ; \
; \
maybe_extra_ipending ; \
; \
@@ -340,7 +340,7 @@ _Xinvltlb:
#ifdef COUNT_XINVLTLB_HITS
pushl %fs
movl $KPSEL, %eax
- movl %ax, %fs
+ mov %ax, %fs
movl _cpuid, %eax
popl %fs
ss
@@ -384,9 +384,9 @@ _Xcpucheckstate:
pushl %fs
movl $KDSEL, %eax
- movl %ax, %ds /* use KERNEL data segment */
+ mov %ax, %ds /* use KERNEL data segment */
movl $KPSEL, %eax
- movl %ax, %fs
+ mov %ax, %fs
movl $0, lapic_eoi /* End Of Interrupt to APIC */
@@ -431,10 +431,10 @@ _Xcpucheckstate:
_Xcpuast:
PUSH_FRAME
movl $KDSEL, %eax
- movl %ax, %ds /* use KERNEL data segment */
- movl %ax, %es
+ mov %ax, %ds /* use KERNEL data segment */
+ mov %ax, %es
movl $KPSEL, %eax
- movl %ax, %fs
+ mov %ax, %fs
movl _cpuid, %eax
lock /* checkstate_need_ast &= ~(1<<id) */
@@ -491,10 +491,10 @@ _Xcpuast:
_Xforward_irq:
PUSH_FRAME
movl $KDSEL, %eax
- movl %ax, %ds /* use KERNEL data segment */
- movl %ax, %es
+ mov %ax, %ds /* use KERNEL data segment */
+ mov %ax, %es
movl $KPSEL, %eax
- movl %ax, %fs
+ mov %ax, %fs
movl $0, lapic_eoi /* End Of Interrupt to APIC */
@@ -594,9 +594,9 @@ _Xcpustop:
pushl %fs
movl $KDSEL, %eax
- movl %ax, %ds /* use KERNEL data segment */
+ mov %ax, %ds /* use KERNEL data segment */
movl $KPSEL, %eax
- movl %ax, %fs
+ mov %ax, %fs
movl $0, lapic_eoi /* End Of Interrupt to APIC */
@@ -629,7 +629,7 @@ _Xcpustop:
jz 2f
movl $0, CNAME(cpustop_restartfunc) /* One-shot */
- call %eax
+ call *%eax
2:
popl %fs
popl %ds /* restore previous data segment */
@@ -704,10 +704,10 @@ MCOUNT_LABEL(eintr)
_Xrendezvous:
PUSH_FRAME
movl $KDSEL, %eax
- movl %ax, %ds /* use KERNEL data segment */
- movl %ax, %es
+ mov %ax, %ds /* use KERNEL data segment */
+ mov %ax, %es
movl $KPSEL, %eax
- movl %ax, %fs
+ mov %ax, %fs
call _smp_rendezvous_action
diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S
index e3d5e35..acb8b40 100644
--- a/sys/amd64/amd64/exception.S
+++ b/sys/amd64/amd64/exception.S
@@ -168,11 +168,11 @@ IDTVEC(fpu)
pushl %ds
pushl %es /* now stack frame is a trap frame */
pushl %fs
- movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
+ mov $KDSEL,%ax
+ mov %ax,%ds
+ mov %ax,%es
MOVL_KPSEL_EAX
- movl %ax,%fs
+ mov %ax,%fs
FAKE_MCOUNT(13*4(%esp))
#ifdef SMP
@@ -219,11 +219,11 @@ _alltraps:
pushl %es
pushl %fs
alltraps_with_regs_pushed:
- movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
+ mov $KDSEL,%ax
+ mov %ax,%ds
+ mov %ax,%es
MOVL_KPSEL_EAX
- movl %ax,%fs
+ mov %ax,%fs
FAKE_MCOUNT(13*4(%esp))
calltrap:
FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
@@ -265,11 +265,11 @@ IDTVEC(syscall)
pushl %ds
pushl %es
pushl %fs
- movl $KDSEL,%eax /* switch to kernel segments */
- movl %ax,%ds
- movl %ax,%es
+ mov $KDSEL,%ax /* switch to kernel segments */
+ mov %ax,%ds
+ mov %ax,%es
MOVL_KPSEL_EAX
- movl %ax,%fs
+ mov %ax,%fs
movl TF_ERR(%esp),%eax /* copy saved eflags to final spot */
movl %eax,TF_EFLAGS(%esp)
movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
@@ -305,11 +305,11 @@ IDTVEC(int0x80_syscall)
pushl %ds
pushl %es
pushl %fs
- movl $KDSEL,%eax /* switch to kernel segments */
- movl %ax,%ds
- movl %ax,%es
+ mov $KDSEL,%ax /* switch to kernel segments */
+ mov %ax,%ds
+ mov %ax,%es
MOVL_KPSEL_EAX
- movl %ax,%fs
+ mov %ax,%fs
movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
FAKE_MCOUNT(13*4(%esp))
MPLOCKED incl _cnt+V_SYSCALL
@@ -348,7 +348,7 @@ ENTRY(fork_trampoline)
* initproc has its own fork handler, but it does return.
*/
pushl %ebx /* arg1 */
- call %esi /* function */
+ call *%esi /* function */
addl $4,%esp
/* cut from syscall */
diff --git a/sys/amd64/amd64/exception.s b/sys/amd64/amd64/exception.s
index e3d5e35..acb8b40 100644
--- a/sys/amd64/amd64/exception.s
+++ b/sys/amd64/amd64/exception.s
@@ -168,11 +168,11 @@ IDTVEC(fpu)
pushl %ds
pushl %es /* now stack frame is a trap frame */
pushl %fs
- movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
+ mov $KDSEL,%ax
+ mov %ax,%ds
+ mov %ax,%es
MOVL_KPSEL_EAX
- movl %ax,%fs
+ mov %ax,%fs
FAKE_MCOUNT(13*4(%esp))
#ifdef SMP
@@ -219,11 +219,11 @@ _alltraps:
pushl %es
pushl %fs
alltraps_with_regs_pushed:
- movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
+ mov $KDSEL,%ax
+ mov %ax,%ds
+ mov %ax,%es
MOVL_KPSEL_EAX
- movl %ax,%fs
+ mov %ax,%fs
FAKE_MCOUNT(13*4(%esp))
calltrap:
FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
@@ -265,11 +265,11 @@ IDTVEC(syscall)
pushl %ds
pushl %es
pushl %fs
- movl $KDSEL,%eax /* switch to kernel segments */
- movl %ax,%ds
- movl %ax,%es
+ mov $KDSEL,%ax /* switch to kernel segments */
+ mov %ax,%ds
+ mov %ax,%es
MOVL_KPSEL_EAX
- movl %ax,%fs
+ mov %ax,%fs
movl TF_ERR(%esp),%eax /* copy saved eflags to final spot */
movl %eax,TF_EFLAGS(%esp)
movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
@@ -305,11 +305,11 @@ IDTVEC(int0x80_syscall)
pushl %ds
pushl %es
pushl %fs
- movl $KDSEL,%eax /* switch to kernel segments */
- movl %ax,%ds
- movl %ax,%es
+ mov $KDSEL,%ax /* switch to kernel segments */
+ mov %ax,%ds
+ mov %ax,%es
MOVL_KPSEL_EAX
- movl %ax,%fs
+ mov %ax,%fs
movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
FAKE_MCOUNT(13*4(%esp))
MPLOCKED incl _cnt+V_SYSCALL
@@ -348,7 +348,7 @@ ENTRY(fork_trampoline)
* initproc has its own fork handler, but it does return.
*/
pushl %ebx /* arg1 */
- call %esi /* function */
+ call *%esi /* function */
addl $4,%esp
/* cut from syscall */
diff --git a/sys/amd64/amd64/locore.S b/sys/amd64/amd64/locore.S
index bfbcad8..ea3913e 100644
--- a/sys/amd64/amd64/locore.S
+++ b/sys/amd64/amd64/locore.S
@@ -404,11 +404,11 @@ NON_GPROF_ENTRY(prepare_usermode)
movl __udatasel,%ecx
#if 0 /* ds/es/fs are in trap frame */
- movl %cx,%ds
- movl %cx,%es
- movl %cx,%fs
+ mov %cx,%ds
+ mov %cx,%es
+ mov %cx,%fs
#endif
- movl %cx,%gs /* and ds to gs */
+ mov %cx,%gs /* and ds to gs */
ret /* goto user! */
@@ -416,7 +416,7 @@ NON_GPROF_ENTRY(prepare_usermode)
* Signal trampoline, copied to top of user stack
*/
NON_GPROF_ENTRY(sigcode)
- call SIGF_HANDLER(%esp) /* call signal handler */
+ call *SIGF_HANDLER(%esp) /* call signal handler */
lea SIGF_UC(%esp),%eax /* get ucontext_t */
pushl %eax
testl $PSL_VM,UC_EFLAGS(%eax)
@@ -430,7 +430,7 @@ NON_GPROF_ENTRY(sigcode)
ALIGN_TEXT
_osigcode:
- call SIGF_HANDLER(%esp) /* call signal handler */
+ call *SIGF_HANDLER(%esp) /* call signal handler */
lea SIGF_SC(%esp),%eax /* get sigcontext */
pushl %eax
testl $PSL_VM,SC_PS(%eax)
diff --git a/sys/amd64/amd64/locore.s b/sys/amd64/amd64/locore.s
index bfbcad8..ea3913e 100644
--- a/sys/amd64/amd64/locore.s
+++ b/sys/amd64/amd64/locore.s
@@ -404,11 +404,11 @@ NON_GPROF_ENTRY(prepare_usermode)
movl __udatasel,%ecx
#if 0 /* ds/es/fs are in trap frame */
- movl %cx,%ds
- movl %cx,%es
- movl %cx,%fs
+ mov %cx,%ds
+ mov %cx,%es
+ mov %cx,%fs
#endif
- movl %cx,%gs /* and ds to gs */
+ mov %cx,%gs /* and ds to gs */
ret /* goto user! */
@@ -416,7 +416,7 @@ NON_GPROF_ENTRY(prepare_usermode)
* Signal trampoline, copied to top of user stack
*/
NON_GPROF_ENTRY(sigcode)
- call SIGF_HANDLER(%esp) /* call signal handler */
+ call *SIGF_HANDLER(%esp) /* call signal handler */
lea SIGF_UC(%esp),%eax /* get ucontext_t */
pushl %eax
testl $PSL_VM,UC_EFLAGS(%eax)
@@ -430,7 +430,7 @@ NON_GPROF_ENTRY(sigcode)
ALIGN_TEXT
_osigcode:
- call SIGF_HANDLER(%esp) /* call signal handler */
+ call *SIGF_HANDLER(%esp) /* call signal handler */
lea SIGF_SC(%esp),%eax /* get sigcontext */
pushl %eax
testl $PSL_VM,SC_PS(%eax)
diff --git a/sys/amd64/amd64/support.S b/sys/amd64/amd64/support.S
index 788151c..1f073d8 100644
--- a/sys/amd64/amd64/support.S
+++ b/sys/amd64/amd64/support.S
@@ -169,7 +169,7 @@ jtab:
.text
SUPERALIGN_TEXT
5:
- jmp jtab(,%ecx,4)
+ jmp *jtab(,%ecx,4)
SUPERALIGN_TEXT
do3:
@@ -303,7 +303,7 @@ fpureg_i586_bzero_loop:
ret
i586_bz3:
- fstpl %st(0)
+ fstp %st(0)
lmsw %ax
movb $0xfe,kernel_fpu_lock
ret
@@ -1513,14 +1513,14 @@ ENTRY(lgdt)
1:
/* reload "stale" selectors */
movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
- movl %ax,%gs
- movl %ax,%ss
+ mov %ax,%ds
+ mov %ax,%es
+ mov %ax,%gs
+ mov %ax,%ss
#ifdef SMP
movl $KPSEL,%eax
#endif
- movl %ax,%fs
+ mov %ax,%fs
/* reload code selector by turning return into intersegmental return */
movl (%esp),%eax
diff --git a/sys/amd64/amd64/support.s b/sys/amd64/amd64/support.s
index 788151c..1f073d8 100644
--- a/sys/amd64/amd64/support.s
+++ b/sys/amd64/amd64/support.s
@@ -169,7 +169,7 @@ jtab:
.text
SUPERALIGN_TEXT
5:
- jmp jtab(,%ecx,4)
+ jmp *jtab(,%ecx,4)
SUPERALIGN_TEXT
do3:
@@ -303,7 +303,7 @@ fpureg_i586_bzero_loop:
ret
i586_bz3:
- fstpl %st(0)
+ fstp %st(0)
lmsw %ax
movb $0xfe,kernel_fpu_lock
ret
@@ -1513,14 +1513,14 @@ ENTRY(lgdt)
1:
/* reload "stale" selectors */
movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
- movl %ax,%gs
- movl %ax,%ss
+ mov %ax,%ds
+ mov %ax,%es
+ mov %ax,%gs
+ mov %ax,%ss
#ifdef SMP
movl $KPSEL,%eax
#endif
- movl %ax,%fs
+ mov %ax,%fs
/* reload code selector by turning return into intersegmental return */
movl (%esp),%eax
diff --git a/sys/amd64/isa/atpic_vector.S b/sys/amd64/isa/atpic_vector.S
index 24ce97b..21ea2a8 100644
--- a/sys/amd64/isa/atpic_vector.S
+++ b/sys/amd64/isa/atpic_vector.S
@@ -54,8 +54,8 @@ IDTVEC(vec_name) ; \
pushl %edx ; \
pushl %ds ; \
MAYBE_PUSHL_ES ; \
- movl $KDSEL,%eax ; \
- movl %ax,%ds ; \
+ mov $KDSEL,%ax ; \
+ mov %ax,%ds ; \
MAYBE_MOVW_AX_ES ; \
FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
pushl _intr_unit + (irq_num) * 4 ; \
@@ -95,9 +95,9 @@ IDTVEC(vec_name) ; \
pushl %ecx ; /* ... actually %ds ... */ \
pushl %es ; \
pushl %fs ; \
- movl $KDSEL,%eax ; \
- movl %ax,%es ; \
- movl %ax,%fs ; \
+ mov $KDSEL,%ax ; \
+ mov %ax,%es ; \
+ mov %ax,%fs ; \
movl (3+8+0)*4(%esp),%ecx ; /* ... %ecx from thin frame ... */ \
movl %ecx,(3+6)*4(%esp) ; /* ... to fat frame ... */ \
movl (3+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \
@@ -116,10 +116,10 @@ IDTVEC(vec_name) ; \
pushl %ds ; /* save our data and extra segments ... */ \
pushl %es ; \
pushl %fs ; \
- movl $KDSEL,%eax ; /* ... and reload with kernel's own ... */ \
- movl %ax,%ds ; /* ... early for obsolete reasons */ \
- movl %ax,%es ; \
- movl %ax,%fs ; \
+ mov $KDSEL,%ax ; /* ... and reload with kernel's own ... */ \
+ mov %ax,%ds ; /* ... early for obsolete reasons */ \
+ mov %ax,%es ; \
+ mov %ax,%fs ; \
maybe_extra_ipending ; \
movb _imen + IRQ_BYTE(irq_num),%al ; \
orb $IRQ_BIT(irq_num),%al ; \
diff --git a/sys/amd64/isa/icu_vector.S b/sys/amd64/isa/icu_vector.S
index 24ce97b..21ea2a8 100644
--- a/sys/amd64/isa/icu_vector.S
+++ b/sys/amd64/isa/icu_vector.S
@@ -54,8 +54,8 @@ IDTVEC(vec_name) ; \
pushl %edx ; \
pushl %ds ; \
MAYBE_PUSHL_ES ; \
- movl $KDSEL,%eax ; \
- movl %ax,%ds ; \
+ mov $KDSEL,%ax ; \
+ mov %ax,%ds ; \
MAYBE_MOVW_AX_ES ; \
FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
pushl _intr_unit + (irq_num) * 4 ; \
@@ -95,9 +95,9 @@ IDTVEC(vec_name) ; \
pushl %ecx ; /* ... actually %ds ... */ \
pushl %es ; \
pushl %fs ; \
- movl $KDSEL,%eax ; \
- movl %ax,%es ; \
- movl %ax,%fs ; \
+ mov $KDSEL,%ax ; \
+ mov %ax,%es ; \
+ mov %ax,%fs ; \
movl (3+8+0)*4(%esp),%ecx ; /* ... %ecx from thin frame ... */ \
movl %ecx,(3+6)*4(%esp) ; /* ... to fat frame ... */ \
movl (3+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \
@@ -116,10 +116,10 @@ IDTVEC(vec_name) ; \
pushl %ds ; /* save our data and extra segments ... */ \
pushl %es ; \
pushl %fs ; \
- movl $KDSEL,%eax ; /* ... and reload with kernel's own ... */ \
- movl %ax,%ds ; /* ... early for obsolete reasons */ \
- movl %ax,%es ; \
- movl %ax,%fs ; \
+ mov $KDSEL,%ax ; /* ... and reload with kernel's own ... */ \
+ mov %ax,%ds ; /* ... early for obsolete reasons */ \
+ mov %ax,%es ; \
+ mov %ax,%fs ; \
maybe_extra_ipending ; \
movb _imen + IRQ_BYTE(irq_num),%al ; \
orb $IRQ_BIT(irq_num),%al ; \
diff --git a/sys/amd64/isa/icu_vector.s b/sys/amd64/isa/icu_vector.s
index 24ce97b..21ea2a8 100644
--- a/sys/amd64/isa/icu_vector.s
+++ b/sys/amd64/isa/icu_vector.s
@@ -54,8 +54,8 @@ IDTVEC(vec_name) ; \
pushl %edx ; \
pushl %ds ; \
MAYBE_PUSHL_ES ; \
- movl $KDSEL,%eax ; \
- movl %ax,%ds ; \
+ mov $KDSEL,%ax ; \
+ mov %ax,%ds ; \
MAYBE_MOVW_AX_ES ; \
FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
pushl _intr_unit + (irq_num) * 4 ; \
@@ -95,9 +95,9 @@ IDTVEC(vec_name) ; \
pushl %ecx ; /* ... actually %ds ... */ \
pushl %es ; \
pushl %fs ; \
- movl $KDSEL,%eax ; \
- movl %ax,%es ; \
- movl %ax,%fs ; \
+ mov $KDSEL,%ax ; \
+ mov %ax,%es ; \
+ mov %ax,%fs ; \
movl (3+8+0)*4(%esp),%ecx ; /* ... %ecx from thin frame ... */ \
movl %ecx,(3+6)*4(%esp) ; /* ... to fat frame ... */ \
movl (3+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \
@@ -116,10 +116,10 @@ IDTVEC(vec_name) ; \
pushl %ds ; /* save our data and extra segments ... */ \
pushl %es ; \
pushl %fs ; \
- movl $KDSEL,%eax ; /* ... and reload with kernel's own ... */ \
- movl %ax,%ds ; /* ... early for obsolete reasons */ \
- movl %ax,%es ; \
- movl %ax,%fs ; \
+ mov $KDSEL,%ax ; /* ... and reload with kernel's own ... */ \
+ mov %ax,%ds ; /* ... early for obsolete reasons */ \
+ mov %ax,%es ; \
+ mov %ax,%fs ; \
maybe_extra_ipending ; \
movb _imen + IRQ_BYTE(irq_num),%al ; \
orb $IRQ_BIT(irq_num),%al ; \
diff --git a/sys/i386/i386/apic_vector.s b/sys/i386/i386/apic_vector.s
index 602b4ce..6e4ba67 100644
--- a/sys/i386/i386/apic_vector.s
+++ b/sys/i386/i386/apic_vector.s
@@ -42,10 +42,10 @@ IDTVEC(vec_name) ; \
MAYBE_PUSHL_ES ; \
pushl %fs ; \
movl $KDSEL,%eax ; \
- movl %ax,%ds ; \
+ mov %ax,%ds ; \
MAYBE_MOVW_AX_ES ; \
movl $KPSEL,%eax ; \
- movl %ax,%fs ; \
+ mov %ax,%fs ; \
FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ; \
pushl _intr_unit + (irq_num) * 4 ; \
GET_FAST_INTR_LOCK ; \
@@ -221,10 +221,10 @@ log_intr_event:
IDTVEC(vec_name) ; \
PUSH_FRAME ; \
movl $KDSEL, %eax ; /* reload with kernel's data segment */ \
- movl %ax, %ds ; \
- movl %ax, %es ; \
+ mov %ax, %ds ; \
+ mov %ax, %es ; \
movl $KPSEL, %eax ; \
- movl %ax, %fs ; \
+ mov %ax, %fs ; \
; \
maybe_extra_ipending ; \
; \
@@ -340,7 +340,7 @@ _Xinvltlb:
#ifdef COUNT_XINVLTLB_HITS
pushl %fs
movl $KPSEL, %eax
- movl %ax, %fs
+ mov %ax, %fs
movl _cpuid, %eax
popl %fs
ss
@@ -384,9 +384,9 @@ _Xcpucheckstate:
pushl %fs
movl $KDSEL, %eax
- movl %ax, %ds /* use KERNEL data segment */
+ mov %ax, %ds /* use KERNEL data segment */
movl $KPSEL, %eax
- movl %ax, %fs
+ mov %ax, %fs
movl $0, lapic_eoi /* End Of Interrupt to APIC */
@@ -431,10 +431,10 @@ _Xcpucheckstate:
_Xcpuast:
PUSH_FRAME
movl $KDSEL, %eax
- movl %ax, %ds /* use KERNEL data segment */
- movl %ax, %es
+ mov %ax, %ds /* use KERNEL data segment */
+ mov %ax, %es
movl $KPSEL, %eax
- movl %ax, %fs
+ mov %ax, %fs
movl _cpuid, %eax
lock /* checkstate_need_ast &= ~(1<<id) */
@@ -491,10 +491,10 @@ _Xcpuast:
_Xforward_irq:
PUSH_FRAME
movl $KDSEL, %eax
- movl %ax, %ds /* use KERNEL data segment */
- movl %ax, %es
+ mov %ax, %ds /* use KERNEL data segment */
+ mov %ax, %es
movl $KPSEL, %eax
- movl %ax, %fs
+ mov %ax, %fs
movl $0, lapic_eoi /* End Of Interrupt to APIC */
@@ -594,9 +594,9 @@ _Xcpustop:
pushl %fs
movl $KDSEL, %eax
- movl %ax, %ds /* use KERNEL data segment */
+ mov %ax, %ds /* use KERNEL data segment */
movl $KPSEL, %eax
- movl %ax, %fs
+ mov %ax, %fs
movl $0, lapic_eoi /* End Of Interrupt to APIC */
@@ -629,7 +629,7 @@ _Xcpustop:
jz 2f
movl $0, CNAME(cpustop_restartfunc) /* One-shot */
- call %eax
+ call *%eax
2:
popl %fs
popl %ds /* restore previous data segment */
@@ -704,10 +704,10 @@ MCOUNT_LABEL(eintr)
_Xrendezvous:
PUSH_FRAME
movl $KDSEL, %eax
- movl %ax, %ds /* use KERNEL data segment */
- movl %ax, %es
+ mov %ax, %ds /* use KERNEL data segment */
+ mov %ax, %es
movl $KPSEL, %eax
- movl %ax, %fs
+ mov %ax, %fs
call _smp_rendezvous_action
diff --git a/sys/i386/i386/exception.s b/sys/i386/i386/exception.s
index e3d5e35..acb8b40 100644
--- a/sys/i386/i386/exception.s
+++ b/sys/i386/i386/exception.s
@@ -168,11 +168,11 @@ IDTVEC(fpu)
pushl %ds
pushl %es /* now stack frame is a trap frame */
pushl %fs
- movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
+ mov $KDSEL,%ax
+ mov %ax,%ds
+ mov %ax,%es
MOVL_KPSEL_EAX
- movl %ax,%fs
+ mov %ax,%fs
FAKE_MCOUNT(13*4(%esp))
#ifdef SMP
@@ -219,11 +219,11 @@ _alltraps:
pushl %es
pushl %fs
alltraps_with_regs_pushed:
- movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
+ mov $KDSEL,%ax
+ mov %ax,%ds
+ mov %ax,%es
MOVL_KPSEL_EAX
- movl %ax,%fs
+ mov %ax,%fs
FAKE_MCOUNT(13*4(%esp))
calltrap:
FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
@@ -265,11 +265,11 @@ IDTVEC(syscall)
pushl %ds
pushl %es
pushl %fs
- movl $KDSEL,%eax /* switch to kernel segments */
- movl %ax,%ds
- movl %ax,%es
+ mov $KDSEL,%ax /* switch to kernel segments */
+ mov %ax,%ds
+ mov %ax,%es
MOVL_KPSEL_EAX
- movl %ax,%fs
+ mov %ax,%fs
movl TF_ERR(%esp),%eax /* copy saved eflags to final spot */
movl %eax,TF_EFLAGS(%esp)
movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
@@ -305,11 +305,11 @@ IDTVEC(int0x80_syscall)
pushl %ds
pushl %es
pushl %fs
- movl $KDSEL,%eax /* switch to kernel segments */
- movl %ax,%ds
- movl %ax,%es
+ mov $KDSEL,%ax /* switch to kernel segments */
+ mov %ax,%ds
+ mov %ax,%es
MOVL_KPSEL_EAX
- movl %ax,%fs
+ mov %ax,%fs
movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
FAKE_MCOUNT(13*4(%esp))
MPLOCKED incl _cnt+V_SYSCALL
@@ -348,7 +348,7 @@ ENTRY(fork_trampoline)
* initproc has its own fork handler, but it does return.
*/
pushl %ebx /* arg1 */
- call %esi /* function */
+ call *%esi /* function */
addl $4,%esp
/* cut from syscall */
diff --git a/sys/i386/i386/locore.s b/sys/i386/i386/locore.s
index bfbcad8..ea3913e 100644
--- a/sys/i386/i386/locore.s
+++ b/sys/i386/i386/locore.s
@@ -404,11 +404,11 @@ NON_GPROF_ENTRY(prepare_usermode)
movl __udatasel,%ecx
#if 0 /* ds/es/fs are in trap frame */
- movl %cx,%ds
- movl %cx,%es
- movl %cx,%fs
+ mov %cx,%ds
+ mov %cx,%es
+ mov %cx,%fs
#endif
- movl %cx,%gs /* and ds to gs */
+ mov %cx,%gs /* and ds to gs */
ret /* goto user! */
@@ -416,7 +416,7 @@ NON_GPROF_ENTRY(prepare_usermode)
* Signal trampoline, copied to top of user stack
*/
NON_GPROF_ENTRY(sigcode)
- call SIGF_HANDLER(%esp) /* call signal handler */
+ call *SIGF_HANDLER(%esp) /* call signal handler */
lea SIGF_UC(%esp),%eax /* get ucontext_t */
pushl %eax
testl $PSL_VM,UC_EFLAGS(%eax)
@@ -430,7 +430,7 @@ NON_GPROF_ENTRY(sigcode)
ALIGN_TEXT
_osigcode:
- call SIGF_HANDLER(%esp) /* call signal handler */
+ call *SIGF_HANDLER(%esp) /* call signal handler */
lea SIGF_SC(%esp),%eax /* get sigcontext */
pushl %eax
testl $PSL_VM,SC_PS(%eax)
diff --git a/sys/i386/i386/support.s b/sys/i386/i386/support.s
index 788151c..1f073d8 100644
--- a/sys/i386/i386/support.s
+++ b/sys/i386/i386/support.s
@@ -169,7 +169,7 @@ jtab:
.text
SUPERALIGN_TEXT
5:
- jmp jtab(,%ecx,4)
+ jmp *jtab(,%ecx,4)
SUPERALIGN_TEXT
do3:
@@ -303,7 +303,7 @@ fpureg_i586_bzero_loop:
ret
i586_bz3:
- fstpl %st(0)
+ fstp %st(0)
lmsw %ax
movb $0xfe,kernel_fpu_lock
ret
@@ -1513,14 +1513,14 @@ ENTRY(lgdt)
1:
/* reload "stale" selectors */
movl $KDSEL,%eax
- movl %ax,%ds
- movl %ax,%es
- movl %ax,%gs
- movl %ax,%ss
+ mov %ax,%ds
+ mov %ax,%es
+ mov %ax,%gs
+ mov %ax,%ss
#ifdef SMP
movl $KPSEL,%eax
#endif
- movl %ax,%fs
+ mov %ax,%fs
/* reload code selector by turning return into intersegmental return */
movl (%esp),%eax
diff --git a/sys/i386/isa/apic_vector.s b/sys/i386/isa/apic_vector.s
index 602b4ce..6e4ba67 100644
--- a/sys/i386/isa/apic_vector.s
+++ b/sys/i386/isa/apic_vector.s
@@ -42,10 +42,10 @@ IDTVEC(vec_name) ; \
MAYBE_PUSHL_ES ; \
pushl %fs ; \
movl $KDSEL,%eax ; \
- movl %ax,%ds ; \
+ mov %ax,%ds ; \
MAYBE_MOVW_AX_ES ; \
movl $KPSEL,%eax ; \
- movl %ax,%fs ; \
+ mov %ax,%fs ; \
FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ; \
pushl _intr_unit + (irq_num) * 4 ; \
GET_FAST_INTR_LOCK ; \
@@ -221,10 +221,10 @@ log_intr_event:
IDTVEC(vec_name) ; \
PUSH_FRAME ; \
movl $KDSEL, %eax ; /* reload with kernel's data segment */ \
- movl %ax, %ds ; \
- movl %ax, %es ; \
+ mov %ax, %ds ; \
+ mov %ax, %es ; \
movl $KPSEL, %eax ; \
- movl %ax, %fs ; \
+ mov %ax, %fs ; \
; \
maybe_extra_ipending ; \
; \
@@ -340,7 +340,7 @@ _Xinvltlb:
#ifdef COUNT_XINVLTLB_HITS
pushl %fs
movl $KPSEL, %eax
- movl %ax, %fs
+ mov %ax, %fs
movl _cpuid, %eax
popl %fs
ss
@@ -384,9 +384,9 @@ _Xcpucheckstate:
pushl %fs
movl $KDSEL, %eax
- movl %ax, %ds /* use KERNEL data segment */
+ mov %ax, %ds /* use KERNEL data segment */
movl $KPSEL, %eax
- movl %ax, %fs
+ mov %ax, %fs
movl $0, lapic_eoi /* End Of Interrupt to APIC */
@@ -431,10 +431,10 @@ _Xcpucheckstate:
_Xcpuast:
PUSH_FRAME
movl $KDSEL, %eax
- movl %ax, %ds /* use KERNEL data segment */
- movl %ax, %es
+ mov %ax, %ds /* use KERNEL data segment */
+ mov %ax, %es
movl $KPSEL, %eax
- movl %ax, %fs
+ mov %ax, %fs
movl _cpuid, %eax
lock /* checkstate_need_ast &= ~(1<<id) */
@@ -491,10 +491,10 @@ _Xcpuast:
_Xforward_irq:
PUSH_FRAME
movl $KDSEL, %eax
- movl %ax, %ds /* use KERNEL data segment */
- movl %ax, %es
+ mov %ax, %ds /* use KERNEL data segment */
+ mov %ax, %es
movl $KPSEL, %eax
- movl %ax, %fs
+ mov %ax, %fs
movl $0, lapic_eoi /* End Of Interrupt to APIC */
@@ -594,9 +594,9 @@ _Xcpustop:
pushl %fs
movl $KDSEL, %eax
- movl %ax, %ds /* use KERNEL data segment */
+ mov %ax, %ds /* use KERNEL data segment */
movl $KPSEL, %eax
- movl %ax, %fs
+ mov %ax, %fs
movl $0, lapic_eoi /* End Of Interrupt to APIC */
@@ -629,7 +629,7 @@ _Xcpustop:
jz 2f
movl $0, CNAME(cpustop_restartfunc) /* One-shot */
- call %eax
+ call *%eax
2:
popl %fs
popl %ds /* restore previous data segment */
@@ -704,10 +704,10 @@ MCOUNT_LABEL(eintr)
_Xrendezvous:
PUSH_FRAME
movl $KDSEL, %eax
- movl %ax, %ds /* use KERNEL data segment */
- movl %ax, %es
+ mov %ax, %ds /* use KERNEL data segment */
+ mov %ax, %es
movl $KPSEL, %eax
- movl %ax, %fs
+ mov %ax, %fs
call _smp_rendezvous_action
diff --git a/sys/i386/isa/atpic_vector.s b/sys/i386/isa/atpic_vector.s
index 24ce97b..21ea2a8 100644
--- a/sys/i386/isa/atpic_vector.s
+++ b/sys/i386/isa/atpic_vector.s
@@ -54,8 +54,8 @@ IDTVEC(vec_name) ; \
pushl %edx ; \
pushl %ds ; \
MAYBE_PUSHL_ES ; \
- movl $KDSEL,%eax ; \
- movl %ax,%ds ; \
+ mov $KDSEL,%ax ; \
+ mov %ax,%ds ; \
MAYBE_MOVW_AX_ES ; \
FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
pushl _intr_unit + (irq_num) * 4 ; \
@@ -95,9 +95,9 @@ IDTVEC(vec_name) ; \
pushl %ecx ; /* ... actually %ds ... */ \
pushl %es ; \
pushl %fs ; \
- movl $KDSEL,%eax ; \
- movl %ax,%es ; \
- movl %ax,%fs ; \
+ mov $KDSEL,%ax ; \
+ mov %ax,%es ; \
+ mov %ax,%fs ; \
movl (3+8+0)*4(%esp),%ecx ; /* ... %ecx from thin frame ... */ \
movl %ecx,(3+6)*4(%esp) ; /* ... to fat frame ... */ \
movl (3+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \
@@ -116,10 +116,10 @@ IDTVEC(vec_name) ; \
pushl %ds ; /* save our data and extra segments ... */ \
pushl %es ; \
pushl %fs ; \
- movl $KDSEL,%eax ; /* ... and reload with kernel's own ... */ \
- movl %ax,%ds ; /* ... early for obsolete reasons */ \
- movl %ax,%es ; \
- movl %ax,%fs ; \
+ mov $KDSEL,%ax ; /* ... and reload with kernel's own ... */ \
+ mov %ax,%ds ; /* ... early for obsolete reasons */ \
+ mov %ax,%es ; \
+ mov %ax,%fs ; \
maybe_extra_ipending ; \
movb _imen + IRQ_BYTE(irq_num),%al ; \
orb $IRQ_BIT(irq_num),%al ; \
diff --git a/sys/i386/isa/icu_vector.s b/sys/i386/isa/icu_vector.s
index 24ce97b..21ea2a8 100644
--- a/sys/i386/isa/icu_vector.s
+++ b/sys/i386/isa/icu_vector.s
@@ -54,8 +54,8 @@ IDTVEC(vec_name) ; \
pushl %edx ; \
pushl %ds ; \
MAYBE_PUSHL_ES ; \
- movl $KDSEL,%eax ; \
- movl %ax,%ds ; \
+ mov $KDSEL,%ax ; \
+ mov %ax,%ds ; \
MAYBE_MOVW_AX_ES ; \
FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
pushl _intr_unit + (irq_num) * 4 ; \
@@ -95,9 +95,9 @@ IDTVEC(vec_name) ; \
pushl %ecx ; /* ... actually %ds ... */ \
pushl %es ; \
pushl %fs ; \
- movl $KDSEL,%eax ; \
- movl %ax,%es ; \
- movl %ax,%fs ; \
+ mov $KDSEL,%ax ; \
+ mov %ax,%es ; \
+ mov %ax,%fs ; \
movl (3+8+0)*4(%esp),%ecx ; /* ... %ecx from thin frame ... */ \
movl %ecx,(3+6)*4(%esp) ; /* ... to fat frame ... */ \
movl (3+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \
@@ -116,10 +116,10 @@ IDTVEC(vec_name) ; \
pushl %ds ; /* save our data and extra segments ... */ \
pushl %es ; \
pushl %fs ; \
- movl $KDSEL,%eax ; /* ... and reload with kernel's own ... */ \
- movl %ax,%ds ; /* ... early for obsolete reasons */ \
- movl %ax,%es ; \
- movl %ax,%fs ; \
+ mov $KDSEL,%ax ; /* ... and reload with kernel's own ... */ \
+ mov %ax,%ds ; /* ... early for obsolete reasons */ \
+ mov %ax,%es ; \
+ mov %ax,%fs ; \
maybe_extra_ipending ; \
movb _imen + IRQ_BYTE(irq_num),%al ; \
orb $IRQ_BIT(irq_num),%al ; \