path: root/arch/x86/kernel/vm86_32.c
author     H. Peter Anvin <hpa@zytor.com>    2008-01-30 13:30:56 +0100
committer  Ingo Molnar <mingo@elte.hu>       2008-01-30 13:30:56 +0100
commit     65ea5b0349903585bfed9720fa06f5edb4f1cd25 (patch)
tree       6c252228c34416b7e2077f23475de34500c2ab8a /arch/x86/kernel/vm86_32.c
parent     53756d3722172815f52272b28c6d5d5e9639adde (diff)
download   op-kernel-dev-65ea5b0349903585bfed9720fa06f5edb4f1cd25.zip
           op-kernel-dev-65ea5b0349903585bfed9720fa06f5edb4f1cd25.tar.gz
x86: rename the struct pt_regs members for 32/64-bit consistency
We have a lot of code which differs only by the naming of specific members of structures that contain registers. In order to enable additional unifications, this patch drops the e- or r- size prefix from the register names in struct pt_regs, and drops the x- prefixes for segment registers on the 32-bit side. This patch also performs the equivalent renames in some additional places that might be candidates for unification in the future.

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
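For orientation, here is a minimal, self-contained userspace sketch (not part of the patch) of what the renamed accessors in this file do. The struct below is a hypothetical stand-in containing only the pt_regs members the macros touch (eax became ax, eip became ip, esp became sp); the real struct pt_regs has more fields, and the byte indexing assumes a little-endian build, as the kernel macros themselves do.

#include <stdio.h>

/* Hypothetical stand-in for the renamed pt_regs members used below. */
struct fake_pt_regs {
	unsigned long ax;	/* was "eax" before this patch */
	unsigned long ip;	/* was "eip" */
	unsigned long sp;	/* was "esp" */
};

struct fake_vm86_regs {
	struct fake_pt_regs pt;
};

/* Same definitions as the patched vm86_32.c, only against the stand-in. */
#define AL(regs) (((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs) (((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs) (*(unsigned short *)&((regs)->pt.ip))
#define SP(regs) (*(unsigned short *)&((regs)->pt.sp))

int main(void)
{
	struct fake_vm86_regs r = { { 0x1234, 0xdeadbeef, 0xcafef00d } };

	/* AL/AH are the low/high byte of ax; IP/SP are the low 16 bits. */
	printf("AL=%02x AH=%02x IP=%04x SP=%04x\n",
	       (unsigned)AL(&r), (unsigned)AH(&r),
	       (unsigned)IP(&r), (unsigned)SP(&r));
	return 0;
}

On a little-endian x86 build this prints AL=34 AH=12 IP=beef SP=f00d; since the patch only renames members, the generated code is unchanged.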
Diffstat (limited to 'arch/x86/kernel/vm86_32.c')
-rw-r--r--  arch/x86/kernel/vm86_32.c | 94
1 file changed, 47 insertions, 47 deletions
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 157e4be..980e85b 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -70,10 +70,10 @@
/*
* 8- and 16-bit register defines..
*/
-#define AL(regs) (((unsigned char *)&((regs)->pt.eax))[0])
-#define AH(regs) (((unsigned char *)&((regs)->pt.eax))[1])
-#define IP(regs) (*(unsigned short *)&((regs)->pt.eip))
-#define SP(regs) (*(unsigned short *)&((regs)->pt.esp))
+#define AL(regs) (((unsigned char *)&((regs)->pt.ax))[0])
+#define AH(regs) (((unsigned char *)&((regs)->pt.ax))[1])
+#define IP(regs) (*(unsigned short *)&((regs)->pt.ip))
+#define SP(regs) (*(unsigned short *)&((regs)->pt.sp))
/*
* virtual flags (16 and 32-bit versions)
@@ -93,12 +93,12 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
{
int ret = 0;
- /* kernel_vm86_regs is missing xgs, so copy everything up to
+ /* kernel_vm86_regs is missing gs, so copy everything up to
(but not including) orig_eax, and then rest including orig_eax. */
- ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_eax));
- ret += copy_to_user(&user->orig_eax, &regs->pt.orig_eax,
+ ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
+ ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
sizeof(struct kernel_vm86_regs) -
- offsetof(struct kernel_vm86_regs, pt.orig_eax));
+ offsetof(struct kernel_vm86_regs, pt.orig_ax));
return ret;
}
@@ -110,12 +110,12 @@ static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
{
int ret = 0;
- /* copy eax-xfs inclusive */
- ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_eax));
- /* copy orig_eax-__gsh+extra */
- ret += copy_from_user(&regs->pt.orig_eax, &user->orig_eax,
+ /* copy ax-fs inclusive */
+ ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax));
+ /* copy orig_ax-__gsh+extra */
+ ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax,
sizeof(struct kernel_vm86_regs) -
- offsetof(struct kernel_vm86_regs, pt.orig_eax) +
+ offsetof(struct kernel_vm86_regs, pt.orig_ax) +
extra);
return ret;
}
@@ -138,7 +138,7 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
printk("no vm86_info: BAD\n");
do_exit(SIGSEGV);
}
- set_flags(regs->pt.eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
+ set_flags(regs->pt.flags, VEFLAGS, VIF_MASK | current->thread.v86mask);
tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs,regs);
tmp += put_user(current->thread.screen_bitmap,&current->thread.vm86_info->screen_bitmap);
if (tmp) {
@@ -155,7 +155,7 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
ret = KVM86->regs32;
- ret->xfs = current->thread.saved_fs;
+ ret->fs = current->thread.saved_fs;
loadsegment(gs, current->thread.saved_gs);
return ret;
@@ -197,7 +197,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
asmlinkage int sys_vm86old(struct pt_regs regs)
{
- struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.ebx;
+ struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.bx;
struct kernel_vm86_struct info; /* declare this _on top_,
* this avoids wasting of stack space.
* This remains on the stack until we
@@ -237,12 +237,12 @@ asmlinkage int sys_vm86(struct pt_regs regs)
struct vm86plus_struct __user *v86;
tsk = current;
- switch (regs.ebx) {
+ switch (regs.bx) {
case VM86_REQUEST_IRQ:
case VM86_FREE_IRQ:
case VM86_GET_IRQ_BITS:
case VM86_GET_AND_RESET_IRQ:
- ret = do_vm86_irq_handling(regs.ebx, (int)regs.ecx);
+ ret = do_vm86_irq_handling(regs.bx, (int)regs.cx);
goto out;
case VM86_PLUS_INSTALL_CHECK:
/* NOTE: on old vm86 stuff this will return the error
@@ -258,7 +258,7 @@ asmlinkage int sys_vm86(struct pt_regs regs)
ret = -EPERM;
if (tsk->thread.saved_esp0)
goto out;
- v86 = (struct vm86plus_struct __user *)regs.ecx;
+ v86 = (struct vm86plus_struct __user *)regs.cx;
tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
offsetof(struct kernel_vm86_struct, regs32) -
sizeof(info.regs));
@@ -281,23 +281,23 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
/*
* make sure the vm86() system call doesn't try to do anything silly
*/
- info->regs.pt.xds = 0;
- info->regs.pt.xes = 0;
- info->regs.pt.xfs = 0;
+ info->regs.pt.ds = 0;
+ info->regs.pt.es = 0;
+ info->regs.pt.fs = 0;
/* we are clearing gs later just before "jmp resume_userspace",
* because it is not saved/restored.
*/
/*
- * The eflags register is also special: we cannot trust that the user
+ * The flags register is also special: we cannot trust that the user
* has set it up safely, so this makes sure interrupt etc flags are
* inherited from protected mode.
*/
- VEFLAGS = info->regs.pt.eflags;
- info->regs.pt.eflags &= SAFE_MASK;
- info->regs.pt.eflags |= info->regs32->eflags & ~SAFE_MASK;
- info->regs.pt.eflags |= VM_MASK;
+ VEFLAGS = info->regs.pt.flags;
+ info->regs.pt.flags &= SAFE_MASK;
+ info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
+ info->regs.pt.flags |= VM_MASK;
switch (info->cpu_type) {
case CPU_286:
@@ -315,11 +315,11 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
}
/*
- * Save old state, set default return value (%eax) to 0
+ * Save old state, set default return value (%ax) to 0
*/
- info->regs32->eax = 0;
+ info->regs32->ax = 0;
tsk->thread.saved_esp0 = tsk->thread.esp0;
- tsk->thread.saved_fs = info->regs32->xfs;
+ tsk->thread.saved_fs = info->regs32->fs;
savesegment(gs, tsk->thread.saved_gs);
tss = &per_cpu(init_tss, get_cpu());
@@ -352,7 +352,7 @@ static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
struct pt_regs * regs32;
regs32 = save_v86_state(regs16);
- regs32->eax = retval;
+ regs32->ax = retval;
__asm__ __volatile__("movl %0,%%esp\n\t"
"movl %1,%%ebp\n\t"
"jmp resume_userspace"
@@ -373,12 +373,12 @@ static inline void clear_IF(struct kernel_vm86_regs * regs)
static inline void clear_TF(struct kernel_vm86_regs * regs)
{
- regs->pt.eflags &= ~TF_MASK;
+ regs->pt.flags &= ~TF_MASK;
}
static inline void clear_AC(struct kernel_vm86_regs * regs)
{
- regs->pt.eflags &= ~AC_MASK;
+ regs->pt.flags &= ~AC_MASK;
}
/* It is correct to call set_IF(regs) from the set_vflags_*
@@ -392,11 +392,11 @@ static inline void clear_AC(struct kernel_vm86_regs * regs)
* [KD]
*/
-static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs)
+static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs * regs)
{
- set_flags(VEFLAGS, eflags, current->thread.v86mask);
- set_flags(regs->pt.eflags, eflags, SAFE_MASK);
- if (eflags & IF_MASK)
+ set_flags(VEFLAGS, flags, current->thread.v86mask);
+ set_flags(regs->pt.flags, flags, SAFE_MASK);
+ if (flags & IF_MASK)
set_IF(regs);
else
clear_IF(regs);
@@ -405,7 +405,7 @@ static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs
static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
{
set_flags(VFLAGS, flags, current->thread.v86mask);
- set_flags(regs->pt.eflags, flags, SAFE_MASK);
+ set_flags(regs->pt.flags, flags, SAFE_MASK);
if (flags & IF_MASK)
set_IF(regs);
else
@@ -414,7 +414,7 @@ static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_reg
static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
{
- unsigned long flags = regs->pt.eflags & RETURN_MASK;
+ unsigned long flags = regs->pt.flags & RETURN_MASK;
if (VEFLAGS & VIF_MASK)
flags |= IF_MASK;
@@ -518,7 +518,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
unsigned long __user *intr_ptr;
unsigned long segoffs;
- if (regs->pt.xcs == BIOSSEG)
+ if (regs->pt.cs == BIOSSEG)
goto cannot_handle;
if (is_revectored(i, &KVM86->int_revectored))
goto cannot_handle;
@@ -530,9 +530,9 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
if ((segoffs >> 16) == BIOSSEG)
goto cannot_handle;
pushw(ssp, sp, get_vflags(regs), cannot_handle);
- pushw(ssp, sp, regs->pt.xcs, cannot_handle);
+ pushw(ssp, sp, regs->pt.cs, cannot_handle);
pushw(ssp, sp, IP(regs), cannot_handle);
- regs->pt.xcs = segoffs >> 16;
+ regs->pt.cs = segoffs >> 16;
SP(regs) -= 6;
IP(regs) = segoffs & 0xffff;
clear_TF(regs);
@@ -549,7 +549,7 @@ int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno
if (VMPI.is_vm86pus) {
if ( (trapno==3) || (trapno==1) )
return_to_32bit(regs, VM86_TRAP + (trapno << 8));
- do_int(regs, trapno, (unsigned char __user *) (regs->pt.xss << 4), SP(regs));
+ do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
return 0;
}
if (trapno !=1)
@@ -585,10 +585,10 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
handle_vm86_trap(regs, 0, 1); \
return; } while (0)
- orig_flags = *(unsigned short *)&regs->pt.eflags;
+ orig_flags = *(unsigned short *)&regs->pt.flags;
- csp = (unsigned char __user *) (regs->pt.xcs << 4);
- ssp = (unsigned char __user *) (regs->pt.xss << 4);
+ csp = (unsigned char __user *) (regs->pt.cs << 4);
+ ssp = (unsigned char __user *) (regs->pt.ss << 4);
sp = SP(regs);
ip = IP(regs);
@@ -675,7 +675,7 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
SP(regs) += 6;
}
IP(regs) = newip;
- regs->pt.xcs = newcs;
+ regs->pt.cs = newcs;
CHECK_IF_IN_TRAP;
if (data32) {
set_vflags_long(newflags, regs);