author     Kevin O'Connor <kevin@koconnor.net>       2014-04-29 16:38:59 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>       2014-05-13 13:12:40 +0200
commit     7848c8d19f8556666df25044bbd5d8b29439c368 (patch)
tree       250b284ca81228ae073c19938cfefde8a008a26c /target-i386/seg_helper.c
parent     fd460606fd6f356ddcf424558ed67664e8d46eb4 (diff)
download   hqemu-7848c8d19f8556666df25044bbd5d8b29439c368.zip
           hqemu-7848c8d19f8556666df25044bbd5d8b29439c368.tar.gz
target-i386: the x86 CPL is stored in CS.selector - auto update hflags accordingly.
Instead of manually calling cpu_x86_set_cpl() when the CPL changes, check for
CPL changes on calls to cpu_x86_load_seg_cache(R_CS). Every location that
called cpu_x86_set_cpl() also called cpu_x86_load_seg_cache(R_CS), so
cpu_x86_set_cpl() is no longer required.

This also fixes the SMM handler code, which was not setting/restoring the CPL
level manually.

Signed-off-by: Kevin O'Connor <kevin@koconnor.net>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
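For context, a minimal sketch of the idea described above: derive the CPL from the CS selector whenever CS is (re)loaded and fold it into env->hflags, instead of calling cpu_x86_set_cpl() by hand. The sketch assumes QEMU's usual target-i386 definitions (CPUX86State, HF_CPL_SHIFT/HF_CPL_MASK, CR0_PE_MASK, VM_MASK); the helper name update_cpl_from_cs() is hypothetical and is not the exact code this patch adds to cpu_x86_load_seg_cache().

#include "cpu.h"   /* assumed: QEMU target-i386 header providing CPUX86State and the HF_* / CR0_* / VM_MASK macros */

/* Hypothetical sketch (not the actual patch): recompute the CPL from the CS
 * selector and store it in env->hflags, mirroring what the commit makes
 * cpu_x86_load_seg_cache(R_CS) do automatically. */
static inline void update_cpl_from_cs(CPUX86State *env, int selector)
{
    unsigned int cpl;

    if (env->eflags & VM_MASK) {
        cpl = 3;                /* in vm86 mode the CPL is always 3 */
    } else if (!(env->cr[0] & CR0_PE_MASK)) {
        cpl = 0;                /* real mode always executes at CPL 0 */
    } else {
        cpl = selector & 3;     /* protected mode: CPL is the RPL of CS */
    }
    env->hflags = (env->hflags & ~HF_CPL_MASK) | (cpl << HF_CPL_SHIFT);
}

With the CPL updated as a side effect of loading CS, every path that reloads CS (task switch, interrupt and call-gate dispatch, SYSCALL/SYSRET, SYSENTER/SYSEXIT, and the SMM entry/exit code) ends up with a consistent CPL in hflags, which is why the explicit cpu_x86_set_cpl() calls in the diff below can simply be removed.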
Diffstat (limited to 'target-i386/seg_helper.c')
-rw-r--r--  target-i386/seg_helper.c | 15 ---------------
1 file changed, 0 insertions(+), 15 deletions(-)
diff --git a/target-i386/seg_helper.c b/target-i386/seg_helper.c
index 2e0b113..3cf862e 100644
--- a/target-i386/seg_helper.c
+++ b/target-i386/seg_helper.c
@@ -409,11 +409,7 @@ static void switch_tss(CPUX86State *env, int tss_selector,
         for (i = 0; i < 6; i++) {
             load_seg_vm(env, i, new_segs[i]);
         }
-        /* in vm86, CPL is always 3 */
-        cpu_x86_set_cpl(env, 3);
     } else {
-        /* CPL is set the RPL of CS */
-        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
         /* first just selectors as the rest may trigger exceptions */
         for (i = 0; i < 6; i++) {
             cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
@@ -763,7 +759,6 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                            get_seg_base(e1, e2),
                            get_seg_limit(e1, e2),
                            e2);
-    cpu_x86_set_cpl(env, dpl);
     env->eip = offset;
 }
@@ -928,7 +923,6 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                            get_seg_base(e1, e2),
                            get_seg_limit(e1, e2),
                            e2);
-    cpu_x86_set_cpl(env, dpl);
     env->eip = offset;
 }
 #endif
@@ -962,7 +956,6 @@ void helper_syscall(CPUX86State *env, int next_eip_addend)
         env->eflags &= ~env->fmask;
         cpu_load_eflags(env, env->eflags, 0);
-        cpu_x86_set_cpl(env, 0);
         cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                                0, 0xffffffff,
                                DESC_G_MASK | DESC_P_MASK |
@@ -983,7 +976,6 @@ void helper_syscall(CPUX86State *env, int next_eip_addend)
         env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
         env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
-        cpu_x86_set_cpl(env, 0);
         cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                                0, 0xffffffff,
                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
@@ -1038,7 +1030,6 @@ void helper_sysret(CPUX86State *env, int dflag)
                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                DESC_W_MASK | DESC_A_MASK);
-        cpu_x86_set_cpl(env, 3);
     } else {
         env->eflags |= IF_MASK;
         cpu_x86_load_seg_cache(env, R_CS, selector | 3,
@@ -1052,7 +1043,6 @@ void helper_sysret(CPUX86State *env, int dflag)
                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                DESC_W_MASK | DESC_A_MASK);
-        cpu_x86_set_cpl(env, 3);
     }
 }
 #endif
@@ -1905,7 +1895,6 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                                get_seg_base(e1, e2),
                                get_seg_limit(e1, e2),
                                e2);
-        cpu_x86_set_cpl(env, dpl);
         SET_ESP(sp, sp_mask);
         env->eip = offset;
     }
@@ -2134,7 +2123,6 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
                                get_seg_base(e1, e2),
                                get_seg_limit(e1, e2),
                                e2);
-        cpu_x86_set_cpl(env, rpl);
         sp = new_esp;
 #ifdef TARGET_X86_64
         if (env->hflags & HF_CS64_MASK) {
@@ -2185,7 +2173,6 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
                     IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                     VIP_MASK);
     load_seg_vm(env, R_CS, new_cs & 0xffff);
-    cpu_x86_set_cpl(env, 3);
     load_seg_vm(env, R_SS, new_ss & 0xffff);
     load_seg_vm(env, R_ES, new_es & 0xffff);
     load_seg_vm(env, R_DS, new_ds & 0xffff);
@@ -2238,7 +2225,6 @@ void helper_sysenter(CPUX86State *env)
         raise_exception_err(env, EXCP0D_GPF, 0);
     }
     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
-    cpu_x86_set_cpl(env, 0);
 #ifdef TARGET_X86_64
     if (env->hflags & HF_LMA_MASK) {
@@ -2274,7 +2260,6 @@ void helper_sysexit(CPUX86State *env, int dflag)
     if (env->sysenter_cs == 0 || cpl != 0) {
         raise_exception_err(env, EXCP0D_GPF, 0);
     }
-    cpu_x86_set_cpl(env, 3);
 #ifdef TARGET_X86_64
     if (dflag == 2) {
         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |