Diffstat (limited to 'target-i386')
-rw-r--r--  target-i386/Makefile.objs  |   3
-rw-r--r--  target-i386/arch_dump.c    |  10
-rw-r--r--  target-i386/cpu-qom.h      |  14
-rw-r--r--  target-i386/cpu.c          |  47
-rw-r--r--  target-i386/cpu.h          |  40
-rw-r--r--  target-i386/gdbstub.c      | 231
-rw-r--r--  target-i386/hyperv.c       |  64
-rw-r--r--  target-i386/hyperv.h       |  45
-rw-r--r--  target-i386/kvm.c          |  38
9 files changed, 338 insertions, 154 deletions
diff --git a/target-i386/Makefile.objs b/target-i386/Makefile.objs
index c1d4f05..da1fc40 100644
--- a/target-i386/Makefile.objs
+++ b/target-i386/Makefile.objs
@@ -1,8 +1,9 @@
obj-y += translate.o helper.o cpu.o
obj-y += excp_helper.o fpu_helper.o cc_helper.o int_helper.o svm_helper.o
obj-y += smm_helper.o misc_helper.o mem_helper.o seg_helper.o
+obj-y += gdbstub.o
obj-$(CONFIG_SOFTMMU) += machine.o arch_memory_mapping.o arch_dump.o
-obj-$(CONFIG_KVM) += kvm.o hyperv.o
+obj-$(CONFIG_KVM) += kvm.o
obj-$(CONFIG_NO_KVM) += kvm-stub.o
obj-$(CONFIG_LINUX_USER) += ioport-user.o
obj-$(CONFIG_BSD_USER) += ioport-user.o
diff --git a/target-i386/arch_dump.c b/target-i386/arch_dump.c
index 10dc228..0bbed23 100644
--- a/target-i386/arch_dump.c
+++ b/target-i386/arch_dump.c
@@ -15,6 +15,7 @@
#include "exec/cpu-all.h"
#include "sysemu/dump.h"
#include "elf.h"
+#include "sysemu/memory_mapping.h"
#ifdef TARGET_X86_64
typedef struct {
@@ -389,10 +390,11 @@ int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cs,
return cpu_write_qemu_note(f, &cpu->env, opaque, 0);
}
-int cpu_get_dump_info(ArchDumpInfo *info)
+int cpu_get_dump_info(ArchDumpInfo *info,
+ const GuestPhysBlockList *guest_phys_blocks)
{
bool lma = false;
- RAMBlock *block;
+ GuestPhysBlock *block;
#ifdef TARGET_X86_64
X86CPU *first_x86_cpu = X86_CPU(first_cpu);
@@ -412,8 +414,8 @@ int cpu_get_dump_info(ArchDumpInfo *info)
} else {
info->d_class = ELFCLASS32;
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
- if (block->offset + block->length > UINT_MAX) {
+ QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
+ if (block->target_end > UINT_MAX) {
/* The memory size is greater than 4G */
info->d_class = ELFCLASS64;
break;
diff --git a/target-i386/cpu-qom.h b/target-i386/cpu-qom.h
index d928562..c4447c2 100644
--- a/target-i386/cpu-qom.h
+++ b/target-i386/cpu-qom.h
@@ -66,8 +66,19 @@ typedef struct X86CPU {
CPUX86State env;
+ bool hyperv_vapic;
+ bool hyperv_relaxed_timing;
+ int hyperv_spinlock_attempts;
+
/* Features that were filtered out because of missing host capabilities */
uint32_t filtered_features[FEATURE_WORDS];
+
+ /* Enable PMU CPUID bits. This can't be enabled by default yet because
+ * it doesn't have ABI stability guarantees, as it passes all PMU CPUID
+ * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel
+ * capabilities) directly to the guest.
+ */
+ bool enable_pmu;
} X86CPU;
static inline X86CPU *x86_env_get_cpu(CPUX86State *env)
@@ -106,4 +117,7 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
hwaddr x86_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int x86_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
#endif
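
The cpu-qom.h hunk above turns the old Hyper-V globals into per-CPU fields and adds the opt-in enable_pmu flag, which the cpu.c hunks below register as the QOM property "pmu". As a rough sketch (not part of this patch), the property could be flipped on a single X86CPU instance with the same object_property_set_bool() call the patch itself uses for -cpu host; the error handling shown here is only an assumption:

    /* sketch only: enable the "pmu" property registered via dc->props in cpu.c */
    Error *err = NULL;
    object_property_set_bool(OBJECT(cpu), true, "pmu", &err);
    if (err != NULL) {
        error_report("%s", error_get_pretty(err));
        error_free(err);
    }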
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index cd350cb..42c5de0 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -35,8 +35,6 @@
#include "qapi/visitor.h"
#include "sysemu/arch_init.h"
-#include "hyperv.h"
-
#include "hw/hw.h"
#if defined(CONFIG_KVM)
#include <linux/kvm_para.h>
@@ -1475,9 +1473,11 @@ static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
error_propagate(errp, err);
}
-static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *name)
+static int cpu_x86_find_by_name(X86CPU *cpu, x86_def_t *x86_cpu_def,
+ const char *name)
{
x86_def_t *def;
+ Error *err = NULL;
int i;
if (name == NULL) {
@@ -1485,6 +1485,8 @@ static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *name)
}
if (kvm_enabled() && strcmp(name, "host") == 0) {
kvm_cpu_fill_host(x86_cpu_def);
+ object_property_set_bool(OBJECT(cpu), true, "pmu", &err);
+ assert_no_error(err);
return 0;
}
@@ -1587,12 +1589,19 @@ static void cpu_x86_parse_featurestr(X86CPU *cpu, char *features, Error **errp)
object_property_parse(OBJECT(cpu), num, "tsc-frequency", errp);
} else if (!strcmp(featurestr, "hv-spinlocks")) {
char *err;
+ const int min = 0xFFF;
numvalue = strtoul(val, &err, 0);
if (!*val || *err) {
error_setg(errp, "bad numerical value %s", val);
goto out;
}
- hyperv_set_spinlock_retries(numvalue);
+ if (numvalue < min) {
+ fprintf(stderr, "hv-spinlocks value shall always be >= 0x%x"
+ ", fixup will be removed in future versions\n",
+ min);
+ numvalue = min;
+ }
+ cpu->hyperv_spinlock_attempts = numvalue;
} else {
error_setg(errp, "unrecognized feature %s", featurestr);
goto out;
@@ -1602,9 +1611,9 @@ static void cpu_x86_parse_featurestr(X86CPU *cpu, char *features, Error **errp)
} else if (!strcmp(featurestr, "enforce")) {
check_cpuid = enforce_cpuid = 1;
} else if (!strcmp(featurestr, "hv_relaxed")) {
- hyperv_enable_relaxed_timing(true);
+ cpu->hyperv_relaxed_timing = true;
} else if (!strcmp(featurestr, "hv_vapic")) {
- hyperv_enable_vapic_recommended(true);
+ cpu->hyperv_vapic = true;
} else {
error_setg(errp, "feature string `%s' not in format (+feature|"
"-feature|feature=xyz)", featurestr);
@@ -1742,7 +1751,7 @@ static void cpu_x86_register(X86CPU *cpu, const char *name, Error **errp)
memset(def, 0, sizeof(*def));
- if (cpu_x86_find_by_name(def, name) < 0) {
+ if (cpu_x86_find_by_name(cpu, def, name) < 0) {
error_setg(errp, "Unable to find CPU definition: %s", name);
return;
}
@@ -1820,7 +1829,11 @@ X86CPU *cpu_x86_create(const char *cpu_model, DeviceState *icc_bridge,
}
out:
- error_propagate(errp, error);
+ if (error != NULL) {
+ error_propagate(errp, error);
+ object_unref(OBJECT(cpu));
+ cpu = NULL;
+ }
g_strfreev(model_pieces);
return cpu;
}
@@ -1839,7 +1852,7 @@ X86CPU *cpu_x86_init(const char *cpu_model)
out:
if (error) {
- fprintf(stderr, "%s\n", error_get_pretty(error));
+ error_report("%s", error_get_pretty(error));
error_free(error);
if (cpu != NULL) {
object_unref(OBJECT(cpu));
@@ -2016,7 +2029,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
break;
case 0xA:
/* Architectural Performance Monitoring Leaf */
- if (kvm_enabled()) {
+ if (kvm_enabled() && cpu->enable_pmu) {
KVMState *s = cs->kvm_state;
*eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
@@ -2333,6 +2346,7 @@ static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
+ CPUState *cs = CPU(dev);
X86CPU *cpu = X86_CPU(dev);
X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
CPUX86State *env = &cpu->env;
@@ -2387,12 +2401,13 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
#endif
mce_init(cpu);
+ qemu_init_vcpu(cs);
x86_cpu_apic_realize(cpu, &local_err);
if (local_err != NULL) {
goto out;
}
- cpu_reset(CPU(cpu));
+ cpu_reset(cs);
xcc->parent_realize(dev, &local_err);
out:
@@ -2479,6 +2494,7 @@ static void x86_cpu_initfn(Object *obj)
x86_cpu_get_feature_words,
NULL, NULL, (void *)cpu->filtered_features, NULL);
+ cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
env->cpuid_apic_id = x86_cpu_apic_id_from_index(cs->cpu_index);
/* init various static tables used in TCG mode */
@@ -2520,6 +2536,11 @@ static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
cpu->env.eip = tb->pc - tb->cs_base;
}
+static Property x86_cpu_properties[] = {
+ DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
+ DEFINE_PROP_END_OF_LIST()
+};
+
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
X86CPUClass *xcc = X86_CPU_CLASS(oc);
@@ -2529,6 +2550,7 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
xcc->parent_realize = dc->realize;
dc->realize = x86_cpu_realizefn;
dc->bus_type = TYPE_ICC_BUS;
+ dc->props = x86_cpu_properties;
xcc->parent_reset = cc->reset;
cc->reset = x86_cpu_reset;
@@ -2538,6 +2560,8 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->dump_state = x86_cpu_dump_state;
cc->set_pc = x86_cpu_set_pc;
cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
+ cc->gdb_read_register = x86_cpu_gdb_read_register;
+ cc->gdb_write_register = x86_cpu_gdb_write_register;
cc->get_arch_id = x86_cpu_get_arch_id;
cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifndef CONFIG_USER_ONLY
@@ -2549,6 +2573,7 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
cc->vmsd = &vmstate_x86_cpu;
#endif
+ cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
}
static const TypeInfo x86_cpu_type_info = {
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 31de265..5723eff 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -562,24 +562,28 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_MWAIT_IBE (1 << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX (1 << 0) /* enumeration supported */
-#define EXCP00_DIVZ 0
-#define EXCP01_DB 1
-#define EXCP02_NMI 2
-#define EXCP03_INT3 3
-#define EXCP04_INTO 4
-#define EXCP05_BOUND 5
-#define EXCP06_ILLOP 6
-#define EXCP07_PREX 7
-#define EXCP08_DBLE 8
-#define EXCP09_XERR 9
-#define EXCP0A_TSS 10
-#define EXCP0B_NOSEG 11
-#define EXCP0C_STACK 12
-#define EXCP0D_GPF 13
-#define EXCP0E_PAGE 14
-#define EXCP10_COPR 16
-#define EXCP11_ALGN 17
-#define EXCP12_MCHK 18
+#ifndef HYPERV_SPINLOCK_NEVER_RETRY
+#define HYPERV_SPINLOCK_NEVER_RETRY 0xFFFFFFFF
+#endif
+
+#define EXCP00_DIVZ 0
+#define EXCP01_DB 1
+#define EXCP02_NMI 2
+#define EXCP03_INT3 3
+#define EXCP04_INTO 4
+#define EXCP05_BOUND 5
+#define EXCP06_ILLOP 6
+#define EXCP07_PREX 7
+#define EXCP08_DBLE 8
+#define EXCP09_XERR 9
+#define EXCP0A_TSS 10
+#define EXCP0B_NOSEG 11
+#define EXCP0C_STACK 12
+#define EXCP0D_GPF 13
+#define EXCP0E_PAGE 14
+#define EXCP10_COPR 16
+#define EXCP11_ALGN 17
+#define EXCP12_MCHK 18
#define EXCP_SYSCALL 0x100 /* only happens in user only emulation
for syscall instruction */
diff --git a/target-i386/gdbstub.c b/target-i386/gdbstub.c
new file mode 100644
index 0000000..15bebef
--- /dev/null
+++ b/target-i386/gdbstub.c
@@ -0,0 +1,231 @@
+/*
+ * x86 gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "config.h"
+#include "qemu-common.h"
+#include "exec/gdbstub.h"
+
+#ifdef TARGET_X86_64
+static const int gpr_map[16] = {
+ R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
+ 8, 9, 10, 11, 12, 13, 14, 15
+};
+#else
+#define gpr_map gpr_map32
+#endif
+static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+
+#define IDX_IP_REG CPU_NB_REGS
+#define IDX_FLAGS_REG (IDX_IP_REG + 1)
+#define IDX_SEG_REGS (IDX_FLAGS_REG + 1)
+#define IDX_FP_REGS (IDX_SEG_REGS + 6)
+#define IDX_XMM_REGS (IDX_FP_REGS + 16)
+#define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
+
+int x86_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ if (n < CPU_NB_REGS) {
+ if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
+ return gdb_get_reg64(mem_buf, env->regs[gpr_map[n]]);
+ } else if (n < CPU_NB_REGS32) {
+ return gdb_get_reg32(mem_buf, env->regs[gpr_map32[n]]);
+ }
+ } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
+#ifdef USE_X86LDOUBLE
+ /* FIXME: byteswap float values - after fixing fpregs layout. */
+ memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
+#else
+ memset(mem_buf, 0, 10);
+#endif
+ return 10;
+ } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
+ n -= IDX_XMM_REGS;
+ if (n < CPU_NB_REGS32 ||
+ (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
+ stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
+ stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
+ return 16;
+ }
+ } else {
+ switch (n) {
+ case IDX_IP_REG:
+ if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
+ return gdb_get_reg64(mem_buf, env->eip);
+ } else {
+ return gdb_get_reg32(mem_buf, env->eip);
+ }
+ case IDX_FLAGS_REG:
+ return gdb_get_reg32(mem_buf, env->eflags);
+
+ case IDX_SEG_REGS:
+ return gdb_get_reg32(mem_buf, env->segs[R_CS].selector);
+ case IDX_SEG_REGS + 1:
+ return gdb_get_reg32(mem_buf, env->segs[R_SS].selector);
+ case IDX_SEG_REGS + 2:
+ return gdb_get_reg32(mem_buf, env->segs[R_DS].selector);
+ case IDX_SEG_REGS + 3:
+ return gdb_get_reg32(mem_buf, env->segs[R_ES].selector);
+ case IDX_SEG_REGS + 4:
+ return gdb_get_reg32(mem_buf, env->segs[R_FS].selector);
+ case IDX_SEG_REGS + 5:
+ return gdb_get_reg32(mem_buf, env->segs[R_GS].selector);
+
+ case IDX_FP_REGS + 8:
+ return gdb_get_reg32(mem_buf, env->fpuc);
+ case IDX_FP_REGS + 9:
+ return gdb_get_reg32(mem_buf, (env->fpus & ~0x3800) |
+ (env->fpstt & 0x7) << 11);
+ case IDX_FP_REGS + 10:
+ return gdb_get_reg32(mem_buf, 0); /* ftag */
+ case IDX_FP_REGS + 11:
+ return gdb_get_reg32(mem_buf, 0); /* fiseg */
+ case IDX_FP_REGS + 12:
+ return gdb_get_reg32(mem_buf, 0); /* fioff */
+ case IDX_FP_REGS + 13:
+ return gdb_get_reg32(mem_buf, 0); /* foseg */
+ case IDX_FP_REGS + 14:
+ return gdb_get_reg32(mem_buf, 0); /* fooff */
+ case IDX_FP_REGS + 15:
+ return gdb_get_reg32(mem_buf, 0); /* fop */
+
+ case IDX_MXCSR_REG:
+ return gdb_get_reg32(mem_buf, env->mxcsr);
+ }
+ }
+ return 0;
+}
+
+static int x86_cpu_gdb_load_seg(X86CPU *cpu, int sreg, uint8_t *mem_buf)
+{
+ CPUX86State *env = &cpu->env;
+ uint16_t selector = ldl_p(mem_buf);
+
+ if (selector != env->segs[sreg].selector) {
+#if defined(CONFIG_USER_ONLY)
+ cpu_x86_load_seg(env, sreg, selector);
+#else
+ unsigned int limit, flags;
+ target_ulong base;
+
+ if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
+ base = selector << 4;
+ limit = 0xffff;
+ flags = 0;
+ } else {
+ if (!cpu_x86_get_descr_debug(env, selector, &base, &limit,
+ &flags)) {
+ return 4;
+ }
+ }
+ cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
+#endif
+ }
+ return 4;
+}
+
+int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ uint32_t tmp;
+
+ if (n < CPU_NB_REGS) {
+ if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
+ env->regs[gpr_map[n]] = ldtul_p(mem_buf);
+ return sizeof(target_ulong);
+ } else if (n < CPU_NB_REGS32) {
+ n = gpr_map32[n];
+ env->regs[n] &= ~0xffffffffUL;
+ env->regs[n] |= (uint32_t)ldl_p(mem_buf);
+ return 4;
+ }
+ } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
+#ifdef USE_X86LDOUBLE
+ /* FIXME: byteswap float values - after fixing fpregs layout. */
+ memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
+#endif
+ return 10;
+ } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
+ n -= IDX_XMM_REGS;
+ if (n < CPU_NB_REGS32 ||
+ (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
+ env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
+ env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
+ return 16;
+ }
+ } else {
+ switch (n) {
+ case IDX_IP_REG:
+ if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
+ env->eip = ldq_p(mem_buf);
+ return 8;
+ } else {
+ env->eip &= ~0xffffffffUL;
+ env->eip |= (uint32_t)ldl_p(mem_buf);
+ return 4;
+ }
+ case IDX_FLAGS_REG:
+ env->eflags = ldl_p(mem_buf);
+ return 4;
+
+ case IDX_SEG_REGS:
+ return x86_cpu_gdb_load_seg(cpu, R_CS, mem_buf);
+ case IDX_SEG_REGS + 1:
+ return x86_cpu_gdb_load_seg(cpu, R_SS, mem_buf);
+ case IDX_SEG_REGS + 2:
+ return x86_cpu_gdb_load_seg(cpu, R_DS, mem_buf);
+ case IDX_SEG_REGS + 3:
+ return x86_cpu_gdb_load_seg(cpu, R_ES, mem_buf);
+ case IDX_SEG_REGS + 4:
+ return x86_cpu_gdb_load_seg(cpu, R_FS, mem_buf);
+ case IDX_SEG_REGS + 5:
+ return x86_cpu_gdb_load_seg(cpu, R_GS, mem_buf);
+
+ case IDX_FP_REGS + 8:
+ env->fpuc = ldl_p(mem_buf);
+ return 4;
+ case IDX_FP_REGS + 9:
+ tmp = ldl_p(mem_buf);
+ env->fpstt = (tmp >> 11) & 7;
+ env->fpus = tmp & ~0x3800;
+ return 4;
+ case IDX_FP_REGS + 10: /* ftag */
+ return 4;
+ case IDX_FP_REGS + 11: /* fiseg */
+ return 4;
+ case IDX_FP_REGS + 12: /* fioff */
+ return 4;
+ case IDX_FP_REGS + 13: /* foseg */
+ return 4;
+ case IDX_FP_REGS + 14: /* fooff */
+ return 4;
+ case IDX_FP_REGS + 15: /* fop */
+ return 4;
+
+ case IDX_MXCSR_REG:
+ env->mxcsr = ldl_p(mem_buf);
+ return 4;
+ }
+ }
+ /* Unrecognised register. */
+ return 0;
+}
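
The new gdbstub.c provides the per-register read/write handlers that the cpu.c class-init hunk wires into CPUClass (gdb_read_register, gdb_write_register, and gdb_num_core_regs = CPU_NB_REGS * 2 + 25). As a rough sketch of how a caller might drive these hooks — this is not QEMU's actual gdbstub loop, and the helper name is invented for illustration:

    /* sketch: serialize all core registers by asking the class hooks in turn */
    static int dump_core_regs(CPUState *cs, uint8_t *buf)
    {
        CPUClass *cc = CPU_GET_CLASS(cs);
        int i, off = 0;

        for (i = 0; i < cc->gdb_num_core_regs; i++) {
            /* each handler returns the number of bytes written (0 if unhandled) */
            off += cc->gdb_read_register(cs, buf + off, i);
        }
        return off;
    }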
diff --git a/target-i386/hyperv.c b/target-i386/hyperv.c
deleted file mode 100644
index f284e99..0000000
--- a/target-i386/hyperv.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * QEMU Hyper-V support
- *
- * Copyright Red Hat, Inc. 2011
- *
- * Author: Vadim Rozenfeld <vrozenfe@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#include "hyperv.h"
-
-static bool hyperv_vapic;
-static bool hyperv_relaxed_timing;
-static int hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
-
-void hyperv_enable_vapic_recommended(bool val)
-{
- hyperv_vapic = val;
-}
-
-void hyperv_enable_relaxed_timing(bool val)
-{
- hyperv_relaxed_timing = val;
-}
-
-void hyperv_set_spinlock_retries(int val)
-{
- hyperv_spinlock_attempts = val;
- if (hyperv_spinlock_attempts < 0xFFF) {
- hyperv_spinlock_attempts = 0xFFF;
- }
-}
-
-bool hyperv_enabled(void)
-{
- return hyperv_hypercall_available() || hyperv_relaxed_timing_enabled();
-}
-
-bool hyperv_hypercall_available(void)
-{
- if (hyperv_vapic ||
- (hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY)) {
- return true;
- }
- return false;
-}
-
-bool hyperv_vapic_recommended(void)
-{
- return hyperv_vapic;
-}
-
-bool hyperv_relaxed_timing_enabled(void)
-{
- return hyperv_relaxed_timing;
-}
-
-int hyperv_get_spinlock_retries(void)
-{
- return hyperv_spinlock_attempts;
-}
diff --git a/target-i386/hyperv.h b/target-i386/hyperv.h
deleted file mode 100644
index bacb1d4..0000000
--- a/target-i386/hyperv.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * QEMU Hyper-V support
- *
- * Copyright Red Hat, Inc. 2011
- *
- * Author: Vadim Rozenfeld <vrozenfe@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#ifndef QEMU_HW_HYPERV_H
-#define QEMU_HW_HYPERV_H 1
-
-#include "qemu-common.h"
-#ifdef CONFIG_KVM
-#include <asm/hyperv.h>
-#endif
-
-#ifndef HYPERV_SPINLOCK_NEVER_RETRY
-#define HYPERV_SPINLOCK_NEVER_RETRY 0xFFFFFFFF
-#endif
-
-#ifndef KVM_CPUID_SIGNATURE_NEXT
-#define KVM_CPUID_SIGNATURE_NEXT 0x40000100
-#endif
-
-#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_KVM)
-void hyperv_enable_vapic_recommended(bool val);
-void hyperv_enable_relaxed_timing(bool val);
-void hyperv_set_spinlock_retries(int val);
-#else
-static inline void hyperv_enable_vapic_recommended(bool val) { }
-static inline void hyperv_enable_relaxed_timing(bool val) { }
-static inline void hyperv_set_spinlock_retries(int val) { }
-#endif
-
-bool hyperv_enabled(void);
-bool hyperv_hypercall_available(void);
-bool hyperv_vapic_recommended(void);
-bool hyperv_relaxed_timing_enabled(void);
-int hyperv_get_spinlock_retries(void);
-
-#endif /* QEMU_HW_HYPERV_H */
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 58f7bb7..749aa09 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -31,7 +31,7 @@
#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
#include "exec/ioport.h"
-#include "hyperv.h"
+#include <asm/hyperv.h>
#include "hw/pci/pci.h"
//#define DEBUG_KVM
@@ -424,6 +424,22 @@ unsigned long kvm_arch_vcpu_id(CPUState *cs)
return cpu->env.cpuid_apic_id;
}
+#ifndef KVM_CPUID_SIGNATURE_NEXT
+#define KVM_CPUID_SIGNATURE_NEXT 0x40000100
+#endif
+
+static bool hyperv_hypercall_available(X86CPU *cpu)
+{
+ return cpu->hyperv_vapic ||
+ (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY);
+}
+
+static bool hyperv_enabled(X86CPU *cpu)
+{
+ return hyperv_hypercall_available(cpu) ||
+ cpu->hyperv_relaxed_timing;
+}
+
#define KVM_MAX_CPUID_ENTRIES 100
int kvm_arch_init_vcpu(CPUState *cs)
@@ -446,7 +462,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
c = &cpuid_data.entries[cpuid_i++];
memset(c, 0, sizeof(*c));
c->function = KVM_CPUID_SIGNATURE;
- if (!hyperv_enabled()) {
+ if (!hyperv_enabled(cpu)) {
memcpy(signature, "KVMKVMKVM\0\0\0", 12);
c->eax = 0;
} else {
@@ -462,7 +478,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
c->function = KVM_CPUID_FEATURES;
c->eax = env->features[FEAT_KVM];
- if (hyperv_enabled()) {
+ if (hyperv_enabled(cpu)) {
memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
c->eax = signature[0];
@@ -475,10 +491,10 @@ int kvm_arch_init_vcpu(CPUState *cs)
c = &cpuid_data.entries[cpuid_i++];
memset(c, 0, sizeof(*c));
c->function = HYPERV_CPUID_FEATURES;
- if (hyperv_relaxed_timing_enabled()) {
+ if (cpu->hyperv_relaxed_timing) {
c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
}
- if (hyperv_vapic_recommended()) {
+ if (cpu->hyperv_vapic) {
c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
c->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
}
@@ -486,13 +502,13 @@ int kvm_arch_init_vcpu(CPUState *cs)
c = &cpuid_data.entries[cpuid_i++];
memset(c, 0, sizeof(*c));
c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
- if (hyperv_relaxed_timing_enabled()) {
+ if (cpu->hyperv_relaxed_timing) {
c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
}
- if (hyperv_vapic_recommended()) {
+ if (cpu->hyperv_vapic) {
c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
}
- c->ebx = hyperv_get_spinlock_retries();
+ c->ebx = cpu->hyperv_spinlock_attempts;
c = &cpuid_data.entries[cpuid_i++];
memset(c, 0, sizeof(*c));
@@ -1186,11 +1202,11 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_CTRL,
env->msr_global_ctrl);
}
- if (hyperv_hypercall_available()) {
+ if (hyperv_hypercall_available(cpu)) {
kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_GUEST_OS_ID, 0);
kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_HYPERCALL, 0);
}
- if (hyperv_vapic_recommended()) {
+ if (cpu->hyperv_vapic) {
kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_APIC_ASSIST_PAGE, 0);
}
if (has_msr_feature_control) {
@@ -1735,7 +1751,7 @@ static int kvm_guest_debug_workarounds(X86CPU *cpu)
*/
if (reinject_trap ||
(!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
- ret = kvm_update_guest_debug(env, reinject_trap);
+ ret = kvm_update_guest_debug(cs, reinject_trap);
}
return ret;
}
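
With hyperv_enabled() and hyperv_hypercall_available() now derived entirely from the per-CPU flags, the enlightenments are still requested through the -cpu feature string parsed in the cpu.c hunks above (hv_relaxed, hv_vapic, hv-spinlocks). A hedged example invocation, with the CPU model and spinlock count chosen arbitrarily:

    qemu-system-x86_64 -enable-kvm -cpu kvm64,hv_relaxed,hv_vapic,hv-spinlocks=0x1fff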