author     Peter Maydell <peter.maydell@linaro.org>    2015-05-11 12:01:09 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2015-05-11 12:01:09 +0100
commit     b951cda21d6b232f138ccf008e12bce8ddc95465 (patch)
tree       60f1b35a5594ce23a9ad7a580bed158a762a15f3
parent     ec62ad1e27ffd1f7ff2172a916d161cc385e73bd (diff)
parent     ca4414804114fd0095b317785bc0b51862e62ebb (diff)
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
- build bugfix from Fam and new configure check from Emilio
- two improvements to "info mtree" from Gerd
- KVM support for memory transaction attributes
- one more small step towards unlocked MMIO dispatch
- one piece of the qemu-nbd errno fixes
- trivial-ish patches from Denis and Thomas

# gpg: Signature made Fri May 8 13:47:29 2015 BST using RSA key ID 78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg:          It is not certain that the signature belongs to the owner.
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream:
  qemu-nbd: only send a limited number of errno codes on the wire
  rules.mak: Force CFLAGS for all objects in DSO
  configure: require __thread support
  exec: move rcu_read_lock/unlock to address_space_translate callers
  kvm: add support for memory transaction attributes
  mtree: also print disabled regions
  mtree: tag & indent a bit better
  apic_common: improve readability of apic_reset_common
  kvm: Silence warning from valgrind

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rwxr-xr-x  configure                 11
-rw-r--r--  exec.c                    33
-rw-r--r--  hw/intc/apic_common.c      7
-rw-r--r--  hw/vfio/common.c           7
-rw-r--r--  include/exec/memory.h      4
-rw-r--r--  include/sysemu/kvm.h       3
-rw-r--r--  kvm-all.c                 21
-rw-r--r--  memory.c                  23
-rw-r--r--  nbd.c                     57
-rw-r--r--  rules.mak                  4
-rw-r--r--  target-arm/kvm.c           4
-rw-r--r--  target-i386/kvm.c          4
-rw-r--r--  target-mips/kvm.c          4
-rw-r--r--  target-ppc/kvm.c           4
-rw-r--r--  target-s390x/kvm.c         4
-rw-r--r--  translate-all.c            3
16 files changed, 156 insertions, 37 deletions
diff --git a/configure b/configure
index b18aa9e..1f0f485 100755
--- a/configure
+++ b/configure
@@ -1556,6 +1556,17 @@ if test "$static" = "yes" ; then
fi
fi
+# Unconditional check for compiler __thread support
+ cat > $TMPC << EOF
+static __thread int tls_var;
+int main(void) { return tls_var; }
+EOF
+
+if ! compile_prog "-Werror" "" ; then
+ error_exit "Your compiler does not support the __thread specifier for " \
+ "Thread-Local Storage (TLS). Please upgrade to a version that does."
+fi
+
if test "$pie" = ""; then
case "$cpu-$targetos" in
i386-Linux|x86_64-Linux|x32-Linux|i386-OpenBSD|x86_64-OpenBSD)
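
The check above probes for C-level thread-local storage. As a reminder of what the __thread specifier provides, here is a standalone sketch (not part of this patch; the file name and identifiers are purely illustrative) in which each thread operates on its own copy of the qualified variable:

/* tls_demo.c -- illustrative only: every thread gets a private 'counter' */
#include <pthread.h>
#include <stdio.h>

static __thread int counter;

static void *worker(void *arg)
{
    counter += (int)(long)arg;            /* touches this thread's copy only */
    printf("thread %ld sees counter = %d\n", (long)arg, counter);
    return NULL;
}

int main(void)
{
    pthread_t t1, t2;
    pthread_create(&t1, NULL, worker, (void *)1L);
    pthread_create(&t2, NULL, worker, (void *)2L);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    printf("main still sees counter = %d\n", counter);   /* prints 0 */
    return 0;
}

A toolchain that passes the configure test above should build this with something like "cc -pthread tls_demo.c".
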
diff --git a/exec.c b/exec.c
index ae37b98..e19ab22 100644
--- a/exec.c
+++ b/exec.c
@@ -373,6 +373,7 @@ static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
return false;
}
+/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
hwaddr *xlat, hwaddr *plen,
bool is_write)
@@ -381,7 +382,6 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
MemoryRegionSection *section;
MemoryRegion *mr;
- rcu_read_lock();
for (;;) {
AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
section = address_space_translate_internal(d, addr, &addr, plen, true);
@@ -409,7 +409,6 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
}
*xlat = addr;
- rcu_read_unlock();
return mr;
}
@@ -2329,6 +2328,7 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
MemoryRegion *mr;
MemTxResult result = MEMTX_OK;
+ rcu_read_lock();
while (len > 0) {
l = len;
mr = address_space_translate(as, addr, &addr1, &l, is_write);
@@ -2415,6 +2415,7 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
buf += l;
addr += l;
}
+ rcu_read_unlock();
return result;
}
@@ -2452,6 +2453,7 @@ static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
hwaddr addr1;
MemoryRegion *mr;
+ rcu_read_lock();
while (len > 0) {
l = len;
mr = address_space_translate(as, addr, &addr1, &l, true);
@@ -2477,6 +2479,7 @@ static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
buf += l;
addr += l;
}
+ rcu_read_unlock();
}
/* used for ROM loading : can write in RAM and ROM */
@@ -2585,6 +2588,7 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_
MemoryRegion *mr;
hwaddr l, xlat;
+ rcu_read_lock();
while (len > 0) {
l = len;
mr = address_space_translate(as, addr, &xlat, &l, is_write);
@@ -2598,6 +2602,7 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_
len -= l;
addr += l;
}
+ rcu_read_unlock();
return true;
}
@@ -2624,9 +2629,12 @@ void *address_space_map(AddressSpace *as,
}
l = len;
+ rcu_read_lock();
mr = address_space_translate(as, addr, &xlat, &l, is_write);
+
if (!memory_access_is_direct(mr, is_write)) {
if (atomic_xchg(&bounce.in_use, true)) {
+ rcu_read_unlock();
return NULL;
}
/* Avoid unbounded allocations */
@@ -2642,6 +2650,7 @@ void *address_space_map(AddressSpace *as,
bounce.buffer, l);
}
+ rcu_read_unlock();
*plen = l;
return bounce.buffer;
}
@@ -2665,6 +2674,7 @@ void *address_space_map(AddressSpace *as,
}
memory_region_ref(mr);
+ rcu_read_unlock();
*plen = done;
return qemu_ram_ptr_length(raddr + base, plen);
}
@@ -2728,6 +2738,7 @@ static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
hwaddr addr1;
MemTxResult r;
+ rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l, false);
if (l < 4 || !memory_access_is_direct(mr, false)) {
/* I/O case */
@@ -2762,6 +2773,7 @@ static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
if (result) {
*result = r;
}
+ rcu_read_unlock();
return val;
}
@@ -2814,6 +2826,7 @@ static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
hwaddr addr1;
MemTxResult r;
+ rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
false);
if (l < 8 || !memory_access_is_direct(mr, false)) {
@@ -2849,6 +2862,7 @@ static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
if (result) {
*result = r;
}
+ rcu_read_unlock();
return val;
}
@@ -2921,6 +2935,7 @@ static inline uint32_t address_space_lduw_internal(AddressSpace *as,
hwaddr addr1;
MemTxResult r;
+ rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
false);
if (l < 2 || !memory_access_is_direct(mr, false)) {
@@ -2956,6 +2971,7 @@ static inline uint32_t address_space_lduw_internal(AddressSpace *as,
if (result) {
*result = r;
}
+ rcu_read_unlock();
return val;
}
@@ -3007,6 +3023,7 @@ void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
hwaddr addr1;
MemTxResult r;
+ rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
true);
if (l < 4 || !memory_access_is_direct(mr, true)) {
@@ -3029,6 +3046,7 @@ void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
if (result) {
*result = r;
}
+ rcu_read_unlock();
}
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
@@ -3049,6 +3067,7 @@ static inline void address_space_stl_internal(AddressSpace *as,
hwaddr addr1;
MemTxResult r;
+ rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
true);
if (l < 4 || !memory_access_is_direct(mr, true)) {
@@ -3083,6 +3102,7 @@ static inline void address_space_stl_internal(AddressSpace *as,
if (result) {
*result = r;
}
+ rcu_read_unlock();
}
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
@@ -3152,6 +3172,7 @@ static inline void address_space_stw_internal(AddressSpace *as,
hwaddr addr1;
MemTxResult r;
+ rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l, true);
if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
@@ -3185,6 +3206,7 @@ static inline void address_space_stw_internal(AddressSpace *as,
if (result) {
*result = r;
}
+ rcu_read_unlock();
}
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
@@ -3322,12 +3344,15 @@ bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
MemoryRegion*mr;
hwaddr l = 1;
+ bool res;
+ rcu_read_lock();
mr = address_space_translate(&address_space_memory,
phys_addr, &phys_addr, &l, false);
- return !(memory_region_is_ram(mr) ||
- memory_region_is_romd(mr));
+ res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
+ rcu_read_unlock();
+ return res;
}
void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
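
The exec.c changes above move the rcu_read_lock/unlock pair out of address_space_translate and into its callers, so a single RCU read-side critical section can cover both the translation and the subsequent access. The caller-side convention mirrors cpu_physical_memory_is_io in the last hunk; a minimal sketch (illustrative only, the wrapper name is hypothetical) looks like this:

/* Post-patch convention (sketch): the caller's RCU critical section spans
 * both the lookup and every use of the returned MemoryRegion, so the
 * region cannot be reclaimed in between. */
static bool region_is_mmio(AddressSpace *as, hwaddr addr)
{
    MemoryRegion *mr;
    hwaddr xlat, len = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &len, false);
    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
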
diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c
index d38d24b..d595d63 100644
--- a/hw/intc/apic_common.c
+++ b/hw/intc/apic_common.c
@@ -233,11 +233,10 @@ static void apic_reset_common(DeviceState *dev)
{
APICCommonState *s = APIC_COMMON(dev);
APICCommonClass *info = APIC_COMMON_GET_CLASS(s);
- bool bsp;
+ uint32_t bsp;
- bsp = cpu_is_bsp(s->cpu);
- s->apicbase = APIC_DEFAULT_ADDRESS |
- (bsp ? MSR_IA32_APICBASE_BSP : 0) | MSR_IA32_APICBASE_ENABLE;
+ bsp = s->apicbase & MSR_IA32_APICBASE_BSP;
+ s->apicbase = APIC_DEFAULT_ADDRESS | bsp | MSR_IA32_APICBASE_ENABLE;
s->vapic_paddr = 0;
info->vapic_base_update(s);
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index b012620..b1045da 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -270,13 +270,14 @@ static void vfio_iommu_map_notify(Notifier *n, void *data)
* this IOMMU to its immediate target. We need to translate
* it the rest of the way through to memory.
*/
+ rcu_read_lock();
mr = address_space_translate(&address_space_memory,
iotlb->translated_addr,
&xlat, &len, iotlb->perm & IOMMU_WO);
if (!memory_region_is_ram(mr)) {
error_report("iommu map to non memory area %"HWADDR_PRIx"",
xlat);
- return;
+ goto out;
}
/*
* Translation truncates length to the IOMMU page size,
@@ -284,7 +285,7 @@ static void vfio_iommu_map_notify(Notifier *n, void *data)
*/
if (len & iotlb->addr_mask) {
error_report("iommu has granularity incompatible with target AS");
- return;
+ goto out;
}
if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
@@ -307,6 +308,8 @@ static void vfio_iommu_map_notify(Notifier *n, void *data)
iotlb->addr_mask + 1, ret);
}
}
+out:
+ rcu_read_unlock();
}
static void vfio_listener_region_add(MemoryListener *listener,
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 0ccfd3b..b61c84f 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -1233,7 +1233,9 @@ void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
#endif
/* address_space_translate: translate an address range into an address space
- * into a MemoryRegion and an address range into that section
+ * into a MemoryRegion and an address range into that section. Should be
+ * called from an RCU critical section, to avoid that the last reference
+ * to the returned region disappears after address_space_translate returns.
*
* @as: #AddressSpace to be accessed
* @addr: address within that address space
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index 197e6c0..4878959 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -18,6 +18,7 @@
#include "config-host.h"
#include "qemu/queue.h"
#include "qom/cpu.h"
+#include "exec/memattrs.h"
#ifdef CONFIG_KVM
#include <linux/kvm.h>
@@ -254,7 +255,7 @@ int kvm_create_device(KVMState *s, uint64_t type, bool test);
extern const KVMCapabilityInfo kvm_arch_required_capabilities[];
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run);
-void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);
+MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);
int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run);
diff --git a/kvm-all.c b/kvm-all.c
index 28f4589..17a3771 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -1669,14 +1669,14 @@ void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
s->sigmask_len = sigmask_len;
}
-static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
- uint32_t count)
+static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
+ int size, uint32_t count)
{
int i;
uint8_t *ptr = data;
for (i = 0; i < count; i++) {
- address_space_rw(&address_space_io, port, MEMTXATTRS_UNSPECIFIED,
+ address_space_rw(&address_space_io, port, attrs,
ptr, size,
direction == KVM_EXIT_IO_OUT);
ptr += size;
@@ -1796,6 +1796,8 @@ int kvm_cpu_exec(CPUState *cpu)
}
do {
+ MemTxAttrs attrs;
+
if (cpu->kvm_vcpu_dirty) {
kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
cpu->kvm_vcpu_dirty = false;
@@ -1816,7 +1818,7 @@ int kvm_cpu_exec(CPUState *cpu)
run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
qemu_mutex_lock_iothread();
- kvm_arch_post_run(cpu, run);
+ attrs = kvm_arch_post_run(cpu, run);
if (run_ret < 0) {
if (run_ret == -EINTR || run_ret == -EAGAIN) {
@@ -1834,7 +1836,7 @@ int kvm_cpu_exec(CPUState *cpu)
switch (run->exit_reason) {
case KVM_EXIT_IO:
DPRINTF("handle_io\n");
- kvm_handle_io(run->io.port,
+ kvm_handle_io(run->io.port, attrs,
(uint8_t *)run + run->io.data_offset,
run->io.direction,
run->io.size,
@@ -1843,10 +1845,11 @@ int kvm_cpu_exec(CPUState *cpu)
break;
case KVM_EXIT_MMIO:
DPRINTF("handle_mmio\n");
- cpu_physical_memory_rw(run->mmio.phys_addr,
- run->mmio.data,
- run->mmio.len,
- run->mmio.is_write);
+ address_space_rw(&address_space_memory,
+ run->mmio.phys_addr, attrs,
+ run->mmio.data,
+ run->mmio.len,
+ run->mmio.is_write);
ret = 0;
break;
case KVM_EXIT_IRQ_WINDOW_OPEN:
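
With this hunk, kvm_cpu_exec threads the MemTxAttrs value returned by kvm_arch_post_run into both the PIO path (kvm_handle_io) and the MMIO path (address_space_rw). The per-architecture implementations later in this series all return MEMTXATTRS_UNSPECIFIED; an architecture that did track per-access state could return something richer. The sketch below is hypothetical and not part of this series -- arch_vcpu_is_secure() is an invented helper, and the use of the attrs secure bit here is purely illustrative:

/* Hypothetical arch hook (illustrative only): forward a per-vCPU secure
 * state as a memory transaction attribute instead of leaving it unspecified. */
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    if (arch_vcpu_is_secure(cs)) {               /* invented helper */
        return (MemTxAttrs) { .secure = 1 };
    }
    return MEMTXATTRS_UNSPECIFIED;
}
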
diff --git a/memory.c b/memory.c
index 0f6cb81..03c536b 100644
--- a/memory.c
+++ b/memory.c
@@ -2089,7 +2089,7 @@ static void mtree_print_mr(fprintf_function mon_printf, void *f,
const MemoryRegion *submr;
unsigned int i;
- if (!mr || !mr->enabled) {
+ if (!mr) {
return;
}
@@ -2115,7 +2115,7 @@ static void mtree_print_mr(fprintf_function mon_printf, void *f,
}
mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
" (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx
- "-" TARGET_FMT_plx "\n",
+ "-" TARGET_FMT_plx "%s\n",
base + mr->addr,
base + mr->addr
+ (int128_nz(mr->size) ?
@@ -2131,10 +2131,11 @@ static void mtree_print_mr(fprintf_function mon_printf, void *f,
mr->alias_offset
+ (int128_nz(mr->size) ?
(hwaddr)int128_get64(int128_sub(mr->size,
- int128_one())) : 0));
+ int128_one())) : 0),
+ mr->enabled ? "" : " [disabled]");
} else {
mon_printf(f,
- TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s\n",
+ TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s%s\n",
base + mr->addr,
base + mr->addr
+ (int128_nz(mr->size) ?
@@ -2144,7 +2145,8 @@ static void mtree_print_mr(fprintf_function mon_printf, void *f,
mr->romd_mode ? 'R' : '-',
!mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
: '-',
- memory_region_name(mr));
+ memory_region_name(mr),
+ mr->enabled ? "" : " [disabled]");
}
QTAILQ_INIT(&submr_print_queue);
@@ -2185,15 +2187,16 @@ void mtree_info(fprintf_function mon_printf, void *f)
QTAILQ_INIT(&ml_head);
QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
- mon_printf(f, "%s\n", as->name);
- mtree_print_mr(mon_printf, f, as->root, 0, 0, &ml_head);
+ mon_printf(f, "address-space: %s\n", as->name);
+ mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
+ mon_printf(f, "\n");
}
- mon_printf(f, "aliases\n");
/* print aliased regions */
QTAILQ_FOREACH(ml, &ml_head, queue) {
- mon_printf(f, "%s\n", memory_region_name(ml->mr));
- mtree_print_mr(mon_printf, f, ml->mr, 0, 0, &ml_head);
+ mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
+ mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
+ mon_printf(f, "\n");
}
QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
diff --git a/nbd.c b/nbd.c
index cb1b9bb..06b501b 100644
--- a/nbd.c
+++ b/nbd.c
@@ -86,6 +86,59 @@
#define NBD_OPT_ABORT (2)
#define NBD_OPT_LIST (3)
+/* NBD errors are based on errno numbers, so there is a 1:1 mapping,
+ * but only a limited set of errno values is specified in the protocol.
+ * Everything else is squashed to EINVAL.
+ */
+#define NBD_SUCCESS 0
+#define NBD_EPERM 1
+#define NBD_EIO 5
+#define NBD_ENOMEM 12
+#define NBD_EINVAL 22
+#define NBD_ENOSPC 28
+
+static int system_errno_to_nbd_errno(int err)
+{
+ switch (err) {
+ case 0:
+ return NBD_SUCCESS;
+ case EPERM:
+ return NBD_EPERM;
+ case EIO:
+ return NBD_EIO;
+ case ENOMEM:
+ return NBD_ENOMEM;
+#ifdef EDQUOT
+ case EDQUOT:
+#endif
+ case EFBIG:
+ case ENOSPC:
+ return NBD_ENOSPC;
+ case EINVAL:
+ default:
+ return NBD_EINVAL;
+ }
+}
+
+static int nbd_errno_to_system_errno(int err)
+{
+ switch (err) {
+ case NBD_SUCCESS:
+ return 0;
+ case NBD_EPERM:
+ return EPERM;
+ case NBD_EIO:
+ return EIO;
+ case NBD_ENOMEM:
+ return ENOMEM;
+ case NBD_ENOSPC:
+ return ENOSPC;
+ case NBD_EINVAL:
+ default:
+ return EINVAL;
+ }
+}
+
/* Definitions for opaque data types */
typedef struct NBDRequest NBDRequest;
@@ -856,6 +909,8 @@ ssize_t nbd_receive_reply(int csock, struct nbd_reply *reply)
reply->error = be32_to_cpup((uint32_t*)(buf + 4));
reply->handle = be64_to_cpup((uint64_t*)(buf + 8));
+ reply->error = nbd_errno_to_system_errno(reply->error);
+
TRACE("Got reply: "
"{ magic = 0x%x, .error = %d, handle = %" PRIu64" }",
magic, reply->error, reply->handle);
@@ -872,6 +927,8 @@ static ssize_t nbd_send_reply(int csock, struct nbd_reply *reply)
uint8_t buf[NBD_REPLY_SIZE];
ssize_t ret;
+ reply->error = system_errno_to_nbd_errno(reply->error);
+
/* Reply
[ 0 .. 3] magic (NBD_REPLY_MAGIC)
[ 4 .. 7] error (0 == no error)
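
The intent of the two helpers added above is a lossy but safe round trip: errno values listed in the NBD protocol survive the wire unchanged, anything else is squashed to EINVAL. Assuming the two static functions were made visible to a test, a quick sanity check (hypothetical, not part of the patch) would be:

/* Hypothetical round-trip check (illustrative only). */
#include <assert.h>
#include <errno.h>

static void check_nbd_errno_mapping(void)
{
    /* protocol-listed values survive the round trip... */
    assert(nbd_errno_to_system_errno(system_errno_to_nbd_errno(ENOSPC)) == ENOSPC);
    assert(nbd_errno_to_system_errno(system_errno_to_nbd_errno(EPERM)) == EPERM);
    /* ...anything the protocol does not know about collapses to EINVAL */
    assert(nbd_errno_to_system_errno(system_errno_to_nbd_errno(ENOTTY)) == EINVAL);
}
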
diff --git a/rules.mak b/rules.mak
index 3a05627..aec27f8 100644
--- a/rules.mak
+++ b/rules.mak
@@ -102,7 +102,8 @@ endif
%.o: %.dtrace
$(call quiet-command,dtrace -o $@ -G -s $<, " GEN $(TARGET_DIR)$@")
-%$(DSOSUF): CFLAGS += -fPIC -DBUILD_DSO
+DSO_OBJ_CFLAGS := -fPIC -DBUILD_DSO
+module-common.o: CFLAGS += $(DSO_OBJ_CFLAGS)
%$(DSOSUF): LDFLAGS += $(LDFLAGS_SHARED)
%$(DSOSUF): %.mo
$(call LINK,$^)
@@ -351,6 +352,7 @@ define unnest-vars
# For non-module build, add -m to -y
$(if $(CONFIG_MODULES),
$(foreach o,$($v),
+ $(eval $($o-objs): CFLAGS += $(DSO_OBJ_CFLAGS))
$(eval $o: $($o-objs)))
$(eval $(patsubst %-m,%-y,$v) += $($v))
$(eval modules: $($v:%.mo=%$(DSOSUF))),
diff --git a/target-arm/kvm.c b/target-arm/kvm.c
index fdd9ba3..16abbf1 100644
--- a/target-arm/kvm.c
+++ b/target-arm/kvm.c
@@ -23,6 +23,7 @@
#include "cpu.h"
#include "internals.h"
#include "hw/arm/arm.h"
+#include "exec/memattrs.h"
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
@@ -506,8 +507,9 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}
-void kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
+ return MEMTXATTRS_UNSPECIFIED;
}
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 41d09e5..a26d25a 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -37,6 +37,7 @@
#include "hw/pci/pci.h"
#include "migration/migration.h"
#include "qapi/qmp/qerror.h"
+#include "exec/memattrs.h"
//#define DEBUG_KVM
@@ -2246,7 +2247,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
}
}
-void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
+MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
@@ -2258,6 +2259,7 @@ void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
}
cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
+ return MEMTXATTRS_UNSPECIFIED;
}
int kvm_arch_process_async_events(CPUState *cs)
diff --git a/target-mips/kvm.c b/target-mips/kvm.c
index 4d1f7ea..59eb111 100644
--- a/target-mips/kvm.c
+++ b/target-mips/kvm.c
@@ -23,6 +23,7 @@
#include "cpu.h"
#include "sysemu/cpus.h"
#include "kvm_mips.h"
+#include "exec/memattrs.h"
#define DEBUG_KVM 0
@@ -110,9 +111,10 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
}
}
-void kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
DPRINTF("%s\n", __func__);
+ return MEMTXATTRS_UNSPECIFIED;
}
int kvm_arch_process_async_events(CPUState *cs)
diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
index 12328a4..1da9ea8 100644
--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
@@ -39,6 +39,7 @@
#include "sysemu/watchdog.h"
#include "trace.h"
#include "exec/gdbstub.h"
+#include "exec/memattrs.h"
//#define DEBUG_KVM
@@ -1270,8 +1271,9 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
* anyways, so we will get a chance to deliver the rest. */
}
-void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
+ return MEMTXATTRS_UNSPECIFIED;
}
int kvm_arch_process_async_events(CPUState *cs)
diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c
index aba1265..ea18015 100644
--- a/target-s390x/kvm.c
+++ b/target-s390x/kvm.c
@@ -45,6 +45,7 @@
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"
#include "hw/s390x/ebcdic.h"
+#include "exec/memattrs.h"
/* #define DEBUG_KVM */
@@ -780,8 +781,9 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}
-void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
+ return MEMTXATTRS_UNSPECIFIED;
}
int kvm_arch_process_async_events(CPUState *cs)
diff --git a/translate-all.c b/translate-all.c
index 65a76c5..536008f 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -1416,14 +1416,17 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
MemoryRegion *mr;
hwaddr l = 1;
+ rcu_read_lock();
mr = address_space_translate(as, addr, &addr, &l, false);
if (!(memory_region_is_ram(mr)
|| memory_region_is_romd(mr))) {
+ rcu_read_unlock();
return;
}
ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
+ addr;
tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
+ rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */