-rwxr-xr-x  configure                   5
-rw-r--r--  hw/i2c/smbus_eeprom.c       2
-rw-r--r--  hw/ide/ahci.c               4
-rw-r--r--  include/qemu/int128.h       4
-rw-r--r--  net/net.c                   4
-rw-r--r--  qemu-doc.texi               2
-rw-r--r--  qemu-options.hx             7
-rw-r--r--  qga/commands-posix.c        2
-rw-r--r--  qga/qapi-schema.json       14
-rw-r--r--  scripts/coverity-model.c  183
-rw-r--r--  tcg/ia64/tcg-target.c     498
-rw-r--r--  tcg/ia64/tcg-target.h       2
-rw-r--r--  vl.c                        2
-rw-r--r--  xbzrle.c                    8
14 files changed, 477 insertions(+), 260 deletions(-)
diff --git a/configure b/configure
index 69b9f56..b08afc3 100755
--- a/configure
+++ b/configure
@@ -1217,8 +1217,8 @@ Advanced options (experts only):
--enable-modules enable modules support
--enable-debug-tcg enable TCG debugging
--disable-debug-tcg disable TCG debugging (default)
- --enable-debug-info enable debugging information (default)
- --disable-debug-info disable debugging information
+ --enable-debug-info enable debugging information (default)
+ --disable-debug-info disable debugging information
--enable-debug enable common debug build options
--enable-sparse enable sparse checker
--disable-sparse disable sparse checker (default)
@@ -4095,7 +4095,6 @@ echo "sparse enabled $sparse"
echo "strip binaries $strip_opt"
echo "profiler $profiler"
echo "static build $static"
-echo "-Werror enabled $werror"
if test "$darwin" = "yes" ; then
echo "Cocoa support $cocoa"
fi
diff --git a/hw/i2c/smbus_eeprom.c b/hw/i2c/smbus_eeprom.c
index 86f35c1..72c09cb 100644
--- a/hw/i2c/smbus_eeprom.c
+++ b/hw/i2c/smbus_eeprom.c
@@ -71,7 +71,7 @@ static void eeprom_write_data(SMBusDevice *dev, uint8_t cmd, uint8_t *buf, int l
printf("eeprom_write_byte: addr=0x%02x cmd=0x%02x val=0x%02x\n",
dev->i2c.address, cmd, buf[0]);
#endif
- /* An page write operation is not a valid SMBus command.
+ /* A page write operation is not a valid SMBus command.
It is a block write without a length byte. Fortunately we
get the full block anyway. */
/* TODO: Should this set the current location? */
diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
index bfe633f..50327ff 100644
--- a/hw/ide/ahci.c
+++ b/hw/ide/ahci.c
@@ -438,9 +438,9 @@ static void check_cmd(AHCIState *s, int port)
if ((pr->cmd & PORT_CMD_START) && pr->cmd_issue) {
for (slot = 0; (slot < 32) && pr->cmd_issue; slot++) {
- if ((pr->cmd_issue & (1 << slot)) &&
+ if ((pr->cmd_issue & (1U << slot)) &&
!handle_cmd(s, port, slot)) {
- pr->cmd_issue &= ~(1 << slot);
+ pr->cmd_issue &= ~(1U << slot);
}
}
}
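
The 1U suffix above is not cosmetic: cmd_issue is a 32-bit value, and with a plain int constant the final loop iteration (slot == 31) would shift 1 into the sign bit, which is undefined behavior in C. A minimal sketch of the well-defined unsigned form (not part of the commit):

    #include <stdint.h>

    /* Safe even for slot == 31: the shift is done on unsigned int. */
    static inline int slot_is_set(uint32_t cmd_issue, unsigned slot)
    {
        return (cmd_issue & (1U << slot)) != 0;
    }

    static inline uint32_t slot_clear(uint32_t cmd_issue, unsigned slot)
    {
        return cmd_issue & ~(1U << slot);
    }
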
diff --git a/include/qemu/int128.h b/include/qemu/int128.h
index 9ed47aa..f597031 100644
--- a/include/qemu/int128.h
+++ b/include/qemu/int128.h
@@ -53,7 +53,7 @@ static inline Int128 int128_rshift(Int128 a, int n)
if (n >= 64) {
return (Int128) { h, h >> 63 };
} else {
- return (Int128) { (a.lo >> n) | (a.hi << (64 - n)), h };
+ return (Int128) { (a.lo >> n) | ((uint64_t)a.hi << (64 - n)), h };
}
}
@@ -78,7 +78,7 @@ static inline Int128 int128_neg(Int128 a)
static inline Int128 int128_sub(Int128 a, Int128 b)
{
- return (Int128){ a.lo - b.lo, a.hi - b.hi - (a.lo < b.lo) };
+ return (Int128){ a.lo - b.lo, (uint64_t)a.hi - b.hi - (a.lo < b.lo) };
}
static inline bool int128_nonneg(Int128 a)
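
Both int128.h fixes target the same hazard: hi is a signed int64_t, and C makes left-shifting a negative value undefined, as it does signed overflow in subtraction. Casting to uint64_t performs the same two's-complement bit manipulation with fully defined semantics. A small sketch of the rule being applied (illustration only, assuming the { uint64_t lo; int64_t hi; } layout this header uses):

    #include <stdint.h>

    /* UB when hi is negative:   hi << (64 - n)
       Defined for any hi:       (uint64_t)hi << (64 - n)
       Valid for 0 < n < 64, as in int128_rshift's else branch. */
    static inline uint64_t high_word_bits(int64_t hi, int n)
    {
        return (uint64_t)hi << (64 - n);
    }
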
diff --git a/net/net.c b/net/net.c
index e3ef1e4..a4aadff 100644
--- a/net/net.c
+++ b/net/net.c
@@ -952,10 +952,12 @@ void net_host_device_remove(Monitor *mon, const QDict *qdict)
nc = net_hub_find_client_by_name(vlan_id, device);
if (!nc) {
+ error_report("Host network device '%s' on hub '%d' not found",
+ device, vlan_id);
return;
}
if (!net_host_check_device(nc->model)) {
- monitor_printf(mon, "invalid host network device %s\n", device);
+ error_report("invalid host network device '%s'", device);
return;
}
qemu_del_net_client(nc);
diff --git a/qemu-doc.texi b/qemu-doc.texi
index e6e20eb..88ec9bb 100644
--- a/qemu-doc.texi
+++ b/qemu-doc.texi
@@ -823,7 +823,7 @@ In this case, the block device must be exported using qemu-nbd:
qemu-nbd --socket=/tmp/my_socket my_disk.qcow2
@end example
-The use of qemu-nbd allows to share a disk between several guests:
+The use of qemu-nbd allows sharing of a disk between several guests:
@example
qemu-nbd --socket=/tmp/my_socket --share=2 my_disk.qcow2
@end example
diff --git a/qemu-options.hx b/qemu-options.hx
index 2d33815..6457034 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -444,7 +444,8 @@ This option defines the type of the media: disk or cdrom.
@item cyls=@var{c},heads=@var{h},secs=@var{s}[,trans=@var{t}]
These options have the same definition as they have in @option{-hdachs}.
@item snapshot=@var{snapshot}
-@var{snapshot} is "on" or "off" and allows to enable snapshot for given drive (see @option{-snapshot}).
+@var{snapshot} is "on" or "off" and controls snapshot mode for the given drive
+(see @option{-snapshot}).
@item cache=@var{cache}
@var{cache} is "none", "writeback", "unsafe", "directsync" or "writethrough" and controls how the host cache is used to access block data.
@item aio=@var{aio}
@@ -1242,7 +1243,7 @@ Disable adaptive encodings. Adaptive encodings are enabled by default.
An adaptive encoding will try to detect frequently updated screen regions,
and send updates in these regions using a lossy encoding (like JPEG).
This can be really helpful to save bandwidth when playing videos. Disabling
-adaptive encodings allows to restore the original static behavior of encodings
+adaptive encodings restores the original static behavior of encodings
like Tight.
@item share=[allow-exclusive|force-shared|ignore]
@@ -2805,7 +2806,7 @@ UTC or local time, respectively. @code{localtime} is required for correct date i
MS-DOS or Windows. To start at a specific point in time, provide @var{date} in the
format @code{2006-06-17T16:01:21} or @code{2006-06-17}. The default base is UTC.
-By default the RTC is driven by the host system time. This allows to use the
+By default the RTC is driven by the host system time. This allows using the
RTC as accurate reference clock inside the guest, specifically if the host
time is smoothly following an accurate external reference clock, e.g. via NTP.
If you want to isolate the guest time from the host, you can set @option{clock}
diff --git a/qga/commands-posix.c b/qga/commands-posix.c
index 6b5f11f..935a4ec 100644
--- a/qga/commands-posix.c
+++ b/qga/commands-posix.c
@@ -171,7 +171,7 @@ void qmp_guest_set_time(bool has_time, int64_t time_ns, Error **errp)
/* Now, if user has passed a time to set and the system time is set, we
* just need to synchronize the hardware clock. However, if no time was
* passed, user is requesting the opposite: set the system time from the
- * hardware clock. */
+ * hardware clock (RTC). */
pid = fork();
if (pid == 0) {
setsid();
diff --git a/qga/qapi-schema.json b/qga/qapi-schema.json
index 80edca1..a8cdcb3 100644
--- a/qga/qapi-schema.json
+++ b/qga/qapi-schema.json
@@ -96,8 +96,8 @@
##
# @guest-get-time:
#
-# Get the information about guest time relative to the Epoch
-# of 1970-01-01 in UTC.
+# Get the information about the guest's System Time relative
+# to the Epoch of 1970-01-01 in UTC.
#
# Returns: Time in nanoseconds.
#
@@ -117,11 +117,11 @@
# gap was, NTP might not be able to resynchronize the
# guest.
#
-# This command tries to set guest time to the given value,
-# then sets the Hardware Clock to the current System Time.
-# This will make it easier for a guest to resynchronize
-# without waiting for NTP. If no @time is specified, then
-# the time to set is read from RTC.
+# This command tries to set the guest's System Time to the
+# given value, then sets the Hardware Clock (RTC) to the
+# current System Time. This will make it easier for a guest
+# to resynchronize without waiting for NTP. If no @time is
+# specified, then the time to set is read from RTC.
#
# @time: #optional time in nanoseconds, relative to the Epoch
# of 1970-01-01 in UTC.
diff --git a/scripts/coverity-model.c b/scripts/coverity-model.c
new file mode 100644
index 0000000..4c99a85
--- /dev/null
+++ b/scripts/coverity-model.c
@@ -0,0 +1,183 @@
+/* Coverity Scan model
+ *
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>
+ * Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or, at your
+ * option, any later version. See the COPYING file in the top-level directory.
+ */
+
+
+/*
+ * This is the source code for our Coverity user model file. The
+ * purpose of user models is to increase scanning accuracy by explaining
+ * code Coverity can't see (out of tree libraries) or doesn't
+ * sufficiently understand. Better accuracy means both fewer false
+ * positives and more true defects, memory leaks in particular.
+ *
+ * - A model file can't import any header files. Some built-in primitives are
+ * available but not wchar_t, NULL etc.
+ * - Modeling doesn't need full structs and typedefs. Rudimentary structs
+ * and similar types are sufficient.
+ * - An uninitialized local variable signifies that the variable could be
+ * any value.
+ *
+ * The model file must be uploaded by an admin in the analysis settings of
+ * http://scan.coverity.com/projects/378
+ */
+
+#define NULL ((void *)0)
+
+typedef unsigned char uint8_t;
+typedef char int8_t;
+typedef unsigned int uint32_t;
+typedef int int32_t;
+typedef long ssize_t;
+typedef unsigned long long uint64_t;
+typedef long long int64_t;
+typedef _Bool bool;
+
+/* exec.c */
+
+typedef struct AddressSpace AddressSpace;
+typedef uint64_t hwaddr;
+
+static void __write(uint8_t *buf, ssize_t len)
+{
+ int first, last;
+ __coverity_negative_sink__(len);
+ if (len == 0) return;
+ buf[0] = first;
+ buf[len-1] = last;
+ __coverity_writeall__(buf);
+}
+
+static void __read(uint8_t *buf, ssize_t len)
+{
+ __coverity_negative_sink__(len);
+ if (len == 0) return;
+ int first = buf[0];
+ int last = buf[len-1];
+}
+
+bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
+ int len, bool is_write)
+{
+ bool result;
+
+ // TODO: investigate impact of treating reads as producing
+ // tainted data, with __coverity_tainted_data_argument__(buf).
+ if (is_write) __write(buf, len); else __read(buf, len);
+
+ return result;
+}
+
+/* Tainting */
+
+typedef struct {} name2keysym_t;
+static int get_keysym(const name2keysym_t *table,
+ const char *name)
+{
+ int result;
+ if (result > 0) {
+ __coverity_tainted_string_sanitize_content__(name);
+ return result;
+ } else {
+ return 0;
+ }
+}
+
+/* glib memory allocation functions.
+ *
+ * Note that we ignore the fact that g_malloc of 0 bytes returns NULL,
+ * and g_realloc of 0 bytes frees the pointer.
+ *
+ * Modeling this would result in Coverity flagging a lot of memory
+ * allocations as potentially returning NULL, and asking us to check
+ * whether the result of the allocation is NULL or not. However, the
+ * resulting pointer should never be dereferenced anyway, and in fact
+ * it is not in the vast majority of cases.
+ *
+ * If a dereference did happen, this would suppress a defect report
+ * for an actual null pointer dereference. But it's too unlikely to
+ * be worth wading through the false positives, and with some luck
+ * we'll get a buffer overflow reported anyway.
+ */
+
+void *malloc(size_t);
+void *calloc(size_t, size_t);
+void *realloc(void *, size_t);
+void free(void *);
+
+void *
+g_malloc(size_t n_bytes)
+{
+ void *mem;
+ __coverity_negative_sink__(n_bytes);
+ mem = malloc(n_bytes == 0 ? 1 : n_bytes);
+ if (!mem) __coverity_panic__();
+ return mem;
+}
+
+void *
+g_malloc0(size_t n_bytes)
+{
+ void *mem;
+ __coverity_negative_sink__(n_bytes);
+ mem = calloc(1, n_bytes == 0 ? 1 : n_bytes);
+ if (!mem) __coverity_panic__();
+ return mem;
+}
+
+void g_free(void *mem)
+{
+ free(mem);
+}
+
+void *g_realloc(void * mem, size_t n_bytes)
+{
+ __coverity_negative_sink__(n_bytes);
+ mem = realloc(mem, n_bytes == 0 ? 1 : n_bytes);
+ if (!mem) __coverity_panic__();
+ return mem;
+}
+
+void *g_try_malloc(size_t n_bytes)
+{
+ __coverity_negative_sink__(n_bytes);
+ return malloc(n_bytes == 0 ? 1 : n_bytes);
+}
+
+void *g_try_malloc0(size_t n_bytes)
+{
+ __coverity_negative_sink__(n_bytes);
+ return calloc(1, n_bytes == 0 ? 1 : n_bytes);
+}
+
+void *g_try_realloc(void *mem, size_t n_bytes)
+{
+ __coverity_negative_sink__(n_bytes);
+ return realloc(mem, n_bytes == 0 ? 1 : n_bytes);
+}
+
+/* Other glib functions */
+
+typedef struct _GIOChannel GIOChannel;
+GIOChannel *g_io_channel_unix_new(int fd)
+{
+ GIOChannel *c = g_malloc0(sizeof(GIOChannel));
+ __coverity_escape__(fd);
+ return c;
+}
+
+void g_assertion_message_expr(const char *domain,
+ const char *file,
+ int line,
+ const char *func,
+ const char *expr)
+{
+ __coverity_panic__();
+}
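
The allocator models above share one pattern: sink negative sizes, allocate, and mark the failure path unreachable with __coverity_panic__() because the real function aborts instead of returning NULL. As a hedged sketch (hypothetical, not part of this commit), a model for another abort-on-failure QEMU allocator such as qemu_memalign would reuse exactly the same primitives:

    /* Hypothetical model following the g_malloc pattern above. */
    void *qemu_memalign(size_t alignment, size_t size)
    {
        void *mem;
        __coverity_negative_sink__(size);
        mem = malloc(size == 0 ? 1 : size); /* alignment is irrelevant to the model */
        if (!mem) __coverity_panic__();     /* the real function aborts on failure */
        return mem;
    }
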
diff --git a/tcg/ia64/tcg-target.c b/tcg/ia64/tcg-target.c
index c640184..1f523d6 100644
--- a/tcg/ia64/tcg-target.c
+++ b/tcg/ia64/tcg-target.c
@@ -23,8 +23,6 @@
* THE SOFTWARE.
*/
-#include "tcg-be-null.h"
-
/*
* Register definitions
*/
@@ -221,10 +219,12 @@ enum {
OPC_ALLOC_M34 = 0x02c00000000ull,
OPC_BR_DPTK_FEW_B1 = 0x08400000000ull,
OPC_BR_SPTK_MANY_B1 = 0x08000001000ull,
+ OPC_BR_CALL_SPNT_FEW_B3 = 0x0a200000000ull,
OPC_BR_SPTK_MANY_B4 = 0x00100001000ull,
OPC_BR_CALL_SPTK_MANY_B5 = 0x02100001000ull,
OPC_BR_RET_SPTK_MANY_B4 = 0x00108001100ull,
OPC_BRL_SPTK_MANY_X3 = 0x18000001000ull,
+ OPC_BRL_CALL_SPNT_MANY_X4 = 0x1a200001000ull,
OPC_BRL_CALL_SPTK_MANY_X4 = 0x1a000001000ull,
OPC_CMP_LT_A6 = 0x18000000000ull,
OPC_CMP_LTU_A6 = 0x1a000000000ull,
@@ -356,6 +356,15 @@ static inline uint64_t tcg_opc_b1(int qp, uint64_t opc, uint64_t imm)
| (qp & 0x3f);
}
+static inline uint64_t tcg_opc_b3(int qp, uint64_t opc, int b1, uint64_t imm)
+{
+ return opc
+ | ((imm & 0x100000) << 16) /* s */
+ | ((imm & 0x0fffff) << 13) /* imm20b */
+ | ((b1 & 0x7) << 6)
+ | (qp & 0x3f);
+}
+
static inline uint64_t tcg_opc_b4(int qp, uint64_t opc, int b2)
{
return opc
@@ -815,6 +824,7 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
#if defined(CONFIG_SOFTMMU)
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R56);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R57);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R58);
#endif
break;
case 'Z':
@@ -950,15 +960,21 @@ static inline void tcg_out_callr(TCGContext *s, TCGReg addr)
static void tcg_out_exit_tb(TCGContext *s, tcg_target_long arg)
{
int64_t disp;
- uint64_t imm;
+ uint64_t imm, opc1;
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R8, arg);
+ /* At least arg == 0 is a common operation. */
+ if (arg == sextract64(arg, 0, 22)) {
+ opc1 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R8, arg);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R8, arg);
+ opc1 = INSN_NOP_M;
+ }
disp = tb_ret_addr - s->code_ptr;
imm = (uint64_t)disp >> 4;
tcg_out_bundle(s, mLX,
- INSN_NOP_M,
+ opc1,
tcg_opc_l3 (imm),
tcg_opc_x3 (TCG_REG_P0, OPC_BRL_SPTK_MANY_X3, imm));
}
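
The new guard relies on the fact that a value fits a signed 22-bit immediate exactly when sign-extending its low 22 bits reproduces it, which is what arg == sextract64(arg, 0, 22) tests. An equivalent range check in plain C (a sketch, not part of the patch):

    #include <stdint.h>

    /* True iff v is representable as a signed 22-bit immediate,
       i.e. -2097152 <= v <= 2097151. */
    static inline int fits_simm22(int64_t v)
    {
        return v >= -(1 << 21) && v < (1 << 21);
    }
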
@@ -1558,238 +1574,284 @@ static inline void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGArg ret,
}
#if defined(CONFIG_SOFTMMU)
+/* We're expecting to use a signed 22-bit immediate add. */
+QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
+ > 0x1fffff)
+
/* Load and compare a TLB entry, and return the result in (p6, p7).
- R2 is loaded with the address of the addend TLB entry.
- R57 is loaded with the address, zero extented on 32-bit targets. */
-static inline void tcg_out_qemu_tlb(TCGContext *s, TCGArg addr_reg,
- TCGMemOp s_bits, uint64_t offset_rw,
- uint64_t offset_addend)
-{
- tcg_out_bundle(s, mII,
- INSN_NOP_M,
- tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, TCG_REG_R2,
+ R2 is loaded with the addend TLB entry.
+ R57 is loaded with the address, zero extended on 32-bit targets.
+ R1, R3 are clobbered, leaving R56 free for...
+ BSWAP_1, BSWAP_2 and I-slot insns for swapping data for store. */
+static inline void tcg_out_qemu_tlb(TCGContext *s, TCGReg addr_reg,
+ TCGMemOp s_bits, int off_rw, int off_add,
+ uint64_t bswap1, uint64_t bswap2)
+{
+ /*
+ .mii
+ mov r2 = off_rw
+ extr.u r3 = addr_reg, ... # extract tlb page
+ zxt4 r57 = addr_reg # or mov for 64-bit guest
+ ;;
+ .mii
+ addl r2 = r2, areg0
+ shl r3 = r3, cteb # via dep.z
+ dep r1 = 0, r57, ... # zero page ofs, keep align
+ ;;
+ .mmi
+ add r2 = r2, r3
+ ;;
+ ld4 r3 = [r2], off_add-off_rw # or ld8 for 64-bit guest
+ nop
+ ;;
+ .mmi
+ nop
+ cmp.eq p6, p7 = r3, r58
+ nop
+ ;;
+ */
+ tcg_out_bundle(s, miI,
+ tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R2, off_rw),
+ tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, TCG_REG_R3,
addr_reg, TARGET_PAGE_BITS, CPU_TLB_BITS - 1),
- tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, TCG_REG_R2,
- TCG_REG_R2, 63 - CPU_TLB_ENTRY_BITS,
- 63 - CPU_TLB_ENTRY_BITS));
- tcg_out_bundle(s, mII,
- tcg_opc_a5 (TCG_REG_P0, OPC_ADDL_A5, TCG_REG_R2,
- offset_rw, TCG_REG_R2),
tcg_opc_ext_i(TCG_REG_P0,
TARGET_LONG_BITS == 32 ? MO_UL : MO_Q,
- TCG_REG_R57, addr_reg),
+ TCG_REG_R57, addr_reg));
+ tcg_out_bundle(s, miI,
tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
- TCG_REG_R2, TCG_AREG0));
- tcg_out_bundle(s, mII,
+ TCG_REG_R2, TCG_AREG0),
+ tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, TCG_REG_R3,
+ TCG_REG_R3, 63 - CPU_TLB_ENTRY_BITS,
+ 63 - CPU_TLB_ENTRY_BITS),
+ tcg_opc_i14(TCG_REG_P0, OPC_DEP_I14, TCG_REG_R1, 0,
+ TCG_REG_R57, 63 - s_bits,
+ TARGET_PAGE_BITS - s_bits - 1));
+ tcg_out_bundle(s, MmI,
+ tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1,
+ TCG_REG_R2, TCG_REG_R2, TCG_REG_R3),
tcg_opc_m3 (TCG_REG_P0,
(TARGET_LONG_BITS == 32
- ? OPC_LD4_M3 : OPC_LD8_M3), TCG_REG_R56,
- TCG_REG_R2, offset_addend - offset_rw),
- tcg_opc_i14(TCG_REG_P0, OPC_DEP_I14, TCG_REG_R3, 0,
- TCG_REG_R57, 63 - s_bits,
- TARGET_PAGE_BITS - s_bits - 1),
+ ? OPC_LD4_M3 : OPC_LD8_M3), TCG_REG_R3,
+ TCG_REG_R2, off_add - off_rw),
+ bswap1);
+ tcg_out_bundle(s, mmI,
+ tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1, TCG_REG_R2, TCG_REG_R2),
tcg_opc_a6 (TCG_REG_P0, OPC_CMP_EQ_A6, TCG_REG_P6,
- TCG_REG_P7, TCG_REG_R3, TCG_REG_R56));
+ TCG_REG_P7, TCG_REG_R1, TCG_REG_R3),
+ bswap2);
}
-/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
- int mmu_idx) */
-static const void * const qemu_ld_helpers[4] = {
- helper_ldb_mmu,
- helper_ldw_mmu,
- helper_ldl_mmu,
- helper_ldq_mmu,
-};
+#define TCG_MAX_QEMU_LDST 640
-static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
- TCGMemOp opc)
+typedef struct TCGLabelQemuLdst {
+ bool is_ld;
+ TCGMemOp size;
+ uint8_t *label_ptr; /* label pointers to be updated */
+} TCGLabelQemuLdst;
+
+typedef struct TCGBackendData {
+ int nb_ldst_labels;
+ TCGLabelQemuLdst ldst_labels[TCG_MAX_QEMU_LDST];
+} TCGBackendData;
+
+static inline void tcg_out_tb_init(TCGContext *s)
+{
+ s->be->nb_ldst_labels = 0;
+}
+
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
+ uint8_t *label_ptr)
+{
+ TCGBackendData *be = s->be;
+ TCGLabelQemuLdst *l = &be->ldst_labels[be->nb_ldst_labels++];
+
+ assert(be->nb_ldst_labels <= TCG_MAX_QEMU_LDST);
+ l->is_ld = is_ld;
+ l->size = opc & MO_SIZE;
+ l->label_ptr = label_ptr;
+}
+
+static void tcg_out_tb_finalize(TCGContext *s)
+{
+ static const void * const helpers[8] = {
+ helper_ret_stb_mmu,
+ helper_le_stw_mmu,
+ helper_le_stl_mmu,
+ helper_le_stq_mmu,
+ helper_ret_ldub_mmu,
+ helper_le_lduw_mmu,
+ helper_le_ldul_mmu,
+ helper_le_ldq_mmu,
+ };
+ uintptr_t thunks[8] = { };
+ TCGBackendData *be = s->be;
+ size_t i, n = be->nb_ldst_labels;
+
+ for (i = 0; i < n; i++) {
+ TCGLabelQemuLdst *l = &be->ldst_labels[i];
+ long x = l->is_ld * 4 + l->size;
+ uintptr_t dest = thunks[x];
+
+ /* The out-of-line thunks are all the same; load the return address
+ from B0, load the GP, and branch to the code. Note that we are
+ always post-call, so the register window has rolled, so we're
+ using incoming parameter register numbers, not outgoing. */
+ if (dest == 0) {
+ uintptr_t disp, *desc = (uintptr_t *)helpers[x];
+
+ thunks[x] = dest = (uintptr_t)s->code_ptr;
+
+ tcg_out_bundle(s, mlx,
+ INSN_NOP_M,
+ tcg_opc_l2 (desc[1]),
+ tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2,
+ TCG_REG_R1, desc[1]));
+ tcg_out_bundle(s, mii,
+ INSN_NOP_M,
+ INSN_NOP_I,
+ tcg_opc_i22(TCG_REG_P0, OPC_MOV_I22,
+ l->is_ld ? TCG_REG_R35 : TCG_REG_R36,
+ TCG_REG_B0));
+ disp = (desc[0] - (uintptr_t)s->code_ptr) >> 4;
+ tcg_out_bundle(s, mLX,
+ INSN_NOP_M,
+ tcg_opc_l3 (disp),
+ tcg_opc_x3 (TCG_REG_P0, OPC_BRL_SPTK_MANY_X3, disp));
+ }
+
+ reloc_pcrel21b(l->label_ptr, dest);
+ }
+}
+
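
The finalize loop above memoizes thunks in thunks[8], keyed by l->is_ld * 4 + l->size, so each of the eight (direction, size) combinations is emitted at most once per translation block and later slow paths reuse the existing copy. The shape of that memoization in a plain C sketch (emit_thunk is a hypothetical stand-in for the three bundles emitted above):

    #include <stdint.h>

    extern uintptr_t emit_thunk(int key);   /* hypothetical emitter */

    static uintptr_t thunk_for(uintptr_t cache[8], int is_ld, int size)
    {
        int key = is_ld * 4 + size;         /* stores: 0..3, loads: 4..7 */
        if (cache[key] == 0) {
            cache[key] = emit_thunk(key);
        }
        return cache[key];
    }
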
+static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args)
{
static const uint64_t opc_ld_m1[4] = {
OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1
};
int addr_reg, data_reg, mem_index;
- TCGMemOp s_bits, bswap;
-
- data_reg = *args++;
- addr_reg = *args++;
- mem_index = *args;
+ TCGMemOp opc, s_bits;
+ uint64_t fin1, fin2;
+ uint8_t *label_ptr;
+
+ data_reg = args[0];
+ addr_reg = args[1];
+ opc = args[2];
+ mem_index = args[3];
s_bits = opc & MO_SIZE;
- bswap = opc & MO_BSWAP;
/* Read the TLB entry */
tcg_out_qemu_tlb(s, addr_reg, s_bits,
offsetof(CPUArchState, tlb_table[mem_index][0].addr_read),
- offsetof(CPUArchState, tlb_table[mem_index][0].addend));
+ offsetof(CPUArchState, tlb_table[mem_index][0].addend),
+ INSN_NOP_I, INSN_NOP_I);
/* P6 is the fast path, and P7 the slow path */
- tcg_out_bundle(s, mLX,
- tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0),
- tcg_opc_l2 ((tcg_target_long) qemu_ld_helpers[s_bits]),
- tcg_opc_x2 (TCG_REG_P7, OPC_MOVL_X2, TCG_REG_R2,
- (tcg_target_long) qemu_ld_helpers[s_bits]));
- tcg_out_bundle(s, MmI,
- tcg_opc_m3 (TCG_REG_P0, OPC_LD8_M3, TCG_REG_R3,
- TCG_REG_R2, 8),
- tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R3,
- TCG_REG_R3, TCG_REG_R57),
- tcg_opc_i21(TCG_REG_P7, OPC_MOV_I21, TCG_REG_B6,
- TCG_REG_R3, 0));
- if (bswap && s_bits == MO_16) {
- tcg_out_bundle(s, MmI,
- tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
- TCG_REG_R8, TCG_REG_R3),
- tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
- tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
- TCG_REG_R8, TCG_REG_R8, 15, 15));
- } else if (bswap && s_bits == MO_32) {
- tcg_out_bundle(s, MmI,
- tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
- TCG_REG_R8, TCG_REG_R3),
- tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
- tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
- TCG_REG_R8, TCG_REG_R8, 31, 31));
- } else {
- tcg_out_bundle(s, mmI,
- tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
- TCG_REG_R8, TCG_REG_R3),
- tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
- INSN_NOP_I);
- }
- if (!bswap) {
- tcg_out_bundle(s, miB,
- tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R58, mem_index),
- INSN_NOP_I,
- tcg_opc_b5 (TCG_REG_P7, OPC_BR_CALL_SPTK_MANY_B5,
- TCG_REG_B0, TCG_REG_B6));
+
+ fin2 = 0;
+ if (opc & MO_BSWAP) {
+ fin1 = tcg_opc_bswap64_i(TCG_REG_P0, data_reg, TCG_REG_R8);
+ if (s_bits < MO_64) {
+ int shift = 64 - (8 << s_bits);
+ fin2 = (opc & MO_SIGN ? OPC_EXTR_I11 : OPC_EXTR_U_I11);
+ fin2 = tcg_opc_i11(TCG_REG_P0, fin2,
+ data_reg, data_reg, shift, 63 - shift);
+ }
} else {
- tcg_out_bundle(s, miB,
- tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R58, mem_index),
- tcg_opc_bswap64_i(TCG_REG_P6, TCG_REG_R8, TCG_REG_R8),
- tcg_opc_b5 (TCG_REG_P7, OPC_BR_CALL_SPTK_MANY_B5,
- TCG_REG_B0, TCG_REG_B6));
+ fin1 = tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, TCG_REG_R8);
}
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
+ tcg_out_bundle(s, mmI,
+ tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0),
+ tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R2,
+ TCG_REG_R2, TCG_REG_R57),
+ tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R58, mem_index));
+ label_ptr = s->code_ptr + 2;
+ tcg_out_bundle(s, miB,
+ tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
+ TCG_REG_R8, TCG_REG_R2),
INSN_NOP_I,
- tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, TCG_REG_R8));
-}
+ tcg_opc_b3 (TCG_REG_P7, OPC_BR_CALL_SPNT_FEW_B3, TCG_REG_B0,
+ get_reloc_pcrel21b(label_ptr)));
-/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
- uintxx_t val, int mmu_idx) */
-static const void * const qemu_st_helpers[4] = {
- helper_stb_mmu,
- helper_stw_mmu,
- helper_stl_mmu,
- helper_stq_mmu,
-};
+ add_qemu_ldst_label(s, 1, opc, label_ptr);
-static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
- TCGMemOp opc)
+ /* Note that we always use LE helper functions, so the bswap insns
+ here for the fast path also apply to the slow path. */
+ tcg_out_bundle(s, (fin2 ? mII : miI),
+ INSN_NOP_M,
+ fin1,
+ fin2 ? fin2 : INSN_NOP_I);
+}
+
+static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
{
static const uint64_t opc_st_m4[4] = {
OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4
};
- int addr_reg, data_reg, mem_index;
- TCGMemOp s_bits;
-
- data_reg = *args++;
- addr_reg = *args++;
- mem_index = *args;
+ TCGReg addr_reg, data_reg;
+ int mem_index;
+ uint64_t pre1, pre2;
+ TCGMemOp opc, s_bits;
+ uint8_t *label_ptr;
+
+ data_reg = args[0];
+ addr_reg = args[1];
+ opc = args[2];
+ mem_index = args[3];
s_bits = opc & MO_SIZE;
+ /* Note that we always use LE helper functions, so the bswap insns
+ that are here for the fast path also apply to the slow path,
+ and move the data into the argument register. */
+ pre2 = INSN_NOP_I;
+ if (opc & MO_BSWAP) {
+ pre1 = tcg_opc_bswap64_i(TCG_REG_P0, TCG_REG_R58, data_reg);
+ if (s_bits < MO_64) {
+ int shift = 64 - (8 << s_bits);
+ pre2 = tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11,
+ TCG_REG_R58, TCG_REG_R58, shift, 63 - shift);
+ }
+ } else {
+ /* Just move the data into place for the slow path. */
+ pre1 = tcg_opc_ext_i(TCG_REG_P0, opc, TCG_REG_R58, data_reg);
+ }
+
tcg_out_qemu_tlb(s, addr_reg, s_bits,
offsetof(CPUArchState, tlb_table[mem_index][0].addr_write),
- offsetof(CPUArchState, tlb_table[mem_index][0].addend));
+ offsetof(CPUArchState, tlb_table[mem_index][0].addend),
+ pre1, pre2);
/* P6 is the fast path, and P7 the slow path */
- tcg_out_bundle(s, mLX,
+ tcg_out_bundle(s, mmI,
tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0),
- tcg_opc_l2 ((tcg_target_long) qemu_st_helpers[s_bits]),
- tcg_opc_x2 (TCG_REG_P7, OPC_MOVL_X2, TCG_REG_R2,
- (tcg_target_long) qemu_st_helpers[s_bits]));
- tcg_out_bundle(s, MmI,
- tcg_opc_m3 (TCG_REG_P0, OPC_LD8_M3, TCG_REG_R3,
- TCG_REG_R2, 8),
- tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R3,
- TCG_REG_R3, TCG_REG_R57),
- tcg_opc_i21(TCG_REG_P7, OPC_MOV_I21, TCG_REG_B6,
- TCG_REG_R3, 0));
-
- switch (opc) {
- case MO_8:
- case MO_16:
- case MO_32:
- case MO_64:
- tcg_out_bundle(s, mii,
- tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1,
- TCG_REG_R1, TCG_REG_R2),
- tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R58, data_reg),
- INSN_NOP_I);
- break;
-
- case MO_16 | MO_BSWAP:
- tcg_out_bundle(s, miI,
- tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1,
- TCG_REG_R1, TCG_REG_R2),
- INSN_NOP_I,
- tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
- TCG_REG_R2, data_reg, 15, 15));
- tcg_out_bundle(s, miI,
- tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R58, data_reg),
- INSN_NOP_I,
- tcg_opc_bswap64_i(TCG_REG_P6, TCG_REG_R2, TCG_REG_R2));
- data_reg = TCG_REG_R2;
- break;
-
- case MO_32 | MO_BSWAP:
- tcg_out_bundle(s, miI,
- tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1,
- TCG_REG_R1, TCG_REG_R2),
- INSN_NOP_I,
- tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
- TCG_REG_R2, data_reg, 31, 31));
- tcg_out_bundle(s, miI,
- tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R58, data_reg),
- INSN_NOP_I,
- tcg_opc_bswap64_i(TCG_REG_P6, TCG_REG_R2, TCG_REG_R2));
- data_reg = TCG_REG_R2;
- break;
-
- case MO_64 | MO_BSWAP:
- tcg_out_bundle(s, miI,
- tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1,
- TCG_REG_R1, TCG_REG_R2),
- tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R58, data_reg),
- tcg_opc_bswap64_i(TCG_REG_P6, TCG_REG_R2, data_reg));
- data_reg = TCG_REG_R2;
- break;
-
- default:
- tcg_abort();
- }
-
+ tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R2,
+ TCG_REG_R2, TCG_REG_R57),
+ tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R59, mem_index));
+ label_ptr = s->code_ptr + 2;
tcg_out_bundle(s, miB,
tcg_opc_m4 (TCG_REG_P6, opc_st_m4[s_bits],
- data_reg, TCG_REG_R3),
- tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R59, mem_index),
- tcg_opc_b5 (TCG_REG_P7, OPC_BR_CALL_SPTK_MANY_B5,
- TCG_REG_B0, TCG_REG_B6));
+ TCG_REG_R58, TCG_REG_R2),
+ INSN_NOP_I,
+ tcg_opc_b3 (TCG_REG_P7, OPC_BR_CALL_SPNT_FEW_B3, TCG_REG_B0,
+ get_reloc_pcrel21b(label_ptr)));
+
+ add_qemu_ldst_label(s, 0, opc, label_ptr);
}
#else /* !CONFIG_SOFTMMU */
+# include "tcg-be-null.h"
-static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
- TCGMemOp opc)
+static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args)
{
static uint64_t const opc_ld_m1[4] = {
OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1
};
int addr_reg, data_reg;
- TCGMemOp s_bits, bswap;
+ TCGMemOp opc, s_bits, bswap;
- data_reg = *args++;
- addr_reg = *args++;
+ data_reg = args[0];
+ addr_reg = args[1];
+ opc = args[2];
s_bits = opc & MO_SIZE;
bswap = opc & MO_BSWAP;
@@ -1900,8 +1962,7 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
#endif
}
-static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
- TCGMemOp opc)
+static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
{
static uint64_t const opc_st_m4[4] = {
OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4
@@ -1910,10 +1971,11 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
#if TARGET_LONG_BITS == 64
uint64_t add_guest_base;
#endif
- TCGMemOp s_bits, bswap;
+ TCGMemOp opc, s_bits, bswap;
- data_reg = *args++;
- addr_reg = *args++;
+ data_reg = args[0];
+ addr_reg = args[1];
+ opc = args[2];
s_bits = opc & MO_SIZE;
bswap = opc & MO_BSWAP;
@@ -2237,40 +2299,17 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
args[3], const_args[3], args[4], const_args[4], 0);
break;
- case INDEX_op_qemu_ld8u:
- tcg_out_qemu_ld(s, args, MO_UB);
- break;
- case INDEX_op_qemu_ld8s:
- tcg_out_qemu_ld(s, args, MO_SB);
- break;
- case INDEX_op_qemu_ld16u:
- tcg_out_qemu_ld(s, args, MO_TEUW);
- break;
- case INDEX_op_qemu_ld16s:
- tcg_out_qemu_ld(s, args, MO_TESW);
- break;
- case INDEX_op_qemu_ld32:
- case INDEX_op_qemu_ld32u:
- tcg_out_qemu_ld(s, args, MO_TEUL);
- break;
- case INDEX_op_qemu_ld32s:
- tcg_out_qemu_ld(s, args, MO_TESL);
- break;
- case INDEX_op_qemu_ld64:
- tcg_out_qemu_ld(s, args, MO_TEQ);
- break;
-
- case INDEX_op_qemu_st8:
- tcg_out_qemu_st(s, args, MO_UB);
+ case INDEX_op_qemu_ld_i32:
+ tcg_out_qemu_ld(s, args);
break;
- case INDEX_op_qemu_st16:
- tcg_out_qemu_st(s, args, MO_TEUW);
+ case INDEX_op_qemu_ld_i64:
+ tcg_out_qemu_ld(s, args);
break;
- case INDEX_op_qemu_st32:
- tcg_out_qemu_st(s, args, MO_TEUL);
+ case INDEX_op_qemu_st_i32:
+ tcg_out_qemu_st(s, args);
break;
- case INDEX_op_qemu_st64:
- tcg_out_qemu_st(s, args, MO_TEQ);
+ case INDEX_op_qemu_st_i64:
+ tcg_out_qemu_st(s, args);
break;
default:
@@ -2381,19 +2420,10 @@ static const TCGTargetOpDef ia64_op_defs[] = {
{ INDEX_op_deposit_i32, { "r", "rZ", "ri" } },
{ INDEX_op_deposit_i64, { "r", "rZ", "ri" } },
- { INDEX_op_qemu_ld8u, { "r", "r" } },
- { INDEX_op_qemu_ld8s, { "r", "r" } },
- { INDEX_op_qemu_ld16u, { "r", "r" } },
- { INDEX_op_qemu_ld16s, { "r", "r" } },
- { INDEX_op_qemu_ld32, { "r", "r" } },
- { INDEX_op_qemu_ld32u, { "r", "r" } },
- { INDEX_op_qemu_ld32s, { "r", "r" } },
- { INDEX_op_qemu_ld64, { "r", "r" } },
-
- { INDEX_op_qemu_st8, { "SZ", "r" } },
- { INDEX_op_qemu_st16, { "SZ", "r" } },
- { INDEX_op_qemu_st32, { "SZ", "r" } },
- { INDEX_op_qemu_st64, { "SZ", "r" } },
+ { INDEX_op_qemu_ld_i32, { "r", "r" } },
+ { INDEX_op_qemu_ld_i64, { "r", "r" } },
+ { INDEX_op_qemu_st_i32, { "SZ", "r" } },
+ { INDEX_op_qemu_st_i64, { "SZ", "r" } },
{ -1 },
};
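
For readers following the tcg_out_qemu_tlb rewrite: the bundles implement the standard QEMU softmmu fast path. The predicate computed into p6/p7 corresponds to the following C (a sketch with placeholder constants; the real values come from the target configuration):

    #include <stdint.h>

    #define TARGET_PAGE_BITS 12     /* placeholder */
    #define CPU_TLB_BITS     8      /* placeholder */
    #define TARGET_PAGE_MASK (~(uint64_t)((1 << TARGET_PAGE_BITS) - 1))

    typedef struct {
        uint64_t addr_read;   /* comparator: page address, low bits zero */
        uint64_t addend;      /* host minus guest offset for this page */
    } TLBEntry;

    /* Nonzero on a TLB hit for an access of (1 << s_bits) bytes.
       Keeping the low s_bits bits in the comparison (the dep insn in
       the bundle comment) makes unaligned accesses miss on purpose. */
    static int tlb_hit(const TLBEntry *tlb, uint64_t addr, int s_bits)
    {
        uint64_t index = (addr >> TARGET_PAGE_BITS) & ((1u << CPU_TLB_BITS) - 1);
        uint64_t cmp = addr & (TARGET_PAGE_MASK | ((1ull << s_bits) - 1));
        return tlb[index].addr_read == cmp;
    }
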
diff --git a/tcg/ia64/tcg-target.h b/tcg/ia64/tcg-target.h
index 52a939c..09c3ba8 100644
--- a/tcg/ia64/tcg-target.h
+++ b/tcg/ia64/tcg-target.h
@@ -153,7 +153,7 @@ typedef enum {
#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_mulsh_i64 0
-#define TCG_TARGET_HAS_new_ldst 0
+#define TCG_TARGET_HAS_new_ldst 1
#define TCG_TARGET_deposit_i32_valid(ofs, len) ((len) <= 16)
#define TCG_TARGET_deposit_i64_valid(ofs, len) ((len) <= 16)
diff --git a/vl.c b/vl.c
index 9975e5a..db9ea90 100644
--- a/vl.c
+++ b/vl.c
@@ -2740,7 +2740,7 @@ static int configure_accelerator(QEMUMachine *machine)
if (!accel_list[i].available()) {
printf("%s not supported for this target\n",
accel_list[i].name);
- continue;
+ break;
}
*(accel_list[i].allowed) = true;
ret = accel_list[i].init(machine);
diff --git a/xbzrle.c b/xbzrle.c
index fbcb35d..8e220bf 100644
--- a/xbzrle.c
+++ b/xbzrle.c
@@ -28,7 +28,7 @@ int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen,
{
uint32_t zrun_len = 0, nzrun_len = 0;
int d = 0, i = 0;
- long res, xor;
+ long res;
uint8_t *nzrun_start = NULL;
g_assert(!(((uintptr_t)old_buf | (uintptr_t)new_buf | slen) %
@@ -93,9 +93,11 @@ int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen,
/* word at a time for speed, use of 32-bit long okay */
if (!res) {
/* truncation to 32-bit long okay */
- long mask = (long)0x0101010101010101ULL;
+ unsigned long mask = (unsigned long)0x0101010101010101ULL;
while (i < slen) {
- xor = *(long *)(old_buf + i) ^ *(long *)(new_buf + i);
+ unsigned long xor;
+ xor = *(unsigned long *)(old_buf + i)
+ ^ *(unsigned long *)(new_buf + i);
if ((xor - mask) & ~xor & (mask << 7)) {
/* found the end of an nzrun within the current long */
while (old_buf[i] != new_buf[i]) {
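
The test (xor - mask) & ~xor & (mask << 7) in this hunk is the classic word-at-a-time zero-byte detector: with mask = 0x0101...01, the expression is nonzero exactly when some byte of xor is zero, i.e. when the old and new buffers agree at some byte within the current long, ending the non-zero run. A standalone sketch (illustration only):

    #include <stdint.h>

    /* Nonzero iff some byte of x is 0x00 (exact, no false positives). */
    static int has_zero_byte(uint64_t x)
    {
        const uint64_t mask = 0x0101010101010101ULL;
        return ((x - mask) & ~x & (mask << 7)) != 0;
    }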