Diffstat (limited to 'hw')
-rw-r--r--  hw/9pfs/virtio-9p-device.c | 11
-rw-r--r--  hw/acpi/aml-build.c | 2
-rw-r--r--  hw/arm/allwinner-a10.c | 11
-rw-r--r--  hw/arm/boot.c | 10
-rw-r--r--  hw/arm/highbank.c | 91
-rw-r--r--  hw/arm/nseries.c | 2
-rw-r--r--  hw/arm/virt.c | 10
-rw-r--r--  hw/arm/xilinx_zynq.c | 6
-rw-r--r--  hw/block/nvme.c | 11
-rw-r--r--  hw/block/virtio-blk.c | 8
-rw-r--r--  hw/block/xen_disk.c | 27
-rw-r--r--  hw/bt/hci.c | 7
-rw-r--r--  hw/bt/sdp.c | 29
-rw-r--r--  hw/core/ptimer.c | 3
-rw-r--r--  hw/core/qdev.c | 5
-rw-r--r--  hw/display/qxl.c | 2
-rw-r--r--  hw/display/tcx.c | 26
-rw-r--r--  hw/dma/pxa2xx_dma.c | 3
-rw-r--r--  hw/i386/kvm/clock.c | 18
-rw-r--r--  hw/i386/pc.c | 6
-rw-r--r--  hw/i386/pc_piix.c | 13
-rw-r--r--  hw/i386/pc_q35.c | 10
-rw-r--r--  hw/i386/pci-assign-load-rom.c | 6
-rw-r--r--  hw/ide/ahci.c | 182
-rw-r--r--  hw/ide/ahci.h | 19
-rw-r--r--  hw/ide/atapi.c | 69
-rw-r--r--  hw/ide/core.c | 14
-rw-r--r--  hw/ide/ich.c | 10
-rw-r--r--  hw/ide/internal.h | 2
-rw-r--r--  hw/ide/macio.c | 12
-rw-r--r--  hw/ide/pci.c | 7
-rw-r--r--  hw/input/tsc210x.c | 8
-rw-r--r--  hw/intc/arm_gic.c | 8
-rw-r--r--  hw/misc/Makefile.objs | 1
-rw-r--r--  hw/misc/macio/cuda.c | 246
-rw-r--r--  hw/misc/zynq-xadc.c | 302
-rw-r--r--  hw/net/e1000.c | 476
-rw-r--r--  hw/net/e1000_regs.h | 8
-rw-r--r--  hw/net/rocker/rocker.c | 6
-rw-r--r--  hw/net/rocker/rocker_of_dpa.c | 12
-rw-r--r--  hw/pci/pcie.c | 12
-rw-r--r--  hw/ppc/mac.h | 3
-rw-r--r--  hw/ppc/mac_newworld.c | 3
-rw-r--r--  hw/ppc/spapr.c | 22
-rw-r--r--  hw/s390x/css.c | 6
-rw-r--r--  hw/s390x/ipl.c | 68
-rw-r--r--  hw/s390x/ipl.h | 25
-rw-r--r--  hw/s390x/s390-pci-bus.c | 8
-rw-r--r--  hw/s390x/s390-virtio-ccw.c | 3
-rw-r--r--  hw/s390x/s390-virtio.c | 16
-rw-r--r--  hw/scsi/megasas.c | 2
-rw-r--r--  hw/scsi/scsi-bus.c | 2
-rw-r--r--  hw/scsi/scsi-disk.c | 48
-rw-r--r--  hw/timer/hpet.c | 4
-rw-r--r--  hw/tpm/tpm_tis.c | 2
-rw-r--r--  hw/usb/ccid-card-emulated.c | 4
-rw-r--r--  hw/usb/dev-mtp.c | 3
-rw-r--r--  hw/usb/hcd-xhci.c | 2
-rw-r--r--  hw/usb/redirect.c | 6
-rw-r--r--  hw/vfio/pci-quirks.c | 16
-rw-r--r--  hw/vfio/pci.c | 40
-rw-r--r--  hw/vfio/platform.c | 2
-rw-r--r--  hw/virtio/dataplane/vring.c | 96
-rw-r--r--  hw/virtio/vhost-backend.c | 2
-rw-r--r--  hw/virtio/vhost-user.c | 17
-rw-r--r--  hw/virtio/vhost.c | 9
-rw-r--r--  hw/virtio/virtio-balloon.c | 4
-rw-r--r--  hw/virtio/virtio-pci.c | 299
-rw-r--r--  hw/virtio/virtio-pci.h | 30
-rw-r--r--  hw/virtio/virtio.c | 58
-rw-r--r--  hw/xen/xen_pt_config_init.c | 4
71 files changed, 1938 insertions(+), 577 deletions(-)
diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
index 93a407c..e3abcfa 100644
--- a/hw/9pfs/virtio-9p-device.c
+++ b/hw/9pfs/virtio-9p-device.c
@@ -43,6 +43,16 @@ static void virtio_9p_get_config(VirtIODevice *vdev, uint8_t *config)
g_free(cfg);
}
+static void virtio_9p_save(QEMUFile *f, void *opaque)
+{
+ virtio_save(VIRTIO_DEVICE(opaque), f);
+}
+
+static int virtio_9p_load(QEMUFile *f, void *opaque, int version_id)
+{
+ return virtio_load(VIRTIO_DEVICE(opaque), f, version_id);
+}
+
static void virtio_9p_device_realize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
@@ -130,6 +140,7 @@ static void virtio_9p_device_realize(DeviceState *dev, Error **errp)
}
v9fs_path_free(&path);
+ register_savevm(dev, "virtio-9p", -1, 1, virtio_9p_save, virtio_9p_load, s);
return;
out:
g_free(s->ctx.fs_root);
diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index 0d4b324..a00a0ab 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -1163,9 +1163,7 @@ void *acpi_data_push(GArray *table_data, unsigned size)
unsigned acpi_data_len(GArray *table)
{
-#if GLIB_CHECK_VERSION(2, 22, 0)
assert(g_array_get_element_size(table) == 1);
-#endif
return table->len;
}
diff --git a/hw/arm/allwinner-a10.c b/hw/arm/allwinner-a10.c
index 43dc0a1..b0ca81c 100644
--- a/hw/arm/allwinner-a10.c
+++ b/hw/arm/allwinner-a10.c
@@ -39,6 +39,9 @@ static void aw_a10_init(Object *obj)
qemu_check_nic_model(&nd_table[0], TYPE_AW_EMAC);
qdev_set_nic_properties(DEVICE(&s->emac), &nd_table[0]);
}
+
+ object_initialize(&s->sata, sizeof(s->sata), TYPE_ALLWINNER_AHCI);
+ qdev_set_parent_bus(DEVICE(&s->sata), sysbus_get_default());
}
static void aw_a10_realize(DeviceState *dev, Error **errp)
@@ -93,6 +96,14 @@ static void aw_a10_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(sysbusdev, 0, AW_A10_EMAC_BASE);
sysbus_connect_irq(sysbusdev, 0, s->irq[55]);
+ object_property_set_bool(OBJECT(&s->sata), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->sata), 0, AW_A10_SATA_BASE);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->sata), 0, s->irq[56]);
+
/* FIXME use a qdev chardev prop instead of serial_hds[] */
serial_mm_init(get_system_memory(), AW_A10_UART0_REG_BASE, 2, s->irq[1],
115200, serial_hds[0], DEVICE_NATIVE_ENDIAN);
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index b0879a5..75f69bf 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -11,6 +11,7 @@
#include "hw/hw.h"
#include "hw/arm/arm.h"
#include "hw/arm/linux-boot-if.h"
+#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/boards.h"
#include "hw/loader.h"
@@ -495,7 +496,8 @@ static void do_cpu_reset(void *opaque)
}
/* Set to non-secure if not a secure boot */
- if (!info->secure_boot) {
+ if (!info->secure_boot &&
+ (cs != first_cpu || !info->secure_board_setup)) {
/* Linux expects non-secure state */
env->cp15.scr_el3 |= SCR_NS;
}
@@ -598,6 +600,12 @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
struct arm_boot_info *info =
container_of(n, struct arm_boot_info, load_kernel_notifier);
+ /* The board code is not supposed to set secure_board_setup unless
+ * running its code in secure mode is actually possible, and KVM
+ * doesn't support secure.
+ */
+ assert(!(info->secure_board_setup && kvm_enabled()));
+
/* Load the kernel. */
if (!info->kernel_filename || info->firmware_loaded) {
diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c
index be04b27..85ae69e 100644
--- a/hw/arm/highbank.c
+++ b/hw/arm/highbank.c
@@ -22,6 +22,7 @@
#include "hw/devices.h"
#include "hw/loader.h"
#include "net/net.h"
+#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/boards.h"
#include "sysemu/block-backend.h"
@@ -32,10 +33,52 @@
#define SMP_BOOT_REG 0x40
#define MPCORE_PERIPHBASE 0xfff10000
+#define MVBAR_ADDR 0x200
+
#define NIRQ_GIC 160
/* Board init. */
+/* MVBAR_ADDR is limited by precision of movw */
+
+QEMU_BUILD_BUG_ON(MVBAR_ADDR >= (1 << 16));
+
+#define ARMV7_IMM16(x) (extract32((x), 0, 12) | \
+ extract32((x), 12, 4) << 16)
+
+static void hb_write_board_setup(ARMCPU *cpu,
+ const struct arm_boot_info *info)
+{
+ int n;
+ uint32_t board_setup_blob[] = {
+ /* MVBAR_ADDR */
+ /* Default unimplemented and unused vectors to spin. Makes it
+ * easier to debug (as opposed to the CPU running away).
+ */
+ 0xeafffffe, /* notused1: b notused */
+ 0xeafffffe, /* notused2: b notused */
+ 0xe1b0f00e, /* smc: movs pc, lr - exception return */
+ 0xeafffffe, /* prefetch_abort: b prefetch_abort */
+ 0xeafffffe, /* data_abort: b data_abort */
+ 0xeafffffe, /* notused3: b notused3 */
+ 0xeafffffe, /* irq: b irq */
+ 0xeafffffe, /* fiq: b fiq */
+#define BOARD_SETUP_ADDR (MVBAR_ADDR + 8 * sizeof(uint32_t))
+ 0xe3000000 + ARMV7_IMM16(MVBAR_ADDR), /* movw r0, MVBAR_ADDR */
+ 0xee0c0f30, /* mcr p15, 0, r0, c12, c0, 1 - set MVBAR */
+ 0xee110f11, /* mrc p15, 0, r0, c1 , c1, 0 - get SCR */
+ 0xe3810001, /* orr r0, #1 - set NS */
+ 0xee010f11, /* mcr p15, 0, r0, c1 , c1, 0 - set SCR */
+ 0xe1600070, /* smc - go to monitor mode to flush NS change */
+ 0xe12fff1e, /* bx lr - return to caller */
+ };
+ for (n = 0; n < ARRAY_SIZE(board_setup_blob); n++) {
+ board_setup_blob[n] = tswap32(board_setup_blob[n]);
+ }
+ rom_add_blob_fixed("board-setup", board_setup_blob,
+ sizeof(board_setup_blob), MVBAR_ADDR);
+}
+
static void hb_write_secondary(ARMCPU *cpu, const struct arm_boot_info *info)
{
int n;
@@ -223,15 +266,13 @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id)
MemoryRegion *sysmem;
char *sysboot_filename;
- if (!cpu_model) {
- switch (machine_id) {
- case CALXEDA_HIGHBANK:
- cpu_model = "cortex-a9";
- break;
- case CALXEDA_MIDWAY:
- cpu_model = "cortex-a15";
- break;
- }
+ switch (machine_id) {
+ case CALXEDA_HIGHBANK:
+ cpu_model = "cortex-a9";
+ break;
+ case CALXEDA_MIDWAY:
+ cpu_model = "cortex-a15";
+ break;
}
for (n = 0; n < smp_cpus; n++) {
@@ -240,24 +281,16 @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id)
ARMCPU *cpu;
Error *err = NULL;
- if (!oc) {
- error_report("Unable to find CPU definition");
- exit(1);
- }
-
cpuobj = object_new(object_class_get_name(oc));
cpu = ARM_CPU(cpuobj);
- /* By default A9 and A15 CPUs have EL3 enabled. This board does not
- * currently support EL3 so the CPU EL3 property is disabled before
- * realization.
- */
- if (object_property_find(cpuobj, "has_el3", NULL)) {
- object_property_set_bool(cpuobj, false, "has_el3", &err);
- if (err) {
- error_report_err(err);
- exit(1);
- }
+ object_property_set_int(cpuobj, QEMU_PSCI_CONDUIT_SMC,
+ "psci-conduit", &error_abort);
+
+ if (n) {
+ /* Secondary CPUs start in PSCI powered-down state */
+ object_property_set_bool(cpuobj, true,
+ "start-powered-off", &error_abort);
}
if (object_property_find(cpuobj, "reset-cbar", NULL)) {
@@ -378,6 +411,16 @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id)
highbank_binfo.loader_start = 0;
highbank_binfo.write_secondary_boot = hb_write_secondary;
highbank_binfo.secondary_cpu_reset_hook = hb_reset_secondary;
+ if (!kvm_enabled()) {
+ highbank_binfo.board_setup_addr = BOARD_SETUP_ADDR;
+ highbank_binfo.write_board_setup = hb_write_board_setup;
+ highbank_binfo.secure_board_setup = true;
+ } else {
+ error_report("WARNING: cannot load built-in Monitor support "
+ "if KVM is enabled. Some guests (such as Linux) "
+ "may not boot.");
+ }
+
arm_load_kernel(ARM_CPU(first_cpu), &highbank_binfo);
}
diff --git a/hw/arm/nseries.c b/hw/arm/nseries.c
index 6a6b3e6..2a8835e 100644
--- a/hw/arm/nseries.c
+++ b/hw/arm/nseries.c
@@ -1275,7 +1275,7 @@ static int n8x0_atag_setup(void *p, int model)
strcpy((void *) w, "hw-build"); /* char component[12] */
w += 6;
strcpy((void *) w, "QEMU ");
- pstrcat((void *) w, 12, qemu_get_version()); /* char version[12] */
+ pstrcat((void *) w, 12, qemu_hw_version()); /* char version[12] */
w += 6;
tag = (model == 810) ? "1.1.10-qemu" : "1.1.6-qemu";
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 77d9267..9c6792c 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -941,8 +941,8 @@ static void machvirt_init(MachineState *machine)
if (!gic_version) {
gic_version = kvm_arm_vgic_probe();
if (!gic_version) {
- error_report("Unable to determine GIC version supported by host\n"
- "Probably KVM acceleration is not supported\n");
+ error_report("Unable to determine GIC version supported by host");
+ error_printf("KVM acceleration is probably not supported\n");
exit(1);
}
}
@@ -990,7 +990,7 @@ static void machvirt_init(MachineState *machine)
char *cpuopts = g_strdup(cpustr[1]);
if (!oc) {
- fprintf(stderr, "Unable to find CPU definition\n");
+ error_report("Unable to find CPU definition");
exit(1);
}
cpuobj = object_new(object_class_get_name(oc));
@@ -1126,8 +1126,8 @@ static void virt_set_gic_version(Object *obj, const char *value, Error **errp)
} else if (!strcmp(value, "host")) {
vms->gic_version = 0; /* Will probe later */
} else {
- error_report("Invalid gic-version option value\n"
- "Allowed values are: 3, 2, host\n");
+ error_report("Invalid gic-version option value");
+ error_printf("Allowed gic-version values are: 3, 2, host\n");
exit(1);
}
}
diff --git a/hw/arm/xilinx_zynq.c b/hw/arm/xilinx_zynq.c
index 82a9db8..1c1a445 100644
--- a/hw/arm/xilinx_zynq.c
+++ b/hw/arm/xilinx_zynq.c
@@ -24,6 +24,7 @@
#include "hw/block/flash.h"
#include "sysemu/block-backend.h"
#include "hw/loader.h"
+#include "hw/misc/zynq-xadc.h"
#include "hw/ssi.h"
#include "qemu/error-report.h"
@@ -264,6 +265,11 @@ static void zynq_init(MachineState *machine)
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xE0101000);
sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[79-IRQ_OFFSET]);
+ dev = qdev_create(NULL, TYPE_ZYNQ_XADC);
+ qdev_init_nofail(dev);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xF8007100);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[39-IRQ_OFFSET]);
+
dev = qdev_create(NULL, "pl330");
qdev_prop_set_uint8(dev, "num_chnls", 8);
qdev_prop_set_uint8(dev, "num_periph_req", 4);
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 5da41b2..169e4fa 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -201,10 +201,11 @@ static void nvme_rw_cb(void *opaque, int ret)
NvmeCtrl *n = sq->ctrl;
NvmeCQueue *cq = n->cq[sq->cqid];
- block_acct_done(blk_get_stats(n->conf.blk), &req->acct);
if (!ret) {
+ block_acct_done(blk_get_stats(n->conf.blk), &req->acct);
req->status = NVME_SUCCESS;
} else {
+ block_acct_failed(blk_get_stats(n->conf.blk), &req->acct);
req->status = NVME_INTERNAL_DEV_ERROR;
}
if (req->has_sg) {
@@ -238,18 +239,22 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
uint64_t data_size = (uint64_t)nlb << data_shift;
uint64_t aio_slba = slba << (data_shift - BDRV_SECTOR_BITS);
int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
+ enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
if ((slba + nlb) > ns->id_ns.nsze) {
+ block_acct_invalid(blk_get_stats(n->conf.blk), acct);
return NVME_LBA_RANGE | NVME_DNR;
}
+
if (nvme_map_prp(&req->qsg, prp1, prp2, data_size, n)) {
+ block_acct_invalid(blk_get_stats(n->conf.blk), acct);
return NVME_INVALID_FIELD | NVME_DNR;
}
+
assert((nlb << data_shift) == req->qsg.size);
req->has_sg = true;
- dma_acct_start(n->conf.blk, &req->acct, &req->qsg,
- is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
+ dma_acct_start(n->conf.blk, &req->acct, &req->qsg, acct);
req->aiocb = is_write ?
dma_blk_write(n->conf.blk, &req->qsg, aio_slba, nvme_rw_cb, req) :
dma_blk_read(n->conf.blk, &req->qsg, aio_slba, nvme_rw_cb, req);
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 093e475..848f3fe 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -76,7 +76,7 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
s->rq = req;
} else if (action == BLOCK_ERROR_ACTION_REPORT) {
virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
- block_acct_done(blk_get_stats(s->blk), &req->acct);
+ block_acct_failed(blk_get_stats(s->blk), &req->acct);
virtio_blk_free_request(req);
}
@@ -112,6 +112,10 @@ static void virtio_blk_rw_complete(void *opaque, int ret)
* happen on the other side of the migration).
*/
if (virtio_blk_handle_rw_error(req, -ret, is_read)) {
+ /* Break the link in case the next request is added to the
+ * restart queue and is going to be parsed from the ring again.
+ */
+ req->mr_next = NULL;
continue;
}
}
@@ -536,6 +540,8 @@ void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
if (!virtio_blk_sect_range_ok(req->dev, req->sector_num,
req->qiov.size)) {
virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
+ block_acct_invalid(blk_get_stats(req->dev->blk),
+ is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
virtio_blk_free_request(req);
return;
}
diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 1bbc111..02eda6e 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -537,7 +537,11 @@ static void qemu_aio_complete(void *opaque, int ret)
break;
}
case BLKIF_OP_READ:
- block_acct_done(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
+ if (ioreq->status == BLKIF_RSP_OKAY) {
+ block_acct_done(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
+ } else {
+ block_acct_failed(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
+ }
break;
case BLKIF_OP_DISCARD:
default:
@@ -576,7 +580,9 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
}
block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
- ioreq->v.size, BLOCK_ACCT_WRITE);
+ ioreq->v.size,
+ ioreq->req.operation == BLKIF_OP_WRITE ?
+ BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
ioreq->aio_inflight++;
blk_aio_writev(blkdev->blk, ioreq->start / BLOCK_SIZE,
&ioreq->v, ioreq->v.size / BLOCK_SIZE,
@@ -720,6 +726,23 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
/* parse them */
if (ioreq_parse(ioreq) != 0) {
+
+ switch (ioreq->req.operation) {
+ case BLKIF_OP_READ:
+ block_acct_invalid(blk_get_stats(blkdev->blk),
+ BLOCK_ACCT_READ);
+ break;
+ case BLKIF_OP_WRITE:
+ block_acct_invalid(blk_get_stats(blkdev->blk),
+ BLOCK_ACCT_WRITE);
+ break;
+ case BLKIF_OP_FLUSH_DISKCACHE:
+ block_acct_invalid(blk_get_stats(blkdev->blk),
+ BLOCK_ACCT_FLUSH);
+ default:
+ break;
+ };
+
if (blk_send_response_one(ioreq)) {
xen_be_send_notify(&blkdev->xendev);
}
diff --git a/hw/bt/hci.c b/hw/bt/hci.c
index 6a88d49..2151d01 100644
--- a/hw/bt/hci.c
+++ b/hw/bt/hci.c
@@ -23,6 +23,8 @@
#include "hw/usb.h"
#include "sysemu/bt.h"
#include "hw/bt.h"
+#include "qapi/qmp/qerror.h"
+#include "sysemu/replay.h"
struct bt_hci_s {
uint8_t *(*evt_packet)(void *opaque);
@@ -72,6 +74,8 @@ struct bt_hci_s {
struct HCIInfo info;
struct bt_device_s device;
+
+ Error *replay_blocker;
};
#define DEFAULT_RSSI_DBM 20
@@ -2189,6 +2193,9 @@ struct HCIInfo *bt_new_hci(struct bt_scatternet_s *net)
s->device.handle_destroy = bt_hci_destroy;
+ error_setg(&s->replay_blocker, QERR_REPLAY_NOT_SUPPORTED, "-bt hci");
+ replay_add_blocker(s->replay_blocker);
+
return &s->info;
}
diff --git a/hw/bt/sdp.c b/hw/bt/sdp.c
index c903747..b9bcdcc 100644
--- a/hw/bt/sdp.c
+++ b/hw/bt/sdp.c
@@ -150,12 +150,14 @@ static ssize_t sdp_svc_search(struct bt_l2cap_sdp_state_s *sdp,
if (seqlen < 3 || len < seqlen)
return -SDP_INVALID_SYNTAX;
len -= seqlen;
-
while (seqlen)
if (sdp_svc_match(sdp, &req, &seqlen))
return -SDP_INVALID_SYNTAX;
- } else if (sdp_svc_match(sdp, &req, &seqlen))
- return -SDP_INVALID_SYNTAX;
+ } else {
+ if (sdp_svc_match(sdp, &req, &len)) {
+ return -SDP_INVALID_SYNTAX;
+ }
+ }
if (len < 3)
return -SDP_INVALID_SYNTAX;
@@ -278,8 +280,11 @@ static ssize_t sdp_attr_get(struct bt_l2cap_sdp_state_s *sdp,
while (seqlen)
if (sdp_attr_match(record, &req, &seqlen))
return -SDP_INVALID_SYNTAX;
- } else if (sdp_attr_match(record, &req, &seqlen))
- return -SDP_INVALID_SYNTAX;
+ } else {
+ if (sdp_attr_match(record, &req, &len)) {
+ return -SDP_INVALID_SYNTAX;
+ }
+ }
if (len < 1)
return -SDP_INVALID_SYNTAX;
@@ -393,8 +398,11 @@ static ssize_t sdp_svc_search_attr_get(struct bt_l2cap_sdp_state_s *sdp,
while (seqlen)
if (sdp_svc_match(sdp, &req, &seqlen))
return -SDP_INVALID_SYNTAX;
- } else if (sdp_svc_match(sdp, &req, &seqlen))
- return -SDP_INVALID_SYNTAX;
+ } else {
+ if (sdp_svc_match(sdp, &req, &len)) {
+ return -SDP_INVALID_SYNTAX;
+ }
+ }
if (len < 3)
return -SDP_INVALID_SYNTAX;
@@ -413,8 +421,11 @@ static ssize_t sdp_svc_search_attr_get(struct bt_l2cap_sdp_state_s *sdp,
while (seqlen)
if (sdp_svc_attr_match(sdp, &req, &seqlen))
return -SDP_INVALID_SYNTAX;
- } else if (sdp_svc_attr_match(sdp, &req, &seqlen))
- return -SDP_INVALID_SYNTAX;
+ } else {
+ if (sdp_svc_attr_match(sdp, &req, &len)) {
+ return -SDP_INVALID_SYNTAX;
+ }
+ }
if (len < 1)
return -SDP_INVALID_SYNTAX;
diff --git a/hw/core/ptimer.c b/hw/core/ptimer.c
index 8437bd6..edf077c 100644
--- a/hw/core/ptimer.c
+++ b/hw/core/ptimer.c
@@ -9,6 +9,7 @@
#include "qemu/timer.h"
#include "hw/ptimer.h"
#include "qemu/host-utils.h"
+#include "sysemu/replay.h"
struct ptimer_state
{
@@ -27,7 +28,7 @@ struct ptimer_state
static void ptimer_trigger(ptimer_state *s)
{
if (s->bh) {
- qemu_bh_schedule(s->bh);
+ replay_bh_schedule_event(s->bh);
}
}
diff --git a/hw/core/qdev.c b/hw/core/qdev.c
index 4ab04aa..b3ad467 100644
--- a/hw/core/qdev.c
+++ b/hw/core/qdev.c
@@ -325,6 +325,11 @@ void qdev_reset_all(DeviceState *dev)
qdev_walk_children(dev, NULL, NULL, qdev_reset_one, qbus_reset_one, NULL);
}
+void qdev_reset_all_fn(void *opaque)
+{
+ qdev_reset_all(DEVICE(opaque));
+}
+
void qbus_reset_all(BusState *bus)
{
qbus_walk_children(bus, NULL, NULL, qdev_reset_one, qbus_reset_one, NULL);
diff --git a/hw/display/qxl.c b/hw/display/qxl.c
index 9c961da..8a3040c 100644
--- a/hw/display/qxl.c
+++ b/hw/display/qxl.c
@@ -2156,7 +2156,7 @@ static int qxl_post_load(void *opaque, int version)
qxl_create_guest_primary(d, 1, QXL_SYNC);
/* replay surface-create and cursor-set commands */
- cmds = g_malloc0(sizeof(QXLCommandExt) * (d->ssd.num_surfaces + 1));
+ cmds = g_new0(QXLCommandExt, d->ssd.num_surfaces + 1);
for (in = 0, out = 0; in < d->ssd.num_surfaces; in++) {
if (d->guest_surfaces.cmds[in] == 0) {
continue;
diff --git a/hw/display/tcx.c b/hw/display/tcx.c
index bf119bc..d720ea6 100644
--- a/hw/display/tcx.c
+++ b/hw/display/tcx.c
@@ -944,57 +944,55 @@ static void tcx_initfn(Object *obj)
SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
TCXState *s = TCX(obj);
- memory_region_init_ram(&s->rom, OBJECT(s), "tcx.prom", FCODE_MAX_ROM_SIZE,
+ memory_region_init_ram(&s->rom, obj, "tcx.prom", FCODE_MAX_ROM_SIZE,
&error_fatal);
memory_region_set_readonly(&s->rom, true);
sysbus_init_mmio(sbd, &s->rom);
/* 2/STIP : Stippler */
- memory_region_init_io(&s->stip, OBJECT(s), &tcx_stip_ops, s, "tcx.stip",
+ memory_region_init_io(&s->stip, obj, &tcx_stip_ops, s, "tcx.stip",
TCX_STIP_NREGS);
sysbus_init_mmio(sbd, &s->stip);
/* 3/BLIT : Blitter */
- memory_region_init_io(&s->blit, OBJECT(s), &tcx_blit_ops, s, "tcx.blit",
+ memory_region_init_io(&s->blit, obj, &tcx_blit_ops, s, "tcx.blit",
TCX_BLIT_NREGS);
sysbus_init_mmio(sbd, &s->blit);
/* 5/RSTIP : Raw Stippler */
- memory_region_init_io(&s->rstip, OBJECT(s), &tcx_rstip_ops, s, "tcx.rstip",
+ memory_region_init_io(&s->rstip, obj, &tcx_rstip_ops, s, "tcx.rstip",
TCX_RSTIP_NREGS);
sysbus_init_mmio(sbd, &s->rstip);
/* 6/RBLIT : Raw Blitter */
- memory_region_init_io(&s->rblit, OBJECT(s), &tcx_rblit_ops, s, "tcx.rblit",
+ memory_region_init_io(&s->rblit, obj, &tcx_rblit_ops, s, "tcx.rblit",
TCX_RBLIT_NREGS);
sysbus_init_mmio(sbd, &s->rblit);
/* 7/TEC : ??? */
- memory_region_init_io(&s->tec, OBJECT(s), &tcx_dummy_ops, s,
- "tcx.tec", TCX_TEC_NREGS);
+ memory_region_init_io(&s->tec, obj, &tcx_dummy_ops, s, "tcx.tec",
+ TCX_TEC_NREGS);
sysbus_init_mmio(sbd, &s->tec);
/* 8/CMAP : DAC */
- memory_region_init_io(&s->dac, OBJECT(s), &tcx_dac_ops, s,
- "tcx.dac", TCX_DAC_NREGS);
+ memory_region_init_io(&s->dac, obj, &tcx_dac_ops, s, "tcx.dac",
+ TCX_DAC_NREGS);
sysbus_init_mmio(sbd, &s->dac);
/* 9/THC : Cursor */
- memory_region_init_io(&s->thc, OBJECT(s), &tcx_thc_ops, s, "tcx.thc",
+ memory_region_init_io(&s->thc, obj, &tcx_thc_ops, s, "tcx.thc",
TCX_THC_NREGS);
sysbus_init_mmio(sbd, &s->thc);
/* 11/DHC : ??? */
- memory_region_init_io(&s->dhc, OBJECT(s), &tcx_dummy_ops, s, "tcx.dhc",
+ memory_region_init_io(&s->dhc, obj, &tcx_dummy_ops, s, "tcx.dhc",
TCX_DHC_NREGS);
sysbus_init_mmio(sbd, &s->dhc);
/* 12/ALT : ??? */
- memory_region_init_io(&s->alt, OBJECT(s), &tcx_dummy_ops, s, "tcx.alt",
+ memory_region_init_io(&s->alt, obj, &tcx_dummy_ops, s, "tcx.alt",
TCX_ALT_NREGS);
sysbus_init_mmio(sbd, &s->alt);
-
- return;
}
static void tcx_realizefn(DeviceState *dev, Error **errp)
diff --git a/hw/dma/pxa2xx_dma.c b/hw/dma/pxa2xx_dma.c
index d4501fb..54cdb25 100644
--- a/hw/dma/pxa2xx_dma.c
+++ b/hw/dma/pxa2xx_dma.c
@@ -459,9 +459,8 @@ static int pxa2xx_dma_init(SysBusDevice *sbd)
return -1;
}
- s->chan = g_malloc0(sizeof(PXA2xxDMAChannel) * s->channels);
+ s->chan = g_new0(PXA2xxDMAChannel, s->channels);
- memset(s->chan, 0, sizeof(PXA2xxDMAChannel) * s->channels);
for (i = 0; i < s->channels; i ++)
s->chan[i].state = DCSR_STOPINTR;
diff --git a/hw/i386/kvm/clock.c b/hw/i386/kvm/clock.c
index efdf165..0593a3f 100644
--- a/hw/i386/kvm/clock.c
+++ b/hw/i386/kvm/clock.c
@@ -17,7 +17,7 @@
#include "qemu/host-utils.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
-#include "sysemu/cpus.h"
+#include "kvm_i386.h"
#include "hw/sysbus.h"
#include "hw/kvm/clock.h"
@@ -125,21 +125,7 @@ static void kvmclock_vm_state_change(void *opaque, int running,
return;
}
- cpu_synchronize_all_states();
- /* In theory, the cpu_synchronize_all_states() call above wouldn't
- * affect the rest of the code, as the VCPU state inside CPUState
- * is supposed to always match the VCPU state on the kernel side.
- *
- * In practice, calling cpu_synchronize_state() too soon will load the
- * kernel-side APIC state into X86CPU.apic_state too early, APIC state
- * won't be reloaded later because CPUState.vcpu_dirty==true, and
- * outdated APIC state may be migrated to another host.
- *
- * The real fix would be to make sure outdated APIC state is read
- * from the kernel again when necessary. While this is not fixed, we
- * need the cpu_clean_all_dirty() call below.
- */
- cpu_clean_all_dirty();
+ kvm_synchronize_all_tsc();
ret = kvm_vm_ioctl(kvm_state, KVM_GET_CLOCK, &data);
if (ret < 0) {
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 0cb8afd..5e20e07 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -1795,9 +1795,9 @@ static void pc_machine_set_max_ram_below_4g(Object *obj, Visitor *v,
return;
}
if (value > (1ULL << 32)) {
- error_set(&error, ERROR_CLASS_GENERIC_ERROR,
- "Machine option 'max-ram-below-4g=%"PRIu64
- "' expects size less than or equal to 4G", value);
+ error_setg(&error,
+ "Machine option 'max-ram-below-4g=%"PRIu64
+ "' expects size less than or equal to 4G", value);
error_propagate(errp, error);
return;
}
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index 393dcc4..07d0baa 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -472,6 +472,7 @@ static void pc_i440fx_machine_options(MachineClass *m)
static void pc_i440fx_2_5_machine_options(MachineClass *m)
{
pc_i440fx_machine_options(m);
+ m->hw_version = QEMU_VERSION;
m->alias = "pc";
m->is_default = 1;
}
@@ -484,6 +485,7 @@ static void pc_i440fx_2_4_machine_options(MachineClass *m)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_i440fx_2_5_machine_options(m);
+ m->hw_version = "2.4.0";
m->alias = NULL;
m->is_default = 0;
pcmc->broken_reserved_end = true;
@@ -497,6 +499,7 @@ DEFINE_I440FX_MACHINE(v2_4, "pc-i440fx-2.4", NULL,
static void pc_i440fx_2_3_machine_options(MachineClass *m)
{
pc_i440fx_2_4_machine_options(m);
+ m->hw_version = "2.3.0";
m->alias = NULL;
m->is_default = 0;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_3);
@@ -509,6 +512,7 @@ DEFINE_I440FX_MACHINE(v2_3, "pc-i440fx-2.3", pc_compat_2_3,
static void pc_i440fx_2_2_machine_options(MachineClass *m)
{
pc_i440fx_2_3_machine_options(m);
+ m->hw_version = "2.2.0";
SET_MACHINE_COMPAT(m, PC_COMPAT_2_2);
}
@@ -519,6 +523,7 @@ DEFINE_I440FX_MACHINE(v2_2, "pc-i440fx-2.2", pc_compat_2_2,
static void pc_i440fx_2_1_machine_options(MachineClass *m)
{
pc_i440fx_2_2_machine_options(m);
+ m->hw_version = "2.1.0";
m->default_display = NULL;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_1);
}
@@ -531,6 +536,7 @@ DEFINE_I440FX_MACHINE(v2_1, "pc-i440fx-2.1", pc_compat_2_1,
static void pc_i440fx_2_0_machine_options(MachineClass *m)
{
pc_i440fx_2_1_machine_options(m);
+ m->hw_version = "2.0.0";
SET_MACHINE_COMPAT(m, PC_COMPAT_2_0);
}
@@ -541,6 +547,7 @@ DEFINE_I440FX_MACHINE(v2_0, "pc-i440fx-2.0", pc_compat_2_0,
static void pc_i440fx_1_7_machine_options(MachineClass *m)
{
pc_i440fx_2_0_machine_options(m);
+ m->hw_version = "1.7.0";
m->default_machine_opts = NULL;
SET_MACHINE_COMPAT(m, PC_COMPAT_1_7);
}
@@ -552,6 +559,7 @@ DEFINE_I440FX_MACHINE(v1_7, "pc-i440fx-1.7", pc_compat_1_7,
static void pc_i440fx_1_6_machine_options(MachineClass *m)
{
pc_i440fx_1_7_machine_options(m);
+ m->hw_version = "1.6.0";
SET_MACHINE_COMPAT(m, PC_COMPAT_1_6);
}
@@ -562,6 +570,7 @@ DEFINE_I440FX_MACHINE(v1_6, "pc-i440fx-1.6", pc_compat_1_6,
static void pc_i440fx_1_5_machine_options(MachineClass *m)
{
pc_i440fx_1_6_machine_options(m);
+ m->hw_version = "1.5.0";
SET_MACHINE_COMPAT(m, PC_COMPAT_1_5);
}
@@ -572,6 +581,7 @@ DEFINE_I440FX_MACHINE(v1_5, "pc-i440fx-1.5", pc_compat_1_5,
static void pc_i440fx_1_4_machine_options(MachineClass *m)
{
pc_i440fx_1_5_machine_options(m);
+ m->hw_version = "1.4.0";
m->hot_add_cpu = NULL;
SET_MACHINE_COMPAT(m, PC_COMPAT_1_4);
}
@@ -604,6 +614,7 @@ DEFINE_I440FX_MACHINE(v1_4, "pc-i440fx-1.4", pc_compat_1_4,
static void pc_i440fx_1_3_machine_options(MachineClass *m)
{
pc_i440fx_1_4_machine_options(m);
+ m->hw_version = "1.3.0";
SET_MACHINE_COMPAT(m, PC_COMPAT_1_3);
}
@@ -642,6 +653,7 @@ DEFINE_I440FX_MACHINE(v1_3, "pc-1.3", pc_compat_1_3,
static void pc_i440fx_1_2_machine_options(MachineClass *m)
{
pc_i440fx_1_3_machine_options(m);
+ m->hw_version = "1.2.0";
SET_MACHINE_COMPAT(m, PC_COMPAT_1_2);
}
@@ -684,6 +696,7 @@ DEFINE_I440FX_MACHINE(v1_2, "pc-1.2", pc_compat_1_2,
static void pc_i440fx_1_1_machine_options(MachineClass *m)
{
pc_i440fx_1_2_machine_options(m);
+ m->hw_version = "1.1.0";
SET_MACHINE_COMPAT(m, PC_COMPAT_1_1);
}
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index 2f8f396..0fdae09 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -373,6 +373,7 @@ static void pc_q35_machine_options(MachineClass *m)
static void pc_q35_2_5_machine_options(MachineClass *m)
{
pc_q35_machine_options(m);
+ m->hw_version = QEMU_VERSION;
m->alias = "q35";
}
@@ -383,6 +384,7 @@ static void pc_q35_2_4_machine_options(MachineClass *m)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_q35_2_5_machine_options(m);
+ m->hw_version = "2.4.0";
m->alias = NULL;
pcmc->broken_reserved_end = true;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_4);
@@ -395,6 +397,7 @@ DEFINE_Q35_MACHINE(v2_4, "pc-q35-2.4", NULL,
static void pc_q35_2_3_machine_options(MachineClass *m)
{
pc_q35_2_4_machine_options(m);
+ m->hw_version = "2.3.0";
m->no_floppy = 0;
m->no_tco = 1;
m->alias = NULL;
@@ -408,6 +411,7 @@ DEFINE_Q35_MACHINE(v2_3, "pc-q35-2.3", pc_compat_2_3,
static void pc_q35_2_2_machine_options(MachineClass *m)
{
pc_q35_2_3_machine_options(m);
+ m->hw_version = "2.2.0";
SET_MACHINE_COMPAT(m, PC_COMPAT_2_2);
}
@@ -418,6 +422,7 @@ DEFINE_Q35_MACHINE(v2_2, "pc-q35-2.2", pc_compat_2_2,
static void pc_q35_2_1_machine_options(MachineClass *m)
{
pc_q35_2_2_machine_options(m);
+ m->hw_version = "2.1.0";
m->default_display = NULL;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_1);
}
@@ -429,6 +434,7 @@ DEFINE_Q35_MACHINE(v2_1, "pc-q35-2.1", pc_compat_2_1,
static void pc_q35_2_0_machine_options(MachineClass *m)
{
pc_q35_2_1_machine_options(m);
+ m->hw_version = "2.0.0";
SET_MACHINE_COMPAT(m, PC_COMPAT_2_0);
}
@@ -439,6 +445,7 @@ DEFINE_Q35_MACHINE(v2_0, "pc-q35-2.0", pc_compat_2_0,
static void pc_q35_1_7_machine_options(MachineClass *m)
{
pc_q35_2_0_machine_options(m);
+ m->hw_version = "1.7.0";
m->default_machine_opts = NULL;
SET_MACHINE_COMPAT(m, PC_COMPAT_1_7);
}
@@ -450,6 +457,7 @@ DEFINE_Q35_MACHINE(v1_7, "pc-q35-1.7", pc_compat_1_7,
static void pc_q35_1_6_machine_options(MachineClass *m)
{
pc_q35_machine_options(m);
+ m->hw_version = "1.6.0";
SET_MACHINE_COMPAT(m, PC_COMPAT_1_6);
}
@@ -460,6 +468,7 @@ DEFINE_Q35_MACHINE(v1_6, "pc-q35-1.6", pc_compat_1_6,
static void pc_q35_1_5_machine_options(MachineClass *m)
{
pc_q35_1_6_machine_options(m);
+ m->hw_version = "1.5.0";
SET_MACHINE_COMPAT(m, PC_COMPAT_1_5);
}
@@ -470,6 +479,7 @@ DEFINE_Q35_MACHINE(v1_5, "pc-q35-1.5", pc_compat_1_5,
static void pc_q35_1_4_machine_options(MachineClass *m)
{
pc_q35_1_5_machine_options(m);
+ m->hw_version = "1.4.0";
m->hot_add_cpu = NULL;
SET_MACHINE_COMPAT(m, PC_COMPAT_1_4);
}
diff --git a/hw/i386/pci-assign-load-rom.c b/hw/i386/pci-assign-load-rom.c
index 34a3a7e..e40b586 100644
--- a/hw/i386/pci-assign-load-rom.c
+++ b/hw/i386/pci-assign-load-rom.c
@@ -45,14 +45,10 @@ void *pci_assign_dev_load_option_rom(PCIDevice *dev, struct Object *owner,
return NULL;
}
- if (access(rom_file, F_OK)) {
- error_report("pci-assign: Insufficient privileges for %s", rom_file);
- return NULL;
- }
-
/* Write "1" to the ROM file to enable it */
fp = fopen(rom_file, "r+");
if (fp == NULL) {
+ error_report("pci-assign: Cannot open %s: %s", rom_file, strerror(errno));
return NULL;
}
val = 1;
diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
index 21f76ed..dd1912e 100644
--- a/hw/ide/ahci.c
+++ b/hw/ide/ahci.c
@@ -378,17 +378,23 @@ static uint64_t ahci_mem_read(void *opaque, hwaddr addr, unsigned size)
int ofst = addr - aligned;
uint64_t lo = ahci_mem_read_32(opaque, aligned);
uint64_t hi;
+ uint64_t val;
/* if < 8 byte read does not cross 4 byte boundary */
if (ofst + size <= 4) {
- return lo >> (ofst * 8);
+ val = lo >> (ofst * 8);
+ } else {
+ g_assert_cmpint(size, >, 1);
+
+ /* If the 64bit read is unaligned, we will produce undefined
+ * results. AHCI does not support unaligned 64bit reads. */
+ hi = ahci_mem_read_32(opaque, aligned + 4);
+ val = (hi << 32 | lo) >> (ofst * 8);
}
- g_assert_cmpint(size, >, 1);
- /* If the 64bit read is unaligned, we will produce undefined
- * results. AHCI does not support unaligned 64bit reads. */
- hi = ahci_mem_read_32(opaque, aligned + 4);
- return (hi << 32 | lo) >> (ofst * 8);
+ DPRINTF(-1, "addr=0x%" HWADDR_PRIx " val=0x%" PRIx64 ", size=%d\n",
+ addr, val, size);
+ return val;
}
@@ -397,6 +403,9 @@ static void ahci_mem_write(void *opaque, hwaddr addr,
{
AHCIState *s = opaque;
+ DPRINTF(-1, "addr=0x%" HWADDR_PRIx " val=0x%" PRIx64 ", size=%d\n",
+ addr, val, size);
+
/* Only aligned reads are allowed on AHCI */
if (addr & 3) {
fprintf(stderr, "ahci: Mis-aligned write to addr 0x"
@@ -804,8 +813,21 @@ static int prdt_tbl_entry_size(const AHCI_SG *tbl)
return (le32_to_cpu(tbl->flags_size) & AHCI_PRDT_SIZE_MASK) + 1;
}
+/**
+ * Fetch entries in a guest-provided PRDT and convert it into a QEMU SGlist.
+ * @ad: The AHCIDevice for whom we are building the SGList.
+ * @sglist: The SGList target to add PRD entries to.
+ * @cmd: The AHCI Command Header that describes where the PRDT is.
+ * @limit: The remaining size of the S/ATA transaction, in bytes.
+ * @offset: The number of bytes already transferred, in bytes.
+ *
+ * The AHCI PRDT can describe up to 256GiB. S/ATA only support transactions of
+ * up to 32MiB as of ATA8-ACS3 rev 1b, assuming a 512 byte sector size. We stop
+ * building the sglist from the PRDT as soon as we hit @limit bytes,
+ * which is <= INT32_MAX/2GiB.
+ */
static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,
- AHCICmdHdr *cmd, int64_t limit, int32_t offset)
+ AHCICmdHdr *cmd, int64_t limit, uint64_t offset)
{
uint16_t opts = le16_to_cpu(cmd->opts);
uint16_t prdtl = le16_to_cpu(cmd->prdtl);
@@ -823,14 +845,6 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,
IDEBus *bus = &ad->port;
BusState *qbus = BUS(bus);
- /*
- * Note: AHCI PRDT can describe up to 256GiB. SATA/ATA only support
- * transactions of up to 32MiB as of ATA8-ACS3 rev 1b, assuming a
- * 512 byte sector size. We limit the PRDT in this implementation to
- * a reasonably large 2GiB, which can accommodate the maximum transfer
- * request for sector sizes up to 32K.
- */
-
if (!prdtl) {
DPRINTF(ad->port_no, "no sg list given by guest: 0x%08x\n", opts);
return -1;
@@ -880,13 +894,6 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,
qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr),
MIN(prdt_tbl_entry_size(&tbl[i]),
limit - sglist->size));
- if (sglist->size > INT32_MAX) {
- error_report("AHCI Physical Region Descriptor Table describes "
- "more than 2 GiB.");
- qemu_sglist_destroy(sglist);
- r = -1;
- goto out;
- }
}
}
@@ -1427,24 +1434,26 @@ static const IDEDMAOps ahci_dma_ops = {
.cmd_done = ahci_cmd_done,
};
-void ahci_init(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports)
+void ahci_init(AHCIState *s, DeviceState *qdev)
{
- qemu_irq *irqs;
- int i;
-
- s->as = as;
- s->ports = ports;
- s->dev = g_new0(AHCIDevice, ports);
s->container = qdev;
- ahci_reg_init(s);
/* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */
memory_region_init_io(&s->mem, OBJECT(qdev), &ahci_mem_ops, s,
"ahci", AHCI_MEM_BAR_SIZE);
memory_region_init_io(&s->idp, OBJECT(qdev), &ahci_idp_ops, s,
"ahci-idp", 32);
+}
- irqs = qemu_allocate_irqs(ahci_irq_set, s, s->ports);
+void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports)
+{
+ qemu_irq *irqs;
+ int i;
+ s->as = as;
+ s->ports = ports;
+ s->dev = g_new0(AHCIDevice, ports);
+ ahci_reg_init(s);
+ irqs = qemu_allocate_irqs(ahci_irq_set, s, s->ports);
for (i = 0; i < s->ports; i++) {
AHCIDevice *ad = &s->dev[i];
@@ -1639,17 +1648,24 @@ static void sysbus_ahci_reset(DeviceState *dev)
ahci_reset(&s->ahci);
}
-static void sysbus_ahci_realize(DeviceState *dev, Error **errp)
+static void sysbus_ahci_init(Object *obj)
{
- SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
- SysbusAHCIState *s = SYSBUS_AHCI(dev);
+ SysbusAHCIState *s = SYSBUS_AHCI(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
- ahci_init(&s->ahci, dev, &address_space_memory, s->num_ports);
+ ahci_init(&s->ahci, DEVICE(obj));
sysbus_init_mmio(sbd, &s->ahci.mem);
sysbus_init_irq(sbd, &s->ahci.irq);
}
+static void sysbus_ahci_realize(DeviceState *dev, Error **errp)
+{
+ SysbusAHCIState *s = SYSBUS_AHCI(dev);
+
+ ahci_realize(&s->ahci, dev, &address_space_memory, s->num_ports);
+}
+
static Property sysbus_ahci_properties[] = {
DEFINE_PROP_UINT32("num-ports", SysbusAHCIState, num_ports, 1),
DEFINE_PROP_END_OF_LIST(),
@@ -1670,12 +1686,108 @@ static const TypeInfo sysbus_ahci_info = {
.name = TYPE_SYSBUS_AHCI,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(SysbusAHCIState),
+ .instance_init = sysbus_ahci_init,
.class_init = sysbus_ahci_class_init,
};
+#define ALLWINNER_AHCI_BISTAFR ((0xa0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_BISTCR ((0xa4 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_BISTFCTR ((0xa8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_BISTSR ((0xac - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_BISTDECR ((0xb0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_DIAGNR0 ((0xb4 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_DIAGNR1 ((0xb8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_OOBR ((0xbc - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_PHYCS0R ((0xc0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_PHYCS1R ((0xc4 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_PHYCS2R ((0xc8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_TIMER1MS ((0xe0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_GPARAM1R ((0xe8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_GPARAM2R ((0xec - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_PPARAMR ((0xf0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_TESTR ((0xf4 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_VERSIONR ((0xf8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_IDR ((0xfc - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_RWCR ((0xfc - ALLWINNER_AHCI_MMIO_OFF) / 4)
+
+static uint64_t allwinner_ahci_mem_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ AllwinnerAHCIState *a = opaque;
+ uint64_t val = a->regs[addr/4];
+
+ switch (addr / 4) {
+ case ALLWINNER_AHCI_PHYCS0R:
+ val |= 0x2 << 28;
+ break;
+ case ALLWINNER_AHCI_PHYCS2R:
+ val &= ~(0x1 << 24);
+ break;
+ }
+ DPRINTF(-1, "addr=0x%" HWADDR_PRIx " val=0x%" PRIx64 ", size=%d\n",
+ addr, val, size);
+ return val;
+}
+
+static void allwinner_ahci_mem_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ AllwinnerAHCIState *a = opaque;
+
+ DPRINTF(-1, "addr=0x%" HWADDR_PRIx " val=0x%" PRIx64 ", size=%d\n",
+ addr, val, size);
+ a->regs[addr/4] = val;
+}
+
+static const MemoryRegionOps allwinner_ahci_mem_ops = {
+ .read = allwinner_ahci_mem_read,
+ .write = allwinner_ahci_mem_write,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void allwinner_ahci_init(Object *obj)
+{
+ SysbusAHCIState *s = SYSBUS_AHCI(obj);
+ AllwinnerAHCIState *a = ALLWINNER_AHCI(obj);
+
+ memory_region_init_io(&a->mmio, OBJECT(obj), &allwinner_ahci_mem_ops, a,
+ "allwinner-ahci", ALLWINNER_AHCI_MMIO_SIZE);
+ memory_region_add_subregion(&s->ahci.mem, ALLWINNER_AHCI_MMIO_OFF,
+ &a->mmio);
+}
+
+static const VMStateDescription vmstate_allwinner_ahci = {
+ .name = "allwinner-ahci",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, AllwinnerAHCIState,
+ ALLWINNER_AHCI_MMIO_SIZE/4),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void allwinner_ahci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->vmsd = &vmstate_allwinner_ahci;
+}
+
+static const TypeInfo allwinner_ahci_info = {
+ .name = TYPE_ALLWINNER_AHCI,
+ .parent = TYPE_SYSBUS_AHCI,
+ .instance_size = sizeof(AllwinnerAHCIState),
+ .instance_init = allwinner_ahci_init,
+ .class_init = allwinner_ahci_class_init,
+};
+
static void sysbus_ahci_register_types(void)
{
type_register_static(&sysbus_ahci_info);
+ type_register_static(&allwinner_ahci_info);
}
type_init(sysbus_ahci_register_types)
diff --git a/hw/ide/ahci.h b/hw/ide/ahci.h
index c9b3805..bc777ed 100644
--- a/hw/ide/ahci.h
+++ b/hw/ide/ahci.h
@@ -366,7 +366,8 @@ typedef struct SDBFIS {
uint32_t payload;
} QEMU_PACKED SDBFIS;
-void ahci_init(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports);
+void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports);
+void ahci_init(AHCIState *s, DeviceState *qdev);
void ahci_uninit(AHCIState *s);
void ahci_reset(AHCIState *s);
@@ -385,4 +386,20 @@ typedef struct SysbusAHCIState {
uint32_t num_ports;
} SysbusAHCIState;
+#define TYPE_ALLWINNER_AHCI "allwinner-ahci"
+#define ALLWINNER_AHCI(obj) OBJECT_CHECK(AllwinnerAHCIState, (obj), \
+ TYPE_ALLWINNER_AHCI)
+
+#define ALLWINNER_AHCI_MMIO_OFF 0x80
+#define ALLWINNER_AHCI_MMIO_SIZE 0x80
+
+struct AllwinnerAHCIState {
+ /*< private >*/
+ SysbusAHCIState parent_obj;
+ /*< public >*/
+
+ MemoryRegion mmio;
+ uint32_t regs[ALLWINNER_AHCI_MMIO_SIZE/4];
+};
+
#endif /* HW_IDE_AHCI_H */
diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c
index 747f466..ed8bb2c 100644
--- a/hw/ide/atapi.c
+++ b/hw/ide/atapi.c
@@ -108,27 +108,30 @@ static void cd_data_to_raw(uint8_t *buf, int lba)
static int cd_read_sector(IDEState *s, int lba, uint8_t *buf, int sector_size)
{
int ret;
+ block_acct_start(blk_get_stats(s->blk), &s->acct,
+ 4 * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
switch(sector_size) {
case 2048:
- block_acct_start(blk_get_stats(s->blk), &s->acct,
- 4 * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
ret = blk_read(s->blk, (int64_t)lba << 2, buf, 4);
- block_acct_done(blk_get_stats(s->blk), &s->acct);
break;
case 2352:
- block_acct_start(blk_get_stats(s->blk), &s->acct,
- 4 * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
ret = blk_read(s->blk, (int64_t)lba << 2, buf + 16, 4);
- block_acct_done(blk_get_stats(s->blk), &s->acct);
- if (ret < 0)
- return ret;
- cd_data_to_raw(buf, lba);
+ if (ret >= 0) {
+ cd_data_to_raw(buf, lba);
+ }
break;
default:
- ret = -EIO;
- break;
+ block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
+ return -EIO;
+ }
+
+ if (ret < 0) {
+ block_acct_failed(blk_get_stats(s->blk), &s->acct);
+ } else {
+ block_acct_done(blk_get_stats(s->blk), &s->acct);
}
+
return ret;
}
@@ -167,6 +170,17 @@ void ide_atapi_io_error(IDEState *s, int ret)
}
}
+static uint16_t atapi_byte_count_limit(IDEState *s)
+{
+ uint16_t bcl;
+
+ bcl = s->lcyl | (s->hcyl << 8);
+ if (bcl == 0xffff) {
+ return 0xfffe;
+ }
+ return bcl;
+}
+
/* The whole ATAPI transfer logic is handled in this function */
void ide_atapi_cmd_reply_end(IDEState *s)
{
@@ -209,12 +223,10 @@ void ide_atapi_cmd_reply_end(IDEState *s)
} else {
/* a new transfer is needed */
s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO;
- byte_count_limit = s->lcyl | (s->hcyl << 8);
+ byte_count_limit = atapi_byte_count_limit(s);
#ifdef DEBUG_IDE_ATAPI
printf("byte_count_limit=%d\n", byte_count_limit);
#endif
- if (byte_count_limit == 0xffff)
- byte_count_limit--;
size = s->packet_transfer_size;
if (size > byte_count_limit) {
/* byte count limit must be even if this case */
@@ -357,7 +369,11 @@ static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret)
return;
eot:
- block_acct_done(blk_get_stats(s->blk), &s->acct);
+ if (ret < 0) {
+ block_acct_failed(blk_get_stats(s->blk), &s->acct);
+ } else {
+ block_acct_done(blk_get_stats(s->blk), &s->acct);
+ }
ide_set_inactive(s, false);
}
@@ -1179,7 +1195,7 @@ enum {
NONDATA = 0x04,
};
-static const struct {
+static const struct AtapiCmd {
void (*handler)(IDEState *s, uint8_t *buf);
int flags;
} atapi_cmd_table[0x100] = {
@@ -1206,9 +1222,9 @@ static const struct {
void ide_atapi_cmd(IDEState *s)
{
- uint8_t *buf;
+ uint8_t *buf = s->io_buffer;
+ const struct AtapiCmd *cmd = &atapi_cmd_table[s->io_buffer[0]];
- buf = s->io_buffer;
#ifdef DEBUG_IDE_ATAPI
{
int i;
@@ -1219,14 +1235,14 @@ void ide_atapi_cmd(IDEState *s)
printf("\n");
}
#endif
+
/*
* If there's a UNIT_ATTENTION condition pending, only command flagged with
* ALLOW_UA are allowed to complete. with other commands getting a CHECK
* condition response unless a higher priority status, defined by the drive
* here, is pending.
*/
- if (s->sense_key == UNIT_ATTENTION &&
- !(atapi_cmd_table[s->io_buffer[0]].flags & ALLOW_UA)) {
+ if (s->sense_key == UNIT_ATTENTION && !(cmd->flags & ALLOW_UA)) {
ide_atapi_cmd_check_status(s);
return;
}
@@ -1237,7 +1253,7 @@ void ide_atapi_cmd(IDEState *s)
* GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
* states rely on this behavior.
*/
- if (!(atapi_cmd_table[s->io_buffer[0]].flags & ALLOW_UA) &&
+ if (!(cmd->flags & ALLOW_UA) &&
!s->tray_open && blk_is_inserted(s->blk) && s->cdrom_changed) {
if (s->cdrom_changed == 1) {
@@ -1252,7 +1268,7 @@ void ide_atapi_cmd(IDEState *s)
}
/* Report a Not Ready condition if appropriate for the command */
- if ((atapi_cmd_table[s->io_buffer[0]].flags & CHECK_READY) &&
+ if ((cmd->flags & CHECK_READY) &&
(!media_present(s) || !blk_is_inserted(s->blk)))
{
ide_atapi_cmd_error(s, NOT_READY, ASC_MEDIUM_NOT_PRESENT);
@@ -1263,10 +1279,9 @@ void ide_atapi_cmd(IDEState *s)
* If this is a data-transferring PIO command and BCL is 0,
* we abort at the /ATA/ level, not the ATAPI level.
* See ATA8 ACS3 section 7.17.6.49 and 7.21.5 */
- if (!(atapi_cmd_table[s->io_buffer[0]].flags & NONDATA)) {
+ if (cmd->handler && !(cmd->flags & NONDATA)) {
/* TODO: Check IDENTIFY data word 125 for default BCL (currently 0) */
- uint16_t byte_count_limit = s->lcyl | (s->hcyl << 8);
- if (!(byte_count_limit || s->atapi_dma)) {
+ if (!(atapi_byte_count_limit(s) || s->atapi_dma)) {
/* TODO: Move abort back into core.c and make static inline again */
ide_abort_command(s);
return;
@@ -1274,8 +1289,8 @@ void ide_atapi_cmd(IDEState *s)
}
/* Execute the command */
- if (atapi_cmd_table[s->io_buffer[0]].handler) {
- atapi_cmd_table[s->io_buffer[0]].handler(s, buf);
+ if (cmd->handler) {
+ cmd->handler(s, buf);
return;
}
diff --git a/hw/ide/core.c b/hw/ide/core.c
index 317406d..2725dd3 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -574,7 +574,6 @@ static void ide_sector_read_cb(void *opaque, int ret)
if (ret == -ECANCELED) {
return;
}
- block_acct_done(blk_get_stats(s->blk), &s->acct);
if (ret != 0) {
if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
IDE_RETRY_READ)) {
@@ -582,6 +581,8 @@ static void ide_sector_read_cb(void *opaque, int ret)
}
}
+ block_acct_done(blk_get_stats(s->blk), &s->acct);
+
n = s->nsector;
if (n > s->req_nb_sectors) {
n = s->req_nb_sectors;
@@ -621,6 +622,7 @@ static void ide_sector_read(IDEState *s)
if (!ide_sect_range_ok(s, sector_num, n)) {
ide_rw_error(s);
+ block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
return;
}
@@ -672,6 +674,7 @@ static int ide_handle_rw_error(IDEState *s, int error, int op)
assert(s->bus->retry_unit == s->unit);
s->bus->error_status = op;
} else if (action == BLOCK_ERROR_ACTION_REPORT) {
+ block_acct_failed(blk_get_stats(s->blk), &s->acct);
if (op & IDE_RETRY_DMA) {
ide_dma_error(s);
} else {
@@ -750,6 +753,7 @@ static void ide_dma_cb(void *opaque, int ret)
if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
!ide_sect_range_ok(s, sector_num, n)) {
ide_dma_error(s);
+ block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
return;
}
@@ -826,7 +830,6 @@ static void ide_sector_write_cb(void *opaque, int ret)
if (ret == -ECANCELED) {
return;
}
- block_acct_done(blk_get_stats(s->blk), &s->acct);
s->pio_aiocb = NULL;
s->status &= ~BUSY_STAT;
@@ -837,6 +840,8 @@ static void ide_sector_write_cb(void *opaque, int ret)
}
}
+ block_acct_done(blk_get_stats(s->blk), &s->acct);
+
n = s->nsector;
if (n > s->req_nb_sectors) {
n = s->req_nb_sectors;
@@ -887,6 +892,7 @@ static void ide_sector_write(IDEState *s)
if (!ide_sect_range_ok(s, sector_num, n)) {
ide_rw_error(s);
+ block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
return;
}
@@ -895,7 +901,7 @@ static void ide_sector_write(IDEState *s)
qemu_iovec_init_external(&s->qiov, &s->iov, 1);
block_acct_start(blk_get_stats(s->blk), &s->acct,
- n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
+ n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
s->pio_aiocb = blk_aio_writev(s->blk, sector_num, &s->qiov, n,
ide_sector_write_cb, s);
}
@@ -2312,7 +2318,7 @@ int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
if (version) {
pstrcpy(s->version, sizeof(s->version), version);
} else {
- pstrcpy(s->version, sizeof(s->version), qemu_get_version());
+ pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
}
ide_reset(s);
diff --git a/hw/ide/ich.c b/hw/ide/ich.c
index 350c7f1..16925fa 100644
--- a/hw/ide/ich.c
+++ b/hw/ide/ich.c
@@ -97,6 +97,13 @@ static void pci_ich9_reset(DeviceState *dev)
ahci_reset(&d->ahci);
}
+static void pci_ich9_ahci_init(Object *obj)
+{
+ struct AHCIPCIState *d = ICH_AHCI(obj);
+
+ ahci_init(&d->ahci, DEVICE(obj));
+}
+
static void pci_ich9_ahci_realize(PCIDevice *dev, Error **errp)
{
struct AHCIPCIState *d;
@@ -104,7 +111,7 @@ static void pci_ich9_ahci_realize(PCIDevice *dev, Error **errp)
uint8_t *sata_cap;
d = ICH_AHCI(dev);
- ahci_init(&d->ahci, DEVICE(dev), pci_get_address_space(dev), 6);
+ ahci_realize(&d->ahci, DEVICE(dev), pci_get_address_space(dev), 6);
pci_config_set_prog_interface(dev->config, AHCI_PROGMODE_MAJOR_REV_1);
@@ -171,6 +178,7 @@ static const TypeInfo ich_ahci_info = {
.name = TYPE_ICH9_AHCI,
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(AHCIPCIState),
+ .instance_init = pci_ich9_ahci_init,
.class_init = ich_ahci_class_init,
};
diff --git a/hw/ide/internal.h b/hw/ide/internal.h
index 05e93ff..e4629b0 100644
--- a/hw/ide/internal.h
+++ b/hw/ide/internal.h
@@ -397,7 +397,7 @@ struct IDEState {
struct iovec iov;
QEMUIOVector qiov;
/* ATA DMA state */
- int32_t io_buffer_offset;
+ uint64_t io_buffer_offset;
int32_t io_buffer_size;
QEMUSGList sg;
/* PIO transfer handling */
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
index 893c9b9..3ee962f 100644
--- a/hw/ide/macio.c
+++ b/hw/ide/macio.c
@@ -286,7 +286,11 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
return;
done:
- block_acct_done(blk_get_stats(s->blk), &s->acct);
+ if (ret < 0) {
+ block_acct_failed(blk_get_stats(s->blk), &s->acct);
+ } else {
+ block_acct_done(blk_get_stats(s->blk), &s->acct);
+ }
io->dma_end(opaque);
return;
@@ -348,7 +352,11 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
done:
if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
- block_acct_done(blk_get_stats(s->blk), &s->acct);
+ if (ret < 0) {
+ block_acct_failed(blk_get_stats(s->blk), &s->acct);
+ } else {
+ block_acct_done(blk_get_stats(s->blk), &s->acct);
+ }
}
io->dma_end(opaque);
}
diff --git a/hw/ide/pci.c b/hw/ide/pci.c
index d31ff88..9c54b37 100644
--- a/hw/ide/pci.c
+++ b/hw/ide/pci.c
@@ -103,13 +103,6 @@ static int32_t bmdma_prepare_buf(IDEDMA *dma, int32_t limit)
qemu_sglist_add(&s->sg, bm->cur_prd_addr, sg_len);
}
- /* Note: We limit the max transfer to be 2GiB.
- * This should accommodate the largest ATA transaction
- * for LBA48 (65,536 sectors) and 32K sector sizes. */
- if (s->sg.size > INT32_MAX) {
- error_report("IDE: sglist describes more than 2GiB.");
- break;
- }
bm->cur_prd_addr += l;
bm->cur_prd_len -= l;
s->io_buffer_size += l;
diff --git a/hw/input/tsc210x.c b/hw/input/tsc210x.c
index fae3385..1073bbf 100644
--- a/hw/input/tsc210x.c
+++ b/hw/input/tsc210x.c
@@ -1086,9 +1086,7 @@ uWireSlave *tsc2102_init(qemu_irq pint)
{
TSC210xState *s;
- s = (TSC210xState *)
- g_malloc0(sizeof(TSC210xState));
- memset(s, 0, sizeof(TSC210xState));
+ s = g_new0(TSC210xState, 1);
s->x = 160;
s->y = 160;
s->pressure = 0;
@@ -1135,9 +1133,7 @@ uWireSlave *tsc2301_init(qemu_irq penirq, qemu_irq kbirq, qemu_irq dav)
{
TSC210xState *s;
- s = (TSC210xState *)
- g_malloc0(sizeof(TSC210xState));
- memset(s, 0, sizeof(TSC210xState));
+ s = g_new0(TSC210xState, 1);
s->x = 400;
s->y = 240;
s->pressure = 0;
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index 8bad132..d71aeb8 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -35,8 +35,6 @@ static const uint8_t gic_id[] = {
0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1
};
-#define NUM_CPU(s) ((s)->num_cpu)
-
static inline int gic_get_current_cpu(GICState *s)
{
if (s->num_cpu > 1) {
@@ -64,7 +62,7 @@ void gic_update(GICState *s)
int cpu;
int cm;
- for (cpu = 0; cpu < NUM_CPU(s); cpu++) {
+ for (cpu = 0; cpu < s->num_cpu; cpu++) {
cm = 1 << cpu;
s->current_pending[cpu] = 1023;
if (!(s->ctlr & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1))
@@ -567,7 +565,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
if (offset == 4)
/* Interrupt Controller Type Register */
return ((s->num_irq / 32) - 1)
- | ((NUM_CPU(s) - 1) << 5)
+ | ((s->num_cpu - 1) << 5)
| (s->security_extn << 10);
if (offset < 0x08)
return 0;
@@ -1284,7 +1282,7 @@ static void arm_gic_realize(DeviceState *dev, Error **errp)
* GIC v2 defines a larger memory region (0x1000) so this will need
* to be extended when we implement A15.
*/
- for (i = 0; i < NUM_CPU(s); i++) {
+ for (i = 0; i < s->num_cpu; i++) {
s->backref[i] = s;
memory_region_init_io(&s->cpuiomem[i+1], OBJECT(s), &gic_cpu_ops,
&s->backref[i], "gic_cpu", 0x100);
diff --git a/hw/misc/Makefile.objs b/hw/misc/Makefile.objs
index 4aa76ff..aeb6b7d 100644
--- a/hw/misc/Makefile.objs
+++ b/hw/misc/Makefile.objs
@@ -36,6 +36,7 @@ obj-$(CONFIG_OMAP) += omap_sdrc.o
obj-$(CONFIG_OMAP) += omap_tap.o
obj-$(CONFIG_SLAVIO) += slavio_misc.o
obj-$(CONFIG_ZYNQ) += zynq_slcr.o
+obj-$(CONFIG_ZYNQ) += zynq-xadc.o
obj-$(CONFIG_STM32F2XX_SYSCFG) += stm32f2xx_syscfg.o
obj-$(CONFIG_PVPANIC) += pvpanic.o
diff --git a/hw/misc/macio/cuda.c b/hw/misc/macio/cuda.c
index 0fd75b3..9db4c64 100644
--- a/hw/misc/macio/cuda.c
+++ b/hw/misc/macio/cuda.c
@@ -57,6 +57,8 @@
#define IER_SET 0x80 /* set bits in IER */
#define IER_CLR 0 /* clear bits in IER */
#define SR_INT 0x04 /* Shift register full/empty */
+#define SR_DATA_INT 0x08
+#define SR_CLOCK_INT 0x10
#define T1_INT 0x40 /* Timer 1 interrupt */
#define T2_INT 0x20 /* Timer 2 interrupt */
@@ -108,6 +110,24 @@
/* CUDA returns time_t's offset from Jan 1, 1904, not 1970 */
#define RTC_OFFSET 2082844800
+/* CUDA registers */
+#define CUDA_REG_B 0x00
+#define CUDA_REG_A 0x01
+#define CUDA_REG_DIRB 0x02
+#define CUDA_REG_DIRA 0x03
+#define CUDA_REG_T1CL 0x04
+#define CUDA_REG_T1CH 0x05
+#define CUDA_REG_T1LL 0x06
+#define CUDA_REG_T1LH 0x07
+#define CUDA_REG_T2CL 0x08
+#define CUDA_REG_T2CH 0x09
+#define CUDA_REG_SR 0x0a
+#define CUDA_REG_ACR 0x0b
+#define CUDA_REG_PCR 0x0c
+#define CUDA_REG_IFR 0x0d
+#define CUDA_REG_IER 0x0e
+#define CUDA_REG_ANH 0x0f
+
static void cuda_update(CUDAState *s);
static void cuda_receive_packet_from_host(CUDAState *s,
const uint8_t *data, int len);
@@ -116,47 +136,48 @@ static void cuda_timer_update(CUDAState *s, CUDATimer *ti,
static void cuda_update_irq(CUDAState *s)
{
- if (s->ifr & s->ier & (SR_INT | T1_INT)) {
+ if (s->ifr & s->ier & (SR_INT | T1_INT | T2_INT)) {
qemu_irq_raise(s->irq);
} else {
qemu_irq_lower(s->irq);
}
}
-static uint64_t get_tb(uint64_t freq)
+static uint64_t get_tb(uint64_t time, uint64_t freq)
{
- return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
- freq, get_ticks_per_sec());
+ return muldiv64(time, freq, get_ticks_per_sec());
}
-static unsigned int get_counter(CUDATimer *s)
+static unsigned int get_counter(CUDATimer *ti)
{
int64_t d;
unsigned int counter;
uint64_t tb_diff;
+ uint64_t current_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
/* Reverse of the tb calculation algorithm that Mac OS X uses on bootup. */
- tb_diff = get_tb(s->frequency) - s->load_time;
- d = (tb_diff * 0xBF401675E5DULL) / (s->frequency << 24);
+ tb_diff = get_tb(current_time, ti->frequency) - ti->load_time;
+ d = (tb_diff * 0xBF401675E5DULL) / (ti->frequency << 24);
- if (s->index == 0) {
+ if (ti->index == 0) {
/* the timer goes down from latch to -1 (period of latch + 2) */
- if (d <= (s->counter_value + 1)) {
- counter = (s->counter_value - d) & 0xffff;
+ if (d <= (ti->counter_value + 1)) {
+ counter = (ti->counter_value - d) & 0xffff;
} else {
- counter = (d - (s->counter_value + 1)) % (s->latch + 2);
- counter = (s->latch - counter) & 0xffff;
+ counter = (d - (ti->counter_value + 1)) % (ti->latch + 2);
+ counter = (ti->latch - counter) & 0xffff;
}
} else {
- counter = (s->counter_value - d) & 0xffff;
+ counter = (ti->counter_value - d) & 0xffff;
}
return counter;
}
static void set_counter(CUDAState *s, CUDATimer *ti, unsigned int val)
{
- CUDA_DPRINTF("T%d.counter=%d\n", 1 + (ti->timer == NULL), val);
- ti->load_time = get_tb(s->frequency);
+ CUDA_DPRINTF("T%d.counter=%d\n", 1 + ti->index, val);
+ ti->load_time = get_tb(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ s->frequency);
ti->counter_value = val;
cuda_timer_update(s, ti, ti->load_time);
}
@@ -199,7 +220,7 @@ static void cuda_timer_update(CUDAState *s, CUDATimer *ti,
{
if (!ti->timer)
return;
- if ((s->acr & T1MODE) != T1MODE_CONT) {
+ if (ti->index == 0 && (s->acr & T1MODE) != T1MODE_CONT) {
timer_del(ti->timer);
} else {
ti->next_irq_time = get_next_irq_time(ti, current_time);
@@ -217,6 +238,41 @@ static void cuda_timer1(void *opaque)
cuda_update_irq(s);
}
+static void cuda_timer2(void *opaque)
+{
+ CUDAState *s = opaque;
+ CUDATimer *ti = &s->timers[1];
+
+ cuda_timer_update(s, ti, ti->next_irq_time);
+ s->ifr |= T2_INT;
+ cuda_update_irq(s);
+}
+
+static void cuda_set_sr_int(void *opaque)
+{
+ CUDAState *s = opaque;
+
+ CUDA_DPRINTF("CUDA: %s:%d\n", __func__, __LINE__);
+ s->ifr |= SR_INT;
+ cuda_update_irq(s);
+}
+
+static void cuda_delay_set_sr_int(CUDAState *s)
+{
+ int64_t expire;
+
+ if (s->dirb == 0xff) {
+ /* Not in Mac OS, fire the IRQ directly */
+ cuda_set_sr_int(s);
+ return;
+ }
+
+ CUDA_DPRINTF("CUDA: %s:%d\n", __func__, __LINE__);
+
+ expire = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 300 * SCALE_US;
+ timer_mod(s->sr_delay_timer, expire);
+}
+
static uint32_t cuda_readb(void *opaque, hwaddr addr)
{
CUDAState *s = opaque;
@@ -224,66 +280,68 @@ static uint32_t cuda_readb(void *opaque, hwaddr addr)
addr = (addr >> 9) & 0xf;
switch(addr) {
- case 0:
+ case CUDA_REG_B:
val = s->b;
break;
- case 1:
+ case CUDA_REG_A:
val = s->a;
break;
- case 2:
+ case CUDA_REG_DIRB:
val = s->dirb;
break;
- case 3:
+ case CUDA_REG_DIRA:
val = s->dira;
break;
- case 4:
+ case CUDA_REG_T1CL:
val = get_counter(&s->timers[0]) & 0xff;
s->ifr &= ~T1_INT;
cuda_update_irq(s);
break;
- case 5:
+ case CUDA_REG_T1CH:
val = get_counter(&s->timers[0]) >> 8;
cuda_update_irq(s);
break;
- case 6:
+ case CUDA_REG_T1LL:
val = s->timers[0].latch & 0xff;
break;
- case 7:
+ case CUDA_REG_T1LH:
/* XXX: check this */
val = (s->timers[0].latch >> 8) & 0xff;
break;
- case 8:
+ case CUDA_REG_T2CL:
val = get_counter(&s->timers[1]) & 0xff;
s->ifr &= ~T2_INT;
+ cuda_update_irq(s);
break;
- case 9:
+ case CUDA_REG_T2CH:
val = get_counter(&s->timers[1]) >> 8;
break;
- case 10:
+ case CUDA_REG_SR:
val = s->sr;
- s->ifr &= ~SR_INT;
+ s->ifr &= ~(SR_INT | SR_CLOCK_INT | SR_DATA_INT);
cuda_update_irq(s);
break;
- case 11:
+ case CUDA_REG_ACR:
val = s->acr;
break;
- case 12:
+ case CUDA_REG_PCR:
val = s->pcr;
break;
- case 13:
+ case CUDA_REG_IFR:
val = s->ifr;
- if (s->ifr & s->ier)
+ if (s->ifr & s->ier) {
val |= 0x80;
+ }
break;
- case 14:
+ case CUDA_REG_IER:
val = s->ier | 0x80;
break;
default:
- case 15:
+ case CUDA_REG_ANH:
val = s->anh;
break;
}
- if (addr != 13 || val != 0) {
+ if (addr != CUDA_REG_IFR || val != 0) {
CUDA_DPRINTF("read: reg=0x%x val=%02x\n", (int)addr, val);
}
@@ -298,61 +356,65 @@ static void cuda_writeb(void *opaque, hwaddr addr, uint32_t val)
CUDA_DPRINTF("write: reg=0x%x val=%02x\n", (int)addr, val);
switch(addr) {
- case 0:
+ case CUDA_REG_B:
s->b = val;
cuda_update(s);
break;
- case 1:
+ case CUDA_REG_A:
s->a = val;
break;
- case 2:
+ case CUDA_REG_DIRB:
s->dirb = val;
break;
- case 3:
+ case CUDA_REG_DIRA:
s->dira = val;
break;
- case 4:
+ case CUDA_REG_T1CL:
s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
break;
- case 5:
+ case CUDA_REG_T1CH:
s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
s->ifr &= ~T1_INT;
set_counter(s, &s->timers[0], s->timers[0].latch);
break;
- case 6:
+ case CUDA_REG_T1LL:
s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
break;
- case 7:
+ case CUDA_REG_T1LH:
s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
s->ifr &= ~T1_INT;
cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
break;
- case 8:
- s->timers[1].latch = val;
- set_counter(s, &s->timers[1], val);
+ case CUDA_REG_T2CL:
+ s->timers[1].latch = (s->timers[1].latch & 0xff00) | val;
break;
- case 9:
- set_counter(s, &s->timers[1], (val << 8) | s->timers[1].latch);
+ case CUDA_REG_T2CH:
+ /* To ensure T2 generates an interrupt on zero crossing with the
+ common timer code, write the value directly from the latch to
+ the counter */
+ s->timers[1].latch = (s->timers[1].latch & 0xff) | (val << 8);
+ s->ifr &= ~T2_INT;
+ set_counter(s, &s->timers[1], s->timers[1].latch);
break;
- case 10:
+ case CUDA_REG_SR:
s->sr = val;
break;
- case 11:
+ case CUDA_REG_ACR:
s->acr = val;
cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
cuda_update(s);
break;
- case 12:
+ case CUDA_REG_PCR:
s->pcr = val;
break;
- case 13:
+ case CUDA_REG_IFR:
/* reset bits */
s->ifr &= ~val;
cuda_update_irq(s);
break;
- case 14:
+ case CUDA_REG_IER:
if (val & IER_SET) {
/* set bits */
s->ier |= val & 0x7f;
@@ -363,7 +425,7 @@ static void cuda_writeb(void *opaque, hwaddr addr, uint32_t val)
cuda_update_irq(s);
break;
default:
- case 15:
+ case CUDA_REG_ANH:
s->anh = val;
break;
}
@@ -384,8 +446,7 @@ static void cuda_update(CUDAState *s)
if (s->data_out_index < sizeof(s->data_out)) {
CUDA_DPRINTF("send: %02x\n", s->sr);
s->data_out[s->data_out_index++] = s->sr;
- s->ifr |= SR_INT;
- cuda_update_irq(s);
+ cuda_delay_set_sr_int(s);
}
}
} else {
@@ -398,8 +459,7 @@ static void cuda_update(CUDAState *s)
if (s->data_in_index >= s->data_in_size) {
s->b = (s->b | TREQ);
}
- s->ifr |= SR_INT;
- cuda_update_irq(s);
+ cuda_delay_set_sr_int(s);
}
}
}
@@ -411,15 +471,13 @@ static void cuda_update(CUDAState *s)
s->b = (s->b | TREQ);
else
s->b = (s->b & ~TREQ);
- s->ifr |= SR_INT;
- cuda_update_irq(s);
+ cuda_delay_set_sr_int(s);
} else {
if (!(s->last_b & TIP)) {
/* handle end of host to cuda transfer */
packet_received = (s->data_out_index > 0);
/* always an IRQ at the end of transfer */
- s->ifr |= SR_INT;
- cuda_update_irq(s);
+ cuda_delay_set_sr_int(s);
}
/* signal if there is data to read */
if (s->data_in_index < s->data_in_size) {
@@ -456,8 +514,7 @@ static void cuda_send_packet_to_host(CUDAState *s,
s->data_in_size = len;
s->data_in_index = 0;
cuda_update(s);
- s->ifr |= SR_INT;
- cuda_update_irq(s);
+ cuda_delay_set_sr_int(s);
}
static void cuda_adb_poll(void *opaque)
@@ -480,7 +537,7 @@ static void cuda_adb_poll(void *opaque)
static void cuda_receive_packet(CUDAState *s,
const uint8_t *data, int len)
{
- uint8_t obuf[16];
+ uint8_t obuf[16] = { CUDA_PACKET, 0, data[0] };
int autopoll;
uint32_t ti;
@@ -497,23 +554,18 @@ static void cuda_receive_packet(CUDAState *s,
timer_del(s->adb_poll_timer);
}
}
- obuf[0] = CUDA_PACKET;
- obuf[1] = data[1];
- cuda_send_packet_to_host(s, obuf, 2);
+ cuda_send_packet_to_host(s, obuf, 3);
+ break;
+ case CUDA_GET_6805_ADDR:
+ cuda_send_packet_to_host(s, obuf, 3);
break;
case CUDA_SET_TIME:
ti = (((uint32_t)data[1]) << 24) + (((uint32_t)data[2]) << 16) + (((uint32_t)data[3]) << 8) + data[4];
s->tick_offset = ti - (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / get_ticks_per_sec());
- obuf[0] = CUDA_PACKET;
- obuf[1] = 0;
- obuf[2] = 0;
cuda_send_packet_to_host(s, obuf, 3);
break;
case CUDA_GET_TIME:
ti = s->tick_offset + (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / get_ticks_per_sec());
- obuf[0] = CUDA_PACKET;
- obuf[1] = 0;
- obuf[2] = 0;
obuf[3] = ti >> 24;
obuf[4] = ti >> 16;
obuf[5] = ti >> 8;
@@ -524,22 +576,34 @@ static void cuda_receive_packet(CUDAState *s,
case CUDA_SET_DEVICE_LIST:
case CUDA_SET_AUTO_RATE:
case CUDA_SET_POWER_MESSAGES:
- obuf[0] = CUDA_PACKET;
- obuf[1] = 0;
- cuda_send_packet_to_host(s, obuf, 2);
+ cuda_send_packet_to_host(s, obuf, 3);
break;
case CUDA_POWERDOWN:
- obuf[0] = CUDA_PACKET;
- obuf[1] = 0;
- cuda_send_packet_to_host(s, obuf, 2);
+ cuda_send_packet_to_host(s, obuf, 3);
qemu_system_shutdown_request();
break;
case CUDA_RESET_SYSTEM:
- obuf[0] = CUDA_PACKET;
- obuf[1] = 0;
- cuda_send_packet_to_host(s, obuf, 2);
+ cuda_send_packet_to_host(s, obuf, 3);
qemu_system_reset_request();
break;
+ case CUDA_COMBINED_FORMAT_IIC:
+ obuf[0] = ERROR_PACKET;
+ obuf[1] = 0x5;
+ obuf[2] = CUDA_PACKET;
+ obuf[3] = data[0];
+ cuda_send_packet_to_host(s, obuf, 4);
+ break;
+ case CUDA_GET_SET_IIC:
+ if (len == 4) {
+ cuda_send_packet_to_host(s, obuf, 3);
+ } else {
+ obuf[0] = ERROR_PACKET;
+ obuf[1] = 0x2;
+ obuf[2] = CUDA_PACKET;
+ obuf[3] = data[0];
+ cuda_send_packet_to_host(s, obuf, 4);
+ }
+ break;
default:
break;
}
@@ -560,19 +624,21 @@ static void cuda_receive_packet_from_host(CUDAState *s,
switch(data[0]) {
case ADB_PACKET:
{
- uint8_t obuf[ADB_MAX_OUT_LEN + 2];
+ uint8_t obuf[ADB_MAX_OUT_LEN + 3];
int olen;
olen = adb_request(&s->adb_bus, obuf + 2, data + 1, len - 1);
if (olen > 0) {
obuf[0] = ADB_PACKET;
obuf[1] = 0x00;
+ cuda_send_packet_to_host(s, obuf, olen + 2);
} else {
/* error */
obuf[0] = ADB_PACKET;
obuf[1] = -olen;
+ obuf[2] = data[1];
olen = 0;
+ cuda_send_packet_to_host(s, obuf, olen + 3);
}
- cuda_send_packet_to_host(s, obuf, olen + 2);
}
break;
case CUDA_PACKET:
@@ -671,7 +737,7 @@ static void cuda_reset(DeviceState *dev)
s->b = 0;
s->a = 0;
- s->dirb = 0;
+ s->dirb = 0xff;
s->dira = 0;
s->sr = 0;
s->acr = 0;
@@ -688,8 +754,9 @@ static void cuda_reset(DeviceState *dev)
s->timers[0].latch = 0xffff;
set_counter(s, &s->timers[0], 0xffff);
- s->timers[1].latch = 0;
- set_counter(s, &s->timers[1], 0xffff);
+ s->timers[1].latch = 0xffff;
+
+ s->sr_delay_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_set_sr_int, s);
}
static void cuda_realizefn(DeviceState *dev, Error **errp)
@@ -699,7 +766,8 @@ static void cuda_realizefn(DeviceState *dev, Error **errp)
s->timers[0].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_timer1, s);
s->timers[0].frequency = s->frequency;
- s->timers[1].frequency = s->frequency;
+ s->timers[1].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_timer2, s);
+ s->timers[1].frequency = (SCALE_US * 6000) / 4700;
qemu_get_timedate(&tm, 0);
s->tick_offset = (uint32_t)mktimegm(&tm) + RTC_OFFSET;
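
The cuda.c hunks above replace every immediate SR_INT raise with a one-shot QEMU timer (cuda_delay_set_sr_int). A minimal sketch of that pattern follows, with invented names (DelayedIrqState, delayed_irq_fire, delayed_irq_request) standing in for the CUDA state; only the QEMU timer/IRQ calls are taken as real:

    #include "qemu/timer.h"
    #include "hw/irq.h"

    typedef struct DelayedIrqState {
        qemu_irq irq;
        QEMUTimer *delay_timer;        /* timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          delayed_irq_fire, s) at setup time */
    } DelayedIrqState;

    static void delayed_irq_fire(void *opaque)
    {
        DelayedIrqState *s = opaque;
        qemu_irq_raise(s->irq);        /* the deferred side effect */
    }

    static void delayed_irq_request(DelayedIrqState *s)
    {
        /* re-arm a one-shot expiry 300us of guest (virtual) time from now */
        int64_t expire = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 300 * SCALE_US;
        timer_mod(s->delay_timer, expire);
    }

As in the patch, the delay only matters once the guest has reprogrammed DIRB away from its reset value of 0xff; until then cuda_set_sr_int() is called directly.
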
diff --git a/hw/misc/zynq-xadc.c b/hw/misc/zynq-xadc.c
new file mode 100644
index 0000000..1a32595
--- /dev/null
+++ b/hw/misc/zynq-xadc.c
@@ -0,0 +1,302 @@
+/*
+ * ADC registers for Xilinx Zynq Platform
+ *
+ * Copyright (c) 2015 Guenter Roeck
+ * Based on hw/misc/zynq_slcr.c, written by Michal Simek
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hw/hw.h"
+#include "hw/misc/zynq-xadc.h"
+#include "qemu/timer.h"
+#include "sysemu/sysemu.h"
+
+enum {
+ CFG = 0x000 / 4,
+ INT_STS,
+ INT_MASK,
+ MSTS,
+ CMDFIFO,
+ RDFIFO,
+ MCTL,
+};
+
+#define CFG_ENABLE BIT(31)
+#define CFG_CFIFOTH_SHIFT 20
+#define CFG_CFIFOTH_LENGTH 4
+#define CFG_DFIFOTH_SHIFT 16
+#define CFG_DFIFOTH_LENGTH 4
+#define CFG_WEDGE BIT(13)
+#define CFG_REDGE BIT(12)
+#define CFG_TCKRATE_SHIFT 8
+#define CFG_TCKRATE_LENGTH 2
+
+#define CFG_TCKRATE_DIV(x) (0x1 << (x - 1))
+
+#define CFG_IGAP_SHIFT 0
+#define CFG_IGAP_LENGTH 5
+
+#define INT_CFIFO_LTH BIT(9)
+#define INT_DFIFO_GTH BIT(8)
+#define INT_OT BIT(7)
+#define INT_ALM_SHIFT 0
+#define INT_ALM_LENGTH 7
+#define INT_ALM_MASK (((1 << INT_ALM_LENGTH) - 1) << INT_ALM_SHIFT)
+
+#define INT_ALL (INT_CFIFO_LTH | INT_DFIFO_GTH | INT_OT | INT_ALM_MASK)
+
+#define MSTS_CFIFO_LVL_SHIFT 16
+#define MSTS_CFIFO_LVL_LENGTH 4
+#define MSTS_DFIFO_LVL_SHIFT 12
+#define MSTS_DFIFO_LVL_LENGTH 4
+#define MSTS_CFIFOF BIT(11)
+#define MSTS_CFIFOE BIT(10)
+#define MSTS_DFIFOF BIT(9)
+#define MSTS_DFIFOE BIT(8)
+#define MSTS_OT BIT(7)
+#define MSTS_ALM_SHIFT 0
+#define MSTS_ALM_LENGTH 7
+
+#define MCTL_RESET BIT(4)
+
+#define CMD_NOP 0x00
+#define CMD_READ 0x01
+#define CMD_WRITE 0x02
+
+static void zynq_xadc_update_ints(ZynqXADCState *s)
+{
+
+ /* We are fast, commands are actioned instantly so the CFIFO is always
+ * empty (and below threshold).
+ */
+ s->regs[INT_STS] |= INT_CFIFO_LTH;
+
+ if (s->xadc_dfifo_entries >
+ extract32(s->regs[CFG], CFG_DFIFOTH_SHIFT, CFG_DFIFOTH_LENGTH)) {
+ s->regs[INT_STS] |= INT_DFIFO_GTH;
+ }
+
+ qemu_set_irq(s->qemu_irq, !!(s->regs[INT_STS] & ~s->regs[INT_MASK]));
+}
+
+static void zynq_xadc_reset(DeviceState *d)
+{
+ ZynqXADCState *s = ZYNQ_XADC(d);
+
+ s->regs[CFG] = 0x14 << CFG_IGAP_SHIFT |
+ CFG_TCKRATE_DIV(4) << CFG_TCKRATE_SHIFT | CFG_REDGE;
+ s->regs[INT_STS] = INT_CFIFO_LTH;
+ s->regs[INT_MASK] = 0xffffffff;
+ s->regs[CMDFIFO] = 0;
+ s->regs[RDFIFO] = 0;
+ s->regs[MCTL] = MCTL_RESET;
+
+ memset(s->xadc_regs, 0, sizeof(s->xadc_regs));
+ memset(s->xadc_dfifo, 0, sizeof(s->xadc_dfifo));
+ s->xadc_dfifo_entries = 0;
+
+ zynq_xadc_update_ints(s);
+}
+
+static uint16_t xadc_pop_dfifo(ZynqXADCState *s)
+{
+ uint16_t rv = s->xadc_dfifo[0];
+ int i;
+
+ if (s->xadc_dfifo_entries > 0) {
+ s->xadc_dfifo_entries--;
+ }
+ for (i = 0; i < s->xadc_dfifo_entries; i++) {
+ s->xadc_dfifo[i] = s->xadc_dfifo[i + 1];
+ }
+ s->xadc_dfifo[s->xadc_dfifo_entries] = 0;
+ zynq_xadc_update_ints(s);
+ return rv;
+}
+
+static void xadc_push_dfifo(ZynqXADCState *s, uint16_t regval)
+{
+ if (s->xadc_dfifo_entries < ZYNQ_XADC_FIFO_DEPTH) {
+ s->xadc_dfifo[s->xadc_dfifo_entries++] = s->xadc_read_reg_previous;
+ }
+ s->xadc_read_reg_previous = regval;
+ zynq_xadc_update_ints(s);
+}
+
+static bool zynq_xadc_check_offset(hwaddr offset, bool rnw)
+{
+ switch (offset) {
+ case CFG:
+ case INT_MASK:
+ case INT_STS:
+ case MCTL:
+ return true;
+ case RDFIFO:
+ case MSTS:
+ return rnw; /* read only */
+ case CMDFIFO:
+ return !rnw; /* write only */
+ default:
+ return false;
+ }
+}
+
+static uint64_t zynq_xadc_read(void *opaque, hwaddr offset, unsigned size)
+{
+ ZynqXADCState *s = opaque;
+ int reg = offset / 4;
+ uint32_t rv = 0;
+
+ if (!zynq_xadc_check_offset(reg, true)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "zynq_xadc: Invalid read access to "
+ "addr %" HWADDR_PRIx "\n", offset);
+ return 0;
+ }
+
+ switch (reg) {
+ case CFG:
+ case INT_MASK:
+ case INT_STS:
+ case MCTL:
+ rv = s->regs[reg];
+ break;
+ case MSTS:
+ rv = MSTS_CFIFOE;
+ rv |= s->xadc_dfifo_entries << MSTS_DFIFO_LVL_SHIFT;
+ if (!s->xadc_dfifo_entries) {
+ rv |= MSTS_DFIFOE;
+ } else if (s->xadc_dfifo_entries == ZYNQ_XADC_FIFO_DEPTH) {
+ rv |= MSTS_DFIFOF;
+ }
+ break;
+ case RDFIFO:
+ rv = xadc_pop_dfifo(s);
+ break;
+ }
+ return rv;
+}
+
+static void zynq_xadc_write(void *opaque, hwaddr offset, uint64_t val,
+ unsigned size)
+{
+ ZynqXADCState *s = (ZynqXADCState *)opaque;
+ int reg = offset / 4;
+ int xadc_reg;
+ int xadc_cmd;
+ int xadc_data;
+
+ if (!zynq_xadc_check_offset(reg, false)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "zynq_xadc: Invalid write access "
+ "to addr %" HWADDR_PRIx "\n", offset);
+ return;
+ }
+
+ switch (reg) {
+ case CFG:
+ s->regs[CFG] = val;
+ break;
+ case INT_STS:
+ s->regs[INT_STS] &= ~val;
+ break;
+ case INT_MASK:
+ s->regs[INT_MASK] = val & INT_ALL;
+ break;
+ case CMDFIFO:
+ xadc_cmd = extract32(val, 26, 4);
+ xadc_reg = extract32(val, 16, 10);
+ xadc_data = extract32(val, 0, 16);
+
+ if (s->regs[MCTL] & MCTL_RESET) {
+ qemu_log_mask(LOG_GUEST_ERROR, "zynq_xadc: Sending command "
+ "while comm channel held in reset: %" PRIx32 "\n",
+ (uint32_t) val);
+ break;
+ }
+
+ if (xadc_reg > ZYNQ_XADC_NUM_ADC_REGS && xadc_cmd != CMD_NOP) {
+ qemu_log_mask(LOG_GUEST_ERROR, "read/write op to invalid xadc "
+ "reg 0x%x\n", xadc_reg);
+ break;
+ }
+
+ switch (xadc_cmd) {
+ case CMD_READ:
+ xadc_push_dfifo(s, s->xadc_regs[xadc_reg]);
+ break;
+ case CMD_WRITE:
+ s->xadc_regs[xadc_reg] = xadc_data;
+ /* fallthrough */
+ case CMD_NOP:
+ xadc_push_dfifo(s, 0);
+ break;
+ }
+ break;
+ case MCTL:
+ s->regs[MCTL] = val & 0x00fffeff;
+ break;
+ }
+ zynq_xadc_update_ints(s);
+}
+
+static const MemoryRegionOps xadc_ops = {
+ .read = zynq_xadc_read,
+ .write = zynq_xadc_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void zynq_xadc_init(Object *obj)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ ZynqXADCState *s = ZYNQ_XADC(obj);
+
+ memory_region_init_io(&s->iomem, obj, &xadc_ops, s, "zynq-xadc",
+ ZYNQ_XADC_MMIO_SIZE);
+ sysbus_init_mmio(sbd, &s->iomem);
+ sysbus_init_irq(sbd, &s->qemu_irq);
+}
+
+static const VMStateDescription vmstate_zynq_xadc = {
+ .name = "zynq-xadc",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, ZynqXADCState, ZYNQ_XADC_NUM_IO_REGS),
+ VMSTATE_UINT16_ARRAY(xadc_regs, ZynqXADCState,
+ ZYNQ_XADC_NUM_ADC_REGS),
+ VMSTATE_UINT16_ARRAY(xadc_dfifo, ZynqXADCState,
+ ZYNQ_XADC_FIFO_DEPTH),
+ VMSTATE_UINT16(xadc_read_reg_previous, ZynqXADCState),
+ VMSTATE_UINT16(xadc_dfifo_entries, ZynqXADCState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void zynq_xadc_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->vmsd = &vmstate_zynq_xadc;
+ dc->reset = zynq_xadc_reset;
+}
+
+static const TypeInfo zynq_xadc_info = {
+ .class_init = zynq_xadc_class_init,
+ .name = TYPE_ZYNQ_XADC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(ZynqXADCState),
+ .instance_init = zynq_xadc_init,
+};
+
+static void zynq_xadc_register_types(void)
+{
+ type_register_static(&zynq_xadc_info);
+}
+
+type_init(zynq_xadc_register_types)
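
In the new model above, zynq_xadc_write() decodes each CMDFIFO word with extract32(): the command sits in bits [29:26], the XADC register address in bits [25:16] and the write data in bits [15:0]; every accepted command pushes the previous read result (xadc_read_reg_previous) into the data FIFO, so read data trails the command stream by one entry. A small sketch of composing such a word — build_xadc_cmd() is an invented helper, not part of the patch:

    #include <stdint.h>

    enum { XADC_CMD_NOP = 0x0, XADC_CMD_READ = 0x1, XADC_CMD_WRITE = 0x2 };

    static inline uint32_t build_xadc_cmd(uint32_t cmd, uint32_t reg, uint32_t data)
    {
        return ((cmd & 0xf)   << 26) |   /* matches extract32(val, 26, 4)  */
               ((reg & 0x3ff) << 16) |   /* matches extract32(val, 16, 10) */
                (data & 0xffff);         /* matches extract32(val, 0, 16)  */
    }

    /* e.g. reading XADC register 0x03 means writing 0x04030000 to CMDFIFO,
     * issuing one more command (a NOP will do), then reading RDFIFO. */
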
diff --git a/hw/net/e1000.c b/hw/net/e1000.c
index 910de3a..c877e06 100644
--- a/hw/net/e1000.c
+++ b/hw/net/e1000.c
@@ -37,24 +37,26 @@
#include "e1000_regs.h"
+static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
#define E1000_DEBUG
#ifdef E1000_DEBUG
enum {
- DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
- DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
- DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
+ DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
+ DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
+ DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
DEBUG_RXFILTER, DEBUG_PHY, DEBUG_NOTYET,
};
-#define DBGBIT(x) (1<<DEBUG_##x)
+#define DBGBIT(x) (1<<DEBUG_##x)
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);
-#define DBGOUT(what, fmt, ...) do { \
+#define DBGOUT(what, fmt, ...) do { \
if (debugflags & DBGBIT(what)) \
fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
} while (0)
#else
-#define DBGOUT(what, fmt, ...) do {} while (0)
+#define DBGOUT(what, fmt, ...) do {} while (0)
#endif
#define IOPORT_SIZE 0x40
@@ -118,7 +120,7 @@ typedef struct E1000State_st {
} tx;
struct {
- uint32_t val_in; // shifted in from guest driver
+ uint32_t val_in; /* shifted in from guest driver */
uint16_t bitnum_in;
uint16_t bitnum_out;
uint16_t reading;
@@ -135,11 +137,15 @@ typedef struct E1000State_st {
/* Compatibility flags for migration to/from qemu 1.3.0 and older */
#define E1000_FLAG_AUTONEG_BIT 0
#define E1000_FLAG_MIT_BIT 1
+#define E1000_FLAG_MAC_BIT 2
#define E1000_FLAG_AUTONEG (1 << E1000_FLAG_AUTONEG_BIT)
#define E1000_FLAG_MIT (1 << E1000_FLAG_MIT_BIT)
+#define E1000_FLAG_MAC (1 << E1000_FLAG_MAC_BIT)
uint32_t compat_flags;
} E1000State;
+#define chkflag(x) (s->compat_flags & E1000_FLAG_##x)
+
typedef struct E1000BaseClass {
PCIDeviceClass parent_class;
uint16_t phy_id2;
@@ -155,20 +161,36 @@ typedef struct E1000BaseClass {
#define E1000_DEVICE_GET_CLASS(obj) \
OBJECT_GET_CLASS(E1000BaseClass, (obj), TYPE_E1000_BASE)
-#define defreg(x) x = (E1000_##x>>2)
+#define defreg(x) x = (E1000_##x>>2)
enum {
- defreg(CTRL), defreg(EECD), defreg(EERD), defreg(GPRC),
- defreg(GPTC), defreg(ICR), defreg(ICS), defreg(IMC),
- defreg(IMS), defreg(LEDCTL), defreg(MANC), defreg(MDIC),
- defreg(MPC), defreg(PBA), defreg(RCTL), defreg(RDBAH),
- defreg(RDBAL), defreg(RDH), defreg(RDLEN), defreg(RDT),
- defreg(STATUS), defreg(SWSM), defreg(TCTL), defreg(TDBAH),
- defreg(TDBAL), defreg(TDH), defreg(TDLEN), defreg(TDT),
- defreg(TORH), defreg(TORL), defreg(TOTH), defreg(TOTL),
- defreg(TPR), defreg(TPT), defreg(TXDCTL), defreg(WUFC),
- defreg(RA), defreg(MTA), defreg(CRCERRS),defreg(VFTA),
- defreg(VET), defreg(RDTR), defreg(RADV), defreg(TADV),
- defreg(ITR),
+ defreg(CTRL), defreg(EECD), defreg(EERD), defreg(GPRC),
+ defreg(GPTC), defreg(ICR), defreg(ICS), defreg(IMC),
+ defreg(IMS), defreg(LEDCTL), defreg(MANC), defreg(MDIC),
+ defreg(MPC), defreg(PBA), defreg(RCTL), defreg(RDBAH),
+ defreg(RDBAL), defreg(RDH), defreg(RDLEN), defreg(RDT),
+ defreg(STATUS), defreg(SWSM), defreg(TCTL), defreg(TDBAH),
+ defreg(TDBAL), defreg(TDH), defreg(TDLEN), defreg(TDT),
+ defreg(TORH), defreg(TORL), defreg(TOTH), defreg(TOTL),
+ defreg(TPR), defreg(TPT), defreg(TXDCTL), defreg(WUFC),
+ defreg(RA), defreg(MTA), defreg(CRCERRS), defreg(VFTA),
+ defreg(VET), defreg(RDTR), defreg(RADV), defreg(TADV),
+ defreg(ITR), defreg(FCRUC), defreg(TDFH), defreg(TDFT),
+ defreg(TDFHS), defreg(TDFTS), defreg(TDFPC), defreg(RDFH),
+ defreg(RDFT), defreg(RDFHS), defreg(RDFTS), defreg(RDFPC),
+ defreg(IPAV), defreg(WUC), defreg(WUS), defreg(AIT),
+ defreg(IP6AT), defreg(IP4AT), defreg(FFLT), defreg(FFMT),
+ defreg(FFVT), defreg(WUPM), defreg(PBM), defreg(SCC),
+ defreg(ECOL), defreg(MCC), defreg(LATECOL), defreg(COLC),
+ defreg(DC), defreg(TNCRS), defreg(SEC), defreg(CEXTERR),
+ defreg(RLEC), defreg(XONRXC), defreg(XONTXC), defreg(XOFFRXC),
+ defreg(XOFFTXC), defreg(RFC), defreg(RJC), defreg(RNBC),
+ defreg(TSCTFC), defreg(MGTPRC), defreg(MGTPDC), defreg(MGTPTC),
+ defreg(RUC), defreg(ROC), defreg(GORCL), defreg(GORCH),
+ defreg(GOTCL), defreg(GOTCH), defreg(BPRC), defreg(MPRC),
+ defreg(TSCTC), defreg(PRC64), defreg(PRC127), defreg(PRC255),
+ defreg(PRC511), defreg(PRC1023), defreg(PRC1522), defreg(PTC64),
+ defreg(PTC127), defreg(PTC255), defreg(PTC511), defreg(PTC1023),
+ defreg(PTC1522), defreg(MPTC), defreg(BPTC)
};
static void
@@ -193,8 +215,7 @@ e1000_link_up(E1000State *s)
static bool
have_autoneg(E1000State *s)
{
- return (s->compat_flags & E1000_FLAG_AUTONEG) &&
- (s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN);
+ return chkflag(AUTONEG) && (s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN);
}
static void
@@ -226,18 +247,18 @@ enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };
enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
- [PHY_STATUS] = PHY_R, [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
- [PHY_ID1] = PHY_R, [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
- [PHY_CTRL] = PHY_RW, [PHY_1000T_CTRL] = PHY_RW,
- [PHY_LP_ABILITY] = PHY_R, [PHY_1000T_STATUS] = PHY_R,
- [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
- [PHY_ID2] = PHY_R, [M88E1000_PHY_SPEC_STATUS] = PHY_R,
+ [PHY_STATUS] = PHY_R, [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
+ [PHY_ID1] = PHY_R, [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
+ [PHY_CTRL] = PHY_RW, [PHY_1000T_CTRL] = PHY_RW,
+ [PHY_LP_ABILITY] = PHY_R, [PHY_1000T_STATUS] = PHY_R,
+ [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
+ [PHY_ID2] = PHY_R, [M88E1000_PHY_SPEC_STATUS] = PHY_R,
[PHY_AUTONEG_EXP] = PHY_R,
};
/* PHY_ID2 documented in 8254x_GBe_SDM.pdf, pp. 250 */
static const uint16_t phy_reg_init[] = {
- [PHY_CTRL] = MII_CR_SPEED_SELECT_MSB |
+ [PHY_CTRL] = MII_CR_SPEED_SELECT_MSB |
MII_CR_FULL_DUPLEX |
MII_CR_AUTO_NEG_EN,
@@ -264,15 +285,15 @@ static const uint16_t phy_reg_init[] = {
};
static const uint32_t mac_reg_init[] = {
- [PBA] = 0x00100030,
- [LEDCTL] = 0x602,
- [CTRL] = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
+ [PBA] = 0x00100030,
+ [LEDCTL] = 0x602,
+ [CTRL] = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
- [STATUS] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
+ [STATUS] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
E1000_STATUS_LU,
- [MANC] = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
+ [MANC] = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
E1000_MANC_RMCP_EN,
};
@@ -319,7 +340,7 @@ set_interrupt_cause(E1000State *s, int index, uint32_t val)
if (s->mit_timer_on) {
return;
}
- if (s->compat_flags & E1000_FLAG_MIT) {
+ if (chkflag(MIT)) {
/* Compute the next mitigation delay according to pending
* interrupts and the current values of RADV (provided
* RDTR!=0), TADV and ITR.
@@ -510,17 +531,19 @@ set_eecd(E1000State *s, int index, uint32_t val)
s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
- if (!(E1000_EECD_CS & val)) // CS inactive; nothing to do
- return;
- if (E1000_EECD_CS & (val ^ oldval)) { // CS rise edge; reset state
- s->eecd_state.val_in = 0;
- s->eecd_state.bitnum_in = 0;
- s->eecd_state.bitnum_out = 0;
- s->eecd_state.reading = 0;
+ if (!(E1000_EECD_CS & val)) { /* CS inactive; nothing to do */
+ return;
}
- if (!(E1000_EECD_SK & (val ^ oldval))) // no clock edge
+ if (E1000_EECD_CS & (val ^ oldval)) { /* CS rise edge; reset state */
+ s->eecd_state.val_in = 0;
+ s->eecd_state.bitnum_in = 0;
+ s->eecd_state.bitnum_out = 0;
+ s->eecd_state.reading = 0;
+ }
+ if (!(E1000_EECD_SK & (val ^ oldval))) { /* no clock edge */
return;
- if (!(E1000_EECD_SK & val)) { // falling edge
+ }
+ if (!(E1000_EECD_SK & val)) { /* falling edge */
s->eecd_state.bitnum_out++;
return;
}
@@ -565,6 +588,56 @@ putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
}
}
+static inline void
+inc_reg_if_not_full(E1000State *s, int index)
+{
+ if (s->mac_reg[index] != 0xffffffff) {
+ s->mac_reg[index]++;
+ }
+}
+
+static inline void
+inc_tx_bcast_or_mcast_count(E1000State *s, const unsigned char *arr)
+{
+ if (!memcmp(arr, bcast, sizeof bcast)) {
+ inc_reg_if_not_full(s, BPTC);
+ } else if (arr[0] & 1) {
+ inc_reg_if_not_full(s, MPTC);
+ }
+}
+
+static void
+grow_8reg_if_not_full(E1000State *s, int index, int size)
+{
+ uint64_t sum = s->mac_reg[index] | (uint64_t)s->mac_reg[index+1] << 32;
+
+ if (sum + size < sum) {
+ sum = ~0ULL;
+ } else {
+ sum += size;
+ }
+ s->mac_reg[index] = sum;
+ s->mac_reg[index+1] = sum >> 32;
+}
+
+static void
+increase_size_stats(E1000State *s, const int *size_regs, int size)
+{
+ if (size > 1023) {
+ inc_reg_if_not_full(s, size_regs[5]);
+ } else if (size > 511) {
+ inc_reg_if_not_full(s, size_regs[4]);
+ } else if (size > 255) {
+ inc_reg_if_not_full(s, size_regs[3]);
+ } else if (size > 127) {
+ inc_reg_if_not_full(s, size_regs[2]);
+ } else if (size > 64) {
+ inc_reg_if_not_full(s, size_regs[1]);
+ } else if (size == 64) {
+ inc_reg_if_not_full(s, size_regs[0]);
+ }
+}
+
static inline int
vlan_enabled(E1000State *s)
{
@@ -602,40 +675,49 @@ fcs_len(E1000State *s)
static void
e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
{
+ static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511,
+ PTC1023, PTC1522 };
+
NetClientState *nc = qemu_get_queue(s->nic);
if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
nc->info->receive(nc, buf, size);
} else {
qemu_send_packet(nc, buf, size);
}
+ inc_tx_bcast_or_mcast_count(s, buf);
+ increase_size_stats(s, PTCregs, size);
}
static void
xmit_seg(E1000State *s)
{
uint16_t len, *sp;
- unsigned int frames = s->tx.tso_frames, css, sofar, n;
+ unsigned int frames = s->tx.tso_frames, css, sofar;
struct e1000_tx *tp = &s->tx;
if (tp->tse && tp->cptse) {
css = tp->ipcss;
DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
frames, tp->size, css);
- if (tp->ip) { // IPv4
+ if (tp->ip) { /* IPv4 */
stw_be_p(tp->data+css+2, tp->size - css);
stw_be_p(tp->data+css+4,
- be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
- } else // IPv6
+ be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
+ } else { /* IPv6 */
stw_be_p(tp->data+css+4, tp->size - css);
+ }
css = tp->tucss;
len = tp->size - css;
DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
if (tp->tcp) {
sofar = frames * tp->mss;
stl_be_p(tp->data+css+4, ldl_be_p(tp->data+css+4)+sofar); /* seq */
- if (tp->paylen - sofar > tp->mss)
- tp->data[css + 13] &= ~9; // PSH, FIN
- } else // UDP
+ if (tp->paylen - sofar > tp->mss) {
+ tp->data[css + 13] &= ~9; /* PSH, FIN */
+ } else if (frames) {
+ inc_reg_if_not_full(s, TSCTC);
+ }
+ } else /* UDP */
stw_be_p(tp->data+css+4, len);
if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
unsigned int phsum;
@@ -657,13 +739,15 @@ xmit_seg(E1000State *s)
memmove(tp->data, tp->data + 4, 8);
memcpy(tp->data + 8, tp->vlan_header, 4);
e1000_send_packet(s, tp->vlan, tp->size + 4);
- } else
+ } else {
e1000_send_packet(s, tp->data, tp->size);
- s->mac_reg[TPT]++;
- s->mac_reg[GPTC]++;
- n = s->mac_reg[TOTL];
- if ((s->mac_reg[TOTL] += s->tx.size) < n)
- s->mac_reg[TOTH]++;
+ }
+
+ inc_reg_if_not_full(s, TPT);
+ grow_8reg_if_not_full(s, TOTL, s->tx.size);
+ s->mac_reg[GPTC] = s->mac_reg[TPT];
+ s->mac_reg[GOTCL] = s->mac_reg[TOTL];
+ s->mac_reg[GOTCH] = s->mac_reg[TOTH];
}
static void
@@ -679,7 +763,7 @@ process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
struct e1000_tx *tp = &s->tx;
s->mit_ide |= (txd_lower & E1000_TXD_CMD_IDE);
- if (dtype == E1000_TXD_CMD_DEXT) { // context descriptor
+ if (dtype == E1000_TXD_CMD_DEXT) { /* context descriptor */
op = le32_to_cpu(xp->cmd_and_length);
tp->ipcss = xp->lower_setup.ip_fields.ipcss;
tp->ipcso = xp->lower_setup.ip_fields.ipcso;
@@ -694,7 +778,7 @@ process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
tp->tso_frames = 0;
- if (tp->tucso == 0) { // this is probably wrong
+ if (tp->tucso == 0) { /* this is probably wrong */
DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
}
@@ -718,7 +802,7 @@ process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
stw_be_p(tp->vlan_header + 2,
le16_to_cpu(dp->upper.fields.special));
}
-
+
addr = le64_to_cpu(dp->buffer_addr);
if (tp->tse && tp->cptse) {
msh = tp->hdr_len + tp->mss;
@@ -831,9 +915,9 @@ start_xmit(E1000State *s)
static int
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
- static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
static const int mta_shift[] = {4, 3, 2, 0};
uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;
+ int isbcast = !memcmp(buf, bcast, sizeof bcast), ismcast = (buf[0] & 1);
if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
@@ -843,14 +927,19 @@ receive_filter(E1000State *s, const uint8_t *buf, int size)
return 0;
}
- if (rctl & E1000_RCTL_UPE) // promiscuous
+ if (!isbcast && !ismcast && (rctl & E1000_RCTL_UPE)) { /* promiscuous ucast */
return 1;
+ }
- if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE)) // promiscuous mcast
+ if (ismcast && (rctl & E1000_RCTL_MPE)) { /* promiscuous mcast */
+ inc_reg_if_not_full(s, MPRC);
return 1;
+ }
- if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
+ if (isbcast && (rctl & E1000_RCTL_BAM)) { /* broadcast enabled */
+ inc_reg_if_not_full(s, BPRC);
return 1;
+ }
for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
if (!(rp[1] & E1000_RAH_AV))
@@ -870,8 +959,10 @@ receive_filter(E1000State *s, const uint8_t *buf, int size)
f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
- if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
+ if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f))) {
+ inc_reg_if_not_full(s, MPRC);
return 1;
+ }
DBGOUT(RXFILTER,
"dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
@@ -960,6 +1051,8 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
size_t desc_offset;
size_t desc_size;
size_t total_size;
+ static const int PRCregs[6] = { PRC64, PRC127, PRC255, PRC511,
+ PRC1023, PRC1522 };
if (!(s->mac_reg[STATUS] & E1000_STATUS_LU)) {
return -1;
@@ -973,6 +1066,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
if (size < sizeof(min_buf)) {
iov_to_buf(iov, iovcnt, 0, min_buf, size);
memset(&min_buf[size], 0, sizeof(min_buf) - size);
+ inc_reg_if_not_full(s, RUC);
min_iov.iov_base = filter_buf = min_buf;
min_iov.iov_len = size = sizeof(min_buf);
iovcnt = 1;
@@ -988,6 +1082,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
(size > MAXIMUM_ETHERNET_VLAN_SIZE
&& !(s->mac_reg[RCTL] & E1000_RCTL_LPE)))
&& !(s->mac_reg[RCTL] & E1000_RCTL_SBP)) {
+ inc_reg_if_not_full(s, ROC);
return size;
}
@@ -1073,16 +1168,17 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
}
} while (desc_offset < total_size);
- s->mac_reg[GPRC]++;
- s->mac_reg[TPR]++;
+ increase_size_stats(s, PRCregs, total_size);
+ inc_reg_if_not_full(s, TPR);
+ s->mac_reg[GPRC] = s->mac_reg[TPR];
/* TOR - Total Octets Received:
* This register includes bytes received in a packet from the <Destination
* Address> field through the <CRC> field, inclusively.
+ * Always include FCS length (4) in size.
*/
- n = s->mac_reg[TORL] + size + /* Always include FCS length. */ 4;
- if (n < s->mac_reg[TORL])
- s->mac_reg[TORH]++;
- s->mac_reg[TORL] = n;
+ grow_8reg_if_not_full(s, TORL, size+4);
+ s->mac_reg[GORCL] = s->mac_reg[TORL];
+ s->mac_reg[GORCH] = s->mac_reg[TORH];
n = E1000_ICS_RXT0;
if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
@@ -1114,6 +1210,30 @@ mac_readreg(E1000State *s, int index)
}
static uint32_t
+mac_low4_read(E1000State *s, int index)
+{
+ return s->mac_reg[index] & 0xf;
+}
+
+static uint32_t
+mac_low11_read(E1000State *s, int index)
+{
+ return s->mac_reg[index] & 0x7ff;
+}
+
+static uint32_t
+mac_low13_read(E1000State *s, int index)
+{
+ return s->mac_reg[index] & 0x1fff;
+}
+
+static uint32_t
+mac_low16_read(E1000State *s, int index)
+{
+ return s->mac_reg[index] & 0xffff;
+}
+
+static uint32_t
mac_icr_read(E1000State *s, int index)
{
uint32_t ret = s->mac_reg[ICR];
@@ -1206,46 +1326,144 @@ set_ims(E1000State *s, int index, uint32_t val)
set_ics(s, 0, 0);
}
-#define getreg(x) [x] = mac_readreg
+#define getreg(x) [x] = mac_readreg
static uint32_t (*macreg_readops[])(E1000State *, int) = {
- getreg(PBA), getreg(RCTL), getreg(TDH), getreg(TXDCTL),
- getreg(WUFC), getreg(TDT), getreg(CTRL), getreg(LEDCTL),
- getreg(MANC), getreg(MDIC), getreg(SWSM), getreg(STATUS),
- getreg(TORL), getreg(TOTL), getreg(IMS), getreg(TCTL),
- getreg(RDH), getreg(RDT), getreg(VET), getreg(ICS),
- getreg(TDBAL), getreg(TDBAH), getreg(RDBAH), getreg(RDBAL),
- getreg(TDLEN), getreg(RDLEN), getreg(RDTR), getreg(RADV),
- getreg(TADV), getreg(ITR),
-
- [TOTH] = mac_read_clr8, [TORH] = mac_read_clr8, [GPRC] = mac_read_clr4,
- [GPTC] = mac_read_clr4, [TPR] = mac_read_clr4, [TPT] = mac_read_clr4,
- [ICR] = mac_icr_read, [EECD] = get_eecd, [EERD] = flash_eerd_read,
- [CRCERRS ... MPC] = &mac_readreg,
- [RA ... RA+31] = &mac_readreg,
- [MTA ... MTA+127] = &mac_readreg,
+ getreg(PBA), getreg(RCTL), getreg(TDH), getreg(TXDCTL),
+ getreg(WUFC), getreg(TDT), getreg(CTRL), getreg(LEDCTL),
+ getreg(MANC), getreg(MDIC), getreg(SWSM), getreg(STATUS),
+ getreg(TORL), getreg(TOTL), getreg(IMS), getreg(TCTL),
+ getreg(RDH), getreg(RDT), getreg(VET), getreg(ICS),
+ getreg(TDBAL), getreg(TDBAH), getreg(RDBAH), getreg(RDBAL),
+ getreg(TDLEN), getreg(RDLEN), getreg(RDTR), getreg(RADV),
+ getreg(TADV), getreg(ITR), getreg(FCRUC), getreg(IPAV),
+ getreg(WUC), getreg(WUS), getreg(SCC), getreg(ECOL),
+ getreg(MCC), getreg(LATECOL), getreg(COLC), getreg(DC),
+ getreg(TNCRS), getreg(SEC), getreg(CEXTERR), getreg(RLEC),
+ getreg(XONRXC), getreg(XONTXC), getreg(XOFFRXC), getreg(XOFFTXC),
+ getreg(RFC), getreg(RJC), getreg(RNBC), getreg(TSCTFC),
+ getreg(MGTPRC), getreg(MGTPDC), getreg(MGTPTC), getreg(GORCL),
+ getreg(GOTCL),
+
+ [TOTH] = mac_read_clr8, [TORH] = mac_read_clr8,
+ [GOTCH] = mac_read_clr8, [GORCH] = mac_read_clr8,
+ [PRC64] = mac_read_clr4, [PRC127] = mac_read_clr4,
+ [PRC255] = mac_read_clr4, [PRC511] = mac_read_clr4,
+ [PRC1023] = mac_read_clr4, [PRC1522] = mac_read_clr4,
+ [PTC64] = mac_read_clr4, [PTC127] = mac_read_clr4,
+ [PTC255] = mac_read_clr4, [PTC511] = mac_read_clr4,
+ [PTC1023] = mac_read_clr4, [PTC1522] = mac_read_clr4,
+ [GPRC] = mac_read_clr4, [GPTC] = mac_read_clr4,
+ [TPT] = mac_read_clr4, [TPR] = mac_read_clr4,
+ [RUC] = mac_read_clr4, [ROC] = mac_read_clr4,
+ [BPRC] = mac_read_clr4, [MPRC] = mac_read_clr4,
+ [TSCTC] = mac_read_clr4, [BPTC] = mac_read_clr4,
+ [MPTC] = mac_read_clr4,
+ [ICR] = mac_icr_read, [EECD] = get_eecd,
+ [EERD] = flash_eerd_read,
+ [RDFH] = mac_low13_read, [RDFT] = mac_low13_read,
+ [RDFHS] = mac_low13_read, [RDFTS] = mac_low13_read,
+ [RDFPC] = mac_low13_read,
+ [TDFH] = mac_low11_read, [TDFT] = mac_low11_read,
+ [TDFHS] = mac_low13_read, [TDFTS] = mac_low13_read,
+ [TDFPC] = mac_low13_read,
+ [AIT] = mac_low16_read,
+
+ [CRCERRS ... MPC] = &mac_readreg,
+ [IP6AT ... IP6AT+3] = &mac_readreg, [IP4AT ... IP4AT+6] = &mac_readreg,
+ [FFLT ... FFLT+6] = &mac_low11_read,
+ [RA ... RA+31] = &mac_readreg,
+ [WUPM ... WUPM+31] = &mac_readreg,
+ [MTA ... MTA+127] = &mac_readreg,
[VFTA ... VFTA+127] = &mac_readreg,
+ [FFMT ... FFMT+254] = &mac_low4_read,
+ [FFVT ... FFVT+254] = &mac_readreg,
+ [PBM ... PBM+16383] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
-#define putreg(x) [x] = mac_writereg
+#define putreg(x) [x] = mac_writereg
static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
- putreg(PBA), putreg(EERD), putreg(SWSM), putreg(WUFC),
- putreg(TDBAL), putreg(TDBAH), putreg(TXDCTL), putreg(RDBAH),
- putreg(RDBAL), putreg(LEDCTL), putreg(VET),
- [TDLEN] = set_dlen, [RDLEN] = set_dlen, [TCTL] = set_tctl,
- [TDT] = set_tctl, [MDIC] = set_mdic, [ICS] = set_ics,
- [TDH] = set_16bit, [RDH] = set_16bit, [RDT] = set_rdt,
- [IMC] = set_imc, [IMS] = set_ims, [ICR] = set_icr,
- [EECD] = set_eecd, [RCTL] = set_rx_control, [CTRL] = set_ctrl,
- [RDTR] = set_16bit, [RADV] = set_16bit, [TADV] = set_16bit,
- [ITR] = set_16bit,
- [RA ... RA+31] = &mac_writereg,
- [MTA ... MTA+127] = &mac_writereg,
+ putreg(PBA), putreg(EERD), putreg(SWSM), putreg(WUFC),
+ putreg(TDBAL), putreg(TDBAH), putreg(TXDCTL), putreg(RDBAH),
+ putreg(RDBAL), putreg(LEDCTL), putreg(VET), putreg(FCRUC),
+ putreg(TDFH), putreg(TDFT), putreg(TDFHS), putreg(TDFTS),
+ putreg(TDFPC), putreg(RDFH), putreg(RDFT), putreg(RDFHS),
+ putreg(RDFTS), putreg(RDFPC), putreg(IPAV), putreg(WUC),
+ putreg(WUS), putreg(AIT),
+
+ [TDLEN] = set_dlen, [RDLEN] = set_dlen, [TCTL] = set_tctl,
+ [TDT] = set_tctl, [MDIC] = set_mdic, [ICS] = set_ics,
+ [TDH] = set_16bit, [RDH] = set_16bit, [RDT] = set_rdt,
+ [IMC] = set_imc, [IMS] = set_ims, [ICR] = set_icr,
+ [EECD] = set_eecd, [RCTL] = set_rx_control, [CTRL] = set_ctrl,
+ [RDTR] = set_16bit, [RADV] = set_16bit, [TADV] = set_16bit,
+ [ITR] = set_16bit,
+
+ [IP6AT ... IP6AT+3] = &mac_writereg, [IP4AT ... IP4AT+6] = &mac_writereg,
+ [FFLT ... FFLT+6] = &mac_writereg,
+ [RA ... RA+31] = &mac_writereg,
+ [WUPM ... WUPM+31] = &mac_writereg,
+ [MTA ... MTA+127] = &mac_writereg,
[VFTA ... VFTA+127] = &mac_writereg,
+ [FFMT ... FFMT+254] = &mac_writereg, [FFVT ... FFVT+254] = &mac_writereg,
+ [PBM ... PBM+16383] = &mac_writereg,
};
enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
+enum { MAC_ACCESS_PARTIAL = 1, MAC_ACCESS_FLAG_NEEDED = 2 };
+
+#define markflag(x) ((E1000_FLAG_##x << 2) | MAC_ACCESS_FLAG_NEEDED)
+/* In the array below the meaning of the bits is: [f|f|f|f|f|f|n|p]
+ * f - flag bits (up to 6 possible flags)
+ * n - flag needed
+ * p - partially implemented */
+static const uint8_t mac_reg_access[0x8000] = {
+ [RDTR] = markflag(MIT), [TADV] = markflag(MIT),
+ [RADV] = markflag(MIT), [ITR] = markflag(MIT),
+
+ [IPAV] = markflag(MAC), [WUC] = markflag(MAC),
+ [IP6AT] = markflag(MAC), [IP4AT] = markflag(MAC),
+ [FFVT] = markflag(MAC), [WUPM] = markflag(MAC),
+ [ECOL] = markflag(MAC), [MCC] = markflag(MAC),
+ [DC] = markflag(MAC), [TNCRS] = markflag(MAC),
+ [RLEC] = markflag(MAC), [XONRXC] = markflag(MAC),
+ [XOFFTXC] = markflag(MAC), [RFC] = markflag(MAC),
+ [TSCTFC] = markflag(MAC), [MGTPRC] = markflag(MAC),
+ [WUS] = markflag(MAC), [AIT] = markflag(MAC),
+ [FFLT] = markflag(MAC), [FFMT] = markflag(MAC),
+ [SCC] = markflag(MAC), [FCRUC] = markflag(MAC),
+ [LATECOL] = markflag(MAC), [COLC] = markflag(MAC),
+ [SEC] = markflag(MAC), [CEXTERR] = markflag(MAC),
+ [XONTXC] = markflag(MAC), [XOFFRXC] = markflag(MAC),
+ [RJC] = markflag(MAC), [RNBC] = markflag(MAC),
+ [MGTPDC] = markflag(MAC), [MGTPTC] = markflag(MAC),
+ [RUC] = markflag(MAC), [ROC] = markflag(MAC),
+ [GORCL] = markflag(MAC), [GORCH] = markflag(MAC),
+ [GOTCL] = markflag(MAC), [GOTCH] = markflag(MAC),
+ [BPRC] = markflag(MAC), [MPRC] = markflag(MAC),
+ [TSCTC] = markflag(MAC), [PRC64] = markflag(MAC),
+ [PRC127] = markflag(MAC), [PRC255] = markflag(MAC),
+ [PRC511] = markflag(MAC), [PRC1023] = markflag(MAC),
+ [PRC1522] = markflag(MAC), [PTC64] = markflag(MAC),
+ [PTC127] = markflag(MAC), [PTC255] = markflag(MAC),
+ [PTC511] = markflag(MAC), [PTC1023] = markflag(MAC),
+ [PTC1522] = markflag(MAC), [MPTC] = markflag(MAC),
+ [BPTC] = markflag(MAC),
+
+ [TDFH] = markflag(MAC) | MAC_ACCESS_PARTIAL,
+ [TDFT] = markflag(MAC) | MAC_ACCESS_PARTIAL,
+ [TDFHS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
+ [TDFTS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
+ [TDFPC] = markflag(MAC) | MAC_ACCESS_PARTIAL,
+ [RDFH] = markflag(MAC) | MAC_ACCESS_PARTIAL,
+ [RDFT] = markflag(MAC) | MAC_ACCESS_PARTIAL,
+ [RDFHS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
+ [RDFTS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
+ [RDFPC] = markflag(MAC) | MAC_ACCESS_PARTIAL,
+ [PBM] = markflag(MAC) | MAC_ACCESS_PARTIAL,
+};
+
static void
e1000_mmio_write(void *opaque, hwaddr addr, uint64_t val,
unsigned size)
@@ -1254,9 +1472,20 @@ e1000_mmio_write(void *opaque, hwaddr addr, uint64_t val,
unsigned int index = (addr & 0x1ffff) >> 2;
if (index < NWRITEOPS && macreg_writeops[index]) {
- macreg_writeops[index](s, index, val);
+ if (!(mac_reg_access[index] & MAC_ACCESS_FLAG_NEEDED)
+ || (s->compat_flags & (mac_reg_access[index] >> 2))) {
+ if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
+ DBGOUT(GENERAL, "Writing to register at offset: 0x%08x. "
+ "It is not fully implemented.\n", index<<2);
+ }
+ macreg_writeops[index](s, index, val);
+ } else { /* "flag needed" bit is set, but the flag is not active */
+ DBGOUT(MMIO, "MMIO write attempt to disabled reg. addr=0x%08x\n",
+ index<<2);
+ }
} else if (index < NREADOPS && macreg_readops[index]) {
- DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n", index<<2, val);
+ DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n",
+ index<<2, val);
} else {
DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
index<<2, val);
@@ -1269,11 +1498,21 @@ e1000_mmio_read(void *opaque, hwaddr addr, unsigned size)
E1000State *s = opaque;
unsigned int index = (addr & 0x1ffff) >> 2;
- if (index < NREADOPS && macreg_readops[index])
- {
- return macreg_readops[index](s, index);
+ if (index < NREADOPS && macreg_readops[index]) {
+ if (!(mac_reg_access[index] & MAC_ACCESS_FLAG_NEEDED)
+ || (s->compat_flags & (mac_reg_access[index] >> 2))) {
+ if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
+ DBGOUT(GENERAL, "Reading register at offset: 0x%08x. "
+ "It is not fully implemented.\n", index<<2);
+ }
+ return macreg_readops[index](s, index);
+ } else { /* "flag needed" bit is set, but the flag is not active */
+ DBGOUT(MMIO, "MMIO read attempt of disabled reg. addr=0x%08x\n",
+ index<<2);
+ }
+ } else {
+ DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
}
- DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
return 0;
}
@@ -1340,7 +1579,7 @@ static int e1000_post_load(void *opaque, int version_id)
E1000State *s = opaque;
NetClientState *nc = qemu_get_queue(s->nic);
- if (!(s->compat_flags & E1000_FLAG_MIT)) {
+ if (!chkflag(MIT)) {
s->mac_reg[ITR] = s->mac_reg[RDTR] = s->mac_reg[RADV] =
s->mac_reg[TADV] = 0;
s->mit_irq_level = false;
@@ -1367,7 +1606,14 @@ static bool e1000_mit_state_needed(void *opaque)
{
E1000State *s = opaque;
- return s->compat_flags & E1000_FLAG_MIT;
+ return chkflag(MIT);
+}
+
+static bool e1000_full_mac_needed(void *opaque)
+{
+ E1000State *s = opaque;
+
+ return chkflag(MAC);
}
static const VMStateDescription vmstate_e1000_mit_state = {
@@ -1385,6 +1631,17 @@ static const VMStateDescription vmstate_e1000_mit_state = {
}
};
+static const VMStateDescription vmstate_e1000_full_mac_state = {
+ .name = "e1000/full_mac_state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = e1000_full_mac_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(mac_reg, E1000State, 0x8000),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_e1000 = {
.name = "e1000",
.version_id = 2,
@@ -1464,6 +1721,7 @@ static const VMStateDescription vmstate_e1000 = {
},
.subsections = (const VMStateDescription*[]) {
&vmstate_e1000_mit_state,
+ &vmstate_e1000_full_mac_state,
NULL
}
};
@@ -1596,6 +1854,8 @@ static Property e1000_properties[] = {
compat_flags, E1000_FLAG_AUTONEG_BIT, true),
DEFINE_PROP_BIT("mitigation", E1000State,
compat_flags, E1000_FLAG_MIT_BIT, true),
+ DEFINE_PROP_BIT("extra_mac_registers", E1000State,
+ compat_flags, E1000_FLAG_MAC_BIT, true),
DEFINE_PROP_END_OF_LIST(),
};
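
The mac_reg_access[] table above packs, per register, an E1000_FLAG_* value shifted left by two (markflag) together with the MAC_ACCESS_FLAG_NEEDED and MAC_ACCESS_PARTIAL bits; e1000_mmio_read()/write() then only dispatch when the required compat flag is enabled. A sketch of that test in isolation — reg_access_allowed() is an invented name used only for illustration:

    #include <stdbool.h>
    #include <stdint.h>

    enum { MAC_ACCESS_PARTIAL = 1, MAC_ACCESS_FLAG_NEEDED = 2 };

    static bool reg_access_allowed(uint8_t access, uint32_t compat_flags)
    {
        if (!(access & MAC_ACCESS_FLAG_NEEDED)) {
            return true;                      /* register is always reachable */
        }
        /* bits 7:2 hold the E1000_FLAG_* value, undone here by the >> 2 */
        return (compat_flags & (uint32_t)(access >> 2)) != 0;
    }

    /* markflag(MAC) == (E1000_FLAG_MAC << 2) | MAC_ACCESS_FLAG_NEEDED == 0x12,
     * so for those registers the check reduces to compat_flags & E1000_FLAG_MAC. */
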
diff --git a/hw/net/e1000_regs.h b/hw/net/e1000_regs.h
index 60b96aa..1c40244 100644
--- a/hw/net/e1000_regs.h
+++ b/hw/net/e1000_regs.h
@@ -158,7 +158,8 @@
#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
#define FEXTNVM_SW_CONFIG 0x0001
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
-#define E1000_PBS 0x01008 /* Packet Buffer Size */
+#define E1000_PBM 0x10000 /* Packet Buffer Memory - RW */
+#define E1000_PBS 0x01008 /* Packet Buffer Size - RW */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
#define E1000_FLASH_UPDATES 1000
#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
@@ -191,6 +192,11 @@
#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */
#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
+#define E1000_RDFH 0x02410 /* Receive Data FIFO Head Register - RW */
+#define E1000_RDFT 0x02418 /* Receive Data FIFO Tail Register - RW */
+#define E1000_RDFHS 0x02420 /* Receive Data FIFO Head Saved Register - RW */
+#define E1000_RDFTS 0x02428 /* Receive Data FIFO Tail Saved Register - RW */
+#define E1000_RDFPC 0x02430 /* Receive Data FIFO Packet Count - RW */
#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
diff --git a/hw/net/rocker/rocker.c b/hw/net/rocker/rocker.c
index bb6fdc3..c57f1a6 100644
--- a/hw/net/rocker/rocker.c
+++ b/hw/net/rocker/rocker.c
@@ -101,8 +101,7 @@ RockerSwitch *qmp_query_rocker(const char *name, Error **errp)
r = rocker_find(name);
if (!r) {
- error_set(errp, ERROR_CLASS_GENERIC_ERROR,
- "rocker %s not found", name);
+ error_setg(errp, "rocker %s not found", name);
return NULL;
}
@@ -122,8 +121,7 @@ RockerPortList *qmp_query_rocker_ports(const char *name, Error **errp)
r = rocker_find(name);
if (!r) {
- error_set(errp, ERROR_CLASS_GENERIC_ERROR,
- "rocker %s not found", name);
+ error_setg(errp, "rocker %s not found", name);
return NULL;
}
diff --git a/hw/net/rocker/rocker_of_dpa.c b/hw/net/rocker/rocker_of_dpa.c
index 1ad2791..3cf1d61 100644
--- a/hw/net/rocker/rocker_of_dpa.c
+++ b/hw/net/rocker/rocker_of_dpa.c
@@ -2462,15 +2462,13 @@ RockerOfDpaFlowList *qmp_query_rocker_of_dpa_flows(const char *name,
r = rocker_find(name);
if (!r) {
- error_set(errp, ERROR_CLASS_GENERIC_ERROR,
- "rocker %s not found", name);
+ error_setg(errp, "rocker %s not found", name);
return NULL;
}
w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
if (!w) {
- error_set(errp, ERROR_CLASS_GENERIC_ERROR,
- "rocker %s doesn't have OF-DPA world", name);
+ error_setg(errp, "rocker %s doesn't have OF-DPA world", name);
return NULL;
}
@@ -2597,15 +2595,13 @@ RockerOfDpaGroupList *qmp_query_rocker_of_dpa_groups(const char *name,
r = rocker_find(name);
if (!r) {
- error_set(errp, ERROR_CLASS_GENERIC_ERROR,
- "rocker %s not found", name);
+ error_setg(errp, "rocker %s not found", name);
return NULL;
}
w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
if (!w) {
- error_set(errp, ERROR_CLASS_GENERIC_ERROR,
- "rocker %s doesn't have OF-DPA world", name);
+ error_setg(errp, "rocker %s doesn't have OF-DPA world", name);
return NULL;
}
diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c
index 32c65c2..0eab29d 100644
--- a/hw/pci/pcie.c
+++ b/hw/pci/pcie.c
@@ -426,13 +426,13 @@ void pcie_cap_slot_write_config(PCIDevice *dev,
*/
if ((sltsta & PCI_EXP_SLTSTA_PDS) && (val & PCI_EXP_SLTCTL_PCC) &&
((val & PCI_EXP_SLTCTL_PIC_OFF) == PCI_EXP_SLTCTL_PIC_OFF)) {
- PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev));
- pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
- pcie_unplug_device, NULL);
+ PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev));
+ pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
+ pcie_unplug_device, NULL);
- pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
- PCI_EXP_SLTSTA_PDS);
- pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
+ pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_PDS);
+ pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
PCI_EXP_SLTSTA_PDC);
}
diff --git a/hw/ppc/mac.h b/hw/ppc/mac.h
index 8bdba30..e375ed2 100644
--- a/hw/ppc/mac.h
+++ b/hw/ppc/mac.h
@@ -103,6 +103,9 @@ typedef struct CUDAState {
uint8_t last_b;
uint8_t last_acr;
+ /* MacOS 9 is racy and requires a delay upon setting the SR_INT bit */
+ QEMUTimer *sr_delay_timer;
+
int data_in_size;
int data_in_index;
int data_out_index;
diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c
index 66d016c..1b9a573 100644
--- a/hw/ppc/mac_newworld.c
+++ b/hw/ppc/mac_newworld.c
@@ -371,12 +371,13 @@ static void ppc_core99_init(MachineState *machine)
/* 970 gets a U3 bus */
pci_bus = pci_pmac_u3_init(pic, get_system_memory(), get_system_io());
machine_arch = ARCH_MAC99_U3;
- machine->usb |= defaults_enabled() && !machine->usb_disabled;
} else {
pci_bus = pci_pmac_init(pic, get_system_memory(), get_system_io());
machine_arch = ARCH_MAC99;
}
+ machine->usb |= defaults_enabled() && !machine->usb_disabled;
+
/* Timebase Frequency */
if (kvm_enabled()) {
tbfreq = kvmppc_get_tbfreq();
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 0ed8527..030ee35 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1021,9 +1021,19 @@ static void spapr_alloc_htab(sPAPRMachineState *spapr)
* RAM */
shift = kvmppc_reset_htab(spapr->htab_shift);
-
- if (shift > 0) {
- /* Kernel handles htab, we don't need to allocate one */
+ if (shift < 0) {
+ /*
+ * For HV KVM, host kernel will return -ENOMEM when requested
+ * HTAB size can't be allocated.
+ */
+ error_setg(&error_abort, "Failed to allocate HTAB of requested size, try with smaller maxmem");
+ } else if (shift > 0) {
+ /*
+ * Kernel handles htab, we don't need to allocate one
+ *
+ * Older kernels can fall back to lower HTAB shift values,
+ * but we don't allow booting of such guests.
+ */
if (shift != spapr->htab_shift) {
error_setg(&error_abort, "Failed to allocate HTAB of requested size, try with smaller maxmem");
}
@@ -1055,7 +1065,9 @@ static void spapr_reset_htab(sPAPRMachineState *spapr)
int index;
shift = kvmppc_reset_htab(spapr->htab_shift);
- if (shift > 0) {
+ if (shift < 0) {
+ error_setg(&error_abort, "Failed to reset HTAB");
+ } else if (shift > 0) {
if (shift != spapr->htab_shift) {
error_setg(&error_abort, "Requested HTAB allocation failed during reset");
}
@@ -1588,7 +1600,7 @@ static int htab_load(QEMUFile *f, void *opaque, int version_id)
static SaveVMHandlers savevm_htab_handlers = {
.save_live_setup = htab_save_setup,
.save_live_iterate = htab_save_iterate,
- .save_live_complete = htab_save_complete,
+ .save_live_complete_precopy = htab_save_complete,
.load_state = htab_load,
};
diff --git a/hw/s390x/css.c b/hw/s390x/css.c
index c033612..19851ce 100644
--- a/hw/s390x/css.c
+++ b/hw/s390x/css.c
@@ -892,8 +892,14 @@ int css_do_tsch_get_irb(SubchDev *sch, IRB *target_irb, int *irb_len)
/* If a unit check is pending, copy sense data. */
if ((s->dstat & SCSW_DSTAT_UNIT_CHECK) &&
(p->chars & PMCW_CHARS_MASK_CSENSE)) {
+ int i;
+
irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF | SCSW_FLAGS_MASK_ECTL;
+ /* Attention: sense_data is already BE! */
memcpy(irb.ecw, sch->sense_data, sizeof(sch->sense_data));
+ for (i = 0; i < ARRAY_SIZE(irb.ecw); i++) {
+ irb.ecw[i] = be32_to_cpu(irb.ecw[i]);
+ }
irb.esw[1] = 0x01000000 | (sizeof(sch->sense_data) << 8);
}
}
diff --git a/hw/s390x/ipl.c b/hw/s390x/ipl.c
index 5f7f349..b91fcc6 100644
--- a/hw/s390x/ipl.c
+++ b/hw/s390x/ipl.c
@@ -15,7 +15,6 @@
#include "cpu.h"
#include "elf.h"
#include "hw/loader.h"
-#include "hw/sysbus.h"
#include "hw/s390x/virtio-ccw.h"
#include "hw/s390x/css.h"
#include "ipl.h"
@@ -29,44 +28,6 @@
#define ZIPL_IMAGE_START 0x009000UL
#define IPL_PSW_MASK (PSW_MASK_32 | PSW_MASK_64)
-#define TYPE_S390_IPL "s390-ipl"
-#define S390_IPL(obj) \
- OBJECT_CHECK(S390IPLState, (obj), TYPE_S390_IPL)
-#if 0
-#define S390_IPL_CLASS(klass) \
- OBJECT_CLASS_CHECK(S390IPLState, (klass), TYPE_S390_IPL)
-#define S390_IPL_GET_CLASS(obj) \
- OBJECT_GET_CLASS(S390IPLState, (obj), TYPE_S390_IPL)
-#endif
-
-typedef struct S390IPLClass {
- /*< private >*/
- SysBusDeviceClass parent_class;
- /*< public >*/
-
- void (*parent_reset) (SysBusDevice *dev);
-} S390IPLClass;
-
-typedef struct S390IPLState {
- /*< private >*/
- SysBusDevice parent_obj;
- uint64_t start_addr;
- uint64_t bios_start_addr;
- bool enforce_bios;
- IplParameterBlock iplb;
- bool iplb_valid;
- bool reipl_requested;
-
- /*< public >*/
- char *kernel;
- char *initrd;
- char *cmdline;
- char *firmware;
- uint8_t cssid;
- uint8_t ssid;
- uint16_t devno;
-} S390IPLState;
-
static const VMStateDescription vmstate_iplb = {
.name = "ipl/iplb",
.version_id = 0,
@@ -110,11 +71,12 @@ static uint64_t bios_translate_addr(void *opaque, uint64_t srcaddr)
return srcaddr + dstaddr;
}
-static int s390_ipl_init(SysBusDevice *dev)
+static void s390_ipl_realize(DeviceState *dev, Error **errp)
{
S390IPLState *ipl = S390_IPL(dev);
uint64_t pentry = KERN_IMAGE_START;
int kernel_size;
+ Error *l_err = NULL;
int bios_size;
char *bios_filename;
@@ -132,7 +94,8 @@ static int s390_ipl_init(SysBusDevice *dev)
bios_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
if (bios_filename == NULL) {
- hw_error("could not find stage1 bootloader\n");
+ error_setg(&l_err, "could not find stage1 bootloader\n");
+ goto error;
}
bios_size = load_elf(bios_filename, bios_translate_addr, &fwbase,
@@ -150,7 +113,8 @@ static int s390_ipl_init(SysBusDevice *dev)
g_free(bios_filename);
if (bios_size == -1) {
- hw_error("could not load bootloader '%s'\n", bios_name);
+ error_setg(&l_err, "could not load bootloader '%s'\n", bios_name);
+ goto error;
}
/* default boot target is the bios */
@@ -164,8 +128,8 @@ static int s390_ipl_init(SysBusDevice *dev)
kernel_size = load_image_targphys(ipl->kernel, 0, ram_size);
}
if (kernel_size < 0) {
- fprintf(stderr, "could not load kernel '%s'\n", ipl->kernel);
- return -1;
+ error_setg(&l_err, "could not load kernel '%s'\n", ipl->kernel);
+ goto error;
}
/*
* Is it a Linux kernel (starting at 0x10000)? If yes, we fill in the
@@ -192,9 +156,8 @@ static int s390_ipl_init(SysBusDevice *dev)
initrd_size = load_image_targphys(ipl->initrd, initrd_offset,
ram_size - initrd_offset);
if (initrd_size == -1) {
- fprintf(stderr, "qemu: could not load initrd '%s'\n",
- ipl->initrd);
- exit(1);
+ error_setg(&l_err, "could not load initrd '%s'\n", ipl->initrd);
+ goto error;
}
/*
@@ -205,7 +168,9 @@ static int s390_ipl_init(SysBusDevice *dev)
stq_p(rom_ptr(INITRD_PARM_SIZE), initrd_size);
}
}
- return 0;
+ qemu_register_reset(qdev_reset_all_fn, dev);
+error:
+ error_propagate(errp, l_err);
}
static Property s390_ipl_properties[] = {
@@ -308,9 +273,8 @@ static void s390_ipl_reset(DeviceState *dev)
static void s390_ipl_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = s390_ipl_init;
+ dc->realize = s390_ipl_realize;
dc->props = s390_ipl_properties;
dc->reset = s390_ipl_reset;
dc->vmsd = &vmstate_ipl;
@@ -319,8 +283,8 @@ static void s390_ipl_class_init(ObjectClass *klass, void *data)
static const TypeInfo s390_ipl_info = {
.class_init = s390_ipl_class_init,
- .parent = TYPE_SYS_BUS_DEVICE,
- .name = "s390-ipl",
+ .parent = TYPE_DEVICE,
+ .name = TYPE_S390_IPL,
.instance_size = sizeof(S390IPLState),
};
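
The ipl.c conversion above follows the usual qdev move from a SysBusDeviceClass::init hook to DeviceClass::realize with local error propagation instead of hw_error(). A minimal sketch of that shape — my_device_realize() and the "my-firmware.img" name are invented; the Error API and qemu_find_file() calls are the real QEMU ones:

    #include "qemu-common.h"
    #include "hw/qdev.h"
    #include "qapi/error.h"

    static void my_device_realize(DeviceState *dev, Error **errp)
    {
        Error *local_err = NULL;
        char *path = qemu_find_file(QEMU_FILE_TYPE_BIOS, "my-firmware.img");

        if (path == NULL) {
            /* report instead of hw_error(): the caller decides how fatal it is */
            error_setg(&local_err, "could not find firmware");
            goto out;
        }
        g_free(path);
        /* ... further setup, each step filling local_err and jumping to out ... */
    out:
        error_propagate(errp, local_err);   /* no-op when local_err is NULL */
    }
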
diff --git a/hw/s390x/ipl.h b/hw/s390x/ipl.h
index 7f2b403..6b48ed7 100644
--- a/hw/s390x/ipl.h
+++ b/hw/s390x/ipl.h
@@ -12,6 +12,7 @@
#ifndef HW_S390_IPL_H
#define HW_S390_IPL_H
+#include "hw/qdev.h"
#include "cpu.h"
typedef struct IplParameterBlock {
@@ -25,4 +26,28 @@ void s390_ipl_prepare_cpu(S390CPU *cpu);
IplParameterBlock *s390_ipl_get_iplb(void);
void s390_reipl_request(void);
+#define TYPE_S390_IPL "s390-ipl"
+#define S390_IPL(obj) OBJECT_CHECK(S390IPLState, (obj), TYPE_S390_IPL)
+
+struct S390IPLState {
+ /*< private >*/
+ DeviceState parent_obj;
+ uint64_t start_addr;
+ uint64_t bios_start_addr;
+ bool enforce_bios;
+ IplParameterBlock iplb;
+ bool iplb_valid;
+ bool reipl_requested;
+
+ /*< public >*/
+ char *kernel;
+ char *initrd;
+ char *cmdline;
+ char *firmware;
+ uint8_t cssid;
+ uint8_t ssid;
+ uint16_t devno;
+};
+typedef struct S390IPLState S390IPLState;
+
#endif
diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c
index 560b66a..d5712ae 100644
--- a/hw/s390x/s390-pci-bus.c
+++ b/hw/s390x/s390-pci-bus.c
@@ -309,8 +309,7 @@ static IOMMUTLBEntry s390_translate_iommu(MemoryRegion *iommu, hwaddr addr,
uint64_t pte;
uint32_t flags;
S390PCIBusDevice *pbdev = container_of(iommu, S390PCIBusDevice, mr);
- S390pciState *s = S390_PCI_HOST_BRIDGE(pci_device_root_bus(pbdev->pdev)
- ->qbus.parent);
+ S390pciState *s;
IOMMUTLBEntry ret = {
.target_as = &address_space_memory,
.iova = 0,
@@ -319,8 +318,13 @@ static IOMMUTLBEntry s390_translate_iommu(MemoryRegion *iommu, hwaddr addr,
.perm = IOMMU_NONE,
};
+ if (!pbdev->configured || !pbdev->pdev) {
+ return ret;
+ }
+
DPRINTF("iommu trans addr 0x%" PRIx64 "\n", addr);
+ s = S390_PCI_HOST_BRIDGE(pci_device_root_bus(pbdev->pdev)->qbus.parent);
/* s390 does not have an APIC mapped to main storage so we use
* a separate AddressSpace only for msix notifications
*/
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index 84221f4..5a52ff2 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -104,8 +104,7 @@ void s390_memory_init(ram_addr_t mem_size)
MemoryRegion *ram = g_new(MemoryRegion, 1);
/* allocate RAM for core */
- memory_region_init_ram(ram, NULL, "s390.ram", mem_size, &error_fatal);
- vmstate_register_ram_global(ram);
+ memory_region_allocate_system_memory(ram, NULL, "s390.ram", mem_size);
memory_region_add_subregion(sysmem, 0, ram);
/* Initialize storage key device */
diff --git a/hw/s390x/s390-virtio.c b/hw/s390x/s390-virtio.c
index cbde977..51dc0a8 100644
--- a/hw/s390x/s390-virtio.c
+++ b/hw/s390x/s390-virtio.c
@@ -31,7 +31,6 @@
#include "hw/boards.h"
#include "hw/loader.h"
#include "hw/virtio/virtio.h"
-#include "hw/sysbus.h"
#include "sysemu/kvm.h"
#include "exec/address-spaces.h"
@@ -151,9 +150,9 @@ void s390_init_ipl_dev(const char *kernel_filename,
const char *firmware,
bool enforce_bios)
{
- DeviceState *dev;
+ Object *new = object_new(TYPE_S390_IPL);
+ DeviceState *dev = DEVICE(new);
- dev = qdev_create(NULL, "s390-ipl");
if (kernel_filename) {
qdev_prop_set_string(dev, "kernel", kernel_filename);
}
@@ -163,8 +162,9 @@ void s390_init_ipl_dev(const char *kernel_filename,
qdev_prop_set_string(dev, "cmdline", kernel_cmdline);
qdev_prop_set_string(dev, "firmware", firmware);
qdev_prop_set_bit(dev, "enforce_bios", enforce_bios);
- object_property_add_child(qdev_get_machine(), "s390-ipl",
- OBJECT(dev), NULL);
+ object_property_add_child(qdev_get_machine(), TYPE_S390_IPL,
+ new, NULL);
+ object_unref(new);
qdev_init_nofail(dev);
}
@@ -268,6 +268,10 @@ static void s390_init(MachineState *machine)
hwaddr virtio_region_len;
hwaddr virtio_region_start;
+ error_printf("WARNING\n"
+ "The s390-virtio machine (non-ccw) is deprecated.\n"
+ "It will be removed in 2.6. Please use s390-ccw-virtio\n");
+
if (machine->ram_slots) {
error_report("Memory hotplug not supported by the selected machine.");
exit(EXIT_FAILURE);
@@ -334,7 +338,7 @@ static void s390_machine_class_init(ObjectClass *oc, void *data)
NMIClass *nc = NMI_CLASS(oc);
mc->alias = "s390";
- mc->desc = "VirtIO based S390 machine";
+ mc->desc = "VirtIO based S390 machine (deprecated)";
mc->init = s390_init;
mc->reset = s390_machine_reset;
mc->block_default_type = IF_VIRTIO;
diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c
index dcd724e..d7dc667 100644
--- a/hw/scsi/megasas.c
+++ b/hw/scsi/megasas.c
@@ -757,7 +757,7 @@ static int megasas_ctrl_get_info(MegasasState *s, MegasasCmd *cmd)
memcpy(info.product_name, base_class->product_name, 24);
snprintf(info.serial_number, 32, "%s", s->hba_serial);
- snprintf(info.package_version, 0x60, "%s-QEMU", QEMU_VERSION);
+ snprintf(info.package_version, 0x60, "%s-QEMU", qemu_hw_version());
memcpy(info.image_component[0].name, "APP", 3);
snprintf(info.image_component[0].version, 10, "%s-QEMU",
base_class->product_version);
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
index d373c1b..fd1171e 100644
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -453,7 +453,7 @@ static bool scsi_target_emulate_inquiry(SCSITargetReq *r)
r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ. */
memcpy(&r->buf[8], "QEMU ", 8);
memcpy(&r->buf[16], "QEMU TARGET ", 16);
- pstrcpy((char *) &r->buf[32], 4, qemu_get_version());
+ pstrcpy((char *) &r->buf[32], 4, qemu_hw_version());
}
return true;
}
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index bada9a7..4797d83 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -90,7 +90,7 @@ struct SCSIDiskState
bool tray_locked;
};
-static int scsi_handle_rw_error(SCSIDiskReq *r, int error);
+static int scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);
static void scsi_free_request(SCSIRequest *req)
{
@@ -169,18 +169,18 @@ static void scsi_aio_complete(void *opaque, int ret)
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
- block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
if (r->req.io_canceled) {
scsi_req_cancel_complete(&r->req);
goto done;
}
if (ret < 0) {
- if (scsi_handle_rw_error(r, -ret)) {
+ if (scsi_handle_rw_error(r, -ret, true)) {
goto done;
}
}
+ block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
scsi_req_complete(&r->req, GOOD);
done:
@@ -247,7 +247,7 @@ static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
}
if (ret < 0) {
- if (scsi_handle_rw_error(r, -ret)) {
+ if (scsi_handle_rw_error(r, -ret, false)) {
goto done;
}
}
@@ -273,7 +273,11 @@ static void scsi_dma_complete(void *opaque, int ret)
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
- block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
+ if (ret < 0) {
+ block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
+ } else {
+ block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
+ }
scsi_dma_complete_noio(r, ret);
}
@@ -285,18 +289,18 @@ static void scsi_read_complete(void * opaque, int ret)
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
- block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
if (r->req.io_canceled) {
scsi_req_cancel_complete(&r->req);
goto done;
}
if (ret < 0) {
- if (scsi_handle_rw_error(r, -ret)) {
+ if (scsi_handle_rw_error(r, -ret, true)) {
goto done;
}
}
+ block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
DPRINTF("Data ready tag=0x%x len=%zd\n", r->req.tag, r->qiov.size);
n = r->qiov.size / 512;
@@ -322,7 +326,7 @@ static void scsi_do_read(SCSIDiskReq *r, int ret)
}
if (ret < 0) {
- if (scsi_handle_rw_error(r, -ret)) {
+ if (scsi_handle_rw_error(r, -ret, false)) {
goto done;
}
}
@@ -355,7 +359,11 @@ static void scsi_do_read_cb(void *opaque, int ret)
assert (r->req.aiocb != NULL);
r->req.aiocb = NULL;
- block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
+ if (ret < 0) {
+ block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
+ } else {
+ block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
+ }
scsi_do_read(opaque, ret);
}
@@ -407,7 +415,7 @@ static void scsi_read_data(SCSIRequest *req)
* scsi_handle_rw_error always manages its reference counts, independent
* of the return value.
*/
-static int scsi_handle_rw_error(SCSIDiskReq *r, int error)
+static int scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed)
{
bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
@@ -415,6 +423,9 @@ static int scsi_handle_rw_error(SCSIDiskReq *r, int error)
is_read, error);
if (action == BLOCK_ERROR_ACTION_REPORT) {
+ if (acct_failed) {
+ block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
+ }
switch (error) {
case ENOMEDIUM:
scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
@@ -452,7 +463,7 @@ static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
}
if (ret < 0) {
- if (scsi_handle_rw_error(r, -ret)) {
+ if (scsi_handle_rw_error(r, -ret, false)) {
goto done;
}
}
@@ -481,7 +492,11 @@ static void scsi_write_complete(void * opaque, int ret)
assert (r->req.aiocb != NULL);
r->req.aiocb = NULL;
- block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
+ if (ret < 0) {
+ block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
+ } else {
+ block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
+ }
scsi_write_complete_noio(r, ret);
}
@@ -1592,7 +1607,7 @@ static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
}
if (ret < 0) {
- if (scsi_handle_rw_error(r, -ret)) {
+ if (scsi_handle_rw_error(r, -ret, false)) {
goto done;
}
}
@@ -1696,18 +1711,19 @@ static void scsi_write_same_complete(void *opaque, int ret)
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
- block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
if (r->req.io_canceled) {
scsi_req_cancel_complete(&r->req);
goto done;
}
if (ret < 0) {
- if (scsi_handle_rw_error(r, -ret)) {
+ if (scsi_handle_rw_error(r, -ret, true)) {
goto done;
}
}
+ block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
+
data->nb_sectors -= data->iov.iov_len / 512;
data->sector += data->iov.iov_len / 512;
data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
@@ -2315,7 +2331,7 @@ static void scsi_realize(SCSIDevice *dev, Error **errp)
}
if (!s->version) {
- s->version = g_strdup(qemu_get_version());
+ s->version = g_strdup(qemu_hw_version());
}
if (!s->vendor) {
s->vendor = g_strdup("QEMU");
diff --git a/hw/timer/hpet.c b/hw/timer/hpet.c
index 3037bef..7f0391c 100644
--- a/hw/timer/hpet.c
+++ b/hw/timer/hpet.c
@@ -116,12 +116,12 @@ static uint32_t timer_enabled(HPETTimer *t)
static uint32_t hpet_time_after(uint64_t a, uint64_t b)
{
- return ((int32_t)(b) - (int32_t)(a) < 0);
+ return ((int32_t)(b - a) < 0);
}
static uint32_t hpet_time_after64(uint64_t a, uint64_t b)
{
- return ((int64_t)(b) - (int64_t)(a) < 0);
+ return ((int64_t)(b - a) < 0);
}
static uint64_t ticks_to_ns(uint64_t value)
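
The hpet_time_after() change subtracts the raw counter values first and only then reinterprets the difference as signed, which stays well defined across counter wraparound. A minimal standalone sketch of the two forms (illustration only, not part of the patch; build with -fsanitize=undefined to see the old form overflow):

#include <stdint.h>
#include <stdio.h>

static int time_after_old(uint64_t a, uint64_t b)
{
    /* Truncate each operand, then subtract as signed ints: overflows
     * when the two values sit in opposite halves of the 32-bit range. */
    return (int32_t)(b) - (int32_t)(a) < 0;
}

static int time_after_new(uint64_t a, uint64_t b)
{
    /* Difference computed in uint64_t (wraps modulo 2^64), then the
     * low 32 bits are read back as a signed distance. */
    return (int32_t)(b - a) < 0;
}

int main(void)
{
    uint64_t a = 0xc0000000u;   /* three quarters through the counter range */
    uint64_t b = 0x40000000u;   /* one quarter through the counter range */

    printf("new: %d\n", time_after_new(a, b));  /* 1 on two's-complement targets */
    printf("old: %d\n", time_after_old(a, b));  /* UB: the subtraction exceeds INT32_MAX */
    return 0;
}
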
diff --git a/hw/tpm/tpm_tis.c b/hw/tpm/tpm_tis.c
index 0806b5f..ff073d5 100644
--- a/hw/tpm/tpm_tis.c
+++ b/hw/tpm/tpm_tis.c
@@ -141,7 +141,7 @@
#define TPM_TIS_IFACE_ID_SUPPORTED_FLAGS1_3 \
(TPM_TIS_IFACE_ID_INTERFACE_TIS1_3 | \
- (~0 << 4)/* all of it is don't care */)
+ (~0u << 4)/* all of it is don't care */)
/* if backend was a TPM 2.0: */
#define TPM_TIS_IFACE_ID_SUPPORTED_FLAGS2_0 \
diff --git a/hw/usb/ccid-card-emulated.c b/hw/usb/ccid-card-emulated.c
index 72329ed..869a63c 100644
--- a/hw/usb/ccid-card-emulated.c
+++ b/hw/usb/ccid-card-emulated.c
@@ -166,7 +166,7 @@ static void emulated_push_event(EmulatedState *card, EmulEvent *event)
static void emulated_push_type(EmulatedState *card, uint32_t type)
{
- EmulEvent *event = (EmulEvent *)g_malloc(sizeof(EmulEvent));
+ EmulEvent *event = g_new(EmulEvent, 1);
assert(event);
event->p.gen.type = type;
@@ -175,7 +175,7 @@ static void emulated_push_type(EmulatedState *card, uint32_t type)
static void emulated_push_error(EmulatedState *card, uint64_t code)
{
- EmulEvent *event = (EmulEvent *)g_malloc(sizeof(EmulEvent));
+ EmulEvent *event = g_new(EmulEvent, 1);
assert(event);
event->p.error.type = EMUL_ERROR;
diff --git a/hw/usb/dev-mtp.c b/hw/usb/dev-mtp.c
index 809b1cb..a276267 100644
--- a/hw/usb/dev-mtp.c
+++ b/hw/usb/dev-mtp.c
@@ -359,8 +359,7 @@ static void usb_mtp_object_readdir(MTPState *s, MTPObject *o)
}
while ((entry = readdir(dir)) != NULL) {
if ((o->nchildren % 32) == 0) {
- o->children = g_realloc(o->children,
- (o->nchildren + 32) * sizeof(MTPObject *));
+ o->children = g_renew(MTPObject *, o->children, o->nchildren + 32);
}
o->children[o->nchildren] =
usb_mtp_object_alloc(s, s->next_handle++, o, entry->d_name);
diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c
index 1c57e20..268ab36 100644
--- a/hw/usb/hcd-xhci.c
+++ b/hw/usb/hcd-xhci.c
@@ -2188,7 +2188,7 @@ static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid,
xfer->trbs = NULL;
}
if (!xfer->trbs) {
- xfer->trbs = g_malloc(sizeof(XHCITRB) * length);
+ xfer->trbs = g_new(XHCITRB, length);
xfer->trb_alloced = length;
}
xfer->trb_count = length;
diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c
index 38086cd..30ff742 100644
--- a/hw/usb/redirect.c
+++ b/hw/usb/redirect.c
@@ -322,7 +322,7 @@ static void packet_id_queue_add(struct PacketIdQueue *q, uint64_t id)
DPRINTF("adding packet id %"PRIu64" to %s queue\n", id, q->name);
- e = g_malloc0(sizeof(struct PacketIdQueueEntry));
+ e = g_new0(struct PacketIdQueueEntry, 1);
e->id = id;
QTAILQ_INSERT_TAIL(&q->head, e, next);
q->size++;
@@ -468,7 +468,7 @@ static void bufp_alloc(USBRedirDevice *dev, uint8_t *data, uint16_t len,
dev->endpoint[EP2I(ep)].bufpq_dropping_packets = 0;
}
- bufp = g_malloc(sizeof(struct buf_packet));
+ bufp = g_new(struct buf_packet, 1);
bufp->data = data;
bufp->len = len;
bufp->offset = 0;
@@ -2234,7 +2234,7 @@ static int usbredir_get_bufpq(QEMUFile *f, void *priv, size_t unused)
endp->bufpq_size = qemu_get_be32(f);
for (i = 0; i < endp->bufpq_size; i++) {
- bufp = g_malloc(sizeof(struct buf_packet));
+ bufp = g_new(struct buf_packet, 1);
bufp->len = qemu_get_be32(f);
bufp->status = qemu_get_be32(f);
bufp->offset = 0;
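
The g_malloc(sizeof(...) * n) to g_new()/g_new0()/g_renew() conversions in these USB hunks (and in the vfio hunks below) are mechanical, but worth spelling out: the GLib macros return an already-typed pointer and, in current GLib, check the count-times-element-size multiplication for overflow. A small standalone sketch with a made-up struct (illustration only, not QEMU code):

#include <glib.h>

typedef struct {
    guint64 id;
    guint32 len;
} DemoPacket;   /* hypothetical element type, for illustration only */

int main(void)
{
    /* Equivalent allocations of 16 zeroed elements; g_new0 keeps the
     * element type in the call and avoids writing sizeof by hand. */
    DemoPacket *a = g_malloc0(16 * sizeof(DemoPacket));
    DemoPacket *b = g_new0(DemoPacket, 16);

    /* Growing an array: g_renew is the typed counterpart of g_realloc. */
    b = g_renew(DemoPacket, b, 32);

    g_free(a);
    g_free(b);
    return 0;
}
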
diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c
index c675d1b..30c68a1 100644
--- a/hw/vfio/pci-quirks.c
+++ b/hw/vfio/pci-quirks.c
@@ -284,7 +284,7 @@ static void vfio_vga_probe_ati_3c3_quirk(VFIOPCIDevice *vdev)
}
quirk = g_malloc0(sizeof(*quirk));
- quirk->mem = g_malloc0(sizeof(MemoryRegion));
+ quirk->mem = g_new0(MemoryRegion, 1);
quirk->nr_mem = 1;
memory_region_init_io(quirk->mem, OBJECT(vdev), &vfio_ati_3c3_quirk, vdev,
@@ -319,7 +319,7 @@ static void vfio_probe_ati_bar4_quirk(VFIOPCIDevice *vdev, int nr)
}
quirk = g_malloc0(sizeof(*quirk));
- quirk->mem = g_malloc0(sizeof(MemoryRegion) * 2);
+ quirk->mem = g_new0(MemoryRegion, 2);
quirk->nr_mem = 2;
window = quirk->data = g_malloc0(sizeof(*window) +
sizeof(VFIOConfigWindowMatch));
@@ -368,7 +368,7 @@ static void vfio_probe_ati_bar2_quirk(VFIOPCIDevice *vdev, int nr)
quirk = g_malloc0(sizeof(*quirk));
mirror = quirk->data = g_malloc0(sizeof(*mirror));
- mirror->mem = quirk->mem = g_malloc0(sizeof(MemoryRegion));
+ mirror->mem = quirk->mem = g_new0(MemoryRegion, 1);
quirk->nr_mem = 1;
mirror->vdev = vdev;
mirror->offset = 0x4000;
@@ -544,7 +544,7 @@ static void vfio_vga_probe_nvidia_3d0_quirk(VFIOPCIDevice *vdev)
quirk = g_malloc0(sizeof(*quirk));
quirk->data = data = g_malloc0(sizeof(*data));
- quirk->mem = g_malloc0(sizeof(MemoryRegion) * 2);
+ quirk->mem = g_new0(MemoryRegion, 2);
quirk->nr_mem = 2;
data->vdev = vdev;
@@ -661,7 +661,7 @@ static void vfio_probe_nvidia_bar5_quirk(VFIOPCIDevice *vdev, int nr)
}
quirk = g_malloc0(sizeof(*quirk));
- quirk->mem = g_malloc0(sizeof(MemoryRegion) * 4);
+ quirk->mem = g_new0(MemoryRegion, 4);
quirk->nr_mem = 4;
bar5 = quirk->data = g_malloc0(sizeof(*bar5) +
(sizeof(VFIOConfigWindowMatch) * 2));
@@ -756,7 +756,7 @@ static void vfio_probe_nvidia_bar0_quirk(VFIOPCIDevice *vdev, int nr)
quirk = g_malloc0(sizeof(*quirk));
mirror = quirk->data = g_malloc0(sizeof(*mirror));
- mirror->mem = quirk->mem = g_malloc0(sizeof(MemoryRegion));
+ mirror->mem = quirk->mem = g_new0(MemoryRegion, 1);
quirk->nr_mem = 1;
mirror->vdev = vdev;
mirror->offset = 0x88000;
@@ -775,7 +775,7 @@ static void vfio_probe_nvidia_bar0_quirk(VFIOPCIDevice *vdev, int nr)
if (vdev->has_vga) {
quirk = g_malloc0(sizeof(*quirk));
mirror = quirk->data = g_malloc0(sizeof(*mirror));
- mirror->mem = quirk->mem = g_malloc0(sizeof(MemoryRegion));
+ mirror->mem = quirk->mem = g_new0(MemoryRegion, 1);
quirk->nr_mem = 1;
mirror->vdev = vdev;
mirror->offset = 0x1800;
@@ -938,7 +938,7 @@ static void vfio_probe_rtl8168_bar2_quirk(VFIOPCIDevice *vdev, int nr)
}
quirk = g_malloc0(sizeof(*quirk));
- quirk->mem = g_malloc0(sizeof(MemoryRegion) * 2);
+ quirk->mem = g_new0(MemoryRegion, 2);
quirk->nr_mem = 2;
quirk->data = rtl = g_malloc0(sizeof(*rtl));
rtl->vdev = vdev;
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index 8fadbcf..1fb868c 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -28,6 +28,7 @@
#include "config.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
+#include "hw/pci/pci_bridge.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
@@ -586,7 +587,7 @@ static void vfio_msix_enable(VFIOPCIDevice *vdev)
{
vfio_disable_interrupts(vdev);
- vdev->msi_vectors = g_malloc0(vdev->msix->entries * sizeof(VFIOMSIVector));
+ vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);
vdev->interrupt = VFIO_INT_MSIX;
@@ -622,7 +623,7 @@ static void vfio_msi_enable(VFIOPCIDevice *vdev)
vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
- vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector));
+ vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);
for (i = 0; i < vdev->nr_vectors; i++) {
VFIOMSIVector *vector = &vdev->msi_vectors[i];
@@ -1524,13 +1525,38 @@ static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size)
}
if (!pci_bus_is_express(vdev->pdev.bus)) {
+ PCIBus *bus = vdev->pdev.bus;
+ PCIDevice *bridge;
+
/*
- * Use express capability as-is on PCI bus. It doesn't make much
- * sense to even expose, but some drivers (ex. tg3) depend on it
- * and guests don't seem to be particular about it. We'll need
- * to revist this or force express devices to express buses if we
- * ever expose an IOMMU to the guest.
+ * Traditionally PCI device assignment exposes the PCIe capability
+ * as-is on non-express buses. The reason being that some drivers
+ * simply assume that it's there, for example tg3. However when
+ * we're running on a native PCIe machine type, like Q35, we need
+ * to hide the PCIe capability. The reason for this is twofold;
+ * first Windows guests get a Code 10 error when the PCIe capability
+ * is exposed in this configuration. Therefore express devices won't
+ * work at all unless they're attached to express buses in the VM.
+ * Second, a native PCIe machine introduces the possibility of fine
+ * granularity IOMMUs supporting both translation and isolation.
+ * Guest code to discover the IOMMU visibility of a device, such as
+ * IOMMU grouping code on Linux, is very aware of device types and
+ * valid transitions between bus types. An express device on a non-
+ * express bus is not a valid combination on bare metal systems.
+ *
+ * Drivers that require a PCIe capability to make the device
+ * functional are simply going to need to have their devices placed
+ * on a PCIe bus in the VM.
*/
+ while (!pci_bus_is_root(bus)) {
+ bridge = pci_bridge_get_device(bus);
+ bus = bridge->bus;
+ }
+
+ if (pci_bus_is_express(bus)) {
+ return 0;
+ }
+
} else if (pci_bus_is_root(vdev->pdev.bus)) {
/*
* On a Root Complex bus Endpoints become Root Complex Integrated
diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c
index 5c1156c..289b498 100644
--- a/hw/vfio/platform.c
+++ b/hw/vfio/platform.c
@@ -478,7 +478,7 @@ static int vfio_populate_device(VFIODevice *vbasedev)
struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
VFIORegion *ptr;
- vdev->regions[i] = g_malloc0(sizeof(VFIORegion));
+ vdev->regions[i] = g_new0(VFIORegion, 1);
ptr = vdev->regions[i];
reg_info.index = i;
ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
diff --git a/hw/virtio/dataplane/vring.c b/hw/virtio/dataplane/vring.c
index 68f1994..23f667e 100644
--- a/hw/virtio/dataplane/vring.c
+++ b/hw/virtio/dataplane/vring.c
@@ -25,15 +25,30 @@
/* vring_map can be coupled with vring_unmap or (if you still have the
* value returned in *mr) memory_region_unref.
+ * Returns NULL on failure.
+ * Callers that can handle a partial mapping must supply a mapped_len pointer to
+ * get the actual length mapped.
+ * Passing mapped_len == NULL requires either a full mapping or a failure.
*/
-static void *vring_map(MemoryRegion **mr, hwaddr phys, hwaddr len,
+static void *vring_map(MemoryRegion **mr, hwaddr phys,
+ hwaddr len, hwaddr *mapped_len,
bool is_write)
{
MemoryRegionSection section = memory_region_find(get_system_memory(), phys, len);
+ uint64_t size;
- if (!section.mr || int128_get64(section.size) < len) {
+ if (!section.mr) {
goto out;
}
+
+ size = int128_get64(section.size);
+ assert(size);
+
+ /* Passing mapped_len == NULL requires either a full mapping or a failure. */
+ if (!mapped_len && size < len) {
+ goto out;
+ }
+
if (is_write && section.readonly) {
goto out;
}
@@ -46,6 +61,10 @@ static void *vring_map(MemoryRegion **mr, hwaddr phys, hwaddr len,
goto out;
}
+ if (mapped_len) {
+ *mapped_len = MIN(size, len);
+ }
+
*mr = section.mr;
return memory_region_get_ram_ptr(section.mr) + section.offset_within_region;
@@ -78,7 +97,7 @@ bool vring_setup(Vring *vring, VirtIODevice *vdev, int n)
addr = virtio_queue_get_desc_addr(vdev, n);
size = virtio_queue_get_desc_size(vdev, n);
/* Map the descriptor area as read only */
- ptr = vring_map(&vring->mr_desc, addr, size, false);
+ ptr = vring_map(&vring->mr_desc, addr, size, NULL, false);
if (!ptr) {
error_report("Failed to map 0x%" HWADDR_PRIx " byte for vring desc "
"at 0x%" HWADDR_PRIx,
@@ -92,7 +111,7 @@ bool vring_setup(Vring *vring, VirtIODevice *vdev, int n)
/* Add the size of the used_event_idx */
size += sizeof(uint16_t);
/* Map the driver area as read only */
- ptr = vring_map(&vring->mr_avail, addr, size, false);
+ ptr = vring_map(&vring->mr_avail, addr, size, NULL, false);
if (!ptr) {
error_report("Failed to map 0x%" HWADDR_PRIx " byte for vring avail "
"at 0x%" HWADDR_PRIx,
@@ -106,7 +125,7 @@ bool vring_setup(Vring *vring, VirtIODevice *vdev, int n)
/* Add the size of the avail_event_idx */
size += sizeof(uint16_t);
/* Map the device area as read-write */
- ptr = vring_map(&vring->mr_used, addr, size, true);
+ ptr = vring_map(&vring->mr_used, addr, size, NULL, true);
if (!ptr) {
error_report("Failed to map 0x%" HWADDR_PRIx " byte for vring used "
"at 0x%" HWADDR_PRIx,
@@ -206,6 +225,7 @@ static int get_desc(Vring *vring, VirtQueueElement *elem,
struct iovec *iov;
hwaddr *addr;
MemoryRegion *mr;
+ hwaddr len;
if (desc->flags & VRING_DESC_F_WRITE) {
num = &elem->in_num;
@@ -224,26 +244,30 @@ static int get_desc(Vring *vring, VirtQueueElement *elem,
}
}
- /* Stop for now if there are not enough iovecs available. */
- if (*num >= VIRTQUEUE_MAX_SIZE) {
- error_report("Invalid SG num: %u", *num);
- return -EFAULT;
- }
+ while (desc->len) {
+ /* Stop for now if there are not enough iovecs available. */
+ if (*num >= VIRTQUEUE_MAX_SIZE) {
+ error_report("Invalid SG num: %u", *num);
+ return -EFAULT;
+ }
- /* TODO handle non-contiguous memory across region boundaries */
- iov->iov_base = vring_map(&mr, desc->addr, desc->len,
- desc->flags & VRING_DESC_F_WRITE);
- if (!iov->iov_base) {
- error_report("Failed to map descriptor addr %#" PRIx64 " len %u",
- (uint64_t)desc->addr, desc->len);
- return -EFAULT;
+ iov->iov_base = vring_map(&mr, desc->addr, desc->len, &len,
+ desc->flags & VRING_DESC_F_WRITE);
+ if (!iov->iov_base) {
+ error_report("Failed to map descriptor addr %#" PRIx64 " len %u",
+ (uint64_t)desc->addr, desc->len);
+ return -EFAULT;
+ }
+
+ /* The MemoryRegion is looked up again and unref'ed later, leave the
+ * ref in place. */
+ (iov++)->iov_len = len;
+ *addr++ = desc->addr;
+ desc->len -= len;
+ desc->addr += len;
+ *num += 1;
}
- /* The MemoryRegion is looked up again and unref'ed later, leave the
- * ref in place. */
- iov->iov_len = desc->len;
- *addr = desc->addr;
- *num += 1;
return 0;
}
@@ -257,6 +281,21 @@ static void copy_in_vring_desc(VirtIODevice *vdev,
host->next = virtio_lduw_p(vdev, &guest->next);
}
+static bool read_vring_desc(VirtIODevice *vdev,
+ hwaddr guest,
+ struct vring_desc *host)
+{
+ if (address_space_read(&address_space_memory, guest, MEMTXATTRS_UNSPECIFIED,
+ (uint8_t *)host, sizeof *host)) {
+ return false;
+ }
+ host->addr = virtio_tswap64(vdev, host->addr);
+ host->len = virtio_tswap32(vdev, host->len);
+ host->flags = virtio_tswap16(vdev, host->flags);
+ host->next = virtio_tswap16(vdev, host->next);
+ return true;
+}
+
/* This is stolen from linux/drivers/vhost/vhost.c. */
static int get_indirect(VirtIODevice *vdev, Vring *vring,
VirtQueueElement *elem, struct vring_desc *indirect)
@@ -284,23 +323,16 @@ static int get_indirect(VirtIODevice *vdev, Vring *vring,
}
do {
- struct vring_desc *desc_ptr;
- MemoryRegion *mr;
-
/* Translate indirect descriptor */
- desc_ptr = vring_map(&mr,
- indirect->addr + found * sizeof(desc),
- sizeof(desc), false);
- if (!desc_ptr) {
- error_report("Failed to map indirect descriptor "
+ if (!read_vring_desc(vdev, indirect->addr + found * sizeof(desc),
+ &desc)) {
+ error_report("Failed to read indirect descriptor "
"addr %#" PRIx64 " len %zu",
(uint64_t)indirect->addr + found * sizeof(desc),
sizeof(desc));
vring->broken = true;
return -EFAULT;
}
- copy_in_vring_desc(vdev, desc_ptr, &desc);
- memory_region_unref(mr);
/* Ensure descriptor has been loaded before accessing fields */
barrier(); /* read_barrier_depends(); */
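
With mapped_len in place, get_desc() no longer rejects descriptors whose guest-physical range crosses a memory-region boundary: it keeps calling vring_map(), emits one iovec per contiguous piece, and advances addr/len by whatever each call managed to cover. A toy standalone model of that splitting loop (fixed 4 KiB regions and invented helper names, not the QEMU API):

#include <stdint.h>
#include <stdio.h>
#include <sys/uio.h>

#define REGION_SIZE 0x1000u   /* pretend guest memory is built from 4 KiB regions */
#define MAX_IOV     8

static uint8_t guest_mem[8 * REGION_SIZE];

/* Toy stand-in for vring_map(): maps at most up to the next region
 * boundary and reports how much was actually mapped. */
static void *toy_map(uint64_t gpa, uint32_t len, uint32_t *mapped_len)
{
    if (gpa >= sizeof(guest_mem)) {
        return NULL;
    }
    uint32_t to_boundary = REGION_SIZE - (uint32_t)(gpa % REGION_SIZE);
    uint32_t avail = (uint32_t)(sizeof(guest_mem) - gpa);
    uint32_t n = len;
    if (n > to_boundary) n = to_boundary;
    if (n > avail) n = avail;
    *mapped_len = n;
    return guest_mem + gpa;
}

/* Split one descriptor into as many iovecs as it takes. */
static int map_desc(uint64_t addr, uint32_t len, struct iovec *iov, unsigned *num)
{
    while (len) {
        uint32_t mapped;
        void *base;

        if (*num >= MAX_IOV) {
            return -1;                /* not enough iovec slots */
        }
        base = toy_map(addr, len, &mapped);
        if (!base) {
            return -1;                /* mapping failure */
        }
        iov[*num].iov_base = base;
        iov[*num].iov_len = mapped;
        (*num)++;
        addr += mapped;
        len -= mapped;
    }
    return 0;
}

int main(void)
{
    struct iovec iov[MAX_IOV];
    unsigned num = 0;

    /* A 10000-byte descriptor starting 16 bytes before a region boundary
     * ends up split into several iovec entries. */
    if (map_desc(REGION_SIZE - 16, 10000, iov, &num) == 0) {
        for (unsigned i = 0; i < num; i++) {
            printf("iov[%u]: len=%zu\n", i, iov[i].iov_len);
        }
    }
    return 0;
}
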
diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index 1d5f684..b734a60 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -156,7 +156,7 @@ static int vhost_kernel_set_owner(struct vhost_dev *dev)
static int vhost_kernel_reset_device(struct vhost_dev *dev)
{
- return vhost_kernel_call(dev, VHOST_RESET_DEVICE, NULL);
+ return vhost_kernel_call(dev, VHOST_RESET_OWNER, NULL);
}
static int vhost_kernel_get_vq_index(struct vhost_dev *dev, int idx)
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 83c84f1..c443602 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -43,7 +43,7 @@ typedef enum VhostUserRequest {
VHOST_USER_GET_FEATURES = 1,
VHOST_USER_SET_FEATURES = 2,
VHOST_USER_SET_OWNER = 3,
- VHOST_USER_RESET_DEVICE = 4,
+ VHOST_USER_RESET_OWNER = 4,
VHOST_USER_SET_MEM_TABLE = 5,
VHOST_USER_SET_LOG_BASE = 6,
VHOST_USER_SET_LOG_FD = 7,
@@ -75,6 +75,11 @@ typedef struct VhostUserMemory {
VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;
+typedef struct VhostUserLog {
+ uint64_t mmap_size;
+ uint64_t mmap_offset;
+} VhostUserLog;
+
typedef struct VhostUserMsg {
VhostUserRequest request;
@@ -89,6 +94,7 @@ typedef struct VhostUserMsg {
struct vhost_vring_state state;
struct vhost_vring_addr addr;
VhostUserMemory memory;
+ VhostUserLog log;
} payload;
} QEMU_PACKED VhostUserMsg;
@@ -157,7 +163,7 @@ static bool vhost_user_one_time_request(VhostUserRequest request)
{
switch (request) {
case VHOST_USER_SET_OWNER:
- case VHOST_USER_RESET_DEVICE:
+ case VHOST_USER_RESET_OWNER:
case VHOST_USER_SET_MEM_TABLE:
case VHOST_USER_GET_QUEUE_NUM:
return true;
@@ -200,8 +206,9 @@ static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
VhostUserMsg msg = {
.request = VHOST_USER_SET_LOG_BASE,
.flags = VHOST_USER_VERSION,
- .payload.u64 = base,
- .size = sizeof(msg.payload.u64),
+ .payload.log.mmap_size = log->size,
+ .payload.log.mmap_offset = 0,
+ .size = sizeof(msg.payload.log),
};
if (shmfd && log->fd != -1) {
@@ -486,7 +493,7 @@ static int vhost_user_set_owner(struct vhost_dev *dev)
static int vhost_user_reset_device(struct vhost_dev *dev)
{
VhostUserMsg msg = {
- .request = VHOST_USER_RESET_DEVICE,
+ .request = VHOST_USER_RESET_OWNER,
.flags = VHOST_USER_VERSION,
};
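
The new VhostUserLog payload replaces the bare u64 for VHOST_USER_SET_LOG_BASE, so the master now passes the slave both the mmap size and the offset of the dirty log. A simplified standalone re-declaration of the message layout to show the size difference (illustration only; the authoritative definitions are the ones added above):

#include <stdint.h>
#include <stdio.h>

typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;

typedef struct VhostUserMsg {
    uint32_t request;
    uint32_t flags;
    uint32_t size;       /* length of the payload that follows */
    union {
        uint64_t u64;
        VhostUserLog log;
    } payload;
} __attribute__((packed)) VhostUserMsg;

int main(void)
{
    /* The old encoding carried only a u64; the new one sends mmap_size
     * plus mmap_offset, doubling the SET_LOG_BASE payload. */
    printf("payload.u64: %zu bytes, payload.log: %zu bytes\n",
           sizeof(((VhostUserMsg *)0)->payload.u64),
           sizeof(((VhostUserMsg *)0)->payload.log));
    return 0;
}
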
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index de29968..1794f0d 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1226,6 +1226,11 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
}
}
+ if (hdev->vhost_ops->vhost_set_vring_enable) {
+ /* only enable first vq pair by default */
+ hdev->vhost_ops->vhost_set_vring_enable(hdev, hdev->vq_index == 0);
+ }
+
return 0;
fail_log:
vhost_log_put(hdev, false);
@@ -1256,6 +1261,10 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
hdev->vq_index + i);
}
+ if (hdev->vhost_ops->vhost_set_vring_enable) {
+ hdev->vhost_ops->vhost_set_vring_enable(hdev, 0);
+ }
+
vhost_log_put(hdev, true);
hdev->started = false;
hdev->log = NULL;
diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c
index c419b17..9671635 100644
--- a/hw/virtio/virtio-balloon.c
+++ b/hw/virtio/virtio-balloon.c
@@ -37,9 +37,11 @@
static void balloon_page(void *addr, int deflate)
{
#if defined(__linux__)
- if (!kvm_enabled() || kvm_has_sync_mmu())
+ if (!qemu_balloon_is_inhibited() && (!kvm_enabled() ||
+ kvm_has_sync_mmu())) {
qemu_madvise(addr, TARGET_PAGE_SIZE,
deflate ? QEMU_MADV_WILLNEED : QEMU_MADV_DONTNEED);
+ }
#endif
}
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index f55dd2b..dd48562 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -86,6 +86,129 @@ static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
qemu_put_be16(f, vdev->config_vector);
}
+static void virtio_pci_load_modern_queue_state(VirtIOPCIQueue *vq,
+ QEMUFile *f)
+{
+ vq->num = qemu_get_be16(f);
+ vq->enabled = qemu_get_be16(f);
+ vq->desc[0] = qemu_get_be32(f);
+ vq->desc[1] = qemu_get_be32(f);
+ vq->avail[0] = qemu_get_be32(f);
+ vq->avail[1] = qemu_get_be32(f);
+ vq->used[0] = qemu_get_be32(f);
+ vq->used[1] = qemu_get_be32(f);
+}
+
+static bool virtio_pci_has_extra_state(DeviceState *d)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+ return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
+}
+
+static int get_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size)
+{
+ VirtIOPCIProxy *proxy = pv;
+ int i;
+
+ proxy->dfselect = qemu_get_be32(f);
+ proxy->gfselect = qemu_get_be32(f);
+ proxy->guest_features[0] = qemu_get_be32(f);
+ proxy->guest_features[1] = qemu_get_be32(f);
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ virtio_pci_load_modern_queue_state(&proxy->vqs[i], f);
+ }
+
+ return 0;
+}
+
+static void virtio_pci_save_modern_queue_state(VirtIOPCIQueue *vq,
+ QEMUFile *f)
+{
+ qemu_put_be16(f, vq->num);
+ qemu_put_be16(f, vq->enabled);
+ qemu_put_be32(f, vq->desc[0]);
+ qemu_put_be32(f, vq->desc[1]);
+ qemu_put_be32(f, vq->avail[0]);
+ qemu_put_be32(f, vq->avail[1]);
+ qemu_put_be32(f, vq->used[0]);
+ qemu_put_be32(f, vq->used[1]);
+}
+
+static void put_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size)
+{
+ VirtIOPCIProxy *proxy = pv;
+ int i;
+
+ qemu_put_be32(f, proxy->dfselect);
+ qemu_put_be32(f, proxy->gfselect);
+ qemu_put_be32(f, proxy->guest_features[0]);
+ qemu_put_be32(f, proxy->guest_features[1]);
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ virtio_pci_save_modern_queue_state(&proxy->vqs[i], f);
+ }
+}
+
+static const VMStateInfo vmstate_info_virtio_pci_modern_state = {
+ .name = "virtqueue_state",
+ .get = get_virtio_pci_modern_state,
+ .put = put_virtio_pci_modern_state,
+};
+
+static bool virtio_pci_modern_state_needed(void *opaque)
+{
+ VirtIOPCIProxy *proxy = opaque;
+
+ return !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
+}
+
+static const VMStateDescription vmstate_virtio_pci_modern_state = {
+ .name = "virtio_pci/modern_state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = &virtio_pci_modern_state_needed,
+ .fields = (VMStateField[]) {
+ {
+ .name = "modern_state",
+ .version_id = 0,
+ .field_exists = NULL,
+ .size = 0,
+ .info = &vmstate_info_virtio_pci_modern_state,
+ .flags = VMS_SINGLE,
+ .offset = 0,
+ },
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_virtio_pci = {
+ .name = "virtio_pci",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_virtio_pci_modern_state,
+ NULL
+ }
+};
+
+static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+ vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
+}
+
+static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+ return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
+}
+
static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
@@ -133,6 +256,7 @@ static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
if (vector != VIRTIO_NO_VECTOR) {
return msix_vector_use(&proxy->pci_dev, vector);
}
+
return 0;
}
@@ -146,7 +270,10 @@ static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
+ bool fast_mmio = kvm_ioeventfd_any_length_enabled();
+ bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
MemoryRegion *modern_mr = &proxy->notify.mr;
+ MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
MemoryRegion *legacy_mr = &proxy->bar;
hwaddr modern_addr = QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
virtio_get_queue_index(vq);
@@ -162,8 +289,17 @@ static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
}
virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
if (modern) {
- memory_region_add_eventfd(modern_mr, modern_addr, 2,
- true, n, notifier);
+ if (fast_mmio) {
+ memory_region_add_eventfd(modern_mr, modern_addr, 0,
+ false, n, notifier);
+ } else {
+ memory_region_add_eventfd(modern_mr, modern_addr, 2,
+ false, n, notifier);
+ }
+ if (modern_pio) {
+ memory_region_add_eventfd(modern_notify_mr, 0, 2,
+ true, n, notifier);
+ }
}
if (legacy) {
memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
@@ -171,8 +307,17 @@ static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
}
} else {
if (modern) {
- memory_region_del_eventfd(modern_mr, modern_addr, 2,
- true, n, notifier);
+ if (fast_mmio) {
+ memory_region_del_eventfd(modern_mr, modern_addr, 0,
+ false, n, notifier);
+ } else {
+ memory_region_del_eventfd(modern_mr, modern_addr, 2,
+ false, n, notifier);
+ }
+ if (modern_pio) {
+ memory_region_del_eventfd(modern_notify_mr, 0, 2,
+ true, n, notifier);
+ }
}
if (legacy) {
memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
@@ -1239,6 +1384,7 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr,
proxy->vqs[vdev->queue_sel].avail[0],
((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
proxy->vqs[vdev->queue_sel].used[0]);
+ proxy->vqs[vdev->queue_sel].enabled = 1;
break;
case VIRTIO_PCI_COMMON_Q_DESCLO:
proxy->vqs[vdev->queue_sel].desc[0] = val;
@@ -1281,6 +1427,17 @@ static void virtio_pci_notify_write(void *opaque, hwaddr addr,
}
}
+static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ VirtIODevice *vdev = opaque;
+ unsigned queue = val;
+
+ if (queue < VIRTIO_QUEUE_MAX) {
+ virtio_queue_notify(vdev, queue);
+ }
+}
+
static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
unsigned size)
{
@@ -1374,6 +1531,16 @@ static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
},
.endianness = DEVICE_LITTLE_ENDIAN,
};
+ static const MemoryRegionOps notify_pio_ops = {
+ .read = virtio_pci_notify_read,
+ .write = virtio_pci_notify_write_pio,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+
memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
&common_ops,
@@ -1398,30 +1565,60 @@ static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
virtio_bus_get_device(&proxy->bus),
"virtio-pci-notify",
proxy->notify.size);
+
+ memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
+ &notify_pio_ops,
+ virtio_bus_get_device(&proxy->bus),
+ "virtio-pci-notify-pio",
+ proxy->notify.size);
}
static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
VirtIOPCIRegion *region,
- struct virtio_pci_cap *cap)
+ struct virtio_pci_cap *cap,
+ MemoryRegion *mr,
+ uint8_t bar)
{
- memory_region_add_subregion(&proxy->modern_bar,
- region->offset,
- &region->mr);
+ memory_region_add_subregion(mr, region->offset, &region->mr);
cap->cfg_type = region->type;
- cap->bar = proxy->modern_mem_bar;
+ cap->bar = bar;
cap->offset = cpu_to_le32(region->offset);
cap->length = cpu_to_le32(region->size);
virtio_pci_add_mem_cap(proxy, cap);
+
+}
+
+static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
+ VirtIOPCIRegion *region,
+ struct virtio_pci_cap *cap)
+{
+ virtio_pci_modern_region_map(proxy, region, cap,
+ &proxy->modern_bar, proxy->modern_mem_bar);
}
-static void virtio_pci_modern_region_unmap(VirtIOPCIProxy *proxy,
- VirtIOPCIRegion *region)
+static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
+ VirtIOPCIRegion *region,
+ struct virtio_pci_cap *cap)
+{
+ virtio_pci_modern_region_map(proxy, region, cap,
+ &proxy->io_bar, proxy->modern_io_bar);
+}
+
+static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
+ VirtIOPCIRegion *region)
{
memory_region_del_subregion(&proxy->modern_bar,
&region->mr);
}
+static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
+ VirtIOPCIRegion *region)
+{
+ memory_region_del_subregion(&proxy->io_bar,
+ &region->mr);
+}
+
/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
@@ -1429,6 +1626,7 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
VirtioBusState *bus = &proxy->bus;
bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
+ bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
uint8_t *config;
uint32_t size;
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
@@ -1467,16 +1665,31 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
.cap.cap_len = sizeof cfg,
.cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
};
- struct virtio_pci_cfg_cap *cfg_mask;
+ struct virtio_pci_notify_cap notify_pio = {
+ .cap.cap_len = sizeof notify,
+ .notify_off_multiplier = cpu_to_le32(0x0),
+ };
- /* TODO: add io access for speed */
+ struct virtio_pci_cfg_cap *cfg_mask;
virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
virtio_pci_modern_regions_init(proxy);
- virtio_pci_modern_region_map(proxy, &proxy->common, &cap);
- virtio_pci_modern_region_map(proxy, &proxy->isr, &cap);
- virtio_pci_modern_region_map(proxy, &proxy->device, &cap);
- virtio_pci_modern_region_map(proxy, &proxy->notify, &notify.cap);
+
+ virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
+ virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
+ virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
+ virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);
+
+ if (modern_pio) {
+ memory_region_init(&proxy->io_bar, OBJECT(proxy),
+ "virtio-pci-io", 0x4);
+
+ pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar,
+ PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);
+
+ virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
+ &notify_pio.cap);
+ }
pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar,
PCI_BASE_ADDRESS_SPACE_MEMORY |
@@ -1532,14 +1745,18 @@ static void virtio_pci_device_unplugged(DeviceState *d)
{
VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
+ bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
virtio_pci_stop_ioeventfd(proxy);
if (modern) {
- virtio_pci_modern_region_unmap(proxy, &proxy->common);
- virtio_pci_modern_region_unmap(proxy, &proxy->isr);
- virtio_pci_modern_region_unmap(proxy, &proxy->device);
- virtio_pci_modern_region_unmap(proxy, &proxy->notify);
+ virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
+ virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
+ virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
+ virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
+ if (modern_pio) {
+ virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
+ }
}
}
@@ -1559,6 +1776,7 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
*/
proxy->legacy_io_bar = 0;
proxy->msix_bar = 1;
+ proxy->modern_io_bar = 2;
proxy->modern_mem_bar = 4;
proxy->common.offset = 0x0;
@@ -1578,6 +1796,10 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
QEMU_VIRTIO_PCI_QUEUE_MEM_MULT * VIRTIO_QUEUE_MAX;
proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;
+ proxy->notify_pio.offset = 0x0;
+ proxy->notify_pio.size = 0x4;
+ proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;
+
/* subclasses can enforce modern, so do this unconditionally */
memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
2 * QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
@@ -1592,6 +1814,26 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
address_space_init(&proxy->modern_as, &proxy->modern_cfg, "virtio-pci-cfg-as");
+ if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE)
+ && !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN)
+ && pci_bus_is_express(pci_dev->bus)
+ && !pci_bus_is_root(pci_dev->bus)) {
+ int pos;
+
+ pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
+ pos = pcie_endpoint_cap_init(pci_dev, 0);
+ assert(pos > 0);
+
+ pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0, PCI_PM_SIZEOF);
+ assert(pos > 0);
+
+ /*
+ * Indicates that this function complies with revision 1.2 of the
+ * PCI Power Management Interface Specification.
+ */
+ pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);
+ }
+
virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
if (k->realize) {
k->realize(proxy, errp);
@@ -1610,9 +1852,15 @@ static void virtio_pci_reset(DeviceState *qdev)
{
VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
+ int i;
+
virtio_pci_stop_ioeventfd(proxy);
virtio_bus_reset(bus);
msix_unuse_all_vectors(&proxy->pci_dev);
+
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ proxy->vqs[i].enabled = 0;
+ }
}
static Property virtio_pci_properties[] = {
@@ -1622,6 +1870,12 @@ static Property virtio_pci_properties[] = {
VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT, false),
DEFINE_PROP_BIT("disable-modern", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT, true),
+ DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
+ DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
+ DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
DEFINE_PROP_END_OF_LIST(),
};
@@ -2212,6 +2466,9 @@ static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
k->load_config = virtio_pci_load_config;
k->save_queue = virtio_pci_save_queue;
k->load_queue = virtio_pci_load_queue;
+ k->save_extra_state = virtio_pci_save_extra_state;
+ k->load_extra_state = virtio_pci_load_extra_state;
+ k->has_extra_state = virtio_pci_has_extra_state;
k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
k->set_host_notifier = virtio_pci_set_host_notifier;
k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h
index 801c23a..ffb74bb 100644
--- a/hw/virtio/virtio-pci.h
+++ b/hw/virtio/virtio-pci.h
@@ -72,8 +72,19 @@ typedef struct VirtioBusClass VirtioPCIBusClass;
/* virtio version flags */
#define VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT 2
#define VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT 3
+#define VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT 4
#define VIRTIO_PCI_FLAG_DISABLE_LEGACY (1 << VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT)
#define VIRTIO_PCI_FLAG_DISABLE_MODERN (1 << VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT)
+#define VIRTIO_PCI_FLAG_DISABLE_PCIE (1 << VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT)
+
+/* migrate extra state */
+#define VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT 4
+#define VIRTIO_PCI_FLAG_MIGRATE_EXTRA (1 << VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT)
+
+/* have pio notification for modern device ? */
+#define VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT 5
+#define VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY \
+ (1 << VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT)
typedef struct {
MSIMessage msg;
@@ -104,6 +115,14 @@ typedef struct VirtIOPCIRegion {
uint32_t type;
} VirtIOPCIRegion;
+typedef struct VirtIOPCIQueue {
+ uint16_t num;
+ bool enabled;
+ uint32_t desc[2];
+ uint32_t avail[2];
+ uint32_t used[2];
+} VirtIOPCIQueue;
+
struct VirtIOPCIProxy {
PCIDevice pci_dev;
MemoryRegion bar;
@@ -111,11 +130,14 @@ struct VirtIOPCIProxy {
VirtIOPCIRegion isr;
VirtIOPCIRegion device;
VirtIOPCIRegion notify;
+ VirtIOPCIRegion notify_pio;
MemoryRegion modern_bar;
+ MemoryRegion io_bar;
MemoryRegion modern_cfg;
AddressSpace modern_as;
uint32_t legacy_io_bar;
uint32_t msix_bar;
+ uint32_t modern_io_bar;
uint32_t modern_mem_bar;
int config_cap;
uint32_t flags;
@@ -124,13 +146,7 @@ struct VirtIOPCIProxy {
uint32_t dfselect;
uint32_t gfselect;
uint32_t guest_features[2];
- struct {
- uint16_t num;
- bool enabled;
- uint32_t desc[2];
- uint32_t avail[2];
- uint32_t used[2];
- } vqs[VIRTIO_QUEUE_MAX];
+ VirtIOPCIQueue vqs[VIRTIO_QUEUE_MAX];
bool ioeventfd_disabled;
bool ioeventfd_started;
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 939f802..1edef59 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -1116,6 +1116,16 @@ static bool virtio_ringsize_needed(void *opaque)
return false;
}
+static bool virtio_extra_state_needed(void *opaque)
+{
+ VirtIODevice *vdev = opaque;
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+
+ return k->has_extra_state &&
+ k->has_extra_state(qbus->parent);
+}
+
static void put_virtqueue_state(QEMUFile *f, void *pv, size_t size)
{
VirtIODevice *vdev = pv;
@@ -1210,6 +1220,53 @@ static const VMStateDescription vmstate_virtio_ringsize = {
}
};
+static int get_extra_state(QEMUFile *f, void *pv, size_t size)
+{
+ VirtIODevice *vdev = pv;
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+
+ if (!k->load_extra_state) {
+ return -1;
+ } else {
+ return k->load_extra_state(qbus->parent, f);
+ }
+}
+
+static void put_extra_state(QEMUFile *f, void *pv, size_t size)
+{
+ VirtIODevice *vdev = pv;
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+
+ k->save_extra_state(qbus->parent, f);
+}
+
+static const VMStateInfo vmstate_info_extra_state = {
+ .name = "virtqueue_extra_state",
+ .get = get_extra_state,
+ .put = put_extra_state,
+};
+
+static const VMStateDescription vmstate_virtio_extra_state = {
+ .name = "virtio/extra_state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = &virtio_extra_state_needed,
+ .fields = (VMStateField[]) {
+ {
+ .name = "extra_state",
+ .version_id = 0,
+ .field_exists = NULL,
+ .size = 0,
+ .info = &vmstate_info_extra_state,
+ .flags = VMS_SINGLE,
+ .offset = 0,
+ },
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_virtio_device_endian = {
.name = "virtio/device_endian",
.version_id = 1,
@@ -1245,6 +1302,7 @@ static const VMStateDescription vmstate_virtio = {
&vmstate_virtio_64bit_features,
&vmstate_virtio_virtqueues,
&vmstate_virtio_ringsize,
+ &vmstate_virtio_extra_state,
NULL
}
};
diff --git a/hw/xen/xen_pt_config_init.c b/hw/xen/xen_pt_config_init.c
index 0efee11..3d8686d 100644
--- a/hw/xen/xen_pt_config_init.c
+++ b/hw/xen/xen_pt_config_init.c
@@ -1937,7 +1937,7 @@ static int xen_pt_config_reg_init(XenPCIPassthroughState *s,
break;
case 4: rc = xen_host_pci_get_long(&s->real_device, offset, &val);
break;
- default: assert(1);
+ default: abort();
}
if (rc) {
/* Serious issues when we cannot read the host values! */
@@ -1982,7 +1982,7 @@ static int xen_pt_config_reg_init(XenPCIPassthroughState *s,
break;
case 4: pci_set_long(s->dev.config + offset, val);
break;
- default: assert(1);
+ default: abort();
}
/* set register value pointer to the data. */
reg_entry->ptr.byte = s->dev.config + offset;
OpenPOWER on IntegriCloud