Diffstat (limited to 'src/hw/virtio')
-rw-r--r--  src/hw/virtio/Makefile.objs                 8
-rw-r--r--  src/hw/virtio/dataplane/Makefile.objs       1
-rw-r--r--  src/hw/virtio/dataplane/vring.c           526
-rw-r--r--  src/hw/virtio/vhost-backend.c             211
-rw-r--r--  src/hw/virtio/vhost-user.c                657
-rw-r--r--  src/hw/virtio/vhost.c                    1271
-rw-r--r--  src/hw/virtio/virtio-balloon.c            465
-rw-r--r--  src/hw/virtio/virtio-bus.c                181
-rw-r--r--  src/hw/virtio/virtio-mmio.c               579
-rw-r--r--  src/hw/virtio/virtio-pci.c               2534
-rw-r--r--  src/hw/virtio/virtio-pci.h                318
-rw-r--r--  src/hw/virtio/virtio-rng.c                267
-rw-r--r--  src/hw/virtio/virtio.c                   1803
13 files changed, 8821 insertions, 0 deletions
diff --git a/src/hw/virtio/Makefile.objs b/src/hw/virtio/Makefile.objs
new file mode 100644
index 0000000..19b224a
--- /dev/null
+++ b/src/hw/virtio/Makefile.objs
@@ -0,0 +1,8 @@
+common-obj-y += virtio-rng.o
+common-obj-$(CONFIG_VIRTIO_PCI) += virtio-pci.o
+common-obj-y += virtio-bus.o
+common-obj-y += virtio-mmio.o
+obj-$(CONFIG_VIRTIO) += dataplane/
+
+obj-y += virtio.o virtio-balloon.o
+obj-$(CONFIG_LINUX) += vhost.o vhost-backend.o vhost-user.o
diff --git a/src/hw/virtio/dataplane/Makefile.objs b/src/hw/virtio/dataplane/Makefile.objs
new file mode 100644
index 0000000..753a9ca
--- /dev/null
+++ b/src/hw/virtio/dataplane/Makefile.objs
@@ -0,0 +1 @@
+obj-y += vring.o
diff --git a/src/hw/virtio/dataplane/vring.c b/src/hw/virtio/dataplane/vring.c
new file mode 100644
index 0000000..23f667e
--- /dev/null
+++ b/src/hw/virtio/dataplane/vring.c
@@ -0,0 +1,526 @@
+/* Copyright 2012 Red Hat, Inc.
+ * Copyright IBM, Corp. 2012
+ *
+ * Based on Linux 2.6.39 vhost code:
+ * Copyright (C) 2009 Red Hat, Inc.
+ * Copyright (C) 2006 Rusty Russell IBM Corporation
+ *
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ * Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * Inspiration, some code, and most witty comments come from
+ * Documentation/virtual/lguest/lguest.c, by Rusty Russell
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+
+#include "trace.h"
+#include "hw/hw.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
+#include "hw/virtio/virtio-access.h"
+#include "hw/virtio/dataplane/vring.h"
+#include "hw/virtio/dataplane/vring-accessors.h"
+#include "qemu/error-report.h"
+
+/* vring_map can be coupled with vring_unmap or (if you still have the
+ * value returned in *mr) memory_region_unref.
+ * Returns NULL on failure.
+ * Callers that can handle a partial mapping must supply a mapped_len pointer
+ * to receive the actual length mapped.
+ * Passing mapped_len == NULL requires either a full mapping or a failure.
+ */
+static void *vring_map(MemoryRegion **mr, hwaddr phys,
+ hwaddr len, hwaddr *mapped_len,
+ bool is_write)
+{
+ MemoryRegionSection section = memory_region_find(get_system_memory(), phys, len);
+ uint64_t size;
+
+ if (!section.mr) {
+ goto out;
+ }
+
+ size = int128_get64(section.size);
+ assert(size);
+
+ /* Passing mapped_len == NULL requires either a full mapping or a failure. */
+ if (!mapped_len && size < len) {
+ goto out;
+ }
+
+ if (is_write && section.readonly) {
+ goto out;
+ }
+ if (!memory_region_is_ram(section.mr)) {
+ goto out;
+ }
+
+ /* Ignore regions with dirty logging, we cannot mark them dirty */
+ if (memory_region_get_dirty_log_mask(section.mr)) {
+ goto out;
+ }
+
+ if (mapped_len) {
+ *mapped_len = MIN(size, len);
+ }
+
+ *mr = section.mr;
+ return memory_region_get_ram_ptr(section.mr) + section.offset_within_region;
+
+out:
+ memory_region_unref(section.mr);
+ *mr = NULL;
+ return NULL;
+}
+
+static void vring_unmap(void *buffer, bool is_write)
+{
+ ram_addr_t addr;
+ MemoryRegion *mr;
+
+ mr = qemu_ram_addr_from_host(buffer, &addr);
+ memory_region_unref(mr);
+}
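To make the partial-mapping contract described above concrete, here is a minimal caller sketch (illustrative only, not part of this patch; consume_bytes() is a hypothetical placeholder): a caller that passes a mapped_len pointer must be prepared to loop over the region, exactly as get_desc() does further down.

static int copy_guest_range(hwaddr addr, hwaddr len, bool is_write)
{
    while (len) {
        MemoryRegion *mr;
        hwaddr mapped;
        void *ptr = vring_map(&mr, addr, len, &mapped, is_write);

        if (!ptr) {
            return -EFAULT;             /* unmapped or read-only guest memory */
        }
        consume_bytes(ptr, mapped);     /* hypothetical consumer of the bytes */
        vring_unmap(ptr, is_write);     /* drops the MemoryRegion reference */
        addr += mapped;
        len -= mapped;
    }
    return 0;
}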
+
+/* Map the guest's vring to host memory */
+bool vring_setup(Vring *vring, VirtIODevice *vdev, int n)
+{
+ struct vring *vr = &vring->vr;
+ hwaddr addr;
+ hwaddr size;
+ void *ptr;
+
+ vring->broken = false;
+ vr->num = virtio_queue_get_num(vdev, n);
+
+ addr = virtio_queue_get_desc_addr(vdev, n);
+ size = virtio_queue_get_desc_size(vdev, n);
+ /* Map the descriptor area as read only */
+ ptr = vring_map(&vring->mr_desc, addr, size, NULL, false);
+ if (!ptr) {
+ error_report("Failed to map 0x%" HWADDR_PRIx " byte for vring desc "
+ "at 0x%" HWADDR_PRIx,
+ size, addr);
+ goto out_err_desc;
+ }
+ vr->desc = ptr;
+
+ addr = virtio_queue_get_avail_addr(vdev, n);
+ size = virtio_queue_get_avail_size(vdev, n);
+ /* Add the size of the used_event_idx */
+ size += sizeof(uint16_t);
+ /* Map the driver area as read only */
+ ptr = vring_map(&vring->mr_avail, addr, size, NULL, false);
+ if (!ptr) {
+ error_report("Failed to map 0x%" HWADDR_PRIx " byte for vring avail "
+ "at 0x%" HWADDR_PRIx,
+ size, addr);
+ goto out_err_avail;
+ }
+ vr->avail = ptr;
+
+ addr = virtio_queue_get_used_addr(vdev, n);
+ size = virtio_queue_get_used_size(vdev, n);
+ /* Add the size of the avail_event_idx */
+ size += sizeof(uint16_t);
+ /* Map the device area as read-write */
+ ptr = vring_map(&vring->mr_used, addr, size, NULL, true);
+ if (!ptr) {
+ error_report("Failed to map 0x%" HWADDR_PRIx " byte for vring used "
+ "at 0x%" HWADDR_PRIx,
+ size, addr);
+ goto out_err_used;
+ }
+ vr->used = ptr;
+
+ vring->last_avail_idx = virtio_queue_get_last_avail_idx(vdev, n);
+ vring->last_used_idx = vring_get_used_idx(vdev, vring);
+ vring->signalled_used = 0;
+ vring->signalled_used_valid = false;
+
+ trace_vring_setup(virtio_queue_get_ring_addr(vdev, n),
+ vring->vr.desc, vring->vr.avail, vring->vr.used);
+ return true;
+
+out_err_used:
+ memory_region_unref(vring->mr_avail);
+out_err_avail:
+ memory_region_unref(vring->mr_desc);
+out_err_desc:
+ vring->broken = true;
+ return false;
+}
+
+void vring_teardown(Vring *vring, VirtIODevice *vdev, int n)
+{
+ virtio_queue_set_last_avail_idx(vdev, n, vring->last_avail_idx);
+ virtio_queue_invalidate_signalled_used(vdev, n);
+
+ memory_region_unref(vring->mr_desc);
+ memory_region_unref(vring->mr_avail);
+ memory_region_unref(vring->mr_used);
+}
+
+/* Disable guest->host notifies */
+void vring_disable_notification(VirtIODevice *vdev, Vring *vring)
+{
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
+ vring_set_used_flags(vdev, vring, VRING_USED_F_NO_NOTIFY);
+ }
+}
+
+/* Enable guest->host notifies
+ *
+ * Return true if the vring is empty, false if there are more requests.
+ */
+bool vring_enable_notification(VirtIODevice *vdev, Vring *vring)
+{
+ if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
+ vring_avail_event(&vring->vr) = vring->vr.avail->idx;
+ } else {
+ vring_clear_used_flags(vdev, vring, VRING_USED_F_NO_NOTIFY);
+ }
+ smp_mb(); /* ensure update is seen before reading avail_idx */
+ return !vring_more_avail(vdev, vring);
+}
+
+/* This is stolen from linux/drivers/vhost/vhost.c:vhost_notify() */
+bool vring_should_notify(VirtIODevice *vdev, Vring *vring)
+{
+ uint16_t old, new;
+ bool v;
+ /* Flush out used index updates. This is paired
+ * with the barrier that the Guest executes when enabling
+ * interrupts. */
+ smp_mb();
+
+ if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
+ unlikely(!vring_more_avail(vdev, vring))) {
+ return true;
+ }
+
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
+ return !(vring_get_avail_flags(vdev, vring) &
+ VRING_AVAIL_F_NO_INTERRUPT);
+ }
+ old = vring->signalled_used;
+ v = vring->signalled_used_valid;
+ new = vring->signalled_used = vring->last_used_idx;
+ vring->signalled_used_valid = true;
+
+ if (unlikely(!v)) {
+ return true;
+ }
+
+ return vring_need_event(virtio_tswap16(vdev, vring_used_event(&vring->vr)),
+ new, old);
+}
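For reference, the event-index test used above comes from the Linux virtio ring header; paraphrased (names adjusted, semantics unchanged), it is:

/* Notify only if the new used index has moved past the event index the guest
 * published, relative to the last index we notified about. */
static inline int vring_need_event(uint16_t event_idx, uint16_t new_idx,
                                   uint16_t old_idx)
{
    return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old_idx);
}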
+
+
+static int get_desc(Vring *vring, VirtQueueElement *elem,
+ struct vring_desc *desc)
+{
+ unsigned *num;
+ struct iovec *iov;
+ hwaddr *addr;
+ MemoryRegion *mr;
+ hwaddr len;
+
+ if (desc->flags & VRING_DESC_F_WRITE) {
+ num = &elem->in_num;
+ iov = &elem->in_sg[*num];
+ addr = &elem->in_addr[*num];
+ } else {
+ num = &elem->out_num;
+ iov = &elem->out_sg[*num];
+ addr = &elem->out_addr[*num];
+
+ /* If it's an output descriptor, they're all supposed
+ * to come before any input descriptors. */
+ if (unlikely(elem->in_num)) {
+ error_report("Descriptor has out after in");
+ return -EFAULT;
+ }
+ }
+
+ while (desc->len) {
+ /* Stop for now if there are not enough iovecs available. */
+ if (*num >= VIRTQUEUE_MAX_SIZE) {
+ error_report("Invalid SG num: %u", *num);
+ return -EFAULT;
+ }
+
+ iov->iov_base = vring_map(&mr, desc->addr, desc->len, &len,
+ desc->flags & VRING_DESC_F_WRITE);
+ if (!iov->iov_base) {
+ error_report("Failed to map descriptor addr %#" PRIx64 " len %u",
+ (uint64_t)desc->addr, desc->len);
+ return -EFAULT;
+ }
+
+ /* The MemoryRegion is looked up again and unref'ed later; leave the
+ * ref in place. */
+ (iov++)->iov_len = len;
+ *addr++ = desc->addr;
+ desc->len -= len;
+ desc->addr += len;
+ *num += 1;
+ }
+
+ return 0;
+}
+
+static void copy_in_vring_desc(VirtIODevice *vdev,
+ const struct vring_desc *guest,
+ struct vring_desc *host)
+{
+ host->addr = virtio_ldq_p(vdev, &guest->addr);
+ host->len = virtio_ldl_p(vdev, &guest->len);
+ host->flags = virtio_lduw_p(vdev, &guest->flags);
+ host->next = virtio_lduw_p(vdev, &guest->next);
+}
+
+static bool read_vring_desc(VirtIODevice *vdev,
+ hwaddr guest,
+ struct vring_desc *host)
+{
+ if (address_space_read(&address_space_memory, guest, MEMTXATTRS_UNSPECIFIED,
+ (uint8_t *)host, sizeof *host)) {
+ return false;
+ }
+ host->addr = virtio_tswap64(vdev, host->addr);
+ host->len = virtio_tswap32(vdev, host->len);
+ host->flags = virtio_tswap16(vdev, host->flags);
+ host->next = virtio_tswap16(vdev, host->next);
+ return true;
+}
+
+/* This is stolen from linux/drivers/vhost/vhost.c. */
+static int get_indirect(VirtIODevice *vdev, Vring *vring,
+ VirtQueueElement *elem, struct vring_desc *indirect)
+{
+ struct vring_desc desc;
+ unsigned int i = 0, count, found = 0;
+ int ret;
+
+ /* Sanity check */
+ if (unlikely(indirect->len % sizeof(desc))) {
+ error_report("Invalid length in indirect descriptor: "
+ "len %#x not multiple of %#zx",
+ indirect->len, sizeof(desc));
+ vring->broken = true;
+ return -EFAULT;
+ }
+
+ count = indirect->len / sizeof(desc);
+ /* Buffers are chained via a 16 bit next field, so
+ * we can have at most 2^16 of these. */
+ if (unlikely(count > USHRT_MAX + 1)) {
+ error_report("Indirect buffer length too big: %d", indirect->len);
+ vring->broken = true;
+ return -EFAULT;
+ }
+
+ do {
+ /* Translate indirect descriptor */
+ if (!read_vring_desc(vdev, indirect->addr + found * sizeof(desc),
+ &desc)) {
+ error_report("Failed to read indirect descriptor "
+ "addr %#" PRIx64 " len %zu",
+ (uint64_t)indirect->addr + found * sizeof(desc),
+ sizeof(desc));
+ vring->broken = true;
+ return -EFAULT;
+ }
+
+ /* Ensure descriptor has been loaded before accessing fields */
+ barrier(); /* read_barrier_depends(); */
+
+ if (unlikely(++found > count)) {
+ error_report("Loop detected: last one at %u "
+ "indirect size %u", i, count);
+ vring->broken = true;
+ return -EFAULT;
+ }
+
+ if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
+ error_report("Nested indirect descriptor");
+ vring->broken = true;
+ return -EFAULT;
+ }
+
+ ret = get_desc(vring, elem, &desc);
+ if (ret < 0) {
+ vring->broken |= (ret == -EFAULT);
+ return ret;
+ }
+ i = desc.next;
+ } while (desc.flags & VRING_DESC_F_NEXT);
+ return 0;
+}
+
+static void vring_unmap_element(VirtQueueElement *elem)
+{
+ int i;
+
+ /* This assumes that the iovecs, if changed, are never moved past
+ * the end of the valid area. This is true if iovec manipulations
+ * are done with iov_discard_front and iov_discard_back.
+ */
+ for (i = 0; i < elem->out_num; i++) {
+ vring_unmap(elem->out_sg[i].iov_base, false);
+ }
+
+ for (i = 0; i < elem->in_num; i++) {
+ vring_unmap(elem->in_sg[i].iov_base, true);
+ }
+}
+
+/* This looks in the virtqueue for the first available buffer, and converts
+ * it to an iovec for convenient access. Since descriptors consist of some
+ * number of output then some number of input descriptors, it's actually two
+ * iovecs, but we pack them into one and note how many of each there were.
+ *
+ * This function returns the head descriptor index on success, or a negative
+ * errno on failure: -EAGAIN if no buffer is currently available, -EFAULT if
+ * the ring is broken or a descriptor is malformed.
+ *
+ * Stolen from linux/drivers/vhost/vhost.c.
+ */
+int vring_pop(VirtIODevice *vdev, Vring *vring,
+ VirtQueueElement *elem)
+{
+ struct vring_desc desc;
+ unsigned int i, head, found = 0, num = vring->vr.num;
+ uint16_t avail_idx, last_avail_idx;
+ int ret;
+
+ /* Initialize elem so it can be safely unmapped */
+ elem->in_num = elem->out_num = 0;
+
+ /* If there was a fatal error then refuse operation */
+ if (vring->broken) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* Check it isn't doing very strange things with descriptor numbers. */
+ last_avail_idx = vring->last_avail_idx;
+ avail_idx = vring_get_avail_idx(vdev, vring);
+ barrier(); /* load indices now and not again later */
+
+ if (unlikely((uint16_t)(avail_idx - last_avail_idx) > num)) {
+ error_report("Guest moved used index from %u to %u",
+ last_avail_idx, avail_idx);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* If there's nothing new since last we looked. */
+ if (avail_idx == last_avail_idx) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ /* Only get avail ring entries after they have been exposed by guest. */
+ smp_rmb();
+
+ /* Grab the next descriptor number they're advertising, and increment
+ * the index we've seen. */
+ head = vring_get_avail_ring(vdev, vring, last_avail_idx % num);
+
+ elem->index = head;
+
+ /* If their number is silly, that's an error. */
+ if (unlikely(head >= num)) {
+ error_report("Guest says index %u > %u is available", head, num);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ i = head;
+ do {
+ if (unlikely(i >= num)) {
+ error_report("Desc index is %u > %u, head = %u", i, num, head);
+ ret = -EFAULT;
+ goto out;
+ }
+ if (unlikely(++found > num)) {
+ error_report("Loop detected: last one at %u vq size %u head %u",
+ i, num, head);
+ ret = -EFAULT;
+ goto out;
+ }
+ copy_in_vring_desc(vdev, &vring->vr.desc[i], &desc);
+
+ /* Ensure descriptor is loaded before accessing fields */
+ barrier();
+
+ if (desc.flags & VRING_DESC_F_INDIRECT) {
+ ret = get_indirect(vdev, vring, elem, &desc);
+ if (ret < 0) {
+ goto out;
+ }
+ continue;
+ }
+
+ ret = get_desc(vring, elem, &desc);
+ if (ret < 0) {
+ goto out;
+ }
+
+ i = desc.next;
+ } while (desc.flags & VRING_DESC_F_NEXT);
+
+ /* On success, increment avail index. */
+ vring->last_avail_idx++;
+ if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
+ vring_avail_event(&vring->vr) =
+ virtio_tswap16(vdev, vring->last_avail_idx);
+ }
+
+ return head;
+
+out:
+ assert(ret < 0);
+ if (ret == -EFAULT) {
+ vring->broken = true;
+ }
+ vring_unmap_element(elem);
+ return ret;
+}
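A typical consumer of vring_pop()/vring_push() drains the ring in a loop; the sketch below is illustrative only and not part of this patch (handle_request() is a hypothetical per-device routine that returns the number of bytes it wrote into the in_sg buffers):

static void drain_vring(VirtIODevice *vdev, Vring *vring)
{
    VirtQueueElement elem;
    int head;

    for (;;) {
        head = vring_pop(vdev, vring, &elem);
        if (head == -EAGAIN) {
            break;                      /* no buffers available right now */
        }
        if (head < 0) {
            return;                     /* -EFAULT: the ring is broken */
        }
        vring_push(vdev, vring, &elem, handle_request(&elem));
    }
}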
+
+/* After we've used one of their buffers, we tell them about it.
+ *
+ * Stolen from linux/drivers/vhost/vhost.c.
+ */
+void vring_push(VirtIODevice *vdev, Vring *vring, VirtQueueElement *elem,
+ int len)
+{
+ unsigned int head = elem->index;
+ uint16_t new;
+
+ vring_unmap_element(elem);
+
+ /* Don't touch vring if a fatal error occurred */
+ if (vring->broken) {
+ return;
+ }
+
+ /* The virtqueue contains a ring of used buffers. Get a pointer to the
+ * next entry in that used ring. */
+ vring_set_used_ring_id(vdev, vring, vring->last_used_idx % vring->vr.num,
+ head);
+ vring_set_used_ring_len(vdev, vring, vring->last_used_idx % vring->vr.num,
+ len);
+
+ /* Make sure buffer is written before we update index. */
+ smp_wmb();
+
+ new = ++vring->last_used_idx;
+ vring_set_used_idx(vdev, vring, new);
+ if (unlikely((int16_t)(new - vring->signalled_used) < (uint16_t)1)) {
+ vring->signalled_used_valid = false;
+ }
+}
diff --git a/src/hw/virtio/vhost-backend.c b/src/hw/virtio/vhost-backend.c
new file mode 100644
index 0000000..b734a60
--- /dev/null
+++ b/src/hw/virtio/vhost-backend.c
@@ -0,0 +1,211 @@
+/*
+ * vhost-backend
+ *
+ * Copyright (c) 2013 Virtual Open Systems Sarl.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-backend.h"
+#include "qemu/error-report.h"
+#include "linux/vhost.h"
+
+#include <sys/ioctl.h>
+
+static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
+ void *arg)
+{
+ int fd = (uintptr_t) dev->opaque;
+
+ assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);
+
+ return ioctl(fd, request, arg);
+}
+
+static int vhost_kernel_init(struct vhost_dev *dev, void *opaque)
+{
+ assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);
+
+ dev->opaque = opaque;
+
+ return 0;
+}
+
+static int vhost_kernel_cleanup(struct vhost_dev *dev)
+{
+ int fd = (uintptr_t) dev->opaque;
+
+ assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);
+
+ return close(fd);
+}
+
+static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
+{
+ int limit = 64;
+ char *s;
+
+ if (g_file_get_contents("/sys/module/vhost/parameters/max_mem_regions",
+ &s, NULL, NULL)) {
+ uint64_t val = g_ascii_strtoull(s, NULL, 10);
+ if (!((val == G_MAXUINT64 || !val) && errno)) {
+ return val;
+ }
+ error_report("ignoring invalid max_mem_regions value in vhost module:"
+ " %s", s);
+ }
+ return limit;
+}
+
+static int vhost_kernel_net_set_backend(struct vhost_dev *dev,
+ struct vhost_vring_file *file)
+{
+ return vhost_kernel_call(dev, VHOST_NET_SET_BACKEND, file);
+}
+
+static int vhost_kernel_scsi_set_endpoint(struct vhost_dev *dev,
+ struct vhost_scsi_target *target)
+{
+ return vhost_kernel_call(dev, VHOST_SCSI_SET_ENDPOINT, target);
+}
+
+static int vhost_kernel_scsi_clear_endpoint(struct vhost_dev *dev,
+ struct vhost_scsi_target *target)
+{
+ return vhost_kernel_call(dev, VHOST_SCSI_CLEAR_ENDPOINT, target);
+}
+
+static int vhost_kernel_scsi_get_abi_version(struct vhost_dev *dev, int *version)
+{
+ return vhost_kernel_call(dev, VHOST_SCSI_GET_ABI_VERSION, version);
+}
+
+static int vhost_kernel_set_log_base(struct vhost_dev *dev, uint64_t base,
+ struct vhost_log *log)
+{
+ return vhost_kernel_call(dev, VHOST_SET_LOG_BASE, &base);
+}
+
+static int vhost_kernel_set_mem_table(struct vhost_dev *dev,
+ struct vhost_memory *mem)
+{
+ return vhost_kernel_call(dev, VHOST_SET_MEM_TABLE, mem);
+}
+
+static int vhost_kernel_set_vring_addr(struct vhost_dev *dev,
+ struct vhost_vring_addr *addr)
+{
+ return vhost_kernel_call(dev, VHOST_SET_VRING_ADDR, addr);
+}
+
+static int vhost_kernel_set_vring_endian(struct vhost_dev *dev,
+ struct vhost_vring_state *ring)
+{
+ return vhost_kernel_call(dev, VHOST_SET_VRING_ENDIAN, ring);
+}
+
+static int vhost_kernel_set_vring_num(struct vhost_dev *dev,
+ struct vhost_vring_state *ring)
+{
+ return vhost_kernel_call(dev, VHOST_SET_VRING_NUM, ring);
+}
+
+static int vhost_kernel_set_vring_base(struct vhost_dev *dev,
+ struct vhost_vring_state *ring)
+{
+ return vhost_kernel_call(dev, VHOST_SET_VRING_BASE, ring);
+}
+
+static int vhost_kernel_get_vring_base(struct vhost_dev *dev,
+ struct vhost_vring_state *ring)
+{
+ return vhost_kernel_call(dev, VHOST_GET_VRING_BASE, ring);
+}
+
+static int vhost_kernel_set_vring_kick(struct vhost_dev *dev,
+ struct vhost_vring_file *file)
+{
+ return vhost_kernel_call(dev, VHOST_SET_VRING_KICK, file);
+}
+
+static int vhost_kernel_set_vring_call(struct vhost_dev *dev,
+ struct vhost_vring_file *file)
+{
+ return vhost_kernel_call(dev, VHOST_SET_VRING_CALL, file);
+}
+
+static int vhost_kernel_set_features(struct vhost_dev *dev,
+ uint64_t features)
+{
+ return vhost_kernel_call(dev, VHOST_SET_FEATURES, &features);
+}
+
+static int vhost_kernel_get_features(struct vhost_dev *dev,
+ uint64_t *features)
+{
+ return vhost_kernel_call(dev, VHOST_GET_FEATURES, features);
+}
+
+static int vhost_kernel_set_owner(struct vhost_dev *dev)
+{
+ return vhost_kernel_call(dev, VHOST_SET_OWNER, NULL);
+}
+
+static int vhost_kernel_reset_device(struct vhost_dev *dev)
+{
+ return vhost_kernel_call(dev, VHOST_RESET_OWNER, NULL);
+}
+
+static int vhost_kernel_get_vq_index(struct vhost_dev *dev, int idx)
+{
+ assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
+
+ return idx - dev->vq_index;
+}
+
+static const VhostOps kernel_ops = {
+ .backend_type = VHOST_BACKEND_TYPE_KERNEL,
+ .vhost_backend_init = vhost_kernel_init,
+ .vhost_backend_cleanup = vhost_kernel_cleanup,
+ .vhost_backend_memslots_limit = vhost_kernel_memslots_limit,
+ .vhost_net_set_backend = vhost_kernel_net_set_backend,
+ .vhost_scsi_set_endpoint = vhost_kernel_scsi_set_endpoint,
+ .vhost_scsi_clear_endpoint = vhost_kernel_scsi_clear_endpoint,
+ .vhost_scsi_get_abi_version = vhost_kernel_scsi_get_abi_version,
+ .vhost_set_log_base = vhost_kernel_set_log_base,
+ .vhost_set_mem_table = vhost_kernel_set_mem_table,
+ .vhost_set_vring_addr = vhost_kernel_set_vring_addr,
+ .vhost_set_vring_endian = vhost_kernel_set_vring_endian,
+ .vhost_set_vring_num = vhost_kernel_set_vring_num,
+ .vhost_set_vring_base = vhost_kernel_set_vring_base,
+ .vhost_get_vring_base = vhost_kernel_get_vring_base,
+ .vhost_set_vring_kick = vhost_kernel_set_vring_kick,
+ .vhost_set_vring_call = vhost_kernel_set_vring_call,
+ .vhost_set_features = vhost_kernel_set_features,
+ .vhost_get_features = vhost_kernel_get_features,
+ .vhost_set_owner = vhost_kernel_set_owner,
+ .vhost_reset_device = vhost_kernel_reset_device,
+ .vhost_get_vq_index = vhost_kernel_get_vq_index,
+};
+
+int vhost_set_backend_type(struct vhost_dev *dev, VhostBackendType backend_type)
+{
+ int r = 0;
+
+ switch (backend_type) {
+ case VHOST_BACKEND_TYPE_KERNEL:
+ dev->vhost_ops = &kernel_ops;
+ break;
+ case VHOST_BACKEND_TYPE_USER:
+ dev->vhost_ops = &user_ops;
+ break;
+ default:
+ error_report("Unknown vhost backend type");
+ r = -1;
+ }
+
+ return r;
+}
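As an illustration of how the ops table is consumed (a sketch, not part of this patch; the real caller is vhost_dev_init() in vhost.c, and example_backend_setup() is hypothetical):

static int example_backend_setup(struct vhost_dev *dev, void *opaque,
                                 VhostBackendType backend_type)
{
    int r = vhost_set_backend_type(dev, backend_type);
    if (r < 0) {
        return r;
    }
    /* opaque is an open /dev/vhost-* file descriptor (cast through uintptr_t)
     * for the kernel backend, or a CharDriverState * for vhost-user. */
    return dev->vhost_ops->vhost_backend_init(dev, opaque);
}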
diff --git a/src/hw/virtio/vhost-user.c b/src/hw/virtio/vhost-user.c
new file mode 100644
index 0000000..577c95e
--- /dev/null
+++ b/src/hw/virtio/vhost-user.c
@@ -0,0 +1,657 @@
+/*
+ * vhost-user
+ *
+ * Copyright (c) 2013 Virtual Open Systems Sarl.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-backend.h"
+#include "hw/virtio/virtio-net.h"
+#include "sysemu/char.h"
+#include "sysemu/kvm.h"
+#include "qemu/error-report.h"
+#include "qemu/sockets.h"
+#include "exec/ram_addr.h"
+#include "migration/migration.h"
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <linux/vhost.h>
+
+#define VHOST_MEMORY_MAX_NREGIONS 8
+#define VHOST_USER_F_PROTOCOL_FEATURES 30
+
+enum VhostUserProtocolFeature {
+ VHOST_USER_PROTOCOL_F_MQ = 0,
+ VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
+ VHOST_USER_PROTOCOL_F_RARP = 2,
+
+ VHOST_USER_PROTOCOL_F_MAX
+};
+
+#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
+
+typedef enum VhostUserRequest {
+ VHOST_USER_NONE = 0,
+ VHOST_USER_GET_FEATURES = 1,
+ VHOST_USER_SET_FEATURES = 2,
+ VHOST_USER_SET_OWNER = 3,
+ VHOST_USER_RESET_OWNER = 4,
+ VHOST_USER_SET_MEM_TABLE = 5,
+ VHOST_USER_SET_LOG_BASE = 6,
+ VHOST_USER_SET_LOG_FD = 7,
+ VHOST_USER_SET_VRING_NUM = 8,
+ VHOST_USER_SET_VRING_ADDR = 9,
+ VHOST_USER_SET_VRING_BASE = 10,
+ VHOST_USER_GET_VRING_BASE = 11,
+ VHOST_USER_SET_VRING_KICK = 12,
+ VHOST_USER_SET_VRING_CALL = 13,
+ VHOST_USER_SET_VRING_ERR = 14,
+ VHOST_USER_GET_PROTOCOL_FEATURES = 15,
+ VHOST_USER_SET_PROTOCOL_FEATURES = 16,
+ VHOST_USER_GET_QUEUE_NUM = 17,
+ VHOST_USER_SET_VRING_ENABLE = 18,
+ VHOST_USER_SEND_RARP = 19,
+ VHOST_USER_MAX
+} VhostUserRequest;
+
+typedef struct VhostUserMemoryRegion {
+ uint64_t guest_phys_addr;
+ uint64_t memory_size;
+ uint64_t userspace_addr;
+ uint64_t mmap_offset;
+} VhostUserMemoryRegion;
+
+typedef struct VhostUserMemory {
+ uint32_t nregions;
+ uint32_t padding;
+ VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
+} VhostUserMemory;
+
+typedef struct VhostUserLog {
+ uint64_t mmap_size;
+ uint64_t mmap_offset;
+} VhostUserLog;
+
+typedef struct VhostUserMsg {
+ VhostUserRequest request;
+
+#define VHOST_USER_VERSION_MASK (0x3)
+#define VHOST_USER_REPLY_MASK (0x1<<2)
+ uint32_t flags;
+ uint32_t size; /* the following payload size */
+ union {
+#define VHOST_USER_VRING_IDX_MASK (0xff)
+#define VHOST_USER_VRING_NOFD_MASK (0x1<<8)
+ uint64_t u64;
+ struct vhost_vring_state state;
+ struct vhost_vring_addr addr;
+ VhostUserMemory memory;
+ VhostUserLog log;
+ } payload;
+} QEMU_PACKED VhostUserMsg;
+
+static VhostUserMsg m __attribute__ ((unused));
+#define VHOST_USER_HDR_SIZE (sizeof(m.request) \
+ + sizeof(m.flags) \
+ + sizeof(m.size))
+
+#define VHOST_USER_PAYLOAD_SIZE (sizeof(m) - VHOST_USER_HDR_SIZE)
+
+/* The version of the protocol we support */
+#define VHOST_USER_VERSION (0x1)
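Assuming the request enum is laid out as a 32-bit value (which the QEMU_PACKED wire format relies on), VHOST_USER_HDR_SIZE works out to 4 + 4 + 4 = 12 bytes: every message on the socket is that header followed by msg.size bytes of payload, with any file descriptors passed out of band as SCM_RIGHTS ancillary data on the unix socket.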
+
+static bool ioeventfd_enabled(void)
+{
+ return kvm_enabled() && kvm_eventfds_enabled();
+}
+
+static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
+{
+ CharDriverState *chr = dev->opaque;
+ uint8_t *p = (uint8_t *) msg;
+ int r, size = VHOST_USER_HDR_SIZE;
+
+ r = qemu_chr_fe_read_all(chr, p, size);
+ if (r != size) {
+ error_report("Failed to read msg header. Read %d instead of %d."
+ " Original request %d.", r, size, msg->request);
+ goto fail;
+ }
+
+ /* validate received flags */
+ if (msg->flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
+ error_report("Failed to read msg header."
+ " Flags 0x%x instead of 0x%x.", msg->flags,
+ VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
+ goto fail;
+ }
+
+ /* validate message size is sane */
+ if (msg->size > VHOST_USER_PAYLOAD_SIZE) {
+ error_report("Failed to read msg header."
+ " Size %d exceeds the maximum %zu.", msg->size,
+ VHOST_USER_PAYLOAD_SIZE);
+ goto fail;
+ }
+
+ if (msg->size) {
+ p += VHOST_USER_HDR_SIZE;
+ size = msg->size;
+ r = qemu_chr_fe_read_all(chr, p, size);
+ if (r != size) {
+ error_report("Failed to read msg payload."
+ " Read %d instead of %d.", r, msg->size);
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ return -1;
+}
+
+static bool vhost_user_one_time_request(VhostUserRequest request)
+{
+ switch (request) {
+ case VHOST_USER_SET_OWNER:
+ case VHOST_USER_RESET_OWNER:
+ case VHOST_USER_SET_MEM_TABLE:
+ case VHOST_USER_GET_QUEUE_NUM:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* most non-init callers ignore the error */
+static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
+ int *fds, int fd_num)
+{
+ CharDriverState *chr = dev->opaque;
+ int size = VHOST_USER_HDR_SIZE + msg->size;
+
+ /*
+ * For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE,
+ * we only need to send it once, the first time. Any later such
+ * request is simply ignored.
+ */
+ if (vhost_user_one_time_request(msg->request) && dev->vq_index != 0) {
+ return 0;
+ }
+
+ if (fd_num) {
+ qemu_chr_fe_set_msgfds(chr, fds, fd_num);
+ }
+
+ return qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size) == size ?
+ 0 : -1;
+}
+
+static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
+ struct vhost_log *log)
+{
+ int fds[VHOST_MEMORY_MAX_NREGIONS];
+ size_t fd_num = 0;
+ bool shmfd = virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_LOG_SHMFD);
+ VhostUserMsg msg = {
+ .request = VHOST_USER_SET_LOG_BASE,
+ .flags = VHOST_USER_VERSION,
+ .payload.log.mmap_size = log->size * sizeof(*(log->log)),
+ .payload.log.mmap_offset = 0,
+ .size = sizeof(msg.payload.log),
+ };
+
+ if (shmfd && log->fd != -1) {
+ fds[fd_num++] = log->fd;
+ }
+
+ vhost_user_write(dev, &msg, fds, fd_num);
+
+ if (shmfd) {
+ msg.size = 0;
+ if (vhost_user_read(dev, &msg) < 0) {
+ return 0;
+ }
+
+ if (msg.request != VHOST_USER_SET_LOG_BASE) {
+ error_report("Received unexpected msg type. "
+ "Expected %d received %d",
+ VHOST_USER_SET_LOG_BASE, msg.request);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int vhost_user_set_mem_table(struct vhost_dev *dev,
+ struct vhost_memory *mem)
+{
+ int fds[VHOST_MEMORY_MAX_NREGIONS];
+ int i, fd;
+ size_t fd_num = 0;
+ VhostUserMsg msg = {
+ .request = VHOST_USER_SET_MEM_TABLE,
+ .flags = VHOST_USER_VERSION,
+ };
+
+ for (i = 0; i < dev->mem->nregions; ++i) {
+ struct vhost_memory_region *reg = dev->mem->regions + i;
+ ram_addr_t ram_addr;
+
+ assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
+ qemu_ram_addr_from_host((void *)(uintptr_t)reg->userspace_addr,
+ &ram_addr);
+ fd = qemu_get_ram_fd(ram_addr);
+ if (fd > 0) {
+ msg.payload.memory.regions[fd_num].userspace_addr = reg->userspace_addr;
+ msg.payload.memory.regions[fd_num].memory_size = reg->memory_size;
+ msg.payload.memory.regions[fd_num].guest_phys_addr = reg->guest_phys_addr;
+ msg.payload.memory.regions[fd_num].mmap_offset = reg->userspace_addr -
+ (uintptr_t) qemu_get_ram_block_host_ptr(ram_addr);
+ assert(fd_num < VHOST_MEMORY_MAX_NREGIONS);
+ fds[fd_num++] = fd;
+ }
+ }
+
+ msg.payload.memory.nregions = fd_num;
+
+ if (!fd_num) {
+ error_report("Failed initializing vhost-user memory map, "
+ "consider using -object memory-backend-file share=on");
+ return -1;
+ }
+
+ msg.size = sizeof(msg.payload.memory.nregions);
+ msg.size += sizeof(msg.payload.memory.padding);
+ msg.size += fd_num * sizeof(VhostUserMemoryRegion);
+
+ vhost_user_write(dev, &msg, fds, fd_num);
+
+ return 0;
+}
+
+static int vhost_user_set_vring_addr(struct vhost_dev *dev,
+ struct vhost_vring_addr *addr)
+{
+ VhostUserMsg msg = {
+ .request = VHOST_USER_SET_VRING_ADDR,
+ .flags = VHOST_USER_VERSION,
+ .payload.addr = *addr,
+ .size = sizeof(msg.payload.addr),
+ };
+
+ vhost_user_write(dev, &msg, NULL, 0);
+
+ return 0;
+}
+
+static int vhost_user_set_vring_endian(struct vhost_dev *dev,
+ struct vhost_vring_state *ring)
+{
+ error_report("vhost-user trying to send unhandled ioctl");
+ return -1;
+}
+
+static int vhost_set_vring(struct vhost_dev *dev,
+ unsigned long int request,
+ struct vhost_vring_state *ring)
+{
+ VhostUserMsg msg = {
+ .request = request,
+ .flags = VHOST_USER_VERSION,
+ .payload.state = *ring,
+ .size = sizeof(msg.payload.state),
+ };
+
+ vhost_user_write(dev, &msg, NULL, 0);
+
+ return 0;
+}
+
+static int vhost_user_set_vring_num(struct vhost_dev *dev,
+ struct vhost_vring_state *ring)
+{
+ return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
+}
+
+static int vhost_user_set_vring_base(struct vhost_dev *dev,
+ struct vhost_vring_state *ring)
+{
+ return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
+}
+
+static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
+{
+ int i;
+
+ if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
+ return -1;
+ }
+
+ for (i = 0; i < dev->nvqs; ++i) {
+ struct vhost_vring_state state = {
+ .index = dev->vq_index + i,
+ .num = enable,
+ };
+
+ vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
+ }
+
+ return 0;
+}
+
+static int vhost_user_get_vring_base(struct vhost_dev *dev,
+ struct vhost_vring_state *ring)
+{
+ VhostUserMsg msg = {
+ .request = VHOST_USER_GET_VRING_BASE,
+ .flags = VHOST_USER_VERSION,
+ .payload.state = *ring,
+ .size = sizeof(msg.payload.state),
+ };
+
+ vhost_user_write(dev, &msg, NULL, 0);
+
+ if (vhost_user_read(dev, &msg) < 0) {
+ return 0;
+ }
+
+ if (msg.request != VHOST_USER_GET_VRING_BASE) {
+ error_report("Received unexpected msg type. Expected %d received %d",
+ VHOST_USER_GET_VRING_BASE, msg.request);
+ return -1;
+ }
+
+ if (msg.size != sizeof(msg.payload.state)) {
+ error_report("Received bad msg size.");
+ return -1;
+ }
+
+ *ring = msg.payload.state;
+
+ return 0;
+}
+
+static int vhost_set_vring_file(struct vhost_dev *dev,
+ VhostUserRequest request,
+ struct vhost_vring_file *file)
+{
+ int fds[VHOST_MEMORY_MAX_NREGIONS];
+ size_t fd_num = 0;
+ VhostUserMsg msg = {
+ .request = request,
+ .flags = VHOST_USER_VERSION,
+ .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
+ .size = sizeof(msg.payload.u64),
+ };
+
+ if (ioeventfd_enabled() && file->fd > 0) {
+ fds[fd_num++] = file->fd;
+ } else {
+ msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
+ }
+
+ vhost_user_write(dev, &msg, fds, fd_num);
+
+ return 0;
+}
+
+static int vhost_user_set_vring_kick(struct vhost_dev *dev,
+ struct vhost_vring_file *file)
+{
+ return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
+}
+
+static int vhost_user_set_vring_call(struct vhost_dev *dev,
+ struct vhost_vring_file *file)
+{
+ return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
+}
+
+static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
+{
+ VhostUserMsg msg = {
+ .request = request,
+ .flags = VHOST_USER_VERSION,
+ .payload.u64 = u64,
+ .size = sizeof(msg.payload.u64),
+ };
+
+ vhost_user_write(dev, &msg, NULL, 0);
+
+ return 0;
+}
+
+static int vhost_user_set_features(struct vhost_dev *dev,
+ uint64_t features)
+{
+ return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features);
+}
+
+static int vhost_user_set_protocol_features(struct vhost_dev *dev,
+ uint64_t features)
+{
+ return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features);
+}
+
+static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
+{
+ VhostUserMsg msg = {
+ .request = request,
+ .flags = VHOST_USER_VERSION,
+ };
+
+ if (vhost_user_one_time_request(request) && dev->vq_index != 0) {
+ return 0;
+ }
+
+ vhost_user_write(dev, &msg, NULL, 0);
+
+ if (vhost_user_read(dev, &msg) < 0) {
+ return 0;
+ }
+
+ if (msg.request != request) {
+ error_report("Received unexpected msg type. Expected %d received %d",
+ request, msg.request);
+ return -1;
+ }
+
+ if (msg.size != sizeof(msg.payload.u64)) {
+ error_report("Received bad msg size.");
+ return -1;
+ }
+
+ *u64 = msg.payload.u64;
+
+ return 0;
+}
+
+static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
+{
+ return vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features);
+}
+
+static int vhost_user_set_owner(struct vhost_dev *dev)
+{
+ VhostUserMsg msg = {
+ .request = VHOST_USER_SET_OWNER,
+ .flags = VHOST_USER_VERSION,
+ };
+
+ vhost_user_write(dev, &msg, NULL, 0);
+
+ return 0;
+}
+
+static int vhost_user_reset_device(struct vhost_dev *dev)
+{
+ VhostUserMsg msg = {
+ .request = VHOST_USER_RESET_OWNER,
+ .flags = VHOST_USER_VERSION,
+ };
+
+ vhost_user_write(dev, &msg, NULL, 0);
+
+ return 0;
+}
+
+static int vhost_user_init(struct vhost_dev *dev, void *opaque)
+{
+ uint64_t features;
+ int err;
+
+ assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
+
+ dev->opaque = opaque;
+
+ err = vhost_user_get_features(dev, &features);
+ if (err < 0) {
+ return err;
+ }
+
+ if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
+ dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
+
+ err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES,
+ &features);
+ if (err < 0) {
+ return err;
+ }
+
+ dev->protocol_features = features & VHOST_USER_PROTOCOL_FEATURE_MASK;
+ err = vhost_user_set_protocol_features(dev, dev->protocol_features);
+ if (err < 0) {
+ return err;
+ }
+
+ /* query the max queues we support if backend supports Multiple Queue */
+ if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
+ err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM,
+ &dev->max_queues);
+ if (err < 0) {
+ return err;
+ }
+ }
+ }
+
+ if (dev->migration_blocker == NULL &&
+ !virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
+ error_setg(&dev->migration_blocker,
+ "Migration disabled: vhost-user backend lacks "
+ "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
+ }
+
+ return 0;
+}
+
+static int vhost_user_cleanup(struct vhost_dev *dev)
+{
+ assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
+
+ dev->opaque = 0;
+
+ return 0;
+}
+
+static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
+{
+ assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
+
+ return idx;
+}
+
+static int vhost_user_memslots_limit(struct vhost_dev *dev)
+{
+ return VHOST_MEMORY_MAX_NREGIONS;
+}
+
+static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
+{
+ assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
+
+ return virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_LOG_SHMFD);
+}
+
+static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr)
+{
+ VhostUserMsg msg = { 0 };
+ int err;
+
+ assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
+
+ /* If guest supports GUEST_ANNOUNCE do nothing */
+ if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) {
+ return 0;
+ }
+
+ /* if backend supports VHOST_USER_PROTOCOL_F_RARP ask it to send the RARP */
+ if (virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_RARP)) {
+ msg.request = VHOST_USER_SEND_RARP;
+ msg.flags = VHOST_USER_VERSION;
+ memcpy((char *)&msg.payload.u64, mac_addr, 6);
+ msg.size = sizeof(msg.payload.u64);
+
+ err = vhost_user_write(dev, &msg, NULL, 0);
+ return err;
+ }
+ return -1;
+}
+
+static bool vhost_user_can_merge(struct vhost_dev *dev,
+ uint64_t start1, uint64_t size1,
+ uint64_t start2, uint64_t size2)
+{
+ ram_addr_t ram_addr;
+ int mfd, rfd;
+ MemoryRegion *mr;
+
+ mr = qemu_ram_addr_from_host((void *)(uintptr_t)start1, &ram_addr);
+ assert(mr);
+ mfd = qemu_get_ram_fd(ram_addr);
+
+ mr = qemu_ram_addr_from_host((void *)(uintptr_t)start2, &ram_addr);
+ assert(mr);
+ rfd = qemu_get_ram_fd(ram_addr);
+
+ return mfd == rfd;
+}
+
+const VhostOps user_ops = {
+ .backend_type = VHOST_BACKEND_TYPE_USER,
+ .vhost_backend_init = vhost_user_init,
+ .vhost_backend_cleanup = vhost_user_cleanup,
+ .vhost_backend_memslots_limit = vhost_user_memslots_limit,
+ .vhost_set_log_base = vhost_user_set_log_base,
+ .vhost_set_mem_table = vhost_user_set_mem_table,
+ .vhost_set_vring_addr = vhost_user_set_vring_addr,
+ .vhost_set_vring_endian = vhost_user_set_vring_endian,
+ .vhost_set_vring_num = vhost_user_set_vring_num,
+ .vhost_set_vring_base = vhost_user_set_vring_base,
+ .vhost_get_vring_base = vhost_user_get_vring_base,
+ .vhost_set_vring_kick = vhost_user_set_vring_kick,
+ .vhost_set_vring_call = vhost_user_set_vring_call,
+ .vhost_set_features = vhost_user_set_features,
+ .vhost_get_features = vhost_user_get_features,
+ .vhost_set_owner = vhost_user_set_owner,
+ .vhost_reset_device = vhost_user_reset_device,
+ .vhost_get_vq_index = vhost_user_get_vq_index,
+ .vhost_set_vring_enable = vhost_user_set_vring_enable,
+ .vhost_requires_shm_log = vhost_user_requires_shm_log,
+ .vhost_migration_done = vhost_user_migration_done,
+ .vhost_backend_can_merge = vhost_user_can_merge,
+};
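To summarize the negotiation implemented by vhost_user_init() above: the front-end first issues VHOST_USER_GET_FEATURES; if the backend offers VHOST_USER_F_PROTOCOL_FEATURES it exchanges GET/SET_PROTOCOL_FEATURES (masked with VHOST_USER_PROTOCOL_FEATURE_MASK) and, when the MQ protocol feature is present, queries VHOST_USER_GET_QUEUE_NUM; finally, a backend that lacks VHOST_USER_PROTOCOL_F_LOG_SHMFD gets a migration blocker registered, since dirty-log sharing over an mmap'able fd is unavailable.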
diff --git a/src/hw/virtio/vhost.c b/src/hw/virtio/vhost.c
new file mode 100644
index 0000000..90c60a7
--- /dev/null
+++ b/src/hw/virtio/vhost.c
@@ -0,0 +1,1271 @@
+/*
+ * vhost support
+ *
+ * Copyright Red Hat, Inc. 2010
+ *
+ * Authors:
+ * Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include "hw/virtio/vhost.h"
+#include "hw/hw.h"
+#include "qemu/atomic.h"
+#include "qemu/range.h"
+#include "qemu/error-report.h"
+#include "qemu/memfd.h"
+#include <linux/vhost.h>
+#include "exec/address-spaces.h"
+#include "hw/virtio/virtio-bus.h"
+#include "hw/virtio/virtio-access.h"
+#include "migration/migration.h"
+
+static struct vhost_log *vhost_log;
+static struct vhost_log *vhost_log_shm;
+
+static unsigned int used_memslots;
+static QLIST_HEAD(, vhost_dev) vhost_devices =
+ QLIST_HEAD_INITIALIZER(vhost_devices);
+
+bool vhost_has_free_slot(void)
+{
+ unsigned int slots_limit = ~0U;
+ struct vhost_dev *hdev;
+
+ QLIST_FOREACH(hdev, &vhost_devices, entry) {
+ unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
+ slots_limit = MIN(slots_limit, r);
+ }
+ return slots_limit > used_memslots;
+}
+
+static void vhost_dev_sync_region(struct vhost_dev *dev,
+ MemoryRegionSection *section,
+ uint64_t mfirst, uint64_t mlast,
+ uint64_t rfirst, uint64_t rlast)
+{
+ vhost_log_chunk_t *log = dev->log->log;
+
+ uint64_t start = MAX(mfirst, rfirst);
+ uint64_t end = MIN(mlast, rlast);
+ vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
+ vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
+ uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;
+
+ if (end < start) {
+ return;
+ }
+ assert(end / VHOST_LOG_CHUNK < dev->log_size);
+ assert(start / VHOST_LOG_CHUNK < dev->log_size);
+
+ for (;from < to; ++from) {
+ vhost_log_chunk_t log;
+ /* We first check with non-atomic: much cheaper,
+ * and we expect non-dirty to be the common case. */
+ if (!*from) {
+ addr += VHOST_LOG_CHUNK;
+ continue;
+ }
+ /* Data must be read atomically. We don't really need barrier semantics
+ * but it's easier to use atomic_* than roll our own. */
+ log = atomic_xchg(from, 0);
+ while (log) {
+ int bit = ctzl(log);
+ hwaddr page_addr;
+ hwaddr section_offset;
+ hwaddr mr_offset;
+ page_addr = addr + bit * VHOST_LOG_PAGE;
+ section_offset = page_addr - section->offset_within_address_space;
+ mr_offset = section_offset + section->offset_within_region;
+ memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
+ log &= ~(0x1ull << bit);
+ }
+ addr += VHOST_LOG_CHUNK;
+ }
+}
+
+static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
+ MemoryRegionSection *section,
+ hwaddr first,
+ hwaddr last)
+{
+ int i;
+ hwaddr start_addr;
+ hwaddr end_addr;
+
+ if (!dev->log_enabled || !dev->started) {
+ return 0;
+ }
+ start_addr = section->offset_within_address_space;
+ end_addr = range_get_last(start_addr, int128_get64(section->size));
+ start_addr = MAX(first, start_addr);
+ end_addr = MIN(last, end_addr);
+
+ for (i = 0; i < dev->mem->nregions; ++i) {
+ struct vhost_memory_region *reg = dev->mem->regions + i;
+ vhost_dev_sync_region(dev, section, start_addr, end_addr,
+ reg->guest_phys_addr,
+ range_get_last(reg->guest_phys_addr,
+ reg->memory_size));
+ }
+ for (i = 0; i < dev->nvqs; ++i) {
+ struct vhost_virtqueue *vq = dev->vqs + i;
+ vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
+ range_get_last(vq->used_phys, vq->used_size));
+ }
+ return 0;
+}
+
+static void vhost_log_sync(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+ memory_listener);
+ vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
+}
+
+static void vhost_log_sync_range(struct vhost_dev *dev,
+ hwaddr first, hwaddr last)
+{
+ int i;
+ /* FIXME: this is N^2 in number of sections */
+ for (i = 0; i < dev->n_mem_sections; ++i) {
+ MemoryRegionSection *section = &dev->mem_sections[i];
+ vhost_sync_dirty_bitmap(dev, section, first, last);
+ }
+}
+
+/* Assign/unassign. Keep an unsorted array of non-overlapping
+ * memory regions in dev->mem. */
+static void vhost_dev_unassign_memory(struct vhost_dev *dev,
+ uint64_t start_addr,
+ uint64_t size)
+{
+ int from, to, n = dev->mem->nregions;
+ /* Track overlapping/split regions for sanity checking. */
+ int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;
+
+ for (from = 0, to = 0; from < n; ++from, ++to) {
+ struct vhost_memory_region *reg = dev->mem->regions + to;
+ uint64_t reglast;
+ uint64_t memlast;
+ uint64_t change;
+
+ /* clone old region */
+ if (to != from) {
+ memcpy(reg, dev->mem->regions + from, sizeof *reg);
+ }
+
+ /* No overlap is simple */
+ if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
+ start_addr, size)) {
+ continue;
+ }
+
+ /* Split only happens if supplied region
+ * is in the middle of an existing one. Thus it can not
+ * overlap with any other existing region. */
+ assert(!split);
+
+ reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
+ memlast = range_get_last(start_addr, size);
+
+ /* Remove whole region */
+ if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
+ --dev->mem->nregions;
+ --to;
+ ++overlap_middle;
+ continue;
+ }
+
+ /* Shrink region */
+ if (memlast >= reglast) {
+ reg->memory_size = start_addr - reg->guest_phys_addr;
+ assert(reg->memory_size);
+ assert(!overlap_end);
+ ++overlap_end;
+ continue;
+ }
+
+ /* Shift region */
+ if (start_addr <= reg->guest_phys_addr) {
+ change = memlast + 1 - reg->guest_phys_addr;
+ reg->memory_size -= change;
+ reg->guest_phys_addr += change;
+ reg->userspace_addr += change;
+ assert(reg->memory_size);
+ assert(!overlap_start);
+ ++overlap_start;
+ continue;
+ }
+
+ /* This only happens if supplied region
+ * is in the middle of an existing one. Thus it can not
+ * overlap with any other existing region. */
+ assert(!overlap_start);
+ assert(!overlap_end);
+ assert(!overlap_middle);
+ /* Split region: shrink first part, shift second part. */
+ memcpy(dev->mem->regions + n, reg, sizeof *reg);
+ reg->memory_size = start_addr - reg->guest_phys_addr;
+ assert(reg->memory_size);
+ change = memlast + 1 - reg->guest_phys_addr;
+ reg = dev->mem->regions + n;
+ reg->memory_size -= change;
+ assert(reg->memory_size);
+ reg->guest_phys_addr += change;
+ reg->userspace_addr += change;
+ /* Never add more than 1 region */
+ assert(dev->mem->nregions == n);
+ ++dev->mem->nregions;
+ ++split;
+ }
+}
+
+/* Called after unassign, so no regions overlap the given range. */
+static void vhost_dev_assign_memory(struct vhost_dev *dev,
+ uint64_t start_addr,
+ uint64_t size,
+ uint64_t uaddr)
+{
+ int from, to;
+ struct vhost_memory_region *merged = NULL;
+ for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
+ struct vhost_memory_region *reg = dev->mem->regions + to;
+ uint64_t prlast, urlast;
+ uint64_t pmlast, umlast;
+ uint64_t s, e, u;
+
+ /* clone old region */
+ if (to != from) {
+ memcpy(reg, dev->mem->regions + from, sizeof *reg);
+ }
+ prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
+ pmlast = range_get_last(start_addr, size);
+ urlast = range_get_last(reg->userspace_addr, reg->memory_size);
+ umlast = range_get_last(uaddr, size);
+
+ /* check for overlapping regions: should never happen. */
+ assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
+ /* Not an adjacent or overlapping region - do not merge. */
+ if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
+ (pmlast + 1 != reg->guest_phys_addr ||
+ umlast + 1 != reg->userspace_addr)) {
+ continue;
+ }
+
+ if (dev->vhost_ops->vhost_backend_can_merge &&
+ !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
+ reg->userspace_addr,
+ reg->memory_size)) {
+ continue;
+ }
+
+ if (merged) {
+ --to;
+ assert(to >= 0);
+ } else {
+ merged = reg;
+ }
+ u = MIN(uaddr, reg->userspace_addr);
+ s = MIN(start_addr, reg->guest_phys_addr);
+ e = MAX(pmlast, prlast);
+ uaddr = merged->userspace_addr = u;
+ start_addr = merged->guest_phys_addr = s;
+ size = merged->memory_size = e - s + 1;
+ assert(merged->memory_size);
+ }
+
+ if (!merged) {
+ struct vhost_memory_region *reg = dev->mem->regions + to;
+ memset(reg, 0, sizeof *reg);
+ reg->memory_size = size;
+ assert(reg->memory_size);
+ reg->guest_phys_addr = start_addr;
+ reg->userspace_addr = uaddr;
+ ++to;
+ }
+ assert(to <= dev->mem->nregions + 1);
+ dev->mem->nregions = to;
+}
+
+static uint64_t vhost_get_log_size(struct vhost_dev *dev)
+{
+ uint64_t log_size = 0;
+ int i;
+ for (i = 0; i < dev->mem->nregions; ++i) {
+ struct vhost_memory_region *reg = dev->mem->regions + i;
+ uint64_t last = range_get_last(reg->guest_phys_addr,
+ reg->memory_size);
+ log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
+ }
+ for (i = 0; i < dev->nvqs; ++i) {
+ struct vhost_virtqueue *vq = dev->vqs + i;
+ uint64_t last = vq->used_phys + vq->used_size - 1;
+ log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
+ }
+ return log_size;
+}
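As a worked example (assuming the definitions in hw/virtio/vhost.h, where each bit of a vhost_log_chunk_t covers one 4 KiB page, so on a 64-bit host one chunk word covers 256 KiB of guest-physical space): a guest whose highest RAM address is just below 4 GiB needs last / VHOST_LOG_CHUNK + 1 = 16384 chunk words, i.e. a 128 KiB dirty log.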
+
+static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
+{
+ struct vhost_log *log;
+ uint64_t logsize = size * sizeof(*(log->log));
+ int fd = -1;
+
+ log = g_new0(struct vhost_log, 1);
+ if (share) {
+ log->log = qemu_memfd_alloc("vhost-log", logsize,
+ F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
+ &fd);
+ memset(log->log, 0, logsize);
+ } else {
+ log->log = g_malloc0(logsize);
+ }
+
+ log->size = size;
+ log->refcnt = 1;
+ log->fd = fd;
+
+ return log;
+}
+
+static struct vhost_log *vhost_log_get(uint64_t size, bool share)
+{
+ struct vhost_log *log = share ? vhost_log_shm : vhost_log;
+
+ if (!log || log->size != size) {
+ log = vhost_log_alloc(size, share);
+ if (share) {
+ vhost_log_shm = log;
+ } else {
+ vhost_log = log;
+ }
+ } else {
+ ++log->refcnt;
+ }
+
+ return log;
+}
+
+static void vhost_log_put(struct vhost_dev *dev, bool sync)
+{
+ struct vhost_log *log = dev->log;
+
+ if (!log) {
+ return;
+ }
+
+ --log->refcnt;
+ if (log->refcnt == 0) {
+ /* Sync only the range covered by the old log */
+ if (dev->log_size && sync) {
+ vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
+ }
+
+ if (vhost_log == log) {
+ g_free(log->log);
+ vhost_log = NULL;
+ } else if (vhost_log_shm == log) {
+ qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
+ log->fd);
+ vhost_log_shm = NULL;
+ }
+
+ g_free(log);
+ }
+}
+
+static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
+{
+ return dev->vhost_ops->vhost_requires_shm_log &&
+ dev->vhost_ops->vhost_requires_shm_log(dev);
+}
+
+static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
+{
+ struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
+ uint64_t log_base = (uintptr_t)log->log;
+ int r;
+
+ /* Inform the backend of the log switch; this must be done before
+ releasing the current log, to ensure no logging is lost. */
+ r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
+ assert(r >= 0);
+ vhost_log_put(dev, true);
+ dev->log = log;
+ dev->log_size = size;
+}
+
+static int vhost_verify_ring_mappings(struct vhost_dev *dev,
+ uint64_t start_addr,
+ uint64_t size)
+{
+ int i;
+ int r = 0;
+
+ for (i = 0; !r && i < dev->nvqs; ++i) {
+ struct vhost_virtqueue *vq = dev->vqs + i;
+ hwaddr l;
+ void *p;
+
+ if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
+ continue;
+ }
+ l = vq->ring_size;
+ p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
+ if (!p || l != vq->ring_size) {
+ fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
+ r = -ENOMEM;
+ }
+ if (p != vq->ring) {
+ fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
+ r = -EBUSY;
+ }
+ cpu_physical_memory_unmap(p, l, 0, 0);
+ }
+ return r;
+}
+
+static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
+ uint64_t start_addr,
+ uint64_t size)
+{
+ int i, n = dev->mem->nregions;
+ for (i = 0; i < n; ++i) {
+ struct vhost_memory_region *reg = dev->mem->regions + i;
+ if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
+ start_addr, size)) {
+ return reg;
+ }
+ }
+ return NULL;
+}
+
+static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
+ uint64_t start_addr,
+ uint64_t size,
+ uint64_t uaddr)
+{
+ struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
+ uint64_t reglast;
+ uint64_t memlast;
+
+ if (!reg) {
+ return true;
+ }
+
+ reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
+ memlast = range_get_last(start_addr, size);
+
+ /* Need to extend region? */
+ if (start_addr < reg->guest_phys_addr || memlast > reglast) {
+ return true;
+ }
+ /* userspace_addr changed? */
+ return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
+}
+
+static void vhost_set_memory(MemoryListener *listener,
+ MemoryRegionSection *section,
+ bool add)
+{
+ struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+ memory_listener);
+ hwaddr start_addr = section->offset_within_address_space;
+ ram_addr_t size = int128_get64(section->size);
+ bool log_dirty =
+ memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
+ int s = offsetof(struct vhost_memory, regions) +
+ (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
+ void *ram;
+
+ dev->mem = g_realloc(dev->mem, s);
+
+ if (log_dirty) {
+ add = false;
+ }
+
+ assert(size);
+
+ /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
+ ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
+ if (add) {
+ if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
+ /* Region exists with same address. Nothing to do. */
+ return;
+ }
+ } else {
+ if (!vhost_dev_find_reg(dev, start_addr, size)) {
+ /* Removing region that we don't access. Nothing to do. */
+ return;
+ }
+ }
+
+ vhost_dev_unassign_memory(dev, start_addr, size);
+ if (add) {
+ /* Add given mapping, merging adjacent regions if any */
+ vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
+ } else {
+ /* Remove old mapping for this memory, if any. */
+ vhost_dev_unassign_memory(dev, start_addr, size);
+ }
+ dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
+ dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
+ dev->memory_changed = true;
+ used_memslots = dev->mem->nregions;
+}
+
+static bool vhost_section(MemoryRegionSection *section)
+{
+ return memory_region_is_ram(section->mr);
+}
+
+static void vhost_begin(MemoryListener *listener)
+{
+ struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+ memory_listener);
+ dev->mem_changed_end_addr = 0;
+ dev->mem_changed_start_addr = -1;
+}
+
+static void vhost_commit(MemoryListener *listener)
+{
+ struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+ memory_listener);
+ hwaddr start_addr = 0;
+ ram_addr_t size = 0;
+ uint64_t log_size;
+ int r;
+
+ if (!dev->memory_changed) {
+ return;
+ }
+ if (!dev->started) {
+ return;
+ }
+ if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
+ return;
+ }
+
+ if (dev->started) {
+ start_addr = dev->mem_changed_start_addr;
+ size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;
+
+ r = vhost_verify_ring_mappings(dev, start_addr, size);
+ assert(r >= 0);
+ }
+
+ if (!dev->log_enabled) {
+ r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
+ assert(r >= 0);
+ dev->memory_changed = false;
+ return;
+ }
+ log_size = vhost_get_log_size(dev);
+ /* We allocate an extra 4K bytes to log,
+ * to reduce the number of reallocations. */
+#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
+ /* To log more, must increase log size before table update. */
+ if (dev->log_size < log_size) {
+ vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
+ }
+ r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
+ assert(r >= 0);
+ /* To log less, can only decrease log size after table update. */
+ if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
+ vhost_dev_log_resize(dev, log_size);
+ }
+ dev->memory_changed = false;
+}
+
+static void vhost_region_add(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+ memory_listener);
+
+ if (!vhost_section(section)) {
+ return;
+ }
+
+ ++dev->n_mem_sections;
+ dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
+ dev->n_mem_sections);
+ dev->mem_sections[dev->n_mem_sections - 1] = *section;
+ memory_region_ref(section->mr);
+ vhost_set_memory(listener, section, true);
+}
+
+static void vhost_region_del(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+ memory_listener);
+ int i;
+
+ if (!vhost_section(section)) {
+ return;
+ }
+
+ vhost_set_memory(listener, section, false);
+ memory_region_unref(section->mr);
+ for (i = 0; i < dev->n_mem_sections; ++i) {
+ if (dev->mem_sections[i].offset_within_address_space
+ == section->offset_within_address_space) {
+ --dev->n_mem_sections;
+ memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
+ (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
+ break;
+ }
+ }
+}
+
+static void vhost_region_nop(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+}
+
+static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
+ struct vhost_virtqueue *vq,
+ unsigned idx, bool enable_log)
+{
+ struct vhost_vring_addr addr = {
+ .index = idx,
+ .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
+ .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
+ .used_user_addr = (uint64_t)(unsigned long)vq->used,
+ .log_guest_addr = vq->used_phys,
+ .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
+ };
+ int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
+ if (r < 0) {
+ return -errno;
+ }
+ return 0;
+}
+
+static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
+{
+ uint64_t features = dev->acked_features;
+ int r;
+ if (enable_log) {
+ features |= 0x1ULL << VHOST_F_LOG_ALL;
+ }
+ r = dev->vhost_ops->vhost_set_features(dev, features);
+ return r < 0 ? -errno : 0;
+}
+
+static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
+{
+ int r, t, i, idx;
+ r = vhost_dev_set_features(dev, enable_log);
+ if (r < 0) {
+ goto err_features;
+ }
+ for (i = 0; i < dev->nvqs; ++i) {
+ idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
+ r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
+ enable_log);
+ if (r < 0) {
+ goto err_vq;
+ }
+ }
+ return 0;
+err_vq:
+ for (; i >= 0; --i) {
+ idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
+ t = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
+ dev->log_enabled);
+ assert(t >= 0);
+ }
+ t = vhost_dev_set_features(dev, dev->log_enabled);
+ assert(t >= 0);
+err_features:
+ return r;
+}
+
+static int vhost_migration_log(MemoryListener *listener, int enable)
+{
+ struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+ memory_listener);
+ int r;
+ if (!!enable == dev->log_enabled) {
+ return 0;
+ }
+ if (!dev->started) {
+ dev->log_enabled = enable;
+ return 0;
+ }
+ if (!enable) {
+ r = vhost_dev_set_log(dev, false);
+ if (r < 0) {
+ return r;
+ }
+ vhost_log_put(dev, false);
+ dev->log = NULL;
+ dev->log_size = 0;
+ } else {
+ vhost_dev_log_resize(dev, vhost_get_log_size(dev));
+ r = vhost_dev_set_log(dev, true);
+ if (r < 0) {
+ return r;
+ }
+ }
+ dev->log_enabled = enable;
+ return 0;
+}
+
+static void vhost_log_global_start(MemoryListener *listener)
+{
+ int r;
+
+ r = vhost_migration_log(listener, true);
+ if (r < 0) {
+ abort();
+ }
+}
+
+static void vhost_log_global_stop(MemoryListener *listener)
+{
+ int r;
+
+ r = vhost_migration_log(listener, false);
+ if (r < 0) {
+ abort();
+ }
+}
+
+static void vhost_log_start(MemoryListener *listener,
+ MemoryRegionSection *section,
+ int old, int new)
+{
+ /* FIXME: implement */
+}
+
+static void vhost_log_stop(MemoryListener *listener,
+ MemoryRegionSection *section,
+ int old, int new)
+{
+ /* FIXME: implement */
+}
+
+static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
+ bool is_big_endian,
+ int vhost_vq_index)
+{
+ struct vhost_vring_state s = {
+ .index = vhost_vq_index,
+ .num = is_big_endian
+ };
+
+ if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
+ return 0;
+ }
+
+ if (errno == ENOTTY) {
+ error_report("vhost does not support cross-endian");
+ return -ENOSYS;
+ }
+
+ return -errno;
+}
+
+static int vhost_virtqueue_start(struct vhost_dev *dev,
+ struct VirtIODevice *vdev,
+ struct vhost_virtqueue *vq,
+ unsigned idx)
+{
+ hwaddr s, l, a;
+ int r;
+ int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
+ struct vhost_vring_file file = {
+ .index = vhost_vq_index
+ };
+ struct vhost_vring_state state = {
+ .index = vhost_vq_index
+ };
+ struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
+
+
+ vq->num = state.num = virtio_queue_get_num(vdev, idx);
+ r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
+ if (r) {
+ return -errno;
+ }
+
+ state.num = virtio_queue_get_last_avail_idx(vdev, idx);
+ r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
+ if (r) {
+ return -errno;
+ }
+
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
+ virtio_legacy_is_cross_endian(vdev)) {
+ r = vhost_virtqueue_set_vring_endian_legacy(dev,
+ virtio_is_big_endian(vdev),
+ vhost_vq_index);
+ if (r) {
+ return -errno;
+ }
+ }
+
+ s = l = virtio_queue_get_desc_size(vdev, idx);
+ a = virtio_queue_get_desc_addr(vdev, idx);
+ vq->desc = cpu_physical_memory_map(a, &l, 0);
+ if (!vq->desc || l != s) {
+ r = -ENOMEM;
+ goto fail_alloc_desc;
+ }
+ s = l = virtio_queue_get_avail_size(vdev, idx);
+ a = virtio_queue_get_avail_addr(vdev, idx);
+ vq->avail = cpu_physical_memory_map(a, &l, 0);
+ if (!vq->avail || l != s) {
+ r = -ENOMEM;
+ goto fail_alloc_avail;
+ }
+ vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
+ vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
+ vq->used = cpu_physical_memory_map(a, &l, 1);
+ if (!vq->used || l != s) {
+ r = -ENOMEM;
+ goto fail_alloc_used;
+ }
+
+ vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
+ vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
+ vq->ring = cpu_physical_memory_map(a, &l, 1);
+ if (!vq->ring || l != s) {
+ r = -ENOMEM;
+ goto fail_alloc_ring;
+ }
+
+ r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
+ if (r < 0) {
+ r = -errno;
+ goto fail_alloc;
+ }
+
+ file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
+ r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
+ if (r) {
+ r = -errno;
+ goto fail_kick;
+ }
+
+ /* Clear and discard previous events if any. */
+ event_notifier_test_and_clear(&vq->masked_notifier);
+
+ return 0;
+
+fail_kick:
+fail_alloc:
+ cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
+ 0, 0);
+fail_alloc_ring:
+ cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
+ 0, 0);
+fail_alloc_used:
+ cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
+ 0, 0);
+fail_alloc_avail:
+ cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
+ 0, 0);
+fail_alloc_desc:
+ return r;
+}
+
+static void vhost_virtqueue_stop(struct vhost_dev *dev,
+ struct VirtIODevice *vdev,
+ struct vhost_virtqueue *vq,
+ unsigned idx)
+{
+ int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
+ struct vhost_vring_state state = {
+ .index = vhost_vq_index,
+ };
+ int r;
+
+ r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
+ if (r < 0) {
+ fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
+ fflush(stderr);
+ }
+ virtio_queue_set_last_avail_idx(vdev, idx, state.num);
+ virtio_queue_invalidate_signalled_used(vdev, idx);
+
+    /* In the cross-endian case, we need to reset the vring endianness to
+     * native, which is what legacy devices expect by default.
+     */
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
+ virtio_legacy_is_cross_endian(vdev)) {
+ r = vhost_virtqueue_set_vring_endian_legacy(dev,
+ !virtio_is_big_endian(vdev),
+ vhost_vq_index);
+ if (r < 0) {
+ error_report("failed to reset vring endianness");
+ }
+ }
+
+    assert(r >= 0);
+ cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
+ 0, virtio_queue_get_ring_size(vdev, idx));
+ cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
+ 1, virtio_queue_get_used_size(vdev, idx));
+ cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
+ 0, virtio_queue_get_avail_size(vdev, idx));
+ cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
+ 0, virtio_queue_get_desc_size(vdev, idx));
+}
+
+static void vhost_eventfd_add(MemoryListener *listener,
+ MemoryRegionSection *section,
+ bool match_data, uint64_t data, EventNotifier *e)
+{
+}
+
+static void vhost_eventfd_del(MemoryListener *listener,
+ MemoryRegionSection *section,
+ bool match_data, uint64_t data, EventNotifier *e)
+{
+}
+
+static int vhost_virtqueue_init(struct vhost_dev *dev,
+ struct vhost_virtqueue *vq, int n)
+{
+ int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
+ struct vhost_vring_file file = {
+ .index = vhost_vq_index,
+ };
+ int r = event_notifier_init(&vq->masked_notifier, 0);
+ if (r < 0) {
+ return r;
+ }
+
+ file.fd = event_notifier_get_fd(&vq->masked_notifier);
+ r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
+ if (r) {
+ r = -errno;
+ goto fail_call;
+ }
+ return 0;
+fail_call:
+ event_notifier_cleanup(&vq->masked_notifier);
+ return r;
+}
+
+static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
+{
+ event_notifier_cleanup(&vq->masked_notifier);
+}
+
+int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
+ VhostBackendType backend_type)
+{
+ uint64_t features;
+ int i, r;
+
+ hdev->migration_blocker = NULL;
+
+ if (vhost_set_backend_type(hdev, backend_type) < 0) {
+ close((uintptr_t)opaque);
+ return -1;
+ }
+
+ if (hdev->vhost_ops->vhost_backend_init(hdev, opaque) < 0) {
+ close((uintptr_t)opaque);
+ return -errno;
+ }
+
+ if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
+ fprintf(stderr, "vhost backend memory slots limit is less"
+ " than current number of present memory slots\n");
+ close((uintptr_t)opaque);
+ return -1;
+ }
+ QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
+
+ r = hdev->vhost_ops->vhost_set_owner(hdev);
+ if (r < 0) {
+ goto fail;
+ }
+
+ r = hdev->vhost_ops->vhost_get_features(hdev, &features);
+ if (r < 0) {
+ goto fail;
+ }
+
+ for (i = 0; i < hdev->nvqs; ++i) {
+ r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
+ if (r < 0) {
+ goto fail_vq;
+ }
+ }
+ hdev->features = features;
+
+ hdev->memory_listener = (MemoryListener) {
+ .begin = vhost_begin,
+ .commit = vhost_commit,
+ .region_add = vhost_region_add,
+ .region_del = vhost_region_del,
+ .region_nop = vhost_region_nop,
+ .log_start = vhost_log_start,
+ .log_stop = vhost_log_stop,
+ .log_sync = vhost_log_sync,
+ .log_global_start = vhost_log_global_start,
+ .log_global_stop = vhost_log_global_stop,
+ .eventfd_add = vhost_eventfd_add,
+ .eventfd_del = vhost_eventfd_del,
+ .priority = 10
+ };
+
+ if (hdev->migration_blocker == NULL) {
+ if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
+ error_setg(&hdev->migration_blocker,
+ "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
+ } else if (!qemu_memfd_check()) {
+ error_setg(&hdev->migration_blocker,
+ "Migration disabled: failed to allocate shared memory");
+ }
+ }
+
+ if (hdev->migration_blocker != NULL) {
+ migrate_add_blocker(hdev->migration_blocker);
+ }
+
+ hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
+ hdev->n_mem_sections = 0;
+ hdev->mem_sections = NULL;
+ hdev->log = NULL;
+ hdev->log_size = 0;
+ hdev->log_enabled = false;
+ hdev->started = false;
+ hdev->memory_changed = false;
+ memory_listener_register(&hdev->memory_listener, &address_space_memory);
+ return 0;
+fail_vq:
+ while (--i >= 0) {
+ vhost_virtqueue_cleanup(hdev->vqs + i);
+ }
+fail:
+ r = -errno;
+ hdev->vhost_ops->vhost_backend_cleanup(hdev);
+ QLIST_REMOVE(hdev, entry);
+ return r;
+}
+
+void vhost_dev_cleanup(struct vhost_dev *hdev)
+{
+ int i;
+ for (i = 0; i < hdev->nvqs; ++i) {
+ vhost_virtqueue_cleanup(hdev->vqs + i);
+ }
+ memory_listener_unregister(&hdev->memory_listener);
+ if (hdev->migration_blocker) {
+ migrate_del_blocker(hdev->migration_blocker);
+ error_free(hdev->migration_blocker);
+ }
+ g_free(hdev->mem);
+ g_free(hdev->mem_sections);
+ hdev->vhost_ops->vhost_backend_cleanup(hdev);
+ QLIST_REMOVE(hdev, entry);
+}
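+
+/* A rough sketch of how a backend such as vhost-net is expected to drive
+ * this API (illustrative only, the exact call sites live in the individual
+ * backends):
+ *
+ *     vhost_dev_init(hdev, opaque, backend_type);
+ *     ... negotiate with vhost_get_features() / vhost_ack_features() ...
+ *     vhost_dev_enable_notifiers(hdev, vdev);
+ *     vhost_dev_start(hdev, vdev);
+ *     ... device is live, vhost_virtqueue_mask()/_pending() as needed ...
+ *     vhost_dev_stop(hdev, vdev);
+ *     vhost_dev_disable_notifiers(hdev, vdev);
+ *     vhost_dev_cleanup(hdev);
+ */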
+
+/* Stop processing guest IO notifications in qemu.
+ * Start processing them in vhost in kernel.
+ */
+int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
+{
+ BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
+ VirtioBusState *vbus = VIRTIO_BUS(qbus);
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
+ int i, r, e;
+ if (!k->set_host_notifier) {
+ fprintf(stderr, "binding does not support host notifiers\n");
+ r = -ENOSYS;
+ goto fail;
+ }
+
+ for (i = 0; i < hdev->nvqs; ++i) {
+ r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
+ if (r < 0) {
+ fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
+ goto fail_vq;
+ }
+ }
+
+ return 0;
+fail_vq:
+ while (--i >= 0) {
+ e = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
+ if (e < 0) {
+ fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
+ fflush(stderr);
+ }
+ assert (e >= 0);
+ }
+fail:
+ return r;
+}
+
+/* Stop processing guest IO notifications in vhost.
+ * Start processing them in qemu.
+ * This might actually run the qemu handlers right away,
+ * so virtio in qemu must be completely set up when this is called.
+ */
+void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
+{
+ BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
+ VirtioBusState *vbus = VIRTIO_BUS(qbus);
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
+ int i, r;
+
+ for (i = 0; i < hdev->nvqs; ++i) {
+ r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
+ if (r < 0) {
+ fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
+ fflush(stderr);
+ }
+        assert(r >= 0);
+ }
+}
+
+/* Test and clear event pending status.
+ * Should be called after unmask to avoid losing events.
+ */
+bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
+{
+ struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
+ assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
+ return event_notifier_test_and_clear(&vq->masked_notifier);
+}
+
+/* Mask/unmask events from this vq. */
+void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
+ bool mask)
+{
+ struct VirtQueue *vvq = virtio_get_queue(vdev, n);
+ int r, index = n - hdev->vq_index;
+ struct vhost_vring_file file;
+
+ if (mask) {
+ file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
+ } else {
+ file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
+ }
+
+ file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
+ r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
+ assert(r >= 0);
+}
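+
+/* While a queue is masked, the call eventfd is pointed at the per-queue
+ * masked_notifier instead of the guest notifier, so interrupts are parked
+ * rather than delivered; vhost_virtqueue_pending() above is what the
+ * device's guest_notifier_pending callback typically uses after unmasking
+ * to find out whether anything fired in the meantime. */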
+
+uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
+ uint64_t features)
+{
+ const int *bit = feature_bits;
+ while (*bit != VHOST_INVALID_FEATURE_BIT) {
+ uint64_t bit_mask = (1ULL << *bit);
+ if (!(hdev->features & bit_mask)) {
+ features &= ~bit_mask;
+ }
+ bit++;
+ }
+ return features;
+}
+
+void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
+ uint64_t features)
+{
+ const int *bit = feature_bits;
+ while (*bit != VHOST_INVALID_FEATURE_BIT) {
+ uint64_t bit_mask = (1ULL << *bit);
+ if (features & bit_mask) {
+ hdev->acked_features |= bit_mask;
+ }
+ bit++;
+ }
+}
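+
+/* Typical (hypothetical) use of the two helpers above by a device backend:
+ *
+ *     uint64_t feats = vhost_get_features(hdev, my_feature_bits, feats);
+ *     ... offer feats to the guest ...
+ *     vhost_ack_features(hdev, my_feature_bits, guest_acked_feats);
+ *     ... hdev->acked_features is then programmed into the backend by
+ *         vhost_dev_start() via vhost_dev_set_features() ...
+ *
+ * where my_feature_bits is a VHOST_INVALID_FEATURE_BIT-terminated list of
+ * the feature bits the device actually forwards to vhost. */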
+
+/* Host notifiers must be enabled at this point. */
+int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
+{
+ int i, r;
+
+ hdev->started = true;
+
+ r = vhost_dev_set_features(hdev, hdev->log_enabled);
+ if (r < 0) {
+ goto fail_features;
+ }
+ r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
+ if (r < 0) {
+ r = -errno;
+ goto fail_mem;
+ }
+ for (i = 0; i < hdev->nvqs; ++i) {
+ r = vhost_virtqueue_start(hdev,
+ vdev,
+ hdev->vqs + i,
+ hdev->vq_index + i);
+ if (r < 0) {
+ goto fail_vq;
+ }
+ }
+
+ if (hdev->log_enabled) {
+ uint64_t log_base;
+
+ hdev->log_size = vhost_get_log_size(hdev);
+ hdev->log = vhost_log_get(hdev->log_size,
+ vhost_dev_log_is_shared(hdev));
+ log_base = (uintptr_t)hdev->log->log;
+ r = hdev->vhost_ops->vhost_set_log_base(hdev,
+ hdev->log_size ? log_base : 0,
+ hdev->log);
+ if (r < 0) {
+ r = -errno;
+ goto fail_log;
+ }
+ }
+
+ return 0;
+fail_log:
+ vhost_log_put(hdev, false);
+fail_vq:
+ while (--i >= 0) {
+ vhost_virtqueue_stop(hdev,
+ vdev,
+ hdev->vqs + i,
+ hdev->vq_index + i);
+ }
+ i = hdev->nvqs;
+fail_mem:
+fail_features:
+
+ hdev->started = false;
+ return r;
+}
+
+/* Host notifiers must be enabled at this point. */
+void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
+{
+ int i;
+
+ for (i = 0; i < hdev->nvqs; ++i) {
+ vhost_virtqueue_stop(hdev,
+ vdev,
+ hdev->vqs + i,
+ hdev->vq_index + i);
+ }
+
+ vhost_log_put(hdev, true);
+ hdev->started = false;
+ hdev->log = NULL;
+ hdev->log_size = 0;
+}
+
diff --git a/src/hw/virtio/virtio-balloon.c b/src/hw/virtio/virtio-balloon.c
new file mode 100644
index 0000000..9671635
--- /dev/null
+++ b/src/hw/virtio/virtio-balloon.c
@@ -0,0 +1,465 @@
+/*
+ * Virtio Balloon Device
+ *
+ * Copyright IBM, Corp. 2008
+ * Copyright (C) 2011 Red Hat, Inc.
+ * Copyright (C) 2011 Amit Shah <amit.shah@redhat.com>
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/iov.h"
+#include "qemu/timer.h"
+#include "qemu-common.h"
+#include "hw/virtio/virtio.h"
+#include "hw/i386/pc.h"
+#include "cpu.h"
+#include "sysemu/balloon.h"
+#include "hw/virtio/virtio-balloon.h"
+#include "sysemu/kvm.h"
+#include "exec/address-spaces.h"
+#include "qapi/visitor.h"
+#include "qapi-event.h"
+#include "trace.h"
+
+#if defined(__linux__)
+#include <sys/mman.h>
+#endif
+
+#include "hw/virtio/virtio-bus.h"
+#include "hw/virtio/virtio-access.h"
+
+static void balloon_page(void *addr, int deflate)
+{
+#if defined(__linux__)
+ if (!qemu_balloon_is_inhibited() && (!kvm_enabled() ||
+ kvm_has_sync_mmu())) {
+ qemu_madvise(addr, TARGET_PAGE_SIZE,
+ deflate ? QEMU_MADV_WILLNEED : QEMU_MADV_DONTNEED);
+ }
+#endif
+}
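+
+/* The madvise() above is what actually returns an inflated page to the host
+ * (QEMU_MADV_DONTNEED) or hints that a deflated page will be reused soon
+ * (QEMU_MADV_WILLNEED).  It is only done when it is safe, hence the
+ * sync-MMU and balloon-inhibit checks. */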
+
+static const char *balloon_stat_names[] = {
+ [VIRTIO_BALLOON_S_SWAP_IN] = "stat-swap-in",
+ [VIRTIO_BALLOON_S_SWAP_OUT] = "stat-swap-out",
+ [VIRTIO_BALLOON_S_MAJFLT] = "stat-major-faults",
+ [VIRTIO_BALLOON_S_MINFLT] = "stat-minor-faults",
+ [VIRTIO_BALLOON_S_MEMFREE] = "stat-free-memory",
+ [VIRTIO_BALLOON_S_MEMTOT] = "stat-total-memory",
+ [VIRTIO_BALLOON_S_NR] = NULL
+};
+
+/*
+ * reset_stats - Mark all items in the stats array as unset
+ *
+ * This function needs to be called at device initialization and before
+ * updating to a set of newly-generated stats. This will ensure that no
+ * stale values stick around in case the guest reports a subset of the supported
+ * statistics.
+ */
+static inline void reset_stats(VirtIOBalloon *dev)
+{
+ int i;
+ for (i = 0; i < VIRTIO_BALLOON_S_NR; dev->stats[i++] = -1);
+}
+
+static bool balloon_stats_supported(const VirtIOBalloon *s)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(s);
+ return virtio_vdev_has_feature(vdev, VIRTIO_BALLOON_F_STATS_VQ);
+}
+
+static bool balloon_stats_enabled(const VirtIOBalloon *s)
+{
+ return s->stats_poll_interval > 0;
+}
+
+static void balloon_stats_destroy_timer(VirtIOBalloon *s)
+{
+ if (balloon_stats_enabled(s)) {
+ timer_del(s->stats_timer);
+ timer_free(s->stats_timer);
+ s->stats_timer = NULL;
+ s->stats_poll_interval = 0;
+ }
+}
+
+static void balloon_stats_change_timer(VirtIOBalloon *s, int64_t secs)
+{
+ timer_mod(s->stats_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + secs * 1000);
+}
+
+static void balloon_stats_poll_cb(void *opaque)
+{
+ VirtIOBalloon *s = opaque;
+ VirtIODevice *vdev = VIRTIO_DEVICE(s);
+
+ if (!balloon_stats_supported(s)) {
+ /* re-schedule */
+ balloon_stats_change_timer(s, s->stats_poll_interval);
+ return;
+ }
+
+ virtqueue_push(s->svq, &s->stats_vq_elem, s->stats_vq_offset);
+ virtio_notify(vdev, s->svq);
+}
+
+static void balloon_stats_get_all(Object *obj, struct Visitor *v,
+ void *opaque, const char *name, Error **errp)
+{
+ Error *err = NULL;
+ VirtIOBalloon *s = opaque;
+ int i;
+
+ visit_start_struct(v, NULL, "guest-stats", name, 0, &err);
+ if (err) {
+ goto out;
+ }
+ visit_type_int(v, &s->stats_last_update, "last-update", &err);
+ if (err) {
+ goto out_end;
+ }
+
+ visit_start_struct(v, NULL, NULL, "stats", 0, &err);
+ if (err) {
+ goto out_end;
+ }
+ for (i = 0; !err && i < VIRTIO_BALLOON_S_NR; i++) {
+ visit_type_int64(v, (int64_t *) &s->stats[i], balloon_stat_names[i],
+ &err);
+ }
+ error_propagate(errp, err);
+ err = NULL;
+ visit_end_struct(v, &err);
+
+out_end:
+ error_propagate(errp, err);
+ err = NULL;
+ visit_end_struct(v, &err);
+out:
+ error_propagate(errp, err);
+}
+
+static void balloon_stats_get_poll_interval(Object *obj, struct Visitor *v,
+ void *opaque, const char *name,
+ Error **errp)
+{
+ VirtIOBalloon *s = opaque;
+ visit_type_int(v, &s->stats_poll_interval, name, errp);
+}
+
+static void balloon_stats_set_poll_interval(Object *obj, struct Visitor *v,
+ void *opaque, const char *name,
+ Error **errp)
+{
+ VirtIOBalloon *s = opaque;
+ Error *local_err = NULL;
+ int64_t value;
+
+ visit_type_int(v, &value, name, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ if (value < 0) {
+ error_setg(errp, "timer value must be greater than zero");
+ return;
+ }
+
+ if (value > UINT32_MAX) {
+ error_setg(errp, "timer value is too big");
+ return;
+ }
+
+ if (value == s->stats_poll_interval) {
+ return;
+ }
+
+ if (value == 0) {
+ /* timer=0 disables the timer */
+ balloon_stats_destroy_timer(s);
+ return;
+ }
+
+ if (balloon_stats_enabled(s)) {
+ /* timer interval change */
+ s->stats_poll_interval = value;
+ balloon_stats_change_timer(s, value);
+ return;
+ }
+
+ /* create a new timer */
+ g_assert(s->stats_timer == NULL);
+ s->stats_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, balloon_stats_poll_cb, s);
+ s->stats_poll_interval = value;
+ balloon_stats_change_timer(s, 0);
+}
+
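+/* Inflate/deflate requests arrive as arrays of 32-bit page frame numbers in
+ * balloon-page units; with the usual VIRTIO_BALLOON_PFN_SHIFT of 12, a pfn
+ * of 0x12345 names guest physical address 0x12345000.  The same handler
+ * serves both the inflate (ivq) and deflate (dvq) queues and tells them
+ * apart by comparing vq against s->dvq. */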
+static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
+{
+ VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
+ VirtQueueElement elem;
+ MemoryRegionSection section;
+
+ while (virtqueue_pop(vq, &elem)) {
+ size_t offset = 0;
+ uint32_t pfn;
+
+ while (iov_to_buf(elem.out_sg, elem.out_num, offset, &pfn, 4) == 4) {
+ ram_addr_t pa;
+ ram_addr_t addr;
+ int p = virtio_ldl_p(vdev, &pfn);
+
+ pa = (ram_addr_t) p << VIRTIO_BALLOON_PFN_SHIFT;
+ offset += 4;
+
+ /* FIXME: remove get_system_memory(), but how? */
+ section = memory_region_find(get_system_memory(), pa, 1);
+            if (!int128_nz(section.size) || !memory_region_is_ram(section.mr)) {
+                continue;
+            }
+
+ trace_virtio_balloon_handle_output(memory_region_name(section.mr),
+ pa);
+ /* Using memory_region_get_ram_ptr is bending the rules a bit, but
+ should be OK because we only want a single page. */
+ addr = section.offset_within_region;
+ balloon_page(memory_region_get_ram_ptr(section.mr) + addr,
+ !!(vq == s->dvq));
+ memory_region_unref(section.mr);
+ }
+
+ virtqueue_push(vq, &elem, offset);
+ virtio_notify(vdev, vq);
+ }
+}
+
+static void virtio_balloon_receive_stats(VirtIODevice *vdev, VirtQueue *vq)
+{
+ VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
+ VirtQueueElement *elem = &s->stats_vq_elem;
+ VirtIOBalloonStat stat;
+ size_t offset = 0;
+ qemu_timeval tv;
+
+ if (!virtqueue_pop(vq, elem)) {
+ goto out;
+ }
+
+ /* Initialize the stats to get rid of any stale values. This is only
+ * needed to handle the case where a guest supports fewer stats than it
+     * used to (i.e. it has booted into an old kernel).
+ */
+ reset_stats(s);
+
+ while (iov_to_buf(elem->out_sg, elem->out_num, offset, &stat, sizeof(stat))
+ == sizeof(stat)) {
+ uint16_t tag = virtio_tswap16(vdev, stat.tag);
+ uint64_t val = virtio_tswap64(vdev, stat.val);
+
+ offset += sizeof(stat);
+        if (tag < VIRTIO_BALLOON_S_NR) {
+            s->stats[tag] = val;
+        }
+ }
+ s->stats_vq_offset = offset;
+
+ if (qemu_gettimeofday(&tv) < 0) {
+ fprintf(stderr, "warning: %s: failed to get time of day\n", __func__);
+ goto out;
+ }
+
+ s->stats_last_update = tv.tv_sec;
+
+out:
+ if (balloon_stats_enabled(s)) {
+ balloon_stats_change_timer(s, s->stats_poll_interval);
+ }
+}
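+
+/* Stats virtqueue protocol, as implemented above: the guest supplies a
+ * single buffer full of (tag, val) pairs; the device parses it but
+ * deliberately does not push the element back right away.  Holding on to
+ * s->stats_vq_elem until balloon_stats_poll_cb() fires is what asks the
+ * guest for the next update, so the polling interval property effectively
+ * rate-limits guest statistics. */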
+
+static void virtio_balloon_get_config(VirtIODevice *vdev, uint8_t *config_data)
+{
+ VirtIOBalloon *dev = VIRTIO_BALLOON(vdev);
+ struct virtio_balloon_config config;
+
+ config.num_pages = cpu_to_le32(dev->num_pages);
+ config.actual = cpu_to_le32(dev->actual);
+
+ trace_virtio_balloon_get_config(config.num_pages, config.actual);
+ memcpy(config_data, &config, sizeof(struct virtio_balloon_config));
+}
+
+static void virtio_balloon_set_config(VirtIODevice *vdev,
+ const uint8_t *config_data)
+{
+ VirtIOBalloon *dev = VIRTIO_BALLOON(vdev);
+ struct virtio_balloon_config config;
+ uint32_t oldactual = dev->actual;
+ ram_addr_t vm_ram_size = get_current_ram_size();
+
+ memcpy(&config, config_data, sizeof(struct virtio_balloon_config));
+ dev->actual = le32_to_cpu(config.actual);
+ if (dev->actual != oldactual) {
+ qapi_event_send_balloon_change(vm_ram_size -
+ ((ram_addr_t) dev->actual << VIRTIO_BALLOON_PFN_SHIFT),
+ &error_abort);
+ }
+ trace_virtio_balloon_set_config(dev->actual, oldactual);
+}
+
+static uint64_t virtio_balloon_get_features(VirtIODevice *vdev, uint64_t f,
+ Error **errp)
+{
+ VirtIOBalloon *dev = VIRTIO_BALLOON(vdev);
+ f |= dev->host_features;
+ virtio_add_feature(&f, VIRTIO_BALLOON_F_STATS_VQ);
+ return f;
+}
+
+static void virtio_balloon_stat(void *opaque, BalloonInfo *info)
+{
+ VirtIOBalloon *dev = opaque;
+ info->actual = get_current_ram_size() - ((uint64_t) dev->actual <<
+ VIRTIO_BALLOON_PFN_SHIFT);
+}
+
+static void virtio_balloon_to_target(void *opaque, ram_addr_t target)
+{
+ VirtIOBalloon *dev = VIRTIO_BALLOON(opaque);
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ ram_addr_t vm_ram_size = get_current_ram_size();
+
+ if (target > vm_ram_size) {
+ target = vm_ram_size;
+ }
+ if (target) {
+ dev->num_pages = (vm_ram_size - target) >> VIRTIO_BALLOON_PFN_SHIFT;
+ virtio_notify_config(vdev);
+ }
+ trace_virtio_balloon_to_target(target, dev->num_pages);
+}
+
+static void virtio_balloon_save(QEMUFile *f, void *opaque)
+{
+ virtio_save(VIRTIO_DEVICE(opaque), f);
+}
+
+static void virtio_balloon_save_device(VirtIODevice *vdev, QEMUFile *f)
+{
+ VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
+
+ qemu_put_be32(f, s->num_pages);
+ qemu_put_be32(f, s->actual);
+}
+
+static int virtio_balloon_load(QEMUFile *f, void *opaque, int version_id)
+{
+    if (version_id != 1) {
+        return -EINVAL;
+    }
+
+ return virtio_load(VIRTIO_DEVICE(opaque), f, version_id);
+}
+
+static int virtio_balloon_load_device(VirtIODevice *vdev, QEMUFile *f,
+ int version_id)
+{
+ VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
+
+ s->num_pages = qemu_get_be32(f);
+ s->actual = qemu_get_be32(f);
+ return 0;
+}
+
+static void virtio_balloon_device_realize(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VirtIOBalloon *s = VIRTIO_BALLOON(dev);
+ int ret;
+
+ virtio_init(vdev, "virtio-balloon", VIRTIO_ID_BALLOON,
+ sizeof(struct virtio_balloon_config));
+
+ ret = qemu_add_balloon_handler(virtio_balloon_to_target,
+ virtio_balloon_stat, s);
+
+ if (ret < 0) {
+ error_setg(errp, "Only one balloon device is supported");
+ virtio_cleanup(vdev);
+ return;
+ }
+
+ s->ivq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
+ s->dvq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
+ s->svq = virtio_add_queue(vdev, 128, virtio_balloon_receive_stats);
+
+ reset_stats(s);
+
+ register_savevm(dev, "virtio-balloon", -1, 1,
+ virtio_balloon_save, virtio_balloon_load, s);
+}
+
+static void virtio_balloon_device_unrealize(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VirtIOBalloon *s = VIRTIO_BALLOON(dev);
+
+ balloon_stats_destroy_timer(s);
+ qemu_remove_balloon_handler(s);
+ unregister_savevm(dev, "virtio-balloon", s);
+ virtio_cleanup(vdev);
+}
+
+static void virtio_balloon_instance_init(Object *obj)
+{
+ VirtIOBalloon *s = VIRTIO_BALLOON(obj);
+
+ object_property_add(obj, "guest-stats", "guest statistics",
+ balloon_stats_get_all, NULL, NULL, s, NULL);
+
+ object_property_add(obj, "guest-stats-polling-interval", "int",
+ balloon_stats_get_poll_interval,
+ balloon_stats_set_poll_interval,
+ NULL, s, NULL);
+}
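+
+/* The two QOM properties registered above are meant to be driven from the
+ * monitor.  For example (the path is illustrative and depends on how the
+ * device was created):
+ *
+ *     qom-set /machine/peripheral/balloon0 guest-stats-polling-interval 2
+ *     qom-get /machine/peripheral/balloon0 guest-stats
+ *
+ * The first enables the stats timer, the second returns the last set of
+ * statistics reported by the guest together with its last-update time. */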
+
+static Property virtio_balloon_properties[] = {
+ DEFINE_PROP_BIT("deflate-on-oom", VirtIOBalloon, host_features,
+ VIRTIO_BALLOON_F_DEFLATE_ON_OOM, false),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_balloon_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+
+ dc->props = virtio_balloon_properties;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ vdc->realize = virtio_balloon_device_realize;
+ vdc->unrealize = virtio_balloon_device_unrealize;
+ vdc->get_config = virtio_balloon_get_config;
+ vdc->set_config = virtio_balloon_set_config;
+ vdc->get_features = virtio_balloon_get_features;
+ vdc->save = virtio_balloon_save_device;
+ vdc->load = virtio_balloon_load_device;
+}
+
+static const TypeInfo virtio_balloon_info = {
+ .name = TYPE_VIRTIO_BALLOON,
+ .parent = TYPE_VIRTIO_DEVICE,
+ .instance_size = sizeof(VirtIOBalloon),
+ .instance_init = virtio_balloon_instance_init,
+ .class_init = virtio_balloon_class_init,
+};
+
+static void virtio_register_types(void)
+{
+ type_register_static(&virtio_balloon_info);
+}
+
+type_init(virtio_register_types)
diff --git a/src/hw/virtio/virtio-bus.c b/src/hw/virtio/virtio-bus.c
new file mode 100644
index 0000000..81c7cdd
--- /dev/null
+++ b/src/hw/virtio/virtio-bus.c
@@ -0,0 +1,181 @@
+/*
+ * VirtioBus
+ *
+ * Copyright (C) 2012 : GreenSocs Ltd
+ * http://www.greensocs.com/ , email: info@greensocs.com
+ *
+ * Developed by :
+ * Frederic Konrad <fred.konrad@greensocs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "hw/hw.h"
+#include "qemu/error-report.h"
+#include "hw/qdev.h"
+#include "hw/virtio/virtio-bus.h"
+#include "hw/virtio/virtio.h"
+
+/* #define DEBUG_VIRTIO_BUS */
+
+#ifdef DEBUG_VIRTIO_BUS
+#define DPRINTF(fmt, ...) \
+do { printf("virtio_bus: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) do { } while (0)
+#endif
+
+/* A VirtIODevice is being plugged */
+void virtio_bus_device_plugged(VirtIODevice *vdev, Error **errp)
+{
+ DeviceState *qdev = DEVICE(vdev);
+ BusState *qbus = BUS(qdev_get_parent_bus(qdev));
+ VirtioBusState *bus = VIRTIO_BUS(qbus);
+ VirtioBusClass *klass = VIRTIO_BUS_GET_CLASS(bus);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+
+ DPRINTF("%s: plug device.\n", qbus->name);
+
+ if (klass->device_plugged != NULL) {
+ klass->device_plugged(qbus->parent, errp);
+ }
+
+ /* Get the features of the plugged device. */
+ assert(vdc->get_features != NULL);
+ vdev->host_features = vdc->get_features(vdev, vdev->host_features,
+ errp);
+ if (klass->post_plugged != NULL) {
+ klass->post_plugged(qbus->parent, errp);
+ }
+}
+
+/* Reset the virtio_bus */
+void virtio_bus_reset(VirtioBusState *bus)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(bus);
+
+ DPRINTF("%s: reset device.\n", BUS(bus)->name);
+ if (vdev != NULL) {
+ virtio_reset(vdev);
+ }
+}
+
+/* A VirtIODevice is being unplugged */
+void virtio_bus_device_unplugged(VirtIODevice *vdev)
+{
+ DeviceState *qdev = DEVICE(vdev);
+ BusState *qbus = BUS(qdev_get_parent_bus(qdev));
+ VirtioBusClass *klass = VIRTIO_BUS_GET_CLASS(qbus);
+
+ DPRINTF("%s: remove device.\n", qbus->name);
+
+ if (vdev != NULL) {
+ if (klass->device_unplugged != NULL) {
+ klass->device_unplugged(qbus->parent);
+ }
+ }
+}
+
+/* Get the device id of the plugged device. */
+uint16_t virtio_bus_get_vdev_id(VirtioBusState *bus)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(bus);
+ assert(vdev != NULL);
+ return vdev->device_id;
+}
+
+/* Get the config_len field of the plugged device. */
+size_t virtio_bus_get_vdev_config_len(VirtioBusState *bus)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(bus);
+ assert(vdev != NULL);
+ return vdev->config_len;
+}
+
+/* Get bad features of the plugged device. */
+uint32_t virtio_bus_get_vdev_bad_features(VirtioBusState *bus)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(bus);
+ VirtioDeviceClass *k;
+
+ assert(vdev != NULL);
+ k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ if (k->bad_features != NULL) {
+ return k->bad_features(vdev);
+ } else {
+ return 0;
+ }
+}
+
+/* Get config of the plugged device. */
+void virtio_bus_get_vdev_config(VirtioBusState *bus, uint8_t *config)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(bus);
+ VirtioDeviceClass *k;
+
+ assert(vdev != NULL);
+ k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ if (k->get_config != NULL) {
+ k->get_config(vdev, config);
+ }
+}
+
+/* Set config of the plugged device. */
+void virtio_bus_set_vdev_config(VirtioBusState *bus, uint8_t *config)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(bus);
+ VirtioDeviceClass *k;
+
+ assert(vdev != NULL);
+ k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ if (k->set_config != NULL) {
+ k->set_config(vdev, config);
+ }
+}
+
+static char *virtio_bus_get_dev_path(DeviceState *dev)
+{
+ BusState *bus = qdev_get_parent_bus(dev);
+ DeviceState *proxy = DEVICE(bus->parent);
+ return qdev_get_dev_path(proxy);
+}
+
+static char *virtio_bus_get_fw_dev_path(DeviceState *dev)
+{
+ return NULL;
+}
+
+static void virtio_bus_class_init(ObjectClass *klass, void *data)
+{
+ BusClass *bus_class = BUS_CLASS(klass);
+ bus_class->get_dev_path = virtio_bus_get_dev_path;
+ bus_class->get_fw_dev_path = virtio_bus_get_fw_dev_path;
+}
+
+static const TypeInfo virtio_bus_info = {
+ .name = TYPE_VIRTIO_BUS,
+ .parent = TYPE_BUS,
+ .instance_size = sizeof(VirtioBusState),
+ .abstract = true,
+ .class_size = sizeof(VirtioBusClass),
+ .class_init = virtio_bus_class_init
+};
+
+static void virtio_register_types(void)
+{
+ type_register_static(&virtio_bus_info);
+}
+
+type_init(virtio_register_types)
diff --git a/src/hw/virtio/virtio-mmio.c b/src/hw/virtio/virtio-mmio.c
new file mode 100644
index 0000000..18660b0
--- /dev/null
+++ b/src/hw/virtio/virtio-mmio.c
@@ -0,0 +1,579 @@
+/*
+ * Virtio MMIO bindings
+ *
+ * Copyright (c) 2011 Linaro Limited
+ *
+ * Author:
+ * Peter Maydell <peter.maydell@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hw/sysbus.h"
+#include "hw/virtio/virtio.h"
+#include "qemu/host-utils.h"
+#include "sysemu/kvm.h"
+#include "hw/virtio/virtio-bus.h"
+#include "qemu/error-report.h"
+
+/* #define DEBUG_VIRTIO_MMIO */
+
+#ifdef DEBUG_VIRTIO_MMIO
+
+#define DPRINTF(fmt, ...) \
+do { printf("virtio_mmio: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) do {} while (0)
+#endif
+
+/* QOM macros */
+/* virtio-mmio-bus */
+#define TYPE_VIRTIO_MMIO_BUS "virtio-mmio-bus"
+#define VIRTIO_MMIO_BUS(obj) \
+ OBJECT_CHECK(VirtioBusState, (obj), TYPE_VIRTIO_MMIO_BUS)
+#define VIRTIO_MMIO_BUS_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(VirtioBusClass, (obj), TYPE_VIRTIO_MMIO_BUS)
+#define VIRTIO_MMIO_BUS_CLASS(klass) \
+ OBJECT_CLASS_CHECK(VirtioBusClass, (klass), TYPE_VIRTIO_MMIO_BUS)
+
+/* virtio-mmio */
+#define TYPE_VIRTIO_MMIO "virtio-mmio"
+#define VIRTIO_MMIO(obj) \
+ OBJECT_CHECK(VirtIOMMIOProxy, (obj), TYPE_VIRTIO_MMIO)
+
+/* Memory mapped register offsets */
+#define VIRTIO_MMIO_MAGIC 0x0
+#define VIRTIO_MMIO_VERSION 0x4
+#define VIRTIO_MMIO_DEVICEID 0x8
+#define VIRTIO_MMIO_VENDORID 0xc
+#define VIRTIO_MMIO_HOSTFEATURES 0x10
+#define VIRTIO_MMIO_HOSTFEATURESSEL 0x14
+#define VIRTIO_MMIO_GUESTFEATURES 0x20
+#define VIRTIO_MMIO_GUESTFEATURESSEL 0x24
+#define VIRTIO_MMIO_GUESTPAGESIZE 0x28
+#define VIRTIO_MMIO_QUEUESEL 0x30
+#define VIRTIO_MMIO_QUEUENUMMAX 0x34
+#define VIRTIO_MMIO_QUEUENUM 0x38
+#define VIRTIO_MMIO_QUEUEALIGN 0x3c
+#define VIRTIO_MMIO_QUEUEPFN 0x40
+#define VIRTIO_MMIO_QUEUENOTIFY 0x50
+#define VIRTIO_MMIO_INTERRUPTSTATUS 0x60
+#define VIRTIO_MMIO_INTERRUPTACK 0x64
+#define VIRTIO_MMIO_STATUS 0x70
+/* Device specific config space starts here */
+#define VIRTIO_MMIO_CONFIG 0x100
+
+#define VIRT_MAGIC 0x74726976 /* 'virt' */
+#define VIRT_VERSION 1
+#define VIRT_VENDOR 0x554D4551 /* 'QEMU' */
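+
+/* A legacy (version 1) guest driver typically brings a device up against
+ * the register map above roughly as follows (illustrative sequence, see the
+ * virtio-mmio spec for the authoritative one):
+ *
+ *     read  MAGIC          -> must be 'virt'
+ *     read  VERSION        -> 1
+ *     read  DEVICEID       -> 0 means no backend, the driver gives up
+ *     write GUESTPAGESIZE  <- guest page size, e.g. 4096
+ *     write QUEUESEL       <- queue index
+ *     read  QUEUENUMMAX, write QUEUENUM and QUEUEALIGN
+ *     write QUEUEPFN       <- ring physical address >> page shift
+ *     write STATUS         <- ... | DRIVER_OK
+ *     write QUEUENOTIFY    <- queue index, to kick the device
+ */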
+
+typedef struct {
+ /* Generic */
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+ qemu_irq irq;
+ /* Guest accessible state needing migration and reset */
+ uint32_t host_features_sel;
+ uint32_t guest_features_sel;
+ uint32_t guest_page_shift;
+ /* virtio-bus */
+ VirtioBusState bus;
+ bool ioeventfd_disabled;
+ bool ioeventfd_started;
+} VirtIOMMIOProxy;
+
+static int virtio_mmio_set_host_notifier_internal(VirtIOMMIOProxy *proxy,
+ int n, bool assign,
+ bool set_handler)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ VirtQueue *vq = virtio_get_queue(vdev, n);
+ EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
+ int r = 0;
+
+ if (assign) {
+ r = event_notifier_init(notifier, 1);
+ if (r < 0) {
+ error_report("%s: unable to init event notifier: %d",
+ __func__, r);
+ return r;
+ }
+ virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
+ memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
+ true, n, notifier);
+ } else {
+ memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
+ true, n, notifier);
+ virtio_queue_set_host_notifier_fd_handler(vq, false, false);
+ event_notifier_cleanup(notifier);
+ }
+ return r;
+}
+
+static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ int n, r;
+
+ if (!kvm_eventfds_enabled() ||
+ proxy->ioeventfd_disabled ||
+ proxy->ioeventfd_started) {
+ return;
+ }
+
+ for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+
+ r = virtio_mmio_set_host_notifier_internal(proxy, n, true, true);
+ if (r < 0) {
+ goto assign_error;
+ }
+ }
+ proxy->ioeventfd_started = true;
+ return;
+
+assign_error:
+ while (--n >= 0) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+
+ r = virtio_mmio_set_host_notifier_internal(proxy, n, false, false);
+ assert(r >= 0);
+ }
+ proxy->ioeventfd_started = false;
+ error_report("%s: failed. Fallback to a userspace (slower).", __func__);
+}
+
+static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
+{
+ int r;
+ int n;
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
+ if (!proxy->ioeventfd_started) {
+ return;
+ }
+
+ for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+
+ r = virtio_mmio_set_host_notifier_internal(proxy, n, false, false);
+ assert(r >= 0);
+ }
+ proxy->ioeventfd_started = false;
+}
+
+static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
+{
+ VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
+ DPRINTF("virtio_mmio_read offset 0x%x\n", (int)offset);
+
+ if (!vdev) {
+ /* If no backend is present, we treat most registers as
+ * read-as-zero, except for the magic number, version and
+ * vendor ID. This is not strictly sanctioned by the virtio
+ * spec, but it allows us to provide transports with no backend
+ * plugged in which don't confuse Linux's virtio code: the
+ * probe won't complain about the bad magic number, but the
+ * device ID of zero means no backend will claim it.
+ */
+ switch (offset) {
+ case VIRTIO_MMIO_MAGIC:
+ return VIRT_MAGIC;
+ case VIRTIO_MMIO_VERSION:
+ return VIRT_VERSION;
+ case VIRTIO_MMIO_VENDORID:
+ return VIRT_VENDOR;
+ default:
+ return 0;
+ }
+ }
+
+ if (offset >= VIRTIO_MMIO_CONFIG) {
+ offset -= VIRTIO_MMIO_CONFIG;
+ switch (size) {
+ case 1:
+ return virtio_config_readb(vdev, offset);
+ case 2:
+ return virtio_config_readw(vdev, offset);
+ case 4:
+ return virtio_config_readl(vdev, offset);
+ default:
+ abort();
+ }
+ }
+ if (size != 4) {
+ DPRINTF("wrong size access to register!\n");
+ return 0;
+ }
+ switch (offset) {
+ case VIRTIO_MMIO_MAGIC:
+ return VIRT_MAGIC;
+ case VIRTIO_MMIO_VERSION:
+ return VIRT_VERSION;
+ case VIRTIO_MMIO_DEVICEID:
+ return vdev->device_id;
+ case VIRTIO_MMIO_VENDORID:
+ return VIRT_VENDOR;
+ case VIRTIO_MMIO_HOSTFEATURES:
+ if (proxy->host_features_sel) {
+ return 0;
+ }
+ return vdev->host_features;
+ case VIRTIO_MMIO_QUEUENUMMAX:
+ if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
+ return 0;
+ }
+ return VIRTQUEUE_MAX_SIZE;
+ case VIRTIO_MMIO_QUEUEPFN:
+ return virtio_queue_get_addr(vdev, vdev->queue_sel)
+ >> proxy->guest_page_shift;
+ case VIRTIO_MMIO_INTERRUPTSTATUS:
+ return vdev->isr;
+ case VIRTIO_MMIO_STATUS:
+ return vdev->status;
+ case VIRTIO_MMIO_HOSTFEATURESSEL:
+ case VIRTIO_MMIO_GUESTFEATURES:
+ case VIRTIO_MMIO_GUESTFEATURESSEL:
+ case VIRTIO_MMIO_GUESTPAGESIZE:
+ case VIRTIO_MMIO_QUEUESEL:
+ case VIRTIO_MMIO_QUEUENUM:
+ case VIRTIO_MMIO_QUEUEALIGN:
+ case VIRTIO_MMIO_QUEUENOTIFY:
+ case VIRTIO_MMIO_INTERRUPTACK:
+ DPRINTF("read of write-only register\n");
+ return 0;
+ default:
+ DPRINTF("bad register offset\n");
+ return 0;
+ }
+ return 0;
+}
+
+static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
+ unsigned size)
+{
+ VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
+ DPRINTF("virtio_mmio_write offset 0x%x value 0x%" PRIx64 "\n",
+ (int)offset, value);
+
+ if (!vdev) {
+ /* If no backend is present, we just make all registers
+ * write-ignored. This allows us to provide transports with
+ * no backend plugged in.
+ */
+ return;
+ }
+
+ if (offset >= VIRTIO_MMIO_CONFIG) {
+ offset -= VIRTIO_MMIO_CONFIG;
+ switch (size) {
+ case 1:
+ virtio_config_writeb(vdev, offset, value);
+ break;
+ case 2:
+ virtio_config_writew(vdev, offset, value);
+ break;
+ case 4:
+ virtio_config_writel(vdev, offset, value);
+ break;
+ default:
+ abort();
+ }
+ return;
+ }
+ if (size != 4) {
+ DPRINTF("wrong size access to register!\n");
+ return;
+ }
+ switch (offset) {
+ case VIRTIO_MMIO_HOSTFEATURESSEL:
+ proxy->host_features_sel = value;
+ break;
+ case VIRTIO_MMIO_GUESTFEATURES:
+ if (!proxy->guest_features_sel) {
+ virtio_set_features(vdev, value);
+ }
+ break;
+ case VIRTIO_MMIO_GUESTFEATURESSEL:
+ proxy->guest_features_sel = value;
+ break;
+ case VIRTIO_MMIO_GUESTPAGESIZE:
+ proxy->guest_page_shift = ctz32(value);
+ if (proxy->guest_page_shift > 31) {
+ proxy->guest_page_shift = 0;
+ }
+ DPRINTF("guest page size %" PRIx64 " shift %d\n", value,
+ proxy->guest_page_shift);
+ break;
+ case VIRTIO_MMIO_QUEUESEL:
+ if (value < VIRTIO_QUEUE_MAX) {
+ vdev->queue_sel = value;
+ }
+ break;
+ case VIRTIO_MMIO_QUEUENUM:
+ DPRINTF("mmio_queue write %d max %d\n", (int)value, VIRTQUEUE_MAX_SIZE);
+ virtio_queue_set_num(vdev, vdev->queue_sel, value);
+ /* Note: only call this function for legacy devices */
+ virtio_queue_update_rings(vdev, vdev->queue_sel);
+ break;
+ case VIRTIO_MMIO_QUEUEALIGN:
+ /* Note: this is only valid for legacy devices */
+ virtio_queue_set_align(vdev, vdev->queue_sel, value);
+ break;
+ case VIRTIO_MMIO_QUEUEPFN:
+ if (value == 0) {
+ virtio_reset(vdev);
+ } else {
+ virtio_queue_set_addr(vdev, vdev->queue_sel,
+ value << proxy->guest_page_shift);
+ }
+ break;
+ case VIRTIO_MMIO_QUEUENOTIFY:
+ if (value < VIRTIO_QUEUE_MAX) {
+ virtio_queue_notify(vdev, value);
+ }
+ break;
+ case VIRTIO_MMIO_INTERRUPTACK:
+ vdev->isr &= ~value;
+ virtio_update_irq(vdev);
+ break;
+ case VIRTIO_MMIO_STATUS:
+ if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ virtio_mmio_stop_ioeventfd(proxy);
+ }
+
+ virtio_set_status(vdev, value & 0xff);
+
+ if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
+ virtio_mmio_start_ioeventfd(proxy);
+ }
+
+ if (vdev->status == 0) {
+ virtio_reset(vdev);
+ }
+ break;
+ case VIRTIO_MMIO_MAGIC:
+ case VIRTIO_MMIO_VERSION:
+ case VIRTIO_MMIO_DEVICEID:
+ case VIRTIO_MMIO_VENDORID:
+ case VIRTIO_MMIO_HOSTFEATURES:
+ case VIRTIO_MMIO_QUEUENUMMAX:
+ case VIRTIO_MMIO_INTERRUPTSTATUS:
+ DPRINTF("write to readonly register\n");
+ break;
+
+ default:
+ DPRINTF("bad register offset\n");
+ }
+}
+
+static const MemoryRegionOps virtio_mem_ops = {
+ .read = virtio_mmio_read,
+ .write = virtio_mmio_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
+{
+ VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ int level;
+
+ if (!vdev) {
+ return;
+ }
+ level = (vdev->isr != 0);
+ DPRINTF("virtio_mmio setting IRQ %d\n", level);
+ qemu_set_irq(proxy->irq, level);
+}
+
+static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f)
+{
+ VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
+
+ proxy->host_features_sel = qemu_get_be32(f);
+ proxy->guest_features_sel = qemu_get_be32(f);
+ proxy->guest_page_shift = qemu_get_be32(f);
+ return 0;
+}
+
+static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f)
+{
+ VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
+
+ qemu_put_be32(f, proxy->host_features_sel);
+ qemu_put_be32(f, proxy->guest_features_sel);
+ qemu_put_be32(f, proxy->guest_page_shift);
+}
+
+static void virtio_mmio_reset(DeviceState *d)
+{
+ VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
+
+ virtio_mmio_stop_ioeventfd(proxy);
+ virtio_bus_reset(&proxy->bus);
+ proxy->host_features_sel = 0;
+ proxy->guest_features_sel = 0;
+ proxy->guest_page_shift = 0;
+}
+
+static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
+ bool with_irqfd)
+{
+ VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+ VirtQueue *vq = virtio_get_queue(vdev, n);
+ EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
+
+ if (assign) {
+ int r = event_notifier_init(notifier, 0);
+ if (r < 0) {
+ return r;
+ }
+ virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
+ } else {
+ virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
+ event_notifier_cleanup(notifier);
+ }
+
+ if (vdc->guest_notifier_mask) {
+ vdc->guest_notifier_mask(vdev, n, !assign);
+ }
+
+ return 0;
+}
+
+static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
+ bool assign)
+{
+ VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ /* TODO: need to check if kvm-arm supports irqfd */
+ bool with_irqfd = false;
+ int r, n;
+
+ nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);
+
+ for (n = 0; n < nvqs; n++) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ break;
+ }
+
+ r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
+ if (r < 0) {
+ goto assign_error;
+ }
+ }
+
+ return 0;
+
+assign_error:
+ /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
+ assert(assign);
+ while (--n >= 0) {
+ virtio_mmio_set_guest_notifier(d, n, !assign, false);
+ }
+ return r;
+}
+
+static int virtio_mmio_set_host_notifier(DeviceState *opaque, int n,
+ bool assign)
+{
+ VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
+
+ /* Stop using ioeventfd for virtqueue kick if the device starts using host
+     * notifiers. This makes it easy to avoid stepping on each other's toes.
+ */
+ proxy->ioeventfd_disabled = assign;
+ if (assign) {
+ virtio_mmio_stop_ioeventfd(proxy);
+ }
+    /* We don't need to start ioeventfd here: the backend currently only
+     * stops it on a status change away from ok, on reset, on vmstop and
+     * such. If we do add code to start it here, we need to check vmstate,
+     * device state etc. */
+ return virtio_mmio_set_host_notifier_internal(proxy, n, assign, false);
+}
+
+/* virtio-mmio device */
+
+static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
+{
+ VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(d);
+
+ qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS,
+ d, NULL);
+ sysbus_init_irq(sbd, &proxy->irq);
+ memory_region_init_io(&proxy->iomem, OBJECT(d), &virtio_mem_ops, proxy,
+ TYPE_VIRTIO_MMIO, 0x200);
+ sysbus_init_mmio(sbd, &proxy->iomem);
+}
+
+static void virtio_mmio_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = virtio_mmio_realizefn;
+ dc->reset = virtio_mmio_reset;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+}
+
+static const TypeInfo virtio_mmio_info = {
+ .name = TYPE_VIRTIO_MMIO,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(VirtIOMMIOProxy),
+ .class_init = virtio_mmio_class_init,
+};
+
+/* virtio-mmio-bus. */
+
+static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
+{
+ BusClass *bus_class = BUS_CLASS(klass);
+ VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
+
+ k->notify = virtio_mmio_update_irq;
+ k->save_config = virtio_mmio_save_config;
+ k->load_config = virtio_mmio_load_config;
+ k->set_host_notifier = virtio_mmio_set_host_notifier;
+ k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
+ k->has_variable_vring_alignment = true;
+ bus_class->max_dev = 1;
+}
+
+static const TypeInfo virtio_mmio_bus_info = {
+ .name = TYPE_VIRTIO_MMIO_BUS,
+ .parent = TYPE_VIRTIO_BUS,
+ .instance_size = sizeof(VirtioBusState),
+ .class_init = virtio_mmio_bus_class_init,
+};
+
+static void virtio_mmio_register_types(void)
+{
+ type_register_static(&virtio_mmio_bus_info);
+ type_register_static(&virtio_mmio_info);
+}
+
+type_init(virtio_mmio_register_types)
diff --git a/src/hw/virtio/virtio-pci.c b/src/hw/virtio/virtio-pci.c
new file mode 100644
index 0000000..94667e6
--- /dev/null
+++ b/src/hw/virtio/virtio-pci.c
@@ -0,0 +1,2534 @@
+/*
+ * Virtio PCI Bindings
+ *
+ * Copyright IBM, Corp. 2007
+ * Copyright (c) 2009 CodeSourcery
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ * Paul Brook <paul@codesourcery.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include <inttypes.h>
+
+#include "standard-headers/linux/virtio_pci.h"
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/virtio-blk.h"
+#include "hw/virtio/virtio-net.h"
+#include "hw/virtio/virtio-serial.h"
+#include "hw/virtio/virtio-scsi.h"
+#include "hw/virtio/virtio-balloon.h"
+#include "hw/virtio/virtio-input.h"
+#include "hw/pci/pci.h"
+#include "qemu/error-report.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/msix.h"
+#include "hw/loader.h"
+#include "sysemu/kvm.h"
+#include "sysemu/block-backend.h"
+#include "virtio-pci.h"
+#include "qemu/range.h"
+#include "hw/virtio/virtio-bus.h"
+#include "qapi/visitor.h"
+
+#define VIRTIO_PCI_REGION_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_present(dev))
+
+#undef VIRTIO_PCI_CONFIG
+
+/* The remaining space is defined by each driver as the per-driver
+ * configuration space */
+#define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
+
+static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
+ VirtIOPCIProxy *dev);
+
+/* virtio device */
+/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
+static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
+{
+ return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
+}
+
+/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
+ * be careful and test performance if you change this.
+ */
+static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
+{
+ return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
+}
+
+static void virtio_pci_notify(DeviceState *d, uint16_t vector)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);
+
+    if (msix_enabled(&proxy->pci_dev)) {
+        msix_notify(&proxy->pci_dev, vector);
+    } else {
+        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+        pci_set_irq(&proxy->pci_dev, vdev->isr & 1);
+    }
+}
+
+static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
+ pci_device_save(&proxy->pci_dev, f);
+ msix_save(&proxy->pci_dev, f);
+    if (msix_present(&proxy->pci_dev)) {
+        qemu_put_be16(f, vdev->config_vector);
+    }
+}
+
+static void virtio_pci_load_modern_queue_state(VirtIOPCIQueue *vq,
+ QEMUFile *f)
+{
+ vq->num = qemu_get_be16(f);
+ vq->enabled = qemu_get_be16(f);
+ vq->desc[0] = qemu_get_be32(f);
+ vq->desc[1] = qemu_get_be32(f);
+ vq->avail[0] = qemu_get_be32(f);
+ vq->avail[1] = qemu_get_be32(f);
+ vq->used[0] = qemu_get_be32(f);
+ vq->used[1] = qemu_get_be32(f);
+}
+
+static bool virtio_pci_has_extra_state(DeviceState *d)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+ return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
+}
+
+static int get_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size)
+{
+ VirtIOPCIProxy *proxy = pv;
+ int i;
+
+ proxy->dfselect = qemu_get_be32(f);
+ proxy->gfselect = qemu_get_be32(f);
+ proxy->guest_features[0] = qemu_get_be32(f);
+ proxy->guest_features[1] = qemu_get_be32(f);
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ virtio_pci_load_modern_queue_state(&proxy->vqs[i], f);
+ }
+
+ return 0;
+}
+
+static void virtio_pci_save_modern_queue_state(VirtIOPCIQueue *vq,
+ QEMUFile *f)
+{
+ qemu_put_be16(f, vq->num);
+ qemu_put_be16(f, vq->enabled);
+ qemu_put_be32(f, vq->desc[0]);
+ qemu_put_be32(f, vq->desc[1]);
+ qemu_put_be32(f, vq->avail[0]);
+ qemu_put_be32(f, vq->avail[1]);
+ qemu_put_be32(f, vq->used[0]);
+ qemu_put_be32(f, vq->used[1]);
+}
+
+static void put_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size)
+{
+ VirtIOPCIProxy *proxy = pv;
+ int i;
+
+ qemu_put_be32(f, proxy->dfselect);
+ qemu_put_be32(f, proxy->gfselect);
+ qemu_put_be32(f, proxy->guest_features[0]);
+ qemu_put_be32(f, proxy->guest_features[1]);
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ virtio_pci_save_modern_queue_state(&proxy->vqs[i], f);
+ }
+}
+
+static const VMStateInfo vmstate_info_virtio_pci_modern_state = {
+ .name = "virtqueue_state",
+ .get = get_virtio_pci_modern_state,
+ .put = put_virtio_pci_modern_state,
+};
+
+static bool virtio_pci_modern_state_needed(void *opaque)
+{
+ VirtIOPCIProxy *proxy = opaque;
+
+ return !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
+}
+
+static const VMStateDescription vmstate_virtio_pci_modern_state = {
+ .name = "virtio_pci/modern_state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = &virtio_pci_modern_state_needed,
+ .fields = (VMStateField[]) {
+ {
+ .name = "modern_state",
+ .version_id = 0,
+ .field_exists = NULL,
+ .size = 0,
+ .info = &vmstate_info_virtio_pci_modern_state,
+ .flags = VMS_SINGLE,
+ .offset = 0,
+ },
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_virtio_pci = {
+ .name = "virtio_pci",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_virtio_pci_modern_state,
+ NULL
+ }
+};
+
+static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+ vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
+}
+
+static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+ return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
+}
+
+static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
+    if (msix_present(&proxy->pci_dev)) {
+        qemu_put_be16(f, virtio_queue_vector(vdev, n));
+    }
+}
+
+static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
+ int ret;
+ ret = pci_device_load(&proxy->pci_dev, f);
+ if (ret) {
+ return ret;
+ }
+ msix_unuse_all_vectors(&proxy->pci_dev);
+ msix_load(&proxy->pci_dev, f);
+ if (msix_present(&proxy->pci_dev)) {
+ qemu_get_be16s(f, &vdev->config_vector);
+ } else {
+ vdev->config_vector = VIRTIO_NO_VECTOR;
+ }
+ if (vdev->config_vector != VIRTIO_NO_VECTOR) {
+ return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
+ }
+ return 0;
+}
+
+static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
+ uint16_t vector;
+ if (msix_present(&proxy->pci_dev)) {
+ qemu_get_be16s(f, &vector);
+ } else {
+ vector = VIRTIO_NO_VECTOR;
+ }
+ virtio_queue_set_vector(vdev, n, vector);
+ if (vector != VIRTIO_NO_VECTOR) {
+ return msix_vector_use(&proxy->pci_dev, vector);
+ }
+
+ return 0;
+}
+
+#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000
+
+static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
+ int n, bool assign, bool set_handler)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ VirtQueue *vq = virtio_get_queue(vdev, n);
+ EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
+ bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
+ bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
+ bool fast_mmio = kvm_ioeventfd_any_length_enabled();
+ bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
+ MemoryRegion *modern_mr = &proxy->notify.mr;
+ MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
+ MemoryRegion *legacy_mr = &proxy->bar;
+ hwaddr modern_addr = QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
+ virtio_get_queue_index(vq);
+ hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;
+ int r = 0;
+
+ if (assign) {
+ r = event_notifier_init(notifier, 1);
+ if (r < 0) {
+ error_report("%s: unable to init event notifier: %d",
+ __func__, r);
+ return r;
+ }
+ virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
+ if (modern) {
+ if (fast_mmio) {
+ memory_region_add_eventfd(modern_mr, modern_addr, 0,
+ false, n, notifier);
+ } else {
+ memory_region_add_eventfd(modern_mr, modern_addr, 2,
+ false, n, notifier);
+ }
+ if (modern_pio) {
+ memory_region_add_eventfd(modern_notify_mr, 0, 2,
+ true, n, notifier);
+ }
+ }
+ if (legacy) {
+ memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
+ true, n, notifier);
+ }
+ } else {
+ if (modern) {
+ if (fast_mmio) {
+ memory_region_del_eventfd(modern_mr, modern_addr, 0,
+ false, n, notifier);
+ } else {
+ memory_region_del_eventfd(modern_mr, modern_addr, 2,
+ false, n, notifier);
+ }
+ if (modern_pio) {
+ memory_region_del_eventfd(modern_notify_mr, 0, 2,
+ true, n, notifier);
+ }
+ }
+ if (legacy) {
+ memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
+ true, n, notifier);
+ }
+ virtio_queue_set_host_notifier_fd_handler(vq, false, false);
+ event_notifier_cleanup(notifier);
+ }
+ return r;
+}
+
+static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ int n, r;
+
+ if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||
+ proxy->ioeventfd_disabled ||
+ proxy->ioeventfd_started) {
+ return;
+ }
+
+ for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+
+ r = virtio_pci_set_host_notifier_internal(proxy, n, true, true);
+ if (r < 0) {
+ goto assign_error;
+ }
+ }
+ proxy->ioeventfd_started = true;
+ return;
+
+assign_error:
+ while (--n >= 0) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+
+ r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
+ assert(r >= 0);
+ }
+ proxy->ioeventfd_started = false;
+ error_report("%s: failed. Fallback to a userspace (slower).", __func__);
+}
+
+static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ int r;
+ int n;
+
+ if (!proxy->ioeventfd_started) {
+ return;
+ }
+
+ for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+
+ r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
+ assert(r >= 0);
+ }
+ proxy->ioeventfd_started = false;
+}
+
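+/* Legacy virtio-pci I/O port register writes. */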
+static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
+{
+ VirtIOPCIProxy *proxy = opaque;
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ hwaddr pa;
+
+ switch (addr) {
+ case VIRTIO_PCI_GUEST_FEATURES:
+ /* Guest does not negotiate properly? We have to assume nothing. */
+ if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
+ val = virtio_bus_get_vdev_bad_features(&proxy->bus);
+ }
+ virtio_set_features(vdev, val);
+ break;
+ case VIRTIO_PCI_QUEUE_PFN:
+ pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
+        if (pa == 0) {
+            virtio_pci_stop_ioeventfd(proxy);
+            virtio_reset(vdev);
+            msix_unuse_all_vectors(&proxy->pci_dev);
+        } else {
+            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
+        }
+ break;
+ case VIRTIO_PCI_QUEUE_SEL:
+        if (val < VIRTIO_QUEUE_MAX) {
+            vdev->queue_sel = val;
+        }
+ break;
+ case VIRTIO_PCI_QUEUE_NOTIFY:
+ if (val < VIRTIO_QUEUE_MAX) {
+ virtio_queue_notify(vdev, val);
+ }
+ break;
+ case VIRTIO_PCI_STATUS:
+ if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ virtio_pci_stop_ioeventfd(proxy);
+ }
+
+ virtio_set_status(vdev, val & 0xFF);
+
+ if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
+ virtio_pci_start_ioeventfd(proxy);
+ }
+
+ if (vdev->status == 0) {
+ virtio_reset(vdev);
+ msix_unuse_all_vectors(&proxy->pci_dev);
+ }
+
+ /* Linux before 2.6.34 drives the device without enabling
+ the PCI device bus master bit. Enable it automatically
+ for the guest. This is a PCI spec violation but so is
+ initiating DMA with bus master bit clear. */
+ if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
+ pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
+ proxy->pci_dev.config[PCI_COMMAND] |
+ PCI_COMMAND_MASTER, 1);
+ }
+ break;
+ case VIRTIO_MSI_CONFIG_VECTOR:
+ msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
+        /* Make it possible for guest to discover that an error took place. */
+        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
+            val = VIRTIO_NO_VECTOR;
+        }
+ vdev->config_vector = val;
+ break;
+ case VIRTIO_MSI_QUEUE_VECTOR:
+ msix_vector_unuse(&proxy->pci_dev,
+ virtio_queue_vector(vdev, vdev->queue_sel));
+        /* Make it possible for guest to discover that an error took place. */
+        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
+            val = VIRTIO_NO_VECTOR;
+        }
+ virtio_queue_set_vector(vdev, vdev->queue_sel, val);
+ break;
+ default:
+ error_report("%s: unexpected address 0x%x value 0x%x",
+ __func__, addr, val);
+ break;
+ }
+}
+
+static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ uint32_t ret = 0xFFFFFFFF;
+
+ switch (addr) {
+ case VIRTIO_PCI_HOST_FEATURES:
+ ret = vdev->host_features;
+ break;
+ case VIRTIO_PCI_GUEST_FEATURES:
+ ret = vdev->guest_features;
+ break;
+ case VIRTIO_PCI_QUEUE_PFN:
+ ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
+ >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
+ break;
+ case VIRTIO_PCI_QUEUE_NUM:
+ ret = virtio_queue_get_num(vdev, vdev->queue_sel);
+ break;
+ case VIRTIO_PCI_QUEUE_SEL:
+ ret = vdev->queue_sel;
+ break;
+ case VIRTIO_PCI_STATUS:
+ ret = vdev->status;
+ break;
+ case VIRTIO_PCI_ISR:
+ /* reading from the ISR also clears it. */
+ ret = vdev->isr;
+ vdev->isr = 0;
+ pci_irq_deassert(&proxy->pci_dev);
+ break;
+ case VIRTIO_MSI_CONFIG_VECTOR:
+ ret = vdev->config_vector;
+ break;
+ case VIRTIO_MSI_QUEUE_VECTOR:
+ ret = virtio_queue_vector(vdev, vdev->queue_sel);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ VirtIOPCIProxy *proxy = opaque;
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
+ uint64_t val = 0;
+ if (addr < config) {
+ return virtio_ioport_read(proxy, addr);
+ }
+ addr -= config;
+
+ switch (size) {
+ case 1:
+ val = virtio_config_readb(vdev, addr);
+ break;
+ case 2:
+ val = virtio_config_readw(vdev, addr);
+ if (virtio_is_big_endian(vdev)) {
+ val = bswap16(val);
+ }
+ break;
+ case 4:
+ val = virtio_config_readl(vdev, addr);
+ if (virtio_is_big_endian(vdev)) {
+ val = bswap32(val);
+ }
+ break;
+ }
+ return val;
+}
+
+static void virtio_pci_config_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ VirtIOPCIProxy *proxy = opaque;
+ uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ if (addr < config) {
+ virtio_ioport_write(proxy, addr, val);
+ return;
+ }
+ addr -= config;
+ /*
+ * Virtio-PCI is odd. Ioports are LE but config space is target native
+ * endian.
+ */
+ switch (size) {
+ case 1:
+ virtio_config_writeb(vdev, addr, val);
+ break;
+ case 2:
+ if (virtio_is_big_endian(vdev)) {
+ val = bswap16(val);
+ }
+ virtio_config_writew(vdev, addr, val);
+ break;
+ case 4:
+ if (virtio_is_big_endian(vdev)) {
+ val = bswap32(val);
+ }
+ virtio_config_writel(vdev, addr, val);
+ break;
+ }
+}
+
+static const MemoryRegionOps virtio_pci_config_ops = {
+ .read = virtio_pci_config_read,
+ .write = virtio_pci_config_write,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+/* Below are generic functions to do memcpy from/to an address space,
+ * without byteswaps, with input validation.
+ *
+ * As regular address_space_* APIs all do some kind of byteswap at least for
+ * some host/target combinations, we are forced to explicitly convert to a
+ * known-endianness integer value.
+ * It doesn't really matter which endian format to go through, so the code
+ * below selects the endian that causes the least amount of work on the given
+ * host.
+ *
+ * Note: host pointer must be aligned.
+ */
+static
+void virtio_address_space_write(AddressSpace *as, hwaddr addr,
+ const uint8_t *buf, int len)
+{
+ uint32_t val;
+
+ /* address_space_* APIs assume an aligned address.
+ * As address is under guest control, handle illegal values.
+ */
+ addr &= ~(len - 1);
+
+ /* Make sure caller aligned buf properly */
+ assert(!(((uintptr_t)buf) & (len - 1)));
+
+ switch (len) {
+ case 1:
+ val = pci_get_byte(buf);
+ address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
+ break;
+ case 2:
+ val = pci_get_word(buf);
+ address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
+ break;
+ case 4:
+ val = pci_get_long(buf);
+ address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
+ break;
+ default:
+ /* As length is under guest control, handle illegal values. */
+ break;
+ }
+}
+
+static void
+virtio_address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
+{
+ uint32_t val;
+
+ /* address_space_* APIs assume an aligned address.
+ * As address is under guest control, handle illegal values.
+ */
+ addr &= ~(len - 1);
+
+ /* Make sure caller aligned buf properly */
+ assert(!(((uintptr_t)buf) & (len - 1)));
+
+ switch (len) {
+ case 1:
+ val = address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
+ pci_set_byte(buf, val);
+ break;
+ case 2:
+ val = address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
+ pci_set_word(buf, val);
+ break;
+ case 4:
+ val = address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
+ pci_set_long(buf, val);
+ break;
+ default:
+ /* As length is under guest control, handle illegal values. */
+ break;
+ }
+}
+
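+/* PCI config space write hook: after default handling, react to the bus
+ * master bit being cleared and forward writes to the VIRTIO_PCI_CAP_PCI_CFG
+ * data window into the modern BAR's address space.
+ */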
+static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
+ uint32_t val, int len)
+{
+ VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ struct virtio_pci_cfg_cap *cfg;
+
+ pci_default_write_config(pci_dev, address, val, len);
+
+ if (range_covers_byte(address, len, PCI_COMMAND) &&
+ !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
+ virtio_pci_stop_ioeventfd(proxy);
+ virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
+ }
+
+ if (proxy->config_cap &&
+ ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
+ pci_cfg_data),
+ sizeof cfg->pci_cfg_data)) {
+ uint32_t off;
+ uint32_t len;
+
+ cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
+ off = le32_to_cpu(cfg->cap.offset);
+ len = le32_to_cpu(cfg->cap.length);
+
+ if (len == 1 || len == 2 || len == 4) {
+ assert(len <= sizeof cfg->pci_cfg_data);
+ virtio_address_space_write(&proxy->modern_as, off,
+ cfg->pci_cfg_data, len);
+ }
+ }
+}
+
+static uint32_t virtio_read_config(PCIDevice *pci_dev,
+ uint32_t address, int len)
+{
+ VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
+ struct virtio_pci_cfg_cap *cfg;
+
+ if (proxy->config_cap &&
+ ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
+ pci_cfg_data),
+ sizeof cfg->pci_cfg_data)) {
+ uint32_t off;
+ uint32_t len;
+
+ cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
+ off = le32_to_cpu(cfg->cap.offset);
+ len = le32_to_cpu(cfg->cap.length);
+
+ if (len == 1 || len == 2 || len == 4) {
+ assert(len <= sizeof cfg->pci_cfg_data);
+ virtio_address_space_read(&proxy->modern_as, off,
+ cfg->pci_cfg_data, len);
+ }
+ }
+
+ return pci_default_read_config(pci_dev, address, len);
+}
+
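+/* Per-vector MSI route bookkeeping: the first user of a vector allocates a
+ * KVM MSI route, subsequent users only bump the reference count.
+ */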
+static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
+ unsigned int queue_no,
+ unsigned int vector,
+ MSIMessage msg)
+{
+ VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
+ int ret;
+
+ if (irqfd->users == 0) {
+ ret = kvm_irqchip_add_msi_route(kvm_state, msg, &proxy->pci_dev);
+ if (ret < 0) {
+ return ret;
+ }
+ irqfd->virq = ret;
+ }
+ irqfd->users++;
+ return 0;
+}
+
+static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
+ unsigned int vector)
+{
+ VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
+ if (--irqfd->users == 0) {
+ kvm_irqchip_release_virq(kvm_state, irqfd->virq);
+ }
+}
+
+static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
+ unsigned int queue_no,
+ unsigned int vector)
+{
+ VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ VirtQueue *vq = virtio_get_queue(vdev, queue_no);
+ EventNotifier *n = virtio_queue_get_guest_notifier(vq);
+ int ret;
+ ret = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
+ return ret;
+}
+
+static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
+ unsigned int queue_no,
+ unsigned int vector)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ VirtQueue *vq = virtio_get_queue(vdev, queue_no);
+ EventNotifier *n = virtio_queue_get_guest_notifier(vq);
+ VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
+ int ret;
+
+ ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
+ assert(ret == 0);
+}
+
+static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
+{
+ PCIDevice *dev = &proxy->pci_dev;
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ unsigned int vector;
+ int ret, queue_no;
+ MSIMessage msg;
+
+ for (queue_no = 0; queue_no < nvqs; queue_no++) {
+ if (!virtio_queue_get_num(vdev, queue_no)) {
+ break;
+ }
+ vector = virtio_queue_vector(vdev, queue_no);
+ if (vector >= msix_nr_vectors_allocated(dev)) {
+ continue;
+ }
+ msg = msix_get_message(dev, vector);
+ ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector, msg);
+ if (ret < 0) {
+ goto undo;
+ }
+ /* If guest supports masking, set up irqfd now.
+ * Otherwise, delay until unmasked in the frontend.
+ */
+ if (k->guest_notifier_mask) {
+ ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
+ if (ret < 0) {
+ kvm_virtio_pci_vq_vector_release(proxy, vector);
+ goto undo;
+ }
+ }
+ }
+ return 0;
+
+undo:
+ while (--queue_no >= 0) {
+ vector = virtio_queue_vector(vdev, queue_no);
+ if (vector >= msix_nr_vectors_allocated(dev)) {
+ continue;
+ }
+ if (k->guest_notifier_mask) {
+ kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
+ }
+ kvm_virtio_pci_vq_vector_release(proxy, vector);
+ }
+ return ret;
+}
+
+static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
+{
+ PCIDevice *dev = &proxy->pci_dev;
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ unsigned int vector;
+ int queue_no;
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+
+ for (queue_no = 0; queue_no < nvqs; queue_no++) {
+ if (!virtio_queue_get_num(vdev, queue_no)) {
+ break;
+ }
+ vector = virtio_queue_vector(vdev, queue_no);
+ if (vector >= msix_nr_vectors_allocated(dev)) {
+ continue;
+ }
+ /* If guest supports masking, clean up irqfd now.
+ * Otherwise, it was cleaned when masked in the frontend.
+ */
+ if (k->guest_notifier_mask) {
+ kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
+ }
+ kvm_virtio_pci_vq_vector_release(proxy, vector);
+ }
+}
+
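+/* Unmask a single queue/vector pair: refresh the MSI route if the message
+ * changed, then either unmask through the device's guest_notifier_mask
+ * callback or attach the irqfd now.
+ */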
+static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
+ unsigned int queue_no,
+ unsigned int vector,
+ MSIMessage msg)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ VirtQueue *vq = virtio_get_queue(vdev, queue_no);
+ EventNotifier *n = virtio_queue_get_guest_notifier(vq);
+ VirtIOIRQFD *irqfd;
+ int ret = 0;
+
+ if (proxy->vector_irqfd) {
+ irqfd = &proxy->vector_irqfd[vector];
+ if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
+ ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
+ &proxy->pci_dev);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ }
+
+ /* If guest supports masking, irqfd is already setup, unmask it.
+ * Otherwise, set it up now.
+ */
+ if (k->guest_notifier_mask) {
+ k->guest_notifier_mask(vdev, queue_no, false);
+ /* Test after unmasking to avoid losing events. */
+ if (k->guest_notifier_pending &&
+ k->guest_notifier_pending(vdev, queue_no)) {
+ event_notifier_set(n);
+ }
+ } else {
+ ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
+ }
+ return ret;
+}
+
+static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
+ unsigned int queue_no,
+ unsigned int vector)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+
+ /* If guest supports masking, keep irqfd but mask it.
+ * Otherwise, clean it up now.
+ */
+ if (k->guest_notifier_mask) {
+ k->guest_notifier_mask(vdev, queue_no, true);
+ } else {
+ kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
+ }
+}
+
+static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
+ MSIMessage msg)
+{
+ VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
+ int ret, index, unmasked = 0;
+
+ while (vq) {
+ index = virtio_get_queue_index(vq);
+ if (!virtio_queue_get_num(vdev, index)) {
+ break;
+ }
+ if (index < proxy->nvqs_with_notifiers) {
+ ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
+ if (ret < 0) {
+ goto undo;
+ }
+ ++unmasked;
+ }
+ vq = virtio_vector_next_queue(vq);
+ }
+
+ return 0;
+
+undo:
+ vq = virtio_vector_first_queue(vdev, vector);
+ while (vq && unmasked >= 0) {
+ index = virtio_get_queue_index(vq);
+ if (index < proxy->nvqs_with_notifiers) {
+ virtio_pci_vq_vector_mask(proxy, index, vector);
+ --unmasked;
+ }
+ vq = virtio_vector_next_queue(vq);
+ }
+ return ret;
+}
+
+static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
+{
+ VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
+ int index;
+
+ while (vq) {
+ index = virtio_get_queue_index(vq);
+ if (!virtio_queue_get_num(vdev, index)) {
+ break;
+ }
+ if (index < proxy->nvqs_with_notifiers) {
+ virtio_pci_vq_vector_mask(proxy, index, vector);
+ }
+ vq = virtio_vector_next_queue(vq);
+ }
+}
+
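+/* MSI-X poll notifier: for masked vectors, set the pending bit of any queue
+ * whose guest notifier has fired (or which the device reports as pending).
+ */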
+static void virtio_pci_vector_poll(PCIDevice *dev,
+ unsigned int vector_start,
+ unsigned int vector_end)
+{
+ VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ int queue_no;
+ unsigned int vector;
+ EventNotifier *notifier;
+ VirtQueue *vq;
+
+ for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
+ if (!virtio_queue_get_num(vdev, queue_no)) {
+ break;
+ }
+ vector = virtio_queue_vector(vdev, queue_no);
+ if (vector < vector_start || vector >= vector_end ||
+ !msix_is_masked(dev, vector)) {
+ continue;
+ }
+ vq = virtio_get_queue(vdev, queue_no);
+ notifier = virtio_queue_get_guest_notifier(vq);
+ if (k->guest_notifier_pending) {
+ if (k->guest_notifier_pending(vdev, queue_no)) {
+ msix_set_pending(dev, vector);
+ }
+ } else if (event_notifier_test_and_clear(notifier)) {
+ msix_set_pending(dev, vector);
+ }
+ }
+}
+
+static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
+ bool with_irqfd)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+ VirtQueue *vq = virtio_get_queue(vdev, n);
+ EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
+
+ if (assign) {
+ int r = event_notifier_init(notifier, 0);
+ if (r < 0) {
+ return r;
+ }
+ virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
+ } else {
+ virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
+ event_notifier_cleanup(notifier);
+ }
+
+ if (!msix_enabled(&proxy->pci_dev) && vdc->guest_notifier_mask) {
+ vdc->guest_notifier_mask(vdev, n, !assign);
+ }
+
+ return 0;
+}
+
+static bool virtio_pci_query_guest_notifiers(DeviceState *d)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+ return msix_enabled(&proxy->pci_dev);
+}
+
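+/* Assign or deassign guest notifiers for the first nvqs queues, backing them
+ * with KVM irqfds when MSI-X and irqfd routing are both available.
+ */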
+static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ int r, n;
+ bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
+ kvm_msi_via_irqfd_enabled();
+
+ nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);
+
+ /* When deassigning, pass a consistent nvqs value
+ * to avoid leaking notifiers.
+ */
+ assert(assign || nvqs == proxy->nvqs_with_notifiers);
+
+ proxy->nvqs_with_notifiers = nvqs;
+
+ /* Must unset vector notifier while guest notifier is still assigned */
+ if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
+ msix_unset_vector_notifiers(&proxy->pci_dev);
+ if (proxy->vector_irqfd) {
+ kvm_virtio_pci_vector_release(proxy, nvqs);
+ g_free(proxy->vector_irqfd);
+ proxy->vector_irqfd = NULL;
+ }
+ }
+
+ for (n = 0; n < nvqs; n++) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ break;
+ }
+
+ r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
+ if (r < 0) {
+ goto assign_error;
+ }
+ }
+
+ /* Must set vector notifier after guest notifier has been assigned */
+ if ((with_irqfd || k->guest_notifier_mask) && assign) {
+ if (with_irqfd) {
+ proxy->vector_irqfd =
+ g_malloc0(sizeof(*proxy->vector_irqfd) *
+ msix_nr_vectors_allocated(&proxy->pci_dev));
+ r = kvm_virtio_pci_vector_use(proxy, nvqs);
+ if (r < 0) {
+ goto assign_error;
+ }
+ }
+ r = msix_set_vector_notifiers(&proxy->pci_dev,
+ virtio_pci_vector_unmask,
+ virtio_pci_vector_mask,
+ virtio_pci_vector_poll);
+ if (r < 0) {
+ goto notifiers_error;
+ }
+ }
+
+ return 0;
+
+notifiers_error:
+ if (with_irqfd) {
+ assert(assign);
+ kvm_virtio_pci_vector_release(proxy, nvqs);
+ }
+
+assign_error:
+ /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
+ assert(assign);
+ while (--n >= 0) {
+ virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
+ }
+ return r;
+}
+
+static int virtio_pci_set_host_notifier(DeviceState *d, int n, bool assign)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+ /* Stop using ioeventfd for virtqueue kick if the device starts using host
+     * notifiers. This makes it easy to avoid stepping on each other's toes.
+ */
+ proxy->ioeventfd_disabled = assign;
+ if (assign) {
+ virtio_pci_stop_ioeventfd(proxy);
+ }
+    /* We don't need to start here: the backend currently only stops on
+     * status change away from ok, reset, vmstop and such. If we do add
+     * code to start here, we need to check vmstate, device state etc. */
+ return virtio_pci_set_host_notifier_internal(proxy, n, assign, false);
+}
+
+static void virtio_pci_vmstate_change(DeviceState *d, bool running)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
+ if (running) {
+ /* Old QEMU versions did not set bus master enable on status write.
+ * Detect DRIVER set and enable it.
+ */
+ if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
+ (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
+ !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
+ pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
+ proxy->pci_dev.config[PCI_COMMAND] |
+ PCI_COMMAND_MASTER, 1);
+ }
+ virtio_pci_start_ioeventfd(proxy);
+ } else {
+ virtio_pci_stop_ioeventfd(proxy);
+ }
+}
+
+#ifdef CONFIG_VIRTFS
+static void virtio_9p_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ V9fsPCIState *dev = VIRTIO_9P_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+
+ qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", errp);
+}
+
+static Property virtio_9p_pci_properties[] = {
+ DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_9p_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+
+ k->realize = virtio_9p_pci_realize;
+ pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
+ pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_9P;
+ pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
+ pcidev_k->class_id = 0x2;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+ dc->props = virtio_9p_pci_properties;
+}
+
+static void virtio_9p_pci_instance_init(Object *obj)
+{
+ V9fsPCIState *dev = VIRTIO_9P_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_9P);
+}
+
+static const TypeInfo virtio_9p_pci_info = {
+ .name = TYPE_VIRTIO_9P_PCI,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(V9fsPCIState),
+ .instance_init = virtio_9p_pci_instance_init,
+ .class_init = virtio_9p_pci_class_init,
+};
+#endif /* CONFIG_VIRTFS */
+
+/*
+ * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
+ */
+
+static int virtio_pci_query_nvectors(DeviceState *d)
+{
+ VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
+
+ return proxy->nvectors;
+}
+
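+/* Append a vendor-specific PCI capability and copy the virtio_pci_cap body
+ * (everything after the two generic header bytes) into config space.
+ */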
+static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
+ struct virtio_pci_cap *cap)
+{
+ PCIDevice *dev = &proxy->pci_dev;
+ int offset;
+
+ offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0, cap->cap_len);
+ assert(offset > 0);
+
+ assert(cap->cap_len >= sizeof *cap);
+ memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
+ cap->cap_len - PCI_CAP_FLAGS);
+
+ return offset;
+}
+
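+/* Modern (virtio 1.0) common configuration structure accessors. */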
+static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ VirtIOPCIProxy *proxy = opaque;
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ uint32_t val = 0;
+ int i;
+
+ switch (addr) {
+ case VIRTIO_PCI_COMMON_DFSELECT:
+ val = proxy->dfselect;
+ break;
+ case VIRTIO_PCI_COMMON_DF:
+ if (proxy->dfselect <= 1) {
+ val = (vdev->host_features & ~VIRTIO_LEGACY_FEATURES) >>
+ (32 * proxy->dfselect);
+ }
+ break;
+ case VIRTIO_PCI_COMMON_GFSELECT:
+ val = proxy->gfselect;
+ break;
+ case VIRTIO_PCI_COMMON_GF:
+ if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
+ val = proxy->guest_features[proxy->gfselect];
+ }
+ break;
+ case VIRTIO_PCI_COMMON_MSIX:
+ val = vdev->config_vector;
+ break;
+ case VIRTIO_PCI_COMMON_NUMQ:
+ for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
+ if (virtio_queue_get_num(vdev, i)) {
+ val = i + 1;
+ }
+ }
+ break;
+ case VIRTIO_PCI_COMMON_STATUS:
+ val = vdev->status;
+ break;
+ case VIRTIO_PCI_COMMON_CFGGENERATION:
+ val = vdev->generation;
+ break;
+ case VIRTIO_PCI_COMMON_Q_SELECT:
+ val = vdev->queue_sel;
+ break;
+ case VIRTIO_PCI_COMMON_Q_SIZE:
+ val = virtio_queue_get_num(vdev, vdev->queue_sel);
+ break;
+ case VIRTIO_PCI_COMMON_Q_MSIX:
+ val = virtio_queue_vector(vdev, vdev->queue_sel);
+ break;
+ case VIRTIO_PCI_COMMON_Q_ENABLE:
+ val = proxy->vqs[vdev->queue_sel].enabled;
+ break;
+ case VIRTIO_PCI_COMMON_Q_NOFF:
+ /* Simply map queues in order */
+ val = vdev->queue_sel;
+ break;
+ case VIRTIO_PCI_COMMON_Q_DESCLO:
+ val = proxy->vqs[vdev->queue_sel].desc[0];
+ break;
+ case VIRTIO_PCI_COMMON_Q_DESCHI:
+ val = proxy->vqs[vdev->queue_sel].desc[1];
+ break;
+ case VIRTIO_PCI_COMMON_Q_AVAILLO:
+ val = proxy->vqs[vdev->queue_sel].avail[0];
+ break;
+ case VIRTIO_PCI_COMMON_Q_AVAILHI:
+ val = proxy->vqs[vdev->queue_sel].avail[1];
+ break;
+ case VIRTIO_PCI_COMMON_Q_USEDLO:
+ val = proxy->vqs[vdev->queue_sel].used[0];
+ break;
+ case VIRTIO_PCI_COMMON_Q_USEDHI:
+ val = proxy->vqs[vdev->queue_sel].used[1];
+ break;
+ default:
+ val = 0;
+ }
+
+ return val;
+}
+
+static void virtio_pci_common_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ VirtIOPCIProxy *proxy = opaque;
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
+ switch (addr) {
+ case VIRTIO_PCI_COMMON_DFSELECT:
+ proxy->dfselect = val;
+ break;
+ case VIRTIO_PCI_COMMON_GFSELECT:
+ proxy->gfselect = val;
+ break;
+ case VIRTIO_PCI_COMMON_GF:
+ if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
+ proxy->guest_features[proxy->gfselect] = val;
+ virtio_set_features(vdev,
+ (((uint64_t)proxy->guest_features[1]) << 32) |
+ proxy->guest_features[0]);
+ }
+ break;
+ case VIRTIO_PCI_COMMON_MSIX:
+ msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
+        /* Make it possible for guest to discover that an error took place. */
+ if (msix_vector_use(&proxy->pci_dev, val) < 0) {
+ val = VIRTIO_NO_VECTOR;
+ }
+ vdev->config_vector = val;
+ break;
+ case VIRTIO_PCI_COMMON_STATUS:
+ if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ virtio_pci_stop_ioeventfd(proxy);
+ }
+
+ virtio_set_status(vdev, val & 0xFF);
+
+ if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
+ virtio_pci_start_ioeventfd(proxy);
+ }
+
+ if (vdev->status == 0) {
+ virtio_reset(vdev);
+ msix_unuse_all_vectors(&proxy->pci_dev);
+ }
+
+ break;
+ case VIRTIO_PCI_COMMON_Q_SELECT:
+ if (val < VIRTIO_QUEUE_MAX) {
+ vdev->queue_sel = val;
+ }
+ break;
+ case VIRTIO_PCI_COMMON_Q_SIZE:
+ proxy->vqs[vdev->queue_sel].num = val;
+ break;
+ case VIRTIO_PCI_COMMON_Q_MSIX:
+ msix_vector_unuse(&proxy->pci_dev,
+ virtio_queue_vector(vdev, vdev->queue_sel));
+        /* Make it possible for guest to discover that an error took place. */
+ if (msix_vector_use(&proxy->pci_dev, val) < 0) {
+ val = VIRTIO_NO_VECTOR;
+ }
+ virtio_queue_set_vector(vdev, vdev->queue_sel, val);
+ break;
+ case VIRTIO_PCI_COMMON_Q_ENABLE:
+ /* TODO: need a way to put num back on reset. */
+ virtio_queue_set_num(vdev, vdev->queue_sel,
+ proxy->vqs[vdev->queue_sel].num);
+ virtio_queue_set_rings(vdev, vdev->queue_sel,
+ ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
+ proxy->vqs[vdev->queue_sel].desc[0],
+ ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
+ proxy->vqs[vdev->queue_sel].avail[0],
+ ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
+ proxy->vqs[vdev->queue_sel].used[0]);
+ proxy->vqs[vdev->queue_sel].enabled = 1;
+ break;
+ case VIRTIO_PCI_COMMON_Q_DESCLO:
+ proxy->vqs[vdev->queue_sel].desc[0] = val;
+ break;
+ case VIRTIO_PCI_COMMON_Q_DESCHI:
+ proxy->vqs[vdev->queue_sel].desc[1] = val;
+ break;
+ case VIRTIO_PCI_COMMON_Q_AVAILLO:
+ proxy->vqs[vdev->queue_sel].avail[0] = val;
+ break;
+ case VIRTIO_PCI_COMMON_Q_AVAILHI:
+ proxy->vqs[vdev->queue_sel].avail[1] = val;
+ break;
+ case VIRTIO_PCI_COMMON_Q_USEDLO:
+ proxy->vqs[vdev->queue_sel].used[0] = val;
+ break;
+ case VIRTIO_PCI_COMMON_Q_USEDHI:
+ proxy->vqs[vdev->queue_sel].used[1] = val;
+ break;
+ default:
+ break;
+ }
+}
+
+
+static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ return 0;
+}
+
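+/* Modern MMIO notify: the queue index is encoded in the write offset; the
+ * PIO variant below takes the queue index from the written value instead.
+ */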
+static void virtio_pci_notify_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ VirtIODevice *vdev = opaque;
+ unsigned queue = addr / QEMU_VIRTIO_PCI_QUEUE_MEM_MULT;
+
+ if (queue < VIRTIO_QUEUE_MAX) {
+ virtio_queue_notify(vdev, queue);
+ }
+}
+
+static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ VirtIODevice *vdev = opaque;
+ unsigned queue = val;
+
+ if (queue < VIRTIO_QUEUE_MAX) {
+ virtio_queue_notify(vdev, queue);
+ }
+}
+
+static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ VirtIOPCIProxy *proxy = opaque;
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ uint64_t val = vdev->isr;
+
+ vdev->isr = 0;
+ pci_irq_deassert(&proxy->pci_dev);
+
+ return val;
+}
+
+static void virtio_pci_isr_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+}
+
+static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ VirtIODevice *vdev = opaque;
+ uint64_t val = 0;
+
+ switch (size) {
+ case 1:
+ val = virtio_config_modern_readb(vdev, addr);
+ break;
+ case 2:
+ val = virtio_config_modern_readw(vdev, addr);
+ break;
+ case 4:
+ val = virtio_config_modern_readl(vdev, addr);
+ break;
+ }
+ return val;
+}
+
+static void virtio_pci_device_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ VirtIODevice *vdev = opaque;
+ switch (size) {
+ case 1:
+ virtio_config_modern_writeb(vdev, addr, val);
+ break;
+ case 2:
+ virtio_config_modern_writew(vdev, addr, val);
+ break;
+ case 4:
+ virtio_config_modern_writel(vdev, addr, val);
+ break;
+ }
+}
+
+static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
+{
+ static const MemoryRegionOps common_ops = {
+ .read = virtio_pci_common_read,
+ .write = virtio_pci_common_write,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+ static const MemoryRegionOps isr_ops = {
+ .read = virtio_pci_isr_read,
+ .write = virtio_pci_isr_write,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+ static const MemoryRegionOps device_ops = {
+ .read = virtio_pci_device_read,
+ .write = virtio_pci_device_write,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+ static const MemoryRegionOps notify_ops = {
+ .read = virtio_pci_notify_read,
+ .write = virtio_pci_notify_write,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+ static const MemoryRegionOps notify_pio_ops = {
+ .read = virtio_pci_notify_read,
+ .write = virtio_pci_notify_write_pio,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+
+
+ memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
+ &common_ops,
+ proxy,
+ "virtio-pci-common",
+ proxy->common.size);
+
+ memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
+ &isr_ops,
+ proxy,
+ "virtio-pci-isr",
+ proxy->isr.size);
+
+ memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
+ &device_ops,
+ virtio_bus_get_device(&proxy->bus),
+ "virtio-pci-device",
+ proxy->device.size);
+
+ memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
+ &notify_ops,
+ virtio_bus_get_device(&proxy->bus),
+ "virtio-pci-notify",
+ proxy->notify.size);
+
+ memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
+ &notify_pio_ops,
+ virtio_bus_get_device(&proxy->bus),
+ "virtio-pci-notify-pio",
+ proxy->notify.size);
+}
+
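+/* Map a modern region into its BAR and advertise it to the guest with a
+ * vendor-specific capability describing its location.
+ */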
+static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
+ VirtIOPCIRegion *region,
+ struct virtio_pci_cap *cap,
+ MemoryRegion *mr,
+ uint8_t bar)
+{
+ memory_region_add_subregion(mr, region->offset, &region->mr);
+
+ cap->cfg_type = region->type;
+ cap->bar = bar;
+ cap->offset = cpu_to_le32(region->offset);
+ cap->length = cpu_to_le32(region->size);
+ virtio_pci_add_mem_cap(proxy, cap);
+}
+
+static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
+ VirtIOPCIRegion *region,
+ struct virtio_pci_cap *cap)
+{
+ virtio_pci_modern_region_map(proxy, region, cap,
+ &proxy->modern_bar, proxy->modern_mem_bar);
+}
+
+static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
+ VirtIOPCIRegion *region,
+ struct virtio_pci_cap *cap)
+{
+ virtio_pci_modern_region_map(proxy, region, cap,
+ &proxy->io_bar, proxy->modern_io_bar);
+}
+
+static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
+ VirtIOPCIRegion *region)
+{
+ memory_region_del_subregion(&proxy->modern_bar,
+ &region->mr);
+}
+
+static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
+ VirtIOPCIRegion *region)
+{
+ memory_region_del_subregion(&proxy->io_bar,
+ &region->mr);
+}
+
+/* This is called by virtio-bus just after the device is plugged. */
+static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
+{
+ VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
+ VirtioBusState *bus = &proxy->bus;
+ bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
+ bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
+ bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
+ uint8_t *config;
+ uint32_t size;
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
+ config = proxy->pci_dev.config;
+ if (proxy->class_code) {
+ pci_config_set_class(config, proxy->class_code);
+ }
+
+ if (legacy) {
+ /* legacy and transitional */
+ pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
+ pci_get_word(config + PCI_VENDOR_ID));
+ pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
+ } else {
+ /* pure virtio-1.0 */
+ pci_set_word(config + PCI_VENDOR_ID,
+ PCI_VENDOR_ID_REDHAT_QUMRANET);
+ pci_set_word(config + PCI_DEVICE_ID,
+ 0x1040 + virtio_bus_get_vdev_id(bus));
+ pci_config_set_revision(config, 1);
+ }
+ config[PCI_INTERRUPT_PIN] = 1;
+
+
+ if (modern) {
+ struct virtio_pci_cap cap = {
+ .cap_len = sizeof cap,
+ };
+ struct virtio_pci_notify_cap notify = {
+ .cap.cap_len = sizeof notify,
+ .notify_off_multiplier =
+ cpu_to_le32(QEMU_VIRTIO_PCI_QUEUE_MEM_MULT),
+ };
+ struct virtio_pci_cfg_cap cfg = {
+ .cap.cap_len = sizeof cfg,
+ .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
+ };
+ struct virtio_pci_notify_cap notify_pio = {
+ .cap.cap_len = sizeof notify,
+ .notify_off_multiplier = cpu_to_le32(0x0),
+ };
+
+ struct virtio_pci_cfg_cap *cfg_mask;
+
+ virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
+ virtio_pci_modern_regions_init(proxy);
+
+ virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
+ virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
+ virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
+ virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);
+
+ if (modern_pio) {
+ memory_region_init(&proxy->io_bar, OBJECT(proxy),
+ "virtio-pci-io", 0x4);
+
+ pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar,
+ PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);
+
+ virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
+ &notify_pio.cap);
+ }
+
+ pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar,
+ PCI_BASE_ADDRESS_SPACE_MEMORY |
+ PCI_BASE_ADDRESS_MEM_PREFETCH |
+ PCI_BASE_ADDRESS_MEM_TYPE_64,
+ &proxy->modern_bar);
+
+ proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
+ cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
+ pci_set_byte(&cfg_mask->cap.bar, ~0x0);
+ pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
+ pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
+ pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
+ }
+
+ if (proxy->nvectors) {
+ int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
+ proxy->msix_bar);
+ if (err) {
+ /* Notice when a system that supports MSIx can't initialize it. */
+ if (err != -ENOTSUP) {
+ error_report("unable to init msix vectors to %" PRIu32,
+ proxy->nvectors);
+ }
+ proxy->nvectors = 0;
+ }
+ }
+
+ proxy->pci_dev.config_write = virtio_write_config;
+ proxy->pci_dev.config_read = virtio_read_config;
+
+ if (legacy) {
+ size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
+ + virtio_bus_get_vdev_config_len(bus);
+ size = pow2ceil(size);
+
+ memory_region_init_io(&proxy->bar, OBJECT(proxy),
+ &virtio_pci_config_ops,
+ proxy, "virtio-pci", size);
+
+ pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar,
+ PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
+ }
+
+ if (!kvm_has_many_ioeventfds()) {
+ proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
+ }
+
+ virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
+}
+
+static void virtio_pci_device_unplugged(DeviceState *d)
+{
+ VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
+ bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
+ bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
+
+ virtio_pci_stop_ioeventfd(proxy);
+
+ if (modern) {
+ virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
+ virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
+ virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
+ virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
+ if (modern_pio) {
+ virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
+ }
+ }
+}
+
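+/* Common realize for all virtio-pci devices: lay out the BARs and modern
+ * capability regions, then hand off to the subclass realize hook.
+ */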
+static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
+{
+ VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
+ VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
+
+    /*
+     * virtio pci bar layout used by default.
+     * subclasses can re-arrange things if needed.
+     *
+     * region 0 -- virtio legacy io bar
+     * region 1 -- msi-x bar
+     * region 2 -- virtio modern io bar (off by default)
+     * region 4+5 -- virtio modern memory (64bit) bar
+     *
+     */
+ proxy->legacy_io_bar = 0;
+ proxy->msix_bar = 1;
+ proxy->modern_io_bar = 2;
+ proxy->modern_mem_bar = 4;
+
+ proxy->common.offset = 0x0;
+ proxy->common.size = 0x1000;
+ proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;
+
+ proxy->isr.offset = 0x1000;
+ proxy->isr.size = 0x1000;
+ proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;
+
+ proxy->device.offset = 0x2000;
+ proxy->device.size = 0x1000;
+ proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;
+
+ proxy->notify.offset = 0x3000;
+ proxy->notify.size =
+ QEMU_VIRTIO_PCI_QUEUE_MEM_MULT * VIRTIO_QUEUE_MAX;
+ proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;
+
+ proxy->notify_pio.offset = 0x0;
+ proxy->notify_pio.size = 0x4;
+ proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;
+
+ /* subclasses can enforce modern, so do this unconditionally */
+ memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
+ 2 * QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
+ VIRTIO_QUEUE_MAX);
+
+ memory_region_init_alias(&proxy->modern_cfg,
+ OBJECT(proxy),
+ "virtio-pci-cfg",
+ &proxy->modern_bar,
+ 0,
+ memory_region_size(&proxy->modern_bar));
+
+ address_space_init(&proxy->modern_as, &proxy->modern_cfg, "virtio-pci-cfg-as");
+
+ if (pci_is_express(pci_dev) && pci_bus_is_express(pci_dev->bus) &&
+ !pci_bus_is_root(pci_dev->bus)) {
+ int pos;
+
+ pos = pcie_endpoint_cap_init(pci_dev, 0);
+ assert(pos > 0);
+
+ pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0, PCI_PM_SIZEOF);
+ assert(pos > 0);
+
+ /*
+ * Indicates that this function complies with revision 1.2 of the
+ * PCI Power Management Interface Specification.
+ */
+ pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);
+ } else {
+ /*
+ * make future invocations of pci_is_express() return false
+ * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
+ */
+ pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
+ }
+
+ virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
+ if (k->realize) {
+ k->realize(proxy, errp);
+ }
+}
+
+static void virtio_pci_exit(PCIDevice *pci_dev)
+{
+ VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
+
+ msix_uninit_exclusive_bar(pci_dev);
+ address_space_destroy(&proxy->modern_as);
+}
+
+static void virtio_pci_reset(DeviceState *qdev)
+{
+ VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
+ VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
+ int i;
+
+ virtio_pci_stop_ioeventfd(proxy);
+ virtio_bus_reset(bus);
+ msix_unuse_all_vectors(&proxy->pci_dev);
+
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ proxy->vqs[i].enabled = 0;
+ }
+}
+
+static Property virtio_pci_properties[] = {
+ DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
+ DEFINE_PROP_BIT("disable-legacy", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT, false),
+ DEFINE_PROP_BIT("disable-modern", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT, true),
+ DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
+ DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
+ DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
+{
+ VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
+ VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
+ PCIDevice *pci_dev = &proxy->pci_dev;
+
+ if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
+ !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN)) {
+ pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
+ }
+
+ vpciklass->parent_dc_realize(qdev, errp);
+}
+
+static void virtio_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+ VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);
+
+ dc->props = virtio_pci_properties;
+ k->realize = virtio_pci_realize;
+ k->exit = virtio_pci_exit;
+ k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
+ k->revision = VIRTIO_PCI_ABI_VERSION;
+ k->class_id = PCI_CLASS_OTHERS;
+ vpciklass->parent_dc_realize = dc->realize;
+ dc->realize = virtio_pci_dc_realize;
+ dc->reset = virtio_pci_reset;
+}
+
+static const TypeInfo virtio_pci_info = {
+ .name = TYPE_VIRTIO_PCI,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(VirtIOPCIProxy),
+ .class_init = virtio_pci_class_init,
+ .class_size = sizeof(VirtioPCIClass),
+ .abstract = true,
+};
+
+/* virtio-blk-pci */
+
+static Property virtio_blk_pci_properties[] = {
+ DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
+ DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+
+ qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", errp);
+}
+
+static void virtio_blk_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+ dc->props = virtio_blk_pci_properties;
+ k->realize = virtio_blk_pci_realize;
+ pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
+ pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
+ pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
+ pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
+}
+
+static void virtio_blk_pci_instance_init(Object *obj)
+{
+ VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_BLK);
+ object_property_add_alias(obj, "iothread", OBJECT(&dev->vdev),"iothread",
+ &error_abort);
+ object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
+ "bootindex", &error_abort);
+}
+
+static const TypeInfo virtio_blk_pci_info = {
+ .name = TYPE_VIRTIO_BLK_PCI,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(VirtIOBlkPCI),
+ .instance_init = virtio_blk_pci_instance_init,
+ .class_init = virtio_blk_pci_class_init,
+};
+
+/* virtio-scsi-pci */
+
+static Property virtio_scsi_pci_properties[] = {
+ DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
+ DEV_NVECTORS_UNSPECIFIED),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
+ DeviceState *proxy = DEVICE(vpci_dev);
+ char *bus_name;
+
+ if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
+ vpci_dev->nvectors = vs->conf.num_queues + 3;
+ }
+
+ /*
+ * For command line compatibility, this sets the virtio-scsi-device bus
+ * name as before.
+ */
+ if (proxy->id) {
+ bus_name = g_strdup_printf("%s.0", proxy->id);
+ virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
+ g_free(bus_name);
+ }
+
+ qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", errp);
+}
+
+static void virtio_scsi_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+
+ k->realize = virtio_scsi_pci_realize;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+ dc->props = virtio_scsi_pci_properties;
+ pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
+ pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
+ pcidev_k->revision = 0x00;
+ pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
+}
+
+static void virtio_scsi_pci_instance_init(Object *obj)
+{
+ VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_SCSI);
+ object_property_add_alias(obj, "iothread", OBJECT(&dev->vdev), "iothread",
+ &error_abort);
+}
+
+static const TypeInfo virtio_scsi_pci_info = {
+ .name = TYPE_VIRTIO_SCSI_PCI,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(VirtIOSCSIPCI),
+ .instance_init = virtio_scsi_pci_instance_init,
+ .class_init = virtio_scsi_pci_class_init,
+};
+
+/* vhost-scsi-pci */
+
+#ifdef CONFIG_VHOST_SCSI
+static Property vhost_scsi_pci_properties[] = {
+ DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
+ DEV_NVECTORS_UNSPECIFIED),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void vhost_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ VHostSCSIPCI *dev = VHOST_SCSI_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
+
+ if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
+ vpci_dev->nvectors = vs->conf.num_queues + 3;
+ }
+
+ qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", errp);
+}
+
+static void vhost_scsi_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+ k->realize = vhost_scsi_pci_realize;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+ dc->props = vhost_scsi_pci_properties;
+ pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
+ pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
+ pcidev_k->revision = 0x00;
+ pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
+}
+
+static void vhost_scsi_pci_instance_init(Object *obj)
+{
+ VHostSCSIPCI *dev = VHOST_SCSI_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VHOST_SCSI);
+ object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
+ "bootindex", &error_abort);
+}
+
+static const TypeInfo vhost_scsi_pci_info = {
+ .name = TYPE_VHOST_SCSI_PCI,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(VHostSCSIPCI),
+ .instance_init = vhost_scsi_pci_instance_init,
+ .class_init = vhost_scsi_pci_class_init,
+};
+#endif
+
+/* virtio-balloon-pci */
+
+static Property virtio_balloon_pci_properties[] = {
+ DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_balloon_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+
+ if (vpci_dev->class_code != PCI_CLASS_OTHERS &&
+ vpci_dev->class_code != PCI_CLASS_MEMORY_RAM) { /* qemu < 1.1 */
+ vpci_dev->class_code = PCI_CLASS_OTHERS;
+ }
+
+ qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", errp);
+}
+
+static void virtio_balloon_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+ k->realize = virtio_balloon_pci_realize;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ dc->props = virtio_balloon_pci_properties;
+ pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
+ pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
+ pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
+ pcidev_k->class_id = PCI_CLASS_OTHERS;
+}
+
+static void virtio_balloon_pci_instance_init(Object *obj)
+{
+ VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_BALLOON);
+ object_property_add_alias(obj, "guest-stats", OBJECT(&dev->vdev),
+ "guest-stats", &error_abort);
+ object_property_add_alias(obj, "guest-stats-polling-interval",
+ OBJECT(&dev->vdev),
+ "guest-stats-polling-interval", &error_abort);
+}
+
+static const TypeInfo virtio_balloon_pci_info = {
+ .name = TYPE_VIRTIO_BALLOON_PCI,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(VirtIOBalloonPCI),
+ .instance_init = virtio_balloon_pci_instance_init,
+ .class_init = virtio_balloon_pci_class_init,
+};
+
+/* virtio-serial-pci */
+
+static void virtio_serial_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ DeviceState *proxy = DEVICE(vpci_dev);
+ char *bus_name;
+
+ if (vpci_dev->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
+ vpci_dev->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */
+ vpci_dev->class_code != PCI_CLASS_OTHERS) { /* qemu-kvm */
+ vpci_dev->class_code = PCI_CLASS_COMMUNICATION_OTHER;
+ }
+
+ /* backwards-compatibility with machines that were created with
+ DEV_NVECTORS_UNSPECIFIED */
+ if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
+ vpci_dev->nvectors = dev->vdev.serial.max_virtserial_ports + 1;
+ }
+
+ /*
+ * For command line compatibility, this sets the virtio-serial-device bus
+ * name as before.
+ */
+ if (proxy->id) {
+ bus_name = g_strdup_printf("%s.0", proxy->id);
+ virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
+ g_free(bus_name);
+ }
+
+ qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", errp);
+}
+
+static Property virtio_serial_pci_properties[] = {
+ DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
+ DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_serial_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+ k->realize = virtio_serial_pci_realize;
+ set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+ dc->props = virtio_serial_pci_properties;
+ pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
+ pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE;
+ pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
+ pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
+}
+
+static void virtio_serial_pci_instance_init(Object *obj)
+{
+ VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_SERIAL);
+}
+
+static const TypeInfo virtio_serial_pci_info = {
+ .name = TYPE_VIRTIO_SERIAL_PCI,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(VirtIOSerialPCI),
+ .instance_init = virtio_serial_pci_instance_init,
+ .class_init = virtio_serial_pci_class_init,
+};
+
+/* virtio-net-pci */
+
+static Property virtio_net_properties[] = {
+ DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
+ DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_net_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ DeviceState *qdev = DEVICE(vpci_dev);
+ VirtIONetPCI *dev = VIRTIO_NET_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+
+ virtio_net_set_netclient_name(&dev->vdev, qdev->id,
+ object_get_typename(OBJECT(qdev)));
+ qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", errp);
+}
+
+static void virtio_net_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+ VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);
+
+ k->romfile = "efi-virtio.rom";
+ k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
+ k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
+ k->revision = VIRTIO_PCI_ABI_VERSION;
+ k->class_id = PCI_CLASS_NETWORK_ETHERNET;
+ set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
+ dc->props = virtio_net_properties;
+ vpciklass->realize = virtio_net_pci_realize;
+}
+
+static void virtio_net_pci_instance_init(Object *obj)
+{
+ VirtIONetPCI *dev = VIRTIO_NET_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_NET);
+ object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
+ "bootindex", &error_abort);
+}
+
+static const TypeInfo virtio_net_pci_info = {
+ .name = TYPE_VIRTIO_NET_PCI,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(VirtIONetPCI),
+ .instance_init = virtio_net_pci_instance_init,
+ .class_init = virtio_net_pci_class_init,
+};
+
+/* virtio-rng-pci */
+
+static void virtio_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ VirtIORngPCI *vrng = VIRTIO_RNG_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&vrng->vdev);
+ Error *err = NULL;
+
+ qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ object_property_set_link(OBJECT(vrng),
+ OBJECT(vrng->vdev.conf.rng), "rng",
+ NULL);
+}
+
+static void virtio_rng_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+
+ k->realize = virtio_rng_pci_realize;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+
+ pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
+ pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_RNG;
+ pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
+ pcidev_k->class_id = PCI_CLASS_OTHERS;
+}
+
+static void virtio_rng_initfn(Object *obj)
+{
+ VirtIORngPCI *dev = VIRTIO_RNG_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_RNG);
+ object_property_add_alias(obj, "rng", OBJECT(&dev->vdev), "rng",
+ &error_abort);
+}
+
+static const TypeInfo virtio_rng_pci_info = {
+ .name = TYPE_VIRTIO_RNG_PCI,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(VirtIORngPCI),
+ .instance_init = virtio_rng_initfn,
+ .class_init = virtio_rng_pci_class_init,
+};
+
+/* virtio-input-pci */
+
+static Property virtio_input_pci_properties[] = {
+ DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_input_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ VirtIOInputPCI *vinput = VIRTIO_INPUT_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&vinput->vdev);
+
+ qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
+ /* force virtio-1.0 */
+ vpci_dev->flags &= ~VIRTIO_PCI_FLAG_DISABLE_MODERN;
+ vpci_dev->flags |= VIRTIO_PCI_FLAG_DISABLE_LEGACY;
+ object_property_set_bool(OBJECT(vdev), true, "realized", errp);
+}
+
+static void virtio_input_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+
+ dc->props = virtio_input_pci_properties;
+ k->realize = virtio_input_pci_realize;
+ set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+
+ pcidev_k->class_id = PCI_CLASS_INPUT_OTHER;
+}
+
+static void virtio_input_hid_kbd_pci_class_init(ObjectClass *klass, void *data)
+{
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+
+ pcidev_k->class_id = PCI_CLASS_INPUT_KEYBOARD;
+}
+
+static void virtio_input_hid_mouse_pci_class_init(ObjectClass *klass,
+ void *data)
+{
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+
+ pcidev_k->class_id = PCI_CLASS_INPUT_MOUSE;
+}
+
+static void virtio_keyboard_initfn(Object *obj)
+{
+ VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_KEYBOARD);
+}
+
+static void virtio_mouse_initfn(Object *obj)
+{
+ VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_MOUSE);
+}
+
+static void virtio_tablet_initfn(Object *obj)
+{
+ VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_TABLET);
+}
+
+static const TypeInfo virtio_input_pci_info = {
+ .name = TYPE_VIRTIO_INPUT_PCI,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(VirtIOInputPCI),
+ .class_init = virtio_input_pci_class_init,
+ .abstract = true,
+};
+
+static const TypeInfo virtio_input_hid_pci_info = {
+ .name = TYPE_VIRTIO_INPUT_HID_PCI,
+ .parent = TYPE_VIRTIO_INPUT_PCI,
+ .instance_size = sizeof(VirtIOInputHIDPCI),
+ .abstract = true,
+};
+
+static const TypeInfo virtio_keyboard_pci_info = {
+ .name = TYPE_VIRTIO_KEYBOARD_PCI,
+ .parent = TYPE_VIRTIO_INPUT_HID_PCI,
+ .class_init = virtio_input_hid_kbd_pci_class_init,
+ .instance_size = sizeof(VirtIOInputHIDPCI),
+ .instance_init = virtio_keyboard_initfn,
+};
+
+static const TypeInfo virtio_mouse_pci_info = {
+ .name = TYPE_VIRTIO_MOUSE_PCI,
+ .parent = TYPE_VIRTIO_INPUT_HID_PCI,
+ .class_init = virtio_input_hid_mouse_pci_class_init,
+ .instance_size = sizeof(VirtIOInputHIDPCI),
+ .instance_init = virtio_mouse_initfn,
+};
+
+static const TypeInfo virtio_tablet_pci_info = {
+ .name = TYPE_VIRTIO_TABLET_PCI,
+ .parent = TYPE_VIRTIO_INPUT_HID_PCI,
+ .instance_size = sizeof(VirtIOInputHIDPCI),
+ .instance_init = virtio_tablet_initfn,
+};
+
+#ifdef CONFIG_LINUX
+static void virtio_host_initfn(Object *obj)
+{
+ VirtIOInputHostPCI *dev = VIRTIO_INPUT_HOST_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_INPUT_HOST);
+}
+
+static const TypeInfo virtio_host_pci_info = {
+ .name = TYPE_VIRTIO_INPUT_HOST_PCI,
+ .parent = TYPE_VIRTIO_INPUT_PCI,
+ .instance_size = sizeof(VirtIOInputHostPCI),
+ .instance_init = virtio_host_initfn,
+};
+#endif
+
+/* virtio-pci-bus */
+
+static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
+ VirtIOPCIProxy *dev)
+{
+ DeviceState *qdev = DEVICE(dev);
+ char virtio_bus_name[] = "virtio-bus";
+
+ qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev,
+ virtio_bus_name);
+}
+
+static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
+{
+ BusClass *bus_class = BUS_CLASS(klass);
+ VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
+ bus_class->max_dev = 1;
+ k->notify = virtio_pci_notify;
+ k->save_config = virtio_pci_save_config;
+ k->load_config = virtio_pci_load_config;
+ k->save_queue = virtio_pci_save_queue;
+ k->load_queue = virtio_pci_load_queue;
+ k->save_extra_state = virtio_pci_save_extra_state;
+ k->load_extra_state = virtio_pci_load_extra_state;
+ k->has_extra_state = virtio_pci_has_extra_state;
+ k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
+ k->set_host_notifier = virtio_pci_set_host_notifier;
+ k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
+ k->vmstate_change = virtio_pci_vmstate_change;
+ k->device_plugged = virtio_pci_device_plugged;
+ k->device_unplugged = virtio_pci_device_unplugged;
+ k->query_nvectors = virtio_pci_query_nvectors;
+}
+
+static const TypeInfo virtio_pci_bus_info = {
+ .name = TYPE_VIRTIO_PCI_BUS,
+ .parent = TYPE_VIRTIO_BUS,
+ .instance_size = sizeof(VirtioPCIBusState),
+ .class_init = virtio_pci_bus_class_init,
+};
+
+static void virtio_pci_register_types(void)
+{
+ type_register_static(&virtio_rng_pci_info);
+ type_register_static(&virtio_input_pci_info);
+ type_register_static(&virtio_input_hid_pci_info);
+ type_register_static(&virtio_keyboard_pci_info);
+ type_register_static(&virtio_mouse_pci_info);
+ type_register_static(&virtio_tablet_pci_info);
+#ifdef CONFIG_LINUX
+ type_register_static(&virtio_host_pci_info);
+#endif
+ type_register_static(&virtio_pci_bus_info);
+ type_register_static(&virtio_pci_info);
+#ifdef CONFIG_VIRTFS
+ type_register_static(&virtio_9p_pci_info);
+#endif
+ type_register_static(&virtio_blk_pci_info);
+ type_register_static(&virtio_scsi_pci_info);
+ type_register_static(&virtio_balloon_pci_info);
+ type_register_static(&virtio_serial_pci_info);
+ type_register_static(&virtio_net_pci_info);
+#ifdef CONFIG_VHOST_SCSI
+ type_register_static(&vhost_scsi_pci_info);
+#endif
+}
+
+type_init(virtio_pci_register_types)
diff --git a/src/hw/virtio/virtio-pci.h b/src/hw/virtio/virtio-pci.h
new file mode 100644
index 0000000..c8f9cfd
--- /dev/null
+++ b/src/hw/virtio/virtio-pci.h
@@ -0,0 +1,318 @@
+/*
+ * Virtio PCI Bindings
+ *
+ * Copyright IBM, Corp. 2007
+ * Copyright (c) 2009 CodeSourcery
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ * Paul Brook <paul@codesourcery.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_VIRTIO_PCI_H
+#define QEMU_VIRTIO_PCI_H
+
+#include "hw/pci/msi.h"
+#include "hw/virtio/virtio-blk.h"
+#include "hw/virtio/virtio-net.h"
+#include "hw/virtio/virtio-rng.h"
+#include "hw/virtio/virtio-serial.h"
+#include "hw/virtio/virtio-scsi.h"
+#include "hw/virtio/virtio-balloon.h"
+#include "hw/virtio/virtio-bus.h"
+#include "hw/virtio/virtio-9p.h"
+#include "hw/virtio/virtio-input.h"
+#include "hw/virtio/virtio-gpu.h"
+#ifdef CONFIG_VIRTFS
+#include "hw/9pfs/virtio-9p.h"
+#endif
+#ifdef CONFIG_VHOST_SCSI
+#include "hw/virtio/vhost-scsi.h"
+#endif
+
+typedef struct VirtIOPCIProxy VirtIOPCIProxy;
+typedef struct VirtIOBlkPCI VirtIOBlkPCI;
+typedef struct VirtIOSCSIPCI VirtIOSCSIPCI;
+typedef struct VirtIOBalloonPCI VirtIOBalloonPCI;
+typedef struct VirtIOSerialPCI VirtIOSerialPCI;
+typedef struct VirtIONetPCI VirtIONetPCI;
+typedef struct VHostSCSIPCI VHostSCSIPCI;
+typedef struct VirtIORngPCI VirtIORngPCI;
+typedef struct VirtIOInputPCI VirtIOInputPCI;
+typedef struct VirtIOInputHIDPCI VirtIOInputHIDPCI;
+typedef struct VirtIOInputHostPCI VirtIOInputHostPCI;
+typedef struct VirtIOGPUPCI VirtIOGPUPCI;
+
+/* virtio-pci-bus */
+
+typedef struct VirtioBusState VirtioPCIBusState;
+typedef struct VirtioBusClass VirtioPCIBusClass;
+
+#define TYPE_VIRTIO_PCI_BUS "virtio-pci-bus"
+#define VIRTIO_PCI_BUS(obj) \
+ OBJECT_CHECK(VirtioPCIBusState, (obj), TYPE_VIRTIO_PCI_BUS)
+#define VIRTIO_PCI_BUS_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(VirtioPCIBusClass, obj, TYPE_VIRTIO_PCI_BUS)
+#define VIRTIO_PCI_BUS_CLASS(klass) \
+ OBJECT_CLASS_CHECK(VirtioPCIBusClass, klass, TYPE_VIRTIO_PCI_BUS)
+
+enum {
+ VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT,
+ VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT,
+ VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT,
+ VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT,
+ VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT,
+ VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT,
+ VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT,
+};
+
+/* Need to activate work-arounds for buggy guests at vmstate load. */
+#define VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION \
+ (1 << VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT)
+
+/* Performance improves when virtqueue kick processing is decoupled from the
+ * vcpu thread using ioeventfd for some devices. */
+#define VIRTIO_PCI_FLAG_USE_IOEVENTFD (1 << VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT)
+
+/* virtio version flags */
+#define VIRTIO_PCI_FLAG_DISABLE_LEGACY (1 << VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT)
+#define VIRTIO_PCI_FLAG_DISABLE_MODERN (1 << VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT)
+#define VIRTIO_PCI_FLAG_DISABLE_PCIE (1 << VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT)
+
+/* migrate extra state */
+#define VIRTIO_PCI_FLAG_MIGRATE_EXTRA (1 << VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT)
+
+/* Use PIO notification for modern devices? */
+#define VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY \
+ (1 << VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT)
+
+typedef struct {
+ MSIMessage msg;
+ int virq;
+ unsigned int users;
+} VirtIOIRQFD;
+
+/*
+ * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
+ */
+#define TYPE_VIRTIO_PCI "virtio-pci"
+#define VIRTIO_PCI_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(VirtioPCIClass, obj, TYPE_VIRTIO_PCI)
+#define VIRTIO_PCI_CLASS(klass) \
+ OBJECT_CLASS_CHECK(VirtioPCIClass, klass, TYPE_VIRTIO_PCI)
+#define VIRTIO_PCI(obj) \
+ OBJECT_CHECK(VirtIOPCIProxy, (obj), TYPE_VIRTIO_PCI)
+
+typedef struct VirtioPCIClass {
+ PCIDeviceClass parent_class;
+ DeviceRealize parent_dc_realize;
+ void (*realize)(VirtIOPCIProxy *vpci_dev, Error **errp);
+} VirtioPCIClass;
+
+typedef struct VirtIOPCIRegion {
+ MemoryRegion mr;
+ uint32_t offset;
+ uint32_t size;
+ uint32_t type;
+} VirtIOPCIRegion;
+
+typedef struct VirtIOPCIQueue {
+ uint16_t num;
+ bool enabled;
+ uint32_t desc[2];
+ uint32_t avail[2];
+ uint32_t used[2];
+} VirtIOPCIQueue;
+
+struct VirtIOPCIProxy {
+ PCIDevice pci_dev;
+ MemoryRegion bar;
+ VirtIOPCIRegion common;
+ VirtIOPCIRegion isr;
+ VirtIOPCIRegion device;
+ VirtIOPCIRegion notify;
+ VirtIOPCIRegion notify_pio;
+ MemoryRegion modern_bar;
+ MemoryRegion io_bar;
+ MemoryRegion modern_cfg;
+ AddressSpace modern_as;
+ uint32_t legacy_io_bar;
+ uint32_t msix_bar;
+ uint32_t modern_io_bar;
+ uint32_t modern_mem_bar;
+ int config_cap;
+ uint32_t flags;
+ uint32_t class_code;
+ uint32_t nvectors;
+ uint32_t dfselect;
+ uint32_t gfselect;
+ uint32_t guest_features[2];
+ VirtIOPCIQueue vqs[VIRTIO_QUEUE_MAX];
+
+ bool ioeventfd_disabled;
+ bool ioeventfd_started;
+ VirtIOIRQFD *vector_irqfd;
+ int nvqs_with_notifiers;
+ VirtioBusState bus;
+};
+
+
+/*
+ * virtio-scsi-pci: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_SCSI_PCI "virtio-scsi-pci"
+#define VIRTIO_SCSI_PCI(obj) \
+ OBJECT_CHECK(VirtIOSCSIPCI, (obj), TYPE_VIRTIO_SCSI_PCI)
+
+struct VirtIOSCSIPCI {
+ VirtIOPCIProxy parent_obj;
+ VirtIOSCSI vdev;
+};
+
+#ifdef CONFIG_VHOST_SCSI
+/*
+ * vhost-scsi-pci: This extends VirtioPCIProxy.
+ */
+#define TYPE_VHOST_SCSI_PCI "vhost-scsi-pci"
+#define VHOST_SCSI_PCI(obj) \
+ OBJECT_CHECK(VHostSCSIPCI, (obj), TYPE_VHOST_SCSI_PCI)
+
+struct VHostSCSIPCI {
+ VirtIOPCIProxy parent_obj;
+ VHostSCSI vdev;
+};
+#endif
+
+/*
+ * virtio-blk-pci: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_BLK_PCI "virtio-blk-pci"
+#define VIRTIO_BLK_PCI(obj) \
+ OBJECT_CHECK(VirtIOBlkPCI, (obj), TYPE_VIRTIO_BLK_PCI)
+
+struct VirtIOBlkPCI {
+ VirtIOPCIProxy parent_obj;
+ VirtIOBlock vdev;
+};
+
+/*
+ * virtio-balloon-pci: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_BALLOON_PCI "virtio-balloon-pci"
+#define VIRTIO_BALLOON_PCI(obj) \
+ OBJECT_CHECK(VirtIOBalloonPCI, (obj), TYPE_VIRTIO_BALLOON_PCI)
+
+struct VirtIOBalloonPCI {
+ VirtIOPCIProxy parent_obj;
+ VirtIOBalloon vdev;
+};
+
+/*
+ * virtio-serial-pci: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_SERIAL_PCI "virtio-serial-pci"
+#define VIRTIO_SERIAL_PCI(obj) \
+ OBJECT_CHECK(VirtIOSerialPCI, (obj), TYPE_VIRTIO_SERIAL_PCI)
+
+struct VirtIOSerialPCI {
+ VirtIOPCIProxy parent_obj;
+ VirtIOSerial vdev;
+};
+
+/*
+ * virtio-net-pci: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_NET_PCI "virtio-net-pci"
+#define VIRTIO_NET_PCI(obj) \
+ OBJECT_CHECK(VirtIONetPCI, (obj), TYPE_VIRTIO_NET_PCI)
+
+struct VirtIONetPCI {
+ VirtIOPCIProxy parent_obj;
+ VirtIONet vdev;
+};
+
+/*
+ * virtio-9p-pci: This extends VirtioPCIProxy.
+ */
+
+#ifdef CONFIG_VIRTFS
+
+#define TYPE_VIRTIO_9P_PCI "virtio-9p-pci"
+#define VIRTIO_9P_PCI(obj) \
+ OBJECT_CHECK(V9fsPCIState, (obj), TYPE_VIRTIO_9P_PCI)
+
+typedef struct V9fsPCIState {
+ VirtIOPCIProxy parent_obj;
+ V9fsState vdev;
+} V9fsPCIState;
+
+#endif
+
+/*
+ * virtio-rng-pci: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_RNG_PCI "virtio-rng-pci"
+#define VIRTIO_RNG_PCI(obj) \
+ OBJECT_CHECK(VirtIORngPCI, (obj), TYPE_VIRTIO_RNG_PCI)
+
+struct VirtIORngPCI {
+ VirtIOPCIProxy parent_obj;
+ VirtIORNG vdev;
+};
+
+/*
+ * virtio-input-pci: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_INPUT_PCI "virtio-input-pci"
+#define VIRTIO_INPUT_PCI(obj) \
+ OBJECT_CHECK(VirtIOInputPCI, (obj), TYPE_VIRTIO_INPUT_PCI)
+
+struct VirtIOInputPCI {
+ VirtIOPCIProxy parent_obj;
+ VirtIOInput vdev;
+};
+
+#define TYPE_VIRTIO_INPUT_HID_PCI "virtio-input-hid-pci"
+#define TYPE_VIRTIO_KEYBOARD_PCI "virtio-keyboard-pci"
+#define TYPE_VIRTIO_MOUSE_PCI "virtio-mouse-pci"
+#define TYPE_VIRTIO_TABLET_PCI "virtio-tablet-pci"
+#define VIRTIO_INPUT_HID_PCI(obj) \
+ OBJECT_CHECK(VirtIOInputHIDPCI, (obj), TYPE_VIRTIO_INPUT_HID_PCI)
+
+struct VirtIOInputHIDPCI {
+ VirtIOPCIProxy parent_obj;
+ VirtIOInputHID vdev;
+};
+
+#ifdef CONFIG_LINUX
+
+#define TYPE_VIRTIO_INPUT_HOST_PCI "virtio-input-host-pci"
+#define VIRTIO_INPUT_HOST_PCI(obj) \
+ OBJECT_CHECK(VirtIOInputHostPCI, (obj), TYPE_VIRTIO_INPUT_HOST_PCI)
+
+struct VirtIOInputHostPCI {
+ VirtIOPCIProxy parent_obj;
+ VirtIOInputHost vdev;
+};
+
+#endif
+
+/*
+ * virtio-gpu-pci: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_GPU_PCI "virtio-gpu-pci"
+#define VIRTIO_GPU_PCI(obj) \
+ OBJECT_CHECK(VirtIOGPUPCI, (obj), TYPE_VIRTIO_GPU_PCI)
+
+struct VirtIOGPUPCI {
+ VirtIOPCIProxy parent_obj;
+ VirtIOGPU vdev;
+};
+
+/* Virtio ABI version; if we increment this, we break the guest driver. */
+#define VIRTIO_PCI_ABI_VERSION 0
+
+#endif
diff --git a/src/hw/virtio/virtio-rng.c b/src/hw/virtio/virtio-rng.c
new file mode 100644
index 0000000..97d1541
--- /dev/null
+++ b/src/hw/virtio/virtio-rng.c
@@ -0,0 +1,267 @@
+/*
+ * A virtio device implementing a hardware random number generator.
+ *
+ * Copyright 2012 Red Hat, Inc.
+ * Copyright 2012 Amit Shah <amit.shah@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#include "qemu/iov.h"
+#include "hw/qdev.h"
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/virtio-rng.h"
+#include "sysemu/rng.h"
+#include "qom/object_interfaces.h"
+#include "trace.h"
+
+static bool is_guest_ready(VirtIORNG *vrng)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(vrng);
+ if (virtio_queue_ready(vrng->vq)
+ && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ return true;
+ }
+ trace_virtio_rng_guest_not_ready(vrng);
+ return false;
+}
+
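+/* Return the number of guest-writable bytes queued in vq, stopping once
+ * 'quota' bytes have been counted. */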
+static size_t get_request_size(VirtQueue *vq, unsigned quota)
+{
+ unsigned int in, out;
+
+ virtqueue_get_avail_bytes(vq, &in, &out, quota, 0);
+ return in;
+}
+
+static void virtio_rng_process(VirtIORNG *vrng);
+
+/* Send data from a char device over to the guest */
+static void chr_read(void *opaque, const void *buf, size_t size)
+{
+ VirtIORNG *vrng = opaque;
+ VirtIODevice *vdev = VIRTIO_DEVICE(vrng);
+ VirtQueueElement elem;
+ size_t len;
+ int offset;
+
+ if (!is_guest_ready(vrng)) {
+ return;
+ }
+
+ vrng->quota_remaining -= size;
+
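+    /* Copy the backend's data into as many guest buffers as needed. */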
+ offset = 0;
+ while (offset < size) {
+ if (!virtqueue_pop(vrng->vq, &elem)) {
+ break;
+ }
+ len = iov_from_buf(elem.in_sg, elem.in_num,
+ 0, buf + offset, size - offset);
+ offset += len;
+
+ virtqueue_push(vrng->vq, &elem, len);
+ trace_virtio_rng_pushed(vrng, len);
+ }
+ virtio_notify(vdev, vrng->vq);
+}
+
+static void virtio_rng_process(VirtIORNG *vrng)
+{
+ size_t size;
+ unsigned quota;
+
+ if (!is_guest_ready(vrng)) {
+ return;
+ }
+
+ if (vrng->activate_timer) {
+ timer_mod(vrng->rate_limit_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vrng->conf.period_ms);
+ vrng->activate_timer = false;
+ }
+
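+    /* Clamp the request to the remaining rate-limit quota for this period. */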
+ if (vrng->quota_remaining < 0) {
+ quota = 0;
+ } else {
+ quota = MIN((uint64_t)vrng->quota_remaining, (uint64_t)UINT32_MAX);
+ }
+ size = get_request_size(vrng->vq, quota);
+
+ trace_virtio_rng_request(vrng, size, quota);
+
+ size = MIN(vrng->quota_remaining, size);
+ if (size) {
+ rng_backend_request_entropy(vrng->rng, size, chr_read, vrng);
+ }
+}
+
+static void handle_input(VirtIODevice *vdev, VirtQueue *vq)
+{
+ VirtIORNG *vrng = VIRTIO_RNG(vdev);
+ virtio_rng_process(vrng);
+}
+
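+/* virtio-rng offers no device-specific feature bits, so the host features
+ * are passed through unchanged. */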
+static uint64_t get_features(VirtIODevice *vdev, uint64_t f, Error **errp)
+{
+ return f;
+}
+
+static void virtio_rng_save(QEMUFile *f, void *opaque)
+{
+ VirtIODevice *vdev = opaque;
+
+ virtio_save(vdev, f);
+}
+
+static int virtio_rng_load(QEMUFile *f, void *opaque, int version_id)
+{
+ VirtIORNG *vrng = opaque;
+ int ret;
+
+ if (version_id != 1) {
+ return -EINVAL;
+ }
+ ret = virtio_load(VIRTIO_DEVICE(vrng), f, version_id);
+ if (ret != 0) {
+ return ret;
+ }
+
+ /* We may have an element ready but couldn't process it due to a quota
+ * limit. Make sure to try again after live migration when the quota may
+ * have been reset.
+ */
+ virtio_rng_process(vrng);
+
+ return 0;
+}
+
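+/* Rate-limit timer callback: refill the quota and retry any pending request. */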
+static void check_rate_limit(void *opaque)
+{
+ VirtIORNG *vrng = opaque;
+
+ vrng->quota_remaining = vrng->conf.max_bytes;
+ virtio_rng_process(vrng);
+ vrng->activate_timer = true;
+}
+
+static void virtio_rng_device_realize(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VirtIORNG *vrng = VIRTIO_RNG(dev);
+ Error *local_err = NULL;
+
+ if (vrng->conf.period_ms <= 0) {
+ error_setg(errp, "'period' parameter expects a positive integer");
+ return;
+ }
+
+    /* Workaround: property parsing does not enforce unsigned integers,
+     * so reject values that cannot be represented as a non-negative
+     * int64_t. */
+ if (vrng->conf.max_bytes > INT64_MAX) {
+ error_setg(errp, "'max-bytes' parameter must be non-negative, "
+ "and less than 2^63");
+ return;
+ }
+
+ if (vrng->conf.rng == NULL) {
+ vrng->conf.default_backend = RNG_RANDOM(object_new(TYPE_RNG_RANDOM));
+
+ user_creatable_complete(OBJECT(vrng->conf.default_backend),
+ &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ object_unref(OBJECT(vrng->conf.default_backend));
+ return;
+ }
+
+ object_property_add_child(OBJECT(dev),
+ "default-backend",
+ OBJECT(vrng->conf.default_backend),
+ NULL);
+
+ /* The child property took a reference, we can safely drop ours now */
+ object_unref(OBJECT(vrng->conf.default_backend));
+
+ object_property_set_link(OBJECT(dev),
+ OBJECT(vrng->conf.default_backend),
+ "rng", NULL);
+ }
+
+ vrng->rng = vrng->conf.rng;
+ if (vrng->rng == NULL) {
+ error_setg(errp, "'rng' parameter expects a valid object");
+ return;
+ }
+
+ virtio_init(vdev, "virtio-rng", VIRTIO_ID_RNG, 0);
+
+ vrng->vq = virtio_add_queue(vdev, 8, handle_input);
+ vrng->quota_remaining = vrng->conf.max_bytes;
+ vrng->rate_limit_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
+ check_rate_limit, vrng);
+ vrng->activate_timer = true;
+ register_savevm(dev, "virtio-rng", -1, 1, virtio_rng_save,
+ virtio_rng_load, vrng);
+}
+
+static void virtio_rng_device_unrealize(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VirtIORNG *vrng = VIRTIO_RNG(dev);
+
+ timer_del(vrng->rate_limit_timer);
+ timer_free(vrng->rate_limit_timer);
+ unregister_savevm(dev, "virtio-rng", vrng);
+ virtio_cleanup(vdev);
+}
+
+static Property virtio_rng_properties[] = {
+    /* The default rate limit is INT64_MAX bytes per period, which is
+     * unlimited for all practical purposes. If you have an entropy source
+     * capable of generating more entropy than this and you can pass it
+     * through via virtio-rng, then hats off to you.
+ */
+ DEFINE_PROP_UINT64("max-bytes", VirtIORNG, conf.max_bytes, INT64_MAX),
+ DEFINE_PROP_UINT32("period", VirtIORNG, conf.period_ms, 1 << 16),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_rng_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+
+ dc->props = virtio_rng_properties;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ vdc->realize = virtio_rng_device_realize;
+ vdc->unrealize = virtio_rng_device_unrealize;
+ vdc->get_features = get_features;
+}
+
+static void virtio_rng_initfn(Object *obj)
+{
+ VirtIORNG *vrng = VIRTIO_RNG(obj);
+
+ object_property_add_link(obj, "rng", TYPE_RNG_BACKEND,
+ (Object **)&vrng->conf.rng,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE, NULL);
+}
+
+static const TypeInfo virtio_rng_info = {
+ .name = TYPE_VIRTIO_RNG,
+ .parent = TYPE_VIRTIO_DEVICE,
+ .instance_size = sizeof(VirtIORNG),
+ .instance_init = virtio_rng_initfn,
+ .class_init = virtio_rng_class_init,
+};
+
+static void virtio_register_types(void)
+{
+ type_register_static(&virtio_rng_info);
+}
+
+type_init(virtio_register_types)
diff --git a/src/hw/virtio/virtio.c b/src/hw/virtio/virtio.c
new file mode 100644
index 0000000..1edef59
--- /dev/null
+++ b/src/hw/virtio/virtio.c
@@ -0,0 +1,1803 @@
+/*
+ * Virtio Support
+ *
+ * Copyright IBM, Corp. 2007
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include <inttypes.h>
+
+#include "trace.h"
+#include "exec/address-spaces.h"
+#include "qemu/error-report.h"
+#include "hw/virtio/virtio.h"
+#include "qemu/atomic.h"
+#include "hw/virtio/virtio-bus.h"
+#include "migration/migration.h"
+#include "hw/virtio/virtio-access.h"
+
+/*
+ * The alignment to use between consumer and producer parts of vring.
+ * The x86 page size. This is the default, used by transports like PCI
+ * which don't provide a means for the guest to tell the host the alignment.
+ */
+#define VIRTIO_PCI_VRING_ALIGN 4096
+
+typedef struct VRingDesc
+{
+ uint64_t addr;
+ uint32_t len;
+ uint16_t flags;
+ uint16_t next;
+} VRingDesc;
+
+typedef struct VRingAvail
+{
+ uint16_t flags;
+ uint16_t idx;
+ uint16_t ring[0];
+} VRingAvail;
+
+typedef struct VRingUsedElem
+{
+ uint32_t id;
+ uint32_t len;
+} VRingUsedElem;
+
+typedef struct VRingUsed
+{
+ uint16_t flags;
+ uint16_t idx;
+ VRingUsedElem ring[0];
+} VRingUsed;
+
+typedef struct VRing
+{
+ unsigned int num;
+ unsigned int num_default;
+ unsigned int align;
+ hwaddr desc;
+ hwaddr avail;
+ hwaddr used;
+} VRing;
+
+struct VirtQueue
+{
+ VRing vring;
+ uint16_t last_avail_idx;
+ /* Last used index value we have signalled on */
+ uint16_t signalled_used;
+
+    /* Whether signalled_used is valid */
+ bool signalled_used_valid;
+
+ /* Notification enabled? */
+ bool notification;
+
+ uint16_t queue_index;
+
+ int inuse;
+
+ uint16_t vector;
+ void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
+ VirtIODevice *vdev;
+ EventNotifier guest_notifier;
+ EventNotifier host_notifier;
+ QLIST_ENTRY(VirtQueue) node;
+};
+
+/* virt queue functions */
+void virtio_queue_update_rings(VirtIODevice *vdev, int n)
+{
+ VRing *vring = &vdev->vq[n].vring;
+
+ if (!vring->desc) {
+ /* not yet setup -> nothing to do */
+ return;
+ }
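+    /* Legacy ring layout: the avail ring follows the descriptor table and
+     * the used ring follows the avail ring, aligned to vring.align. */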
+ vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
+ vring->used = vring_align(vring->avail +
+ offsetof(VRingAvail, ring[vring->num]),
+ vring->align);
+}
+
+static inline uint64_t vring_desc_addr(VirtIODevice *vdev, hwaddr desc_pa,
+ int i)
+{
+ hwaddr pa;
+ pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
+ return virtio_ldq_phys(vdev, pa);
+}
+
+static inline uint32_t vring_desc_len(VirtIODevice *vdev, hwaddr desc_pa, int i)
+{
+ hwaddr pa;
+ pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
+ return virtio_ldl_phys(vdev, pa);
+}
+
+static inline uint16_t vring_desc_flags(VirtIODevice *vdev, hwaddr desc_pa,
+ int i)
+{
+ hwaddr pa;
+ pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
+ return virtio_lduw_phys(vdev, pa);
+}
+
+static inline uint16_t vring_desc_next(VirtIODevice *vdev, hwaddr desc_pa,
+ int i)
+{
+ hwaddr pa;
+ pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
+ return virtio_lduw_phys(vdev, pa);
+}
+
+static inline uint16_t vring_avail_flags(VirtQueue *vq)
+{
+ hwaddr pa;
+ pa = vq->vring.avail + offsetof(VRingAvail, flags);
+ return virtio_lduw_phys(vq->vdev, pa);
+}
+
+static inline uint16_t vring_avail_idx(VirtQueue *vq)
+{
+ hwaddr pa;
+ pa = vq->vring.avail + offsetof(VRingAvail, idx);
+ return virtio_lduw_phys(vq->vdev, pa);
+}
+
+static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
+{
+ hwaddr pa;
+ pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
+ return virtio_lduw_phys(vq->vdev, pa);
+}
+
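+/* The used_event field is stored right after the avail ring, at avail->ring[num]. */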
+static inline uint16_t vring_get_used_event(VirtQueue *vq)
+{
+ return vring_avail_ring(vq, vq->vring.num);
+}
+
+static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
+{
+ hwaddr pa;
+ pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
+ virtio_stl_phys(vq->vdev, pa, val);
+}
+
+static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
+{
+ hwaddr pa;
+ pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
+ virtio_stl_phys(vq->vdev, pa, val);
+}
+
+static uint16_t vring_used_idx(VirtQueue *vq)
+{
+ hwaddr pa;
+ pa = vq->vring.used + offsetof(VRingUsed, idx);
+ return virtio_lduw_phys(vq->vdev, pa);
+}
+
+static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
+{
+ hwaddr pa;
+ pa = vq->vring.used + offsetof(VRingUsed, idx);
+ virtio_stw_phys(vq->vdev, pa, val);
+}
+
+static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
+{
+ VirtIODevice *vdev = vq->vdev;
+ hwaddr pa;
+ pa = vq->vring.used + offsetof(VRingUsed, flags);
+ virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
+}
+
+static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
+{
+ VirtIODevice *vdev = vq->vdev;
+ hwaddr pa;
+ pa = vq->vring.used + offsetof(VRingUsed, flags);
+ virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
+}
+
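+/* The avail_event field is stored right after the used ring, at used->ring[num]. */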
+static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
+{
+ hwaddr pa;
+ if (!vq->notification) {
+ return;
+ }
+ pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
+ virtio_stw_phys(vq->vdev, pa, val);
+}
+
+void virtio_queue_set_notification(VirtQueue *vq, int enable)
+{
+ vq->notification = enable;
+ if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
+ vring_set_avail_event(vq, vring_avail_idx(vq));
+ } else if (enable) {
+ vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
+ } else {
+ vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
+ }
+ if (enable) {
+ /* Expose avail event/used flags before caller checks the avail idx. */
+ smp_mb();
+ }
+}
+
+int virtio_queue_ready(VirtQueue *vq)
+{
+ return vq->vring.avail != 0;
+}
+
+int virtio_queue_empty(VirtQueue *vq)
+{
+ return vring_avail_idx(vq) == vq->last_avail_idx;
+}
+
+static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
+ unsigned int len)
+{
+ unsigned int offset;
+ int i;
+
+ offset = 0;
+ for (i = 0; i < elem->in_num; i++) {
+ size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
+
+ cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
+ elem->in_sg[i].iov_len,
+ 1, size);
+
+ offset += size;
+ }
+
+ for (i = 0; i < elem->out_num; i++)
+ cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
+ elem->out_sg[i].iov_len,
+ 0, elem->out_sg[i].iov_len);
+}
+
+void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem,
+ unsigned int len)
+{
+ vq->last_avail_idx--;
+ virtqueue_unmap_sg(vq, elem, len);
+}
+
+void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
+ unsigned int len, unsigned int idx)
+{
+ trace_virtqueue_fill(vq, elem, len, idx);
+
+ virtqueue_unmap_sg(vq, elem, len);
+
+ idx = (idx + vring_used_idx(vq)) % vq->vring.num;
+
+ /* Get a pointer to the next entry in the used ring. */
+ vring_used_ring_id(vq, idx, elem->index);
+ vring_used_ring_len(vq, idx, len);
+}
+
+void virtqueue_flush(VirtQueue *vq, unsigned int count)
+{
+ uint16_t old, new;
+ /* Make sure buffer is written before we update index. */
+ smp_wmb();
+ trace_virtqueue_flush(vq, count);
+ old = vring_used_idx(vq);
+ new = old + count;
+ vring_used_idx_set(vq, new);
+ vq->inuse -= count;
+ if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
+ vq->signalled_used_valid = false;
+}
+
+void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
+ unsigned int len)
+{
+ virtqueue_fill(vq, elem, len, 0);
+ virtqueue_flush(vq, 1);
+}
+
+static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
+{
+ uint16_t num_heads = vring_avail_idx(vq) - idx;
+
+ /* Check it isn't doing very strange things with descriptor numbers. */
+ if (num_heads > vq->vring.num) {
+ error_report("Guest moved used index from %u to %u",
+ idx, vring_avail_idx(vq));
+ exit(1);
+ }
+ /* On success, callers read a descriptor at vq->last_avail_idx.
+ * Make sure descriptor read does not bypass avail index read. */
+ if (num_heads) {
+ smp_rmb();
+ }
+
+ return num_heads;
+}
+
+static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
+{
+ unsigned int head;
+
+ /* Grab the next descriptor number they're advertising, and increment
+ * the index we've seen. */
+ head = vring_avail_ring(vq, idx % vq->vring.num);
+
+ /* If their number is silly, that's a fatal mistake. */
+ if (head >= vq->vring.num) {
+ error_report("Guest says index %u is available", head);
+ exit(1);
+ }
+
+ return head;
+}
+
+static unsigned virtqueue_next_desc(VirtIODevice *vdev, hwaddr desc_pa,
+ unsigned int i, unsigned int max)
+{
+ unsigned int next;
+
+ /* If this descriptor says it doesn't chain, we're done. */
+ if (!(vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_NEXT)) {
+ return max;
+ }
+
+    /* Check they're not leading us off the end of the descriptor table. */
+ next = vring_desc_next(vdev, desc_pa, i);
+ /* Make sure compiler knows to grab that: we don't want it changing! */
+ smp_wmb();
+
+ if (next >= max) {
+ error_report("Desc next is %u", next);
+ exit(1);
+ }
+
+ return next;
+}
+
+void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
+ unsigned int *out_bytes,
+ unsigned max_in_bytes, unsigned max_out_bytes)
+{
+ unsigned int idx;
+ unsigned int total_bufs, in_total, out_total;
+
+ idx = vq->last_avail_idx;
+
+ total_bufs = in_total = out_total = 0;
+ while (virtqueue_num_heads(vq, idx)) {
+ VirtIODevice *vdev = vq->vdev;
+ unsigned int max, num_bufs, indirect = 0;
+ hwaddr desc_pa;
+ int i;
+
+ max = vq->vring.num;
+ num_bufs = total_bufs;
+ i = virtqueue_get_head(vq, idx++);
+ desc_pa = vq->vring.desc;
+
+ if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_INDIRECT) {
+ if (vring_desc_len(vdev, desc_pa, i) % sizeof(VRingDesc)) {
+ error_report("Invalid size for indirect buffer table");
+ exit(1);
+ }
+
+ /* If we've got too many, that implies a descriptor loop. */
+ if (num_bufs >= max) {
+ error_report("Looped descriptor");
+ exit(1);
+ }
+
+ /* loop over the indirect descriptor table */
+ indirect = 1;
+ max = vring_desc_len(vdev, desc_pa, i) / sizeof(VRingDesc);
+ desc_pa = vring_desc_addr(vdev, desc_pa, i);
+ num_bufs = i = 0;
+ }
+
+ do {
+ /* If we've got too many, that implies a descriptor loop. */
+ if (++num_bufs > max) {
+ error_report("Looped descriptor");
+ exit(1);
+ }
+
+ if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_WRITE) {
+ in_total += vring_desc_len(vdev, desc_pa, i);
+ } else {
+ out_total += vring_desc_len(vdev, desc_pa, i);
+ }
+ if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
+ goto done;
+ }
+ } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max);
+
+ if (!indirect)
+ total_bufs = num_bufs;
+ else
+ total_bufs++;
+ }
+done:
+ if (in_bytes) {
+ *in_bytes = in_total;
+ }
+ if (out_bytes) {
+ *out_bytes = out_total;
+ }
+}
+
+int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
+ unsigned int out_bytes)
+{
+ unsigned int in_total, out_total;
+
+ virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
+ return in_bytes <= in_total && out_bytes <= out_total;
+}
+
+static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr,
+ unsigned int *num_sg, unsigned int max_size,
+ int is_write)
+{
+ unsigned int i;
+ hwaddr len;
+
+    /* Note: this function MUST validate its input; some callers
+ * are passing in num_sg values received over the network.
+ */
+ /* TODO: teach all callers that this can fail, and return failure instead
+ * of asserting here.
+ * When we do, we might be able to re-enable NDEBUG below.
+ */
+#ifdef NDEBUG
+#error building with NDEBUG is not supported
+#endif
+ assert(*num_sg <= max_size);
+
+ for (i = 0; i < *num_sg; i++) {
+ len = sg[i].iov_len;
+ sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
+ if (!sg[i].iov_base) {
+ error_report("virtio: error trying to map MMIO memory");
+ exit(1);
+ }
+ if (len == sg[i].iov_len) {
+ continue;
+ }
+ if (*num_sg >= max_size) {
+ error_report("virtio: memory split makes iovec too large");
+ exit(1);
+ }
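+        /* The mapping stopped short of iov_len at a memory region boundary,
+         * so split this element and map the remainder as a new sg entry on
+         * the next iteration. */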
+ memmove(sg + i + 1, sg + i, sizeof(*sg) * (*num_sg - i));
+ memmove(addr + i + 1, addr + i, sizeof(*addr) * (*num_sg - i));
+ assert(len < sg[i + 1].iov_len);
+ sg[i].iov_len = len;
+ addr[i + 1] += len;
+ sg[i + 1].iov_len -= len;
+ ++*num_sg;
+ }
+}
+
+void virtqueue_map(VirtQueueElement *elem)
+{
+ virtqueue_map_iovec(elem->in_sg, elem->in_addr, &elem->in_num,
+ MIN(ARRAY_SIZE(elem->in_sg), ARRAY_SIZE(elem->in_addr)),
+ 1);
+ virtqueue_map_iovec(elem->out_sg, elem->out_addr, &elem->out_num,
+ MIN(ARRAY_SIZE(elem->out_sg), ARRAY_SIZE(elem->out_addr)),
+ 0);
+}
+
+int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
+{
+ unsigned int i, head, max;
+ hwaddr desc_pa = vq->vring.desc;
+ VirtIODevice *vdev = vq->vdev;
+
+ if (!virtqueue_num_heads(vq, vq->last_avail_idx))
+ return 0;
+
+    /* When we start there are neither input nor output buffers. */
+ elem->out_num = elem->in_num = 0;
+
+ max = vq->vring.num;
+
+ i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
+ if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
+ vring_set_avail_event(vq, vq->last_avail_idx);
+ }
+
+ if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_INDIRECT) {
+ if (vring_desc_len(vdev, desc_pa, i) % sizeof(VRingDesc)) {
+ error_report("Invalid size for indirect buffer table");
+ exit(1);
+ }
+
+ /* loop over the indirect descriptor table */
+ max = vring_desc_len(vdev, desc_pa, i) / sizeof(VRingDesc);
+ desc_pa = vring_desc_addr(vdev, desc_pa, i);
+ i = 0;
+ }
+
+ /* Collect all the descriptors */
+ do {
+ struct iovec *sg;
+
+ if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_WRITE) {
+ if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) {
+ error_report("Too many write descriptors in indirect table");
+ exit(1);
+ }
+ elem->in_addr[elem->in_num] = vring_desc_addr(vdev, desc_pa, i);
+ sg = &elem->in_sg[elem->in_num++];
+ } else {
+ if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) {
+ error_report("Too many read descriptors in indirect table");
+ exit(1);
+ }
+ elem->out_addr[elem->out_num] = vring_desc_addr(vdev, desc_pa, i);
+ sg = &elem->out_sg[elem->out_num++];
+ }
+
+ sg->iov_len = vring_desc_len(vdev, desc_pa, i);
+
+ /* If we've got too many, that implies a descriptor loop. */
+ if ((elem->in_num + elem->out_num) > max) {
+ error_report("Looped descriptor");
+ exit(1);
+ }
+ } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max);
+
+ /* Now map what we have collected */
+ virtqueue_map(elem);
+
+ elem->index = head;
+
+ vq->inuse++;
+
+ trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
+ return elem->in_num + elem->out_num;
+}
+
+/* virtio device */
+static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
+{
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+
+ if (k->notify) {
+ k->notify(qbus->parent, vector);
+ }
+}
+
+void virtio_update_irq(VirtIODevice *vdev)
+{
+ virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
+}
+
+static int virtio_validate_features(VirtIODevice *vdev)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+
+ if (k->validate_features) {
+ return k->validate_features(vdev);
+ } else {
+ return 0;
+ }
+}
+
+int virtio_set_status(VirtIODevice *vdev, uint8_t val)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ trace_virtio_set_status(vdev, val);
+
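+    /* For virtio 1.0 devices, validate the negotiated features before
+     * accepting the FEATURES_OK status bit. */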
+ if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
+ if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
+ val & VIRTIO_CONFIG_S_FEATURES_OK) {
+ int ret = virtio_validate_features(vdev);
+
+ if (ret) {
+ return ret;
+ }
+ }
+ }
+ if (k->set_status) {
+ k->set_status(vdev, val);
+ }
+ vdev->status = val;
+ return 0;
+}
+
+bool target_words_bigendian(void);
+static enum virtio_device_endian virtio_default_endian(void)
+{
+ if (target_words_bigendian()) {
+ return VIRTIO_DEVICE_ENDIAN_BIG;
+ } else {
+ return VIRTIO_DEVICE_ENDIAN_LITTLE;
+ }
+}
+
+static enum virtio_device_endian virtio_current_cpu_endian(void)
+{
+ CPUClass *cc = CPU_GET_CLASS(current_cpu);
+
+ if (cc->virtio_is_big_endian(current_cpu)) {
+ return VIRTIO_DEVICE_ENDIAN_BIG;
+ } else {
+ return VIRTIO_DEVICE_ENDIAN_LITTLE;
+ }
+}
+
+void virtio_reset(void *opaque)
+{
+ VirtIODevice *vdev = opaque;
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ int i;
+
+ virtio_set_status(vdev, 0);
+ if (current_cpu) {
+ /* Guest initiated reset */
+ vdev->device_endian = virtio_current_cpu_endian();
+ } else {
+ /* System reset */
+ vdev->device_endian = virtio_default_endian();
+ }
+
+ if (k->reset) {
+ k->reset(vdev);
+ }
+
+ vdev->guest_features = 0;
+ vdev->queue_sel = 0;
+ vdev->status = 0;
+ vdev->isr = 0;
+ vdev->config_vector = VIRTIO_NO_VECTOR;
+ virtio_notify_vector(vdev, vdev->config_vector);
+
+ for(i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ vdev->vq[i].vring.desc = 0;
+ vdev->vq[i].vring.avail = 0;
+ vdev->vq[i].vring.used = 0;
+ vdev->vq[i].last_avail_idx = 0;
+ virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
+ vdev->vq[i].signalled_used = 0;
+ vdev->vq[i].signalled_used_valid = false;
+ vdev->vq[i].notification = true;
+ vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
+ }
+}
+
+uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ uint8_t val;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return (uint32_t)-1;
+ }
+
+ k->get_config(vdev, vdev->config);
+
+ val = ldub_p(vdev->config + addr);
+ return val;
+}
+
+uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ uint16_t val;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return (uint32_t)-1;
+ }
+
+ k->get_config(vdev, vdev->config);
+
+ val = lduw_p(vdev->config + addr);
+ return val;
+}
+
+uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ uint32_t val;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return (uint32_t)-1;
+ }
+
+ k->get_config(vdev, vdev->config);
+
+ val = ldl_p(vdev->config + addr);
+ return val;
+}
+
+void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ uint8_t val = data;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return;
+ }
+
+ stb_p(vdev->config + addr, val);
+
+ if (k->set_config) {
+ k->set_config(vdev, vdev->config);
+ }
+}
+
+void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ uint16_t val = data;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return;
+ }
+
+ stw_p(vdev->config + addr, val);
+
+ if (k->set_config) {
+ k->set_config(vdev, vdev->config);
+ }
+}
+
+void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ uint32_t val = data;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return;
+ }
+
+ stl_p(vdev->config + addr, val);
+
+ if (k->set_config) {
+ k->set_config(vdev, vdev->config);
+ }
+}
+
+uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ uint8_t val;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return (uint32_t)-1;
+ }
+
+ k->get_config(vdev, vdev->config);
+
+ val = ldub_p(vdev->config + addr);
+ return val;
+}
+
+uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ uint16_t val;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return (uint32_t)-1;
+ }
+
+ k->get_config(vdev, vdev->config);
+
+ val = lduw_le_p(vdev->config + addr);
+ return val;
+}
+
+uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ uint32_t val;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return (uint32_t)-1;
+ }
+
+ k->get_config(vdev, vdev->config);
+
+ val = ldl_le_p(vdev->config + addr);
+ return val;
+}
+
+void virtio_config_modern_writeb(VirtIODevice *vdev,
+ uint32_t addr, uint32_t data)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ uint8_t val = data;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return;
+ }
+
+ stb_p(vdev->config + addr, val);
+
+ if (k->set_config) {
+ k->set_config(vdev, vdev->config);
+ }
+}
+
+void virtio_config_modern_writew(VirtIODevice *vdev,
+ uint32_t addr, uint32_t data)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ uint16_t val = data;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return;
+ }
+
+ stw_le_p(vdev->config + addr, val);
+
+ if (k->set_config) {
+ k->set_config(vdev, vdev->config);
+ }
+}
+
+void virtio_config_modern_writel(VirtIODevice *vdev,
+ uint32_t addr, uint32_t data)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ uint32_t val = data;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return;
+ }
+
+ stl_le_p(vdev->config + addr, val);
+
+ if (k->set_config) {
+ k->set_config(vdev, vdev->config);
+ }
+}
+
+void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
+{
+ vdev->vq[n].vring.desc = addr;
+ virtio_queue_update_rings(vdev, n);
+}
+
+hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
+{
+ return vdev->vq[n].vring.desc;
+}
+
+void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
+ hwaddr avail, hwaddr used)
+{
+ vdev->vq[n].vring.desc = desc;
+ vdev->vq[n].vring.avail = avail;
+ vdev->vq[n].vring.used = used;
+}
+
+void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
+{
+ /* Don't allow guest to flip queue between existent and
+ * nonexistent states, or to set it to an invalid size.
+ */
+ if (!!num != !!vdev->vq[n].vring.num ||
+ num > VIRTQUEUE_MAX_SIZE ||
+ num < 0) {
+ return;
+ }
+ vdev->vq[n].vring.num = num;
+}
+
+VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
+{
+ return QLIST_FIRST(&vdev->vector_queues[vector]);
+}
+
+VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
+{
+ return QLIST_NEXT(vq, node);
+}
+
+int virtio_queue_get_num(VirtIODevice *vdev, int n)
+{
+ return vdev->vq[n].vring.num;
+}
+
+int virtio_get_num_queues(VirtIODevice *vdev)
+{
+ int i;
+
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ if (!virtio_queue_get_num(vdev, i)) {
+ break;
+ }
+ }
+
+ return i;
+}
+
+int virtio_queue_get_id(VirtQueue *vq)
+{
+ VirtIODevice *vdev = vq->vdev;
+ assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_QUEUE_MAX]);
+ return vq - &vdev->vq[0];
+}
+
+void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
+{
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+
+ /* virtio-1 compliant devices cannot change the alignment */
+ if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
+ error_report("tried to modify queue alignment for virtio-1 device");
+ return;
+ }
+ /* Check that the transport told us it was going to do this
+ * (so a buggy transport will immediately assert rather than
+ * silently failing to migrate this state)
+ */
+ assert(k->has_variable_vring_alignment);
+
+ vdev->vq[n].vring.align = align;
+ virtio_queue_update_rings(vdev, n);
+}
+
+void virtio_queue_notify_vq(VirtQueue *vq)
+{
+ if (vq->vring.desc && vq->handle_output) {
+ VirtIODevice *vdev = vq->vdev;
+
+ trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
+ vq->handle_output(vdev, vq);
+ }
+}
+
+void virtio_queue_notify(VirtIODevice *vdev, int n)
+{
+ virtio_queue_notify_vq(&vdev->vq[n]);
+}
+
+uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
+{
+ return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
+ VIRTIO_NO_VECTOR;
+}
+
+void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
+{
+ VirtQueue *vq = &vdev->vq[n];
+
+ if (n < VIRTIO_QUEUE_MAX) {
+ if (vdev->vector_queues &&
+ vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
+ QLIST_REMOVE(vq, node);
+ }
+ vdev->vq[n].vector = vector;
+ if (vdev->vector_queues &&
+ vector != VIRTIO_NO_VECTOR) {
+ QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
+ }
+ }
+}
+
+VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
+ void (*handle_output)(VirtIODevice *, VirtQueue *))
+{
+ int i;
+
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ if (vdev->vq[i].vring.num == 0)
+ break;
+ }
+
+ if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
+ abort();
+
+ vdev->vq[i].vring.num = queue_size;
+ vdev->vq[i].vring.num_default = queue_size;
+ vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
+ vdev->vq[i].handle_output = handle_output;
+
+ return &vdev->vq[i];
+}
+
+void virtio_del_queue(VirtIODevice *vdev, int n)
+{
+ if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
+ abort();
+ }
+
+ vdev->vq[n].vring.num = 0;
+ vdev->vq[n].vring.num_default = 0;
+}
+
+void virtio_irq(VirtQueue *vq)
+{
+ trace_virtio_irq(vq);
+ vq->vdev->isr |= 0x01;
+ virtio_notify_vector(vq->vdev, vq->vector);
+}
+
+static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
+{
+ uint16_t old, new;
+ bool v;
+ /* We need to expose used array entries before checking used event. */
+ smp_mb();
+    /* Always notify when the queue is empty, if the guest acknowledged
+     * the VIRTIO_F_NOTIFY_ON_EMPTY feature. */
+ if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
+ !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx) {
+ return true;
+ }
+
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
+ return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
+ }
+
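+    /* With VIRTIO_RING_F_EVENT_IDX, notify only if the used index has moved
+     * past the guest's used_event value since the last notification. */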
+ v = vq->signalled_used_valid;
+ vq->signalled_used_valid = true;
+ old = vq->signalled_used;
+ new = vq->signalled_used = vring_used_idx(vq);
+ return !v || vring_need_event(vring_get_used_event(vq), new, old);
+}
+
+void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
+{
+ if (!vring_notify(vdev, vq)) {
+ return;
+ }
+
+ trace_virtio_notify(vdev, vq);
+ vdev->isr |= 0x01;
+ virtio_notify_vector(vdev, vq->vector);
+}
+
+void virtio_notify_config(VirtIODevice *vdev)
+{
+ if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
+ return;
+
+ vdev->isr |= 0x03;
+ vdev->generation++;
+ virtio_notify_vector(vdev, vdev->config_vector);
+}
+
+static bool virtio_device_endian_needed(void *opaque)
+{
+ VirtIODevice *vdev = opaque;
+
+ assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
+ return vdev->device_endian != virtio_default_endian();
+ }
+ /* Devices conforming to VIRTIO 1.0 or later are always LE. */
+ return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
+}
+
+static bool virtio_64bit_features_needed(void *opaque)
+{
+ VirtIODevice *vdev = opaque;
+
+ return (vdev->host_features >> 32) != 0;
+}
+
+static bool virtio_virtqueue_needed(void *opaque)
+{
+ VirtIODevice *vdev = opaque;
+
+ return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
+}
+
+static bool virtio_ringsize_needed(void *opaque)
+{
+ VirtIODevice *vdev = opaque;
+ int i;
+
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool virtio_extra_state_needed(void *opaque)
+{
+ VirtIODevice *vdev = opaque;
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+
+ return k->has_extra_state &&
+ k->has_extra_state(qbus->parent);
+}
+
+static void put_virtqueue_state(QEMUFile *f, void *pv, size_t size)
+{
+ VirtIODevice *vdev = pv;
+ int i;
+
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ qemu_put_be64(f, vdev->vq[i].vring.avail);
+ qemu_put_be64(f, vdev->vq[i].vring.used);
+ }
+}
+
+static int get_virtqueue_state(QEMUFile *f, void *pv, size_t size)
+{
+ VirtIODevice *vdev = pv;
+ int i;
+
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ vdev->vq[i].vring.avail = qemu_get_be64(f);
+ vdev->vq[i].vring.used = qemu_get_be64(f);
+ }
+ return 0;
+}
+
+static VMStateInfo vmstate_info_virtqueue = {
+ .name = "virtqueue_state",
+ .get = get_virtqueue_state,
+ .put = put_virtqueue_state,
+};
+
+static const VMStateDescription vmstate_virtio_virtqueues = {
+ .name = "virtio/virtqueues",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = &virtio_virtqueue_needed,
+ .fields = (VMStateField[]) {
+ {
+ .name = "virtqueues",
+ .version_id = 0,
+ .field_exists = NULL,
+ .size = 0,
+ .info = &vmstate_info_virtqueue,
+ .flags = VMS_SINGLE,
+ .offset = 0,
+ },
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void put_ringsize_state(QEMUFile *f, void *pv, size_t size)
+{
+ VirtIODevice *vdev = pv;
+ int i;
+
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ qemu_put_be32(f, vdev->vq[i].vring.num_default);
+ }
+}
+
+static int get_ringsize_state(QEMUFile *f, void *pv, size_t size)
+{
+ VirtIODevice *vdev = pv;
+ int i;
+
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ vdev->vq[i].vring.num_default = qemu_get_be32(f);
+ }
+ return 0;
+}
+
+static VMStateInfo vmstate_info_ringsize = {
+ .name = "ringsize_state",
+ .get = get_ringsize_state,
+ .put = put_ringsize_state,
+};
+
+static const VMStateDescription vmstate_virtio_ringsize = {
+ .name = "virtio/ringsize",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = &virtio_ringsize_needed,
+ .fields = (VMStateField[]) {
+ {
+ .name = "ringsize",
+ .version_id = 0,
+ .field_exists = NULL,
+ .size = 0,
+ .info = &vmstate_info_ringsize,
+ .flags = VMS_SINGLE,
+ .offset = 0,
+ },
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static int get_extra_state(QEMUFile *f, void *pv, size_t size)
+{
+ VirtIODevice *vdev = pv;
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+
+ if (!k->load_extra_state) {
+ return -1;
+ } else {
+ return k->load_extra_state(qbus->parent, f);
+ }
+}
+
+static void put_extra_state(QEMUFile *f, void *pv, size_t size)
+{
+ VirtIODevice *vdev = pv;
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+
+ k->save_extra_state(qbus->parent, f);
+}
+
+static const VMStateInfo vmstate_info_extra_state = {
+ .name = "virtqueue_extra_state",
+ .get = get_extra_state,
+ .put = put_extra_state,
+};
+
+static const VMStateDescription vmstate_virtio_extra_state = {
+ .name = "virtio/extra_state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = &virtio_extra_state_needed,
+ .fields = (VMStateField[]) {
+ {
+ .name = "extra_state",
+ .version_id = 0,
+ .field_exists = NULL,
+ .size = 0,
+ .info = &vmstate_info_extra_state,
+ .flags = VMS_SINGLE,
+ .offset = 0,
+ },
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_virtio_device_endian = {
+ .name = "virtio/device_endian",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = &virtio_device_endian_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(device_endian, VirtIODevice),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_virtio_64bit_features = {
+ .name = "virtio/64bit_features",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = &virtio_64bit_features_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(guest_features, VirtIODevice),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_virtio = {
+ .name = "virtio",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_virtio_device_endian,
+ &vmstate_virtio_64bit_features,
+ &vmstate_virtio_virtqueues,
+ &vmstate_virtio_ringsize,
+ &vmstate_virtio_extra_state,
+ NULL
+ }
+};
+
+void virtio_save(VirtIODevice *vdev, QEMUFile *f)
+{
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+ uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
+ int i;
+
+ if (k->save_config) {
+ k->save_config(qbus->parent, f);
+ }
+
+ qemu_put_8s(f, &vdev->status);
+ qemu_put_8s(f, &vdev->isr);
+ qemu_put_be16s(f, &vdev->queue_sel);
+ qemu_put_be32s(f, &guest_features_lo);
+ qemu_put_be32(f, vdev->config_len);
+ qemu_put_buffer(f, vdev->config, vdev->config_len);
+
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ if (vdev->vq[i].vring.num == 0)
+ break;
+ }
+
+ qemu_put_be32(f, i);
+
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ if (vdev->vq[i].vring.num == 0)
+ break;
+
+ qemu_put_be32(f, vdev->vq[i].vring.num);
+ if (k->has_variable_vring_alignment) {
+ qemu_put_be32(f, vdev->vq[i].vring.align);
+ }
+ /* XXX virtio-1 devices */
+ qemu_put_be64(f, vdev->vq[i].vring.desc);
+ qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
+ if (k->save_queue) {
+ k->save_queue(qbus->parent, i, f);
+ }
+ }
+
+ if (vdc->save != NULL) {
+ vdc->save(vdev, f);
+ }
+
+ /* Subsections */
+ vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
+}
+
+static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ bool bad = (val & ~(vdev->host_features)) != 0;
+
+ val &= vdev->host_features;
+ if (k->set_features) {
+ k->set_features(vdev, val);
+ }
+ vdev->guest_features = val;
+ return bad ? -1 : 0;
+}
+
+int virtio_set_features(VirtIODevice *vdev, uint64_t val)
+{
+ /*
+ * The driver must not attempt to set features after feature negotiation
+ * has finished.
+ */
+ if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
+ return -EINVAL;
+ }
+ return virtio_set_features_nocheck(vdev, val);
+}
+
+int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
+{
+ int i, ret;
+ int32_t config_len;
+ uint32_t num;
+ uint32_t features;
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+
+ /*
+ * We poison the endianness to ensure it does not get used before
+ * subsections have been loaded.
+ */
+ vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
+
+ if (k->load_config) {
+ ret = k->load_config(qbus->parent, f);
+        if (ret) {
+            return ret;
+        }
+ }
+
+ qemu_get_8s(f, &vdev->status);
+ qemu_get_8s(f, &vdev->isr);
+ qemu_get_be16s(f, &vdev->queue_sel);
+ if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
+ return -1;
+ }
+ qemu_get_be32s(f, &features);
+
+ config_len = qemu_get_be32(f);
+
+ /*
+ * There are cases where the incoming config can be bigger or smaller
+ * than what we have; so load what we have space for, and skip
+ * any excess that's in the stream.
+ */
+ qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
+
+ while (config_len > vdev->config_len) {
+ qemu_get_byte(f);
+ config_len--;
+ }
+
+ num = qemu_get_be32(f);
+
+ if (num > VIRTIO_QUEUE_MAX) {
+ error_report("Invalid number of PCI queues: 0x%x", num);
+ return -1;
+ }
+
+ for (i = 0; i < num; i++) {
+ vdev->vq[i].vring.num = qemu_get_be32(f);
+ if (k->has_variable_vring_alignment) {
+ vdev->vq[i].vring.align = qemu_get_be32(f);
+ }
+ vdev->vq[i].vring.desc = qemu_get_be64(f);
+ qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
+ vdev->vq[i].signalled_used_valid = false;
+ vdev->vq[i].notification = true;
+
+ if (vdev->vq[i].vring.desc) {
+ /* XXX virtio-1 devices */
+ virtio_queue_update_rings(vdev, i);
+ } else if (vdev->vq[i].last_avail_idx) {
+ error_report("VQ %d address 0x0 "
+ "inconsistent with Host index 0x%x",
+ i, vdev->vq[i].last_avail_idx);
+ return -1;
+ }
+ if (k->load_queue) {
+ ret = k->load_queue(qbus->parent, i, f);
+            if (ret) {
+                return ret;
+            }
+ }
+ }
+
+ virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
+
+ if (vdc->load != NULL) {
+ ret = vdc->load(vdev, f, version_id);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ /* Subsections */
+ ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
+ if (ret) {
+ return ret;
+ }
+
+ if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
+ vdev->device_endian = virtio_default_endian();
+ }
+
+ if (virtio_64bit_features_needed(vdev)) {
+        /*
+         * Subsection load filled vdev->guest_features with the full 64-bit
+         * value; run it through virtio_set_features_nocheck() to
+         * sanity-check it against host_features.
+         */
+ uint64_t features64 = vdev->guest_features;
+ if (virtio_set_features_nocheck(vdev, features64) < 0) {
+ error_report("Features 0x%" PRIx64 " unsupported. "
+ "Allowed features: 0x%" PRIx64,
+ features64, vdev->host_features);
+ return -1;
+ }
+ } else {
+ if (virtio_set_features_nocheck(vdev, features) < 0) {
+ error_report("Features 0x%x unsupported. "
+ "Allowed features: 0x%" PRIx64,
+ features, vdev->host_features);
+ return -1;
+ }
+ }
+
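+    /* Now that the rings are mapped and the features applied, make sure each
+     * guest avail index is consistent with the loaded last_avail_idx. */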
+ for (i = 0; i < num; i++) {
+ if (vdev->vq[i].vring.desc) {
+ uint16_t nheads;
+ nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
+ /* Check it isn't doing strange things with descriptor numbers. */
+ if (nheads > vdev->vq[i].vring.num) {
+ error_report("VQ %d size 0x%x Guest index 0x%x "
+ "inconsistent with Host index 0x%x: delta 0x%x",
+ i, vdev->vq[i].vring.num,
+ vring_avail_idx(&vdev->vq[i]),
+ vdev->vq[i].last_avail_idx, nheads);
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void virtio_cleanup(VirtIODevice *vdev)
+{
+ qemu_del_vm_change_state_handler(vdev->vmstate);
+ g_free(vdev->config);
+ g_free(vdev->vq);
+ g_free(vdev->vector_queues);
+}
+
+static void virtio_vmstate_change(void *opaque, int running, RunState state)
+{
+ VirtIODevice *vdev = opaque;
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+ bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
+ vdev->vm_running = running;
+
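+    /* Propagate the device status before notifying the bus when starting,
+     * and after notifying it when stopping. */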
+ if (backend_run) {
+ virtio_set_status(vdev, vdev->status);
+ }
+
+ if (k->vmstate_change) {
+ k->vmstate_change(qbus->parent, backend_run);
+ }
+
+ if (!backend_run) {
+ virtio_set_status(vdev, vdev->status);
+ }
+}
+
+void virtio_instance_init_common(Object *proxy_obj, void *data,
+ size_t vdev_size, const char *vdev_name)
+{
+ DeviceState *vdev = data;
+
+ object_initialize(vdev, vdev_size, vdev_name);
+ object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
+ object_unref(OBJECT(vdev));
+ qdev_alias_all_properties(vdev, proxy_obj);
+}
+
+void virtio_init(VirtIODevice *vdev, const char *name,
+ uint16_t device_id, size_t config_size)
+{
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+ int i;
+ int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
+
+ if (nvectors) {
+ vdev->vector_queues =
+ g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
+ }
+
+ vdev->device_id = device_id;
+ vdev->status = 0;
+ vdev->isr = 0;
+ vdev->queue_sel = 0;
+ vdev->config_vector = VIRTIO_NO_VECTOR;
+ vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
+ vdev->vm_running = runstate_is_running();
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ vdev->vq[i].vector = VIRTIO_NO_VECTOR;
+ vdev->vq[i].vdev = vdev;
+ vdev->vq[i].queue_index = i;
+ }
+
+ vdev->name = name;
+ vdev->config_len = config_size;
+ if (vdev->config_len) {
+ vdev->config = g_malloc0(config_size);
+ } else {
+ vdev->config = NULL;
+ }
+ vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
+ vdev);
+ vdev->device_endian = virtio_default_endian();
+}
+
+hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
+{
+ return vdev->vq[n].vring.desc;
+}
+
+hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
+{
+ return vdev->vq[n].vring.avail;
+}
+
+hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
+{
+ return vdev->vq[n].vring.used;
+}
+
+hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
+{
+ return vdev->vq[n].vring.desc;
+}
+
+hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
+{
+ return sizeof(VRingDesc) * vdev->vq[n].vring.num;
+}
+
+hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
+{
+ return offsetof(VRingAvail, ring) +
+ sizeof(uint16_t) * vdev->vq[n].vring.num;
+}
+
+hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
+{
+ return offsetof(VRingUsed, ring) +
+ sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
+}
+
+hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
+{
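+    /* Assumes the legacy (pre-virtio-1) layout in which desc, avail and used
+     * occupy one contiguous region starting at the descriptor table. */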
+ return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
+ virtio_queue_get_used_size(vdev, n);
+}
+
+uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
+{
+ return vdev->vq[n].last_avail_idx;
+}
+
+void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
+{
+ vdev->vq[n].last_avail_idx = idx;
+}
+
+void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
+{
+ vdev->vq[n].signalled_used_valid = false;
+}
+
+VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
+{
+ return vdev->vq + n;
+}
+
+uint16_t virtio_get_queue_index(VirtQueue *vq)
+{
+ return vq->queue_index;
+}
+
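+/* Handler installed when no irqfd is available: drain the guest notifier
+ * eventfd and inject the interrupt for this queue. */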
+static void virtio_queue_guest_notifier_read(EventNotifier *n)
+{
+ VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
+ if (event_notifier_test_and_clear(n)) {
+ virtio_irq(vq);
+ }
+}
+
+void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
+ bool with_irqfd)
+{
+ if (assign && !with_irqfd) {
+ event_notifier_set_handler(&vq->guest_notifier,
+ virtio_queue_guest_notifier_read);
+ } else {
+ event_notifier_set_handler(&vq->guest_notifier, NULL);
+ }
+ if (!assign) {
+ /* Test and clear notifier before closing it,
+ * in case poll callback didn't have time to run. */
+ virtio_queue_guest_notifier_read(&vq->guest_notifier);
+ }
+}
+
+EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
+{
+ return &vq->guest_notifier;
+}
+
+static void virtio_queue_host_notifier_read(EventNotifier *n)
+{
+ VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
+ if (event_notifier_test_and_clear(n)) {
+ virtio_queue_notify_vq(vq);
+ }
+}
+
+void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
+ bool set_handler)
+{
+ if (assign && set_handler) {
+ event_notifier_set_handler(&vq->host_notifier,
+ virtio_queue_host_notifier_read);
+ } else {
+ event_notifier_set_handler(&vq->host_notifier, NULL);
+ }
+ if (!assign) {
+        /* Test and clear the notifier after disabling the event handler,
+         * in case the poll callback didn't have time to run. */
+ virtio_queue_host_notifier_read(&vq->host_notifier);
+ }
+}
+
+EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
+{
+ return &vq->host_notifier;
+}
+
+void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
+{
+ g_free(vdev->bus_name);
+ vdev->bus_name = g_strdup(bus_name);
+}
+
+static void virtio_device_realize(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
+ Error *err = NULL;
+
+ if (vdc->realize != NULL) {
+ vdc->realize(dev, &err);
+ if (err != NULL) {
+ error_propagate(errp, err);
+ return;
+ }
+ }
+
+ virtio_bus_device_plugged(vdev, &err);
+ if (err != NULL) {
+ error_propagate(errp, err);
+ return;
+ }
+}
+
+static void virtio_device_unrealize(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
+ Error *err = NULL;
+
+ virtio_bus_device_unplugged(vdev);
+
+ if (vdc->unrealize != NULL) {
+ vdc->unrealize(dev, &err);
+ if (err != NULL) {
+ error_propagate(errp, err);
+ return;
+ }
+ }
+
+ g_free(vdev->bus_name);
+ vdev->bus_name = NULL;
+}
+
+static Property virtio_properties[] = {
+ DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_device_class_init(ObjectClass *klass, void *data)
+{
+    /* Set the default values for the virtio base class here. */
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = virtio_device_realize;
+ dc->unrealize = virtio_device_unrealize;
+ dc->bus_type = TYPE_VIRTIO_BUS;
+ dc->props = virtio_properties;
+}
+
+static const TypeInfo virtio_device_info = {
+ .name = TYPE_VIRTIO_DEVICE,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(VirtIODevice),
+ .class_init = virtio_device_class_init,
+ .abstract = true,
+ .class_size = sizeof(VirtioDeviceClass),
+};
+
+static void virtio_register_types(void)
+{
+ type_register_static(&virtio_device_info);
+}
+
+type_init(virtio_register_types)