author     Peter Maydell <peter.maydell@linaro.org>  2015-07-08 13:36:19 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2015-07-08 13:36:19 +0100
commit     c8232b39bb18a91cde39b8e0b60e731a4ce782b1 (patch)
tree       8cc13bdc7b677a07edeb762d216e366ca3e0b1eb /hw/virtio
parent     62a3864eb09414d3cee94a2a592ecd414200912f (diff)
parent     c4fc82bf1ad088a84ccedf779f6aa928e4fadb5f (diff)
Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging
pc,virtio,pci: fixes and updates

Most notably, this includes the TCO support for ICH: the last feature for 2.4
as we are entering the hard freeze. Bugfixes only from now on.

virtio pci also gained cfg access capability - arguably a bugfix since virtio
spec makes it mandatory, but it's a big patch.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Wed Jul 8 10:40:07 2015 BST using RSA key ID D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"

* remotes/mst/tags/for_upstream:
  tco-test: fix up config accesses and re-enable
  virtio: fix cfg endian-ness for BE targets
  virtio-pci: implement cfg capability
  virtio: define virtio_pci_cfg_cap in header.
  pcie: Set the "link active" in the link status register
  pci_regs.h: import from linux
  virtio_net: reuse constants from linux
  hw/i386/pc: don't carry FDC from pc_basic_device_init() to pc_cmos_init()
  hw/i386/pc: reflect any FDC @ ioport 0x3f0 in the CMOS
  hw/i386/pc: factor out pc_cmos_init_floppy()
  ich9: implement strap SPKR pin logic
  tests: add testcase for TCO watchdog emulation
  ich9: add TCO interface emulation
  acpi: split out ICH ACPI support
  Revert "dataplane: allow virtio-1 devices"
  dataplane: fix cross-endian issues

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
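For reference, the cfg access capability mentioned above is built around the
virtio_pci_cfg_cap layout from the virtio 1.0 specification, which the
"virtio: define virtio_pci_cfg_cap in header." patch brings in from the Linux
header. The sketch below reproduces that standard layout for orientation; it
is not part of the diff itself.

/* Vendor-specific PCI capability used by modern virtio devices
 * (layout per the virtio 1.0 spec / Linux virtio_pci.h). */
struct virtio_pci_cap {
    uint8_t  cap_vndr;    /* PCI_CAP_ID_VNDR: generic vendor capability */
    uint8_t  cap_next;    /* link to the next capability */
    uint8_t  cap_len;     /* length of this capability structure */
    uint8_t  cfg_type;    /* VIRTIO_PCI_CAP_PCI_CFG for the cfg access window */
    uint8_t  bar;         /* which BAR the window points into */
    uint8_t  padding[3];
    uint32_t offset;      /* little-endian offset within that BAR */
    uint32_t length;      /* little-endian length of the access */
};

struct virtio_pci_cfg_cap {
    struct virtio_pci_cap cap;
    uint8_t pci_cfg_data[4];   /* data window for BAR accesses */
};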
Diffstat (limited to 'hw/virtio')
-rw-r--r--  hw/virtio/dataplane/vring.c  |  53
-rw-r--r--  hw/virtio/virtio-pci.c       | 151
-rw-r--r--  hw/virtio/virtio-pci.h       |   3
3 files changed, 178 insertions(+), 29 deletions(-)
diff --git a/hw/virtio/dataplane/vring.c b/hw/virtio/dataplane/vring.c
index 3589185..07fd69c 100644
--- a/hw/virtio/dataplane/vring.c
+++ b/hw/virtio/dataplane/vring.c
@@ -153,22 +153,20 @@ bool vring_should_notify(VirtIODevice *vdev, Vring *vring)
return true;
}
- return vring_need_event(vring_used_event(&vring->vr), new, old);
+ return vring_need_event(virtio_tswap16(vdev, vring_used_event(&vring->vr)),
+ new, old);
}
-static int get_desc(VirtIODevice *vdev, Vring *vring, VirtQueueElement *elem,
+static int get_desc(Vring *vring, VirtQueueElement *elem,
struct vring_desc *desc)
{
unsigned *num;
struct iovec *iov;
hwaddr *addr;
MemoryRegion *mr;
- int is_write = virtio_tswap16(vdev, desc->flags) & VRING_DESC_F_WRITE;
- uint32_t len = virtio_tswap32(vdev, desc->len);
- uint64_t desc_addr = virtio_tswap64(vdev, desc->addr);
- if (is_write) {
+ if (desc->flags & VRING_DESC_F_WRITE) {
num = &elem->in_num;
iov = &elem->in_sg[*num];
addr = &elem->in_addr[*num];
@@ -192,17 +190,18 @@ static int get_desc(VirtIODevice *vdev, Vring *vring, VirtQueueElement *elem,
}
/* TODO handle non-contiguous memory across region boundaries */
- iov->iov_base = vring_map(&mr, desc_addr, len, is_write);
+ iov->iov_base = vring_map(&mr, desc->addr, desc->len,
+ desc->flags & VRING_DESC_F_WRITE);
if (!iov->iov_base) {
error_report("Failed to map descriptor addr %#" PRIx64 " len %u",
- (uint64_t)desc_addr, len);
+ (uint64_t)desc->addr, desc->len);
return -EFAULT;
}
/* The MemoryRegion is looked up again and unref'ed later, leave the
* ref in place. */
- iov->iov_len = len;
- *addr = desc_addr;
+ iov->iov_len = desc->len;
+ *addr = desc->addr;
*num += 1;
return 0;
}
@@ -224,23 +223,21 @@ static int get_indirect(VirtIODevice *vdev, Vring *vring,
struct vring_desc desc;
unsigned int i = 0, count, found = 0;
int ret;
- uint32_t len = virtio_tswap32(vdev, indirect->len);
- uint64_t addr = virtio_tswap64(vdev, indirect->addr);
/* Sanity check */
- if (unlikely(len % sizeof(desc))) {
+ if (unlikely(indirect->len % sizeof(desc))) {
error_report("Invalid length in indirect descriptor: "
"len %#x not multiple of %#zx",
- len, sizeof(desc));
+ indirect->len, sizeof(desc));
vring->broken = true;
return -EFAULT;
}
- count = len / sizeof(desc);
+ count = indirect->len / sizeof(desc);
/* Buffers are chained via a 16 bit next field, so
* we can have at most 2^16 of these. */
if (unlikely(count > USHRT_MAX + 1)) {
- error_report("Indirect buffer length too big: %d", len);
+ error_report("Indirect buffer length too big: %d", indirect->len);
vring->broken = true;
return -EFAULT;
}
@@ -251,12 +248,12 @@ static int get_indirect(VirtIODevice *vdev, Vring *vring,
/* Translate indirect descriptor */
desc_ptr = vring_map(&mr,
- addr + found * sizeof(desc),
+ indirect->addr + found * sizeof(desc),
sizeof(desc), false);
if (!desc_ptr) {
error_report("Failed to map indirect descriptor "
"addr %#" PRIx64 " len %zu",
- (uint64_t)addr + found * sizeof(desc),
+ (uint64_t)indirect->addr + found * sizeof(desc),
sizeof(desc));
vring->broken = true;
return -EFAULT;
@@ -274,20 +271,19 @@ static int get_indirect(VirtIODevice *vdev, Vring *vring,
return -EFAULT;
}
- if (unlikely(virtio_tswap16(vdev, desc.flags)
- & VRING_DESC_F_INDIRECT)) {
+ if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
error_report("Nested indirect descriptor");
vring->broken = true;
return -EFAULT;
}
- ret = get_desc(vdev, vring, elem, &desc);
+ ret = get_desc(vring, elem, &desc);
if (ret < 0) {
vring->broken |= (ret == -EFAULT);
return ret;
}
- i = virtio_tswap16(vdev, desc.next);
- } while (virtio_tswap16(vdev, desc.flags) & VRING_DESC_F_NEXT);
+ i = desc.next;
+ } while (desc.flags & VRING_DESC_F_NEXT);
return 0;
}
@@ -388,7 +384,7 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
/* Ensure descriptor is loaded before accessing fields */
barrier();
- if (virtio_tswap16(vdev, desc.flags) & VRING_DESC_F_INDIRECT) {
+ if (desc.flags & VRING_DESC_F_INDIRECT) {
ret = get_indirect(vdev, vring, elem, &desc);
if (ret < 0) {
goto out;
@@ -396,18 +392,19 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
continue;
}
- ret = get_desc(vdev, vring, elem, &desc);
+ ret = get_desc(vring, elem, &desc);
if (ret < 0) {
goto out;
}
- i = virtio_tswap16(vdev, desc.next);
- } while (virtio_tswap16(vdev, desc.flags) & VRING_DESC_F_NEXT);
+ i = desc.next;
+ } while (desc.flags & VRING_DESC_F_NEXT);
/* On success, increment avail index. */
vring->last_avail_idx++;
if (virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
- vring_avail_event(&vring->vr) = vring->last_avail_idx;
+ vring_avail_event(&vring->vr) =
+ virtio_tswap16(vdev, vring->last_avail_idx);
}
return head;
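The hunks above drop the per-field virtio_tswap*() calls from the
descriptor-walking paths and instead swap only the used/avail event indices.
The intent of "dataplane: fix cross-endian issues" is that a descriptor is
converted to host endianness once, when it is copied out of guest memory (in
a part of the patch not shown in this excerpt), so the walking code can use
its fields directly. A minimal sketch of such a copy helper, assuming QEMU's
virtio_lduw_p()/virtio_ldl_p()/virtio_ldq_p() load-and-convert accessors; the
helper name and exact shape are illustrative, not taken from this diff:

/* Illustrative only: copy a descriptor out of guest memory, converting
 * each field from guest virtio endianness to host endianness exactly once. */
static void copy_in_vring_desc(VirtIODevice *vdev,
                               const struct vring_desc *guest,
                               struct vring_desc *host)
{
    host->addr  = virtio_ldq_p(vdev, &guest->addr);
    host->len   = virtio_ldl_p(vdev, &guest->len);
    host->flags = virtio_lduw_p(vdev, &guest->flags);
    host->next  = virtio_lduw_p(vdev, &guest->next);
}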
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 7a89081..ccca2b6 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -443,11 +443,89 @@ static const MemoryRegionOps virtio_pci_config_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
};
+/* Below are generic functions to do memcpy from/to an address space,
+ * without byteswaps, with input validation.
+ *
+ * As regular address_space_* APIs all do some kind of byteswap at least for
+ * some host/target combinations, we are forced to explicitly convert to a
+ * known-endianness integer value.
+ * It doesn't really matter which endian format to go through, so the code
+ * below selects the endian that causes the least amount of work on the given
+ * host.
+ *
+ * Note: host pointer must be aligned.
+ */
+static
+void virtio_address_space_write(AddressSpace *as, hwaddr addr,
+ const uint8_t *buf, int len)
+{
+ uint32_t val;
+
+ /* address_space_* APIs assume an aligned address.
+ * As address is under guest control, handle illegal values.
+ */
+ addr &= ~(len - 1);
+
+ /* Make sure caller aligned buf properly */
+ assert(!(((uintptr_t)buf) & (len - 1)));
+
+ switch (len) {
+ case 1:
+ val = pci_get_byte(buf);
+ address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
+ break;
+ case 2:
+ val = pci_get_word(buf);
+ address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
+ break;
+ case 4:
+ val = pci_get_long(buf);
+ address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
+ break;
+ default:
+ /* As length is under guest control, handle illegal values. */
+ break;
+ }
+}
+
+static void
+virtio_address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
+{
+ uint32_t val;
+
+ /* address_space_* APIs assume an aligned address.
+ * As address is under guest control, handle illegal values.
+ */
+ addr &= ~(len - 1);
+
+ /* Make sure caller aligned buf properly */
+ assert(!(((uintptr_t)buf) & (len - 1)));
+
+ switch (len) {
+ case 1:
+ val = address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
+ pci_set_byte(buf, val);
+ break;
+ case 2:
+ val = address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
+ pci_set_word(buf, val);
+ break;
+ case 4:
+ val = address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
+ pci_set_long(buf, val);
+ break;
+ default:
+ /* As length is under guest control, handle illegal values. */
+ break;
+ }
+}
+
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
uint32_t val, int len)
{
VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ struct virtio_pci_cfg_cap *cfg;
pci_default_write_config(pci_dev, address, val, len);
@@ -456,6 +534,49 @@ static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
virtio_pci_stop_ioeventfd(proxy);
virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
}
+
+ if (proxy->config_cap &&
+ ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
+ pci_cfg_data),
+ sizeof cfg->pci_cfg_data)) {
+ uint32_t off;
+ uint32_t len;
+
+ cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
+ off = le32_to_cpu(cfg->cap.offset);
+ len = le32_to_cpu(cfg->cap.length);
+
+ if (len <= sizeof cfg->pci_cfg_data) {
+ virtio_address_space_write(&proxy->modern_as, off,
+ cfg->pci_cfg_data, len);
+ }
+ }
+}
+
+static uint32_t virtio_read_config(PCIDevice *pci_dev,
+ uint32_t address, int len)
+{
+ VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
+ struct virtio_pci_cfg_cap *cfg;
+
+ if (proxy->config_cap &&
+ ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
+ pci_cfg_data),
+ sizeof cfg->pci_cfg_data)) {
+ uint32_t off;
+ uint32_t len;
+
+ cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
+ off = le32_to_cpu(cfg->cap.offset);
+ len = le32_to_cpu(cfg->cap.length);
+
+ if (len <= sizeof cfg->pci_cfg_data) {
+ virtio_address_space_read(&proxy->modern_as, off,
+ cfg->pci_cfg_data, len);
+ }
+ }
+
+ return pci_default_read_config(pci_dev, address, len);
}
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
@@ -942,7 +1063,7 @@ static int virtio_pci_query_nvectors(DeviceState *d)
return proxy->nvectors;
}
-static void virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
+static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
struct virtio_pci_cap *cap)
{
PCIDevice *dev = &proxy->pci_dev;
@@ -954,6 +1075,8 @@ static void virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
assert(cap->cap_len >= sizeof *cap);
memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
cap->cap_len - PCI_CAP_FLAGS);
+
+ return offset;
}
static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
@@ -1329,6 +1452,11 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
.notify_off_multiplier =
cpu_to_le32(QEMU_VIRTIO_PCI_QUEUE_MEM_MULT),
};
+ struct virtio_pci_cfg_cap cfg = {
+ .cap.cap_len = sizeof cfg,
+ .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
+ };
+ struct virtio_pci_cfg_cap *cfg_mask;
/* TODO: add io access for speed */
@@ -1338,11 +1466,19 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
virtio_pci_modern_region_map(proxy, &proxy->isr, &cap);
virtio_pci_modern_region_map(proxy, &proxy->device, &cap);
virtio_pci_modern_region_map(proxy, &proxy->notify, &notify.cap);
+
pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar,
PCI_BASE_ADDRESS_SPACE_MEMORY |
PCI_BASE_ADDRESS_MEM_PREFETCH |
PCI_BASE_ADDRESS_MEM_TYPE_64,
&proxy->modern_bar);
+
+ proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
+ cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
+ pci_set_byte(&cfg_mask->cap.bar, ~0x0);
+ pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
+ pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
+ pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
}
if (proxy->nvectors &&
@@ -1354,6 +1490,7 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
}
proxy->pci_dev.config_write = virtio_write_config;
+ proxy->pci_dev.config_read = virtio_read_config;
if (legacy) {
size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
@@ -1424,6 +1561,15 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
2 * QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
VIRTIO_QUEUE_MAX);
+ memory_region_init_alias(&proxy->modern_cfg,
+ OBJECT(proxy),
+ "virtio-pci-cfg",
+ &proxy->modern_bar,
+ 0,
+ memory_region_size(&proxy->modern_bar));
+
+ address_space_init(&proxy->modern_as, &proxy->modern_cfg, "virtio-pci-cfg-as");
+
virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
if (k->realize) {
k->realize(proxy, errp);
@@ -1432,7 +1578,10 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
static void virtio_pci_exit(PCIDevice *pci_dev)
{
+ VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
+
msix_uninit_exclusive_bar(pci_dev);
+ address_space_destroy(&proxy->modern_as);
}
static void virtio_pci_reset(DeviceState *qdev)
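Taken together, virtio_write_config() and virtio_read_config() above give the
guest a window into the modern BAR through ordinary PCI config cycles: the
driver programs cap.bar, cap.offset and cap.length inside the capability
(made guest-writable by the wmask setup in virtio_pci_device_plugged()), then
reads or writes cap.pci_cfg_data, which QEMU forwards to the modern_as alias
of the modern BAR. A hypothetical guest-side sequence is sketched below; the
pci_cfg_write8()/pci_cfg_write32()/pci_cfg_read32() accessors and struct
pci_dev are placeholders for whatever the guest OS provides, and the
virtio_pci_cfg_cap layout is the spec-defined one shown earlier.

/* Hypothetical guest-side read through the VIRTIO_PCI_CAP_PCI_CFG window.
 * cfg_cap is the config-space offset of the capability. Real code would
 * convert offset/length to little-endian before writing them. */
uint32_t virtio_cfg_read32(struct pci_dev *dev, uint8_t cfg_cap,
                           uint8_t bar, uint32_t offset)
{
    /* Select which BAR to access and where inside it. */
    pci_cfg_write8(dev, cfg_cap + offsetof(struct virtio_pci_cfg_cap, cap.bar), bar);
    pci_cfg_write32(dev, cfg_cap + offsetof(struct virtio_pci_cfg_cap, cap.offset), offset);
    pci_cfg_write32(dev, cfg_cap + offsetof(struct virtio_pci_cfg_cap, cap.length), 4);

    /* Reading pci_cfg_data makes the device perform the BAR read and
     * return the result through config space. */
    return pci_cfg_read32(dev, cfg_cap + offsetof(struct virtio_pci_cfg_cap, pci_cfg_data));
}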
diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h
index 05d9d24..b6c442f 100644
--- a/hw/virtio/virtio-pci.h
+++ b/hw/virtio/virtio-pci.h
@@ -112,9 +112,12 @@ struct VirtIOPCIProxy {
VirtIOPCIRegion device;
VirtIOPCIRegion notify;
MemoryRegion modern_bar;
+ MemoryRegion modern_cfg;
+ AddressSpace modern_as;
uint32_t legacy_io_bar;
uint32_t msix_bar;
uint32_t modern_mem_bar;
+ int config_cap;
uint32_t flags;
uint32_t class_code;
uint32_t nvectors;