author     Paul Durrant <paul.durrant@citrix.com>                    2015-01-20 11:06:19 +0000
committer  Stefano Stabellini <stefano.stabellini@eu.citrix.com>     2015-01-20 14:24:10 +0000
commit     3996e85c1822e05c50250f8d2d1e57b6bea1229d (patch)
tree       275774a6b43e86327dcbc98021e3ddfa52d54bf1 /include/hw/xen
parent     707ff80021ccd7a68f4b3d2c44eebf87efbb41c4 (diff)
Xen: Use the ioreq-server API when available
The ioreq-server API added to Xen 4.5 offers better security than the existing Xen/QEMU interface because the shared pages that are used to pass emulation requests/results back and forth are removed from the guest's memory space before any requests are serviced. This prevents the guest from mapping these pages (they are in a well-known location) and attempting to attack QEMU by synthesizing its own request structures. Hence, this patch modifies configure to detect whether the API is available, and adds the necessary code to use the API if it is.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Diffstat (limited to 'include/hw/xen')
-rw-r--r--  include/hw/xen/xen_common.h  223
1 file changed, 223 insertions, 0 deletions
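
A rough usage sketch (hypothetical, not from the patch itself) of how a device model might drive the wrappers this commit introduces: the helper below creates an ioreq server, fetches the ioreq page PFNs and the buffered-ioreq event channel, and then enables the server. The function name and error handling are illustrative only; the xen_* calls are the ones defined in xen_common.h in the diff below.

/* Illustrative sketch only: bring up an ioreq server using the wrappers
 * added by this patch. On Xen < 4.5 the compatibility stubs make
 * xen_create_ioreq_server() and xen_set_ioreq_server_state() return 0
 * without side effects, so the same code works against the legacy
 * default-server interface. */
#include "hw/xen/xen_common.h"

static int example_start_ioreq_server(XenXC xc, domid_t dom)
{
    ioservid_t ioservid;
    xen_pfn_t ioreq_pfn, bufioreq_pfn;
    evtchn_port_t bufioreq_evtchn;
    int rc;

    rc = xen_create_ioreq_server(xc, dom, &ioservid);
    if (rc < 0) {
        return rc;
    }

    /* Ask Xen where the synchronous and buffered ioreq pages live and
     * which event channel signals buffered requests. */
    rc = xen_get_ioreq_server_info(xc, dom, ioservid,
                                   &ioreq_pfn, &bufioreq_pfn,
                                   &bufioreq_evtchn);
    if (rc < 0) {
        xen_destroy_ioreq_server(xc, dom, ioservid);
        return rc;
    }

    /* Requests are only routed to the server once it is enabled. */
    return xen_set_ioreq_server_state(xc, dom, ioservid, true);
}
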
diff --git a/include/hw/xen/xen_common.h b/include/hw/xen/xen_common.h
index 95612a4..519696f 100644
--- a/include/hw/xen/xen_common.h
+++ b/include/hw/xen/xen_common.h
@@ -16,7 +16,9 @@
#include "hw/hw.h"
#include "hw/xen/xen.h"
+#include "hw/pci/pci.h"
#include "qemu/queue.h"
+#include "trace.h"
/*
* We don't support Xen prior to 3.3.0.
@@ -179,4 +181,225 @@ static inline int xen_get_vmport_regs_pfn(XenXC xc, domid_t dom,
}
#endif
+/* Xen before 4.5 */
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 450
+
+#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
+#define HVM_PARAM_BUFIOREQ_EVTCHN 26
+#endif
+
+#define IOREQ_TYPE_PCI_CONFIG 2
+
+typedef uint32_t ioservid_t;
+
+static inline void xen_map_memory_section(XenXC xc, domid_t dom,
+ ioservid_t ioservid,
+ MemoryRegionSection *section)
+{
+}
+
+static inline void xen_unmap_memory_section(XenXC xc, domid_t dom,
+ ioservid_t ioservid,
+ MemoryRegionSection *section)
+{
+}
+
+static inline void xen_map_io_section(XenXC xc, domid_t dom,
+ ioservid_t ioservid,
+ MemoryRegionSection *section)
+{
+}
+
+static inline void xen_unmap_io_section(XenXC xc, domid_t dom,
+ ioservid_t ioservid,
+ MemoryRegionSection *section)
+{
+}
+
+static inline void xen_map_pcidev(XenXC xc, domid_t dom,
+ ioservid_t ioservid,
+ PCIDevice *pci_dev)
+{
+}
+
+static inline void xen_unmap_pcidev(XenXC xc, domid_t dom,
+ ioservid_t ioservid,
+ PCIDevice *pci_dev)
+{
+}
+
+static inline int xen_create_ioreq_server(XenXC xc, domid_t dom,
+ ioservid_t *ioservid)
+{
+ return 0;
+}
+
+static inline void xen_destroy_ioreq_server(XenXC xc, domid_t dom,
+ ioservid_t ioservid)
+{
+}
+
+static inline int xen_get_ioreq_server_info(XenXC xc, domid_t dom,
+ ioservid_t ioservid,
+ xen_pfn_t *ioreq_pfn,
+ xen_pfn_t *bufioreq_pfn,
+ evtchn_port_t *bufioreq_evtchn)
+{
+ unsigned long param;
+ int rc;
+
+ rc = xc_get_hvm_param(xc, dom, HVM_PARAM_IOREQ_PFN, &param);
+ if (rc < 0) {
+ fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
+ return -1;
+ }
+
+ *ioreq_pfn = param;
+
+ rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
+ if (rc < 0) {
+ fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
+ return -1;
+ }
+
+ *bufioreq_pfn = param;
+
+ rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
+ &param);
+ if (rc < 0) {
+ fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
+ return -1;
+ }
+
+ *bufioreq_evtchn = param;
+
+ return 0;
+}
+
+static inline int xen_set_ioreq_server_state(XenXC xc, domid_t dom,
+ ioservid_t ioservid,
+ bool enable)
+{
+ return 0;
+}
+
+/* Xen 4.5 */
+#else
+
+static inline void xen_map_memory_section(XenXC xc, domid_t dom,
+ ioservid_t ioservid,
+ MemoryRegionSection *section)
+{
+ hwaddr start_addr = section->offset_within_address_space;
+ ram_addr_t size = int128_get64(section->size);
+ hwaddr end_addr = start_addr + size - 1;
+
+ trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
+ xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 1,
+ start_addr, end_addr);
+}
+
+static inline void xen_unmap_memory_section(XenXC xc, domid_t dom,
+ ioservid_t ioservid,
+ MemoryRegionSection *section)
+{
+ hwaddr start_addr = section->offset_within_address_space;
+ ram_addr_t size = int128_get64(section->size);
+ hwaddr end_addr = start_addr + size - 1;
+
+ trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
+ xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 1,
+ start_addr, end_addr);
+}
+
+static inline void xen_map_io_section(XenXC xc, domid_t dom,
+ ioservid_t ioservid,
+ MemoryRegionSection *section)
+{
+ hwaddr start_addr = section->offset_within_address_space;
+ ram_addr_t size = int128_get64(section->size);
+ hwaddr end_addr = start_addr + size - 1;
+
+ trace_xen_map_portio_range(ioservid, start_addr, end_addr);
+ xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 0,
+ start_addr, end_addr);
+}
+
+static inline void xen_unmap_io_section(XenXC xc, domid_t dom,
+ ioservid_t ioservid,
+ MemoryRegionSection *section)
+{
+ hwaddr start_addr = section->offset_within_address_space;
+ ram_addr_t size = int128_get64(section->size);
+ hwaddr end_addr = start_addr + size - 1;
+
+ trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
+ xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 0,
+ start_addr, end_addr);
+}
+
+static inline void xen_map_pcidev(XenXC xc, domid_t dom,
+ ioservid_t ioservid,
+ PCIDevice *pci_dev)
+{
+ trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus),
+ PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
+ xc_hvm_map_pcidev_to_ioreq_server(xc, dom, ioservid,
+ 0, pci_bus_num(pci_dev->bus),
+ PCI_SLOT(pci_dev->devfn),
+ PCI_FUNC(pci_dev->devfn));
+}
+
+static inline void xen_unmap_pcidev(XenXC xc, domid_t dom,
+ ioservid_t ioservid,
+ PCIDevice *pci_dev)
+{
+ trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus),
+ PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
+ xc_hvm_unmap_pcidev_from_ioreq_server(xc, dom, ioservid,
+ 0, pci_bus_num(pci_dev->bus),
+ PCI_SLOT(pci_dev->devfn),
+ PCI_FUNC(pci_dev->devfn));
+}
+
+static inline int xen_create_ioreq_server(XenXC xc, domid_t dom,
+ ioservid_t *ioservid)
+{
+ int rc = xc_hvm_create_ioreq_server(xc, dom, 1, ioservid);
+
+ if (rc == 0) {
+ trace_xen_ioreq_server_create(*ioservid);
+ }
+
+ return rc;
+}
+
+static inline void xen_destroy_ioreq_server(XenXC xc, domid_t dom,
+ ioservid_t ioservid)
+{
+ trace_xen_ioreq_server_destroy(ioservid);
+ xc_hvm_destroy_ioreq_server(xc, dom, ioservid);
+}
+
+static inline int xen_get_ioreq_server_info(XenXC xc, domid_t dom,
+ ioservid_t ioservid,
+ xen_pfn_t *ioreq_pfn,
+ xen_pfn_t *bufioreq_pfn,
+ evtchn_port_t *bufioreq_evtchn)
+{
+ return xc_hvm_get_ioreq_server_info(xc, dom, ioservid,
+ ioreq_pfn, bufioreq_pfn,
+ bufioreq_evtchn);
+}
+
+static inline int xen_set_ioreq_server_state(XenXC xc, domid_t dom,
+ ioservid_t ioservid,
+ bool enable)
+{
+ trace_xen_ioreq_server_state(ioservid, enable);
+ return xc_hvm_set_ioreq_server_state(xc, dom, ioservid, enable);
+}
+
+#endif
+
#endif /* QEMU_HW_XEN_COMMON_H */