Diffstat (limited to 'include')
-rw-r--r--   include/block/aio.h                     9
-rw-r--r--   include/block/block.h                   1
-rw-r--r--   include/hw/virtio/dataplane/vring.h     2
-rw-r--r--   include/hw/virtio/virtio-blk.h          2
-rw-r--r--   include/hw/virtio/virtio_ring.h       167
-rw-r--r--   include/qemu/main-loop.h                2
-rw-r--r--   include/sysemu/arch_init.h             34
7 files changed, 193 insertions(+), 24 deletions(-)
diff --git a/include/block/aio.h b/include/block/aio.h
index 4603c0f..1562721 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -26,7 +26,8 @@ typedef struct BlockDriverAIOCB BlockDriverAIOCB;
typedef void BlockDriverCompletionFunc(void *opaque, int ret);
typedef struct AIOCBInfo {
- void (*cancel)(BlockDriverAIOCB *acb);
+ void (*cancel_async)(BlockDriverAIOCB *acb);
+ AioContext *(*get_aio_context)(BlockDriverAIOCB *acb);
size_t aiocb_size;
} AIOCBInfo;
@@ -35,11 +36,13 @@ struct BlockDriverAIOCB {
BlockDriverState *bs;
BlockDriverCompletionFunc *cb;
void *opaque;
+ int refcnt;
};
void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
BlockDriverCompletionFunc *cb, void *opaque);
-void qemu_aio_release(void *p);
+void qemu_aio_unref(void *p);
+void qemu_aio_ref(void *p);
typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
@@ -99,7 +102,7 @@ void aio_set_dispatching(AioContext *ctx, bool dispatching);
* They also provide bottom halves, a service to execute a piece of code
* as soon as possible.
*/
-AioContext *aio_context_new(void);
+AioContext *aio_context_new(Error **errp);
/**
* aio_context_ref:
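The aio.h changes replace the synchronous cancel callback with cancel_async/get_aio_context hooks and move AIOCB lifetime to reference counting: qemu_aio_release becomes qemu_aio_unref, with a matching qemu_aio_ref, which the new bdrv_aio_cancel_async in block.h (below) builds on. A minimal sketch of how the new refcnt field could back the two helpers follows; it is an illustration only, since the in-tree block.c code frees the AIOCB through the size recorded in AIOCBInfo rather than a plain g_free().

    #include <assert.h>
    #include <glib.h>
    #include "block/aio.h"

    /* Sketch only: the real implementation releases the AIOCB via the size
     * stored in acb->aiocb_info instead of g_free(). */
    void qemu_aio_ref(void *p)
    {
        BlockDriverAIOCB *acb = p;
        assert(acb->refcnt > 0);
        acb->refcnt++;
    }

    void qemu_aio_unref(void *p)
    {
        BlockDriverAIOCB *acb = p;
        assert(acb->refcnt > 0);
        if (--acb->refcnt == 0) {
            g_free(acb);
        }
    }
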
diff --git a/include/block/block.h b/include/block/block.h
index 07d6d8e..3318f0d 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -337,6 +337,7 @@ BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
int64_t sector_num, int nb_sectors,
BlockDriverCompletionFunc *cb, void *opaque);
void bdrv_aio_cancel(BlockDriverAIOCB *acb);
+void bdrv_aio_cancel_async(BlockDriverAIOCB *acb);
typedef struct BlockRequest {
/* Fields to be filled by multiwrite caller */
diff --git a/include/hw/virtio/dataplane/vring.h b/include/hw/virtio/dataplane/vring.h
index af73ee2..d3e086a 100644
--- a/include/hw/virtio/dataplane/vring.h
+++ b/include/hw/virtio/dataplane/vring.h
@@ -17,8 +17,8 @@
#ifndef VRING_H
#define VRING_H
-#include <linux/virtio_ring.h>
#include "qemu-common.h"
+#include "hw/virtio/virtio_ring.h"
#include "hw/virtio/virtio.h"
typedef struct {
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index cf61154..f3853f2 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -132,10 +132,8 @@ typedef struct VirtIOBlock {
VMChangeStateEntry *change;
/* Function to push to vq and notify guest */
void (*complete_request)(struct VirtIOBlockReq *req, unsigned char status);
-#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
Notifier migration_state_notifier;
struct VirtIOBlockDataPlane *dataplane;
-#endif
} VirtIOBlock;
typedef struct MultiReqBuffer {
diff --git a/include/hw/virtio/virtio_ring.h b/include/hw/virtio/virtio_ring.h
new file mode 100644
index 0000000..8f58bc9
--- /dev/null
+++ b/include/hw/virtio/virtio_ring.h
@@ -0,0 +1,167 @@
+#ifndef _LINUX_VIRTIO_RING_H
+#define _LINUX_VIRTIO_RING_H
+/*
+ * This file is copied from /usr/include/linux while converting __uNN types
+ * to uXX_t, __inline__ to inline, and tab to spaces.
+ * */
+
+/* An interface for efficient virtio implementation, currently for use by KVM
+ * and lguest, but hopefully others soon. Do NOT change this since it will
+ * break existing servers and clients.
+ *
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Copyright Rusty Russell IBM Corporation 2007. */
+
+/* This marks a buffer as continuing via the next field. */
+#define VRING_DESC_F_NEXT 1
+/* This marks a buffer as write-only (otherwise read-only). */
+#define VRING_DESC_F_WRITE 2
+/* This means the buffer contains a list of buffer descriptors. */
+#define VRING_DESC_F_INDIRECT 4
+
+/* The Host uses this in used->flags to advise the Guest: don't kick me when
+ * you add a buffer. It's unreliable, so it's simply an optimization. Guest
+ * will still kick if it's out of buffers. */
+#define VRING_USED_F_NO_NOTIFY 1
+/* The Guest uses this in avail->flags to advise the Host: don't interrupt me
+ * when you consume a buffer. It's unreliable, so it's simply an
+ * optimization. */
+#define VRING_AVAIL_F_NO_INTERRUPT 1
+
+/* We support indirect buffer descriptors */
+#define VIRTIO_RING_F_INDIRECT_DESC 28
+
+/* The Guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field. */
+/* The Host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field. */
+#define VIRTIO_RING_F_EVENT_IDX 29
+
+/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
+struct vring_desc {
+ /* Address (guest-physical). */
+ uint64_t addr;
+ /* Length. */
+ uint32_t len;
+ /* The flags as indicated above. */
+ uint16_t flags;
+ /* We chain unused descriptors via this, too */
+ uint16_t next;
+};
+
+struct vring_avail {
+ uint16_t flags;
+ uint16_t idx;
+ uint16_t ring[];
+};
+
+/* u32 is used here for ids for padding reasons. */
+struct vring_used_elem {
+ /* Index of start of used descriptor chain. */
+ uint32_t id;
+ /* Total length of the descriptor chain which was used (written to) */
+ uint32_t len;
+};
+
+struct vring_used {
+ uint16_t flags;
+ uint16_t idx;
+ struct vring_used_elem ring[];
+};
+
+struct vring {
+ unsigned int num;
+
+ struct vring_desc *desc;
+
+ struct vring_avail *avail;
+
+ struct vring_used *used;
+};
+
+/* The standard layout for the ring is a continuous chunk of memory which looks
+ * like this. We assume num is a power of 2.
+ *
+ * struct vring
+ * {
+ * // The actual descriptors (16 bytes each)
+ * struct vring_desc desc[num];
+ *
+ * // A ring of available descriptor heads with free-running index.
+ * uint16_t avail_flags;
+ * uint16_t avail_idx;
+ * uint16_t available[num];
+ * uint16_t used_event_idx;
+ *
+ * // Padding to the next align boundary.
+ * char pad[];
+ *
+ * // A ring of used descriptor heads with free-running index.
+ * uint16_t used_flags;
+ * uint16_t used_idx;
+ * struct vring_used_elem used[num];
+ * uint16_t avail_event_idx;
+ * };
+ */
+/* We publish the used event index at the end of the available ring, and vice
+ * versa. They are at the end for backwards compatibility. */
+#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
+#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])
+
+static inline void vring_init(struct vring *vr, unsigned int num, void *p,
+ unsigned long align)
+{
+ vr->num = num;
+ vr->desc = p;
+ vr->avail = p + num*sizeof(struct vring_desc);
+ vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(uint16_t)
+ + align-1) & ~(align - 1));
+}
+
+static inline unsigned vring_size(unsigned int num, unsigned long align)
+{
+ return ((sizeof(struct vring_desc) * num + sizeof(uint16_t) * (3 + num)
+ + align - 1) & ~(align - 1))
+ + sizeof(uint16_t) * 3 + sizeof(struct vring_used_elem) * num;
+}
+
+/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
+/* Assuming a given event_idx value from the other size, if
+ * we have just incremented index from old to new_idx,
+ * should we trigger an event? */
+static inline int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
+{
+ /* Note: Xen has similar logic for notification hold-off
+ * in include/xen/interface/io/ring.h with req_event and req_prod
+ * corresponding to event_idx + 1 and new_idx respectively.
+ * Note also that req_event and req_prod in Xen start at 1,
+ * event indexes in virtio start at 0. */
+ return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
+}
+
+#endif /* _LINUX_VIRTIO_RING_H */
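The new header is a local copy of the Linux virtio_ring.h definitions, so the vring code no longer needs <linux/virtio_ring.h> (see the dataplane/vring.h hunk above). Below is a short self-contained example, assuming this header is on the include path, showing the layout arithmetic for a 256-entry ring with the 4096-byte alignment used by virtio-pci, plus the event-index notification check:

    #include <stdio.h>
    #include <stdint.h>
    #include "hw/virtio/virtio_ring.h"

    int main(void)
    {
        /* Descriptor table: 256 * 16 = 4096 bytes.  Avail ring: 2 + 2 +
         * 2*256 + 2 = 518 bytes, rounded up to the 4096-byte alignment,
         * so the used ring starts at offset 8192.  Used ring: 2 + 2 +
         * 8*256 + 2 = 2054 bytes, for 10246 bytes in total. */
        printf("vring_size(256, 4096) = %u\n", vring_size(256, 4096));

        /* The other side set its event index to 12; our index moved from
         * 10 to 15, so the event index was crossed and a notification is
         * due (prints 1). */
        printf("need event: %d\n", vring_need_event(12, 15, 10));
        return 0;
    }
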
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
index 6f0200a..62c68c0 100644
--- a/include/qemu/main-loop.h
+++ b/include/qemu/main-loop.h
@@ -42,7 +42,7 @@
*
* In the case of QEMU tools, this will also start/initialize timers.
*/
-int qemu_init_main_loop(void);
+int qemu_init_main_loop(Error **errp);
/**
* main_loop_wait: Run one iteration of the main loop.
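With the new signature, callers get failure details through an Error object instead of a bare return code. A sketch of the assumed caller-side pattern (the helper name here is made up for illustration):

    #include <stdlib.h>
    #include "qemu/main-loop.h"
    #include "qemu/error-report.h"
    #include "qapi/error.h"

    /* Assumed calling convention: report the Error detail and bail out. */
    static void init_main_loop_or_die(void)
    {
        Error *local_err = NULL;

        if (qemu_init_main_loop(&local_err)) {
            error_report("%s", error_get_pretty(local_err));
            error_free(local_err);
            exit(1);
        }
    }
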
diff --git a/include/sysemu/arch_init.h b/include/sysemu/arch_init.h
index 8939233..769ec06 100644
--- a/include/sysemu/arch_init.h
+++ b/include/sysemu/arch_init.h
@@ -6,23 +6,23 @@
enum {
QEMU_ARCH_ALL = -1,
- QEMU_ARCH_ALPHA = 1,
- QEMU_ARCH_ARM = 2,
- QEMU_ARCH_CRIS = 4,
- QEMU_ARCH_I386 = 8,
- QEMU_ARCH_M68K = 16,
- QEMU_ARCH_LM32 = 32,
- QEMU_ARCH_MICROBLAZE = 64,
- QEMU_ARCH_MIPS = 128,
- QEMU_ARCH_PPC = 256,
- QEMU_ARCH_S390X = 512,
- QEMU_ARCH_SH4 = 1024,
- QEMU_ARCH_SPARC = 2048,
- QEMU_ARCH_XTENSA = 4096,
- QEMU_ARCH_OPENRISC = 8192,
- QEMU_ARCH_UNICORE32 = 0x4000,
- QEMU_ARCH_MOXIE = 0x8000,
- QEMU_ARCH_TRICORE = 0x10000,
+ QEMU_ARCH_ALPHA = (1 << 0),
+ QEMU_ARCH_ARM = (1 << 1),
+ QEMU_ARCH_CRIS = (1 << 2),
+ QEMU_ARCH_I386 = (1 << 3),
+ QEMU_ARCH_M68K = (1 << 4),
+ QEMU_ARCH_LM32 = (1 << 5),
+ QEMU_ARCH_MICROBLAZE = (1 << 6),
+ QEMU_ARCH_MIPS = (1 << 7),
+ QEMU_ARCH_PPC = (1 << 8),
+ QEMU_ARCH_S390X = (1 << 9),
+ QEMU_ARCH_SH4 = (1 << 10),
+ QEMU_ARCH_SPARC = (1 << 11),
+ QEMU_ARCH_XTENSA = (1 << 12),
+ QEMU_ARCH_OPENRISC = (1 << 13),
+ QEMU_ARCH_UNICORE32 = (1 << 14),
+ QEMU_ARCH_MOXIE = (1 << 15),
+ QEMU_ARCH_TRICORE = (1 << 16),
};
extern const uint32_t arch_type;
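The enum rewrite is purely notational: each value equals its old decimal constant ((1 << 4) is still 16), so existing masks keep their meaning, and QEMU_ARCH_ALL, being -1, has every bit set. A hypothetical membership test against arch_type, for illustration only:

    #include <stdbool.h>
    #include <stdint.h>
    #include "sysemu/arch_init.h"

    /* Hypothetical helper, not part of the patch: QEMU_ARCH_ALL (-1) has all
     * bits set and therefore matches every target. */
    static inline bool arch_in_mask(uint32_t mask)
    {
        return (arch_type & mask) != 0;
    }
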