author     Peter Maydell <peter.maydell@linaro.org>   2014-07-14 13:09:29 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2014-07-14 13:09:29 +0100
commit     7a6d04e73fdd571234e05dcad96895fafb3f22f0 (patch)
tree       acf7065c975ca4056f3041c58907803239b9941e /include
parent     c15a34eda0f270888a0e4676997317e1bd7894b8 (diff)
parent     58ac321135af890b503ebe56d0d00e184779918f (diff)
Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging
Block patches for 2.1.0-rc2 (v2)

# gpg: Signature made Mon 14 Jul 2014 11:04:12 BST using RSA key ID C88F2FD6
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>"

* remotes/kevin/tags/for-upstream: (22 commits)
  ide: Treat read/write beyond end as invalid
  virtio-blk: Treat read/write beyond end as invalid
  virtio-blk: Bypass error action and I/O accounting on invalid r/w
  virtio-blk: Factor common checks out of virtio_blk_handle_read/write()
  dma-helpers: Fix too long qiov
  qtest: fix vhost-user-test compilation with old GLib
  tests: Fix unterminated string output visitor enum human string
  AioContext: do not rely on aio_poll(ctx, true) result to end a loop
  virtio-blk: embed VirtQueueElement in VirtIOBlockReq
  virtio-blk: avoid g_slice_new0() for VirtIOBlockReq and VirtQueueElement
  dataplane: do not free VirtQueueElement in vring_push()
  virtio-blk: avoid dataplane VirtIOBlockReq early free
  block: Assert qiov length matches request length
  qed: Make qiov match request size until backing file EOF
  qcow2: Make qiov match request size until backing file EOF
  block: Make qiov match the request size until EOF
  AioContext: speed up aio_notify
  test-aio: fix GSource-based timer test
  block: drop aio functions that operate on the main AioContext
  block: prefer aio_poll to qemu_aio_wait
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
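A recurring theme in the ide and virtio-blk patches listed above is rejecting a read/write whose range falls outside the device before any I/O or accounting takes place. The following is only an illustrative sketch of that check; the helper name is hypothetical and the real code lives in the respective device models, not in these headers:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper: a request is invalid if it starts past the end of
 * the device or if its length would run past the end. */
static bool request_in_bounds(uint64_t sector, size_t bytes,
                              uint64_t total_sectors)
{
    uint64_t nb_sectors = bytes >> 9;           /* 512-byte sectors */

    if (sector > total_sectors) {
        return false;                           /* starts beyond the end */
    }
    if (nb_sectors > total_sectors - sector) {
        return false;                           /* runs beyond the end */
    }
    return true;
}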
Diffstat (limited to 'include')
-rw-r--r--  include/block/aio.h                    32
-rw-r--r--  include/block/blockjob.h                4
-rw-r--r--  include/block/coroutine.h               2
-rw-r--r--  include/hw/virtio/dataplane/vring.h     3
-rw-r--r--  include/hw/virtio/virtio-blk.h          6
-rw-r--r--  include/qemu-common.h                   1
6 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/include/block/aio.h b/include/block/aio.h
index a92511b..c23de3c 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -60,8 +60,14 @@ struct AioContext {
*/
int walking_handlers;
+ /* Used to avoid unnecessary event_notifier_set calls in aio_notify.
+ * Writes protected by lock or BQL, reads are lockless.
+ */
+ bool dispatching;
+
/* lock to protect between bh's adders and deleter */
QemuMutex bh_lock;
+
/* Anchor of the list of Bottom Halves belonging to the context */
struct QEMUBH *first_bh;
@@ -83,6 +89,9 @@ struct AioContext {
QEMUTimerListGroup tlg;
};
+/* Used internally to synchronize aio_poll against qemu_bh_schedule. */
+void aio_set_dispatching(AioContext *ctx, bool dispatching);
+
/**
* aio_context_new: Allocate a new AioContext.
*
@@ -205,9 +214,9 @@ bool aio_pending(AioContext *ctx);
/* Progress in completing AIO work to occur. This can issue new pending
* aio as a result of executing I/O completion or bh callbacks.
*
- * If there is no pending AIO operation or completion (bottom half),
- * return false. If there are pending AIO operations of bottom halves,
- * return true.
+ * Return whether any progress was made by executing AIO or bottom half
+ * handlers. If @blocking == true, this should always be true except
+ * if someone called aio_notify.
*
* If there are no pending bottom halves, but there are pending AIO
* operations, it may not be possible to make any progress without
@@ -220,7 +229,7 @@ bool aio_poll(AioContext *ctx, bool blocking);
#ifdef CONFIG_POSIX
/* Register a file descriptor and associated callbacks. Behaves very similarly
* to qemu_set_fd_handler2. Unlike qemu_set_fd_handler2, these callbacks will
- * be invoked when using qemu_aio_wait().
+ * be invoked when using aio_poll().
*
* Code that invokes AIO completion functions should rely on this function
* instead of qemu_set_fd_handler[2].
@@ -234,7 +243,7 @@ void aio_set_fd_handler(AioContext *ctx,
/* Register an event notifier and associated callbacks. Behaves very similarly
* to event_notifier_set_handler. Unlike event_notifier_set_handler, these callbacks
- * will be invoked when using qemu_aio_wait().
+ * will be invoked when using aio_poll().
*
* Code that invokes AIO completion functions should rely on this function
* instead of event_notifier_set_handler.
@@ -251,19 +260,6 @@ GSource *aio_get_g_source(AioContext *ctx);
/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);
-/* Functions to operate on the main QEMU AioContext. */
-
-bool qemu_aio_wait(void);
-void qemu_aio_set_event_notifier(EventNotifier *notifier,
- EventNotifierHandler *io_read);
-
-#ifdef CONFIG_POSIX
-void qemu_aio_set_fd_handler(int fd,
- IOHandler *io_read,
- IOHandler *io_write,
- void *opaque);
-#endif
-
/**
* aio_timer_new:
* @ctx: the aio context
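With qemu_aio_wait() and the other main-AioContext wrappers removed above, callers poll an explicit AioContext instead. A minimal sketch of the replacement pattern, assuming qemu_get_aio_context() from qemu/main-loop.h and a caller-maintained completion flag; per the updated aio_poll() comment, loops should test their own condition rather than the return value ("AioContext: do not rely on aio_poll(ctx, true) result to end a loop"):

#include "block/aio.h"
#include "qemu/main-loop.h"     /* for qemu_get_aio_context() */

/* Before this series:  while (!done) { qemu_aio_wait(); }
 * After this series:   poll the main AioContext explicitly. */
static void wait_until_done(bool *done)
{
    AioContext *ctx = qemu_get_aio_context();

    while (!*done) {
        aio_poll(ctx, true);    /* blocking: sleep until some progress */
    }
}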
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
index f3cf63f..60aa835 100644
--- a/include/block/blockjob.h
+++ b/include/block/blockjob.h
@@ -74,7 +74,7 @@ struct BlockJob {
* Set to true if the job should cancel itself. The flag must
* always be tested just before toggling the busy flag from false
* to true. After a job has been cancelled, it should only yield
- * if #qemu_aio_wait will ("sooner or later") reenter the coroutine.
+ * if #aio_poll will ("sooner or later") reenter the coroutine.
*/
bool cancelled;
@@ -87,7 +87,7 @@ struct BlockJob {
/**
* Set to false by the job while it is in a quiescent state, where
* no I/O is pending and the job has yielded on any condition
- * that is not detected by #qemu_aio_wait, such as a timer.
+ * that is not detected by #aio_poll, such as a timer.
*/
bool busy;
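The two comment fixes above only rename qemu_aio_wait to aio_poll; the cancelled/busy protocol itself is unchanged. As a rough, hedged sketch of how a job coroutine honours it (block_job_sleep_ns() clears busy around the yield, and cancellation is re-checked after every resume because aio_poll() may reenter the coroutine at any time):

#include "block/blockjob.h"

/* Sketch only: not a real block job, just the control-flow skeleton. */
static void coroutine_fn example_job_run(void *opaque)
{
    BlockJob *job = opaque;

    while (!block_job_is_cancelled(job)) {
        /* ... copy or scan one chunk of the image ... */

        /* busy is cleared while sleeping and set again on resume */
        block_job_sleep_ns(job, QEMU_CLOCK_REALTIME, 100000000 /* 100 ms */);
    }
}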
diff --git a/include/block/coroutine.h b/include/block/coroutine.h
index a1797ae..b408f96 100644
--- a/include/block/coroutine.h
+++ b/include/block/coroutine.h
@@ -212,7 +212,7 @@ void coroutine_fn co_sleep_ns(QEMUClockType type, int64_t ns);
* Yield the coroutine for a given duration
*
* Behaves similarly to co_sleep_ns(), but the sleeping coroutine will be
- * resumed when using qemu_aio_wait().
+ * resumed when using aio_poll().
*/
void coroutine_fn co_aio_sleep_ns(AioContext *ctx, QEMUClockType type,
int64_t ns);
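For completeness, a tiny sketch of the documented behaviour: the sleeping coroutine is woken by whichever thread runs aio_poll() on @ctx once the timer fires (the context pointer here is assumed to come from the caller's own setup):

#include "block/coroutine.h"

static void coroutine_fn throttle_a_bit(AioContext *ctx)
{
    /* yield for 10 ms; re-entered from aio_poll(ctx, ...) */
    co_aio_sleep_ns(ctx, QEMU_CLOCK_REALTIME, 10 * 1000 * 1000);
}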
diff --git a/include/hw/virtio/dataplane/vring.h b/include/hw/virtio/dataplane/vring.h
index 63e7bf4..af73ee2 100644
--- a/include/hw/virtio/dataplane/vring.h
+++ b/include/hw/virtio/dataplane/vring.h
@@ -53,8 +53,7 @@ void vring_teardown(Vring *vring, VirtIODevice *vdev, int n);
void vring_disable_notification(VirtIODevice *vdev, Vring *vring);
bool vring_enable_notification(VirtIODevice *vdev, Vring *vring);
bool vring_should_notify(VirtIODevice *vdev, Vring *vring);
-int vring_pop(VirtIODevice *vdev, Vring *vring, VirtQueueElement **elem);
+int vring_pop(VirtIODevice *vdev, Vring *vring, VirtQueueElement *elem);
void vring_push(Vring *vring, VirtQueueElement *elem, int len);
-void vring_free_element(VirtQueueElement *elem);
#endif /* VRING_H */
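The vring_pop() change above follows from "virtio-blk: embed VirtQueueElement in VirtIOBlockReq": the caller now supplies storage for the element instead of receiving a freshly allocated one, so vring_free_element() has nothing left to free and goes away. A rough sketch of the new calling convention (error handling trimmed; the request helpers are the ones declared in virtio-blk.h below):

#include "hw/virtio/dataplane/vring.h"
#include "hw/virtio/virtio-blk.h"

/* Sketch: pop directly into the element embedded in the request. */
static VirtIOBlockReq *example_pop(VirtIOBlock *s, VirtIODevice *vdev,
                                   Vring *vring)
{
    VirtIOBlockReq *req = virtio_blk_alloc_request(s);

    if (vring_pop(vdev, vring, &req->elem) < 0) {
        virtio_blk_free_request(req);   /* frees the embedded element too */
        return NULL;                    /* queue empty or error */
    }
    return req;
}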
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index b3080a2..afb7b8d 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -144,7 +144,7 @@ typedef struct MultiReqBuffer {
typedef struct VirtIOBlockReq {
VirtIOBlock *dev;
- VirtQueueElement *elem;
+ VirtQueueElement elem;
struct virtio_blk_inhdr *in;
struct virtio_blk_outhdr out;
QEMUIOVector qiov;
@@ -152,6 +152,10 @@ typedef struct VirtIOBlockReq {
BlockAcctCookie acct;
} VirtIOBlockReq;
+VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s);
+
+void virtio_blk_free_request(VirtIOBlockReq *req);
+
int virtio_blk_handle_scsi_req(VirtIOBlock *blk,
VirtQueueElement *elem);
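With the element embedded, a request becomes a single allocation managed through the two new helpers, and code that used to dereference req->elem->... now reads req->elem.... A hedged sketch of the non-dataplane lifecycle, assuming the regular virtqueue_pop()/VirtQueue API from hw/virtio/virtio.h:

#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-blk.h"

/* Sketch: one allocation per request, released with the new helper. */
static void example_handle_one(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req = virtio_blk_alloc_request(s);

    if (!virtqueue_pop(vq, &req->elem)) {   /* fills the embedded elem */
        virtio_blk_free_request(req);       /* nothing was popped */
        return;
    }
    /* ... parse req->out, build req->qiov, submit the I/O ... */
}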
diff --git a/include/qemu-common.h b/include/qemu-common.h
index ae76197..6ef8282 100644
--- a/include/qemu-common.h
+++ b/include/qemu-common.h
@@ -329,6 +329,7 @@ size_t qemu_iovec_memset(QEMUIOVector *qiov, size_t offset,
int fillc, size_t bytes);
ssize_t qemu_iovec_compare(QEMUIOVector *a, QEMUIOVector *b);
void qemu_iovec_clone(QEMUIOVector *dest, const QEMUIOVector *src, void *buf);
+void qemu_iovec_discard_back(QEMUIOVector *qiov, size_t bytes);
bool buffer_is_zero(const void *buf, size_t len);
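qemu_iovec_discard_back() is the helper behind the "make qiov match the request size until EOF" patches: after a read that straddles end-of-file is shrunk, the tail of the vector is dropped so qiov->size matches the bytes actually requested. A small self-contained sketch (buffer sizes are arbitrary):

#include "qemu-common.h"

static void example_trim(void)
{
    QEMUIOVector qiov;
    char a[2048], b[1024], c[1024];

    qemu_iovec_init(&qiov, 3);
    qemu_iovec_add(&qiov, a, sizeof(a));
    qemu_iovec_add(&qiov, b, sizeof(b));
    qemu_iovec_add(&qiov, c, sizeof(c));

    /* drop the last 512 bytes: qiov.size goes from 4096 to 3584 */
    qemu_iovec_discard_back(&qiov, 512);

    qemu_iovec_destroy(&qiov);
}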