author     Anthony Liguori <anthony@codemonkey.ws>   2013-08-20 09:52:18 -0500
committer  Anthony Liguori <anthony@codemonkey.ws>   2013-08-20 09:52:18 -0500
commit     9176e8fb8f78206bd4039f001aca0d931a47d663 (patch)
tree       01b2298af04ffc11e0546cb88150286a36f6b26c
parent     72420ce9f0bafef7d55563a8bd14237a5fe88a87 (diff)
parent     f2e5dca46b5ba4588c0756c5f272123585cbbf23 (diff)
download   hqemu-9176e8fb8f78206bd4039f001aca0d931a47d663.zip / hqemu-9176e8fb8f78206bd4039f001aca0d931a47d663.tar.gz
Merge remote-tracking branch 'stefanha/block-next' into staging
# By Stefan Hajnoczi
# Via Stefan Hajnoczi
* stefanha/block-next:
aio: drop io_flush argument
tests: drop event_active_cb()
thread-pool: drop thread_pool_active()
dataplane/virtio-blk: drop flush_true() and flush_io()
block/ssh: drop return_true()
block/sheepdog: drop have_co_req() and aio_flush_request()
block/rbd: drop qemu_rbd_aio_flush_cb()
block/nbd: drop nbd_have_request()
block/linux-aio: drop qemu_laio_completion_cb()
block/iscsi: drop iscsi_process_flush()
block/gluster: drop qemu_gluster_aio_flush_cb()
block/curl: drop curl_aio_flush()
aio: stop using .io_flush()
tests: adjust test-thread-pool to new aio_poll() semantics
tests: adjust test-aio to new aio_poll() semantics
dataplane/virtio-blk: check exit conditions before aio_poll()
block: stop relying on io_flush() in bdrv_drain_all()
block: ensure bdrv_drain_all() works during bdrv_delete()
Message-id: 1376921877-9576-1-git-send-email-stefanha@redhat.com
Signed-off-by: Anthony Liguori <anthony@codemonkey.ws>
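
As orientation for the diff below: the series removes the .io_flush() polling mechanism from the AioContext handler API. The prototypes in this sketch are the post-series signatures quoted from the include/block/aio.h hunk in this merge, with one converted caller quoted from block/nbd.c; it is an excerpt for context, not new code.

```c
/* include/block/aio.h after this series: the AioFlushHandler /
 * AioFlushEventNotifierHandler arguments are gone. */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque);

void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read);

/* A converted caller, from block/nbd.c: the flush callback
 * (nbd_have_request) is simply dropped from the registration. */
qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write, s);
```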
-rw-r--r--   aio-posix.c                      |  36
-rw-r--r--   aio-win32.c                      |  37
-rw-r--r--   async.c                          |   4
-rw-r--r--   block.c                          |  49
-rw-r--r--   block/curl.c                     |  25
-rw-r--r--   block/gluster.c                  |  23
-rw-r--r--   block/iscsi.c                    |  10
-rw-r--r--   block/linux-aio.c                |  18
-rw-r--r--   block/nbd.c                      |  18
-rw-r--r--   block/rbd.c                      |  16
-rw-r--r--   block/sheepdog.c                 |  33
-rw-r--r--   block/ssh.c                      |  12
-rw-r--r--   block/stream.c                   |   6
-rw-r--r--   hw/block/dataplane/virtio-blk.c  |  25
-rw-r--r--   include/block/aio.h              |  14
-rw-r--r--   main-loop.c                      |   9
-rw-r--r--   tests/test-aio.c                 |  82
-rw-r--r--   tests/test-thread-pool.c         |  24
-rw-r--r--   thread-pool.c                    |  11
19 files changed, 163 insertions(+), 289 deletions(-)
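
The series also changes what aio_poll() reports: it now returns whether progress was made (a handler or bottom half actually ran), so callers that previously leaned on the io_flush-driven wait helpers now poll until their own completion condition holds. The following pattern is quoted from the tests/test-thread-pool.c conversion in this patch and shows the new caller-side idiom.

```c
/* tests/test-thread-pool.c after this series: the qemu_aio_wait_all()
 * helper is gone, so the test polls until its own completion condition
 * holds instead of relying on .io_flush() to report outstanding work. */
WorkerTestData data = { .n = 0 };

thread_pool_submit(pool, worker_cb, &data);   /* worker_cb sets data.n = 1 */

while (data.n == 0) {
    aio_poll(ctx, true);                      /* blocking poll until progress */
}
g_assert_cmpint(data.n, ==, 1);
```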
diff --git a/aio-posix.c b/aio-posix.c index b68eccd..2440eb9 100644 --- a/aio-posix.c +++ b/aio-posix.c @@ -23,7 +23,6 @@ struct AioHandler GPollFD pfd; IOHandler *io_read; IOHandler *io_write; - AioFlushHandler *io_flush; int deleted; int pollfds_idx; void *opaque; @@ -47,7 +46,6 @@ void aio_set_fd_handler(AioContext *ctx, int fd, IOHandler *io_read, IOHandler *io_write, - AioFlushHandler *io_flush, void *opaque) { AioHandler *node; @@ -84,7 +82,6 @@ void aio_set_fd_handler(AioContext *ctx, /* Update handler with latest information */ node->io_read = io_read; node->io_write = io_write; - node->io_flush = io_flush; node->opaque = opaque; node->pollfds_idx = -1; @@ -97,12 +94,10 @@ void aio_set_fd_handler(AioContext *ctx, void aio_set_event_notifier(AioContext *ctx, EventNotifier *notifier, - EventNotifierHandler *io_read, - AioFlushEventNotifierHandler *io_flush) + EventNotifierHandler *io_read) { aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), - (IOHandler *)io_read, NULL, - (AioFlushHandler *)io_flush, notifier); + (IOHandler *)io_read, NULL, notifier); } bool aio_pending(AioContext *ctx) @@ -147,7 +142,11 @@ static bool aio_dispatch(AioContext *ctx) (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) && node->io_read) { node->io_read(node->opaque); - progress = true; + + /* aio_notify() does not count as progress */ + if (node->opaque != &ctx->notifier) { + progress = true; + } } if (!node->deleted && (revents & (G_IO_OUT | G_IO_ERR)) && @@ -173,7 +172,7 @@ bool aio_poll(AioContext *ctx, bool blocking) { AioHandler *node; int ret; - bool busy, progress; + bool progress; progress = false; @@ -200,20 +199,8 @@ bool aio_poll(AioContext *ctx, bool blocking) g_array_set_size(ctx->pollfds, 0); /* fill pollfds */ - busy = false; QLIST_FOREACH(node, &ctx->aio_handlers, node) { node->pollfds_idx = -1; - - /* If there aren't pending AIO operations, don't invoke callbacks. - * Otherwise, if there are no AIO requests, qemu_aio_wait() would - * wait indefinitely. - */ - if (!node->deleted && node->io_flush) { - if (node->io_flush(node->opaque) == 0) { - continue; - } - busy = true; - } if (!node->deleted && node->pfd.events) { GPollFD pfd = { .fd = node->pfd.fd, @@ -226,8 +213,8 @@ bool aio_poll(AioContext *ctx, bool blocking) ctx->walking_handlers--; - /* No AIO operations? 
Get us out of here */ - if (!busy) { + /* early return if we only have the aio_notify() fd */ + if (ctx->pollfds->len == 1) { return progress; } @@ -250,6 +237,5 @@ bool aio_poll(AioContext *ctx, bool blocking) } } - assert(progress || busy); - return true; + return progress; } diff --git a/aio-win32.c b/aio-win32.c index 38723bf..78b2801 100644 --- a/aio-win32.c +++ b/aio-win32.c @@ -23,7 +23,6 @@ struct AioHandler { EventNotifier *e; EventNotifierHandler *io_notify; - AioFlushEventNotifierHandler *io_flush; GPollFD pfd; int deleted; QLIST_ENTRY(AioHandler) node; @@ -31,8 +30,7 @@ struct AioHandler { void aio_set_event_notifier(AioContext *ctx, EventNotifier *e, - EventNotifierHandler *io_notify, - AioFlushEventNotifierHandler *io_flush) + EventNotifierHandler *io_notify) { AioHandler *node; @@ -73,7 +71,6 @@ void aio_set_event_notifier(AioContext *ctx, } /* Update handler with latest information */ node->io_notify = io_notify; - node->io_flush = io_flush; } aio_notify(ctx); @@ -96,7 +93,7 @@ bool aio_poll(AioContext *ctx, bool blocking) { AioHandler *node; HANDLE events[MAXIMUM_WAIT_OBJECTS + 1]; - bool busy, progress; + bool progress; int count; progress = false; @@ -126,7 +123,11 @@ bool aio_poll(AioContext *ctx, bool blocking) if (node->pfd.revents && node->io_notify) { node->pfd.revents = 0; node->io_notify(node->e); - progress = true; + + /* aio_notify() does not count as progress */ + if (node->opaque != &ctx->notifier) { + progress = true; + } } tmp = node; @@ -147,19 +148,8 @@ bool aio_poll(AioContext *ctx, bool blocking) ctx->walking_handlers++; /* fill fd sets */ - busy = false; count = 0; QLIST_FOREACH(node, &ctx->aio_handlers, node) { - /* If there aren't pending AIO operations, don't invoke callbacks. - * Otherwise, if there are no AIO requests, qemu_aio_wait() would - * wait indefinitely. - */ - if (!node->deleted && node->io_flush) { - if (node->io_flush(node->e) == 0) { - continue; - } - busy = true; - } if (!node->deleted && node->io_notify) { events[count++] = event_notifier_get_handle(node->e); } @@ -167,8 +157,8 @@ bool aio_poll(AioContext *ctx, bool blocking) ctx->walking_handlers--; - /* No AIO operations? 
Get us out of here */ - if (!busy) { + /* early return if we only have the aio_notify() fd */ + if (count == 1) { return progress; } @@ -196,7 +186,11 @@ bool aio_poll(AioContext *ctx, bool blocking) event_notifier_get_handle(node->e) == events[ret - WAIT_OBJECT_0] && node->io_notify) { node->io_notify(node->e); - progress = true; + + /* aio_notify() does not count as progress */ + if (node->opaque != &ctx->notifier) { + progress = true; + } } tmp = node; @@ -214,6 +208,5 @@ bool aio_poll(AioContext *ctx, bool blocking) events[ret - WAIT_OBJECT_0] = events[--count]; } - assert(progress || busy); - return true; + return progress; } @@ -201,7 +201,7 @@ aio_ctx_finalize(GSource *source) AioContext *ctx = (AioContext *) source; thread_pool_free(ctx->thread_pool); - aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL); + aio_set_event_notifier(ctx, &ctx->notifier, NULL); event_notifier_cleanup(&ctx->notifier); qemu_mutex_destroy(&ctx->bh_lock); g_array_free(ctx->pollfds, TRUE); @@ -243,7 +243,7 @@ AioContext *aio_context_new(void) event_notifier_init(&ctx->notifier, false); aio_set_event_notifier(ctx, &ctx->notifier, (EventNotifierHandler *) - event_notifier_test_and_clear, NULL); + event_notifier_test_and_clear); return ctx; } @@ -148,7 +148,6 @@ static void bdrv_block_timer(void *opaque) void bdrv_io_limits_enable(BlockDriverState *bs) { - qemu_co_queue_init(&bs->throttled_reqs); bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs); bs->io_limits_enabled = true; } @@ -306,6 +305,7 @@ BlockDriverState *bdrv_new(const char *device_name) bdrv_iostatus_disable(bs); notifier_list_init(&bs->close_notifiers); notifier_with_return_list_init(&bs->before_write_notifiers); + qemu_co_queue_init(&bs->throttled_reqs); return bs; } @@ -1428,6 +1428,35 @@ void bdrv_close_all(void) } } +/* Check if any requests are in-flight (including throttled requests) */ +static bool bdrv_requests_pending(BlockDriverState *bs) +{ + if (!QLIST_EMPTY(&bs->tracked_requests)) { + return true; + } + if (!qemu_co_queue_empty(&bs->throttled_reqs)) { + return true; + } + if (bs->file && bdrv_requests_pending(bs->file)) { + return true; + } + if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) { + return true; + } + return false; +} + +static bool bdrv_requests_pending_all(void) +{ + BlockDriverState *bs; + QTAILQ_FOREACH(bs, &bdrv_states, list) { + if (bdrv_requests_pending(bs)) { + return true; + } + } + return false; +} + /* * Wait for pending requests to complete across all BlockDriverStates * @@ -1442,12 +1471,11 @@ void bdrv_close_all(void) */ void bdrv_drain_all(void) { + /* Always run first iteration so any pending completion BHs run */ + bool busy = true; BlockDriverState *bs; - bool busy; - - do { - busy = qemu_aio_wait(); + while (busy) { /* FIXME: We do not have timer support here, so this is effectively * a busy wait. 
*/ @@ -1456,12 +1484,9 @@ void bdrv_drain_all(void) busy = true; } } - } while (busy); - /* If requests are still pending there is a bug somewhere */ - QTAILQ_FOREACH(bs, &bdrv_states, list) { - assert(QLIST_EMPTY(&bs->tracked_requests)); - assert(qemu_co_queue_empty(&bs->throttled_reqs)); + busy = bdrv_requests_pending_all(); + busy |= aio_poll(qemu_get_aio_context(), busy); } } @@ -1606,11 +1631,11 @@ void bdrv_delete(BlockDriverState *bs) assert(!bs->job); assert(!bs->in_use); + bdrv_close(bs); + /* remove from list, if necessary */ bdrv_make_anon(bs); - bdrv_close(bs); - g_free(bs); } diff --git a/block/curl.c b/block/curl.c index 82d39ff..e566855 100644 --- a/block/curl.c +++ b/block/curl.c @@ -86,7 +86,6 @@ typedef struct BDRVCURLState { static void curl_clean_state(CURLState *s); static void curl_multi_do(void *arg); -static int curl_aio_flush(void *opaque); static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action, void *s, void *sp) @@ -94,17 +93,16 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action, DPRINTF("CURL (AIO): Sock action %d on fd %d\n", action, fd); switch (action) { case CURL_POLL_IN: - qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, curl_aio_flush, s); + qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, s); break; case CURL_POLL_OUT: - qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, curl_aio_flush, s); + qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, s); break; case CURL_POLL_INOUT: - qemu_aio_set_fd_handler(fd, curl_multi_do, curl_multi_do, - curl_aio_flush, s); + qemu_aio_set_fd_handler(fd, curl_multi_do, curl_multi_do, s); break; case CURL_POLL_REMOVE: - qemu_aio_set_fd_handler(fd, NULL, NULL, NULL, NULL); + qemu_aio_set_fd_handler(fd, NULL, NULL, NULL); break; } @@ -495,21 +493,6 @@ out_noclean: return -EINVAL; } -static int curl_aio_flush(void *opaque) -{ - BDRVCURLState *s = opaque; - int i, j; - - for (i=0; i < CURL_NUM_STATES; i++) { - for(j=0; j < CURL_NUM_ACB; j++) { - if (s->states[i].acb[j]) { - return 1; - } - } - } - return 0; -} - static void curl_aio_cancel(BlockDriverAIOCB *blockacb) { // Do we have to implement canceling? Seems to work without... 
diff --git a/block/gluster.c b/block/gluster.c index 645b7f1..46f36f8 100644 --- a/block/gluster.c +++ b/block/gluster.c @@ -32,7 +32,6 @@ typedef struct BDRVGlusterState { struct glfs *glfs; int fds[2]; struct glfs_fd *fd; - int qemu_aio_count; int event_reader_pos; GlusterAIOCB *event_acb; } BDRVGlusterState; @@ -247,7 +246,6 @@ static void qemu_gluster_complete_aio(GlusterAIOCB *acb, BDRVGlusterState *s) ret = -EIO; /* Partial read/write - fail it */ } - s->qemu_aio_count--; qemu_aio_release(acb); cb(opaque, ret); if (finished) { @@ -275,13 +273,6 @@ static void qemu_gluster_aio_event_reader(void *opaque) } while (ret < 0 && errno == EINTR); } -static int qemu_gluster_aio_flush_cb(void *opaque) -{ - BDRVGlusterState *s = opaque; - - return (s->qemu_aio_count > 0); -} - /* TODO Convert to fine grained options */ static QemuOptsList runtime_opts = { .name = "gluster", @@ -348,7 +339,7 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options, } fcntl(s->fds[GLUSTER_FD_READ], F_SETFL, O_NONBLOCK); qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], - qemu_gluster_aio_event_reader, NULL, qemu_gluster_aio_flush_cb, s); + qemu_gluster_aio_event_reader, NULL, s); out: qemu_opts_del(opts); @@ -445,11 +436,9 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg) qemu_mutex_lock_iothread(); /* We are in gluster thread context */ acb->common.cb(acb->common.opaque, -EIO); qemu_aio_release(acb); - s->qemu_aio_count--; close(s->fds[GLUSTER_FD_READ]); close(s->fds[GLUSTER_FD_WRITE]); - qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL, - NULL); + qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL); bs->drv = NULL; /* Make the disk inaccessible */ qemu_mutex_unlock_iothread(); } @@ -467,7 +456,6 @@ static BlockDriverAIOCB *qemu_gluster_aio_rw(BlockDriverState *bs, offset = sector_num * BDRV_SECTOR_SIZE; size = nb_sectors * BDRV_SECTOR_SIZE; - s->qemu_aio_count++; acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque); acb->size = size; @@ -488,7 +476,6 @@ static BlockDriverAIOCB *qemu_gluster_aio_rw(BlockDriverState *bs, return &acb->common; out: - s->qemu_aio_count--; qemu_aio_release(acb); return NULL; } @@ -531,7 +518,6 @@ static BlockDriverAIOCB *qemu_gluster_aio_flush(BlockDriverState *bs, acb->size = 0; acb->ret = 0; acb->finished = NULL; - s->qemu_aio_count++; ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb); if (ret < 0) { @@ -540,7 +526,6 @@ static BlockDriverAIOCB *qemu_gluster_aio_flush(BlockDriverState *bs, return &acb->common; out: - s->qemu_aio_count--; qemu_aio_release(acb); return NULL; } @@ -563,7 +548,6 @@ static BlockDriverAIOCB *qemu_gluster_aio_discard(BlockDriverState *bs, acb->size = 0; acb->ret = 0; acb->finished = NULL; - s->qemu_aio_count++; ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb); if (ret < 0) { @@ -572,7 +556,6 @@ static BlockDriverAIOCB *qemu_gluster_aio_discard(BlockDriverState *bs, return &acb->common; out: - s->qemu_aio_count--; qemu_aio_release(acb); return NULL; } @@ -611,7 +594,7 @@ static void qemu_gluster_close(BlockDriverState *bs) close(s->fds[GLUSTER_FD_READ]); close(s->fds[GLUSTER_FD_WRITE]); - qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL, NULL); + qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL); if (s->fd) { glfs_close(s->fd); diff --git a/block/iscsi.c b/block/iscsi.c index e7c1c2b..47a3adc 100644 --- a/block/iscsi.c +++ b/block/iscsi.c @@ -146,13 +146,6 @@ static const AIOCBInfo iscsi_aiocb_info = { static void 
iscsi_process_read(void *arg); static void iscsi_process_write(void *arg); -static int iscsi_process_flush(void *arg) -{ - IscsiLun *iscsilun = arg; - - return iscsi_queue_length(iscsilun->iscsi) > 0; -} - static void iscsi_set_events(IscsiLun *iscsilun) { @@ -166,7 +159,6 @@ iscsi_set_events(IscsiLun *iscsilun) qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), iscsi_process_read, (ev & POLLOUT) ? iscsi_process_write : NULL, - iscsi_process_flush, iscsilun); } @@ -1215,7 +1207,7 @@ static void iscsi_close(BlockDriverState *bs) qemu_del_timer(iscsilun->nop_timer); qemu_free_timer(iscsilun->nop_timer); } - qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL, NULL); + qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL); iscsi_destroy_context(iscsi); memset(iscsilun, 0, sizeof(IscsiLun)); } diff --git a/block/linux-aio.c b/block/linux-aio.c index ee0f8d1..53434e2 100644 --- a/block/linux-aio.c +++ b/block/linux-aio.c @@ -39,7 +39,6 @@ struct qemu_laiocb { struct qemu_laio_state { io_context_t ctx; EventNotifier e; - int count; }; static inline ssize_t io_event_ret(struct io_event *ev) @@ -55,8 +54,6 @@ static void qemu_laio_process_completion(struct qemu_laio_state *s, { int ret; - s->count--; - ret = laiocb->ret; if (ret != -ECANCELED) { if (ret == laiocb->nbytes) { @@ -101,13 +98,6 @@ static void qemu_laio_completion_cb(EventNotifier *e) } } -static int qemu_laio_flush_cb(EventNotifier *e) -{ - struct qemu_laio_state *s = container_of(e, struct qemu_laio_state, e); - - return (s->count > 0) ? 1 : 0; -} - static void laio_cancel(BlockDriverAIOCB *blockacb) { struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb; @@ -177,14 +167,11 @@ BlockDriverAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd, goto out_free_aiocb; } io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e)); - s->count++; if (io_submit(s->ctx, 1, &iocbs) < 0) - goto out_dec_count; + goto out_free_aiocb; return &laiocb->common; -out_dec_count: - s->count--; out_free_aiocb: qemu_aio_release(laiocb); return NULL; @@ -203,8 +190,7 @@ void *laio_init(void) goto out_close_efd; } - qemu_aio_set_event_notifier(&s->e, qemu_laio_completion_cb, - qemu_laio_flush_cb); + qemu_aio_set_event_notifier(&s->e, qemu_laio_completion_cb); return s; diff --git a/block/nbd.c b/block/nbd.c index 9c480b8..691066f 100644 --- a/block/nbd.c +++ b/block/nbd.c @@ -279,13 +279,6 @@ static void nbd_coroutine_start(BDRVNBDState *s, struct nbd_request *request) request->handle = INDEX_TO_HANDLE(s, i); } -static int nbd_have_request(void *opaque) -{ - BDRVNBDState *s = opaque; - - return s->in_flight > 0; -} - static void nbd_reply_ready(void *opaque) { BDRVNBDState *s = opaque; @@ -341,8 +334,7 @@ static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request, qemu_co_mutex_lock(&s->send_mutex); s->send_coroutine = qemu_coroutine_self(); - qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write, - nbd_have_request, s); + qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write, s); if (qiov) { if (!s->is_unix) { socket_set_cork(s->sock, 1); @@ -361,8 +353,7 @@ static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request, } else { rc = nbd_send_request(s->sock, request); } - qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL, - nbd_have_request, s); + qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL, s); s->send_coroutine = NULL; qemu_co_mutex_unlock(&s->send_mutex); return rc; @@ -438,8 +429,7 @@ static int nbd_establish_connection(BlockDriverState *bs) /* Now 
that we're connected, set the socket to be non-blocking and * kick the reply mechanism. */ qemu_set_nonblock(sock); - qemu_aio_set_fd_handler(sock, nbd_reply_ready, NULL, - nbd_have_request, s); + qemu_aio_set_fd_handler(sock, nbd_reply_ready, NULL, s); s->sock = sock; s->size = size; @@ -459,7 +449,7 @@ static void nbd_teardown_connection(BlockDriverState *bs) request.len = 0; nbd_send_request(s->sock, &request); - qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL); + qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL); closesocket(s->sock); } diff --git a/block/rbd.c b/block/rbd.c index cb71751..e798e19 100644 --- a/block/rbd.c +++ b/block/rbd.c @@ -100,7 +100,6 @@ typedef struct BDRVRBDState { rados_ioctx_t io_ctx; rbd_image_t image; char name[RBD_MAX_IMAGE_NAME_SIZE]; - int qemu_aio_count; char *snap; int event_reader_pos; RADOSCB *event_rcb; @@ -428,19 +427,11 @@ static void qemu_rbd_aio_event_reader(void *opaque) if (s->event_reader_pos == sizeof(s->event_rcb)) { s->event_reader_pos = 0; qemu_rbd_complete_aio(s->event_rcb); - s->qemu_aio_count--; } } } while (ret < 0 && errno == EINTR); } -static int qemu_rbd_aio_flush_cb(void *opaque) -{ - BDRVRBDState *s = opaque; - - return (s->qemu_aio_count > 0); -} - /* TODO Convert to fine grained options */ static QemuOptsList runtime_opts = { .name = "rbd", @@ -554,7 +545,7 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags) fcntl(s->fds[0], F_SETFL, O_NONBLOCK); fcntl(s->fds[1], F_SETFL, O_NONBLOCK); qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], qemu_rbd_aio_event_reader, - NULL, qemu_rbd_aio_flush_cb, s); + NULL, s); qemu_opts_del(opts); @@ -578,7 +569,7 @@ static void qemu_rbd_close(BlockDriverState *bs) close(s->fds[0]); close(s->fds[1]); - qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL, NULL); + qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL); rbd_close(s->image); rados_ioctx_destroy(s->io_ctx); @@ -741,8 +732,6 @@ static BlockDriverAIOCB *rbd_start_aio(BlockDriverState *bs, off = sector_num * BDRV_SECTOR_SIZE; size = nb_sectors * BDRV_SECTOR_SIZE; - s->qemu_aio_count++; /* All the RADOSCB */ - rcb = g_malloc(sizeof(RADOSCB)); rcb->done = 0; rcb->acb = acb; @@ -779,7 +768,6 @@ static BlockDriverAIOCB *rbd_start_aio(BlockDriverState *bs, failed: g_free(rcb); - s->qemu_aio_count--; qemu_aio_release(acb); return NULL; } diff --git a/block/sheepdog.c b/block/sheepdog.c index afe0533..1ad4d07 100644 --- a/block/sheepdog.c +++ b/block/sheepdog.c @@ -509,13 +509,6 @@ static void restart_co_req(void *opaque) qemu_coroutine_enter(co, NULL); } -static int have_co_req(void *opaque) -{ - /* this handler is set only when there is a pending request, so - * always returns 1. */ - return 1; -} - typedef struct SheepdogReqCo { int sockfd; SheepdogReq *hdr; @@ -538,14 +531,14 @@ static coroutine_fn void do_co_req(void *opaque) unsigned int *rlen = srco->rlen; co = qemu_coroutine_self(); - qemu_aio_set_fd_handler(sockfd, NULL, restart_co_req, have_co_req, co); + qemu_aio_set_fd_handler(sockfd, NULL, restart_co_req, co); ret = send_co_req(sockfd, hdr, data, wlen); if (ret < 0) { goto out; } - qemu_aio_set_fd_handler(sockfd, restart_co_req, NULL, have_co_req, co); + qemu_aio_set_fd_handler(sockfd, restart_co_req, NULL, co); ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr)); if (ret < sizeof(*hdr)) { @@ -570,7 +563,7 @@ static coroutine_fn void do_co_req(void *opaque) out: /* there is at most one request for this sockfd, so it is safe to * set each handler to NULL. 
*/ - qemu_aio_set_fd_handler(sockfd, NULL, NULL, NULL, NULL); + qemu_aio_set_fd_handler(sockfd, NULL, NULL, NULL); srco->ret = ret; srco->finished = true; @@ -796,14 +789,6 @@ static void co_write_request(void *opaque) qemu_coroutine_enter(s->co_send, NULL); } -static int aio_flush_request(void *opaque) -{ - BDRVSheepdogState *s = opaque; - - return !QLIST_EMPTY(&s->inflight_aio_head) || - !QLIST_EMPTY(&s->pending_aio_head); -} - /* * Return a socket discriptor to read/write objects. * @@ -819,7 +804,7 @@ static int get_sheep_fd(BDRVSheepdogState *s) return fd; } - qemu_aio_set_fd_handler(fd, co_read_response, NULL, aio_flush_request, s); + qemu_aio_set_fd_handler(fd, co_read_response, NULL, s); return fd; } @@ -1069,8 +1054,7 @@ static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req, qemu_co_mutex_lock(&s->lock); s->co_send = qemu_coroutine_self(); - qemu_aio_set_fd_handler(s->fd, co_read_response, co_write_request, - aio_flush_request, s); + qemu_aio_set_fd_handler(s->fd, co_read_response, co_write_request, s); socket_set_cork(s->fd, 1); /* send a header */ @@ -1091,8 +1075,7 @@ static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req, } socket_set_cork(s->fd, 0); - qemu_aio_set_fd_handler(s->fd, co_read_response, NULL, - aio_flush_request, s); + qemu_aio_set_fd_handler(s->fd, co_read_response, NULL, s); qemu_co_mutex_unlock(&s->lock); return 0; @@ -1350,7 +1333,7 @@ static int sd_open(BlockDriverState *bs, QDict *options, int flags) g_free(buf); return 0; out: - qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL); + qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL); if (s->fd >= 0) { closesocket(s->fd); } @@ -1578,7 +1561,7 @@ static void sd_close(BlockDriverState *bs) error_report("%s, %s", sd_strerror(rsp->result), s->name); } - qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL); + qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL); closesocket(s->fd); g_free(s->host_spec); } diff --git a/block/ssh.c b/block/ssh.c index d7e7bf8..27691b4 100644 --- a/block/ssh.c +++ b/block/ssh.c @@ -740,14 +740,6 @@ static void restart_coroutine(void *opaque) qemu_coroutine_enter(co, NULL); } -/* Always true because when we have called set_fd_handler there is - * always a request being processed. - */ -static int return_true(void *opaque) -{ - return 1; -} - static coroutine_fn void set_fd_handler(BDRVSSHState *s) { int r; @@ -766,13 +758,13 @@ static coroutine_fn void set_fd_handler(BDRVSSHState *s) DPRINTF("s->sock=%d rd_handler=%p wr_handler=%p", s->sock, rd_handler, wr_handler); - qemu_aio_set_fd_handler(s->sock, rd_handler, wr_handler, return_true, co); + qemu_aio_set_fd_handler(s->sock, rd_handler, wr_handler, co); } static coroutine_fn void clear_fd_handler(BDRVSSHState *s) { DPRINTF("s->sock=%d", s->sock); - qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL); + qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL); } /* A non-blocking call returned EAGAIN, so yield, ensuring the diff --git a/block/stream.c b/block/stream.c index 7fe9e48..db49b4d 100644 --- a/block/stream.c +++ b/block/stream.c @@ -57,6 +57,11 @@ static void close_unused_images(BlockDriverState *top, BlockDriverState *base, BlockDriverState *intermediate; intermediate = top->backing_hd; + /* Must assign before bdrv_delete() to prevent traversing dangling pointer + * while we delete backing image instances. 
+ */ + top->backing_hd = base; + while (intermediate) { BlockDriverState *unused; @@ -70,7 +75,6 @@ static void close_unused_images(BlockDriverState *top, BlockDriverState *base, unused->backing_hd = NULL; bdrv_delete(unused); } - top->backing_hd = base; } static void coroutine_fn stream_run(void *opaque) diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c index 411becc..5a96ccd 100644 --- a/hw/block/dataplane/virtio-blk.c +++ b/hw/block/dataplane/virtio-blk.c @@ -261,11 +261,6 @@ static int process_request(IOQueue *ioq, struct iovec iov[], } } -static int flush_true(EventNotifier *e) -{ - return true; -} - static void handle_notify(EventNotifier *e) { VirtIOBlockDataPlane *s = container_of(e, VirtIOBlockDataPlane, @@ -345,14 +340,6 @@ static void handle_notify(EventNotifier *e) } } -static int flush_io(EventNotifier *e) -{ - VirtIOBlockDataPlane *s = container_of(e, VirtIOBlockDataPlane, - io_notifier); - - return s->num_reqs > 0; -} - static void handle_io(EventNotifier *e) { VirtIOBlockDataPlane *s = container_of(e, VirtIOBlockDataPlane, @@ -376,9 +363,9 @@ static void *data_plane_thread(void *opaque) { VirtIOBlockDataPlane *s = opaque; - do { + while (!s->stopping || s->num_reqs > 0) { aio_poll(s->ctx, true); - } while (!s->stopping || s->num_reqs > 0); + } return NULL; } @@ -485,7 +472,7 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s) exit(1); } s->host_notifier = *virtio_queue_get_host_notifier(vq); - aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify, flush_true); + aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify); /* Set up ioqueue */ ioq_init(&s->ioqueue, s->fd, REQ_MAX); @@ -493,7 +480,7 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s) ioq_put_iocb(&s->ioqueue, &s->requests[i].iocb); } s->io_notifier = *ioq_get_notifier(&s->ioqueue); - aio_set_event_notifier(s->ctx, &s->io_notifier, handle_io, flush_io); + aio_set_event_notifier(s->ctx, &s->io_notifier, handle_io); s->started = true; trace_virtio_blk_data_plane_start(s); @@ -525,10 +512,10 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s) qemu_thread_join(&s->thread); } - aio_set_event_notifier(s->ctx, &s->io_notifier, NULL, NULL); + aio_set_event_notifier(s->ctx, &s->io_notifier, NULL); ioq_cleanup(&s->ioqueue); - aio_set_event_notifier(s->ctx, &s->host_notifier, NULL, NULL); + aio_set_event_notifier(s->ctx, &s->host_notifier, NULL); k->set_host_notifier(qbus->parent, 0, false); aio_context_unref(s->ctx); diff --git a/include/block/aio.h b/include/block/aio.h index cc77771..5743bf1 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -74,9 +74,6 @@ typedef struct AioContext { struct ThreadPool *thread_pool; } AioContext; -/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */ -typedef int (AioFlushEventNotifierHandler)(EventNotifier *e); - /** * aio_context_new: Allocate a new AioContext. * @@ -198,9 +195,6 @@ bool aio_pending(AioContext *ctx); bool aio_poll(AioContext *ctx, bool blocking); #ifdef CONFIG_POSIX -/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */ -typedef int (AioFlushHandler)(void *opaque); - /* Register a file descriptor and associated callbacks. Behaves very similarly * to qemu_set_fd_handler2. Unlike qemu_set_fd_handler2, these callbacks will * be invoked when using qemu_aio_wait(). 
@@ -212,7 +206,6 @@ void aio_set_fd_handler(AioContext *ctx, int fd, IOHandler *io_read, IOHandler *io_write, - AioFlushHandler *io_flush, void *opaque); #endif @@ -225,8 +218,7 @@ void aio_set_fd_handler(AioContext *ctx, */ void aio_set_event_notifier(AioContext *ctx, EventNotifier *notifier, - EventNotifierHandler *io_read, - AioFlushEventNotifierHandler *io_flush); + EventNotifierHandler *io_read); /* Return a GSource that lets the main loop poll the file descriptors attached * to this AioContext. @@ -240,14 +232,12 @@ struct ThreadPool *aio_get_thread_pool(AioContext *ctx); bool qemu_aio_wait(void); void qemu_aio_set_event_notifier(EventNotifier *notifier, - EventNotifierHandler *io_read, - AioFlushEventNotifierHandler *io_flush); + EventNotifierHandler *io_read); #ifdef CONFIG_POSIX void qemu_aio_set_fd_handler(int fd, IOHandler *io_read, IOHandler *io_write, - AioFlushHandler *io_flush, void *opaque); #endif diff --git a/main-loop.c b/main-loop.c index a44fff6..2d9774e 100644 --- a/main-loop.c +++ b/main-loop.c @@ -489,17 +489,14 @@ bool qemu_aio_wait(void) void qemu_aio_set_fd_handler(int fd, IOHandler *io_read, IOHandler *io_write, - AioFlushHandler *io_flush, void *opaque) { - aio_set_fd_handler(qemu_aio_context, fd, io_read, io_write, io_flush, - opaque); + aio_set_fd_handler(qemu_aio_context, fd, io_read, io_write, opaque); } #endif void qemu_aio_set_event_notifier(EventNotifier *notifier, - EventNotifierHandler *io_read, - AioFlushEventNotifierHandler *io_flush) + EventNotifierHandler *io_read) { - aio_set_event_notifier(qemu_aio_context, notifier, io_read, io_flush); + aio_set_event_notifier(qemu_aio_context, notifier, io_read); } diff --git a/tests/test-aio.c b/tests/test-aio.c index c173870..1ab5637 100644 --- a/tests/test-aio.c +++ b/tests/test-aio.c @@ -15,6 +15,13 @@ AioContext *ctx; +typedef struct { + EventNotifier e; + int n; + int active; + bool auto_set; +} EventNotifierTestData; + /* Wait until there are no more BHs or AIO requests */ static void wait_for_aio(void) { @@ -23,6 +30,14 @@ static void wait_for_aio(void) } } +/* Wait until event notifier becomes inactive */ +static void wait_until_inactive(EventNotifierTestData *data) +{ + while (data->active > 0) { + aio_poll(ctx, true); + } +} + /* Simple callbacks for testing. 
*/ typedef struct { @@ -50,19 +65,6 @@ static void bh_delete_cb(void *opaque) } } -typedef struct { - EventNotifier e; - int n; - int active; - bool auto_set; -} EventNotifierTestData; - -static int event_active_cb(EventNotifier *e) -{ - EventNotifierTestData *data = container_of(e, EventNotifierTestData, e); - return data->active > 0; -} - static void event_ready_cb(EventNotifier *e) { EventNotifierTestData *data = container_of(e, EventNotifierTestData, e); @@ -231,11 +233,11 @@ static void test_set_event_notifier(void) { EventNotifierTestData data = { .n = 0, .active = 0 }; event_notifier_init(&data.e, false); - aio_set_event_notifier(ctx, &data.e, event_ready_cb, event_active_cb); + aio_set_event_notifier(ctx, &data.e, event_ready_cb); g_assert(!aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 0); - aio_set_event_notifier(ctx, &data.e, NULL, NULL); + aio_set_event_notifier(ctx, &data.e, NULL); g_assert(!aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 0); event_notifier_cleanup(&data.e); @@ -245,8 +247,8 @@ static void test_wait_event_notifier(void) { EventNotifierTestData data = { .n = 0, .active = 1 }; event_notifier_init(&data.e, false); - aio_set_event_notifier(ctx, &data.e, event_ready_cb, event_active_cb); - g_assert(aio_poll(ctx, false)); + aio_set_event_notifier(ctx, &data.e, event_ready_cb); + g_assert(!aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 0); g_assert_cmpint(data.active, ==, 1); @@ -259,7 +261,7 @@ static void test_wait_event_notifier(void) g_assert_cmpint(data.n, ==, 1); g_assert_cmpint(data.active, ==, 0); - aio_set_event_notifier(ctx, &data.e, NULL, NULL); + aio_set_event_notifier(ctx, &data.e, NULL); g_assert(!aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 1); @@ -270,8 +272,8 @@ static void test_flush_event_notifier(void) { EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true }; event_notifier_init(&data.e, false); - aio_set_event_notifier(ctx, &data.e, event_ready_cb, event_active_cb); - g_assert(aio_poll(ctx, false)); + aio_set_event_notifier(ctx, &data.e, event_ready_cb); + g_assert(!aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 0); g_assert_cmpint(data.active, ==, 10); @@ -281,12 +283,12 @@ static void test_flush_event_notifier(void) g_assert_cmpint(data.active, ==, 9); g_assert(aio_poll(ctx, false)); - wait_for_aio(); + wait_until_inactive(&data); g_assert_cmpint(data.n, ==, 10); g_assert_cmpint(data.active, ==, 0); g_assert(!aio_poll(ctx, false)); - aio_set_event_notifier(ctx, &data.e, NULL, NULL); + aio_set_event_notifier(ctx, &data.e, NULL); g_assert(!aio_poll(ctx, false)); event_notifier_cleanup(&data.e); } @@ -297,7 +299,7 @@ static void test_wait_event_notifier_noflush(void) EventNotifierTestData dummy = { .n = 0, .active = 1 }; event_notifier_init(&data.e, false); - aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL); + aio_set_event_notifier(ctx, &data.e, event_ready_cb); g_assert(!aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 0); @@ -305,35 +307,35 @@ static void test_wait_event_notifier_noflush(void) /* Until there is an active descriptor, aio_poll may or may not call * event_ready_cb. Still, it must not block. */ event_notifier_set(&data.e); - g_assert(!aio_poll(ctx, true)); + g_assert(aio_poll(ctx, true)); data.n = 0; /* An active event notifier forces aio_poll to look at EventNotifiers. 
*/ event_notifier_init(&dummy.e, false); - aio_set_event_notifier(ctx, &dummy.e, event_ready_cb, event_active_cb); + aio_set_event_notifier(ctx, &dummy.e, event_ready_cb); event_notifier_set(&data.e); g_assert(aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 1); - g_assert(aio_poll(ctx, false)); + g_assert(!aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 1); event_notifier_set(&data.e); g_assert(aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 2); - g_assert(aio_poll(ctx, false)); + g_assert(!aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 2); event_notifier_set(&dummy.e); - wait_for_aio(); + wait_until_inactive(&dummy); g_assert_cmpint(data.n, ==, 2); g_assert_cmpint(dummy.n, ==, 1); g_assert_cmpint(dummy.active, ==, 0); - aio_set_event_notifier(ctx, &dummy.e, NULL, NULL); + aio_set_event_notifier(ctx, &dummy.e, NULL); event_notifier_cleanup(&dummy.e); - aio_set_event_notifier(ctx, &data.e, NULL, NULL); + aio_set_event_notifier(ctx, &data.e, NULL); g_assert(!aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 2); @@ -513,11 +515,11 @@ static void test_source_set_event_notifier(void) { EventNotifierTestData data = { .n = 0, .active = 0 }; event_notifier_init(&data.e, false); - aio_set_event_notifier(ctx, &data.e, event_ready_cb, event_active_cb); + aio_set_event_notifier(ctx, &data.e, event_ready_cb); while (g_main_context_iteration(NULL, false)); g_assert_cmpint(data.n, ==, 0); - aio_set_event_notifier(ctx, &data.e, NULL, NULL); + aio_set_event_notifier(ctx, &data.e, NULL); while (g_main_context_iteration(NULL, false)); g_assert_cmpint(data.n, ==, 0); event_notifier_cleanup(&data.e); @@ -527,7 +529,7 @@ static void test_source_wait_event_notifier(void) { EventNotifierTestData data = { .n = 0, .active = 1 }; event_notifier_init(&data.e, false); - aio_set_event_notifier(ctx, &data.e, event_ready_cb, event_active_cb); + aio_set_event_notifier(ctx, &data.e, event_ready_cb); g_assert(g_main_context_iteration(NULL, false)); g_assert_cmpint(data.n, ==, 0); g_assert_cmpint(data.active, ==, 1); @@ -541,7 +543,7 @@ static void test_source_wait_event_notifier(void) g_assert_cmpint(data.n, ==, 1); g_assert_cmpint(data.active, ==, 0); - aio_set_event_notifier(ctx, &data.e, NULL, NULL); + aio_set_event_notifier(ctx, &data.e, NULL); while (g_main_context_iteration(NULL, false)); g_assert_cmpint(data.n, ==, 1); @@ -552,7 +554,7 @@ static void test_source_flush_event_notifier(void) { EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true }; event_notifier_init(&data.e, false); - aio_set_event_notifier(ctx, &data.e, event_ready_cb, event_active_cb); + aio_set_event_notifier(ctx, &data.e, event_ready_cb); g_assert(g_main_context_iteration(NULL, false)); g_assert_cmpint(data.n, ==, 0); g_assert_cmpint(data.active, ==, 10); @@ -568,7 +570,7 @@ static void test_source_flush_event_notifier(void) g_assert_cmpint(data.active, ==, 0); g_assert(!g_main_context_iteration(NULL, false)); - aio_set_event_notifier(ctx, &data.e, NULL, NULL); + aio_set_event_notifier(ctx, &data.e, NULL); while (g_main_context_iteration(NULL, false)); event_notifier_cleanup(&data.e); } @@ -579,7 +581,7 @@ static void test_source_wait_event_notifier_noflush(void) EventNotifierTestData dummy = { .n = 0, .active = 1 }; event_notifier_init(&data.e, false); - aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL); + aio_set_event_notifier(ctx, &data.e, event_ready_cb); while (g_main_context_iteration(NULL, false)); g_assert_cmpint(data.n, ==, 0); @@ -592,7 +594,7 @@ static void 
test_source_wait_event_notifier_noflush(void) /* An active event notifier forces aio_poll to look at EventNotifiers. */ event_notifier_init(&dummy.e, false); - aio_set_event_notifier(ctx, &dummy.e, event_ready_cb, event_active_cb); + aio_set_event_notifier(ctx, &dummy.e, event_ready_cb); event_notifier_set(&data.e); g_assert(g_main_context_iteration(NULL, false)); @@ -612,10 +614,10 @@ static void test_source_wait_event_notifier_noflush(void) g_assert_cmpint(dummy.n, ==, 1); g_assert_cmpint(dummy.active, ==, 0); - aio_set_event_notifier(ctx, &dummy.e, NULL, NULL); + aio_set_event_notifier(ctx, &dummy.e, NULL); event_notifier_cleanup(&dummy.e); - aio_set_event_notifier(ctx, &data.e, NULL, NULL); + aio_set_event_notifier(ctx, &data.e, NULL); while (g_main_context_iteration(NULL, false)); g_assert_cmpint(data.n, ==, 2); diff --git a/tests/test-thread-pool.c b/tests/test-thread-pool.c index b62338f..8188d1a 100644 --- a/tests/test-thread-pool.c +++ b/tests/test-thread-pool.c @@ -40,19 +40,13 @@ static void done_cb(void *opaque, int ret) active--; } -/* Wait until all aio and bh activity has finished */ -static void qemu_aio_wait_all(void) -{ - while (aio_poll(ctx, true)) { - /* Do nothing */ - } -} - static void test_submit(void) { WorkerTestData data = { .n = 0 }; thread_pool_submit(pool, worker_cb, &data); - qemu_aio_wait_all(); + while (data.n == 0) { + aio_poll(ctx, true); + } g_assert_cmpint(data.n, ==, 1); } @@ -65,7 +59,9 @@ static void test_submit_aio(void) /* The callbacks are not called until after the first wait. */ active = 1; g_assert_cmpint(data.ret, ==, -EINPROGRESS); - qemu_aio_wait_all(); + while (data.ret == -EINPROGRESS) { + aio_poll(ctx, true); + } g_assert_cmpint(active, ==, 0); g_assert_cmpint(data.n, ==, 1); g_assert_cmpint(data.ret, ==, 0); @@ -103,7 +99,9 @@ static void test_submit_co(void) /* qemu_aio_wait_all will execute the rest of the coroutine. */ - qemu_aio_wait_all(); + while (data.ret == -EINPROGRESS) { + aio_poll(ctx, true); + } /* Back here after the coroutine has finished. */ @@ -187,7 +185,9 @@ static void test_cancel(void) } /* Finish execution and execute any remaining callbacks. */ - qemu_aio_wait_all(); + while (active > 0) { + aio_poll(ctx, true); + } g_assert_cmpint(active, ==, 0); for (i = 0; i < 100; i++) { if (data[i].n == 3) { diff --git a/thread-pool.c b/thread-pool.c index 0ebd4c2..5025567 100644 --- a/thread-pool.c +++ b/thread-pool.c @@ -197,12 +197,6 @@ restart: } } -static int thread_pool_active(EventNotifier *notifier) -{ - ThreadPool *pool = container_of(notifier, ThreadPool, notifier); - return !QLIST_EMPTY(&pool->head); -} - static void thread_pool_cancel(BlockDriverAIOCB *acb) { ThreadPoolElement *elem = (ThreadPoolElement *)acb; @@ -309,8 +303,7 @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx) QLIST_INIT(&pool->head); QTAILQ_INIT(&pool->request_list); - aio_set_event_notifier(ctx, &pool->notifier, event_notifier_ready, - thread_pool_active); + aio_set_event_notifier(ctx, &pool->notifier, event_notifier_ready); } ThreadPool *thread_pool_new(AioContext *ctx) @@ -344,7 +337,7 @@ void thread_pool_free(ThreadPool *pool) qemu_mutex_unlock(&pool->lock); - aio_set_event_notifier(pool->ctx, &pool->notifier, NULL, NULL); + aio_set_event_notifier(pool->ctx, &pool->notifier, NULL); qemu_sem_destroy(&pool->sem); qemu_cond_destroy(&pool->check_cancel); qemu_cond_destroy(&pool->worker_stopped); |
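
For reference, the core of the bdrv_drain_all() change in block.c is the switch from trusting .io_flush() to an explicit pending-request check combined with aio_poll(). The sketch below is condensed from the block.c hunks above; the throttled-request restart inside the loop is elided, so read it as an illustration of the new control flow rather than the complete function.

```c
/* Condensed from block.c in this series (throttled-request restart elided). */
static bool bdrv_requests_pending_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        if (bdrv_requests_pending(bs)) {   /* tracked or throttled requests */
            return true;
        }
    }
    return false;
}

void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;

    while (busy) {
        /* ... restart throttled requests on each BlockDriverState ... */

        busy = bdrv_requests_pending_all();
        busy |= aio_poll(qemu_get_aio_context(), busy);
    }
}
```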