author     John Snow <jsnow@redhat.com>    2015-04-17 19:49:58 -0400
committer  Kevin Wolf <kwolf@redhat.com>   2015-04-28 15:36:10 +0200
commit     d58d84539784d27c826924a79d9436178b07ff69
tree       dfc10d7edcb62f28355eab4d7b6ab3eb9bc0bca6 /block
parent     9bd2b08f27b9c27bb40d73b6466321b8c635086e
qmp: Add support of "dirty-bitmap" sync mode for drive-backup
For "dirty-bitmap" sync mode, the block job will iterate through the
given dirty bitmap to decide if a sector needs backup (backup all the
dirty clusters and skip clean ones), just as allocation conditions of
"top" sync mode.
Signed-off-by: Fam Zheng <famz@redhat.com>
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 1429314609-29776-11-git-send-email-jsnow@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
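The mechanism is easy to see in miniature. The stand-alone sketch below is not QEMU code (the bitmap layout, cluster size, and buffers are invented for illustration); it only models the decision the commit message describes: walk a per-cluster dirty map, copy the dirty clusters, and skip the clean ones.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy model: one dirty bit per cluster. QEMU's real bitmap has a
 * configurable granularity; see backup_run_incremental() below. */
#define CLUSTER_SIZE 4096
#define NUM_CLUSTERS 8

int main(void)
{
    static uint8_t src[NUM_CLUSTERS * CLUSTER_SIZE];
    static uint8_t dst[NUM_CLUSTERS * CLUSTER_SIZE];
    bool dirty[NUM_CLUSTERS] = { false, true, true, false,
                                 false, true, false, false };

    for (int64_t c = 0; c < NUM_CLUSTERS; c++) {
        if (!dirty[c]) {
            continue;   /* clean cluster: skipped, analogous to how
                         * sync=top skips unallocated clusters */
        }
        memcpy(dst + c * CLUSTER_SIZE, src + c * CLUSTER_SIZE,
               CLUSTER_SIZE);
        dirty[c] = false;   /* this cluster is now backed up */
        printf("copied cluster %" PRId64 "\n", c);
    }
    return 0;
}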
Diffstat (limited to 'block')
-rw-r--r--  block/backup.c  155
-rw-r--r--  block/mirror.c    4
2 files changed, 137 insertions, 22 deletions
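Two details in backup_run_incremental() below deserve a note: the bitmap granularity need not match the backup cluster size, so one dirty bit may cover several clusters (clusters_per_iter), and when the bitmap is finer than a cluster, the iterator has to be advanced past the cluster that was just copied (bdrv_set_dirty_iter) or it would be copied once per bit. A stand-alone sketch of that arithmetic follows; the cluster size and MAX macro here are illustrative, not QEMU's definitions.

#include <stdint.h>
#include <stdio.h>

#define BACKUP_CLUSTER_SIZE (64 * 1024)   /* illustrative value */
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* For a given bitmap granularity, how many backup clusters does one
 * dirty bit cover? Mirrors the clusters_per_iter computation below. */
static int clusters_per_iter(uint32_t granularity)
{
    return MAX(granularity / BACKUP_CLUSTER_SIZE, 1);
}

int main(void)
{
    /* Coarse bitmap: one dirty bit covers several clusters, so each
     * iterator hit triggers a run of cluster copies. */
    printf("granularity 256K -> %d clusters per dirty bit\n",
           clusters_per_iter(256 * 1024));

    /* Fine bitmap: many bits per cluster, hence the iterator
     * advancement described above. */
    printf("granularity 4K   -> %d clusters per dirty bit\n",
           clusters_per_iter(4 * 1024));
    return 0;
}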
diff --git a/block/backup.c b/block/backup.c
index 3312476..ddf9027 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -37,6 +37,8 @@ typedef struct CowRequest {
 typedef struct BackupBlockJob {
     BlockJob common;
     BlockDriverState *target;
+    /* bitmap for sync=dirty-bitmap */
+    BdrvDirtyBitmap *sync_bitmap;
     MirrorSyncMode sync_mode;
     RateLimit limit;
     BlockdevOnError on_source_error;
@@ -242,6 +244,91 @@ static void backup_complete(BlockJob *job, void *opaque)
     g_free(data);
 }
 
+static bool coroutine_fn yield_and_check(BackupBlockJob *job)
+{
+    if (block_job_is_cancelled(&job->common)) {
+        return true;
+    }
+
+    /* we need to yield so that bdrv_drain_all() returns.
+     * (without, VM does not reboot)
+     */
+    if (job->common.speed) {
+        uint64_t delay_ns = ratelimit_calculate_delay(&job->limit,
+                                                      job->sectors_read);
+        job->sectors_read = 0;
+        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
+    } else {
+        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
+    }
+
+    if (block_job_is_cancelled(&job->common)) {
+        return true;
+    }
+
+    return false;
+}
+
+static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
+{
+    bool error_is_read;
+    int ret = 0;
+    int clusters_per_iter;
+    uint32_t granularity;
+    int64_t sector;
+    int64_t cluster;
+    int64_t end;
+    int64_t last_cluster = -1;
+    BlockDriverState *bs = job->common.bs;
+    HBitmapIter hbi;
+
+    granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
+    clusters_per_iter = MAX((granularity / BACKUP_CLUSTER_SIZE), 1);
+    bdrv_dirty_iter_init(bs, job->sync_bitmap, &hbi);
+
+    /* Find the next dirty sector(s) */
+    while ((sector = hbitmap_iter_next(&hbi)) != -1) {
+        cluster = sector / BACKUP_SECTORS_PER_CLUSTER;
+
+        /* Fake progress updates for any clusters we skipped */
+        if (cluster != last_cluster + 1) {
+            job->common.offset += ((cluster - last_cluster - 1) *
+                                   BACKUP_CLUSTER_SIZE);
+        }
+
+        for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
+            do {
+                if (yield_and_check(job)) {
+                    return ret;
+                }
+                ret = backup_do_cow(bs, cluster * BACKUP_SECTORS_PER_CLUSTER,
+                                    BACKUP_SECTORS_PER_CLUSTER, &error_is_read);
+                if ((ret < 0) &&
+                    backup_error_action(job, error_is_read, -ret) ==
+                    BLOCK_ERROR_ACTION_REPORT) {
+                    return ret;
+                }
+            } while (ret < 0);
+        }
+
+        /* If the bitmap granularity is smaller than the backup granularity,
+         * we need to advance the iterator pointer to the next cluster. */
+        if (granularity < BACKUP_CLUSTER_SIZE) {
+            bdrv_set_dirty_iter(&hbi, cluster * BACKUP_SECTORS_PER_CLUSTER);
+        }
+
+        last_cluster = cluster - 1;
+    }
+
+    /* Play some final catchup with the progress meter */
+    end = DIV_ROUND_UP(job->common.len, BACKUP_CLUSTER_SIZE);
+    if (last_cluster + 1 < end) {
+        job->common.offset += ((end - last_cluster - 1) * BACKUP_CLUSTER_SIZE);
+    }
+
+    return ret;
+}
+
 static void coroutine_fn backup_run(void *opaque)
 {
     BackupBlockJob *job = opaque;
@@ -259,8 +346,7 @@ static void coroutine_fn backup_run(void *opaque)
     qemu_co_rwlock_init(&job->flush_rwlock);
 
     start = 0;
-    end = DIV_ROUND_UP(job->common.len / BDRV_SECTOR_SIZE,
-                       BACKUP_SECTORS_PER_CLUSTER);
+    end = DIV_ROUND_UP(job->common.len, BACKUP_CLUSTER_SIZE);
 
     job->bitmap = hbitmap_alloc(end, 0);
 
@@ -278,28 +364,13 @@ static void coroutine_fn backup_run(void *opaque)
             qemu_coroutine_yield();
             job->common.busy = true;
         }
+    } else if (job->sync_mode == MIRROR_SYNC_MODE_DIRTY_BITMAP) {
+        ret = backup_run_incremental(job);
     } else {
         /* Both FULL and TOP SYNC_MODE's require copying.. */
         for (; start < end; start++) {
             bool error_is_read;
-
-            if (block_job_is_cancelled(&job->common)) {
-                break;
-            }
-
-            /* we need to yield so that bdrv_drain_all() returns.
-             * (without, VM does not reboot)
-             */
-            if (job->common.speed) {
-                uint64_t delay_ns = ratelimit_calculate_delay(
-                        &job->limit, job->sectors_read);
-                job->sectors_read = 0;
-                block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
-            } else {
-                block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
-            }
-
-            if (block_job_is_cancelled(&job->common)) {
+            if (yield_and_check(job)) {
                 break;
             }
@@ -357,6 +428,18 @@ static void coroutine_fn backup_run(void *opaque)
     qemu_co_rwlock_wrlock(&job->flush_rwlock);
     qemu_co_rwlock_unlock(&job->flush_rwlock);
 
+    if (job->sync_bitmap) {
+        BdrvDirtyBitmap *bm;
+        if (ret < 0) {
+            /* Merge the successor back into the parent, delete nothing. */
+            bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
+            assert(bm);
+        } else {
+            /* Everything is fine, delete this bitmap and install the backup. */
+            bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
+            assert(bm);
+        }
+    }
     hbitmap_free(job->bitmap);
 
     bdrv_iostatus_disable(target);
@@ -369,6 +452,7 @@ static void coroutine_fn backup_run(void *opaque)
 
 void backup_start(BlockDriverState *bs, BlockDriverState *target,
                   int64_t speed, MirrorSyncMode sync_mode,
+                  BdrvDirtyBitmap *sync_bitmap,
                   BlockdevOnError on_source_error,
                   BlockdevOnError on_target_error,
                   BlockCompletionFunc *cb, void *opaque,
@@ -412,17 +496,36 @@ void backup_start(BlockDriverState *bs, BlockDriverState *target,
         return;
     }
 
+    if (sync_mode == MIRROR_SYNC_MODE_DIRTY_BITMAP) {
+        if (!sync_bitmap) {
+            error_setg(errp, "must provide a valid bitmap name for "
+                             "\"dirty-bitmap\" sync mode");
+            return;
+        }
+
+        /* Create a new bitmap, and freeze/disable this one. */
+        if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
+            return;
+        }
+    } else if (sync_bitmap) {
+        error_setg(errp,
+                   "a sync_bitmap was provided to backup_run, "
+                   "but received an incompatible sync_mode (%s)",
+                   MirrorSyncMode_lookup[sync_mode]);
+        return;
+    }
+
     len = bdrv_getlength(bs);
     if (len < 0) {
         error_setg_errno(errp, -len, "unable to get length for '%s'",
                          bdrv_get_device_name(bs));
-        return;
+        goto error;
    }
 
     BackupBlockJob *job = block_job_create(&backup_job_driver, bs, speed,
                                            cb, opaque, errp);
     if (!job) {
-        return;
+        goto error;
     }
 
     bdrv_op_block_all(target, job->common.blocker);
@@ -431,7 +534,15 @@ void backup_start(BlockDriverState *bs, BlockDriverState *target,
     job->on_target_error = on_target_error;
     job->target = target;
     job->sync_mode = sync_mode;
+    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_DIRTY_BITMAP ?
+                       sync_bitmap : NULL;
     job->common.len = len;
     job->common.co = qemu_coroutine_create(backup_run);
     qemu_coroutine_enter(job->common.co, job);
+    return;
+
+ error:
+    if (sync_bitmap) {
+        bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
+    }
 }
diff --git a/block/mirror.c b/block/mirror.c
index 29c4545..898f8ce 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -718,6 +718,10 @@ void mirror_start(BlockDriverState *bs, BlockDriverState *target,
     bool is_none_mode;
     BlockDriverState *base;
 
+    if (mode == MIRROR_SYNC_MODE_DIRTY_BITMAP) {
+        error_setg(errp, "Sync mode 'dirty-bitmap' not supported");
+        return;
+    }
     is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
     base = mode == MIRROR_SYNC_MODE_TOP ? bs->backing_hd : NULL;
     mirror_start_job(bs, target, replaces,
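The last hunks of backup.c manage the lifetime of the user's bitmap: backup_start() freezes it behind a successor (bdrv_dirty_bitmap_create_successor), and when the job finishes, the successor is either merged back into the parent on failure (bdrv_reclaim_dirty_bitmap) or installed in the parent's place on success (bdrv_dirty_bitmap_abdicate). Either way, writes made while the backup ran land in the successor and are never lost. A stand-alone toy model of the two transitions, with invented types rather than QEMU's implementation:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NBITS 8

typedef struct Bitmap {
    bool bits[NBITS];
    struct Bitmap *successor;   /* non-NULL while frozen */
} Bitmap;

/* Freeze: new writes are recorded in the successor only. */
static void create_successor(Bitmap *b)
{
    b->successor = calloc(1, sizeof(Bitmap));
}

/* Failure path: merge successor bits back into the parent so no
 * dirty information is lost, then unfreeze. */
static void reclaim(Bitmap *b)
{
    for (int i = 0; i < NBITS; i++) {
        b->bits[i] |= b->successor->bits[i];
    }
    free(b->successor);
    b->successor = NULL;
}

/* Success path: the successor (writes made during the backup)
 * replaces the parent's contents; the backed-up bits are cleared. */
static void abdicate(Bitmap *b)
{
    memcpy(b->bits, b->successor->bits, sizeof(b->bits));
    free(b->successor);
    b->successor = NULL;
}

int main(void)
{
    Bitmap fail = { .bits = { 1, 1 } };
    Bitmap ok   = { .bits = { 1, 1 } };

    /* Failure: bits 0, 1 and the in-flight write all survive. */
    create_successor(&fail);
    fail.successor->bits[5] = true;   /* guest write during the backup */
    reclaim(&fail);
    assert(fail.bits[0] && fail.bits[1] && fail.bits[5]);

    /* Success: only the in-flight write remains dirty. */
    create_successor(&ok);
    ok.successor->bits[5] = true;
    abdicate(&ok);
    assert(!ok.bits[0] && !ok.bits[1] && ok.bits[5]);

    printf("reclaim keeps old+new bits; abdicate keeps only new\n");
    return 0;
}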