author | Mike Snitzer <snitzer@redhat.com> | 2009-12-10 23:52:34 +0000
---|---|---
committer | Alasdair G Kergon <agk@redhat.com> | 2009-12-10 23:52:34 +0000
commit | 8a2d528620e228ddfd0df9cec0a16e034ff8db1d |
tree | ff93eac3e996df7bf046d08c7386d690c3fd5ee0 /drivers/md |
parent | 73dfd078cf8bfee4018fb22f1e2a24f2e05b69dc |
dm snapshot: merge consecutive chunks together
s->store->type->prepare_merge returns the number of chunks that can be
copied linearly, working backwards from the returned chunk number.
For example, if it returns 3 chunks with old_chunk == 10 and new_chunk
== 20, then chunk 20 can be copied to 10, chunk 19 to 9 and 18 to 8.
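To make the chunk arithmetic concrete, here is a small stand-alone C sketch (user-space, hypothetical helper, not part of the patch) that rewinds from the last chunk of the run to the start of the linear region, mirroring the `old_chunk + 1 - linear_chunks` adjustment in the diff below, and prints the 20->10, 19->9, 18->8 mapping for the example above:

```c
#include <stdio.h>

typedef unsigned long long chunk_t;

/*
 * Hypothetical helper (not in the patch): given the values reported by
 * prepare_merge -- the number of linearly copyable chunks plus the *last*
 * old/new chunk numbers of that run -- print the per-chunk mapping.
 */
static void show_linear_region(int linear_chunks, chunk_t old_chunk,
                               chunk_t new_chunk)
{
        /* Rewind to the first chunk of the linear region. */
        chunk_t first_old = old_chunk + 1 - linear_chunks;
        chunk_t first_new = new_chunk + 1 - linear_chunks;

        for (int i = linear_chunks - 1; i >= 0; i--)
                printf("copy chunk %llu -> %llu\n",
                       first_new + i, first_old + i);
}

int main(void)
{
        /* Matches the example above: prints 20->10, 19->9, 18->8. */
        show_linear_region(3, 10, 20);
        return 0;
}
```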
Until now, kcopyd copied only one chunk at a time. This patch copies
the full set at once.
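In the patch this is done by computing io_size = linear_chunks * chunk_size and clamping the destination to the end of the origin device before issuing a single dm_kcopyd_copy() (see the diff below). The following is only a rough user-space model of that sizing arithmetic, with hypothetical names and sector units assumed:

```c
#include <stdio.h>

typedef unsigned long long sector_t;

/*
 * Hypothetical user-space model of the sizing logic: one I/O covers all
 * linearly mergeable chunks, clamped so it does not run past the end of
 * the origin device.  All units are sectors.
 */
static sector_t merge_io_sectors(int linear_chunks, sector_t chunk_size,
                                 sector_t dest_sector, sector_t dev_size)
{
        sector_t io_size = (sector_t)linear_chunks * chunk_size;
        sector_t remaining = dev_size - dest_sector;

        return io_size < remaining ? io_size : remaining;
}

int main(void)
{
        /* 3 chunks of 16 sectors starting at sector 160 on a 200-sector device. */
        printf("%llu sectors\n", merge_io_sectors(3, 16, 160, 200));
        return 0;
}
```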
Consequently, snapshot_merge_next_chunks() needs to delay the merge if
writes are in progress to any chunk in the region being merged, not
just to the first chunk.
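The patch implements this by recording the whole region (first_merging_chunk and num_merging_chunks) and then calling __check_for_conflicting_io() once per chunk, as the diff below shows. The toy user-space sketch here only models the "check every chunk, not just the first" idea; the names are hypothetical, and the real code waits for each chunk's writes to drain rather than returning early.

```c
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long chunk_t;

/* Hypothetical stand-in for the kernel's per-chunk conflict test. */
static bool chunk_has_pending_writes(chunk_t chunk)
{
        return chunk == 9;      /* pretend chunk 9 still has a write in flight */
}

/*
 * The merge of a linear region may only proceed once *every* chunk in it
 * is free of in-flight writes, not just the first one.
 */
static bool region_is_quiescent(chunk_t first_chunk, int linear_chunks)
{
        for (int i = 0; i < linear_chunks; i++)
                if (chunk_has_pending_writes(first_chunk + i))
                        return false;
        return true;
}

int main(void)
{
        printf("region starting at chunk 8: %s\n",
               region_is_quiescent(8, 3) ? "quiescent" : "must wait");
        return 0;
}
```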
snapshot-merge's performance is now comparable to the original
snapshot-origin target.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/dm-snap.c | 31 |
1 file changed, 21 insertions(+), 10 deletions(-)
```diff
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 1498704..bb4b733 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -879,9 +879,10 @@ static void increment_pending_exceptions_done_count(void)
 
 static void snapshot_merge_next_chunks(struct dm_snapshot *s)
 {
-	int r;
+	int i, linear_chunks;
 	chunk_t old_chunk, new_chunk;
 	struct dm_io_region src, dest;
+	sector_t io_size;
 	uint64_t previous_count;
 
 	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
@@ -896,20 +897,28 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
 		goto shut;
 	}
 
-	r = s->store->type->prepare_merge(s->store, &old_chunk, &new_chunk);
-	if (r <= 0) {
-		if (r < 0)
+	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
+						      &new_chunk);
+	if (linear_chunks <= 0) {
+		if (linear_chunks < 0)
 			DMERR("Read error in exception store: "
 			      "shutting down merge");
 		goto shut;
 	}
 
-	/* TODO: use larger I/O size once we verify that kcopyd handles it */
+	/* Adjust old_chunk and new_chunk to reflect start of linear region */
+	old_chunk = old_chunk + 1 - linear_chunks;
+	new_chunk = new_chunk + 1 - linear_chunks;
+
+	/*
+	 * Use one (potentially large) I/O to copy all 'linear_chunks'
+	 * from the exception store to the origin
+	 */
+	io_size = linear_chunks * s->store->chunk_size;
 
 	dest.bdev = s->origin->bdev;
 	dest.sector = chunk_to_sector(s->store, old_chunk);
-	dest.count = min((sector_t)s->store->chunk_size,
-			 get_dev_size(dest.bdev) - dest.sector);
+	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);
 
 	src.bdev = s->cow->bdev;
 	src.sector = chunk_to_sector(s->store, new_chunk);
@@ -925,7 +934,7 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
 	 * significant impact on performance.
 	 */
 	previous_count = read_pending_exceptions_done_count();
-	while (origin_write_extent(s, dest.sector, s->store->chunk_size)) {
+	while (origin_write_extent(s, dest.sector, io_size)) {
 		wait_event(_pending_exceptions_done,
 			   (read_pending_exceptions_done_count() !=
 			    previous_count));
@@ -935,10 +944,12 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
 	down_write(&s->lock);
 	s->first_merging_chunk = old_chunk;
-	s->num_merging_chunks = 1;
+	s->num_merging_chunks = linear_chunks;
 	up_write(&s->lock);
 
-	__check_for_conflicting_io(s, old_chunk);
+	/* Wait until writes to all 'linear_chunks' drain */
+	for (i = 0; i < linear_chunks; i++)
+		__check_for_conflicting_io(s, old_chunk + i);
 
 	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0,
 		       merge_callback, s);
 	return;
```