author		Alasdair G Kergon <agk@redhat.com>	2006-10-03 01:15:28 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-10-03 08:04:14 -0700
commit		ba40a2aa6e6f3d084cf35c8b872fc9f18f91231f (patch)
tree		d32ec4949a0a9721e8955e7f0e98130019d95849
parent		927ffe7c9a156e259aae31c07dd76072c459ec57 (diff)
download	op-kernel-dev-ba40a2aa6e6f3d084cf35c8b872fc9f18f91231f.zip
		op-kernel-dev-ba40a2aa6e6f3d084cf35c8b872fc9f18f91231f.tar.gz
[PATCH] dm snapshot: tidy snapshot_map
This patch rearranges the snapshot_map code so that the functional changes in
subsequent patches are clearer.  The only functional change is to replace the
existing read lock with a write lock which the next patch needs.

Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	drivers/md/dm-snap.c	72
1 file changed, 26 insertions(+), 46 deletions(-)
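The locking change called out in the description amounts to swapping the rw_semaphore primitives used on the read path: the old code took s->lock for reading with down_read()/up_read(), while the rewritten path takes it for writing with down_write()/up_write(), so reads and writes now run under the same exclusive lock. A minimal sketch of the pattern (kernel-style C against the stock <linux/rwsem.h> API; the lock name and helper functions are illustrative, not the actual driver code):

#include <linux/rwsem.h>

static DECLARE_RWSEM(snap_lock);	/* stands in for s->lock */

/* Old read path: shared (reader) lock around the exception lookup. */
static void map_read_old(void)
{
	down_read(&snap_lock);
	/* lookup_exception() / remap the bio ... */
	up_read(&snap_lock);
}

/* New path: exclusive (writer) lock, which the next patch in the
 * series relies on. */
static void map_read_new(void)
{
	down_write(&snap_lock);
	/* lookup_exception() / remap the bio ... */
	up_write(&snap_lock);
}

The cost is that concurrent readers now serialise on s->lock; the FIXME added in the hunks below records that the write lock should eventually be taken only when an exception actually needs to be copied.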
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 58c444f..c5449f2 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -851,7 +851,6 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 {
 	struct exception *e;
 	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
-	int copy_needed = 0;
 	int r = 1;
 	chunk_t chunk;
 	struct pending_exception *pe = NULL;
@@ -866,29 +865,28 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 	if (unlikely(bio_barrier(bio)))
 		return -EOPNOTSUPP;
+	/* FIXME: should only take write lock if we need
+	 * to copy an exception */
+	down_write(&s->lock);
+
+	if (!s->valid) {
+		r = -EIO;
+		goto out_unlock;
+	}
+
+	/* If the block is already remapped - use that, else remap it */
+	e = lookup_exception(&s->complete, chunk);
+	if (e) {
+		remap_exception(s, e, bio);
+		goto out_unlock;
+	}
+
 	/*
 	 * Write to snapshot - higher level takes care of RW/RO
 	 * flags so we should only get this if we are
 	 * writeable.
 	 */
 	if (bio_rw(bio) == WRITE) {
-
-		/* FIXME: should only take write lock if we need
-		 * to copy an exception */
-		down_write(&s->lock);
-
-		if (!s->valid) {
-			r = -EIO;
-			goto out_unlock;
-		}
-
-		/* If the block is already remapped - use that, else remap it */
-		e = lookup_exception(&s->complete, chunk);
-		if (e) {
-			remap_exception(s, e, bio);
-			goto out_unlock;
-		}
-
 		pe = __find_pending_exception(s, bio);
 		if (!pe) {
 			__invalidate_snapshot(s, pe, -ENOMEM);
@@ -899,45 +897,27 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 		remap_exception(s, &pe->e, bio);
 		bio_list_add(&pe->snapshot_bios, bio);
+		r = 0;
+
 		if (!pe->started) {
 			/* this is protected by snap->lock */
 			pe->started = 1;
-			copy_needed = 1;
-		}
-
-		r = 0;
-
- out_unlock:
-		up_write(&s->lock);
-
-		if (copy_needed)
+			up_write(&s->lock);
 			start_copy(pe);
-	} else {
+			goto out;
+		}
+	} else
 		/*
 		 * FIXME: this read path scares me because we
 		 * always use the origin when we have a pending
 		 * exception. However I can't think of a
 		 * situation where this is wrong - ejt.
 		 */
+		bio->bi_bdev = s->origin->bdev;
-		/* Do reads */
-		down_read(&s->lock);
-
-		if (!s->valid) {
-			up_read(&s->lock);
-			return -EIO;
-		}
-
-		/* See if it it has been remapped */
-		e = lookup_exception(&s->complete, chunk);
-		if (e)
-			remap_exception(s, e, bio);
-		else
-			bio->bi_bdev = s->origin->bdev;
-
-		up_read(&s->lock);
-	}
-
+ out_unlock:
+	up_write(&s->lock);
+ out:
 	return r;
 }
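For readability, this is roughly what the map path looks like once the hunks above are applied. It is reconstructed from the diff itself, not copied from the tree: lines that fall outside the hunks (the chunk calculation at the top and the error handling after __find_pending_exception()) are elided, and the trailing parameter of the signature is omitted because the hunk header truncates it.

static int snapshot_map(struct dm_target *ti, struct bio *bio,
			... /* remaining argument truncated in the hunk header */)
{
	struct exception *e;
	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
	int r = 1;
	chunk_t chunk;
	struct pending_exception *pe = NULL;

	/* ... chunk calculation elided (outside the hunks) ... */

	if (unlikely(bio_barrier(bio)))
		return -EOPNOTSUPP;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __find_pending_exception(s, bio);
		if (!pe) {
			__invalidate_snapshot(s, pe, -ENOMEM);
			/* ... error handling elided (outside the hunks) ... */
		}

		remap_exception(s, &pe->e, bio);
		bio_list_add(&pe->snapshot_bios, bio);

		r = 0;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else
		/*
		 * FIXME: this read path scares me because we
		 * always use the origin when we have a pending
		 * exception. However I can't think of a
		 * situation where this is wrong - ejt.
		 */
		bio->bi_bdev = s->origin->bdev;

 out_unlock:
	up_write(&s->lock);
 out:
	return r;
}

Note how the validity check and the completed-exception lookup now run once, up front, under the write lock, instead of being duplicated in the WRITE and read branches; the read branch shrinks to a single remap to the origin device.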