author     Milan Broz <mbroz@redhat.com>              2008-10-10 13:37:08 +0100
committer  Alasdair G Kergon <agk@redhat.com>         2008-10-10 13:37:08 +0100
commit     933f01d43326fb12a978a8e0bb062c28a2de4d5a (patch)
tree       89f25c15fb98d90ee7377437482e444a8ac6a106 /drivers/md
parent     c8081618a9f832fdf7ca81eb087f9f61f2bf07d5 (diff)
dm crypt: avoid unnecessary wait when splitting bio
Don't wait between submitting crypt requests for a bio unless
we are short of memory.
There are two situations when we must split an encrypted bio:
1) there are no free pages;
2) the new bio would violate underlying device restrictions
(e.g. max hw segments).
In case (2) we do not need to wait.
Add an output variable to crypt_alloc_buffer() to distinguish between
these cases.
Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
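The caller-side pattern this patch introduces can be sketched in ordinary
userspace C. This is an illustrative sketch only, not the dm-crypt code:
alloc_buffer(), MAX_CHUNK and the usleep() throttle are made-up stand-ins
for crypt_alloc_buffer(), the device restriction and congestion_wait().
The point it shows is that the allocator reports why it returned less than
requested, so the submission loop only throttles when the shortfall was
caused by memory pressure rather than by a size restriction.

/*
 * Illustrative sketch: an allocator that reports *why* it returned less
 * than requested, and a caller that only throttles on memory pressure.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define MAX_CHUNK 4096		/* stand-in for a device size restriction */

/* Allocate up to 'size' bytes; *out_of_pages is set when malloc() failed. */
static void *alloc_buffer(size_t size, size_t *allocated, int *out_of_pages)
{
	void *buf;

	*out_of_pages = 0;
	if (size > MAX_CHUNK)
		size = MAX_CHUNK;	/* case (2): size restriction, no wait needed */

	while (!(buf = malloc(size))) {
		*out_of_pages = 1;	/* case (1): short of memory */
		size /= 2;
		if (!size)
			return NULL;
	}

	*allocated = size;
	return buf;
}

int main(void)
{
	size_t remaining = 3 * MAX_CHUNK + 123, got;
	int out_of_pages;

	while (remaining) {
		void *buf = alloc_buffer(remaining, &got, &out_of_pages);

		if (!buf)
			return 1;	/* analogue of io->error = -ENOMEM */

		/* ... encrypt and submit 'got' bytes here ... */
		remaining -= got;
		free(buf);

		/* Throttle only when the split was due to memory pressure. */
		if (out_of_pages)
			usleep(10000);	/* analogue of congestion_wait(WRITE, HZ/100) */
	}

	printf("all fragments submitted\n");
	return 0;
}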
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-crypt.c | 26 ++++++++++++++++++--------
1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f6018f5..682ef9e 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -457,9 +457,11 @@ static void dm_crypt_bio_destructor(struct bio *bio)
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages
+ * May return a smaller bio when running out of pages, indicated by
+ * *out_of_pages set to 1.
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
+				      unsigned *out_of_pages)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
@@ -473,11 +475,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 		return NULL;
 
 	clone_init(io, clone);
+	*out_of_pages = 0;
 
 	for (i = 0; i < nr_iovecs; i++) {
 		page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!page)
+		if (!page) {
+			*out_of_pages = 1;
 			break;
+		}
 
 		/*
 		 * if additional pages cannot be allocated without waiting,
@@ -696,6 +701,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
 	int crypt_finished;
+	unsigned out_of_pages = 0;
 	unsigned remaining = io->base_bio->bi_size;
 	int r;
 
@@ -710,7 +716,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	 * so repeat the whole process until all the data can be handled.
 	 */
 	while (remaining) {
-		clone = crypt_alloc_buffer(io, remaining);
+		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
 		if (unlikely(!clone)) {
 			io->error = -ENOMEM;
 			break;
@@ -737,11 +743,15 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 				break;
 		}
 
-		/* out of memory -> run queues */
-		if (unlikely(remaining)) {
-			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
+		/*
+		 * Out of memory -> run queues
+		 * But don't wait if split was due to the io size restriction
+		 */
+		if (unlikely(out_of_pages))
 			congestion_wait(WRITE, HZ/100);
-		}
+
+		if (unlikely(remaining))
+			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
 	}
 
 	crypt_dec_pending(io);