author | Theodore Ts'o <tytso@mit.edu> | 2015-06-03 09:32:39 -0400
committer | Theodore Ts'o <tytso@mit.edu> | 2015-06-03 09:32:39 -0400
commit | 3dbb5eb9a3aa04f40e551338eee5e8d06f352fe8 (patch)
tree | c84bc425081af46b5692e437aa258adb0fe60846
parent | e298e73bd766768707a7af440691ce2f418f5acc (diff)
download | op-kernel-dev-3dbb5eb9a3aa04f40e551338eee5e8d06f352fe8.zip, op-kernel-dev-3dbb5eb9a3aa04f40e551338eee5e8d06f352fe8.tar.gz
ext4 crypto: allocate bounce pages using GFP_NOWAIT
Previously we allocated bounce pages using a combination of
alloc_page() and mempool_alloc() with the __GFP_WAIT bit set.
Instead, use mempool_alloc() with GFP_NOWAIT. The mempool_alloc()
function will try using alloc_pages() initially, and then only use the
mempool reserve of pages if alloc_pages() is unable to fulfill the
request.
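To make the change concrete, here is a minimal before/after sketch of the allocation pattern (the helper names are illustrative, not the actual ext4 functions; error handling and the context flags are omitted):

#include <linux/gfp.h>
#include <linux/mempool.h>

/*
 * Old pattern (illustrative): try the page allocator first and fall
 * back to the mempool reserve only when that fails.  The caller then
 * has to remember which allocator the page came from so it can later
 * be freed with either __free_page() or mempool_free().
 */
static struct page *bounce_page_old_style(mempool_t *pool)
{
	struct page *page = alloc_page(GFP_NOFS);

	if (!page)
		page = mempool_alloc(pool, GFP_NOFS);
	return page;		/* may be NULL if both attempts fail */
}

/*
 * New pattern: a single mempool_alloc(GFP_NOWAIT).  mempool_alloc()
 * itself calls the page allocator first and only dips into the
 * preallocated reserve when that fails, so every bounce page can be
 * released with mempool_free().
 */
static struct page *bounce_page_new_style(mempool_t *pool)
{
	return mempool_alloc(pool, GFP_NOWAIT);	/* NULL under pressure */
}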
This minimizes the impact on the mm layer when we need to do a
large amount of writeback of encrypted files. Jaegeuk Kim had
reported that under a heavy fio workload on a system with a
restricted amount of memory (which, unfortunately, includes many
mobile handsets), he had observed the OOM killer getting triggered
several times.
With GFP_NOWAIT, if the mempool_alloc() call fails, we will retry the page
writeback at a later time; the purpose of the mempool is to ensure
that we can write back at least 32 pages at a time, so we can
dispatch I/O more efficiently under high memory pressure. In
the future we should make this a tunable so we can determine the
best tradeoff between permanently sequestering memory and the ability
to quickly launder pages and free up memory when
necessary.
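As a rough idea of what such a tunable could look like, the sketch below exposes the pool size as a module parameter; the parameter name and the init helper are hypothetical, and only the 32-page default and the use of mempool_create_page_pool() are meant to mirror the existing code:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/moduleparam.h>

/* Hypothetical tunable: how many bounce pages to keep permanently
 * reserved for writeback of encrypted pages (default matches the
 * 32-page figure mentioned above). */
static unsigned int bounce_pool_pages = 32;
module_param(bounce_pool_pages, uint, 0444);

static mempool_t *ext4_bounce_page_pool;

static int __init ext4_bounce_pool_init(void)
{
	/* Preallocate the reserve so at least bounce_pool_pages pages
	 * can always be laundered, even when alloc_pages() fails. */
	ext4_bounce_page_pool = mempool_create_page_pool(bounce_pool_pages, 0);
	return ext4_bounce_page_pool ? 0 : -ENOMEM;
}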
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
-rw-r--r-- | fs/ext4/crypto.c | 27
-rw-r--r-- | fs/ext4/ext4_crypto.h | 3
2 files changed, 7 insertions, 23 deletions
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index efcb7c0..f5c82e8 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -71,12 +71,8 @@ void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
 {
 	unsigned long flags;
 
-	if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page) {
-		if (ctx->flags & EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
-			__free_page(ctx->w.bounce_page);
-		else
-			mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
-	}
+	if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page)
+		mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
 	ctx->w.bounce_page = NULL;
 	ctx->w.control_page = NULL;
 	if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
@@ -317,22 +313,11 @@ static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
 
 static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
 {
-	struct page *ciphertext_page = alloc_page(GFP_NOFS);
-
-	if (!ciphertext_page) {
-		/* This is a potential bottleneck, but at least we'll have
-		 * forward progress. */
-		ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-						GFP_NOFS);
-		if (ciphertext_page == NULL)
-			return ERR_PTR(-ENOMEM);
-		ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
-	} else {
-		ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
-	}
+	ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT);
+	if (ctx->w.bounce_page == NULL)
+		return ERR_PTR(-ENOMEM);
 	ctx->flags |= EXT4_WRITE_PATH_FL;
-	ctx->w.bounce_page = ciphertext_page;
-	return ciphertext_page;
+	return ctx->w.bounce_page;
 }
 
 /**
diff --git a/fs/ext4/ext4_crypto.h b/fs/ext4/ext4_crypto.h
index 34e0d24..ac7d4e8 100644
--- a/fs/ext4/ext4_crypto.h
+++ b/fs/ext4/ext4_crypto.h
@@ -83,8 +83,7 @@ struct ext4_crypt_info {
 };
 
 #define EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL	0x00000001
-#define EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL	0x00000002
-#define EXT4_WRITE_PATH_FL			0x00000004
+#define EXT4_WRITE_PATH_FL			0x00000002
 
 struct ext4_crypto_ctx {
 	union {
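For context, a write-path caller can handle the new -ENOMEM failure mode roughly as follows; this is a hypothetical sketch of the "retry the writeback later" behaviour described above, not code from this patch:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/* Hypothetical caller sketch: if no bounce page could be allocated,
 * leave the data page dirty so a later writeback pass retries it
 * instead of forcing a reclaim-heavy allocation now. */
static int submit_encrypted_page(struct writeback_control *wbc,
				 struct page *page,
				 struct page *bounce_page)
{
	if (IS_ERR(bounce_page)) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return PTR_ERR(bounce_page);
	}

	/* ... encrypt into bounce_page and submit the bio ... */
	return 0;
}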