path: root/fs/btrfs/inode.c
author    David Sterba <dsterba@suse.com>    2017-02-14 19:04:07 +0100
committer David Sterba <dsterba@suse.com>    2017-02-28 14:26:35 +0100
commit    4d3a800ebb1299944408f3b40b5b6b996477fba2 (patch)
tree      165a58136bd55ad94dec5154f6e03a47bf4763a9 /fs/btrfs/inode.c
parent    38c31464089f639630b7c28ce933a4d60e135a02 (diff)
btrfs: merge nr_pages input and output parameter in compress_pages
The parameter saying how many pages can be allocated at maximum can be merged with the output page counter, to save some stack space. The compression implementation will sink the parameter to a local variable, so everything works as before. The nr_pages and nr_pages_ret variables in compress_file_range can likewise be merged into one.

Signed-off-by: David Sterba <dsterba@suse.com>
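To illustrate the calling convention this change moves to, here is a minimal, self-contained C sketch. The helper dummy_compress_pages() and its placeholder "compression" loop are hypothetical stand-ins, not the real btrfs_compress_pages(); they only show how a single nr_pages pointer carries the allocation limit in and the used-page count out, with the callee sinking the limit into a local variable.

/*
 * Hypothetical sketch of the merged in/out parameter pattern; the names
 * below are illustrative only and do not exist in the kernel tree.
 */
#include <stdio.h>

static int dummy_compress_pages(unsigned long *nr_pages)
{
	/* Sink the input limit to a local variable, as described above. */
	unsigned long max_pages = *nr_pages;
	unsigned long used = 0;

	/* Pretend the compressed output fits in half of the allowed pages. */
	while (used < max_pages / 2)
		used++;

	/* The same variable now reports how many pages were actually used. */
	*nr_pages = used;
	return 0;
}

int main(void)
{
	unsigned long nr_pages = 32;	/* maximum pages allowed on input */

	if (!dummy_compress_pages(&nr_pages))
		printf("used %lu pages\n", nr_pages);	/* prints: used 16 pages */
	return 0;
}

With this shape, the caller keeps a single variable for both the limit and the result, which is what lets compress_file_range drop nr_pages_ret below.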
Diffstat (limited to 'fs/btrfs/inode.c')
-rw-r--r--  fs/btrfs/inode.c  13
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8b9eac3..83bbe1e 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -430,7 +430,6 @@ static noinline void compress_file_range(struct inode *inode,
int ret = 0;
struct page **pages = NULL;
unsigned long nr_pages;
- unsigned long nr_pages_ret = 0;
unsigned long total_compressed = 0;
unsigned long total_in = 0;
unsigned long max_compressed = SZ_128K;
@@ -518,7 +517,7 @@ again:
ret = btrfs_compress_pages(compress_type,
inode->i_mapping, start,
pages,
- nr_pages, &nr_pages_ret,
+ &nr_pages,
&total_in,
&total_compressed,
max_compressed);
@@ -526,7 +525,7 @@ again:
if (!ret) {
unsigned long offset = total_compressed &
(PAGE_SIZE - 1);
- struct page *page = pages[nr_pages_ret - 1];
+ struct page *page = pages[nr_pages - 1];
char *kaddr;
/* zero the tail end of the last page, we might be
@@ -607,7 +606,7 @@ cont:
* will submit them to the elevator.
*/
add_async_extent(async_cow, start, num_bytes,
- total_compressed, pages, nr_pages_ret,
+ total_compressed, pages, nr_pages,
compress_type);
if (start + num_bytes < end) {
@@ -624,14 +623,14 @@ cont:
* the compression code ran but failed to make things smaller,
* free any pages it allocated and our page pointer array
*/
- for (i = 0; i < nr_pages_ret; i++) {
+ for (i = 0; i < nr_pages; i++) {
WARN_ON(pages[i]->mapping);
put_page(pages[i]);
}
kfree(pages);
pages = NULL;
total_compressed = 0;
- nr_pages_ret = 0;
+ nr_pages = 0;
/* flag the file so we don't compress in the future */
if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
@@ -660,7 +659,7 @@ cleanup_and_bail_uncompressed:
return;
free_pages_out:
- for (i = 0; i < nr_pages_ret; i++) {
+ for (i = 0; i < nr_pages; i++) {
WARN_ON(pages[i]->mapping);
put_page(pages[i]);
}