author     Li Zefan <lizefan@huawei.com>  2012-06-11 16:03:35 +0800
committer  Chris Mason <chris.mason@oracle.com>  2012-06-14 21:30:55 -0400
commit     6c282eb40ed6f64a51ff447707714df551d85b8e (patch)
tree       97d8a780a577d21c0f237f2140f3a129f40096bf /fs
parent     7ddf5a42d311d74fd9f7373cb56def0843c219f8 (diff)
download   op-kernel-dev-6c282eb40ed6f64a51ff447707714df551d85b8e.zip
           op-kernel-dev-6c282eb40ed6f64a51ff447707714df551d85b8e.tar.gz
Btrfs: fix defrag regression
If a file has 3 small extents:

| ext1 | ext2 | ext3 |

running "btrfs fi defrag" will only defrag the last two extents, if those extent mappings haven't been read into memory from disk.

This bug was introduced by commit 17ce6ef8d731af5edac8c39e806db4c7e1f6956f ("Btrfs: add a check to decide if we should defrag the range"). The cause is that that commit looked up the previous and next extents using lookup_extent_mapping() only, which searches the in-memory extent map tree and returns NULL for mappings that haven't been cached, so an uncached neighbour was treated as if no extent existed there.

While at it, remove the code that checks the previous extent, since it's sufficient to check the next extent.

Signed-off-by: Li Zefan <lizefan@huawei.com>
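The heart of the fix is the new lookup helper: try the in-memory extent map tree first and, only on a miss, take the extent range lock and read the mapping off disk with btrfs_get_extent(). That way an extent whose mapping simply hasn't been cached is no longer mistaken for a missing extent. The sketch below condenses the defrag_lookup_extent() added by the patch; the helper name and the explicit length parameter are illustrative (the patch hard-codes PAGE_CACHE_SIZE), and error handling is reduced to the essentials.

	/* Sketch only: condensed from the patch, not a drop-in replacement. */
	static struct extent_map *lookup_extent_cached_or_disk(struct inode *inode,
							       u64 start, u64 len)
	{
		struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
		struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
		struct extent_map *em;

		/* fast path: the mapping may already be cached in memory */
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		read_unlock(&em_tree->lock);

		if (!em) {
			/* slow path: lock the range and read the metadata off disk */
			lock_extent(io_tree, start, start + len - 1);
			em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
			unlock_extent(io_tree, start, start + len - 1);
			if (IS_ERR(em))
				return NULL;
		}

		return em;	/* caller is responsible for free_extent_map() */
	}

With a lookup that can fall back to disk, defrag_check_next_extent() can reliably tell whether the extent following the current one is a real, mergeable extent rather than a hole or inline extent, which is also why the separate check of the previous extent could be dropped.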
Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/ioctl.c  97
1 file changed, 49 insertions(+), 48 deletions(-)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index c5254dd..a98f7d2 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -786,39 +786,57 @@ none:
return -ENOENT;
}
-/*
- * Validaty check of prev em and next em:
- * 1) no prev/next em
- * 2) prev/next em is an hole/inline extent
- */
-static int check_adjacent_extents(struct inode *inode, struct extent_map *em)
+static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
{
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
- struct extent_map *prev = NULL, *next = NULL;
- int ret = 0;
+ struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ struct extent_map *em;
+ u64 len = PAGE_CACHE_SIZE;
+ /*
+ * hopefully we have this extent in the tree already, try without
+ * the full extent lock
+ */
read_lock(&em_tree->lock);
- prev = lookup_extent_mapping(em_tree, em->start - 1, (u64)-1);
- next = lookup_extent_mapping(em_tree, em->start + em->len, (u64)-1);
+ em = lookup_extent_mapping(em_tree, start, len);
read_unlock(&em_tree->lock);
- if ((!prev || prev->block_start >= EXTENT_MAP_LAST_BYTE) &&
- (!next || next->block_start >= EXTENT_MAP_LAST_BYTE))
- ret = 1;
- free_extent_map(prev);
- free_extent_map(next);
+ if (!em) {
+ /* get the big lock and read metadata off disk */
+ lock_extent(io_tree, start, start + len - 1);
+ em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
+ unlock_extent(io_tree, start, start + len - 1);
+ if (IS_ERR(em))
+ return NULL;
+ }
+
+ return em;
+}
+
+static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
+{
+ struct extent_map *next;
+ bool ret = true;
+
+ /* this is the last extent */
+ if (em->start + em->len >= i_size_read(inode))
+ return false;
+
+ next = defrag_lookup_extent(inode, em->start + em->len);
+ if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
+ ret = false;
+
+ free_extent_map(next);
return ret;
}
-static int should_defrag_range(struct inode *inode, u64 start, u64 len,
- int thresh, u64 *last_len, u64 *skip,
- u64 *defrag_end)
+static int should_defrag_range(struct inode *inode, u64 start, int thresh,
+ u64 *last_len, u64 *skip, u64 *defrag_end)
{
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- struct extent_map *em = NULL;
- struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+ struct extent_map *em;
int ret = 1;
+ bool next_mergeable = true;
/*
* make sure that once we start defragging an extent, we keep on
@@ -829,23 +847,9 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
*skip = 0;
- /*
- * hopefully we have this extent in the tree already, try without
- * the full extent lock
- */
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, start, len);
- read_unlock(&em_tree->lock);
-
- if (!em) {
- /* get the big lock and read metadata off disk */
- lock_extent(io_tree, start, start + len - 1);
- em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
- unlock_extent(io_tree, start, start + len - 1);
-
- if (IS_ERR(em))
- return 0;
- }
+ em = defrag_lookup_extent(inode, start);
+ if (!em)
+ return 0;
/* this will cover holes, and inline extents */
if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
@@ -853,18 +857,15 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
goto out;
}
- /* If we have nothing to merge with us, just skip. */
- if (check_adjacent_extents(inode, em)) {
- ret = 0;
- goto out;
- }
+ next_mergeable = defrag_check_next_extent(inode, em);
/*
- * we hit a real extent, if it is big don't bother defragging it again
+ * we hit a real extent, if it is big or the next extent is not a
+ * real extent, don't bother defragging it
*/
- if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh)
+ if ((*last_len == 0 || *last_len >= thresh) &&
+ (em->len >= thresh || !next_mergeable))
ret = 0;
-
out:
/*
* last_len ends up being a counter of how many bytes we've defragged.
@@ -1143,8 +1144,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
break;
if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
- PAGE_CACHE_SIZE, extent_thresh,
- &last_len, &skip, &defrag_end)) {
+ extent_thresh, &last_len, &skip,
+ &defrag_end)) {
unsigned long next;
/*
* the should_defrag function tells us how much to skip