author      Chris Mason <chris.mason@oracle.com>    2012-04-27 14:31:29 -0400
committer   Chris Mason <chris.mason@oracle.com>    2012-04-27 14:51:05 -0400
commit      dc7fdde39e4962b1a88741f7eba2a6b3be1285d8 (patch)
tree        97cd8b1f9d8c0682c64303d45de1fb55925abcdf /fs
parent      fede766f28dd766d4e8feb321fdb19edb21ef6fb (diff)
download    op-kernel-dev-dc7fdde39e4962b1a88741f7eba2a6b3be1285d8.zip
            op-kernel-dev-dc7fdde39e4962b1a88741f7eba2a6b3be1285d8.tar.gz
Btrfs: reduce lock contention during extent insertion
We're spending huge amounts of time on lock contention during end_io
processing because we unconditionally assume we are overwriting an
existing extent in the file for each IO.

This checks to see if we are outside i_size, and if so, it uses a less
expensive readonly search of the btree to look for existing extents.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
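For readers who don't live in fs/btrfs, here is a minimal, standalone C sketch
of the idea; the struct and helper names below are illustrative stand-ins, not
the kernel API. In the real code the choice is carried by the last argument of
btrfs_lookup_file_extent() (see the diff below), where 0 requests a cheap
read-only search and -1 a copy-on-write search that prepares the path for
modification.

/*
 * Standalone sketch of the locking shortcut (not kernel code).
 * All names below are simplified stand-ins for the real btrfs structures.
 */
#include <stdint.h>
#include <stdio.h>

struct inode_info {
	uint64_t disk_i_size;	/* i_size already committed to the btree */
};

/* modify_tree == -1: copy-on-write search, may take write locks on nodes
 * modify_tree ==  0: read-only search, much cheaper under contention      */
static int lookup_file_extent(uint64_t search_start, int modify_tree)
{
	if (modify_tree)
		printf("COW search at %llu (expensive, write locks)\n",
		       (unsigned long long)search_start);
	else
		printf("read-only search at %llu (cheap)\n",
		       (unsigned long long)search_start);
	return 0;	/* pretend no overlapping extent was found */
}

static void drop_extents(struct inode_info *inode, uint64_t start)
{
	int modify_tree = -1;
	int found;

	/*
	 * A write that starts at or beyond the on-disk i_size cannot
	 * overlap an existing file extent, so a read-only search is enough.
	 */
	if (start >= inode->disk_i_size)
		modify_tree = 0;

	found = lookup_file_extent(start, modify_tree);
	if (found && !modify_tree) {
		/*
		 * An extent showed up after all: redo the lookup as a
		 * modifying search, the same fallback the patch adds on
		 * the recow path.
		 */
		modify_tree = -1;
		lookup_file_extent(start, modify_tree);
	}
}

int main(void)
{
	struct inode_info inode = { .disk_i_size = 4096 };

	drop_extents(&inode, 0);	/* inside i_size: COW search */
	drop_extents(&inode, 8192);	/* past i_size: read-only search */
	return 0;
}

Ordinary streaming writes extend the file, so their end_io completions land
past disk_i_size and take the cheap path; only writes inside the existing file
size, or lookups that do turn up an extent to drop, fall back to the expensive
modifying search.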
Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/file.c  9
1 file changed, 7 insertions, 2 deletions
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index d83260d..53bf2d7 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -567,6 +567,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
 	int extent_type;
 	int recow;
 	int ret;
+	int modify_tree = -1;
 
 	if (drop_cache)
 		btrfs_drop_extent_cache(inode, start, end - 1, 0);
@@ -575,10 +576,13 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
 	if (!path)
 		return -ENOMEM;
 
+	if (start >= BTRFS_I(inode)->disk_i_size)
+		modify_tree = 0;
+
 	while (1) {
 		recow = 0;
 		ret = btrfs_lookup_file_extent(trans, root, path, ino,
-					       search_start, -1);
+					       search_start, modify_tree);
 		if (ret < 0)
 			break;
 		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
@@ -634,7 +638,8 @@ next_slot:
 		}
 
 		search_start = max(key.offset, start);
-		if (recow) {
+		if (recow || !modify_tree) {
+			modify_tree = -1;
 			btrfs_release_path(path);
 			continue;
 		}