author    Chao Yu <yuchao0@huawei.com>    2017-10-13 18:01:35 +0800
committer Jaegeuk Kim <jaegeuk@kernel.org>    2017-10-26 10:44:19 +0200
commit    cb7a84486577a95c0300a12eb2e9cef4bd126628 (patch)
tree      1f10f669acc864168ef8705d4347acb50cc3d37d
parent    4414dea8d32bb2a351e1e84eebcdc7bff22864b0 (diff)
f2fs: relocate readahead codes in readdir()
Previously, for a large directory, readahead was issued only once in readdir(), so readdir() performance could drop when traversing later blocks. To avoid this, relocate the readahead code into the traversal loop so that it covers the whole traverse flow.

Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
 fs/f2fs/dir.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 1ebd206..1464644 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -870,11 +870,6 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
 		goto out_free;
 	}
 
-	/* readahead for multi pages of dir */
-	if (npages - n > 1 && !ra_has_index(ra, n))
-		page_cache_sync_readahead(inode->i_mapping, ra, file, n,
-				min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES));
-
 	for (; n < npages; n++) {
 
 		/* allow readdir() to be interrupted */
@@ -884,6 +879,11 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
 		}
 		cond_resched();
 
+		/* readahead for multi pages of dir */
+		if (npages - n > 1 && !ra_has_index(ra, n))
+			page_cache_sync_readahead(inode->i_mapping, ra, file, n,
+					min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES));
+
 		dentry_page = get_lock_data_page(inode, n, false);
 		if (IS_ERR(dentry_page)) {
 			err = PTR_ERR(dentry_page);
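
The idea in the diff above can be illustrated with a minimal user-space sketch (not the kernel code from this patch): read a file block by block and, instead of hinting the page cache once before the loop, re-issue a window-limited readahead hint inside the loop so later blocks are also covered. The file name "dirfile.bin", BLOCK_SIZE, and RA_WINDOW are hypothetical placeholders; posix_fadvise(POSIX_FADV_WILLNEED) stands in for page_cache_sync_readahead().

```c
/*
 * Minimal user-space sketch of the patch's idea. Assumptions: the file
 * "dirfile.bin", BLOCK_SIZE and RA_WINDOW are illustrative, not from f2fs.
 * A bounded readahead hint is issued on each iteration instead of once
 * before the loop, so blocks far into the traversal also benefit.
 */
#define _XOPEN_SOURCE 600
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

#define BLOCK_SIZE 4096		/* hypothetical "dentry block" size */
#define RA_WINDOW  16		/* hypothetical readahead window, in blocks */

int main(void)
{
	int fd = open("dirfile.bin", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	struct stat st;
	if (fstat(fd, &st) < 0) {
		perror("fstat");
		close(fd);
		return 1;
	}

	off_t nblocks = (st.st_size + BLOCK_SIZE - 1) / BLOCK_SIZE;
	off_t ra_end = 0;		/* first block not yet hinted */
	char buf[BLOCK_SIZE];

	for (off_t n = 0; n < nblocks; n++) {
		/*
		 * Re-issue the hint inside the loop, window-limited, and only
		 * when the current block is past the last hinted range --
		 * roughly what the ra_has_index() check does in the patch.
		 */
		if (n >= ra_end && nblocks - n > 1) {
			off_t win = nblocks - n < RA_WINDOW ? nblocks - n : RA_WINDOW;

			posix_fadvise(fd, n * BLOCK_SIZE, win * BLOCK_SIZE,
				      POSIX_FADV_WILLNEED);
			ra_end = n + win;
		}

		ssize_t got = pread(fd, buf, sizeof(buf), n * BLOCK_SIZE);
		if (got < 0) {
			perror("pread");
			break;
		}
		/* ... process the block, as readdir() would fill dentries ... */
	}

	close(fd);
	return 0;
}
```

The per-iteration check and the window cap play the same role as the ra_has_index() test and the MAX_DIR_RA_PAGES limit in the diff: readahead keeps pace with the traversal instead of stopping after the first window.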