author		Jaegeuk Kim <jaegeuk@kernel.org>	2016-02-03 13:49:44 -0800
committer	Jaegeuk Kim <jaegeuk@kernel.org>	2016-02-22 16:07:23 -0800
commit		24b8491251cde66879e74092167cc0f27a1f11ce (patch)
tree		ffb8fc277cc2a649e6d98c6305083683e2dfee1b /fs/f2fs/data.c
parent		b439b103a6c9eb3417f34b4a609d4e00b4c59aca (diff)
f2fs: preallocate blocks for buffered aio writes
This patch preallocates data blocks for buffered aio writes. With this patch, we can avoid redundant locking and unlocking of node pages for consecutive aio requests.

Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
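For context, here is a minimal userspace sketch of the workload this change targets: consecutive buffered (non-O_DIRECT) aio writes submitted through libaio. The mount point, file name, and 4KB request size are illustrative assumptions, not part of the patch.

/* sketch only: consecutive buffered aio writes over a contiguous range;
 * the f2fs mount at /mnt/f2fs and the file name are assumptions.
 * build with: gcc aio_write.c -laio */
#include <libaio.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	io_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	char *buf = malloc(4096);
	int fd, i;

	/* no O_DIRECT: these aio requests take the buffered write path */
	fd = open("/mnt/f2fs/aio_test", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd < 0 || !buf || io_setup(8, &ctx))
		return 1;
	memset(buf, 'a', 4096);

	for (i = 0; i < 16; i++) {
		io_prep_pwrite(&cb, fd, buf, 4096, (long long)i * 4096);
		if (io_submit(ctx, 1, cbs) != 1)
			return 1;
		if (io_getevents(ctx, 1, 1, &ev, NULL) != 1)
			return 1;
	}

	io_destroy(ctx);
	close(fd);
	free(buf);
	return 0;
}

With this patch, each such write reserves its blocks up front via F2FS_GET_BLOCK_PRE_AIO, so prepare_write_begin() can return early for full-page writes (see the last hunk below) instead of taking the node page lock again for every page.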
Diffstat (limited to 'fs/f2fs/data.c')
-rw-r--r--	fs/f2fs/data.c	37
1 file changed, 30 insertions(+), 7 deletions(-)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index e7815ac..03f948e 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -571,16 +571,25 @@ ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
 	ssize_t ret = 0;
 
 	map.m_lblk = F2FS_BYTES_TO_BLK(iocb->ki_pos);
-	map.m_len = F2FS_BYTES_TO_BLK(iov_iter_count(from));
+	map.m_len = F2FS_BLK_ALIGN(iov_iter_count(from));
 	map.m_next_pgofs = NULL;
 
-	if (iocb->ki_flags & IOCB_DIRECT &&
-			!(f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))) {
+	if (f2fs_encrypted_inode(inode))
+		return 0;
+
+	if (iocb->ki_flags & IOCB_DIRECT) {
+		ret = f2fs_convert_inline_inode(inode);
+		if (ret)
+			return ret;
+		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
+	}
+	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
 		ret = f2fs_convert_inline_inode(inode);
 		if (ret)
 			return ret;
-		ret = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
 	}
 
+	if (!f2fs_has_inline_data(inode))
+		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
 	return ret;
 }
@@ -612,7 +621,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 	/* it only supports block size == page size */
 	pgofs = (pgoff_t)map->m_lblk;
 
-	if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
+	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
 		map->m_pblk = ei.blk + pgofs - ei.fofs;
 		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
 		map->m_flags = F2FS_MAP_MAPPED;
@@ -647,7 +656,12 @@ next_block:
 				err = -EIO;
 				goto sync_out;
 			}
-			err = __allocate_data_block(&dn);
+			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
+				if (blkaddr == NULL_ADDR)
+					err = reserve_new_block(&dn);
+			} else {
+				err = __allocate_data_block(&dn);
+			}
 			if (err)
 				goto sync_out;
 			allocated = true;
@@ -679,7 +693,8 @@ next_block:
 	} else if ((map->m_pblk != NEW_ADDR &&
 			blkaddr == (map->m_pblk + ofs)) ||
 			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
-			flag == F2FS_GET_BLOCK_PRE_DIO) {
+			flag == F2FS_GET_BLOCK_PRE_DIO ||
+			flag == F2FS_GET_BLOCK_PRE_AIO) {
 		ofs++;
 		map->m_len++;
 	} else {
@@ -1418,6 +1433,14 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
 	struct extent_info ei;
 	int err = 0;
 
+	/*
+	 * we already allocated all the blocks, so we don't need to get
+	 * the block addresses when there is no need to fill the page.
+	 */
+	if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
+					len == PAGE_CACHE_SIZE)
+		return 0;
+
 	if (f2fs_has_inline_data(inode) ||
 			(pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
 		f2fs_lock_op(sbi);