path: root/fs/f2fs/inline.c
author		Linus Torvalds <torvalds@linux-foundation.org>	2014-01-23 09:21:09 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-23 09:21:09 -0800
commit		0d90d638720ba14874e34cbd8766e4dc3f14f458 (patch)
tree		5080f0335c1454ae6a3ef6b29834af20ae08a952 /fs/f2fs/inline.c
parent		1d32bdafaaa8bcc4c39b41ab9f674887d147f188 (diff)
parent		bf39c00a9a7f3cdb5ce7d6695d9f044daf8f0b53 (diff)
download	op-kernel-dev-0d90d638720ba14874e34cbd8766e4dc3f14f458.zip
		op-kernel-dev-0d90d638720ba14874e34cbd8766e4dc3f14f458.tar.gz
Merge tag 'for-f2fs-3.14' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs
Pull f2fs updates from Jaegeuk Kim:
 "In this round, a couple of sysfs entries were introduced to tune the
  f2fs at runtime.

  In addition, f2fs starts to support inline_data and improves the
  read/write performance in some workloads by refactoring bio-related flows.

  This patch-set includes the following major enhancement patches.
   - support inline_data
   - refactor bio operations such as merge operations and rw type assignment
   - enhance the direct IO path
   - enhance bio operations
   - truncate a node page when it becomes obsolete
   - add sysfs entries: small_discards, max_victim_search, and in-place-update
   - add a sysfs entry to control max_victim_search

  The other bug fixes are as follows.
   - fix a bug in truncate_partial_nodes
   - avoid warnings during sparse and build process
   - fix error handling flows
   - fix potential bit overflows

  And, there are a bunch of cleanups"

* tag 'for-f2fs-3.14' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (95 commits)
  f2fs: drop obsolete node page when it is truncated
  f2fs: introduce NODE_MAPPING for code consistency
  f2fs: remove the orphan block page array
  f2fs: add help function META_MAPPING
  f2fs: move a branch for code redability
  f2fs: call mark_inode_dirty to flush dirty pages
  f2fs: clean checkpatch warnings
  f2fs: missing REQ_META and REQ_PRIO when sync_meta_pages(META_FLUSH)
  f2fs: avoid f2fs_balance_fs call during pageout
  f2fs: add delimiter to seperate name and value in debug phrase
  f2fs: use spinlock rather than mutex for better speed
  f2fs: move alloc new orphan node out of lock protection region
  f2fs: move grabing orphan pages out of protection region
  f2fs: remove the needless parameter of f2fs_wait_on_page_writeback
  f2fs: update documents and a MAINTAINERS entry
  f2fs: add a sysfs entry to control max_victim_search
  f2fs: improve write performance under frequent fsync calls
  f2fs: avoid to read inline data except first page
  f2fs: avoid to left uninitialized data in page when read inline data
  f2fs: fix truncate_partial_nodes bug
  ...
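As a side note for readers, the sysfs tunables mentioned in the pull message appear under /sys/fs/f2fs/<device>/ once an f2fs partition is mounted. The short user-space sketch below shows how one of them might be adjusted at runtime; the "sdb1" device name is a placeholder, and the exact attribute set is documented in Documentation/filesystems/f2fs.txt rather than defined by this merge itself.

/* Minimal sketch, assuming an f2fs partition mounted from /dev/sdb1. */
#include <stdio.h>

int main(void)
{
	/* "sdb1" is a hypothetical device name; substitute your own. */
	const char *path = "/sys/fs/f2fs/sdb1/max_victim_search";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* Widen the GC victim search window from its default. */
	fprintf(f, "%d\n", 4096);
	fclose(f);
	return 0;
}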
Diffstat (limited to 'fs/f2fs/inline.c')
-rw-r--r--	fs/f2fs/inline.c	222
1 file changed, 222 insertions(+), 0 deletions(-)
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
new file mode 100644
index 0000000..31ee5b1
--- /dev/null
+++ b/fs/f2fs/inline.c
@@ -0,0 +1,222 @@
+/*
+ * fs/f2fs/inline.c
+ * Copyright (c) 2013, Intel Corporation
+ * Authors: Huajun Li <huajun.li@intel.com>
+ *          Haicheng Li <haicheng.li@intel.com>
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+
+#include "f2fs.h"
+
+bool f2fs_may_inline(struct inode *inode)
+{
+	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+	block_t nr_blocks;
+	loff_t i_size;
+
+	if (!test_opt(sbi, INLINE_DATA))
+		return false;
+
+	nr_blocks = F2FS_I(inode)->i_xattr_nid ? 3 : 2;
+	if (inode->i_blocks > nr_blocks)
+		return false;
+
+	i_size = i_size_read(inode);
+	if (i_size > MAX_INLINE_DATA)
+		return false;
+
+	return true;
+}
+
+int f2fs_read_inline_data(struct inode *inode, struct page *page)
+{
+	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+	struct page *ipage;
+	void *src_addr, *dst_addr;
+
+	if (page->index) {
+		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		goto out;
+	}
+
+	ipage = get_node_page(sbi, inode->i_ino);
+	if (IS_ERR(ipage))
+		return PTR_ERR(ipage);
+
+	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
+
+	/* Copy the whole inline data block */
+	src_addr = inline_data_addr(ipage);
+	dst_addr = kmap(page);
+	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
+	kunmap(page);
+	f2fs_put_page(ipage, 1);
+
+out:
+	SetPageUptodate(page);
+	unlock_page(page);
+
+	return 0;
+}
+
+static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
+{
+	int err;
+	struct page *ipage;
+	struct dnode_of_data dn;
+	void *src_addr, *dst_addr;
+	block_t new_blk_addr;
+	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+	struct f2fs_io_info fio = {
+		.type = DATA,
+		.rw = WRITE_SYNC | REQ_PRIO,
+	};
+
+	f2fs_lock_op(sbi);
+	ipage = get_node_page(sbi, inode->i_ino);
+	if (IS_ERR(ipage))
+		return PTR_ERR(ipage);
+
+	/*
+	 * i_addr[0] is not used for inline data,
+	 * so reserving new block will not destroy inline data
+	 */
+	set_new_dnode(&dn, inode, ipage, NULL, 0);
+	err = f2fs_reserve_block(&dn, 0);
+	if (err) {
+		f2fs_unlock_op(sbi);
+		return err;
+	}
+
+	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
+
+	/* Copy the whole inline data block */
+	src_addr = inline_data_addr(ipage);
+	dst_addr = kmap(page);
+	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
+	kunmap(page);
+	SetPageUptodate(page);
+
+	/* write data page to try to make data consistent */
+	set_page_writeback(page);
+	write_data_page(page, &dn, &new_blk_addr, &fio);
+	update_extent_cache(new_blk_addr, &dn);
+	f2fs_wait_on_page_writeback(page, DATA);
+
+	/* clear inline data and flag after data writeback */
+	zero_user_segment(ipage, INLINE_DATA_OFFSET,
+				INLINE_DATA_OFFSET + MAX_INLINE_DATA);
+	clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
+	stat_dec_inline_inode(inode);
+
+	sync_inode_page(&dn);
+	f2fs_put_dnode(&dn);
+	f2fs_unlock_op(sbi);
+	return err;
+}
+
+int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size)
+{
+	struct page *page;
+	int err;
+
+	if (!f2fs_has_inline_data(inode))
+		return 0;
+	else if (to_size <= MAX_INLINE_DATA)
+		return 0;
+
+	page = grab_cache_page_write_begin(inode->i_mapping, 0, AOP_FLAG_NOFS);
+	if (!page)
+		return -ENOMEM;
+
+	err = __f2fs_convert_inline_data(inode, page);
+	f2fs_put_page(page, 1);
+	return err;
+}
+
+int f2fs_write_inline_data(struct inode *inode,
+			   struct page *page, unsigned size)
+{
+	void *src_addr, *dst_addr;
+	struct page *ipage;
+	struct dnode_of_data dn;
+	int err;
+
+	set_new_dnode(&dn, inode, NULL, NULL, 0);
+	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
+	if (err)
+		return err;
+	ipage = dn.inode_page;
+
+	zero_user_segment(ipage, INLINE_DATA_OFFSET,
+			  INLINE_DATA_OFFSET + MAX_INLINE_DATA);
+	src_addr = kmap(page);
+	dst_addr = inline_data_addr(ipage);
+	memcpy(dst_addr, src_addr, size);
+	kunmap(page);
+
+	/* Release the first data block if it is allocated */
+	if (!f2fs_has_inline_data(inode)) {
+		truncate_data_blocks_range(&dn, 1);
+		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
+		stat_inc_inline_inode(inode);
+	}
+
+	sync_inode_page(&dn);
+	f2fs_put_dnode(&dn);
+
+	return 0;
+}
+
+int recover_inline_data(struct inode *inode, struct page *npage)
+{
+	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+	struct f2fs_inode *ri = NULL;
+	void *src_addr, *dst_addr;
+	struct page *ipage;
+
+	/*
+	 * The inline_data recovery policy is as follows.
+	 * [prev.] [next] of inline_data flag
+	 *    o       o  -> recover inline_data
+	 *    o       x  -> remove inline_data, and then recover data blocks
+	 *    x       o  -> remove inline_data, and then recover inline_data
+	 *    x       x  -> recover data blocks
+	 */
+	if (IS_INODE(npage))
+		ri = F2FS_INODE(npage);
+
+	if (f2fs_has_inline_data(inode) &&
+			ri && ri->i_inline & F2FS_INLINE_DATA) {
+process_inline:
+		ipage = get_node_page(sbi, inode->i_ino);
+		f2fs_bug_on(IS_ERR(ipage));
+
+		src_addr = inline_data_addr(npage);
+		dst_addr = inline_data_addr(ipage);
+		memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
+		update_inode(inode, ipage);
+		f2fs_put_page(ipage, 1);
+		return -1;
+	}
+
+	if (f2fs_has_inline_data(inode)) {
+		ipage = get_node_page(sbi, inode->i_ino);
+		f2fs_bug_on(IS_ERR(ipage));
+		zero_user_segment(ipage, INLINE_DATA_OFFSET,
+				  INLINE_DATA_OFFSET + MAX_INLINE_DATA);
+		clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
+		update_inode(inode, ipage);
+		f2fs_put_page(ipage, 1);
+	} else if (ri && ri->i_inline & F2FS_INLINE_DATA) {
+		truncate_blocks(inode, 0);
+		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
+		goto process_inline;
+	}
+	return 0;
+}
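For orientation, the sketch below shows how a caller on the write path might use the helpers introduced above: let f2fs_convert_inline_data() move the data into a regular block once the file grows past MAX_INLINE_DATA, and otherwise keep writing through f2fs_write_inline_data(). The real callers added by this merge live in other files under fs/f2fs (not part of this diff), so the function below, its name, and its error handling are illustrative assumptions only.

/*
 * Hypothetical caller sketch, not part of this patch. It assumes the
 * declarations above (and "f2fs.h") are in scope and only illustrates
 * the intended division of labour between the inline-data helpers.
 */
static int example_prepare_write(struct inode *inode, struct page *page,
				 loff_t pos, unsigned len)
{
	int err;

	/* Growing past the inline capacity? Convert to a regular block first. */
	err = f2fs_convert_inline_data(inode, pos + len);
	if (err)
		return err;

	/* Still inline and the write fits: copy the page back into the inode. */
	if (f2fs_has_inline_data(inode) && pos + len <= MAX_INLINE_DATA)
		return f2fs_write_inline_data(inode, page, pos + len);

	return 0;	/* otherwise fall through to the normal block-based path */
}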