author     Jan Kara <jack@suse.cz>          2016-11-20 18:51:24 -0500
committer  Theodore Ts'o <tytso@mit.edu>    2016-11-20 18:51:24 -0500
commit     e2ae766c1b030271b5099b25674e2131d1d1e8c1 (patch)
tree       95a2676b55c9de1ff3a5085154c8493a1537b597 /fs/ext4
parent     96f8ba3dd632aff684cc7c67d9f4af435be0341c (diff)
ext4: convert DAX faults to iomap infrastructure
Convert DAX faults to use the iomap infrastructure. We would no longer need to start a transaction in ext4_dax_fault(), since ext4_iomap_begin() takes care of that; for now we keep doing so to avoid a lock inversion between the transaction start and the DAX entry lock, which dax_iomap_fault() acquires before calling the ->iomap_begin handler.

Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Diffstat (limited to 'fs/ext4')
-rw-r--r--  fs/ext4/file.c   |  9
-rw-r--r--  fs/ext4/inode.c  | 14
2 files changed, 14 insertions, 9 deletions
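For context, the resulting PTE fault handler in fs/ext4/file.c looks roughly like the sketch below. It is reconstructed around the hunk that follows; the write-path setup (sb_start_pagefault(), i_mmap_sem, the journal handle type and credits) is recalled surrounding code, not part of this patch. The transaction is still started up front for write faults, as explained above, and the fault itself is delegated to dax_iomap_fault() with &ext4_iomap_ops:

static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		/* Start the transaction here, outside the DAX entry lock,
		 * to avoid the lock inversion described in the changelog. */
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
	} else {
		down_read(&EXT4_I(inode)->i_mmap_sem);
	}

	if (IS_ERR(handle))
		result = VM_FAULT_SIGBUS;
	else
		result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);

	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else {
		up_read(&EXT4_I(inode)->i_mmap_sem);
	}
	return result;
}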
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 1953fe3..b5f1844 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -275,7 +275,7 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (IS_ERR(handle))
result = VM_FAULT_SIGBUS;
else
- result = dax_fault(vma, vmf, ext4_dax_get_block);
+ result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
if (write) {
if (!IS_ERR(handle))
@@ -309,9 +309,10 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
if (IS_ERR(handle))
result = VM_FAULT_SIGBUS;
- else
- result = dax_pmd_fault(vma, addr, pmd, flags,
- ext4_dax_get_block);
+ else {
+ result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
+ &ext4_iomap_ops);
+ }
if (write) {
if (!IS_ERR(handle))
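Both fault paths now hand the actual mapping work to the generic DAX code through the same ops table. In this kernel generation the table is wired up in fs/ext4/inode.c roughly as sketched below (a sketch of the existing definition, not text from this patch; the comments are descriptive, not quotes):

#include <linux/iomap.h>

struct iomap_ops ext4_iomap_ops = {
	.iomap_begin	= ext4_iomap_begin,	/* map the requested range, allocating blocks for writes */
	.iomap_end	= ext4_iomap_end,	/* post-write cleanup: update i_size, drop orphan, stop handle */
};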
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 29237f2..9de9a5a 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3369,12 +3369,16 @@ retry:
}
/*
- * If we added blocks beyond i_size we need to make sure they
+ * If we added blocks beyond i_size, we need to make sure they
* will get truncated if we crash before updating i_size in
- * ext4_iomap_end().
+ * ext4_iomap_end(). For faults we don't need to do that (and
+ * even cannot because for orphan list operations inode_lock is
+ * required) - if we happen to instantiate block beyond i_size,
+ * it is because we race with truncate which has already added
+ * the inode to the orphan list.
*/
- if (first_block + map.m_len >
- (inode->i_size + (1 << blkbits) - 1) >> blkbits) {
+ if (!(flags & IOMAP_FAULT) && first_block + map.m_len >
+ (i_size_read(inode) + (1 << blkbits) - 1) >> blkbits) {
int err;
err = ext4_orphan_add(handle, inode);
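The i_size comparison above works in block units: the byte size is rounded up to whole filesystem blocks before being compared against the last mapped block. A standalone illustration of that arithmetic (the i_size value and blkbits = 12, i.e. 4 KiB blocks, are example values, not taken from the patch):

#include <stdio.h>

int main(void)
{
	unsigned long long i_size = 5000;	/* example byte size */
	int blkbits = 12;			/* 4 KiB blocks */

	/* Same rounding as in ext4_iomap_begin():
	 * (i_size + block_size - 1) >> blkbits rounds up to whole blocks,
	 * so 5000 bytes occupy 2 blocks of 4096 bytes. */
	unsigned long long size_blocks =
		(i_size + (1ULL << blkbits) - 1) >> blkbits;

	printf("%llu bytes -> %llu blocks of %u bytes\n",
	       i_size, size_blocks, 1u << blkbits);
	return 0;
}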
@@ -3420,7 +3424,7 @@ static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
int blkbits = inode->i_blkbits;
bool truncate = false;
- if (!(flags & IOMAP_WRITE))
+ if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT))
return 0;
handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
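The combined effect of the two flag tests is that ext4_iomap_end() performs its post-write cleanup only for real writes that are not page faults. A standalone sketch of that predicate is below; the flag values are assumptions for illustration only, real code should use the kernel's definitions from include/linux/iomap.h:

#include <stdbool.h>
#include <stdio.h>

#define IOMAP_WRITE	(1 << 0)	/* assumed value, for illustration */
#define IOMAP_FAULT	(1 << 3)	/* assumed value, for illustration */

/* Mirrors the new early-return condition in ext4_iomap_end():
 * cleanup runs only when IOMAP_WRITE is set and IOMAP_FAULT is not. */
static bool needs_post_write_cleanup(unsigned flags)
{
	return (flags & IOMAP_WRITE) && !(flags & IOMAP_FAULT);
}

int main(void)
{
	printf("plain write : %d\n", needs_post_write_cleanup(IOMAP_WRITE));
	printf("write fault : %d\n", needs_post_write_cleanup(IOMAP_WRITE | IOMAP_FAULT));
	printf("read fault  : %d\n", needs_post_write_cleanup(IOMAP_FAULT));
	return 0;
}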