From c791ace1e747371658237f0d30234fef56c39669 Mon Sep 17 00:00:00 2001
From: Dave Jiang
Date: Fri, 24 Feb 2017 14:57:08 -0800
Subject: mm: replace FAULT_FLAG_SIZE with parameter to huge_fault

Since the introduction of FAULT_FLAG_SIZE to the vm_fault flags, it has
been somewhat painful to get the flags set and removed at the correct
locations.  More than one kernel oops was introduced due to the
difficulty of getting the placement correct.

Remove the flag values and introduce an input parameter to huge_fault
that indicates the size of the page entry.  This makes the code easier
to trace and should avoid the issues we have seen with the fault flags,
where removal of the flag was necessary in the fallback paths.

Link: http://lkml.kernel.org/r/148615748258.43180.1690152053774975329.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Dave Jiang
Tested-by: Dan Williams
Reviewed-by: Jan Kara
Cc: Matthew Wilcox
Cc: Dave Hansen
Cc: Vlastimil Babka
Cc: Ross Zwisler
Cc: Kirill A. Shutemov
Cc: Nilesh Choudhury
Cc: Ingo Molnar
Cc: "H. Peter Anvin"
Cc: Thomas Gleixner
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 fs/dax.c          |  9 +++++----
 fs/ext2/file.c    |  2 +-
 fs/ext4/file.c    | 12 +++++++++---
 fs/xfs/xfs_file.c |  9 +++++----
 4 files changed, 20 insertions(+), 12 deletions(-)

(limited to 'fs')

diff --git a/fs/dax.c b/fs/dax.c
index c3c29fb..5ae8b71 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1452,12 +1452,13 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, struct iomap_ops *ops)
  * has done all the necessary locking for page fault to proceed
  * successfully.
  */
-int dax_iomap_fault(struct vm_fault *vmf, const struct iomap_ops *ops)
+int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+		    const struct iomap_ops *ops)
 {
-	switch (vmf->flags & FAULT_FLAG_SIZE_MASK) {
-	case FAULT_FLAG_SIZE_PTE:
+	switch (pe_size) {
+	case PE_SIZE_PTE:
 		return dax_iomap_pte_fault(vmf, ops);
-	case FAULT_FLAG_SIZE_PMD:
+	case PE_SIZE_PMD:
 		return dax_iomap_pmd_fault(vmf, ops);
 	default:
 		return VM_FAULT_FALLBACK;
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 6873883..b21891a 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -99,7 +99,7 @@ static int ext2_dax_fault(struct vm_fault *vmf)
 	}
 	down_read(&ei->dax_sem);
 
-	ret = dax_iomap_fault(vmf, &ext2_iomap_ops);
+	ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &ext2_iomap_ops);
 
 	up_read(&ei->dax_sem);
 	if (vmf->flags & FAULT_FLAG_WRITE)
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 502d2d0..8210c1f 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -253,7 +253,8 @@ out:
 }
 
 #ifdef CONFIG_FS_DAX
-static int ext4_dax_fault(struct vm_fault *vmf)
+static int ext4_dax_huge_fault(struct vm_fault *vmf,
+		enum page_entry_size pe_size)
 {
 	int result;
 	struct inode *inode = file_inode(vmf->vma->vm_file);
@@ -265,7 +266,7 @@ static int ext4_dax_fault(struct vm_fault *vmf)
 		file_update_time(vmf->vma->vm_file);
 	}
 	down_read(&EXT4_I(inode)->i_mmap_sem);
-	result = dax_iomap_fault(vmf, &ext4_iomap_ops);
+	result = dax_iomap_fault(vmf, pe_size, &ext4_iomap_ops);
 	up_read(&EXT4_I(inode)->i_mmap_sem);
 	if (write)
 		sb_end_pagefault(sb);
@@ -273,6 +274,11 @@ static int ext4_dax_fault(struct vm_fault *vmf)
 	return result;
 }
 
+static int ext4_dax_fault(struct vm_fault *vmf)
+{
+	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
+}
+
 /*
  * Handle write fault for VM_MIXEDMAP mappings. Similarly to ext4_dax_fault()
  * handler we check for races agaist truncate. Note that since we cycle through
@@ -305,7 +311,7 @@ static int ext4_dax_pfn_mkwrite(struct vm_fault *vmf)
 
 static const struct vm_operations_struct ext4_dax_vm_ops = {
 	.fault		= ext4_dax_fault,
-	.huge_fault	= ext4_dax_fault,
+	.huge_fault	= ext4_dax_huge_fault,
 	.page_mkwrite	= ext4_dax_fault,
 	.pfn_mkwrite	= ext4_dax_pfn_mkwrite,
 };
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 990e038..a50eca6 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1391,7 +1391,7 @@ xfs_filemap_page_mkwrite(
 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
 	if (IS_DAX(inode)) {
-		ret = dax_iomap_fault(vmf, &xfs_iomap_ops);
+		ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &xfs_iomap_ops);
 	} else {
 		ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
 		ret = block_page_mkwrite_return(ret);
@@ -1418,7 +1418,7 @@ xfs_filemap_fault(
 
 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 	if (IS_DAX(inode))
-		ret = dax_iomap_fault(vmf, &xfs_iomap_ops);
+		ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &xfs_iomap_ops);
 	else
 		ret = filemap_fault(vmf);
 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
@@ -1435,7 +1435,8 @@ xfs_filemap_fault(
  */
 STATIC int
 xfs_filemap_huge_fault(
-	struct vm_fault		*vmf)
+	struct vm_fault		*vmf,
+	enum page_entry_size	pe_size)
 {
 	struct inode		*inode = file_inode(vmf->vma->vm_file);
 	struct xfs_inode	*ip = XFS_I(inode);
@@ -1452,7 +1453,7 @@ xfs_filemap_huge_fault(
 	}
 
 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
-	ret = dax_iomap_fault(vmf, &xfs_iomap_ops);
+	ret = dax_iomap_fault(vmf, pe_size, &xfs_iomap_ops);
 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
 	if (vmf->flags & FAULT_FLAG_WRITE)
--
cgit v1.1
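
Note: this cgit view is limited to 'fs', so the mm-side definitions that
the converted callers above rely on are not shown. As a sketch for
context (reconstructed from the full commit, not part of the fs-only
diff above), the interface after this change looks roughly like:

	/*
	 * Sketch of the mm-side interface assumed by the fs/ changes above;
	 * reconstructed for context, not part of the fs-only diff.
	 */
	enum page_entry_size {
		PE_SIZE_PTE = 0,
		PE_SIZE_PMD,
		PE_SIZE_PUD,
	};

	struct vm_operations_struct {
		int (*fault)(struct vm_fault *vmf);
		/*
		 * The entry size is now passed explicitly instead of being
		 * encoded into vmf->flags via the old FAULT_FLAG_SIZE_*
		 * values.
		 */
		int (*huge_fault)(struct vm_fault *vmf,
				enum page_entry_size pe_size);
		/* other ops elided */
	};

With this shape, the core mm can invoke
vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD) on a PMD-sized fault and fall
back to the PTE path without first having to clear a size flag from
vmf->flags, which is exactly the fragility the changelog describes.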