author | Kirill A. Shutemov <kirill.shutemov@linux.intel.com> | 2017-02-24 14:57:57 -0800
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-02-24 17:46:55 -0800
commit | f27176cfc363d395eea8dc5c4a26e5d6d7d65eaf (patch) |
tree | f470459d19b549de9141a79a076d386305927e8d /mm |
parent | a8fa41ad2f6f7ca08edd1afcf8149ae5a4dcf654 (diff) |
mm: convert page_mkclean_one() to use page_vma_mapped_walk()
For consistency, it is worth converting all page_check_address() call
sites to page_vma_mapped_walk(), so we can drop the former.
The PMD handling here is future-proofing; we don't have any users yet.
ext4 with huge pages will be the first.
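
For reference, page_vma_mapped_walk() inverts the old calling
convention: instead of returning one mapped, locked PTE that the caller
must later unlock, it drives a loop, reporting each PTE- or PMD-level
mapping of the page within the VMA and taking/dropping the page table
lock internally. A minimal sketch of the caller pattern (illustrative
only; PVMW_SYNC plays the same role as the sync=1 argument of the old
page_check_address() call visible in the diff below):

	struct page_vma_mapped_walk pvmw = {
		.page = page,		/* page whose mappings to find */
		.vma = vma,		/* VMA to search in */
		.address = address,	/* expected mapping address */
		.flags = PVMW_SYNC,	/* synchronous walk, avoid racy checks */
	};

	while (page_vma_mapped_walk(&pvmw)) {
		if (pvmw.pte) {
			/* pvmw.pte points at a present, locked PTE entry */
		} else {
			/* pvmw.pmd points at a huge (PMD-level) mapping */
		}
		/* the page table lock for the entry is held in the body */
	}
	/* after the loop, the walk has already dropped its last lock */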
Link: http://lkml.kernel.org/r/20170129173858.45174-7-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/rmap.c | 68
1 file changed, 45 insertions(+), 23 deletions(-)
@@ -1017,34 +1017,56 @@ int page_referenced(struct page *page,
 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 			    unsigned long address, void *arg)
 {
-	struct mm_struct *mm = vma->vm_mm;
-	pte_t *pte;
-	spinlock_t *ptl;
-	int ret = 0;
+	struct page_vma_mapped_walk pvmw = {
+		.page = page,
+		.vma = vma,
+		.address = address,
+		.flags = PVMW_SYNC,
+	};
 	int *cleaned = arg;
 
-	pte = page_check_address(page, mm, address, &ptl, 1);
-	if (!pte)
-		goto out;
-
-	if (pte_dirty(*pte) || pte_write(*pte)) {
-		pte_t entry;
+	while (page_vma_mapped_walk(&pvmw)) {
+		int ret = 0;
+		address = pvmw.address;
+		if (pvmw.pte) {
+			pte_t entry;
+			pte_t *pte = pvmw.pte;
+
+			if (!pte_dirty(*pte) && !pte_write(*pte))
+				continue;
+
+			flush_cache_page(vma, address, pte_pfn(*pte));
+			entry = ptep_clear_flush(vma, address, pte);
+			entry = pte_wrprotect(entry);
+			entry = pte_mkclean(entry);
+			set_pte_at(vma->vm_mm, address, pte, entry);
+			ret = 1;
+		} else {
+#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+			pmd_t *pmd = pvmw.pmd;
+			pmd_t entry;
+
+			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
+				continue;
+
+			flush_cache_page(vma, address, page_to_pfn(page));
+			entry = pmdp_huge_clear_flush(vma, address, pmd);
+			entry = pmd_wrprotect(entry);
+			entry = pmd_mkclean(entry);
+			set_pmd_at(vma->vm_mm, address, pmd, entry);
+			ret = 1;
+#else
+			/* unexpected pmd-mapped page? */
+			WARN_ON_ONCE(1);
+#endif
+		}
 
-		flush_cache_page(vma, address, pte_pfn(*pte));
-		entry = ptep_clear_flush(vma, address, pte);
-		entry = pte_wrprotect(entry);
-		entry = pte_mkclean(entry);
-		set_pte_at(mm, address, pte, entry);
-		ret = 1;
+		if (ret) {
+			mmu_notifier_invalidate_page(vma->vm_mm, address);
+			(*cleaned)++;
+		}
 	}
 
-	pte_unmap_unlock(pte, ptl);
-
-	if (ret) {
-		mmu_notifier_invalidate_page(mm, address);
-		(*cleaned)++;
-	}
-out:
 	return SWAP_AGAIN;
 }
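
For context, page_mkclean_one() is not called directly; it runs as an
rmap_walk() callback, which is why it receives its result pointer
through void *arg and signals "keep walking" by returning SWAP_AGAIN. A
rough sketch of the unchanged driver (the rwc field names and
invalid_mkclean_vma are recalled from mm/rmap.c of that era, not shown
by this patch):

	int page_mkclean(struct page *page)
	{
		int cleaned = 0;
		struct address_space *mapping;
		struct rmap_walk_control rwc = {
			.arg = (void *)&cleaned,	/* what *cleaned points at */
			.rmap_one = page_mkclean_one,	/* invoked per mapping VMA */
			.invalid_vma = invalid_mkclean_vma, /* skip non-VM_SHARED VMAs */
		};

		BUG_ON(!PageLocked(page));

		if (!page_mapped(page))
			return 0;

		mapping = page_mapping(page);
		if (!mapping)
			return 0;

		rmap_walk(page, &rwc);	/* SWAP_AGAIN => continue to next VMA */

		return cleaned;
	}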