From e9d55e157034a9efd99405c99c1565d64619d82b Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Tue, 26 Jul 2016 15:24:09 -0700
Subject: mm: change the interface for __tlb_remove_page()

This updates the generic and arch-specific implementations to return
true if we need to do a tlb flush.  That means that if
__tlb_remove_page() indicates a flush is needed, the page we tried to
remove needs to be tracked and added again after the flush.  We need to
track it because we have already updated the pte to none and we can't
just loop back.

This change is done to enable us to do a tlb_flush when we try to flush
a range that consists of different page sizes.  For architectures like
ppc64, we can do a range-based tlb flush, and we need to track the page
size for that.  When we try to remove a huge page, we will force a tlb
flush and start a new mmu gather.

[aneesh.kumar@linux.vnet.ibm.com: mm-change-the-interface-for-__tlb_remove_page-v3]
  Link: http://lkml.kernel.org/r/1465049193-22197-2-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1464860389-29019-2-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V
Cc: Benjamin Herrenschmidt
Cc: Michael Ellerman
Cc: Hugh Dickins
Cc: "Kirill A. Shutemov"
Cc: Andrea Arcangeli
Cc: Joonsoo Kim
Cc: Mel Gorman
Cc: David Rientjes
Cc: Vlastimil Babka
Cc: Minchan Kim
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-generic/tlb.h | 44 +++++++++++++++++++++++++++++++++-----------
 1 file changed, 33 insertions(+), 11 deletions(-)

(limited to 'include/asm-generic')

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 9dbb739..7b899a4 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -107,6 +107,11 @@ struct mmu_gather {
 	struct mmu_gather_batch	local;
 	struct page		*__pages[MMU_GATHER_BUNDLE];
 	unsigned int		batch_count;
+	/*
+	 * __tlb_adjust_range will track the new addr here,
+	 * so that we can adjust the range after the flush
+	 */
+	unsigned long addr;
 };
 
 #define HAVE_GENERIC_MMU_GATHER
@@ -115,23 +120,19 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
 							unsigned long end);
-int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
-
-/* tlb_remove_page
- *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
- *	required.
- */
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	if (!__tlb_remove_page(tlb, page))
-		tlb_flush_mmu(tlb);
-}
+bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 				      unsigned long address)
 {
 	tlb->start = min(tlb->start, address);
 	tlb->end = max(tlb->end, address + PAGE_SIZE);
+	/*
+	 * Track the last address with which we adjusted the range.  This
+	 * will be used later to adjust the range again after a mmu_flush
+	 * due to a failed __tlb_remove_page.
+	 */
+	tlb->addr = address;
 }
 
 static inline void __tlb_reset_range(struct mmu_gather *tlb)
@@ -144,6 +145,27 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 	}
 }
 
+/* tlb_remove_page
+ *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
+ *	required.
+ */
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	if (__tlb_remove_page(tlb, page)) {
+		tlb_flush_mmu(tlb);
+		__tlb_adjust_range(tlb, tlb->addr);
+		__tlb_remove_page(tlb, page);
+	}
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page)
+{
+	/* active->nr should be zero when we call this */
+	VM_BUG_ON_PAGE(tlb->active->nr, page);
+	__tlb_adjust_range(tlb, tlb->addr);
+	return __tlb_remove_page(tlb, page);
+}
+
 /*
  * In the case of tlb vma handling, we can optimise these away in the
  * case where we're doing a full MM flush.  When we're doing a munmap,
--
cgit v1.1
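Seen from the caller's side, the new contract is: a true return from
__tlb_remove_page() means the gather batch was full and the page was
not queued, so the caller must flush and then retry the same page.  A
minimal sketch follows; it is not part of the patch, and
example_remove_pages() and its parameters are illustrative names.  Only
__tlb_remove_page(), tlb_flush_mmu() and __tlb_adjust_range() come from
the header above:

	/*
	 * Sketch, assuming the interfaces from the patch above: retry a
	 * page whenever __tlb_remove_page() signals that a flush is needed.
	 */
	static void example_remove_pages(struct mmu_gather *tlb,
					 struct page **pages, int nr)
	{
		int i;

		for (i = 0; i < nr; i++) {
			if (__tlb_remove_page(tlb, pages[i])) {
				/* batch full: flush what was gathered so far */
				tlb_flush_mmu(tlb);
				/*
				 * The flush reset the tracked range; rebuild it
				 * from tlb->addr, then requeue the page that
				 * could not be batched.
				 */
				__tlb_adjust_range(tlb, tlb->addr);
				__tlb_remove_page(tlb, pages[i]);
			}
		}
	}

This is the same flush-and-retry sequence that the new tlb_remove_page()
wrapper performs internally; the open-coded callers in mm/memory.c
follow the same pattern but are not visible in this view, which is
limited to include/asm-generic.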
From e77b0852b551ffd8b29fa0225e1ef62c195e3160 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Tue, 26 Jul 2016 15:24:12 -0700
Subject: mm/mmu_gather: track page size with mmu gather and force flush if
 page size change

This allows an arch which needs to do special handling with respect to
different page sizes when flushing the tlb to implement the same in the
mmu gather layer.

Link: http://lkml.kernel.org/r/1465049193-22197-3-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V
Cc: Benjamin Herrenschmidt
Cc: Michael Ellerman
Cc: Hugh Dickins
Cc: "Kirill A. Shutemov"
Cc: Andrea Arcangeli
Cc: Joonsoo Kim
Cc: Mel Gorman
Cc: David Rientjes
Cc: Vlastimil Babka
Cc: Minchan Kim
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-generic/tlb.h | 27 +++++++++++++++++++++------
 1 file changed, 21 insertions(+), 6 deletions(-)

(limited to 'include/asm-generic')

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 7b899a4..c6d6671 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -112,6 +112,7 @@ struct mmu_gather {
 	 * so that we can adjust the range after the flush
 	 */
 	unsigned long addr;
+	int page_size;
 };
 
 #define HAVE_GENERIC_MMU_GATHER
@@ -120,7 +121,8 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
 							unsigned long end);
-bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+				   int page_size);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 				      unsigned long address)
@@ -145,23 +147,36 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 	}
 }
 
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+					struct page *page, int page_size)
+{
+	if (__tlb_remove_page_size(tlb, page, page_size)) {
+		tlb_flush_mmu(tlb);
+		tlb->page_size = page_size;
+		__tlb_adjust_range(tlb, tlb->addr);
+		__tlb_remove_page_size(tlb, page, page_size);
+	}
+}
+
+static bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
+}
+
 /* tlb_remove_page
  *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
  *	required.
  */
 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	if (__tlb_remove_page(tlb, page)) {
-		tlb_flush_mmu(tlb);
-		__tlb_adjust_range(tlb, tlb->addr);
-		__tlb_remove_page(tlb, page);
-	}
+	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
 }
 
 static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page)
 {
 	/* active->nr should be zero when we call this */
 	VM_BUG_ON_PAGE(tlb->active->nr, page);
+	tlb->page_size = PAGE_SIZE;
 	__tlb_adjust_range(tlb, tlb->addr);
 	return __tlb_remove_page(tlb, page);
 }
--
cgit v1.1
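A matching sketch for the size-aware interface, again not part of the
patch: a transparent-huge-page teardown path passes the real page size,
and, per the commit message, the gather code forces a flush when the
page size changes mid-batch (that check lives in mm/memory.c, outside
this path-limited view).  example_remove_huge_page() is a hypothetical
name; HPAGE_PMD_SIZE is the kernel's pmd-level huge page size:

	/*
	 * Sketch, assuming the interfaces from the patch above.  If the
	 * gather already holds PAGE_SIZE pages, queueing a huge page makes
	 * __tlb_remove_page_size() report that a flush is needed, and
	 * tlb_remove_page_size() flushes and requeues the page for us.
	 */
	static void example_remove_huge_page(struct mmu_gather *tlb,
					     struct page *page)
	{
		tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
	}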