Diffstat (limited to 'include/asm-generic')
-rw-r--r--	include/asm-generic/tlb.h	59
1 file changed, 48 insertions(+), 11 deletions(-)
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 9dbb739..c6d6671 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -107,6 +107,12 @@ struct mmu_gather {
struct mmu_gather_batch local;
struct page *__pages[MMU_GATHER_BUNDLE];
unsigned int batch_count;
+ /*
+ * __tlb_adjust_range will track the new addr here,
* so that we can adjust the range after the flush
+ */
+ unsigned long addr;
+ int page_size;
};
#define HAVE_GENERIC_MMU_GATHER
@@ -115,23 +121,20 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
unsigned long end);
-int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
-
-/* tlb_remove_page
- * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
- * required.
- */
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- if (!__tlb_remove_page(tlb, page))
- tlb_flush_mmu(tlb);
-}
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+ int page_size);
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
unsigned long address)
{
tlb->start = min(tlb->start, address);
tlb->end = max(tlb->end, address + PAGE_SIZE);
+ /*
+ * Track the last address with which we adjusted the range. This
* will be used later to adjust the range again after a tlb_flush_mmu()
* caused by a failed __tlb_remove_page
+ */
+ tlb->addr = address;
}
static inline void __tlb_reset_range(struct mmu_gather *tlb)
@@ -144,6 +147,40 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
}
}
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+ struct page *page, int page_size)
+{
+ if (__tlb_remove_page_size(tlb, page, page_size)) {
+ tlb_flush_mmu(tlb);
+ tlb->page_size = page_size;
+ __tlb_adjust_range(tlb, tlb->addr);
+ __tlb_remove_page_size(tlb, page, page_size);
+ }
+}
+
+static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
+}
+
+/* tlb_remove_page
+ * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
+ * required.
+ */
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ return tlb_remove_page_size(tlb, page, PAGE_SIZE);
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page)
+{
+ /* active->nr should be zero when we call this */
+ VM_BUG_ON_PAGE(tlb->active->nr, page);
+ tlb->page_size = PAGE_SIZE;
+ __tlb_adjust_range(tlb, tlb->addr);
+ return __tlb_remove_page(tlb, page);
+}
+
/*
* In the case of tlb vma handling, we can optimise these away in the
* case where we're doing a full MM flush. When we're doing a munmap,
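A caller-side sketch (not part of this commit) of how the new interface is meant to be used when a huge page is freed during page-table teardown. The helper name zap_one_huge_page() and the use of HPAGE_PMD_SIZE are illustrative assumptions; the point is that the caller now passes the mapping size explicitly, so the generic mmu_gather code can flush before pages of different sizes end up in the same batch.

#include <linux/mm.h>
#include <linux/huge_mm.h>	/* HPAGE_PMD_SIZE, assuming THP is enabled */
#include <asm-generic/tlb.h>

/*
 * Hypothetical caller, not from this commit.  Assumes the flush range for
 * the mapping has already been recorded in tlb->start/tlb->end when the
 * page-table entry was cleared.
 */
static void zap_one_huge_page(struct mmu_gather *tlb, struct page *page)
{
	/*
	 * Pass the size of the mapping being torn down.  Per the hunk above,
	 * if __tlb_remove_page_size() reports that a flush is needed (batch
	 * full or page size changed), tlb_remove_page_size() calls
	 * tlb_flush_mmu(), re-adjusts the range from tlb->addr and queues
	 * the page again.
	 */
	tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);

	/* An ordinary page still goes through the PAGE_SIZE wrapper: */
	/* tlb_remove_page(tlb, page); */
}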