author      Johannes Weiner <hannes@cmpxchg.org>              2011-01-13 15:47:02 -0800
committer   Linus Torvalds <torvalds@linux-foundation.org>    2011-01-13 17:32:44 -0800
commit      0ca1634d4143c3579273ca53b993df19f5c98e92 (patch)
tree        d566a582357a9e1528d1783566bfda4375c7e566
parent      f2d6bfe9ff0acec30b713614260e78b03d20e909 (diff)
download    op-kernel-dev-0ca1634d4143c3579273ca53b993df19f5c98e92.zip
            op-kernel-dev-0ca1634d4143c3579273ca53b993df19f5c98e92.tar.gz
thp: mincore transparent hugepage support
Handle transparent huge page pmd entries natively instead of splitting
them into subpages.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
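For illustration, here is a minimal userspace sketch (not part of the patch)
of the path this commit optimizes: calling mincore(2) on an anonymous mapping
hinted with MADV_HUGEPAGE. Before this change the kernel split a backing huge
page just to answer the query; afterwards mincore_huge_pmd() fills the whole
vector in one step. The 2MB size assumes pmd-sized huge pages on x86-64;
everything else is standard Linux/glibc API.

/*
 * Illustrative only -- not part of the commit. Queries residency of a
 * (hopefully) THP-backed region; with this patch the kernel can answer
 * without splitting the huge page. Assumes x86-64's 2MB pmd size.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        size_t len = 2UL * 1024 * 1024;         /* one pmd-sized huge page */
        long psize = sysconf(_SC_PAGESIZE);
        unsigned char *vec;
        char *buf;

        buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
                return 1;

        madvise(buf, len, MADV_HUGEPAGE);       /* hint: back with a THP */
        memset(buf, 0, len);                    /* fault the range in */

        vec = malloc(len / psize);              /* one byte per base page */
        if (vec && mincore(buf, len, vec) == 0)
                printf("resident: first=%d last=%d\n",
                       vec[0] & 1, vec[len / psize - 1] & 1);

        free(vec);
        munmap(buf, len);
        return 0;
}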
-rw-r--r--   include/linux/huge_mm.h    3
-rw-r--r--   mm/huge_memory.c          25
-rw-r--r--   mm/mincore.c               8
3 files changed, 35 insertions(+), 1 deletion(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 43a694e..25125fb 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -19,6 +19,9 @@ extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
 extern int zap_huge_pmd(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pmd_t *pmd);
+extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+                       unsigned long addr, unsigned long end,
+                       unsigned char *vec);
 
 enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_FLAG,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ae2bf08..37e89a3 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -923,6 +923,31 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        return ret;
 }
 
+int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+               unsigned long addr, unsigned long end,
+               unsigned char *vec)
+{
+       int ret = 0;
+
+       spin_lock(&vma->vm_mm->page_table_lock);
+       if (likely(pmd_trans_huge(*pmd))) {
+               ret = !pmd_trans_splitting(*pmd);
+               spin_unlock(&vma->vm_mm->page_table_lock);
+               if (unlikely(!ret))
+                       wait_split_huge_page(vma->anon_vma, pmd);
+               else {
+                       /*
+                        * All logical pages in the range are present
+                        * if backed by a huge page.
+                        */
+                       memset(vec, 1, (end - addr) >> PAGE_SHIFT);
+               }
+       } else
+               spin_unlock(&vma->vm_mm->page_table_lock);
+
+       return ret;
+}
+
 pmd_t *page_check_address_pmd(struct page *page,
                               struct mm_struct *mm,
                               unsigned long address,
diff --git a/mm/mincore.c b/mm/mincore.c
index 9959bb4..a4e6b9d 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -154,7 +154,13 @@ static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
-               split_huge_page_pmd(vma->vm_mm, pmd);
+               if (pmd_trans_huge(*pmd)) {
+                       if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
+                               vec += (next - addr) >> PAGE_SHIFT;
+                               continue;
+                       }
+                       /* fall through */
+               }
                if (pmd_none_or_clear_bad(pmd))
                        mincore_unmapped_range(vma, addr, next, vec);
                else
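A note on the design of mincore_huge_pmd() above: the pmd is tested under
page_table_lock, and the lock is dropped before either waiting for a
concurrent split (wait_split_huge_page()) or filling the vector, so it is
never held across the memset. The return value tells mincore_pmd_range()
what happened: nonzero means the range was answered in one go and the caller
skips ahead by (next - addr) >> PAGE_SHIFT vector bytes; zero means the huge
page was splitting (or the pmd was no longer huge), and the caller falls
through to the regular pte walk. For a fully huge pmd on x86-64 that memset
covers 2MB of 4KB base pages, i.e. 512 vector bytes at once.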