-rw-r--r--   fs/proc/task_mmu.c   45
-rw-r--r--   include/linux/mm.h    3
-rw-r--r--   mm/pagewalk.c        22
3 files changed, 67 insertions, 3 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 2a1bef9..47c03f4 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -650,6 +650,50 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
return err;
}
+static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
+{
+ u64 pme = 0;
+ if (pte_present(pte))
+ pme = PM_PFRAME(pte_pfn(pte) + offset)
+ | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
+ return pme;
+}
+
+static int pagemap_hugetlb_range(pte_t *pte, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
+{
+ struct vm_area_struct *vma;
+ struct pagemapread *pm = walk->private;
+ struct hstate *hs = NULL;
+ int err = 0;
+
+ vma = find_vma(walk->mm, addr);
+ if (vma)
+ hs = hstate_vma(vma);
+ for (; addr != end; addr += PAGE_SIZE) {
+ u64 pfn = PM_NOT_PRESENT;
+
+ if (vma && (addr >= vma->vm_end)) {
+ vma = find_vma(walk->mm, addr);
+ if (vma)
+ hs = hstate_vma(vma);
+ }
+
+ if (vma && (vma->vm_start <= addr) && is_vm_hugetlb_page(vma)) {
+ /* calculate pfn of the "raw" page in the hugepage. */
+ int offset = (addr & ~huge_page_mask(hs)) >> PAGE_SHIFT;
+ pfn = huge_pte_to_pagemap_entry(*pte, offset);
+ }
+ err = add_to_pagemap(addr, pfn, pm);
+ if (err)
+ return err;
+ }
+
+ cond_resched();
+
+ return err;
+}
+
/*
* /proc/pid/pagemap - an array mapping virtual pages to pfns
*
@@ -742,6 +786,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
pagemap_walk.pmd_entry = pagemap_pte_range;
pagemap_walk.pte_hole = pagemap_pte_hole;
+ pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
pagemap_walk.mm = mm;
pagemap_walk.private = &pm;
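
For context, here is a minimal userspace sketch (not part of this patch) of how the new hugetlb entries could be consumed from /proc/pid/pagemap. It assumes the pagemap entry format of this era (bits 0-54 = page frame number, bit 63 = page present); dump_pagemap(), addr and npages are hypothetical names used only for illustration.

/* Hypothetical reader for /proc/pid/pagemap (sketch, not from the patch).
 * Assumes the entry format of this era: bits 0-54 = pfn, bit 63 = present. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

#define PM_PFN_MASK	((1ULL << 55) - 1)
#define PM_PRESENT_BIT	(1ULL << 63)

static void dump_pagemap(pid_t pid, unsigned long addr, int npages)
{
	char path[64];
	uint64_t ent;
	int fd, i;

	snprintf(path, sizeof(path), "/proc/%d/pagemap", (int)pid);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return;
	/* one 8-byte entry per virtual page, indexed by virtual page number */
	lseek(fd, (off_t)(addr / getpagesize()) * sizeof(ent), SEEK_SET);
	for (i = 0; i < npages; i++) {
		if (read(fd, &ent, sizeof(ent)) != sizeof(ent))
			break;
		if (ent & PM_PRESENT_BIT)
			printf("page %d -> pfn %llu\n", i,
			       (unsigned long long)(ent & PM_PFN_MASK));
	}
	close(fd);
}

With the patch applied, pages inside a hugetlb mapping report consecutive pfns within each hugepage, which is exactly what huge_pte_to_pagemap_entry() above computes via its offset argument.
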
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 52b2645..9d65ae4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -770,6 +770,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlb,
* @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
* @pte_entry: if set, called for each non-empty PTE (4th-level) entry
* @pte_hole: if set, called for each hole at all levels
+ * @hugetlb_entry: if set, called for each hugetlb entry
*
* (see walk_page_range for more details)
*/
@@ -779,6 +780,8 @@ struct mm_walk {
int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
+ int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long,
+ struct mm_walk *);
struct mm_struct *mm;
void *private;
};
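
To illustrate the new hook, a minimal hypothetical walker (not from this patch) that counts non-empty hugetlb entries; count_huge_entry() and count_huge_entries() are invented names, and the callback follows the four-argument signature added above.

/* Hypothetical user of the new hugetlb_entry hook (sketch only). */
static int count_huge_entry(pte_t *pte, unsigned long addr,
			    unsigned long end, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	/* walk_page_range() only calls this for a non-none hugetlb pte */
	(*count)++;
	return 0;
}

static unsigned long count_huge_entries(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.hugetlb_entry	= count_huge_entry,
		.mm		= mm,
		.private	= &count,
	};

	walk_page_range(start, end, &walk);
	return count;
}
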
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index a286915..7b47a57 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -120,15 +120,31 @@ int walk_page_range(unsigned long addr, unsigned long end,
do {
next = pgd_addr_end(addr, end);
- /* skip hugetlb vma to avoid hugepage PMD being cleared
- * in pmd_none_or_clear_bad(). */
+ /*
+ * handle hugetlb vma individually because the pagetable walk for
+ * the hugetlb page depends on the architecture and we can't
+ * handle it in the same manner as non-huge pages.
+ */
vma = find_vma(walk->mm, addr);
+#ifdef CONFIG_HUGETLB_PAGE
if (vma && is_vm_hugetlb_page(vma)) {
+ pte_t *pte;
+ struct hstate *hs;
+
if (vma->vm_end < next)
next = vma->vm_end;
+ hs = hstate_vma(vma);
+ pte = huge_pte_offset(walk->mm,
+ addr & huge_page_mask(hs));
+ if (pte && !huge_pte_none(huge_ptep_get(pte))
+ && walk->hugetlb_entry)
+ err = walk->hugetlb_entry(pte, addr,
+ next, walk);
+ if (err)
+ break;
continue;
}
-
+#endif
if (pgd_none_or_clear_bad(pgd)) {
if (walk->pte_hole)
err = walk->pte_hole(addr, next, walk);
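
To make the lookup pattern in the hugetlb branch above easier to follow, here is the same huge_pte_offset()/huge_ptep_get() sequence isolated as a standalone sketch (not part of the patch; hugepage_mapped() is a hypothetical name).

/* Hypothetical helper mirroring the lookup above: given a hugetlb
 * vma and an address inside it, test whether a hugepage is mapped. */
static int hugepage_mapped(struct mm_struct *mm,
			   struct vm_area_struct *vma, unsigned long addr)
{
	struct hstate *hs = hstate_vma(vma);
	pte_t *pte;

	/* align down to the hugepage boundary before the lookup */
	pte = huge_pte_offset(mm, addr & huge_page_mask(hs));
	return pte && !huge_pte_none(huge_ptep_get(pte));
}

The alignment matters because huge_pte_offset() expects a hugepage-aligned address; the walk code above applies the same huge_page_mask() before dispatching to hugetlb_entry.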