summaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorvenkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com>2008-12-18 11:41:28 -0800
committerH. Peter Anvin <hpa@zytor.com>2008-12-18 13:30:15 -0800
commite121e418441525b5636321fe03d16f0193ad218e (patch)
treed9f54fef9c4f137c2e8327edbf3ba8110dfb968d /mm
parent3c8bb73ace6249bd089b70c941440441940e3365 (diff)
downloadop-kernel-dev-e121e418441525b5636321fe03d16f0193ad218e.zip
op-kernel-dev-e121e418441525b5636321fe03d16f0193ad218e.tar.gz
x86: PAT: add follow_pfnmap_pte routine to help track pfnmap pages - v3
Impact: New currently unused interface. Add a generic interface to follow pfn in a pfnmap vma range. This is used by one of the subsequent x86 PAT related patch to keep track of memory types for vma regions across vma copy and free. Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com> Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'mm')
-rw-r--r--mm/memory.c43
1 file changed, 43 insertions, 0 deletions
diff --git a/mm/memory.c b/mm/memory.c
index cef95c8..8ca6bbf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1111,6 +1111,49 @@ no_page_table:
return page;
}
+/*
+ * follow_pfnmap_pte - look up the pte mapping @address in a pfnmap vma.
+ * @vma:      the vma to walk; must be a pfn mapping (is_pfn_mapping())
+ * @address:  user virtual address to look up
+ * @ret_ptep: out parameter; receives a copy of the pte on success
+ *
+ * Walks pgd -> pud -> pmd -> pte for @address and, if a present pte
+ * exists, copies it through @ret_ptep.
+ *
+ * Returns 0 on success, -EINVAL if the vma is not a pfn mapping, any
+ * page-table level is missing/bad, or the pte is not present.
+ *
+ * The pte value is read under the page-table lock, but the lock is
+ * dropped before returning, so the copied entry may be stale by the
+ * time the caller inspects it.
+ */
+int follow_pfnmap_pte(struct vm_area_struct *vma, unsigned long address,
+			pte_t *ret_ptep)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *ptep, pte;
+	spinlock_t *ptl;
+	struct mm_struct *mm = vma->vm_mm;
+
+	/* Only VM_PFNMAP vmas are handled by this helper. */
+	if (!is_pfn_mapping(vma))
+		goto err;
+
+	pgd = pgd_offset(mm, address);
+	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+		goto err;
+
+	pud = pud_offset(pgd, address);
+	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+		goto err;
+
+	pmd = pmd_offset(pud, address);
+	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+		goto err;
+
+	/* Map and lock the pte page; must be unlocked on every exit path. */
+	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+
+	pte = *ptep;
+	if (!pte_present(pte))
+		goto err_unlock;
+
+	*ret_ptep = pte;
+	pte_unmap_unlock(ptep, ptl);
+	return 0;
+
+err_unlock:
+	pte_unmap_unlock(ptep, ptl);
+err:
+	return -EINVAL;
+}
+
/* Can we do the FOLL_ANON optimization? */
static inline int use_zero_page(struct vm_area_struct *vma)
{
OpenPOWER on IntegriCloud