author		malc <av1474@comtv.ru>	2014-01-27 17:06:53 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-27 21:02:39 -0800
commit		add688fbd32158440dbe62c07269a39ed969c059 (patch)
tree		c4bf48c0c7cf65982c11b2cf1c949f52a3750148 /mm/vmalloc.c
parent		fb5bb60cd004a00c1d11db680a37942ecdedb1c5 (diff)
Revert "mm/vmalloc: interchage the implementation of vmalloc_to_{pfn,page}"
Revert commit ece86e222db4, which was intended as a small performance improvement. Despite the claim that the patch doesn't introduce any functional changes, in fact it does: the "no page" path behaves differently now. Originally, vmalloc_to_page might return NULL under some conditions; with the new implementation it returns pfn_to_page(0), which is not the same as NULL.

A simple test shows the difference.

test.c

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

int __init myi(void)
{
	struct page *p;
	void *v;

	v = vmalloc(PAGE_SIZE);
	/* trigger the "no page" path in vmalloc_to_page */
	vfree(v);

	p = vmalloc_to_page(v);

	pr_err("expected val = NULL, returned val = %p", p);

	return -EBUSY;
}

void __exit mye(void)
{
}

module_init(myi)
module_exit(mye)

Before interchange:
expected val = NULL, returned val = (null)

After interchange:
expected val = NULL, returned val = c7ebe000

Signed-off-by: Vladimir Murzin <murzin.v@gmail.com>
Cc: Jianyu Zhan <nasa4836@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
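For readers wondering why pfn_to_page(0) is not the same as NULL: under the flat memory model, pfn_to_page() is essentially pointer arithmetic into the mem_map array, so pfn 0 resolves to the address of the first struct page, a valid non-NULL kernel pointer. A simplified sketch of that lookup (assuming FLATMEM; other memory models compute the pointer differently, but the result for pfn 0 is equally non-NULL):

/* Simplified FLATMEM view, sketch only (see include/asm-generic/memory_model.h) */
#define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))

/*
 * With pfn == 0 this evaluates to a pointer into mem_map -- never NULL --
 * so a caller that tests the result against NULL can no longer detect the
 * "no page" case after the interchange.
 */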
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e4f0db2..0fdf968 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -220,12 +220,12 @@ int is_vmalloc_or_module_addr(const void *x)
 }

 /*
- * Walk a vmap address to the physical pfn it maps to.
+ * Walk a vmap address to the struct page it maps.
  */
-unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
+struct page *vmalloc_to_page(const void *vmalloc_addr)
 {
 	unsigned long addr = (unsigned long) vmalloc_addr;
-	unsigned long pfn = 0;
+	struct page *page = NULL;
 	pgd_t *pgd = pgd_offset_k(addr);

 	/*
@@ -244,23 +244,23 @@ unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
 				ptep = pte_offset_map(pmd, addr);
 				pte = *ptep;
 				if (pte_present(pte))
-					pfn = pte_pfn(pte);
+					page = pte_page(pte);
 				pte_unmap(ptep);
 			}
 		}
 	}
-	return pfn;
+	return page;
 }
-EXPORT_SYMBOL(vmalloc_to_pfn);
+EXPORT_SYMBOL(vmalloc_to_page);

 /*
- * Map a vmalloc()-space virtual address to the struct page.
+ * Map a vmalloc()-space virtual address to the physical page frame number.
  */
-struct page *vmalloc_to_page(const void *vmalloc_addr)
+unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
 {
-	return pfn_to_page(vmalloc_to_pfn(vmalloc_addr));
+	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
 }
-EXPORT_SYMBOL(vmalloc_to_page);
+EXPORT_SYMBOL(vmalloc_to_pfn);

 /*** Global kva allocator ***/
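As a purely illustrative aside (not part of this patch): with vmalloc_to_page() restored as the worker that can return NULL, a caller that needs a pfn but wants to detect the "no page" case might wrap the two exported helpers as below. The function name and the sentinel value are made up for the example.

/* Hypothetical wrapper, illustrative only -- not from this patch. */
static unsigned long vaddr_to_pfn_checked(const void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	if (!page)		/* unmapped vmalloc address: the "no page" path */
		return 0;	/* caller-defined sentinel */
	return page_to_pfn(page);
}

Calling vmalloc_to_pfn() directly on an unmapped address, by contrast, converts whatever vmalloc_to_page() returned without any check, which is presumably why callers that might hit the "no page" path are expected to go through vmalloc_to_page().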