From fb5bb60cd004a00c1d11db680a37942ecdedb1c5 Mon Sep 17 00:00:00 2001
From: Yinghai Lu
Date: Mon, 27 Jan 2014 17:06:52 -0800
Subject: memblock: don't silently align size in memblock_virt_alloc()

The original __alloc_memory_core_early() bootmem wrapper did not silently
align the size, and we should not do so here either: if the caller later
frees with the old (unrounded) size, the rounded-up tail of the range is
left reserved.

The rounding in memblock_virt_alloc_internal() was evidently copied from
memblock_alloc_base_nid(), which is wrong for the same reason, so remove
it from both places.

Signed-off-by: Yinghai Lu
Acked-by: Santosh Shilimkar
Cc: Dave Hansen
Cc: Russell King
Cc: Konrad Rzeszutek Wilk
Cc: Ingo Molnar
Cc: "H. Peter Anvin"
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memblock.c | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/mm/memblock.c b/mm/memblock.c
index 9c0aeef..87d21a6 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -984,9 +984,6 @@ static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
 	if (!align)
 		align = SMP_CACHE_BYTES;
 
-	/* align @size to avoid excessive fragmentation on reserved array */
-	size = round_up(size, align);
-
 	found = memblock_find_in_range_node(size, align, 0, max_addr, nid);
 	if (found && !memblock_reserve(found, size))
 		return found;
@@ -1080,9 +1077,6 @@ static void * __init memblock_virt_alloc_internal(
 	if (!align)
 		align = SMP_CACHE_BYTES;
 
-	/* align @size to avoid excessive fragmentation on reserved array */
-	size = round_up(size, align);
-
 again:
 	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
 					    nid);
-- 
cgit v1.1
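To make the leak above concrete: with align = 64 and a request of 100 bytes,
the removed round_up() reserved 128 bytes, while a later free of the original
100 leaves 28 bytes reserved forever.  The userspace sketch below models that
arithmetic; it is not memblock code: ROUND_UP is a stand-in for the kernel's
round_up(), and the sizes are made up for illustration.

/* round-up-leak.c: model of the leak the patch above removes. */
#include <stdio.h>

/* Power-of-two round-up, same arithmetic as the kernel's round_up(). */
#define ROUND_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)((a) - 1)))

int main(void)
{
	unsigned long size = 100;	/* what the caller asked for */
	unsigned long align = 64;	/* e.g. SMP_CACHE_BYTES */

	/* Old behaviour: the allocator silently reserved the rounded size. */
	unsigned long reserved = ROUND_UP(size, align);	/* 128 */

	/* The caller only knows @size, so its eventual free covers 100
	 * bytes and the trailing 28 stay reserved. */
	printf("reserved %lu, freed %lu, leaked %lu bytes\n",
	       reserved, size, reserved - size);
	return 0;
}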
From add688fbd32158440dbe62c07269a39ed969c059 Mon Sep 17 00:00:00 2001
From: malc
Date: Mon, 27 Jan 2014 17:06:53 -0800
Subject: Revert "mm/vmalloc: interchange the implementation of vmalloc_to_{pfn,page}"

Revert commit ece86e222db4, which was intended as a small performance
improvement.  Despite the claim that the patch doesn't introduce any
functional changes, it in fact does: the "no page" path now behaves
differently.  Originally, vmalloc_to_page() might return NULL under some
conditions; with the new implementation it returns pfn_to_page(0), which
is not the same as NULL.  A simple test shows the difference.

test.c

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

int __init myi(void)
{
	struct page *p;
	void *v;

	v = vmalloc(PAGE_SIZE);
	/* trigger the "no page" path in vmalloc_to_page() */
	vfree(v);
	p = vmalloc_to_page(v);

	pr_err("expected val = NULL, returned val = %p", p);

	return -EBUSY;
}

void __exit mye(void)
{
}

module_init(myi)
module_exit(mye)

Before interchange: expected val = NULL, returned val = (null)
After interchange:  expected val = NULL, returned val = c7ebe000

Signed-off-by: Vladimir Murzin
Cc: Jianyu Zhan
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/vmalloc.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e4f0db2..0fdf968 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -220,12 +220,12 @@ int is_vmalloc_or_module_addr(const void *x)
 }
 
 /*
- * Walk a vmap address to the physical pfn it maps to.
+ * Walk a vmap address to the struct page it maps.
  */
-unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
+struct page *vmalloc_to_page(const void *vmalloc_addr)
 {
 	unsigned long addr = (unsigned long) vmalloc_addr;
-	unsigned long pfn = 0;
+	struct page *page = NULL;
 	pgd_t *pgd = pgd_offset_k(addr);
 
 	/*
@@ -244,23 +244,23 @@ unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
 				ptep = pte_offset_map(pmd, addr);
 				pte = *ptep;
 				if (pte_present(pte))
-					pfn = pte_pfn(pte);
+					page = pte_page(pte);
 				pte_unmap(ptep);
 			}
 		}
 	}
-	return pfn;
+	return page;
 }
-EXPORT_SYMBOL(vmalloc_to_pfn);
+EXPORT_SYMBOL(vmalloc_to_page);
 
 /*
- * Map a vmalloc()-space virtual address to the struct page.
+ * Map a vmalloc()-space virtual address to the physical page frame number.
  */
-struct page *vmalloc_to_page(const void *vmalloc_addr)
+unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
 {
-	return pfn_to_page(vmalloc_to_pfn(vmalloc_addr));
+	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
 }
-EXPORT_SYMBOL(vmalloc_to_page);
+EXPORT_SYMBOL(vmalloc_to_pfn);
 
 /*** Global kva allocator ***/
-- 
cgit v1.1

From e82cb95d626a6bb0e4fe7db1f311dc22039c2ed3 Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Mon, 27 Jan 2014 17:06:55 -0800
Subject: mm: bring back /sys/kernel/mm

Commit da29bd36224b ("mm/mm_init.c: make creation of the mm_kobj happen
earlier than device_initcall") changed it to pure_initcall(mm_sysfs_init).

That's too early: mm_sysfs_init() depends on core_initcall(ksysfs_init)
having already made the kernel_kobj directory "kernel" in which to create
"mm".  Make it postcore_initcall(mm_sysfs_init).

We could use core_initcall() and depend upon the Makefile link order
"kernel/ mm/ fs/ ipc/ security/ ...", as core_initcall(debugfs_init) and
core_initcall(securityfs_init) do; but better not.

Signed-off-by: Hugh Dickins
Acked-by: Paul Gortmaker
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/mm_init.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/mm_init.c b/mm/mm_init.c
index 857a643..4074caf 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -202,4 +202,4 @@ static int __init mm_sysfs_init(void)
 
 	return 0;
 }
-pure_initcall(mm_sysfs_init);
+postcore_initcall(mm_sysfs_init);
-- 
cgit v1.1

From a3978a519461b095b776f44a86079f5448c96963 Mon Sep 17 00:00:00 2001
From: Wanpeng Li
Date: Mon, 27 Jan 2014 17:07:17 -0800
Subject: mm/migrate.c: fix setting of cpupid on page migration twice against normal page

Commit 7851a45cd3f6 ("mm: numa: Copy cpupid on page migration") already
copies over the cpupid at page migration time, so it is unnecessary to
set it again in alloc_misplaced_dst_page().

Signed-off-by: Wanpeng Li
Reviewed-by: Naoya Horiguchi
Acked-by: Mel Gorman
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/migrate.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 734704f..482a33d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1548,8 +1548,6 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
 					  __GFP_NOMEMALLOC | __GFP_NORETRY |
 					  __GFP_NOWARN) &
 					 ~GFP_IOFS, 0);
-	if (newpage)
-		page_cpupid_xchg_last(newpage, page_cpupid_last(page));
 
 	return newpage;
 }
-- 
cgit v1.1
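The redundancy that last patch removes can be modelled in isolation.  The
sketch below is plain userspace C, not kernel code: struct page and the two
helpers are minimal stand-ins for the kernel's page_cpupid_xchg_last() and
page_cpupid_last(), and the two calls stand for the copy that
alloc_misplaced_dst_page() used to do and the one migrate_page_copy() has
done for every migrated page since commit 7851a45cd3f6.

/* cpupid-twice.c: model of the duplicated cpupid copy removed above. */
#include <stdio.h>

struct page { int cpupid; };	/* stand-in for the kernel's struct page */

/* Stand-in for page_cpupid_xchg_last(): set the cpupid, return the old one. */
static int page_cpupid_xchg_last(struct page *p, int cpupid)
{
	int last = p->cpupid;

	p->cpupid = cpupid;
	return last;
}

/* Stand-in for page_cpupid_last(). */
static int page_cpupid_last(struct page *p)
{
	return p->cpupid;
}

int main(void)
{
	struct page src = { .cpupid = 42 };	/* page being migrated */
	struct page dst = { .cpupid = -1 };	/* freshly allocated page */

	/* Copy 1: what alloc_misplaced_dst_page() did before this patch. */
	page_cpupid_xchg_last(&dst, page_cpupid_last(&src));

	/* Copy 2: what migrate_page_copy() does on the migration path
	 * proper, which is what made copy 1 dead weight. */
	page_cpupid_xchg_last(&dst, page_cpupid_last(&src));

	printf("dst.cpupid = %d with one copy or two\n", dst.cpupid);
	return 0;
}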