From b0f84ac352762ed02d7ea9f284942a8cab7f9077 Mon Sep 17 00:00:00 2001
From: "Luis R. Rodriguez"
Date: Thu, 17 Mar 2016 14:17:16 -0700
Subject: ia64: define ioremap_uc()

All architectures now need ioremap_uc(). ia64 already defines it
through its ioremap_nocache(), which ensures it *only* uses UC.

This has been needed since v4.3 to complete an allyesconfig compile on
ia64; other architectures needed the same fix, and this one seems to
have fallen through the cracks.

Signed-off-by: Luis R. Rodriguez
Reported-by: kbuild test robot
Acked-by: Tony Luck
Cc: [4.3+]
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/ia64/include/asm/io.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch')

diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index a865d2a..5de673a 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -433,6 +433,7 @@ static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned lo
 	return ioremap(phys_addr, size);
 }
 #define ioremap_cache ioremap_cache
+#define ioremap_uc ioremap_nocache

 /*
--
cgit v1.1
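For context, ioremap_uc() is the variant a driver uses when a mapping must be
strictly uncacheable (UC), for example MMIO registers with read side effects.
A minimal sketch of a caller follows; the device name, base address, and size
are hypothetical, and on ia64 the call now simply resolves to
ioremap_nocache() as defined above:

    #include <linux/io.h>
    #include <linux/errno.h>

    /* Hypothetical register block; base and size are illustrative only. */
    #define FOO_MMIO_BASE	0xfed00000UL
    #define FOO_MMIO_SIZE	0x1000UL

    static void __iomem *foo_regs;

    static int foo_map_regs(void)
    {
    	/*
    	 * Request a strictly uncached mapping of the register block.
    	 * On ia64 this is now ioremap_nocache(), which guarantees UC.
    	 */
    	foo_regs = ioremap_uc(FOO_MMIO_BASE, FOO_MMIO_SIZE);
    	if (!foo_regs)
    		return -ENOMEM;
    	return 0;
    }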
From e7df0d88c455c915376397b4bd72a83b9ed656f7 Mon Sep 17 00:00:00 2001
From: Joonsoo Kim
Date: Thu, 17 Mar 2016 14:17:59 -0700
Subject: powerpc: query dynamic DEBUG_PAGEALLOC setting

We can disable debug_pagealloc processing even if the code is compiled
with CONFIG_DEBUG_PAGEALLOC.  This patch changes the powerpc code to
query at runtime whether the feature is enabled.

Signed-off-by: Joonsoo Kim
Acked-by: David Rientjes
Cc: Christian Borntraeger
Cc: Benjamin Herrenschmidt
Cc: Christoph Lameter
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/powerpc/kernel/traps.c     |  5 ++---
 arch/powerpc/mm/hash_utils_64.c | 36 ++++++++++++++++++++----------------
 arch/powerpc/mm/init_32.c       |  8 ++++----
 3 files changed, 26 insertions(+), 23 deletions(-)

(limited to 'arch')

diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index b6becc7..33c47fc 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -203,9 +203,8 @@ static int __kprobes __die(const char *str, struct pt_regs *regs, long err)
 #ifdef CONFIG_SMP
 	printk("SMP NR_CPUS=%d ", NR_CPUS);
 #endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	printk("DEBUG_PAGEALLOC ");
-#endif
+	if (debug_pagealloc_enabled())
+		printk("DEBUG_PAGEALLOC ");
 #ifdef CONFIG_NUMA
 	printk("NUMA ");
 #endif
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index ba59d59..1005281 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -255,8 +255,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		if (ret < 0)
 			break;
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
-		if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
+		if (debug_pagealloc_enabled() &&
+			(paddr >> PAGE_SHIFT) < linear_map_hash_count)
 			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 	}
@@ -512,17 +514,17 @@ static void __init htab_init_page_sizes(void)
 	if (mmu_has_feature(MMU_FTR_16M_PAGE))
 		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
 		       sizeof(mmu_psize_defaults_gp));
- found:
-#ifndef CONFIG_DEBUG_PAGEALLOC
-	/*
-	 * Pick a size for the linear mapping. Currently, we only support
-	 * 16M, 1M and 4K which is the default
-	 */
-	if (mmu_psize_defs[MMU_PAGE_16M].shift)
-		mmu_linear_psize = MMU_PAGE_16M;
-	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
-		mmu_linear_psize = MMU_PAGE_1M;
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+found:
+	if (!debug_pagealloc_enabled()) {
+		/*
+		 * Pick a size for the linear mapping. Currently, we only
+		 * support 16M, 1M and 4K which is the default
+		 */
+		if (mmu_psize_defs[MMU_PAGE_16M].shift)
+			mmu_linear_psize = MMU_PAGE_16M;
+		else if (mmu_psize_defs[MMU_PAGE_1M].shift)
+			mmu_linear_psize = MMU_PAGE_1M;
+	}

 #ifdef CONFIG_PPC_64K_PAGES
 	/*
@@ -721,10 +723,12 @@ static void __init htab_initialize(void)
 	prot = pgprot_val(PAGE_KERNEL);

 #ifdef CONFIG_DEBUG_PAGEALLOC
-	linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
-	linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
-						   1, ppc64_rma_size));
-	memset(linear_map_hash_slots, 0, linear_map_hash_count);
+	if (debug_pagealloc_enabled()) {
+		linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
+		linear_map_hash_slots = __va(memblock_alloc_base(
+				linear_map_hash_count, 1, ppc64_rma_size));
+		memset(linear_map_hash_slots, 0, linear_map_hash_count);
+	}
 #endif /* CONFIG_DEBUG_PAGEALLOC */

 	/* On U3 based machines, we need to reserve the DART area and
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index a10be66..c2b7716 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -112,10 +112,10 @@ void __init MMU_setup(void)
 	if (strstr(boot_command_line, "noltlbs")) {
 		__map_without_ltlbs = 1;
 	}
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	__map_without_bats = 1;
-	__map_without_ltlbs = 1;
-#endif
+	if (debug_pagealloc_enabled()) {
+		__map_without_bats = 1;
+		__map_without_ltlbs = 1;
+	}
 }

 /*
--
cgit v1.1
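Note the layering in the htab_bolt_mapping() hunk: the #ifdef has to stay,
because linear_map_hash_slots only exists when CONFIG_DEBUG_PAGEALLOC is
compiled in, while the new debug_pagealloc_enabled() test additionally skips
the work when the feature is compiled in but switched off at boot. A minimal
sketch of that pattern, with hypothetical names (not the actual powerpc code):

    #include <linux/mm.h>

    #ifdef CONFIG_DEBUG_PAGEALLOC
    /* Exists only when the feature is compiled in (hypothetical array). */
    static unsigned char *example_hash_slots;
    #endif

    static void example_record_slot(unsigned long pfn, int slot)
    {
    #ifdef CONFIG_DEBUG_PAGEALLOC
    	/*
    	 * Compile-time guard: the array above is absent otherwise.
    	 * Runtime guard: the feature may be built in yet disabled,
    	 * e.g. when debug_pagealloc=on was not given on the command
    	 * line, in which case we skip the bookkeeping entirely.
    	 */
    	if (debug_pagealloc_enabled())
    		example_hash_slots[pfn] = slot | 0x80;
    #endif
    }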
"will" : "won't"); @@ -919,6 +917,11 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end) { unsigned long addr = (unsigned long) begin; + /* Prefer user request first */ + if (!set_initfree_done) { + if (debug_pagealloc_enabled()) + initfree = 0; + } if (kdata_huge && !initfree) { pr_warn("Warning: ignoring initfree=0: incompatible with kdata=huge\n"); initfree = 1; -- cgit v1.1 From 01609ec2fa1f0c1ad016d7f6ae2371313275984a Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Thu, 17 Mar 2016 14:18:59 -0700 Subject: ARC, thp: remove infrastructure for handling splitting PMDs With THP refcounting work, no need to mark PMDs splitting. (ARC got missed under the sweeping arch change as THP support was likely not present in orig baseline) Signed-off-by: Vineet Gupta Cc: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arc/include/asm/hugepage.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h index c5094de..7afe335 100644 --- a/arch/arc/include/asm/hugepage.h +++ b/arch/arc/include/asm/hugepage.h @@ -30,19 +30,16 @@ static inline pmd_t pte_pmd(pte_t pte) #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) #define pmd_mkhuge(pmd) pte_pmd(pte_mkhuge(pmd_pte(pmd))) #define pmd_mknotpresent(pmd) pte_pmd(pte_mknotpresent(pmd_pte(pmd))) -#define pmd_mksplitting(pmd) pte_pmd(pte_mkspecial(pmd_pte(pmd))) #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd))) #define pmd_write(pmd) pte_write(pmd_pte(pmd)) #define pmd_young(pmd) pte_young(pmd_pte(pmd)) #define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd)) #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd)) -#define pmd_special(pmd) pte_special(pmd_pte(pmd)) #define mk_pmd(page, prot) pte_pmd(mk_pte(page, prot)) #define pmd_trans_huge(pmd) (pmd_val(pmd) & _PAGE_HW_SZ) -#define pmd_trans_splitting(pmd) (pmd_trans_huge(pmd) && pmd_special(pmd)) #define pfn_pmd(pfn, prot) (__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))) -- cgit v1.1 From 3ed3a4f0ddffece942bb2661924d87be4ce63cb7 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Thu, 17 Mar 2016 14:19:11 -0700 Subject: mm: cleanup *pte_alloc* interfaces There are few things about *pte_alloc*() helpers worth cleaning up: - 'vma' argument is unused, let's drop it; - most __pte_alloc() callers do speculative check for pmd_none(), before taking ptl: let's introduce pte_alloc() macro which does the check. The only direct user of __pte_alloc left is userfaultfd, which has different expectation about atomicity wrt pmd. - pte_alloc_map() and pte_alloc_map_lock() are redefined using pte_alloc(). [sudeep.holla@arm.com: fix build for arm64 hugetlbpage] [sfr@canb.auug.org.au: fix arch/arm/mm/mmu.c some more] Signed-off-by: Kirill A. Shutemov Cc: Dave Hansen Signed-off-by: Sudeep Holla Acked-by: Kirill A. 
From 01609ec2fa1f0c1ad016d7f6ae2371313275984a Mon Sep 17 00:00:00 2001
From: Vineet Gupta
Date: Thu, 17 Mar 2016 14:18:59 -0700
Subject: ARC, thp: remove infrastructure for handling splitting PMDs

With the THP refcounting work, there is no need to mark PMDs as
splitting.  (ARC was missed in the sweeping arch-wide change, as THP
support was likely not present in the original baseline.)

Signed-off-by: Vineet Gupta
Cc: Kirill A. Shutemov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arc/include/asm/hugepage.h | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'arch')

diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
index c5094de..7afe335 100644
--- a/arch/arc/include/asm/hugepage.h
+++ b/arch/arc/include/asm/hugepage.h
@@ -30,19 +30,16 @@ static inline pmd_t pte_pmd(pte_t pte)
 #define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
 #define pmd_mkhuge(pmd)		pte_pmd(pte_mkhuge(pmd_pte(pmd)))
 #define pmd_mknotpresent(pmd)	pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
-#define pmd_mksplitting(pmd)	pte_pmd(pte_mkspecial(pmd_pte(pmd)))
 #define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))

 #define pmd_write(pmd)		pte_write(pmd_pte(pmd))
 #define pmd_young(pmd)		pte_young(pmd_pte(pmd))
 #define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
 #define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
-#define pmd_special(pmd)	pte_special(pmd_pte(pmd))

 #define mk_pmd(page, prot)	pte_pmd(mk_pte(page, prot))

 #define pmd_trans_huge(pmd)	(pmd_val(pmd) & _PAGE_HW_SZ)
-#define pmd_trans_splitting(pmd)	(pmd_trans_huge(pmd) && pmd_special(pmd))

 #define pfn_pmd(pfn, prot)	(__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
--
cgit v1.1
From 3ed3a4f0ddffece942bb2661924d87be4ce63cb7 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Thu, 17 Mar 2016 14:19:11 -0700
Subject: mm: cleanup *pte_alloc* interfaces

There are a few things about the *pte_alloc*() helpers worth cleaning
up:

- The 'vma' argument is unused; let's drop it.

- Most __pte_alloc() callers do a speculative check for pmd_none()
  before taking the ptl: let's introduce a pte_alloc() macro which does
  the check.

  The only direct user of __pte_alloc() left is userfaultfd, which has
  different expectations about atomicity with respect to the pmd.

- pte_alloc_map() and pte_alloc_map_lock() are redefined using
  pte_alloc().

[sudeep.holla@arm.com: fix build for arm64 hugetlbpage]
[sfr@canb.auug.org.au: fix arch/arm/mm/mmu.c some more]
Signed-off-by: Kirill A. Shutemov
Cc: Dave Hansen
Signed-off-by: Sudeep Holla
Acked-by: Kirill A. Shutemov
Signed-off-by: Stephen Rothwell
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arm/mm/mmu.c            | 6 +++---
 arch/arm/mm/pgd.c            | 2 +-
 arch/arm64/mm/hugetlbpage.c  | 2 +-
 arch/ia64/mm/hugetlbpage.c   | 2 +-
 arch/metag/mm/hugetlbpage.c  | 2 +-
 arch/parisc/mm/hugetlbpage.c | 2 +-
 arch/sh/mm/hugetlbpage.c     | 2 +-
 arch/sparc/mm/hugetlbpage.c  | 2 +-
 arch/tile/mm/hugetlbpage.c   | 2 +-
 arch/um/kernel/skas/mmu.c    | 2 +-
 arch/unicore32/mm/pgd.c      | 2 +-
 arch/x86/kernel/tboot.c      | 2 +-
 12 files changed, 14 insertions(+), 14 deletions(-)

(limited to 'arch')

diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 434d76f..88fbe0d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -732,7 +732,7 @@ static void *__init late_alloc(unsigned long sz)
 	return ptr;
 }

-static pte_t * __init pte_alloc(pmd_t *pmd, unsigned long addr,
+static pte_t * __init arm_pte_alloc(pmd_t *pmd, unsigned long addr,
 				unsigned long prot,
 				void *(*alloc)(unsigned long sz))
 {
@@ -747,7 +747,7 @@ static pte_t * __init pte_alloc(pmd_t *pmd, unsigned long addr,
 static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
 				      unsigned long prot)
 {
-	return pte_alloc(pmd, addr, prot, early_alloc);
+	return arm_pte_alloc(pmd, addr, prot, early_alloc);
 }

 static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
@@ -756,7 +756,7 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 				  void *(*alloc)(unsigned long sz),
 				  bool ng)
 {
-	pte_t *pte = pte_alloc(pmd, addr, type->prot_l1, alloc);
+	pte_t *pte = arm_pte_alloc(pmd, addr, type->prot_l1, alloc);
 	do {
 		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
 			    ng ? PTE_EXT_NG : 0);
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index e683db1..b8d4773 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -80,7 +80,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	if (!new_pmd)
 		goto no_pmd;

-	new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
+	new_pte = pte_alloc_map(mm, new_pmd, 0);
 	if (!new_pte)
 		goto no_pte;
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index da30529..589fd28 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -124,7 +124,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 		 * will be no pte_unmap() to correspond with this
 		 * pte_alloc_map().
 		 */
-		pte = pte_alloc_map(mm, NULL, pmd, addr);
+		pte = pte_alloc_map(mm, pmd, addr);
 	} else if (sz == PMD_SIZE) {
 		if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
 		    pud_none(*pud))
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index f50d4b3..85de86d 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -38,7 +38,7 @@ huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
 	if (pud) {
 		pmd = pmd_alloc(mm, pud, taddr);
 		if (pmd)
-			pte = pte_alloc_map(mm, NULL, pmd, taddr);
+			pte = pte_alloc_map(mm, pmd, taddr);
 	}
 	return pte;
 }
diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
index 53f0f6c..b38700ae 100644
--- a/arch/metag/mm/hugetlbpage.c
+++ b/arch/metag/mm/hugetlbpage.c
@@ -67,7 +67,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	pgd = pgd_offset(mm, addr);
 	pud = pud_offset(pgd, addr);
 	pmd = pmd_offset(pud, addr);
-	pte = pte_alloc_map(mm, NULL, pmd, addr);
+	pte = pte_alloc_map(mm, pmd, addr);
 	pgd->pgd &= ~_PAGE_SZ_MASK;
 	pgd->pgd |= _PAGE_SZHUGE;
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index 54ba392..5d6eea9 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -63,7 +63,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	if (pud) {
 		pmd = pmd_alloc(mm, pud, addr);
 		if (pmd)
-			pte = pte_alloc_map(mm, NULL, pmd, addr);
+			pte = pte_alloc_map(mm, pmd, addr);
 	}
 	return pte;
 }
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 6385f60..cc948db 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -35,7 +35,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 		if (pud) {
 			pmd = pmd_alloc(mm, pud, addr);
 			if (pmd)
-				pte = pte_alloc_map(mm, NULL, pmd, addr);
+				pte = pte_alloc_map(mm, pmd, addr);
 		}
 	}
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 131eaf4..4977800 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -146,7 +146,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	if (pud) {
 		pmd = pmd_alloc(mm, pud, addr);
 		if (pmd)
-			pte = pte_alloc_map(mm, NULL, pmd, addr);
+			pte = pte_alloc_map(mm, pmd, addr);
 	}
 	return pte;
 }
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index c034dc3..e212c64 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -77,7 +77,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	else {
 		if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
 			panic("Unexpected page size %#lx\n", sz);
-		return pte_alloc_map(mm, NULL, pmd, addr);
+		return pte_alloc_map(mm, pmd, addr);
 	}
 }
 #else
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 9591a66..3943e9d 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -31,7 +31,7 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 	if (!pmd)
 		goto out_pmd;

-	pte = pte_alloc_map(mm, NULL, pmd, proc);
+	pte = pte_alloc_map(mm, pmd, proc);
 	if (!pte)
 		goto out_pte;
diff --git a/arch/unicore32/mm/pgd.c b/arch/unicore32/mm/pgd.c
index 2ade20d..c572a28 100644
--- a/arch/unicore32/mm/pgd.c
+++ b/arch/unicore32/mm/pgd.c
@@ -54,7 +54,7 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 	if (!new_pmd)
 		goto no_pmd;

-	new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
+	new_pte = pte_alloc_map(mm, new_pmd, 0);
 	if (!new_pte)
 		goto no_pte;
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 91a4496..e72a07f 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -135,7 +135,7 @@ static int map_tboot_page(unsigned long vaddr, unsigned long pfn,
 	pmd = pmd_alloc(&tboot_mm, pud, vaddr);
 	if (!pmd)
 		return -1;
-	pte = pte_alloc_map(&tboot_mm, NULL, pmd, vaddr);
+	pte = pte_alloc_map(&tboot_mm, pmd, vaddr);
 	if (!pte)
 		return -1;
 	set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
--
cgit v1.1
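The shape of the new helpers is easiest to see side by side. Roughly,
paraphrasing the include/linux/mm.h definitions this patch introduces (a
sketch; the real definitions differ in detail):

    /*
     * Speculative check: only take the allocation path when the PTE
     * page is actually missing. pmd_none() is read without the ptl;
     * __pte_alloc() re-checks under the lock, so a race merely costs
     * a wasted allocation, never a lost table.
     */
    #define pte_alloc(mm, pmd, address)					\
    	(unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))

    /* Redefined on top of pte_alloc(), minus the old unused vma arg. */
    #define pte_alloc_map(mm, pmd, address)				\
    	(pte_alloc(mm, pmd, address) ?					\
    		NULL : pte_offset_map(pmd, address))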
From fe896d1878949ea92ba547587bc3075cc688fb8f Mon Sep 17 00:00:00 2001
From: Joonsoo Kim
Date: Thu, 17 Mar 2016 14:19:26 -0700
Subject: mm: introduce page reference manipulation functions

The success of CMA allocation largely depends on the success of
migration, and the key factor there is the page reference count.  Until
now, page references have been manipulated by calling atomic functions
directly, so we cannot track who manipulates them and where; that makes
it hard to find the actual reason for a CMA allocation failure.  CMA
allocation should be guaranteed to succeed, so finding the offending
place is really important.

In this patch, call sites that manipulate the page reference are
converted to the newly introduced wrapper functions.  This is a
preparation step for adding a tracepoint to each page reference
manipulation function.  With that facility, we can easily find the
reason for a CMA allocation failure.  There is no functional change in
this patch.

In addition, this patch also converts the reference read sites.  It
will help a second step that renames page._count to something else and
prevents later attempts to access it directly (suggested by Andrew).

Signed-off-by: Joonsoo Kim
Acked-by: Michal Nazarewicz
Acked-by: Vlastimil Babka
Cc: Minchan Kim
Cc: Mel Gorman
Cc: "Kirill A. Shutemov"
Cc: Sergey Senozhatsky
Cc: Steven Rostedt
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/mips/mm/gup.c                           | 2 +-
 arch/powerpc/mm/mmu_context_hash64.c         | 3 +--
 arch/powerpc/mm/pgtable_64.c                 | 2 +-
 arch/powerpc/platforms/512x/mpc512x_shared.c | 2 +-
 arch/x86/mm/gup.c                            | 2 +-
 5 files changed, 5 insertions(+), 6 deletions(-)

(limited to 'arch')

diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index 1afd87c..6cdffc7 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -64,7 +64,7 @@ static inline void get_head_page_multiple(struct page *page, int nr)
 {
 	VM_BUG_ON(page != compound_head(page));
 	VM_BUG_ON(page_count(page) == 0);
-	atomic_add(nr, &page->_count);
+	page_ref_add(page, nr);
 	SetPageReferenced(page);
 }
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 4e4efbc..9ca6fe1 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -118,8 +118,7 @@ static void destroy_pagetable_page(struct mm_struct *mm)
 	/* drop all the pending references */
 	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
 	/* We allow PTE_FRAG_NR fragments from a PTE page */
-	count = atomic_sub_return(PTE_FRAG_NR - count, &page->_count);
-	if (!count) {
+	if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
 		pgtable_page_dtor(page);
 		free_hot_cold_page(page, 0);
 	}
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index cdf2123..d9cc66c 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -403,7 +403,7 @@ static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
 	 * count.
 	 */
 	if (likely(!mm->context.pte_frag)) {
-		atomic_set(&page->_count, PTE_FRAG_NR);
+		set_page_count(page, PTE_FRAG_NR);
 		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
 	}
 	spin_unlock(&mm->page_table_lock);
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index 711f3d3..452da23 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -188,7 +188,7 @@ static struct fsl_diu_shared_fb __attribute__ ((__aligned__(8))) diu_shared_fb;
 static inline void mpc512x_free_bootmem(struct page *page)
 {
 	BUG_ON(PageTail(page));
-	BUG_ON(atomic_read(&page->_count) > 1);
+	BUG_ON(page_ref_count(page) > 1);
 	free_reserved_page(page);
 }
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index d8a798d..f8d0b5e 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -131,7 +131,7 @@ static inline void get_head_page_multiple(struct page *page, int nr)
 {
 	VM_BUG_ON_PAGE(page != compound_head(page), page);
 	VM_BUG_ON_PAGE(page_count(page) == 0, page);
-	atomic_add(nr, &page->_count);
+	page_ref_add(page, nr);
 	SetPageReferenced(page);
 }
--
cgit v1.1
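The new wrappers are thin, one-to-one replacements for the open-coded atomics
on page->_count; a sketch of the correspondence, paraphrasing
include/linux/page_ref.h and ignoring the tracepoints this series later hangs
off them:

    static inline int page_ref_count(struct page *page)
    {
    	return atomic_read(&page->_count);
    }

    static inline void page_ref_add(struct page *page, int nr)
    {
    	atomic_add(nr, &page->_count);
    }

    /*
     * Returns true when the subtraction drops the count to zero,
     * matching the old "atomic_sub_return(...) == 0" idiom used in
     * the powerpc hunk above.
     */
    static inline int page_ref_sub_and_test(struct page *page, int nr)
    {
    	return atomic_sub_and_test(nr, &page->_count);
    }

    static inline void set_page_count(struct page *page, int v)
    {
    	atomic_set(&page->_count, v);
    }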
From 0e8fb9312fbaf1a687dd731b04d8ab3121c4ff5a Mon Sep 17 00:00:00 2001
From: Jan Kara
Date: Thu, 17 Mar 2016 14:19:55 -0700
Subject: mm: remove VM_FAULT_MINOR

The define has carried a comment from Nick Piggin since 2007:

	/* For backwards compat. Remove me quickly. */

I guess nine years should not be too hurried a sense of 'quickly', even
by kernel standards.

Signed-off-by: Jan Kara
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arm/mm/fault.c       | 2 +-
 arch/arm64/mm/fault.c     | 2 +-
 arch/unicore32/mm/fault.c | 2 +-
 arch/xtensa/mm/fault.c    | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index daafcf1..ad58418 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -346,7 +346,7 @@ retry:
 	up_read(&mm->mmap_sem);

 	/*
-	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
+	 * Handle the "normal" case first - VM_FAULT_MAJOR
 	 */
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
 		return 0;
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index abe2a95..97135b6 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -295,7 +295,7 @@ retry:
 	up_read(&mm->mmap_sem);

 	/*
-	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
+	 * Handle the "normal" case first - VM_FAULT_MAJOR
 	 */
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
 			      VM_FAULT_BADACCESS))))
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
index afccef552..2ec3d3a 100644
--- a/arch/unicore32/mm/fault.c
+++ b/arch/unicore32/mm/fault.c
@@ -276,7 +276,7 @@ retry:
 	up_read(&mm->mmap_sem);

 	/*
-	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
+	 * Handle the "normal" case first - VM_FAULT_MAJOR
 	 */
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
 			      VM_FAULT_BADACCESS))))
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index c9784c1..7f4a1fd 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -146,7 +146,7 @@ good_area:
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 	if (flags & VM_FAULT_MAJOR)
 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-	else if (flags & VM_FAULT_MINOR)
+	else
 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);

 	return;
--
cgit v1.1
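It helps to know what is being removed: VM_FAULT_MINOR had long been defined
as zero, so a test like xtensa's could never be true and minor faults were
silently never counted there. The plain "else" in the xtensa hunk restores
that accounting. Paraphrasing include/linux/mm.h before this patch:

    /* For backwards compat. Remove me quickly. */
    #define VM_FAULT_MINOR	0

    /*
     * Consequence: (flags & VM_FAULT_MINOR) always evaluates to 0,
     * so the old "else if" branch above was dead code. A fault is
     * "minor" precisely when it is not "major".
     */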
From 7f2bd006334291178bd2bce3e506d4c7a34a0643 Mon Sep 17 00:00:00 2001
From: Li Zhang
Date: Thu, 17 Mar 2016 14:20:19 -0700
Subject: powerpc/mm: enable page parallel initialisation

Parallel page initialisation has already been enabled for x86, where it
improves boot time greatly.  On Power8 it also helps greatly,
especially for small memory.  Here are the results from my tests on a
Power8 platform:

  For 4GB of memory, boot time improves by 59%, from 24.5s down to 10s.
  For 50GB of memory, boot time improves by 22%, from 56.8s down to 43.8s.

Signed-off-by: Li Zhang
Acked-by: Mel Gorman
Acked-by: Michael Ellerman
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/powerpc/Kconfig | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch')

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 832cc46..a030e5e 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -158,6 +158,7 @@ config PPC
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select HAVE_ARCH_SECCOMP_FILTER
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
+	select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT

 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
--
cgit v1.1

From 8b9e6d58e7016382f9958d9909d8cb20d3f6eece Mon Sep 17 00:00:00 2001
From: Andrew Morton
Date: Thu, 17 Mar 2016 14:21:06 -0700
Subject: mn10300, c6x: CONFIG_GENERIC_BUG must depend on CONFIG_BUG

CONFIG_BUG=n && CONFIG_GENERIC_BUG=y makes no sense and things break:

  In file included from include/linux/page-flags.h:9:0,
                   from kernel/bounds.c:9:
  include/linux/bug.h:91:47: warning: 'struct bug_entry' declared inside parameter list
   static inline int is_warning_bug(const struct bug_entry *bug)
                                                 ^
  include/linux/bug.h:91:47: warning: its scope is only this definition or declaration, which is probably not what you want
  include/linux/bug.h: In function 'is_warning_bug':
  >> include/linux/bug.h:93:12: error: dereferencing pointer to incomplete type
     return bug->flags & BUGFLAG_WARNING;

Reported-by: kbuild test robot
Cc: Josh Triplett
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/c6x/Kconfig     | 1 +
 arch/mn10300/Kconfig | 1 +
 2 files changed, 2 insertions(+)

(limited to 'arch')

diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 79049d4..5aa8ea8 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -36,6 +36,7 @@ config GENERIC_HWEIGHT

 config GENERIC_BUG
 	def_bool y
+	depends on BUG

 config C6X_BIG_KERNEL
 	bool "Build a big kernel"
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 10607f0..06ddb55 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -53,6 +53,7 @@ config GENERIC_HWEIGHT

 config GENERIC_BUG
 	def_bool y
+	depends on BUG

 config QUICKLIST
 	def_bool y
--
cgit v1.1
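The build failure above is the classic incomplete-type pitfall: declaring a
pointer to struct bug_entry is legal everywhere, but dereferencing one
requires the full definition, which only exists when CONFIG_BUG is set. A
distilled illustration, independent of the real kernel headers (names and the
flag value are simplified for the sketch):

    /* With CONFIG_BUG=n, only a forward declaration ever exists. */
    #ifdef CONFIG_BUG
    struct bug_entry {
    	unsigned int flags;
    };
    #else
    struct bug_entry;	/* incomplete: no body is ever provided */
    #endif

    #ifdef CONFIG_GENERIC_BUG
    /*
     * Compiles only when struct bug_entry is complete, i.e. when
     * CONFIG_BUG is also set. Hence "depends on BUG" in Kconfig.
     */
    static inline int is_warning_bug(const struct bug_entry *bug)
    {
    	return bug->flags & 0x1;	/* stands in for BUGFLAG_WARNING */
    }
    #endif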
From c60f169202c7643991a8b4bfeea60e06843d5b5a Mon Sep 17 00:00:00 2001
From: Andrew Morton
Date: Thu, 17 Mar 2016 14:21:09 -0700
Subject: arch/mn10300/kernel/fpu-nofpu.c: needs asm/elf.h

arch/mn10300/kernel/fpu-nofpu.c:27:36: error: unknown type name 'elf_fpregset_t'
     int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpreg)

Reported-by: kbuild test robot
Cc: Josh Triplett
Cc: David Howells
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/mn10300/kernel/fpu-nofpu.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch')

diff --git a/arch/mn10300/kernel/fpu-nofpu.c b/arch/mn10300/kernel/fpu-nofpu.c
index 31c765b..8d0e041 100644
--- a/arch/mn10300/kernel/fpu-nofpu.c
+++ b/arch/mn10300/kernel/fpu-nofpu.c
@@ -9,6 +9,7 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 #include
+#include <asm/elf.h>

 /*
  * handle an FPU operational exception
--
cgit v1.1

From 93e205a728e6cb8d7d11f6836e289798a1de25e2 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Thu, 17 Mar 2016 14:21:15 -0700
Subject: fix Christoph's email addresses

There are various email addresses for me throughout the kernel.  Use
the one that will always be valid.

Signed-off-by: Christoph Lameter
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/ia64/include/asm/rwsem.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index 3027e75..ce11247 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2003 Ken Chen
  * Copyright (C) 2003 Asit Mallick
- * Copyright (C) 2005 Christoph Lameter
+ * Copyright (C) 2005 Christoph Lameter
  *
  * Based on asm-i386/rwsem.h and other architecture implementation.
  *
--
cgit v1.1
From 4cc7ecb7f2a60e8deb783b8fbf7c1ae467acb920 Mon Sep 17 00:00:00 2001
From: Kees Cook
Date: Thu, 17 Mar 2016 14:23:00 -0700
Subject: param: convert some "on"/"off" users to strtobool

This changes several users of manual "on"/"off" parsing to use
strtobool.  Some side-effects:

- these uses will now parse y/n/1/0 meaningfully too;
- the early_param uses will now bubble up parse errors.

Signed-off-by: Kees Cook
Acked-by: Heiko Carstens
Acked-by: Michael Ellerman
Cc: Amitkumar Karwar
Cc: Andy Shevchenko
Cc: Daniel Borkmann
Cc: Joe Perches
Cc: Kalle Valo
Cc: Martin Schwidefsky
Cc: Nishant Sarmukadam
Cc: Rasmus Villemoes
Cc: Steve French
Cc: Stephen Rothwell
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/powerpc/kernel/rtasd.c                  |  9 ++-------
 arch/powerpc/platforms/pseries/hotplug-cpu.c | 10 ++--------
 arch/s390/kernel/time.c                      |  8 ++------
 arch/s390/kernel/topology.c                  |  7 ++-----
 arch/x86/kernel/aperture_64.c                | 12 ++----------
 5 files changed, 10 insertions(+), 36 deletions(-)

(limited to 'arch')

diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 5a2c049..aa610ce 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -49,7 +49,7 @@ static unsigned int rtas_error_log_buffer_max;
 static unsigned int event_scan;
 static unsigned int rtas_event_scan_rate;

-static int full_rtas_msgs = 0;
+static bool full_rtas_msgs;

 /* Stop logging to nvram after first fatal error */
 static int logging_enabled;	/* Until we initialize everything,
@@ -592,11 +592,6 @@ __setup("surveillance=", surveillance_setup);

 static int __init rtasmsgs_setup(char *str)
 {
-	if (strcmp(str, "on") == 0)
-		full_rtas_msgs = 1;
-	else if (strcmp(str, "off") == 0)
-		full_rtas_msgs = 0;
-
-	return 1;
+	return (kstrtobool(str, &full_rtas_msgs) == 0);
 }
 __setup("rtasmsgs=", rtasmsgs_setup);
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 32274f7..282837a 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -47,20 +47,14 @@ static DEFINE_PER_CPU(enum cpu_state_vals, current_state) = CPU_STATE_OFFLINE;

 static enum cpu_state_vals default_offline_state = CPU_STATE_OFFLINE;

-static int cede_offline_enabled __read_mostly = 1;
+static bool cede_offline_enabled __read_mostly = true;

 /*
  * Enable/disable cede_offline when available.
  */
 static int __init setup_cede_offline(char *str)
 {
-	if (!strcmp(str, "off"))
-		cede_offline_enabled = 0;
-	else if (!strcmp(str, "on"))
-		cede_offline_enabled = 1;
-	else
-		return 0;
-	return 1;
+	return (kstrtobool(str, &cede_offline_enabled) == 0);
 }
 __setup("cede_offline=", setup_cede_offline);
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index c4e5f18..9409d32 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -1432,7 +1432,7 @@ device_initcall(etr_init_sysfs);
 /*
  * Server Time Protocol (STP) code.
  */
-static int stp_online;
+static bool stp_online;
 static struct stp_sstpi stp_info;
 static void *stp_page;

@@ -1443,11 +1443,7 @@ static struct timer_list stp_timer;

 static int __init early_parse_stp(char *p)
 {
-	if (strncmp(p, "off", 3) == 0)
-		stp_online = 0;
-	else if (strncmp(p, "on", 2) == 0)
-		stp_online = 1;
-	return 0;
+	return kstrtobool(p, &stp_online);
 }
 early_param("stp", early_parse_stp);
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 40b8102..64298a8 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -37,7 +37,7 @@ static void set_topology_timer(void);
 static void topology_work_fn(struct work_struct *work);
 static struct sysinfo_15_1_x *tl_info;

-static int topology_enabled = 1;
+static bool topology_enabled = true;
 static DECLARE_WORK(topology_work, topology_work_fn);

 /*
@@ -444,10 +444,7 @@ static const struct cpumask *cpu_book_mask(int cpu)

 static int __init early_parse_topology(char *p)
 {
-	if (strncmp(p, "off", 3))
-		return 0;
-	topology_enabled = 0;
-	return 0;
+	return kstrtobool(p, &topology_enabled);
 }
 early_param("topology", early_parse_topology);
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 6e85f71..0a2bb1f 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -227,19 +227,11 @@ static u32 __init search_agp_bridge(u32 *order, int *valid_agp)
 	return 0;
 }

-static int gart_fix_e820 __initdata = 1;
+static bool gart_fix_e820 __initdata = true;

 static int __init parse_gart_mem(char *p)
 {
-	if (!p)
-		return -EINVAL;
-
-	if (!strncmp(p, "off", 3))
-		gart_fix_e820 = 0;
-	else if (!strncmp(p, "on", 2))
-		gart_fix_e820 = 1;
-
-	return 0;
+	return kstrtobool(p, &gart_fix_e820);
 }
 early_param("gart_fix_e820", parse_gart_mem);
--
cgit v1.1
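One subtlety these conversions rely on: early_param() handlers report success
by returning 0, so the kstrtobool() result can be passed straight through,
while __setup() handlers return 1 to mark the option as consumed, hence the
"== 0" comparison. A standalone sketch with hypothetical flag names (in this
era kstrtobool() is declared via linux/kernel.h):

    #include <linux/init.h>
    #include <linux/kernel.h>

    static bool example_early __read_mostly = true;
    static bool example_late __read_mostly;

    /* early_param style: 0 means success, nonzero bubbles up the error. */
    static int __init early_parse_example(char *p)
    {
    	return kstrtobool(p, &example_early);
    }
    early_param("example_early", early_parse_example);

    /* __setup style: return 1 when the option was recognized and handled. */
    static int __init example_late_setup(char *str)
    {
    	return (kstrtobool(str, &example_late) == 0);
    }
    __setup("example_late=", example_late_setup);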