author     Dave Airlie <airlied@redhat.com>  2016-05-09 13:49:56 +1000
committer  Dave Airlie <airlied@redhat.com>  2016-05-09 13:49:56 +1000
commit     bafb86f5bc3173479002555dea7f31d943b12332 (patch)
tree       6ec94c96460faf782cdb8781339560e13e0918b0 /mm
parent     ba391646d0d43aee3596b616113cce0f22dde627 (diff)
parent     44549e8f5eea4e0a41b487b63e616cb089922b99 (diff)
download   op-kernel-dev-bafb86f5bc3173479002555dea7f31d943b12332.zip
           op-kernel-dev-bafb86f5bc3173479002555dea7f31d943b12332.tar.gz
Merge tag 'v4.6-rc7' into drm-next
Merge this back as we've built up a fair few conflicts, and I have
some newer trees to pull in.
Diffstat (limited to 'mm')
-rw-r--r--  mm/backing-dev.c     |  4
-rw-r--r--  mm/compaction.c      | 14
-rw-r--r--  mm/gup.c             | 52
-rw-r--r--  mm/huge_memory.c     | 16
-rw-r--r--  mm/memcontrol.c      | 37
-rw-r--r--  mm/memory-failure.c  | 10
-rw-r--r--  mm/memory.c          | 51
-rw-r--r--  mm/migrate.c         |  8
-rw-r--r--  mm/nommu.c           | 44
-rw-r--r--  mm/page-writeback.c  |  6
-rw-r--r--  mm/page_alloc.c      |  2
-rw-r--r--  mm/page_io.c         |  6
-rw-r--r--  mm/swap.c            |  5
-rw-r--r--  mm/vmscan.c          | 30
-rw-r--r--  mm/zswap.c           |  8
15 files changed, 141 insertions(+), 152 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index bfbd709..0c6317b 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -898,7 +898,7 @@ static atomic_t nr_wb_congested[2];
 void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
 {
 	wait_queue_head_t *wqh = &congestion_wqh[sync];
-	enum wb_state bit;
+	enum wb_congested_state bit;
 
 	bit = sync ? WB_sync_congested : WB_async_congested;
 	if (test_and_clear_bit(bit, &congested->state))
@@ -911,7 +911,7 @@ EXPORT_SYMBOL(clear_wb_congested);
 
 void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
 {
-	enum wb_state bit;
+	enum wb_congested_state bit;
 
 	bit = sync ? WB_sync_congested : WB_async_congested;
 	if (!test_and_set_bit(bit, &congested->state))
diff --git a/mm/compaction.c b/mm/compaction.c
index ccf97b0..8fa2540 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -852,16 +852,8 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
 		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
 							ISOLATE_UNEVICTABLE);
 
-		/*
-		 * In case of fatal failure, release everything that might
-		 * have been isolated in the previous iteration, and signal
-		 * the failure back to caller.
-		 */
-		if (!pfn) {
-			putback_movable_pages(&cc->migratepages);
-			cc->nr_migratepages = 0;
+		if (!pfn)
 			break;
-		}
 
 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
 			break;
@@ -1741,7 +1733,7 @@ void compaction_unregister_node(struct node *node)
 
 static inline bool kcompactd_work_requested(pg_data_t *pgdat)
 {
-	return pgdat->kcompactd_max_order > 0;
+	return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
 }
 
 static bool kcompactd_node_suitable(pg_data_t *pgdat)
@@ -1805,6 +1797,8 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 		INIT_LIST_HEAD(&cc.freepages);
 		INIT_LIST_HEAD(&cc.migratepages);
 
+		if (kthread_should_stop())
+			return;
 		status = compact_zone(zone, &cc);
 
 		if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1,4 +1,3 @@
-#define __DISABLE_GUP_DEPRECATED 1
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -839,7 +838,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
  *      if (locked)
  *          up_read(&mm->mmap_sem);
  */
-long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 			   int write, int force, struct page **pages,
 			   int *locked)
 {
@@ -847,7 +846,7 @@ long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
 				       write, force, pages, NULL, locked, true,
 				       FOLL_TOUCH);
 }
-EXPORT_SYMBOL(get_user_pages_locked6);
+EXPORT_SYMBOL(get_user_pages_locked);
 
 /*
  * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to
@@ -892,13 +891,13 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
  * or if "force" shall be set to 1 (get_user_pages_fast misses the
  * "force" parameter).
  */
-long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 			     int write, int force, struct page **pages)
 {
 	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
 					 write, force, pages, FOLL_TOUCH);
 }
-EXPORT_SYMBOL(get_user_pages_unlocked5);
+EXPORT_SYMBOL(get_user_pages_unlocked);
 
 /*
  * get_user_pages_remote() - pin user pages in memory
@@ -972,7 +971,7 @@ EXPORT_SYMBOL(get_user_pages_remote);
  * and mm being operated on are the current task's.  We also
  * obviously don't pass FOLL_REMOTE in here.
  */
-long get_user_pages6(unsigned long start, unsigned long nr_pages,
+long get_user_pages(unsigned long start, unsigned long nr_pages,
 		int write, int force, struct page **pages,
 		struct vm_area_struct **vmas)
 {
@@ -980,7 +979,7 @@ long get_user_pages6(unsigned long start, unsigned long nr_pages,
 			       write, force, pages, vmas, NULL, false,
 			       FOLL_TOUCH);
 }
-EXPORT_SYMBOL(get_user_pages6);
+EXPORT_SYMBOL(get_user_pages);
 
 /**
  * populate_vma_page_range() -  populate a range of pages in the vma.
@@ -1491,7 +1490,6 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages)
 {
-	struct mm_struct *mm = current->mm;
 	int nr, ret;
 
 	start &= PAGE_MASK;
@@ -1503,8 +1501,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		start += nr << PAGE_SHIFT;
 		pages += nr;
 
-		ret = get_user_pages_unlocked(current, mm, start,
-					      nr_pages - nr, write, 0, pages);
+		ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
 
 		/* Have to be a bit careful with return values */
 		if (nr > 0) {
@@ -1519,38 +1516,3 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 }
 
 #endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
-
-long get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, unsigned long nr_pages,
-		     int write, int force, struct page **pages,
-		     struct vm_area_struct **vmas)
-{
-	WARN_ONCE(tsk != current, "get_user_pages() called on remote task");
-	WARN_ONCE(mm != current->mm, "get_user_pages() called on remote mm");
-
-	return get_user_pages6(start, nr_pages, write, force, pages, vmas);
-}
-EXPORT_SYMBOL(get_user_pages8);
-
-long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
-			    unsigned long start, unsigned long nr_pages,
-			    int write, int force, struct page **pages, int *locked)
-{
-	WARN_ONCE(tsk != current, "get_user_pages_locked() called on remote task");
-	WARN_ONCE(mm != current->mm, "get_user_pages_locked() called on remote mm");
-
-	return get_user_pages_locked6(start, nr_pages, write, force, pages, locked);
-}
-EXPORT_SYMBOL(get_user_pages_locked8);
-
-long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
-			      unsigned long start, unsigned long nr_pages,
-			      int write, int force, struct page **pages)
-{
-	WARN_ONCE(tsk != current, "get_user_pages_unlocked() called on remote task");
-	WARN_ONCE(mm != current->mm, "get_user_pages_unlocked() called on remote mm");
-
-	return get_user_pages_unlocked5(start, nr_pages, write, force, pages);
-}
-EXPORT_SYMBOL(get_user_pages_unlocked7);
-
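The mm/gup.c changes above complete the 4.6 get_user_pages() API transition: the primary entry points now operate implicitly on current and current->mm, and the transitional symbols (get_user_pages6, get_user_pages_locked6, get_user_pages_unlocked5) plus the old-signature compatibility wrappers (get_user_pages8 and friends) are deleted. A minimal before/after sketch of a call site; driver_pin_buffer() is a hypothetical helper invented for illustration, only the gup signatures come from the hunks above:

/* Hypothetical call site, for illustration only -- not part of this diff. */
static long driver_pin_buffer(unsigned long uaddr, int nr, struct page **pages)
{
	/*
	 * Before this merge, callers passed the task and mm explicitly:
	 *
	 *     get_user_pages(current, current->mm, uaddr, nr,
	 *                    1, 0, pages, NULL);
	 *
	 * Now the (current, current->mm) pair is implicit; pinning another
	 * process's memory goes through get_user_pages_remote() instead.
	 */
	return get_user_pages(uaddr, nr, 1 /* write */, 0 /* force */,
			      pages, NULL);
}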
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 86f9f8b..f7daa7d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -232,7 +232,7 @@ retry:
 	return READ_ONCE(huge_zero_page);
 }
 
-static void put_huge_zero_page(void)
+void put_huge_zero_page(void)
 {
 	/*
 	 * Counter should never go to zero here. Only shrinker can put
@@ -1684,12 +1684,12 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	if (vma_is_dax(vma)) {
 		spin_unlock(ptl);
 		if (is_huge_zero_pmd(orig_pmd))
-			put_huge_zero_page();
+			tlb_remove_page(tlb, pmd_page(orig_pmd));
 	} else if (is_huge_zero_pmd(orig_pmd)) {
 		pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
 		atomic_long_dec(&tlb->mm->nr_ptes);
 		spin_unlock(ptl);
-		put_huge_zero_page();
+		tlb_remove_page(tlb, pmd_page(orig_pmd));
 	} else {
 		struct page *page = pmd_page(orig_pmd);
 		page_remove_rmap(page, true);
@@ -1960,10 +1960,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
 		 * page fault if needed.
 		 */
 		return 0;
-	if (vma->vm_ops)
+	if (vma->vm_ops || (vm_flags & VM_NO_THP))
 		/* khugepaged not yet working on file or special mappings */
 		return 0;
-	VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 	hend = vma->vm_end & HPAGE_PMD_MASK;
 	if (hstart < hend)
@@ -2352,8 +2351,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
 		return false;
 	if (is_vma_temporary_stack(vma))
 		return false;
-	VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
-	return true;
+	return !(vma->vm_flags & VM_NO_THP);
 }
 
 static void collapse_huge_page(struct mm_struct *mm,
@@ -3454,7 +3452,7 @@ next:
 		}
 	}
 
-	pr_info("%lu of %lu THP split", split, total);
+	pr_info("%lu of %lu THP split\n", split, total);
 	return 0;
 }
 
@@ -3465,7 +3463,7 @@ static int __init split_huge_pages_debugfs(void)
 {
 	void *ret;
 
-	ret = debugfs_create_file("split_huge_pages", 0644, NULL, NULL,
+	ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
 			&split_huge_pages_fops);
 	if (!ret)
 		pr_warn("Failed to create split_huge_pages in debugfs");
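The zap_huge_pmd() hunks above change when the huge zero page reference is dropped: releasing it immediately would let the shrinker free the page while other CPUs still hold TLB entries pointing at it, so the page is instead queued on the mmu_gather and released only after the TLB flush. That is also why release_pages() gains a special case in the mm/swap.c hunk further down. A reading aid stitched together from those two hunks (not new kernel code):

/* zap path: keep the pin across the flush */
if (is_huge_zero_pmd(orig_pmd))
	tlb_remove_page(tlb, pmd_page(orig_pmd));	/* queued, still pinned */

/* batched free path (release_pages): drop the pin, never free the page */
if (is_huge_zero_page(page)) {
	put_huge_zero_page();
	continue;
}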
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 36db05f..fe787f5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -207,6 +207,7 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
 /* "mc" and its members are protected by cgroup_mutex */
 static struct move_charge_struct {
 	spinlock_t	  lock; /* for from, to */
+	struct mm_struct  *mm;
 	struct mem_cgroup *from;
 	struct mem_cgroup *to;
 	unsigned long flags;
@@ -4667,6 +4668,8 @@ static void __mem_cgroup_clear_mc(void)
 
 static void mem_cgroup_clear_mc(void)
 {
+	struct mm_struct *mm = mc.mm;
+
 	/*
 	 * we must clear moving_task before waking up waiters at the end of
 	 * task migration.
@@ -4676,7 +4679,10 @@ static void mem_cgroup_clear_mc(void)
 	spin_lock(&mc.lock);
 	mc.from = NULL;
 	mc.to = NULL;
+	mc.mm = NULL;
 	spin_unlock(&mc.lock);
+
+	mmput(mm);
 }
 
 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4733,6 +4739,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
 		VM_BUG_ON(mc.moved_swap);
 
 		spin_lock(&mc.lock);
+		mc.mm = mm;
 		mc.from = from;
 		mc.to = memcg;
 		mc.flags = move_flags;
@@ -4742,8 +4749,9 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
 		ret = mem_cgroup_precharge_mc(mm);
 		if (ret)
 			mem_cgroup_clear_mc();
+	} else {
+		mmput(mm);
 	}
-	mmput(mm);
 	return ret;
 }
 
@@ -4852,11 +4860,11 @@ put:			/* get_mctgt_type() gets the page */
 	return ret;
 }
 
-static void mem_cgroup_move_charge(struct mm_struct *mm)
+static void mem_cgroup_move_charge(void)
 {
 	struct mm_walk mem_cgroup_move_charge_walk = {
 		.pmd_entry = mem_cgroup_move_charge_pte_range,
-		.mm = mm,
+		.mm = mc.mm,
 	};
 
 	lru_add_drain_all();
@@ -4868,7 +4876,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
 	atomic_inc(&mc.from->moving_account);
 	synchronize_rcu();
retry:
-	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
 		/*
 		 * Someone who are holding the mmap_sem might be waiting in
 		 * waitq. So we cancel all extra charges, wake up all waiters,
@@ -4885,23 +4893,16 @@ retry:
 	 * additional charge, the page walk just aborts.
 	 */
 	walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
-	up_read(&mm->mmap_sem);
+	up_read(&mc.mm->mmap_sem);
 	atomic_dec(&mc.from->moving_account);
 }
 
-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(void)
 {
-	struct cgroup_subsys_state *css;
-	struct task_struct *p = cgroup_taskset_first(tset, &css);
-	struct mm_struct *mm = get_task_mm(p);
-
-	if (mm) {
-		if (mc.to)
-			mem_cgroup_move_charge(mm);
-		mmput(mm);
-	}
-	if (mc.to)
+	if (mc.to) {
+		mem_cgroup_move_charge();
 		mem_cgroup_clear_mc();
+	}
 }
 #else	/* !CONFIG_MMU */
 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4911,7 +4912,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
 {
 }
-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(void)
 {
 }
 #endif
@@ -5195,7 +5196,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
 	.css_reset = mem_cgroup_css_reset,
 	.can_attach = mem_cgroup_can_attach,
 	.cancel_attach = mem_cgroup_cancel_attach,
-	.attach = mem_cgroup_move_task,
+	.post_attach = mem_cgroup_move_task,
 	.bind = mem_cgroup_bind,
 	.dfl_cftypes = memory_files,
 	.legacy_cftypes = mem_cgroup_legacy_files,
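The memcontrol hunks above move the charge-moving work from the ->attach callback to ->post_attach, so the mm_struct grabbed in ->can_attach must stay alive until after the task has actually been attached; it is now stashed in mc.mm, and mem_cgroup_clear_mc() owns the release. A sketch of the implied reference lifetime, condensed from the hunks above (illustrative only):

/*
 * can_attach:    mm = get_task_mm(p);
 *                charges to move?  ->  mc.mm = mm   (reference kept)
 *                nothing to move?  ->  mmput(mm)    (dropped immediately)
 *
 * post_attach:   mem_cgroup_move_task()
 *                  -> mem_cgroup_move_charge()      (walks mc.mm)
 *                  -> mem_cgroup_clear_mc()         (mmput(mc.mm), mc.mm = NULL)
 *
 * cancel_attach: mem_cgroup_clear_mc()              (same single mmput())
 */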
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 78f5f26..ca5acee 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -888,7 +888,15 @@ int get_hwpoison_page(struct page *page)
 		}
 	}
 
-	return get_page_unless_zero(head);
+	if (get_page_unless_zero(head)) {
+		if (head == compound_head(page))
+			return 1;
+
+		pr_info("MCE: %#lx cannot catch tail\n", page_to_pfn(page));
+		put_page(head);
+	}
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(get_hwpoison_page);
 
diff --git a/mm/memory.c b/mm/memory.c
index 93897f2..52c218e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -789,6 +789,46 @@ out:
 	return pfn_to_page(pfn);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
+				pmd_t pmd)
+{
+	unsigned long pfn = pmd_pfn(pmd);
+
+	/*
+	 * There is no pmd_special() but there may be special pmds, e.g.
+	 * in a direct-access (dax) mapping, so let's just replicate the
+	 * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
+	 */
+	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
+		if (vma->vm_flags & VM_MIXEDMAP) {
+			if (!pfn_valid(pfn))
+				return NULL;
+			goto out;
+		} else {
+			unsigned long off;
+			off = (addr - vma->vm_start) >> PAGE_SHIFT;
+			if (pfn == vma->vm_pgoff + off)
+				return NULL;
+			if (!is_cow_mapping(vma->vm_flags))
+				return NULL;
+		}
+	}
+
+	if (is_zero_pfn(pfn))
+		return NULL;
+	if (unlikely(pfn > highest_memmap_pfn))
+		return NULL;
+
+	/*
+	 * NOTE! We still have PageReserved() pages in the page tables.
+	 * eg. VDSO mappings can cause them to exist.
+	 */
+out:
+	return pfn_to_page(pfn);
+}
+#endif
+
 /*
  * copy one vm_area from one task to the other. Assumes the page tables
  * already present in the new task to be cleared in the whole range
@@ -1182,15 +1222,8 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 		next = pmd_addr_end(addr, end);
 		if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE) {
-#ifdef CONFIG_DEBUG_VM
-				if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
-					pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n",
-						__func__, addr, end,
-						vma->vm_start,
-						vma->vm_end);
-					BUG();
-				}
-#endif
+				VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
+				    !rwsem_is_locked(&tlb->mm->mmap_sem), vma);
 				split_huge_pmd(vma, pmd, addr);
 			} else if (zap_huge_pmd(tlb, vma, pmd, addr))
 				goto next;
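vm_normal_page_pmd() above is the pmd-level analogue of vm_normal_page(): it resolves a huge pmd to its backing struct page while filtering out special mappings (dax/pfnmap), the zero pfn, and out-of-range pfns. A hypothetical caller sketch; thp_page_of() is invented for illustration, only vm_normal_page_pmd() comes from the hunk above:

/* Illustration only -- not part of this diff. */
static struct page *thp_page_of(struct vm_area_struct *vma,
				unsigned long addr, pmd_t *pmd)
{
	struct page *page;

	if (!pmd_trans_huge(*pmd))
		return NULL;		/* not mapped by a huge pmd */

	page = vm_normal_page_pmd(vma, addr, *pmd);
	if (!page)
		return NULL;		/* special mapping, zero page, bad pfn */

	return compound_head(page);	/* head page of the huge mapping */
}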
diff --git a/mm/migrate.c b/mm/migrate.c
index 6c822a7..f9dfb18 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -975,7 +975,13 @@ out:
 		dec_zone_page_state(page, NR_ISOLATED_ANON +
 				page_is_file_cache(page));
 		/* Soft-offlined page shouldn't go through lru cache list */
-		if (reason == MR_MEMORY_FAILURE) {
+		if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
+			/*
+			 * With this release, we free successfully migrated
+			 * page and set PG_HWPoison on just freed page
+			 * intentionally. Although it's rather weird, it's how
+			 * HWPoison flag works at the moment.
+			 */
 			put_page(page);
 			if (!test_set_page_hwpoison(page))
 				num_poisoned_pages_inc();
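The tightened condition in the mm/migrate.c hunk above ties poisoning to migration success. A reading aid for the two cases (illustrative, not code from this diff):

/*
 * reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS
 *     -> data was fully copied to the new page; free the old one and set
 *        PG_HWPoison on the just-freed page so it is never handed out again.
 *
 * reason == MR_MEMORY_FAILURE && rc != MIGRATEPAGE_SUCCESS
 *     -> migration failed and the old page still holds live data; it must
 *        not be freed or poisoned here (the old code did both regardless
 *        of rc).
 */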
diff --git a/mm/nommu.c b/mm/nommu.c
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -15,8 +15,6 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#define __DISABLE_GUP_DEPRECATED
-
 #include <linux/export.h>
 #include <linux/mm.h>
 #include <linux/vmacache.h>
@@ -161,7 +159,7 @@ finish_or_fault:
  *   slab page or a secondary page from a compound page
  * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
-long get_user_pages6(unsigned long start, unsigned long nr_pages,
+long get_user_pages(unsigned long start, unsigned long nr_pages,
 		    int write, int force, struct page **pages,
 		    struct vm_area_struct **vmas)
 {
@@ -175,15 +173,15 @@ long get_user_pages6(unsigned long start, unsigned long nr_pages,
 	return __get_user_pages(current, current->mm, start, nr_pages, flags,
 				pages, vmas, NULL);
 }
-EXPORT_SYMBOL(get_user_pages6);
+EXPORT_SYMBOL(get_user_pages);
 
-long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 			    int write, int force, struct page **pages,
 			    int *locked)
 {
-	return get_user_pages6(start, nr_pages, write, force, pages, NULL);
+	return get_user_pages(start, nr_pages, write, force, pages, NULL);
 }
-EXPORT_SYMBOL(get_user_pages_locked6);
+EXPORT_SYMBOL(get_user_pages_locked);
 
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 			       unsigned long start, unsigned long nr_pages,
@@ -199,13 +197,13 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 }
 EXPORT_SYMBOL(__get_user_pages_unlocked);
 
-long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 			     int write, int force, struct page **pages)
 {
 	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
 					 write, force, pages, 0);
 }
-EXPORT_SYMBOL(get_user_pages_unlocked5);
+EXPORT_SYMBOL(get_user_pages_unlocked);
 
 /**
  * follow_pfn - look up PFN at a user virtual address
@@ -1989,31 +1987,3 @@ static int __meminit init_admin_reserve(void)
 	return 0;
 }
 subsys_initcall(init_admin_reserve);
-
-long get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, unsigned long nr_pages,
-		     int write, int force, struct page **pages,
-		     struct vm_area_struct **vmas)
-{
-	return get_user_pages6(start, nr_pages, write, force, pages, vmas);
-}
-EXPORT_SYMBOL(get_user_pages8);
-
-long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
-			    unsigned long start, unsigned long nr_pages,
-			    int write, int force, struct page **pages,
-			    int *locked)
-{
-	return get_user_pages_locked6(start, nr_pages, write,
-				      force, pages, locked);
-}
-EXPORT_SYMBOL(get_user_pages_locked8);
-
-long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
-			      unsigned long start, unsigned long nr_pages,
-			      int write, int force, struct page **pages)
-{
-	return get_user_pages_unlocked5(start, nr_pages, write, force, pages);
-}
-EXPORT_SYMBOL(get_user_pages_unlocked7);
-
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 999792d..bc5149d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1910,7 +1910,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
 	if (gdtc->dirty > gdtc->bg_thresh)
 		return true;
 
-	if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc))
+	if (wb_stat(wb, WB_RECLAIMABLE) >
+	    wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
 		return true;
 
 	if (mdtc) {
@@ -1924,7 +1925,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
 		if (mdtc->dirty > mdtc->bg_thresh)
 			return true;
 
-		if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc))
+		if (wb_stat(wb, WB_RECLAIMABLE) >
+		    wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
 			return true;
 	}
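In the wb_over_bg_thresh() hunks above, the per-writeback check now distributes the background threshold (bg_thresh) across writebacks via wb_calc_thresh(), where the old code scaled the domain's dirty threshold through __wb_calc_thresh(). An illustration of the new check with invented numbers:

/*
 * Suppose bg_thresh = 100 MB for the whole domain and this wb has earned
 * roughly half the domain's writeback bandwidth share, so:
 *
 *     wb_calc_thresh(wb, 100 MB) ~= 50 MB
 *
 * With 60 MB of reclaimable pages on this wb, the check fires and
 * background writeback starts for it, even while the domain-wide
 * totals (gdtc->dirty) remain below gdtc->bg_thresh.
 */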
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 59de90d..c1069ef 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6485,7 +6485,7 @@ int __meminit init_per_zone_wmark_min(void)
 	setup_per_zone_inactive_ratio();
 	return 0;
 }
-module_init(init_per_zone_wmark_min)
+core_initcall(init_per_zone_wmark_min)
 
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
diff --git a/mm/page_io.c b/mm/page_io.c
index cd92e3d..985f23c 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -353,7 +353,11 @@ int swap_readpage(struct page *page)
 
 	ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
 	if (!ret) {
-		swap_slot_free_notify(page);
+		if (trylock_page(page)) {
+			swap_slot_free_notify(page);
+			unlock_page(page);
+		}
+
 		count_vm_event(PSWPIN);
 		return 0;
 	}
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -728,6 +728,11 @@ void release_pages(struct page **pages, int nr, bool cold)
 			zone = NULL;
 		}
 
+		if (is_huge_zero_page(page)) {
+			put_huge_zero_page();
+			continue;
+		}
+
 		page = compound_head(page);
 		if (!put_page_testzero(page))
 			continue;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b934223e..142cb61 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2553,7 +2553,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 		sc->gfp_mask |= __GFP_HIGHMEM;
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
-					requested_highidx, sc->nodemask) {
+					gfp_zone(sc->gfp_mask), sc->nodemask) {
 		enum zone_type classzone_idx;
 
 		if (!populated_zone(zone))
@@ -3318,6 +3318,20 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order,
 	/* Try to sleep for a short interval */
 	if (prepare_kswapd_sleep(pgdat, order, remaining,
 						balanced_classzone_idx)) {
+		/*
+		 * Compaction records what page blocks it recently failed to
+		 * isolate pages from and skips them in the future scanning.
+		 * When kswapd is going to sleep, it is reasonable to assume
+		 * that pages and compaction may succeed so reset the cache.
+		 */
+		reset_isolation_suitable(pgdat);
+
+		/*
+		 * We have freed the memory, now we should compact it to make
+		 * allocation of the requested order possible.
+		 */
+		wakeup_kcompactd(pgdat, order, classzone_idx);
+
 		remaining = schedule_timeout(HZ/10);
 		finish_wait(&pgdat->kswapd_wait, &wait);
 		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
@@ -3341,20 +3355,6 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order,
 		 */
 		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 
-		/*
-		 * Compaction records what page blocks it recently failed to
-		 * isolate pages from and skips them in the future scanning.
-		 * When kswapd is going to sleep, it is reasonable to assume
-		 * that pages and compaction may succeed so reset the cache.
-		 */
-		reset_isolation_suitable(pgdat);
-
-		/*
-		 * We have freed the memory, now we should compact it to make
-		 * allocation of the requested order possible.
-		 */
-		wakeup_kcompactd(pgdat, order, classzone_idx);
-
 		if (!kthread_should_stop())
 			schedule();
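The kswapd_try_to_sleep() hunks above move the compaction hand-off from after the short nap to before it. A reading aid for the reordering (illustrative):

/*
 * before: short HZ/10 nap -> nap undisturbed? -> reset pageblock skip
 *         hints, wake kcompactd
 *         (a premature kswapd wakeup meant kcompactd was never woken)
 *
 * after:  reset pageblock skip hints, wake kcompactd -> short nap ->
 *         long sleep
 *         (kcompactd can compact the freshly freed memory during the
 *         nap, whether or not kswapd gets woken again)
 */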
diff --git a/mm/zswap.c b/mm/zswap.c
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -170,6 +170,8 @@ static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
 static LIST_HEAD(zswap_pools);
 /* protects zswap_pools list modification */
 static DEFINE_SPINLOCK(zswap_pools_lock);
+/* pool counter to provide unique names to zpool */
+static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 
 /* used by param callback function */
 static bool zswap_init_started;
@@ -565,6 +567,7 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 {
 	struct zswap_pool *pool;
+	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
 	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
 
 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
@@ -573,7 +576,10 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 		return NULL;
 	}
 
-	pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops);
+	/* unique name for each pool specifically required by zsmalloc */
+	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
+
+	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
 	if (!pool->zpool) {
 		pr_err("%s zpool not available\n", type);
 		goto error;
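The zswap hunk above gives each pool a unique zpool name, which zsmalloc needs to keep per-pool debugfs entries apart. A standalone C11 demonstration of the same naming scheme; this is userspace mimicry, not kernel code, with atomic_fetch_add() + 1 standing in for the kernel's atomic_inc_return():

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static atomic_int pools_count;

/* Fixed prefix plus a monotonically increasing hex counter. */
static void pool_name(char *buf, size_t len)
{
	snprintf(buf, len, "zswap%x",
		 atomic_fetch_add(&pools_count, 1) + 1);
}

int main(void)
{
	char name[38];	/* mirrors the kernel's generous sizing */

	pool_name(name, sizeof(name));
	printf("%s\n", name);	/* zswap1 */
	pool_name(name, sizeof(name));
	printf("%s\n", name);	/* zswap2 */
	return 0;
}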