Diffstat (limited to 'mm')
-rw-r--r--  mm/cma.c          4  ++--
-rw-r--r--  mm/filemap.c      9  +++++----
-rw-r--r--  mm/huge_memory.c  3  ++-
-rw-r--r--  mm/memcontrol.c   1  +
-rw-r--r--  mm/memory.c       2  ++
-rw-r--r--  mm/readahead.c    8  ++++----
-rw-r--r--  mm/vmstat.c       7  ++++---
7 files changed, 20 insertions, 14 deletions
diff --git a/mm/cma.c b/mm/cma.c
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -361,7 +361,7 @@ err:
  * This function allocates part of contiguous memory on specific
  * contiguous memory area.
  */
-struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align)
+struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
 {
 	unsigned long mask, offset, pfn, start = 0;
 	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
@@ -371,7 +371,7 @@ struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align)
 	if (!cma || !cma->count)
 		return NULL;
 
-	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
+	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
 		 count, align);
 
 	if (!count)
diff --git a/mm/filemap.c b/mm/filemap.c
index 1cc5467..327910c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2488,6 +2488,11 @@ again:
 			break;
 		}
 
+		if (fatal_signal_pending(current)) {
+			status = -EINTR;
+			break;
+		}
+
 		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
 						&page, &fsdata);
 		if (unlikely(status < 0))
@@ -2525,10 +2530,6 @@ again:
 		written += copied;
 
 		balance_dirty_pages_ratelimited(mapping);
-		if (fatal_signal_pending(current)) {
-			status = -EINTR;
-			break;
-		}
 	} while (iov_iter_count(i));
 
 	return written ? written : status;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4b06b8d..bbac913 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2206,7 +2206,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
 	     _pte++, address += PAGE_SIZE) {
 		pte_t pteval = *_pte;
-		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
+		if (pte_none(pteval) || (pte_present(pteval) &&
+				is_zero_pfn(pte_pfn(pteval)))) {
 			if (!userfaultfd_armed(vma) &&
 			    ++none_or_zero <= khugepaged_max_ptes_none)
 				continue;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 882c10c..c57c442 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3387,6 +3387,7 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
 	ret = page_counter_memparse(args, "-1", &threshold);
 	if (ret)
 		return ret;
+	threshold <<= PAGE_SHIFT;
 
 	mutex_lock(&memcg->thresholds_lock);
 
diff --git a/mm/memory.c b/mm/memory.c
index 9cb2747..deb679c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2426,6 +2426,8 @@ void unmap_mapping_range(struct address_space *mapping,
 	if (details.last_index < details.first_index)
 		details.last_index = ULONG_MAX;
 
+
+	/* DAX uses i_mmap_lock to serialise file truncate vs page fault */
 	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
diff --git a/mm/readahead.c b/mm/readahead.c
index 60cd846..24682f6 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -89,8 +89,8 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 	while (!list_empty(pages)) {
 		page = list_to_page(pages);
 		list_del(&page->lru);
-		if (add_to_page_cache_lru(page, mapping,
-					page->index, GFP_KERNEL)) {
+		if (add_to_page_cache_lru(page, mapping, page->index,
+				GFP_KERNEL & mapping_gfp_mask(mapping))) {
 			read_cache_pages_invalidate_page(mapping, page);
 			continue;
 		}
@@ -127,8 +127,8 @@ static int read_pages(struct address_space *mapping, struct file *filp,
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
 		struct page *page = list_to_page(pages);
 		list_del(&page->lru);
-		if (!add_to_page_cache_lru(page, mapping,
-					page->index, GFP_KERNEL)) {
+		if (!add_to_page_cache_lru(page, mapping, page->index,
+				GFP_KERNEL & mapping_gfp_mask(mapping))) {
 			mapping->a_ops->readpage(filp, page);
 		}
 		page_cache_release(page);
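
The mm/cma.c change above widens count from unsigned int to size_t, so the pr_debug() format specifier has to move from %d to %zu: passing a size_t to %d is undefined behaviour and is exactly what -Wformat flags on LP64 targets, where size_t is 64-bit but int is not. A minimal userspace illustration of the same rule:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	size_t count = SIZE_MAX;	/* a value unsigned int may not hold */

	/* %zu is the one portable specifier for size_t; %d would
	 * truncate on LP64 and is undefined behaviour either way. */
	printf("count %zu\n", count);
	return 0;
}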
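
The mm/filemap.c hunks move the fatal_signal_pending() check in generic_perform_write() from the bottom of the loop body to the top, so a task that has received a fatal signal (for instance from the OOM killer) bails out before calling ->write_begin(), which may itself block on memory allocation, rather than only after completing another iteration. A hedged sketch of the resulting loop shape; do_one_chunk() and have_more() are hypothetical stand-ins, not kernel APIs, and this is not a standalone program:

/* Kernel-style sketch of the check-before-work pattern above. */
static long write_loop(void)
{
	long status = 0, written = 0;

	do {
		/* Bail out before starting new work, not after: a task
		 * killed by the OOM killer must not re-enter an
		 * allocation-heavy ->write_begin(). */
		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}

		status = do_one_chunk();	/* hypothetical helper */
		if (status < 0)
			break;
		written += status;
	} while (have_more());			/* hypothetical helper */

	return written ? written : status;
}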
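
The mm/huge_memory.c hunk makes __collapse_huge_page_isolate() consult is_zero_pfn(pte_pfn(pteval)) only for present PTEs: for a swap or migration entry, the bits pte_pfn() extracts do not encode a PFN at all, so they can spuriously match the zero PFN. The toy program below uses an invented PTE encoding (not the kernel's) to show how a non-present entry's payload can collide with a PFN test unless a present bit is checked first:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy PTE: bit 0 = present; bits 12+ hold a PFN *only when present*,
 * otherwise they hold a swap-entry payload. Purely illustrative. */
typedef uint64_t pte_t;

static bool pte_present(pte_t pte) { return pte & 1; }
static uint64_t pte_pfn(pte_t pte) { return pte >> 12; }
static bool is_zero_pfn(uint64_t pfn) { return pfn == 0x1234; }

int main(void)
{
	/* A non-present entry whose payload happens to look like the
	 * zero PFN once run through pte_pfn(). */
	pte_t swap_entry = (uint64_t)0x1234 << 12;

	bool unguarded = is_zero_pfn(pte_pfn(swap_entry));
	bool guarded = pte_present(swap_entry) &&
		       is_zero_pfn(pte_pfn(swap_entry));

	printf("unguarded=%d guarded=%d\n", unguarded, guarded);
	return 0;
}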
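
In the mm/memcontrol.c hunk, page_counter_memparse() hands back the parsed threshold in pages, while the threshold is later compared against usage in bytes, so the new threshold <<= PAGE_SHIFT converts the units. With 4 KiB pages (PAGE_SHIFT = 12), a threshold of 25600 pages becomes 25600 << 12 = 104857600 bytes, i.e. 100 MiB. The same arithmetic as a runnable check:

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, as on x86 */

int main(void)
{
	unsigned long long threshold_pages = 25600;

	/* Same conversion as the hunk: pages -> bytes. */
	unsigned long long threshold_bytes = threshold_pages << PAGE_SHIFT;

	printf("%llu pages = %llu bytes = %llu MiB\n", threshold_pages,
	       threshold_bytes, threshold_bytes >> 20);
	return 0;
}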
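
Both mm/readahead.c hunks stop passing a bare GFP_KERNEL to add_to_page_cache_lru() and instead mask it with mapping_gfp_mask(mapping), so readahead honours a filesystem's allocation restrictions (for example a mapping that forbids __GFP_FS to avoid recursing into filesystem reclaim). The masking is an ordinary bitwise AND of flag sets; a toy illustration with made-up flag values, not the kernel's:

#include <stdio.h>

/* Toy allocation flags; values are illustrative only. */
#define TOY_GFP_IO	0x1u
#define TOY_GFP_FS	0x2u
#define TOY_GFP_KERNEL	(TOY_GFP_IO | TOY_GFP_FS)

int main(void)
{
	/* A mapping that must not recurse into fs reclaim clears FS. */
	unsigned int mapping_mask = TOY_GFP_KERNEL & ~TOY_GFP_FS;

	/* Shape of GFP_KERNEL & mapping_gfp_mask(mapping): only flags
	 * permitted by both sides survive. */
	unsigned int gfp = TOY_GFP_KERNEL & mapping_mask;

	printf("gfp=%#x, FS %s\n", gfp,
	       (gfp & TOY_GFP_FS) ? "allowed" : "stripped");
	return 0;
}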
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 4f5cd97..fbf1448 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1363,15 +1363,16 @@ static cpumask_var_t cpu_stat_off;
 
 static void vmstat_update(struct work_struct *w)
 {
-	if (refresh_cpu_vm_stats())
+	if (refresh_cpu_vm_stats()) {
 		/*
 		 * Counters were updated so we expect more updates
 		 * to occur in the future. Keep on running the
 		 * update worker thread.
 		 */
-		schedule_delayed_work(this_cpu_ptr(&vmstat_work),
+		schedule_delayed_work_on(smp_processor_id(),
+			this_cpu_ptr(&vmstat_work),
 			round_jiffies_relative(sysctl_stat_interval));
-	else {
+	} else {
 		/*
 		 * We did not update any counters so the app may be in
 		 * a mode where it does not cause counter updates.
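
vmstat_work is a per-CPU delayed work item, and the mm/vmstat.c hunk re-arms it with schedule_delayed_work_on(smp_processor_id(), ...) so the worker stays on the CPU whose counters it folds; plain schedule_delayed_work() queues on WORK_CPU_UNBOUND, so the next run could land on a different CPU and operate on the wrong CPU's state. The hunk also adds the braces the now multi-line if branch needs. A hedged kernel-style sketch of the re-arm pattern, not a standalone program:

/* A per-CPU worker must queue itself back on the CPU it serves. */
static void vmstat_update(struct work_struct *w)
{
	if (refresh_cpu_vm_stats()) {
		/* Counters changed; poll again on *this* CPU. */
		schedule_delayed_work_on(smp_processor_id(),
			this_cpu_ptr(&vmstat_work),
			round_jiffies_relative(sysctl_stat_interval));
	}
	/* else branch elided here; see the hunk above. */
}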