author     Linus Torvalds <torvalds@linux-foundation.org>  2018-07-14 11:14:33 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-07-14 11:14:33 -0700
commit     f353078f028fbfe9acd4b747b4a19c69ef6846cd (patch)
tree       3a3c7426d5fe6b42db04559e5631436222e4761a /mm
parent     e181ae0c5db9544de9c53239eb22bc012ce75033 (diff)
parent     fe10e398e860955bac4d28ec031b701d358465e4 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "11 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  reiserfs: fix buffer overflow with long warning messages
  checkpatch: fix duplicate invalid vsprintf pointer extension '%p<foo>' messages
  mm: do not bug_on on incorrect length in __mm_populate()
  mm/memblock.c: do not complain about top-down allocations for !MEMORY_HOTREMOVE
  fs, elf: make sure to page align bss in load_elf_library
  x86/purgatory: add missing FORCE to Makefile target
  net/9p/client.c: put refcount of trans_mod in error case in parse_opts()
  mm: allow arch to supply p??_free_tlb functions
  autofs: fix slab out of bounds read in getname_kernel()
  fs/proc/task_mmu.c: fix Locked field in /proc/pid/smaps*
  mm: do not drop unused pages when userfaultfd is running
Diffstat (limited to 'mm')
-rw-r--r--  mm/gup.c       2
-rw-r--r--  mm/memblock.c  3
-rw-r--r--  mm/mmap.c     29
-rw-r--r--  mm/rmap.c      8
4 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index b70d7ba..fc5f980 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1238,8 +1238,6 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
int locked = 0;
long ret = 0;
- VM_BUG_ON(start & ~PAGE_MASK);
- VM_BUG_ON(len != PAGE_ALIGN(len));
end = start + len;
for (nstart = start; nstart < end; nstart = nend) {
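The two VM_BUG_ON() assertions removed above could panic a CONFIG_DEBUG_VM kernel whenever a caller handed __mm_populate() an unaligned length; the rest of this series moves the alignment handling into the caller (vm_brk_flags()) instead of asserting here. A minimal userspace sketch of the difference, where PAGE_SIZE, the helper names, and the use of assert() as a stand-in for VM_BUG_ON() are all illustrative assumptions:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Old style: assert on bad input; with CONFIG_DEBUG_VM this was a crash. */
static void populate_asserting(unsigned long start, unsigned long len)
{
	assert(!(start & ~PAGE_MASK));	/* like VM_BUG_ON(start & ~PAGE_MASK) */
	assert(len == PAGE_ALIGN(len));	/* like VM_BUG_ON(len != PAGE_ALIGN(len)) */
}

/* New style: trust the input, because the caller aligned it already. */
static int populate_trusting(unsigned long start, unsigned long len)
{
	(void)start;
	(void)len;
	return 0;
}

int main(void)
{
	unsigned long request = 5000;			/* not page aligned */

	populate_trusting(0, PAGE_ALIGN(request));	/* caller aligns: 8192 */
	populate_asserting(0, PAGE_ALIGN(request));	/* also fine once aligned */
	printf("aligned %lu -> %lu\n", request, PAGE_ALIGN(request));
	return 0;
}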
diff --git a/mm/memblock.c b/mm/memblock.c
index 03d48d8..11e46f8 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -227,7 +227,8 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
* so we use WARN_ONCE() here to see the stack trace if
* fail happens.
*/
- WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
+ WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
+ "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
}
return __memblock_find_range_top_down(start, end, size, align, nid,
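The gating expression works because IS_ENABLED() collapses to a compile-time 0 or 1, so the warning can only ever fire on kernels built with CONFIG_MEMORY_HOTREMOVE; everyone else falls back to top-down allocation silently. A hypothetical userspace sketch of that idiom, where the simplified IS_ENABLED() and WARN_ONCE() macros are stand-ins for the real kernel ones:

#include <stdio.h>

#define CONFIG_MEMORY_HOTREMOVE 1	/* pretend build-time option */
#define IS_ENABLED(option) (option)	/* simplified stand-in */

#define WARN_ONCE(cond, ...)					\
	({							\
		static int __warned;				\
		if ((cond) && !__warned) {			\
			__warned = 1;				\
			fprintf(stderr, __VA_ARGS__);		\
		}						\
		!!(cond);					\
	})

int main(void)
{
	/* Printed once at most, and never if the option is 0. */
	for (int i = 0; i < 3; i++)
		WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
			  "memblock: bottom-up allocation failed, "
			  "memory hotremove may be affected\n");
	return 0;
}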
diff --git a/mm/mmap.c b/mm/mmap.c
index d1eb87e..5801b5f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -186,8 +186,8 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
return next;
}
-static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf);
-
+static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
+ struct list_head *uf);
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
unsigned long retval;
@@ -245,7 +245,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
goto out;
/* Ok, looks good - let it rip. */
- if (do_brk(oldbrk, newbrk-oldbrk, &uf) < 0)
+ if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)
goto out;
set_brk:
@@ -2929,21 +2929,14 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
* anonymous maps. eventually we may be able to do some
* brk-specific accounting here.
*/
-static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, struct list_head *uf)
+static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
- unsigned long len;
struct rb_node **rb_link, *rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
- len = PAGE_ALIGN(request);
- if (len < request)
- return -ENOMEM;
- if (!len)
- return 0;
-
/* Until we need other flags, refuse anything except VM_EXEC. */
if ((flags & (~VM_EXEC)) != 0)
return -EINVAL;
@@ -3015,18 +3008,20 @@ out:
return 0;
}
-static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf)
-{
- return do_brk_flags(addr, len, 0, uf);
-}
-
-int vm_brk_flags(unsigned long addr, unsigned long len, unsigned long flags)
+int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
{
struct mm_struct *mm = current->mm;
+ unsigned long len;
int ret;
bool populate;
LIST_HEAD(uf);
+ len = PAGE_ALIGN(request);
+ if (len < request)
+ return -ENOMEM;
+ if (!len)
+ return 0;
+
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
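With the check hoisted into vm_brk_flags(), an oversized request is rejected before any locking or VMA work happens: PAGE_ALIGN() wraps around for lengths within a page of ULONG_MAX, and the "len < request" comparison detects exactly that wrap. A minimal standalone sketch of the idiom, with the constants and the helper name as illustrative assumptions:

#include <errno.h>
#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Mirrors the validation vm_brk_flags() now performs up front. */
static int validate_brk_len(unsigned long request, unsigned long *len)
{
	*len = PAGE_ALIGN(request);
	if (*len < request)	/* PAGE_ALIGN() wrapped past ULONG_MAX */
		return -ENOMEM;
	return 0;		/* *len == 0 simply means nothing to map */
}

int main(void)
{
	unsigned long len;

	printf("%d\n", validate_brk_len(5000, &len));		/* 0, len = 8192 */
	printf("%d\n", validate_brk_len(~0UL - 10, &len));	/* -12 (-ENOMEM) */
	return 0;
}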
diff --git a/mm/rmap.c b/mm/rmap.c
index 6db729d..eb47780 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -64,6 +64,7 @@
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
+#include <linux/userfaultfd_k.h>
#include <asm/tlbflush.h>
@@ -1481,11 +1482,16 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
set_pte_at(mm, address, pvmw.pte, pteval);
}
- } else if (pte_unused(pteval)) {
+ } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
/*
* The guest indicated that the page content is of no
* interest anymore. Simply discard the pte, vmscan
* will take care of the rest.
+ * A future reference will then fault in a new zero
+ * page. When userfaultfd is active, we must not drop
+ * this page though, as its main user (postcopy
+ * migration) will not expect userfaults on already
+ * copied pages.
*/
dec_mm_counter(mm, mm_counter(page));
/* We have to invalidate as we cleared the pte */
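userfaultfd_armed(vma) becomes true once userspace registers the VMA's address range with a userfaultfd, which is what postcopy live migration does. A hypothetical userspace sketch of such a registration, after which the kernel must keep delivering faults on these pages rather than silently dropping them as the hunk above now ensures:

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	if (uffd < 0) { perror("userfaultfd"); return 1; }

	struct uffdio_api api = { .api = UFFD_API };
	if (ioctl(uffd, UFFDIO_API, &api)) { perror("UFFDIO_API"); return 1; }

	size_t len = 4096;
	void *area = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED) { perror("mmap"); return 1; }

	struct uffdio_register reg = {
		.range = { .start = (unsigned long)area, .len = len },
		.mode  = UFFDIO_REGISTER_MODE_MISSING,	/* trap missing-page faults */
	};
	if (ioctl(uffd, UFFDIO_REGISTER, &reg)) { perror("UFFDIO_REGISTER"); return 1; }

	puts("region registered; the covering VMA is now userfaultfd-armed");
	return 0;
}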