-rw-r--r--   include/linux/sched/mm.h    6
-rw-r--r--   kernel/fork.c              17
-rw-r--r--   mm/mmap.c                  18
-rw-r--r--   mm/oom_kill.c              15
4 files changed, 23 insertions, 33 deletions
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 2b0a281..3a19c25 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -84,12 +84,6 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
 
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
-#ifdef CONFIG_MMU
-/* same as above but performs the slow path from the async context. Can
- * be called from the atomic context as well
- */
-extern void mmput_async(struct mm_struct *);
-#endif
 
 /* Grab a reference to a task's mm, if it is not already going away */
 extern struct mm_struct *get_task_mm(struct task_struct *task);
diff --git a/kernel/fork.c b/kernel/fork.c
index 4e5345c..7ed6460 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -922,7 +922,6 @@ static inline void __mmput(struct mm_struct *mm)
 	}
 	if (mm->binfmt)
 		module_put(mm->binfmt->module);
-	set_bit(MMF_OOM_SKIP, &mm->flags);
 	mmdrop(mm);
 }
 
@@ -938,22 +937,6 @@ void mmput(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(mmput);
 
-#ifdef CONFIG_MMU
-static void mmput_async_fn(struct work_struct *work)
-{
-	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
-	__mmput(mm);
-}
-
-void mmput_async(struct mm_struct *mm)
-{
-	if (atomic_dec_and_test(&mm->mm_users)) {
-		INIT_WORK(&mm->async_put_work, mmput_async_fn);
-		schedule_work(&mm->async_put_work);
-	}
-}
-#endif
-
 /**
  * set_mm_exe_file - change a reference to the mm's executable file
  *
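The mmput_async() implementation removed above is a stock instance of the dec-and-test-plus-deferral pattern: __mmput() can sleep, so a caller that may drop the last mm_users reference from atomic context hands the teardown to a workqueue instead of running it inline. Below is a minimal userspace sketch of the same shape using C11 atomics; struct obj, put_obj_async() and queue_teardown() are illustrative stand-ins, not kernel APIs, and the worker is stubbed with an immediate free.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int users;		/* plays the role of mm->mm_users */
};

/* Stand-in for INIT_WORK() + schedule_work(): a real worker would run
 * the heavy teardown later, outside the caller's context. */
static void queue_teardown(struct obj *o)
{
	printf("deferring teardown of %p\n", (void *)o);
	free(o);
}

/* Drop a reference; only the holder of the last one defers teardown,
 * mirroring the atomic_dec_and_test() in the removed mmput_async(). */
static void put_obj_async(struct obj *o)
{
	if (atomic_fetch_sub(&o->users, 1) == 1)
		queue_teardown(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->users, 2);
	put_obj_async(o);	/* one user remains: nothing happens */
	put_obj_async(o);	/* last reference: teardown is deferred */
	return 0;
}

After this patch the OOM reaper no longer takes an mm_users reference at all (see the mm/oom_kill.c hunks below), so that caller of the async variant goes away and the helper can be removed.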
diff --git a/mm/mmap.c b/mm/mmap.c
index 52f6c6b1..4c59816 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -44,6 +44,7 @@
 #include <linux/userfaultfd_k.h>
 #include <linux/moduleparam.h>
 #include <linux/pkeys.h>
+#include <linux/oom.h>
 
 #include <linux/uaccess.h>
 #include <asm/cacheflush.h>
@@ -3001,6 +3002,23 @@ void exit_mmap(struct mm_struct *mm)
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	unmap_vmas(&tlb, vma, 0, -1);
 
+	set_bit(MMF_OOM_SKIP, &mm->flags);
+	if (unlikely(tsk_is_oom_victim(current))) {
+		/*
+		 * Wait for oom_reap_task() to stop working on this
+		 * mm. Because MMF_OOM_SKIP is already set before
+		 * calling down_read(), oom_reap_task() will not
+		 * work on this "mm" after our up_write().
+		 *
+		 * tsk_is_oom_victim() cannot become true under us
+		 * either, because current->mm is already set to NULL
+		 * under task_lock before calling mmput, and oom_mm is
+		 * set non-NULL by the OOM killer only if current->mm
+		 * is found non-NULL while holding task_lock.
+		 */
+		down_write(&mm->mmap_sem);
+		up_write(&mm->mmap_sem);
+	}
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
 	tlb_finish_mmu(&tlb, 0, -1);
 
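The block added to exit_mmap() uses an empty write-lock critical section as a barrier: MMF_OOM_SKIP is published first, and the subsequent down_write();up_write() pair cannot complete until every reaper currently inside a down_read() section has dropped mmap_sem, while any reader arriving afterwards observes the flag and bails out. A minimal pthread sketch of this idiom follows; pthread_rwlock_t stands in for the kernel rwsem and an atomic_int for the mm flag bit, so the names here are illustrative rather than kernel APIs.

#include <pthread.h>
#include <stdatomic.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_int skip;			/* plays the role of MMF_OOM_SKIP */

/* exit_mmap() side: publish the flag, then flush out active readers. */
void exiting_side(void)
{
	atomic_store(&skip, 1);		/* like set_bit(MMF_OOM_SKIP) */
	pthread_rwlock_wrlock(&lock);	/* waits for every current reader */
	pthread_rwlock_unlock(&lock);
	/* Now it is safe to free the page tables: no reader that started
	 * before the flag was set is still running, and later readers
	 * will see skip == 1 and back off. */
}

/* Reaper side: the flag is only meaningful while the read lock is
 * held, which is why the kernel checks it under down_read(). */
int reaper_side(void)
{
	pthread_rwlock_rdlock(&lock);
	if (atomic_load(&skip)) {
		pthread_rwlock_unlock(&lock);
		return 0;		/* bail out, like the reaper */
	}
	/* ... safely work on the shared state under the read lock ... */
	pthread_rwlock_unlock(&lock);
	return 1;
}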
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index c9f3569..99736e0 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -495,11 +495,12 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
 	}
 
 	/*
-	 * increase mm_users only after we know we will reap something so
-	 * that the mmput_async is called only when we have reaped something
-	 * and delayed __mmput doesn't matter that much
+	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
+	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
+	 * under mmap_sem for reading because it serializes against the
+	 * down_write();up_write() cycle in exit_mmap().
 	 */
-	if (!mmget_not_zero(mm)) {
+	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
 		up_read(&mm->mmap_sem);
 		trace_skip_task_reaping(tsk->pid);
 		goto unlock_oom;
@@ -542,12 +543,6 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
 			K(get_mm_counter(mm, MM_SHMEMPAGES)));
 	up_read(&mm->mmap_sem);
 
-	/*
-	 * Drop our reference but make sure the mmput slow path is called from a
-	 * different context because we shouldn't risk we get stuck there and
-	 * put the oom_reaper out of the way.
-	 */
-	mmput_async(mm);
 	trace_finish_task_reaping(tsk->pid);
 unlock_oom:
 	mutex_unlock(&oom_lock);
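Taken together, the two mm/oom_kill.c hunks mean the reaper no longer pins the address space with mmget_not_zero(), so it can never end up owning the final mmput(); that possibility was the whole reason the deferred mmput_async() path existed. exit_mmap() and the reaper may now run concurrently on the same mm, serialized only by mmap_sem and MMF_OOM_SKIP. The ordering matters on both sides: if the reaper tested the bit before taking mmap_sem for reading, exit_mmap() could set the bit, pass through its down_write();up_write() cycle unopposed, and free the page tables while the reaper was still about to walk them.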