-rw-r--r--  include/linux/mmzone.h  |  5
-rw-r--r--  include/linux/oom.h     |  3
-rw-r--r--  mm/oom_kill.c           | 52
3 files changed, 60 insertions(+), 0 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index bad9486..9011505 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -344,6 +344,7 @@ struct zone {
typedef enum {
ZONE_ALL_UNRECLAIMABLE, /* all pages pinned */
ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */
+ ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */
} zone_flags_t;
static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
@@ -363,6 +364,10 @@ static inline int zone_is_reclaim_locked(const struct zone *zone)
{
return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
}
+static inline int zone_is_oom_locked(const struct zone *zone)
+{
+ return test_bit(ZONE_OOM_LOCKED, &zone->flags);
+}
/*
* The "priority" of VM scanning is how much of the queues we will scan in one
diff --git a/include/linux/oom.h b/include/linux/oom.h
index cf6ebf5..e908120 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -20,6 +20,9 @@ enum oom_constraint {
CONSTRAINT_MEMORY_POLICY,
};
+extern int try_set_zone_oom(struct zonelist *zonelist);
+extern void clear_zonelist_oom(struct zonelist *zonelist);
+
extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order);
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
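[Note: together these two declarations define a trylock-style protocol around out_of_memory(). A sketch of the intended caller pattern in the allocator slow path; the backoff and the restart label are illustrative assumptions, not part of this header change:]

	/* Allocation slow path, sketched: serialize OOM killing per zonelist. */
	if (!try_set_zone_oom(zonelist)) {
		/*
		 * A parallel OOM kill is already under way in one of these
		 * zones; back off and retry the allocation rather than
		 * killing a second task needlessly.
		 */
		schedule_timeout_uninterruptible(1);
		goto restart;
	}
	out_of_memory(zonelist, gfp_mask, order);
	clear_zonelist_oom(zonelist);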
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 1a7a4ef..6e999c8 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -27,6 +27,7 @@
#include <linux/notifier.h>
int sysctl_panic_on_oom;
+static DEFINE_MUTEX(zone_scan_mutex);
/* #define DEBUG */
/**
@@ -374,6 +375,57 @@ int unregister_oom_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
+/*
+ * Try to acquire the OOM killer lock for the zones in zonelist. Returns zero
+ * if a parallel OOM killing is already taking place that includes a zone in
+ * the zonelist. Otherwise, locks all zones in the zonelist and returns 1.
+ */
+int try_set_zone_oom(struct zonelist *zonelist)
+{
+ struct zone **z;
+ int ret = 1;
+
+ z = zonelist->zones;
+
+ mutex_lock(&zone_scan_mutex);
+ do {
+ if (zone_is_oom_locked(*z)) {
+ ret = 0;
+ goto out;
+ }
+ } while (*(++z) != NULL);
+
+ /*
+ * Lock each zone in the zonelist under zone_scan_mutex so a parallel
+ * invocation of try_set_zone_oom() doesn't succeed when it shouldn't.
+ */
+ z = zonelist->zones;
+ do {
+ zone_set_flag(*z, ZONE_OOM_LOCKED);
+ } while (*(++z) != NULL);
+out:
+ mutex_unlock(&zone_scan_mutex);
+ return ret;
+}
+
+/*
+ * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
+ * allocation attempts with zonelists containing them may now recall the OOM
+ * killer, if necessary.
+ */
+void clear_zonelist_oom(struct zonelist *zonelist)
+{
+ struct zone **z;
+
+ z = zonelist->zones;
+
+ mutex_lock(&zone_scan_mutex);
+ do {
+ zone_clear_flag(*z, ZONE_OOM_LOCKED);
+ } while (*(++z) != NULL);
+ mutex_unlock(&zone_scan_mutex);
+}
+
/**
* out_of_memory - kill the "best" process when we run out of memory
*
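[Note: as a self-contained illustration of the locking discipline introduced above, the algorithm reduces to: scan every zone in the zonelist under one mutex, fail if any is already flagged, otherwise flag them all. Below is a userspace model in plain C with pthreads; struct zone and the function names are reused purely for illustration and are not kernel code. Unlike the kernel's do/while loops, the loops here also tolerate an empty zonelist.]

/* Userspace model of try_set_zone_oom()/clear_zonelist_oom(): zones are
 * flag words, a zonelist is a NULL-terminated array of zone pointers,
 * and zone_scan_mutex is a pthread mutex. Illustrative only. */
#include <pthread.h>
#include <stdio.h>

struct zone { unsigned long flags; };
#define ZONE_OOM_LOCKED 0

static pthread_mutex_t zone_scan_mutex = PTHREAD_MUTEX_INITIALIZER;

static int try_set_zone_oom(struct zone **zonelist)
{
	struct zone **z;
	int ret = 1;

	pthread_mutex_lock(&zone_scan_mutex);
	for (z = zonelist; *z; z++) {
		if ((*z)->flags & (1UL << ZONE_OOM_LOCKED)) {
			ret = 0;	/* a parallel OOM kill holds this zone */
			goto out;
		}
	}
	/* No zone was locked: flag them all before dropping the mutex. */
	for (z = zonelist; *z; z++)
		(*z)->flags |= 1UL << ZONE_OOM_LOCKED;
out:
	pthread_mutex_unlock(&zone_scan_mutex);
	return ret;
}

static void clear_zonelist_oom(struct zone **zonelist)
{
	struct zone **z;

	pthread_mutex_lock(&zone_scan_mutex);
	for (z = zonelist; *z; z++)
		(*z)->flags &= ~(1UL << ZONE_OOM_LOCKED);
	pthread_mutex_unlock(&zone_scan_mutex);
}

int main(void)
{
	struct zone dma = { 0 }, normal = { 0 }, highmem = { 0 };
	struct zone *list_a[] = { &dma, &normal, NULL };
	struct zone *list_b[] = { &normal, &highmem, NULL };	/* overlaps on normal */

	printf("lock A: %d\n", try_set_zone_oom(list_a));	/* 1: all zones free */
	printf("lock B: %d\n", try_set_zone_oom(list_b));	/* 0: normal is locked */
	clear_zonelist_oom(list_a);
	printf("lock B: %d\n", try_set_zone_oom(list_b));	/* 1: succeeds now */
	clear_zonelist_oom(list_b);
	return 0;
}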