Diffstat (limited to 'mm/memcontrol.c')
 mm/memcontrol.c | 126
 1 file changed, 112 insertions(+), 14 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 70db6e0..e2fa20d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -45,7 +45,7 @@ struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES 5
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
-/* Turned on only when memory cgroup is enabled && really_do_swap_account = 0 */
+/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;
static int really_do_swap_account __initdata = 1; /* for remember boot option*/
#else
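The comment fix above matches how the flags actually interact: really_do_swap_account defaults to 1 at boot, and do_swap_account is switched on during init unless the administrator disables swap accounting on the kernel command line. A minimal sketch of that boot-time hook, assuming the handler and option name used elsewhere in this file (they are not part of this hunk):

static int __init disable_swap_account(char *s)
{
	/* clears the __initdata default of 1, so do_swap_account stays 0 */
	really_do_swap_account = 0;
	return 1;
}
__setup("noswapaccount", disable_swap_account);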
@@ -62,7 +62,8 @@ enum mem_cgroup_stat_index {
* For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
*/
MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
- MEM_CGROUP_STAT_RSS, /* # of pages charged as rss */
+ MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
+ MEM_CGROUP_STAT_MAPPED_FILE, /* # of pages charged as file rss */
MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */
MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */
@@ -176,6 +177,9 @@ struct mem_cgroup {
unsigned int swappiness;
+ /* set when res.limit == memsw.limit */
+ bool memsw_is_minimum;
+
/*
* statistics. This must be placed at the end of memcg.
*/
@@ -188,6 +192,7 @@ enum charge_type {
MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */
MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */
MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
+ MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */
NR_CHARGE_TYPE,
};
@@ -644,6 +649,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
int zid = zone_idx(z);
struct mem_cgroup_per_zone *mz;
int lru = LRU_FILE * !!file + !!active;
+ int ret;
BUG_ON(!mem_cont);
mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
@@ -661,9 +667,19 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
continue;
scan++;
- if (__isolate_lru_page(page, mode, file) == 0) {
+ ret = __isolate_lru_page(page, mode, file);
+ switch (ret) {
+ case 0:
list_move(&page->lru, dst);
+ mem_cgroup_del_lru(page);
nr_taken++;
+ break;
+ case -EBUSY:
+ /* we don't affect global LRU but rotate in our LRU */
+ mem_cgroup_rotate_lru_list(page, page_lru(page));
+ break;
+ default:
+ break;
}
}
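The switch above replaces the old boolean test on __isolate_lru_page(): a successfully isolated page is also unlinked from the memcg's private LRU before being counted, while an -EBUSY page is only rotated within the memcg LRU so repeated scans stop tripping over it; the global LRU is never touched from here. A standalone userspace model of that dispatch (names are illustrative, not kernel API):

#include <errno.h>
#include <stdio.h>

/* Mirrors the three cases handled in mem_cgroup_isolate_pages(). */
static const char *isolate_disposition(int ret)
{
	switch (ret) {
	case 0:
		return "isolated: move to dst, drop from memcg LRU";
	case -EBUSY:
		return "busy: rotate within the memcg LRU only";
	default:
		return "not isolatable: leave in place";
	}
}

int main(void)
{
	const int codes[] = { 0, -EBUSY, -EINVAL };

	for (unsigned int i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
		printf("%4d -> %s\n", codes[i], isolate_disposition(codes[i]));
	return 0;
}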
@@ -845,6 +861,10 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
int ret, total = 0;
int loop = 0;
+ /* If memsw_is_minimum==1, swap-out is of-no-use. */
+ if (root_mem->memsw_is_minimum)
+ noswap = true;
+
while (loop < 2) {
victim = mem_cgroup_select_victim(root_mem);
if (victim == root_mem)
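The early noswap decision above is the payoff of the new memsw_is_minimum flag: when the memory limit equals the memory+swap limit, swapping an anonymous page out releases a res charge but the swap slot remains charged to memsw, so the limit that actually binds does not move and the swap-out work is wasted. A tiny userspace model of that accounting argument (illustrative names only):

#include <assert.h>

struct charges {
	long res;	/* pages charged to memory        */
	long memsw;	/* pages charged to memory + swap */
};

/* Swap-out moves a page from RAM to swap: res drops, memsw does not. */
static void model_swap_out(struct charges *c)
{
	c->res -= 1;
	/* c->memsw unchanged: the swap entry is still accounted */
}

int main(void)
{
	struct charges c = { .res = 100, .memsw = 100 };

	model_swap_out(&c);
	assert(c.res == 99);
	assert(c.memsw == 100);	/* equal limits => no progress from swap */
	return 0;
}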
@@ -900,6 +920,44 @@ static void record_last_oom(struct mem_cgroup *mem)
mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
}
+/*
+ * Currently used to update mapped file statistics, but the routine can be
+ * generalized to update other statistics as well.
+ */
+void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
+{
+ struct mem_cgroup *mem;
+ struct mem_cgroup_stat *stat;
+ struct mem_cgroup_stat_cpu *cpustat;
+ int cpu;
+ struct page_cgroup *pc;
+
+ if (!page_is_file_cache(page))
+ return;
+
+ pc = lookup_page_cgroup(page);
+ if (unlikely(!pc))
+ return;
+
+ lock_page_cgroup(pc);
+ mem = pc->mem_cgroup;
+ if (!mem)
+ goto done;
+
+ if (!PageCgroupUsed(pc))
+ goto done;
+
+ /*
+ * Preemption is already disabled, we don't need get_cpu()
+ */
+ cpu = smp_processor_id();
+ stat = &mem->stat;
+ cpustat = &stat->cpustat[cpu];
+
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, val);
+done:
+ unlock_page_cgroup(pc);
+}
/*
* Unlike exported interface, "oom" parameter is added. if oom==true,
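mem_cgroup_update_mapped_file_stat() relies on its callers already running with preemption disabled (typically under the page table lock in the rmap code), which is why it can use smp_processor_id() without get_cpu(). The callers live outside this diff; a hedged sketch of how the file-rmap side might invoke it, with the exact bodies treated as assumptions:

/* first mapping of a file page: count it as mapped_file */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount))
		mem_cgroup_update_mapped_file_stat(page, 1);
}

/* last mapping gone: the hook itself ignores non-file pages */
void page_remove_rmap(struct page *page)
{
	if (atomic_add_negative(-1, &page->_mapcount))
		mem_cgroup_update_mapped_file_stat(page, -1);
}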
@@ -1098,6 +1156,10 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
struct mem_cgroup_per_zone *from_mz, *to_mz;
int nid, zid;
int ret = -EBUSY;
+ struct page *page;
+ int cpu;
+ struct mem_cgroup_stat *stat;
+ struct mem_cgroup_stat_cpu *cpustat;
VM_BUG_ON(from == to);
VM_BUG_ON(PageLRU(pc->page));
@@ -1118,6 +1180,23 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
res_counter_uncharge(&from->res, PAGE_SIZE);
mem_cgroup_charge_statistics(from, pc, false);
+
+ page = pc->page;
+ if (page_is_file_cache(page) && page_mapped(page)) {
+ cpu = smp_processor_id();
+ /* Update mapped_file data for mem_cgroup "from" */
+ stat = &from->stat;
+ cpustat = &stat->cpustat[cpu];
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+ -1);
+
+ /* Update mapped_file data for mem_cgroup "to" */
+ stat = &to->stat;
+ cpustat = &stat->cpustat[cpu];
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+ 1);
+ }
+
if (do_swap_account)
res_counter_uncharge(&from->memsw, PAGE_SIZE);
css_put(&from->css);
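When a charged page is moved between groups, its mapped_file contribution has to move with it, hence the paired -1/+1 above on the per-CPU counters of "from" and "to"; the code makes the same assumption as the update hook, namely that smp_processor_id() is stable in this context. A hypothetical refactoring (not part of the patch) that expresses the open-coded pair as one helper:

static void mem_cgroup_move_mapped_file_stat(struct mem_cgroup *from,
					     struct mem_cgroup *to)
{
	int cpu = smp_processor_id();

	__mem_cgroup_stat_add_safe(&from->stat.cpustat[cpu],
				   MEM_CGROUP_STAT_MAPPED_FILE, -1);
	__mem_cgroup_stat_add_safe(&to->stat.cpustat[cpu],
				   MEM_CGROUP_STAT_MAPPED_FILE, 1);
}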
@@ -1433,6 +1512,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
switch (ctype) {
case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+ case MEM_CGROUP_CHARGE_TYPE_DROP:
if (page_mapped(page))
goto unlock_out;
break;
@@ -1496,18 +1576,23 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
* called after __delete_from_swap_cache() and drop "page" account.
* memcg information is recorded to swap_cgroup of "ent"
*/
-void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
+void
+mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
struct mem_cgroup *memcg;
+ int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
+
+ if (!swapout) /* this was a swap cache but the swap is unused ! */
+ ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
+
+ memcg = __mem_cgroup_uncharge_common(page, ctype);
- memcg = __mem_cgroup_uncharge_common(page,
- MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
/* record memcg information */
- if (do_swap_account && memcg) {
+ if (do_swap_account && swapout && memcg) {
swap_cgroup_record(ent, css_id(&memcg->css));
mem_cgroup_get(memcg);
}
- if (memcg)
+ if (swapout && memcg)
css_put(&memcg->css);
}
#endif
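The new swapout parameter splits two different ways a page can leave the swap cache. A page that was genuinely swapped out has its owner recorded in swap_cgroup and the memcg reference pinned until the swap entry is freed; a page whose swap slot simply turned out to be unused is uncharged through the new MEM_CGROUP_CHARGE_TYPE_DROP (which, as the earlier hunk shows, takes the same page_mapped() early-out as MAPPED) and leaves no swap_cgroup record behind. A small userspace model of the resulting decision (illustrative names, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Only a real swap-out with swap accounting enabled records the owner. */
static bool should_record_swap_owner(bool swapout, bool do_swap_account)
{
	return do_swap_account && swapout;
}

int main(void)
{
	for (int s = 0; s <= 1; s++)
		for (int a = 0; a <= 1; a++)
			printf("swapout=%d swap_account=%d -> record=%d\n",
			       s, a, should_record_swap_owner(s, a));
	return 0;
}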
@@ -1685,6 +1770,12 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
break;
}
ret = res_counter_set_limit(&memcg->res, val);
+ if (!ret) {
+ if (memswlimit == val)
+ memcg->memsw_is_minimum = true;
+ else
+ memcg->memsw_is_minimum = false;
+ }
mutex_unlock(&set_limit_mutex);
if (!ret)
@@ -1703,16 +1794,14 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
return ret;
}
-int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
- unsigned long long val)
+static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
+ unsigned long long val)
{
int retry_count;
u64 memlimit, oldusage, curusage;
int children = mem_cgroup_count_children(memcg);
int ret = -EBUSY;
- if (!do_swap_account)
- return -EINVAL;
/* see mem_cgroup_resize_res_limit */
retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
@@ -1734,6 +1823,12 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
break;
}
ret = res_counter_set_limit(&memcg->memsw, val);
+ if (!ret) {
+ if (memlimit == val)
+ memcg->memsw_is_minimum = true;
+ else
+ memcg->memsw_is_minimum = false;
+ }
mutex_unlock(&set_limit_mutex);
if (!ret)
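Both resize paths now recompute memsw_is_minimum under set_limit_mutex, so the flag always reflects whether the two limits are currently equal, whichever of them changed last. Setting the limits equal is how an administrator says "no swap for this group", and the reclaim shortcut earlier in the patch depends on the flag being kept in sync. A runnable userspace example that sets both cgroup-v1 limits to the same value; the mount point and group name are assumptions, and memory.memsw.limit_in_bytes only exists when swap accounting is enabled:

#include <stdio.h>

static int write_value(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	const char *lim = "268435456";	/* 256 MiB */

	/* memory.limit first, then memsw: memsw must never drop below it. */
	write_value("/sys/fs/cgroup/memory/demo/memory.limit_in_bytes", lim);
	write_value("/sys/fs/cgroup/memory/demo/memory.memsw.limit_in_bytes",
		    lim);
	return 0;
}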
@@ -1947,8 +2042,7 @@ static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
val = res_counter_read_u64(&mem->res, name);
break;
case _MEMSWAP:
- if (do_swap_account)
- val = res_counter_read_u64(&mem->memsw, name);
+ val = res_counter_read_u64(&mem->memsw, name);
break;
default:
BUG();
@@ -2046,6 +2140,7 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
enum {
MCS_CACHE,
MCS_RSS,
+ MCS_MAPPED_FILE,
MCS_PGPGIN,
MCS_PGPGOUT,
MCS_INACTIVE_ANON,
@@ -2066,6 +2161,7 @@ struct {
} memcg_stat_strings[NR_MCS_STAT] = {
{"cache", "total_cache"},
{"rss", "total_rss"},
+ {"mapped_file", "total_mapped_file"},
{"pgpgin", "total_pgpgin"},
{"pgpgout", "total_pgpgout"},
{"inactive_anon", "total_inactive_anon"},
@@ -2086,6 +2182,8 @@ static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
s->stat[MCS_CACHE] += val * PAGE_SIZE;
val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
s->stat[MCS_RSS] += val * PAGE_SIZE;
+ val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE);
+ s->stat[MCS_MAPPED_FILE] += val * PAGE_SIZE;
val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
s->stat[MCS_PGPGIN] += val;
val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
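With the counter wired into mem_cgroup_get_local_stat(), memory.stat gains a "mapped_file" line per group and a hierarchical "total_mapped_file" line, both reported in bytes because the per-page count is multiplied by PAGE_SIZE. A small reader, assuming the same v1 mount point and group as in the previous example:

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/cgroup/memory/demo/memory.stat", "r");
	char key[64];
	unsigned long long val;

	if (!f) {
		perror("memory.stat");
		return 1;
	}
	while (fscanf(f, "%63s %llu", key, &val) == 2) {
		if (!strcmp(key, "mapped_file") ||
		    !strcmp(key, "total_mapped_file"))
			printf("%s = %llu bytes\n", key, val);
	}
	fclose(f);
	return 0;
}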