author | David S. Miller <davem@davemloft.net> | 2011-08-20 10:39:12 -0700
---|---|---
committer | David S. Miller <davem@davemloft.net> | 2011-08-20 10:39:12 -0700
commit | 823dcd2506fa369aeb8cbd26da5663efe2fda9a9 (patch) |
tree | 853b3e3c05f0b9ee1b5df8464db19b7acc57150c /mm |
parent | eaa36660de7e174498618d69d7277d44a2f24c3d (diff) |
parent | 98e77438aed3cd3343cbb86825127b1d9d2bea33 (diff) |
download | op-kernel-dev-823dcd2506fa369aeb8cbd26da5663efe2fda9a9.zip op-kernel-dev-823dcd2506fa369aeb8cbd26da5663efe2fda9a9.tar.gz |
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memcontrol.c | 12
-rw-r--r-- | mm/slub.c | 10
2 files changed, 16 insertions, 6 deletions
```diff
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f4ec4e7..930de94 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2091,6 +2091,7 @@ struct memcg_stock_pcp {
 #define FLUSHING_CACHED_CHARGE (0)
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
+static DEFINE_MUTEX(percpu_charge_mutex);
 
 /*
  * Try to consume stocked charge on this cpu. If success, one page is consumed
@@ -2197,8 +2198,7 @@ static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
 	for_each_online_cpu(cpu) {
 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
 
-		if (mem_cgroup_same_or_subtree(root_mem, stock->cached) &&
-				test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
+		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
 			flush_work(&stock->work);
 	}
 out:
@@ -2213,14 +2213,22 @@ out:
  */
 static void drain_all_stock_async(struct mem_cgroup *root_mem)
 {
+	/*
+	 * If someone calls draining, avoid adding more kworker runs.
+	 */
+	if (!mutex_trylock(&percpu_charge_mutex))
+		return;
 	drain_all_stock(root_mem, false);
+	mutex_unlock(&percpu_charge_mutex);
 }
 
 /* This is a synchronous drain interface. */
 static void drain_all_stock_sync(struct mem_cgroup *root_mem)
 {
 	/* called when force_empty is called */
+	mutex_lock(&percpu_charge_mutex);
 	drain_all_stock(root_mem, true);
+	mutex_unlock(&percpu_charge_mutex);
 }
 
 /*
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -701,7 +701,7 @@ static u8 *check_bytes(u8 *start, u8 value, unsigned int bytes)
 		return check_bytes8(start, value, bytes);
 
 	value64 = value | value << 8 | value << 16 | value << 24;
-	value64 = value64 | value64 << 32;
+	value64 = (value64 & 0xffffffff) | value64 << 32;
 	prefix = 8 - ((unsigned long)start) % 8;
 
 	if (prefix) {
@@ -1854,7 +1854,7 @@ redo:
 
 	new.frozen = 0;
 
-	if (!new.inuse && n->nr_partial < s->min_partial)
+	if (!new.inuse && n->nr_partial > s->min_partial)
 		m = M_FREE;
 	else if (new.freelist) {
 		m = M_PARTIAL;
@@ -2387,11 +2387,13 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 slab_empty:
 	if (prior) {
 		/*
-		 * Slab still on the partial list.
+		 * Slab on the partial list.
 		 */
 		remove_partial(n, page);
 		stat(s, FREE_REMOVE_PARTIAL);
-	}
+	} else
+		/* Slab must be on the full list */
+		remove_full(s, page);
 
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
```
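For context on the memcontrol.c hunks: the new percpu_charge_mutex lets an asynchronous caller bail out with mutex_trylock() when a drain is already in flight, while the synchronous force_empty path waits its turn with mutex_lock(). Below is a minimal userspace sketch of that try-lock-or-skip pattern using pthreads; the function names mirror the kernel ones for readability, but the code is illustrative, not the kernel implementation.

```c
#include <pthread.h>
#include <stdio.h>

/* Analogue of percpu_charge_mutex: at most one drain runs at a time. */
static pthread_mutex_t drain_mutex = PTHREAD_MUTEX_INITIALIZER;

static void drain_all_stock(int sync)
{
	/* Stand-in for flushing the per-cpu cached charges. */
	printf("draining (%s)\n", sync ? "sync" : "async");
}

/* Async callers skip the drain if one is already in progress. */
static void drain_all_stock_async(void)
{
	if (pthread_mutex_trylock(&drain_mutex) != 0)
		return;		/* someone else is already draining */
	drain_all_stock(0);
	pthread_mutex_unlock(&drain_mutex);
}

/* Sync callers must wait for any in-flight drain and then drain fully. */
static void drain_all_stock_sync(void)
{
	pthread_mutex_lock(&drain_mutex);
	drain_all_stock(1);
	pthread_mutex_unlock(&drain_mutex);
}

int main(void)
{
	drain_all_stock_async();	/* runs */
	drain_all_stock_async();	/* skipped if a drain were still in flight */
	drain_all_stock_sync();		/* always runs, possibly after waiting */
	return 0;
}
```

The asymmetry matches the intent of the added comment: an async caller that finds the mutex held can assume the in-flight drain covers it, so skipping avoids queueing redundant kworker runs, whereas the sync path must guarantee a complete drain before returning.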
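The check_bytes() hunk deals with integer promotion: `value` is promoted to int before the shifts, so for poison bytes with the high bit set (the SLUB red-zone bytes 0xbb and 0xcc, for instance) `value << 24` becomes negative and the assignment to the 64-bit variable sign-extends, leaving ones in the upper 32 bits that the unmasked `value64 << 32` cannot overwrite. A standalone sketch of the failure mode and the masked fix, as observed on a typical two's-complement target (not kernel code):

```c
#include <stdint.h>
#include <stdio.h>

/* Replicate one byte across a 64-bit word, the buggy way: the
 * right-hand side is evaluated as int, so value << 24 can go
 * negative and sign-extend into the upper half of value64. */
static uint64_t fill_buggy(uint8_t value)
{
	uint64_t value64 = value | value << 8 | value << 16 | value << 24;
	return value64 | value64 << 32;
}

/* Fixed variant mirroring the patch: mask off any sign-extended
 * upper bits before duplicating the low 32 bits. */
static uint64_t fill_fixed(uint8_t value)
{
	uint64_t value64 = value | value << 8 | value << 16 | value << 24;
	return (value64 & 0xffffffff) | value64 << 32;
}

int main(void)
{
	/* 0x6b (the kernel's POISON_FREE pattern) stays below 0x80, so
	 * both variants agree; 0xa5 has the high bit set and exposes
	 * the sign-extension bug. */
	printf("0x6b buggy=%016llx fixed=%016llx\n",
	       (unsigned long long)fill_buggy(0x6b),
	       (unsigned long long)fill_fixed(0x6b));
	printf("0xa5 buggy=%016llx fixed=%016llx\n",
	       (unsigned long long)fill_buggy(0xa5),
	       (unsigned long long)fill_fixed(0xa5));
	return 0;
}
```

For 0xa5 the buggy variant yields 0xffffffffa5a5a5a5 instead of 0xa5a5a5a5a5a5a5a5, which is why the debug comparison could only misfire for high-bit poison values.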
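The one-character `<` to `>` change in the slab deactivation path restores the intended caching policy: an empty slab is handed back to the page allocator only when the node already holds more than s->min_partial slabs on its partial list; below that threshold it is kept cached for reuse. A toy decision helper expressing that policy (the names here are invented for illustration, not taken from slub.c):

```c
#include <stdbool.h>
#include <stdio.h>

/* Mirror of the fixed condition in the diff: free an empty slab only
 * when the node already has more than min_partial partial slabs. */
static bool should_free_empty_slab(unsigned long nr_partial,
				   unsigned long min_partial)
{
	return nr_partial > min_partial;
}

int main(void)
{
	unsigned long min_partial = 5;

	for (unsigned long nr = 3; nr <= 7; nr++)
		printf("nr_partial=%lu -> %s\n", nr,
		       should_free_empty_slab(nr, min_partial) ?
		       "free the slab" : "keep it on the partial list");
	return 0;
}
```

With the original `<` comparison the policy was inverted: slabs were released exactly when the partial list was already short, defeating the purpose of min_partial.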