Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c  35
-rw-r--r--  mm/slab.c         2
-rw-r--r--  mm/slub.c         6
3 files changed, 27 insertions(+), 16 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 94da8ee..d87aa35 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -381,16 +381,25 @@ static void mem_cgroup_put(struct mem_cgroup *memcg);
static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
void sock_update_memcg(struct sock *sk)
{
- /* A socket spends its whole life in the same cgroup */
- if (sk->sk_cgrp) {
- WARN_ON(1);
- return;
- }
if (static_branch(&memcg_socket_limit_enabled)) {
struct mem_cgroup *memcg;
BUG_ON(!sk->sk_prot->proto_cgroup);
+ /* Socket cloning can throw us here with sk_cgrp already
+ * filled. It won't however, necessarily happen from
+ * process context. So the test for root memcg given
+ * the current task's memcg won't help us in this case.
+ *
+ * Respecting the original socket's memcg is a better
+ * decision in this case.
+ */
+ if (sk->sk_cgrp) {
+ BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
+ mem_cgroup_get(sk->sk_cgrp->memcg);
+ return;
+ }
+
rcu_read_lock();
memcg = mem_cgroup_from_task(current);
if (!mem_cgroup_is_root(memcg)) {
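The sk_cgrp branch added above exists because socket cloning copies the parent socket wholesale, so the child can reach sock_update_memcg() with sk_cgrp already set, and not necessarily in the owning task's context. A minimal sketch of that situation follows; the sock_like type and clone_sock() helper are simplified stand-ins for illustration, not the kernel's actual clone path, and the final pinning mirrors what the new branch in sock_update_memcg() now does for such a socket:

	/* Illustrative only: models how a cloned socket inherits sk_cgrp. */
	struct sock_like {
		struct cg_proto *sk_cgrp;	/* copied along with everything else */
		/* ... */
	};

	static struct sock_like *clone_sock(const struct sock_like *parent)
	{
		struct sock_like *child = kmalloc(sizeof(*child), GFP_ATOMIC);

		if (!child)
			return NULL;
		*child = *parent;	/* sk_cgrp now points at the parent's cg_proto */
		if (child->sk_cgrp)
			/* pin the original socket's memcg instead of consulting current */
			mem_cgroup_get(child->sk_cgrp->memcg);
		return child;
	}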
@@ -5391,8 +5400,9 @@ static void mem_cgroup_clear_mc(void)
static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
struct cgroup *cgroup,
- struct task_struct *p)
+ struct cgroup_taskset *tset)
{
+ struct task_struct *p = cgroup_taskset_first(tset);
int ret = 0;
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);
@@ -5430,7 +5440,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
struct cgroup *cgroup,
- struct task_struct *p)
+ struct cgroup_taskset *tset)
{
mem_cgroup_clear_mc();
}
@@ -5547,9 +5557,9 @@ retry:
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
struct cgroup *cont,
- struct cgroup *old_cont,
- struct task_struct *p)
+ struct cgroup_taskset *tset)
{
+ struct task_struct *p = cgroup_taskset_first(tset);
struct mm_struct *mm = get_task_mm(p);
if (mm) {
@@ -5564,19 +5574,18 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
#else /* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
struct cgroup *cgroup,
- struct task_struct *p)
+ struct cgroup_taskset *tset)
{
return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
struct cgroup *cgroup,
- struct task_struct *p)
+ struct cgroup_taskset *tset)
{
}
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
struct cgroup *cont,
- struct cgroup *old_cont,
- struct task_struct *p)
+ struct cgroup_taskset *tset)
{
}
#endif
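The remaining memcontrol.c hunks convert the cgroup attach callbacks from taking a single task_struct to taking a cgroup_taskset, from which the task is retrieved with cgroup_taskset_first(). A hedged sketch of the new callback shape for a made-up controller; only cgroup_taskset_first() and the signature are taken from the patch above, the rest is illustrative:

	static int example_can_attach(struct cgroup_subsys *ss,
				      struct cgroup *cgroup,
				      struct cgroup_taskset *tset)
	{
		struct task_struct *p = cgroup_taskset_first(tset);

		if (!p)		/* defensive: an empty set leaves nothing to check */
			return 0;
		/* per-controller admission checks against p would go here */
		return 0;
	}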
diff --git a/mm/slab.c b/mm/slab.c
index 83311c9a..2acfa0d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -121,6 +121,8 @@
#include <asm/tlbflush.h>
#include <asm/page.h>
+#include <trace/events/kmem.h>
+
/*
* DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
* 0 for faster, smaller code (especially in the critical paths).
diff --git a/mm/slub.c b/mm/slub.c
index 09ccee8..025f6ac 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1978,7 +1978,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
page->pobjects = pobjects;
page->next = oldpage;
- } while (irqsafe_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+ } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
stat(s, CPU_PARTIAL_FREE);
return pobjects;
}
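The slub.c hunks replace the irqsafe_cpu_* operations with the plain this_cpu_* ones while keeping the same lockless retry pattern: read the per-cpu value, build the replacement, and loop until the cmpxchg observes the value that was read. A small sketch of that pattern on an invented per-cpu counter; example_counter and example_add() are hypothetical, while this_cpu_read() and this_cpu_cmpxchg() are the real interfaces:

	DEFINE_PER_CPU(unsigned long, example_counter);

	static void example_add(unsigned long delta)
	{
		unsigned long old, new;

		do {
			old = this_cpu_read(example_counter);
			new = old + delta;
			/* retry if another context on this CPU changed the value meanwhile */
		} while (this_cpu_cmpxchg(example_counter, old, new) != old);
	}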
@@ -2304,7 +2304,7 @@ redo:
* Since this is without lock semantics the protection is only against
* code executing on this cpu *not* from access by other cpus.
*/
- if (unlikely(!irqsafe_cpu_cmpxchg_double(
+ if (unlikely(!this_cpu_cmpxchg_double(
s->cpu_slab->freelist, s->cpu_slab->tid,
object, tid,
get_freepointer_safe(s, object), next_tid(tid)))) {
@@ -2534,7 +2534,7 @@ redo:
if (likely(page == c->page)) {
set_freepointer(s, object, c->freelist);
- if (unlikely(!irqsafe_cpu_cmpxchg_double(
+ if (unlikely(!this_cpu_cmpxchg_double(
s->cpu_slab->freelist, s->cpu_slab->tid,
c->freelist, tid,
object, next_tid(tid)))) {
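The two fastpath hunks above use the double-word variant: a freelist pointer and a transaction id are compared and swapped together, so any interrupt or preemption that changed the per-cpu state between the reads and the cmpxchg shows up as a tid mismatch and forces a retry. A hedged sketch of that idea with invented names (example_stack and example_pop() are hypothetical; this_cpu_read() and this_cpu_cmpxchg_double() are the real interfaces, and the two fields must sit adjacent and suitably aligned for the double operation to apply):

	struct example_stack {
		void *head;		/* first free object, like s->cpu_slab->freelist */
		unsigned long tid;	/* bumped on every change, like s->cpu_slab->tid */
	};
	/* cache-line alignment also satisfies the double-word alignment requirement */
	DEFINE_PER_CPU_ALIGNED(struct example_stack, example_stack);

	static void *example_pop(void)
	{
		void *head;
		unsigned long tid;

		do {
			tid  = this_cpu_read(example_stack.tid);
			head = this_cpu_read(example_stack.head);
			if (!head)
				return NULL;
			/* the new head is the next pointer stored in the object itself */
		} while (!this_cpu_cmpxchg_double(example_stack.head, example_stack.tid,
						  head, tid,
						  *(void **)head, tid + 1));
		return head;
	}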