Diffstat (limited to 'include')
-rw-r--r--   include/linux/memcontrol.h   19
-rw-r--r--   include/net/sock.h           64
-rw-r--r--   include/net/tcp.h             5
3 files changed, 24 insertions(+), 64 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6c91c1b..e4e77bd 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -660,12 +660,6 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
}
#endif /* CONFIG_MEMCG */
-enum {
- UNDER_LIMIT,
- SOFT_LIMIT,
- OVER_LIMIT,
-};
-
#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
@@ -694,6 +688,19 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
struct sock;
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
+bool mem_cgroup_charge_skmem(struct cg_proto *proto, unsigned int nr_pages);
+void mem_cgroup_uncharge_skmem(struct cg_proto *proto, unsigned int nr_pages);
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
+static inline bool mem_cgroup_under_socket_pressure(struct cg_proto *proto)
+{
+ return proto->memory_pressure;
+}
+#else
+static inline bool mem_cgroup_under_socket_pressure(struct cg_proto *proto)
+{
+ return false;
+}
+#endif
#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;
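
The memcontrol.h hunk above drops the UNDER_LIMIT/SOFT_LIMIT/OVER_LIMIT status enum and instead exposes an explicit charge/uncharge pair plus a pressure predicate. A minimal sketch of how a caller might pair the two helpers follows; it is not part of this patch, the example_* names are hypothetical, and the assumption that a false return from mem_cgroup_charge_skmem() means the charge should be backed off is spelled out in the comments.

/* Illustrative sketch only (kernel context, CONFIG_MEMCG_KMEM && CONFIG_INET).
 * The example_* helpers are hypothetical; the semantics of the bool return
 * from mem_cgroup_charge_skmem() are assumed, not taken from this diff.
 */
#include <linux/memcontrol.h>
#include <net/sock.h>

static bool example_charge_skmem(struct sock *sk, unsigned int nr_pages)
{
	/* Skip memcg accounting when sockets are not memcg-enabled or the
	 * socket has no cgroup attached, as the existing call sites do. */
	if (!mem_cgroup_sockets_enabled || !sk->sk_cgrp)
		return true;

	/* Assumed: true means the charge fit under the cgroup limit. */
	return mem_cgroup_charge_skmem(sk->sk_cgrp, nr_pages);
}

static void example_uncharge_skmem(struct sock *sk, unsigned int nr_pages)
{
	/* Release exactly what example_charge_skmem() charged. */
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		mem_cgroup_uncharge_skmem(sk->sk_cgrp, nr_pages);
}
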
diff --git a/include/net/sock.h b/include/net/sock.h
index 8b1f8e5..94a6c1a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1129,8 +1129,9 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
if (!sk->sk_prot->memory_pressure)
return false;
- if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
- return !!sk->sk_cgrp->memory_pressure;
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
+ mem_cgroup_under_socket_pressure(sk->sk_cgrp))
+ return true;
return !!*sk->sk_prot->memory_pressure;
}
@@ -1144,9 +1145,6 @@ static inline void sk_leave_memory_pressure(struct sock *sk)
if (*memory_pressure)
*memory_pressure = 0;
-
- if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
- sk->sk_cgrp->memory_pressure = 0;
}
static inline void sk_enter_memory_pressure(struct sock *sk)
@@ -1154,76 +1152,30 @@ static inline void sk_enter_memory_pressure(struct sock *sk)
if (!sk->sk_prot->enter_memory_pressure)
return;
- if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
- sk->sk_cgrp->memory_pressure = 1;
-
sk->sk_prot->enter_memory_pressure(sk);
}
static inline long sk_prot_mem_limits(const struct sock *sk, int index)
{
- long limit = sk->sk_prot->sysctl_mem[index];
-
- if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
- limit = min_t(long, limit, sk->sk_cgrp->memory_allocated.limit);
-
- return limit;
-}
-
-static inline void memcg_memory_allocated_add(struct cg_proto *prot,
- unsigned long amt,
- int *parent_status)
-{
- struct page_counter *counter;
-
- if (page_counter_try_charge(&prot->memory_allocated, amt, &counter))
- return;
-
- page_counter_charge(&prot->memory_allocated, amt);
- *parent_status = OVER_LIMIT;
-}
-
-static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
- unsigned long amt)
-{
- page_counter_uncharge(&prot->memory_allocated, amt);
+ return sk->sk_prot->sysctl_mem[index];
}
static inline long
sk_memory_allocated(const struct sock *sk)
{
- struct proto *prot = sk->sk_prot;
-
- if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
- return page_counter_read(&sk->sk_cgrp->memory_allocated);
-
- return atomic_long_read(prot->memory_allocated);
+ return atomic_long_read(sk->sk_prot->memory_allocated);
}
static inline long
-sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
+sk_memory_allocated_add(struct sock *sk, int amt)
{
- struct proto *prot = sk->sk_prot;
-
- if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
- memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status);
- /* update the root cgroup regardless */
- atomic_long_add_return(amt, prot->memory_allocated);
- return page_counter_read(&sk->sk_cgrp->memory_allocated);
- }
-
- return atomic_long_add_return(amt, prot->memory_allocated);
+ return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
}
static inline void
sk_memory_allocated_sub(struct sock *sk, int amt)
{
- struct proto *prot = sk->sk_prot;
-
- if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
- memcg_memory_allocated_sub(sk->sk_cgrp, amt);
-
- atomic_long_sub(amt, prot->memory_allocated);
+ atomic_long_sub(amt, sk->sk_prot->memory_allocated);
}
static inline void sk_sockets_allocated_dec(struct sock *sk)
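
With the per-memcg page counter gone from these paths, the sock.h accounting helpers lose their parent_status out-parameter and operate only on the protocol's global counter and sysctl limits. Below is a minimal sketch of the resulting calling convention; example_mem_schedule() is a hypothetical wrapper, not the kernel's __sk_mem_schedule(), and the min/pressure/max layout of sysctl_mem (index 2 as the hard limit) is the usual convention, stated here as an assumption.

/* Illustrative sketch only: shows the simplified calling convention of the
 * sk_memory_allocated_*() helpers after this patch. Not real kernel code. */
#include <net/sock.h>

static int example_mem_schedule(struct sock *sk, int nr_pages)
{
	/* Plain atomic update on the protocol's global counter; the
	 * OVER_LIMIT/parent_status out-parameter is gone. */
	long allocated = sk_memory_allocated_add(sk, nr_pages);

	/* Below the low watermark: clear any pressure state and admit. */
	if (allocated <= sk_prot_mem_limits(sk, 0)) {
		sk_leave_memory_pressure(sk);
		return 1;
	}

	/* Above the pressure watermark: raise the pressure flag. */
	if (allocated > sk_prot_mem_limits(sk, 1))
		sk_enter_memory_pressure(sk);

	/* Above the assumed hard limit: back the charge out and refuse. */
	if (allocated > sk_prot_mem_limits(sk, 2)) {
		sk_memory_allocated_sub(sk, nr_pages);
		return 0;
	}

	/* Between the pressure watermark and the hard limit: consult the
	 * combined signal; sk_under_memory_pressure() now folds in
	 * mem_cgroup_under_socket_pressure(), so one check covers both. */
	if (sk_under_memory_pressure(sk)) {
		sk_memory_allocated_sub(sk, nr_pages);
		return 0;
	}

	return 1;
}
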
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a80255f..d9df80d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -289,8 +289,9 @@ extern int tcp_memory_pressure;
/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
- if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
- return !!sk->sk_cgrp->memory_pressure;
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
+ mem_cgroup_under_socket_pressure(sk->sk_cgrp))
+ return true;
return tcp_memory_pressure;
}
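
For completeness, a tiny illustration of what the rewritten predicate gives a caller; example_can_grow_rcvbuf() is a hypothetical consumer, not code from this patch.

/* Illustrative sketch only: example_can_grow_rcvbuf() is hypothetical. */
#include <net/tcp.h>

static bool example_can_grow_rcvbuf(const struct sock *sk)
{
	/* A socket whose memcg reports socket-memory pressure is now
	 * treated the same as global tcp_memory_pressure. */
	return !tcp_under_memory_pressure(sk);
}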