author     Eric Dumazet <eric.dumazet@gmail.com>    2010-12-06 09:29:43 -0800
committer  David S. Miller <davem@davemloft.net>    2010-12-06 09:29:43 -0800
commit     46bcf14f44d8f31ecfdc8b6708ec15a3b33316d9 (patch)
tree       4d2a200387242e1ed2d95ccd367c77750379e8cc /include
parent     e7dfc8dbdf9a7fa1ef04c63100a71f4102b82ed3 (diff)
filter: fix sk_filter rcu handling
Pavel Emelyanov tried to fix a race between sk_filter_(de|at)tach and sk_clone() in commit 47e958eac280c263397.

Problem is we can have several clones sharing a common sk_filter, and these clones might want to sk_filter_attach() their own filters at the same time, which can overwrite old_filter->rcu and corrupt the RCU queues.

We cannot reuse filter->rcu without being sure no other thread could do the same thing.

Switch the code to a more conventional ref-counting technique: do the atomic decrement immediately, and queue a single RCU callback when the last reference is released.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
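The "conventional ref-counting technique" mentioned above is the usual pattern for freeing an RCU-protected, shared object: every releaser drops its reference immediately, and only the one thread that drops the last reference queues the single RCU callback that frees the memory, so the embedded rcu_head can never be queued twice. A minimal sketch of that pattern, using a hypothetical struct foo rather than the real sk_filter code, might look like this:

/*
 * Sketch of the refcount-then-RCU-free pattern (hypothetical "struct foo",
 * not the actual kernel code touched by this commit).
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct foo {
	atomic_t	refcnt;		/* shared by all clones */
	struct rcu_head	rcu;		/* queued at most once */
	/* ... payload ... */
};

static void foo_release_rcu(struct rcu_head *rcu)
{
	struct foo *f = container_of(rcu, struct foo, rcu);

	kfree(f);			/* runs after an RCU grace period */
}

static inline void foo_put(struct foo *f)
{
	/*
	 * Exactly one caller can see the count reach zero, so f->rcu is
	 * queued exactly once; concurrent releases on clones can no
	 * longer corrupt the RCU callback list.
	 */
	if (atomic_dec_and_test(&f->refcnt))
		call_rcu_bh(&f->rcu, foo_release_rcu);
}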
Diffstat (limited to 'include')
-rw-r--r--  include/net/sock.h | 4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index a6338d0..659d968d9 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1155,6 +1155,8 @@ extern void sk_common_release(struct sock *sk);
/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);
+extern void sk_filter_release_rcu(struct rcu_head *rcu);
+
/**
* sk_filter_release - release a socket filter
* @fp: filter to remove
@@ -1165,7 +1167,7 @@ extern void sock_init_data(struct socket *sock, struct sock *sk);
static inline void sk_filter_release(struct sk_filter *fp)
{
if (atomic_dec_and_test(&fp->refcnt))
- kfree(fp);
+ call_rcu_bh(&fp->rcu, sk_filter_release_rcu);
}
static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
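The header change only declares sk_filter_release_rcu(); its definition lives in net/core/filter.c, which falls outside this diffstat (limited to 'include'). Presumably the callback simply maps the rcu_head back to its containing sk_filter and frees it once the grace period has elapsed, roughly along these lines:

/*
 * Sketch of the matching callback in net/core/filter.c (not shown in this
 * diff); the exact body may differ, but the idea is to recover the
 * sk_filter from its embedded rcu_head and free it after the grace period.
 */
void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	kfree(fp);
}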