author	Xin Long <lucien.xin@gmail.com>	2016-01-22 01:49:07 +0800
committer	David S. Miller <davem@davemloft.net>	2016-01-28 15:59:32 -0800
commit	1eed677933b816978abc4e3e18ecae5f254cb9be (patch)
tree	b2eba8266b9f81c2a93d9cb9173962da88fd1399 /net/sctp/input.c
parent	2baaa2d1386bd9949877a74ffc0dc0f2c5394b85 (diff)
sctp: fix the transport dead race check by using atomic_add_unless on refcnt
Now when __sctp_lookup_association is running in BH, it will try to check
if t->dead is set, but meanwhile other CPUs may be freeing this transport
and this assoc and if it happens that __sctp_lookup_association checked
t->dead a bit too early, it may think that the association is still good
while it was already freed.

So we fix this race by using atomic_add_unless in sctp_transport_hold.
After we get one transport from hashtable, we will hold it only when this
transport's refcnt is not 0, so that we can make sure t->asoc cannot be
freed before we hold the asoc again.

Note that sctp association is not freed using RCU so we can't use
atomic_add_unless() with it as it may just be too late for that either.

Fixes: 4f0087812648 ("sctp: apply rhashtable api to send/recv path")
Reported-by: Vlad Yasevich <vyasevich@gmail.com>
Signed-off-by: Xin Long <lucien.xin@gmail.com>
Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
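The helper the message refers to is defined outside this file, so it does not appear in the diff below (which is limited to net/sctp/input.c). A minimal sketch of a hold function built on atomic_add_unless(), assuming the transport's reference count is an atomic_t field named refcnt; the field name and exact definition are not shown in this page:

/* Sketch: take a reference only if the refcnt has not already dropped
 * to zero. Returns non-zero on success, 0 if the transport is already
 * being freed and must not be used.
 */
int sctp_transport_hold(struct sctp_transport *transport)
{
	return atomic_add_unless(&transport->refcnt, 1, 0);
}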
Diffstat (limited to 'net/sctp/input.c')
-rw-r--r--	net/sctp/input.c	17
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/net/sctp/input.c b/net/sctp/input.c
index bf61dfb..49d2cc7 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -935,15 +935,22 @@ static struct sctp_association *__sctp_lookup_association(
 					struct sctp_transport **pt)
 {
 	struct sctp_transport *t;
+	struct sctp_association *asoc = NULL;
 
+	rcu_read_lock();
 	t = sctp_addrs_lookup_transport(net, local, peer);
-	if (!t || t->dead)
-		return NULL;
+	if (!t || !sctp_transport_hold(t))
+		goto out;
 
-	sctp_association_hold(t->asoc);
+	asoc = t->asoc;
+	sctp_association_hold(asoc);
 	*pt = t;
 
-	return t->asoc;
+	sctp_transport_put(t);
+
+out:
+	rcu_read_unlock();
+	return asoc;
 }
 
 /* Look up an association. protected by RCU read lock */
@@ -955,9 +962,7 @@ struct sctp_association *sctp_lookup_association(struct net *net,
 {
 	struct sctp_association *asoc;
 
-	rcu_read_lock();
 	asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
-	rcu_read_unlock();
 
 	return asoc;
 }
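After this change, __sctp_lookup_association takes the RCU read lock itself, holds the transport only long enough to pin the association, and returns either NULL or an association with one reference held. A hypothetical caller sketch (the function name and surrounding code are illustrative, not part of this patch) then only has to balance the association reference:

/* Hypothetical caller: relies on the association keeping its transports
 * alive, so *t stays valid for as long as asoc is held.
 */
static void example_lookup_user(struct net *net,
				const union sctp_addr *laddr,
				const union sctp_addr *paddr)
{
	struct sctp_transport *t;
	struct sctp_association *asoc;

	asoc = sctp_lookup_association(net, laddr, paddr, &t);
	if (!asoc)
		return;

	/* ... use asoc and t here ... */

	sctp_association_put(asoc);	/* drop the reference taken by the lookup */
}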