author		Eric Dumazet <dada1@cosmosbay.com>	2008-10-29 11:19:58 -0700
committer	David S. Miller <davem@davemloft.net>	2008-10-29 11:19:58 -0700
commit		96631ed16c514cf8b28fab991a076985ce378c26 (patch)
tree		60023b60c6eaf2acdc8fcad258585e4425ebaf91 /net/ipv4/udp.c
parent		f52b5054ec108aaa9e903850d6b62af8ae3fe6ae (diff)
udp: introduce sk_for_each_rcu_safenext()
Corey Minyard found a race added in commit 271b72c7fa82c2c7a795bc16896149933110672d
(udp: RCU handling for Unicast packets.):

"If the socket is moved from one list to another list in-between the time
the hash is calculated and the next field is accessed, and the socket has
moved to the end of the new list, the traversal will not complete properly
on the list it should have, since the socket will be on the end of the new
list and there's not a way to tell it's on a new list and restart the list
traversal. I think that this can be solved by pre-fetching the "next" field
(with proper barriers) before checking the hash."

This patch corrects this problem, introducing a new sk_for_each_rcu_safenext()
macro.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
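As a hedged sketch only — assuming the usual kernel helpers rcu_dereference(),
hlist_entry() and prefetch(); the macros this patch actually adds to the
headers may differ in detail — the idea is to sample each node's next pointer
before the loop body can observe a recycled node:

/*
 * Sketch: load 'next' from the chain being walked *before* the loop body
 * runs.  Even if the body finds that the current socket was freed and reused
 * on another chain (SLAB_DESTROY_BY_RCU), the already-fetched 'next' still
 * belongs to the original traversal, so the walk cannot silently end on the
 * tail of a different chain.
 */
#define hlist_for_each_entry_rcu_safenext(tpos, pos, head, member, next)      \
	for (pos = rcu_dereference((head)->first);                             \
	     pos && ({ next = pos->next; prefetch(next); 1; }) &&              \
	     ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; });         \
	     pos = rcu_dereference(next))

#define sk_for_each_rcu_safenext(__sk, node, list, next) \
	hlist_for_each_entry_rcu_safenext(__sk, node, list, sk_node, next)

Compared with sk_for_each_rcu(), the caller simply passes one extra 'next'
cursor; everything else about the lockless lookup stays the same.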
Diffstat (limited to 'net/ipv4/udp.c')
-rw-r--r--	net/ipv4/udp.c |  4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ced8203..c3ecec8 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -256,7 +256,7 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 		int dif, struct udp_table *udptable)
 {
 	struct sock *sk, *result;
-	struct hlist_node *node;
+	struct hlist_node *node, *next;
 	unsigned short hnum = ntohs(dport);
 	unsigned int hash = udp_hashfn(net, hnum);
 	struct udp_hslot *hslot = &udptable->hash[hash];
@@ -266,7 +266,7 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 begin:
 	result = NULL;
 	badness = -1;
-	sk_for_each_rcu(sk, node, &hslot->head) {
+	sk_for_each_rcu_safenext(sk, node, &hslot->head, next) {
 		/*
 		 * lockless reader, and SLAB_DESTROY_BY_RCU items:
 		 * We must check this item was not moved to another chain
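The diff context is cut off inside that comment. As an illustration only —
simplified and hypothetical, not the exact __udp4_lib_lookup() body — the
lockless pattern the comment is guarding looks roughly like this (compute_score()
stands in for the real scoring helper):

	sk_for_each_rcu_safenext(sk, node, &hslot->head, next) {
		/*
		 * SLAB_DESTROY_BY_RCU: this memory can be freed and reused as
		 * another socket on another chain while we walk without locks,
		 * so a candidate only counts if it still belongs to this chain.
		 */
		if (udp_hashfn(net, sk->sk_hash) != hash)
			continue;	/* recycled item; 'next' is still from our chain */
		score = compute_score(sk, net, saddr, hnum, sport,
				      daddr, dport, dif);
		if (score > badness) {
			result = sk;
			badness = score;
		}
	}
	/*
	 * A lookup written this way still has to revalidate 'result' after
	 * taking a reference and, if it lost a race, restart from the
	 * 'begin' label shown in the hunk above.
	 */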