| author | Herbert Xu <herbert@gondor.apana.org.au> | 2010-06-10 16:12:44 +0000 |
| --- | --- | --- |
| committer | David S. Miller <davem@davemloft.net> | 2010-06-15 10:58:38 -0700 |
| commit | de85d99eb7b595f6751550184b94c1e2f74a828b (patch) | |
| tree | 950eea4329bfb2b5d94f2e242a86c36cf1fd49fc /include | |
| parent | 36655042f9873efc2a90d251b9aef9b6b79d75d8 (diff) | |
netpoll: Fix RCU usage
The use of RCU in netpoll is incorrect in a number of places:
1) The initial setting of npinfo lacks a write barrier.
2) synchronize_rcu() is called in the wrong place.
3) Read barriers are missing.
4) Some paths do not even take rcu_read_lock.
5) npinfo is zeroed only after it has been freed.
This patch fixes those issues. As most users run in BH context, it also converts the RCU usage to the BH variant.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
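For reference, the ordering that the fixes above boil down to looks roughly like the sketch below. It is a hedged, minimal illustration with made-up foo_dev/foo_info names, not the netpoll code itself; it assumes the BH-flavoured RCU primitives available in kernels of this era (rcu_read_lock_bh(), rcu_dereference_bh(), synchronize_rcu_bh()), and the numbers in the comments refer to the five issues listed in the commit message.

```c
/*
 * Hypothetical sketch (not the netpoll code): publish, read and retire an
 * RCU-protected pointer using the BH flavour of RCU.
 */
#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct foo_info {
	int rx_flags;
};

struct foo_dev {
	struct foo_info *info;	/* protected by RCU-bh */
};

/* Publish: rcu_assign_pointer() supplies the missing write barrier (1). */
static int foo_setup(struct foo_dev *dev)
{
	struct foo_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return -ENOMEM;
	rcu_assign_pointer(dev->info, info);
	return 0;
}

/*
 * Read side: take rcu_read_lock_bh() and fetch the pointer through
 * rcu_dereference_bh(), which provides the read-side ordering (3, 4).
 */
static bool foo_rx_enabled(struct foo_dev *dev)
{
	struct foo_info *info;
	bool ret;

	rcu_read_lock_bh();
	info = rcu_dereference_bh(dev->info);
	ret = info && info->rx_flags;
	rcu_read_unlock_bh();
	return ret;
}

/*
 * Retire: unpublish first, wait for readers, then free (2, 5).  Zeroing
 * dev->info only after kfree() would leave a window in which BH readers
 * could still dereference freed memory.
 */
static void foo_cleanup(struct foo_dev *dev)
{
	struct foo_info *info = dev->info;

	rcu_assign_pointer(dev->info, NULL);
	synchronize_rcu_bh();
	kfree(info);
}
```

The BH variant fits here because netpoll's readers already run in softirq context: rcu_read_lock_bh() adds essentially no cost there, and the grace period only has to wait for in-flight BH-disabled sections to complete.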
Diffstat (limited to 'include')
| -rw-r--r-- | include/linux/netpoll.h | 13 |
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index e9e2312..95c9f7e 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -57,12 +57,15 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
 #ifdef CONFIG_NETPOLL
 static inline bool netpoll_rx(struct sk_buff *skb)
 {
-	struct netpoll_info *npinfo = skb->dev->npinfo;
+	struct netpoll_info *npinfo;
 	unsigned long flags;
 	bool ret = false;
 
+	rcu_read_lock_bh();
+	npinfo = rcu_dereference(skb->dev->npinfo);
+
 	if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
-		return false;
+		goto out;
 
 	spin_lock_irqsave(&npinfo->rx_lock, flags);
 	/* check rx_flags again with the lock held */
@@ -70,12 +73,14 @@ static inline bool netpoll_rx(struct sk_buff *skb)
 		ret = true;
 	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 
+out:
+	rcu_read_unlock_bh();
 	return ret;
 }
 
 static inline int netpoll_rx_on(struct sk_buff *skb)
 {
-	struct netpoll_info *npinfo = skb->dev->npinfo;
+	struct netpoll_info *npinfo = rcu_dereference(skb->dev->npinfo);
 
 	return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
 }
@@ -91,7 +96,6 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
 {
 	struct net_device *dev = napi->dev;
 
-	rcu_read_lock(); /* deal with race on ->npinfo */
 	if (dev && dev->npinfo) {
 		spin_lock(&napi->poll_lock);
 		napi->poll_owner = smp_processor_id();
@@ -108,7 +112,6 @@ static inline void netpoll_poll_unlock(void *have)
 		napi->poll_owner = -1;
 		spin_unlock(&napi->poll_lock);
 	}
-	rcu_read_unlock();
 }
 
 #else