author     Reshetova, Elena <elena.reshetova@intel.com>    2017-06-30 13:07:58 +0300
committer  David S. Miller <davem@davemloft.net>           2017-07-01 07:39:07 -0700
commit     633547973ffc32fd2c815639d4675e1531f0896f (patch)
tree       751ca7a379366af93f578d9f35f48339d4d2dd9b /net
parent     53869cebce4bc53f71a080e7830600d4ae1ab712 (diff)
net: convert sk_buff.users from atomic_t to refcount_t
The refcount_t type and its corresponding API should be used instead of atomic_t when the variable is used as a reference counter. This avoids accidental refcounter overflows that might lead to use-after-free situations.
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
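
For context on the rationale: an atomic_t reference count that is incremented past UINT_MAX silently wraps to zero, at which point the next decrement frees an object other code still holds, which is the use-after-free the commit message mentions. refcount_t closes this hole by saturating: once the counter reaches a sentinel value it stays there, and the object is deliberately leaked instead of freed. Below is a minimal userspace sketch of that behaviour using C11 atomics; the names mirror the kernel API, but this is illustrative, not the kernel implementation:

```c
#include <stdatomic.h>
#include <stdio.h>

/* Illustrative model of the kernel's refcount_t saturation semantics.
 * The real implementation differs (memory ordering, one-shot warnings). */
#define REFCOUNT_SATURATED (~0u)

typedef struct { atomic_uint refs; } refcount_t;

static void refcount_set(refcount_t *r, unsigned int n)
{
	atomic_store(&r->refs, n);
}

static unsigned int refcount_read(refcount_t *r)
{
	return atomic_load(&r->refs);
}

static void refcount_inc(refcount_t *r)
{
	unsigned int old = atomic_load(&r->refs);

	do {
		/* Incrementing from 0 (object already freed) or from the
		 * saturation value pins the counter instead of wrapping. */
		if (old == 0 || old == REFCOUNT_SATURATED) {
			atomic_store(&r->refs, REFCOUNT_SATURATED);
			return;
		}
	} while (!atomic_compare_exchange_weak(&r->refs, &old, old + 1));
}

/* Returns 1 only when the final reference is dropped. */
static int refcount_dec_and_test(refcount_t *r)
{
	unsigned int old = atomic_load(&r->refs);

	do {
		/* A saturated counter never reaches zero: the object is
		 * leaked rather than freed out from under its users. */
		if (old == 0 || old == REFCOUNT_SATURATED)
			return 0;
	} while (!atomic_compare_exchange_weak(&r->refs, &old, old - 1));

	return old == 1;
}

int main(void)
{
	refcount_t users;

	refcount_set(&users, 1);                              /* skb allocated */
	refcount_inc(&users);                                 /* second owner  */
	printf("users = %u\n", refcount_read(&users));        /* 2 */
	printf("free? %d\n", refcount_dec_and_test(&users));  /* 0 */
	printf("free? %d\n", refcount_dec_and_test(&users));  /* 1: last ref */
	return 0;
}
```

Because the common operations mirror atomic_t one-for-one, most of the conversion below is mechanical: atomic_inc becomes refcount_inc, atomic_read becomes refcount_read, and so on.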
Diffstat (limited to 'net')
 net/core/datagram.c      |  4
 net/core/dev.c           | 10
 net/core/netpoll.c       |  4
 net/core/pktgen.c        | 16
 net/core/rtnetlink.c     |  2
 net/core/skbuff.c        |  8
 net/dccp/ipv6.c          |  2
 net/ipv6/syncookies.c    |  2
 net/ipv6/tcp_ipv6.c      |  2
 net/key/af_key.c         |  4
 net/netlink/af_netlink.c |  6
 net/rxrpc/skbuff.c       | 12
 net/sctp/outqueue.c      |  2
 net/sctp/socket.c        |  2
 14 files changed, 38 insertions(+), 38 deletions(-)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e5311a7..95d4354 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -188,7 +188,7 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
 			}
 		}
 		*peeked = 1;
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 	} else {
 		__skb_unlink(skb, queue);
 		if (destructor)
@@ -358,7 +358,7 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
 	spin_lock_bh(&sk_queue->lock);
 	if (skb == skb_peek(sk_queue)) {
 		__skb_unlink(skb, sk_queue);
-		atomic_dec(&skb->users);
+		refcount_dec(&skb->users);
 		if (destructor)
 			destructor(sk, skb);
 		err = 0;
diff --git a/net/core/dev.c b/net/core/dev.c
index 88927f1..b999489 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1862,7 +1862,7 @@ static inline int deliver_skb(struct sk_buff *skb,
 {
 	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
 		return -ENOMEM;
-	atomic_inc(&skb->users);
+	refcount_inc(&skb->users);
 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 }
 
@@ -2484,10 +2484,10 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
 	if (unlikely(!skb))
 		return;
 
-	if (likely(atomic_read(&skb->users) == 1)) {
+	if (likely(refcount_read(&skb->users) == 1)) {
 		smp_rmb();
-		atomic_set(&skb->users, 0);
-	} else if (likely(!atomic_dec_and_test(&skb->users))) {
+		refcount_set(&skb->users, 0);
+	} else if (likely(!refcount_dec_and_test(&skb->users))) {
 		return;
 	}
 	get_kfree_skb_cb(skb)->reason = reason;
@@ -3955,7 +3955,7 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
 
 			clist = clist->next;
 
-			WARN_ON(atomic_read(&skb->users));
+			WARN_ON(refcount_read(&skb->users));
 			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
 				trace_consume_skb(skb);
 			else
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 37c1e34..a835155 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -277,7 +277,7 @@ static void zap_completion_queue(void)
 			struct sk_buff *skb = clist;
 			clist = clist->next;
 			if (!skb_irq_freeable(skb)) {
-				atomic_inc(&skb->users);
+				refcount_inc(&skb->users);
 				dev_kfree_skb_any(skb); /* put this one back */
 			} else {
 				__kfree_skb(skb);
@@ -309,7 +309,7 @@ repeat:
 		return NULL;
 	}
 
-	atomic_set(&skb->users, 1);
+	refcount_set(&skb->users, 1);
 	skb_reserve(skb, reserve);
 	return skb;
 }
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2dd42c5..6e1e10f 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3363,7 +3363,7 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
 {
 	ktime_t idle_start = ktime_get();
 
-	while (atomic_read(&(pkt_dev->skb->users)) != 1) {
+	while (refcount_read(&(pkt_dev->skb->users)) != 1) {
 		if (signal_pending(current))
 			break;
 
@@ -3420,7 +3420,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) {
 		skb = pkt_dev->skb;
 		skb->protocol = eth_type_trans(skb, skb->dev);
-		atomic_add(burst, &skb->users);
+		refcount_add(burst, &skb->users);
 		local_bh_disable();
 		do {
 			ret = netif_receive_skb(skb);
@@ -3428,11 +3428,11 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 				pkt_dev->errors++;
 			pkt_dev->sofar++;
 			pkt_dev->seq_num++;
-			if (atomic_read(&skb->users) != burst) {
+			if (refcount_read(&skb->users) != burst) {
 				/* skb was queued by rps/rfs or taps,
 				 * so cannot reuse this skb
 				 */
-				atomic_sub(burst - 1, &skb->users);
+				WARN_ON(refcount_sub_and_test(burst - 1, &skb->users));
 				/* get out of the loop and wait
 				 * until skb is consumed
 				 */
@@ -3446,7 +3446,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		goto out; /* Skips xmit_mode M_START_XMIT */
 	} else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
 		local_bh_disable();
-		atomic_inc(&pkt_dev->skb->users);
+		refcount_inc(&pkt_dev->skb->users);
 
 		ret = dev_queue_xmit(pkt_dev->skb);
 		switch (ret) {
@@ -3487,7 +3487,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		pkt_dev->last_ok = 0;
 		goto unlock;
 	}
-	atomic_add(burst, &pkt_dev->skb->users);
+	refcount_add(burst, &pkt_dev->skb->users);
 
 xmit_more:
 	ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0);
@@ -3513,11 +3513,11 @@ xmit_more:
 		/* fallthru */
 	case NETDEV_TX_BUSY:
 		/* Retry it next time */
-		atomic_dec(&(pkt_dev->skb->users));
+		refcount_dec(&(pkt_dev->skb->users));
 		pkt_dev->last_ok = 0;
 	}
 
 	if (unlikely(burst))
-		atomic_sub(burst, &pkt_dev->skb->users);
+		WARN_ON(refcount_sub_and_test(burst, &pkt_dev->skb->users));
 unlock:
 	HARD_TX_UNLOCK(odev, txq);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index ed51de5..d1ba909 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -649,7 +649,7 @@ int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int g
 	NETLINK_CB(skb).dst_group = group;
 
 	if (echo)
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
 	if (echo)
 		err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f75897a..45dc662 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -176,7 +176,7 @@ struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
 	memset(skb, 0, offsetof(struct sk_buff, tail));
 	skb->head = NULL;
 	skb->truesize = sizeof(struct sk_buff);
-	atomic_set(&skb->users, 1);
+	refcount_set(&skb->users, 1);
 
 	skb->mac_header = (typeof(skb->mac_header))~0U;
 out:
@@ -247,7 +247,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	/* Account for allocated memory : skb + skb->head */
 	skb->truesize = SKB_TRUESIZE(size);
 	skb->pfmemalloc = pfmemalloc;
-	atomic_set(&skb->users, 1);
+	refcount_set(&skb->users, 1);
 	skb->head = data;
 	skb->data = data;
 	skb_reset_tail_pointer(skb);
@@ -314,7 +314,7 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
 	memset(skb, 0, offsetof(struct sk_buff, tail));
 
 	skb->truesize = SKB_TRUESIZE(size);
-	atomic_set(&skb->users, 1);
+	refcount_set(&skb->users, 1);
 	skb->head = data;
 	skb->data = data;
 	skb_reset_tail_pointer(skb);
@@ -915,7 +915,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 	C(head_frag);
 	C(data);
 	C(truesize);
-	atomic_set(&n->users, 1);
+	refcount_set(&n->users, 1);
 
 	atomic_inc(&(skb_shinfo(skb)->dataref));
 	skb->cloned = 1;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 4fccc0c..c376af5 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -353,7 +353,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
 	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 		ireq->pktopts = skb;
 	}
 	ireq->ir_iif = sk->sk_bound_dev_if;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 2f7e99a..7b75b06 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -194,7 +194,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
 	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 		ireq->pktopts = skb;
 	}
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f85cbfc..f1a4881 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -734,7 +734,7 @@ static void tcp_v6_init_req(struct request_sock *req,
 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
 	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim ||
 	    np->repflow)) {
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 		ireq->pktopts = skb;
 	}
 }
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 376fdcf..287964a5 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -203,11 +203,11 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
 
 	sock_hold(sk);
 	if (*skb2 == NULL) {
-		if (atomic_read(&skb->users) != 1) {
+		if (refcount_read(&skb->users) != 1) {
 			*skb2 = skb_clone(skb, allocation);
 		} else {
 			*skb2 = skb;
-			atomic_inc(&skb->users);
+			refcount_inc(&skb->users);
 		}
 	}
 	if (*skb2 != NULL) {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a88745e..05030ad 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1848,7 +1848,7 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 	}
 
 	if (dst_group) {
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
 	}
 	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
@@ -2226,7 +2226,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
 	struct netlink_sock *nlk;
 	int ret;
 
-	atomic_inc(&skb->users);
+	refcount_inc(&skb->users);
 
 	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
 	if (sk == NULL) {
@@ -2431,7 +2431,7 @@ int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
 	int exclude_portid = 0;
 
 	if (report) {
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 		exclude_portid = portid;
 	}
 
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c
index 67b02c4..b8985d0 100644
--- a/net/rxrpc/skbuff.c
+++ b/net/rxrpc/skbuff.c
@@ -27,7 +27,7 @@ void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 {
 	const void *here = __builtin_return_address(0);
 	int n = atomic_inc_return(select_skb_count(op));
-	trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 }
 
 /*
@@ -38,7 +38,7 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 	const void *here = __builtin_return_address(0);
 	if (skb) {
 		int n = atomic_read(select_skb_count(op));
-		trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 	}
 }
 
@@ -49,7 +49,7 @@ void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 {
 	const void *here = __builtin_return_address(0);
 	int n = atomic_inc_return(select_skb_count(op));
-	trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 	skb_get(skb);
 }
 
@@ -63,7 +63,7 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 		int n;
 		CHECK_SLAB_OKAY(&skb->users);
 		n = atomic_dec_return(select_skb_count(op));
-		trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 		kfree_skb(skb);
 	}
 }
@@ -78,7 +78,7 @@ void rxrpc_lose_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 		int n;
 		CHECK_SLAB_OKAY(&skb->users);
 		n = atomic_dec_return(select_skb_count(op));
-		trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 		kfree_skb(skb);
 	}
 }
@@ -93,7 +93,7 @@ void rxrpc_purge_queue(struct sk_buff_head *list)
 	while ((skb = skb_dequeue((list))) != NULL) {
 		int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged));
 		trace_rxrpc_skb(skb, rxrpc_skb_rx_purged,
-				atomic_read(&skb->users), n, here);
+				refcount_read(&skb->users), n, here);
 		kfree_skb(skb);
 	}
 }
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 20299df..e876270 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1102,7 +1102,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
 				 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
 				 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
-				 atomic_read(&chunk->skb->users) : -1);
+				 refcount_read(&chunk->skb->users) : -1);
 
 			/* Add the chunk to the packet.  */
 			status = sctp_packet_transmit_chunk(packet, chunk, 0, gfp);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 7b6e20e..b497ee8 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -7563,7 +7563,7 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
 		if (flags & MSG_PEEK) {
 			skb = skb_peek(&sk->sk_receive_queue);
 			if (skb)
-				atomic_inc(&skb->users);
+				refcount_inc(&skb->users);
 		} else {
 			skb = __skb_dequeue(&sk->sk_receive_queue);
 		}
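
One conversion above is deliberately not mechanical: in pktgen, atomic_sub(burst - 1, &skb->users) becomes WARN_ON(refcount_sub_and_test(burst - 1, &skb->users)). The refcount API offers no plain subtraction; the _and_test variant reports whether the count reached zero, and since pktgen still holds its own reference to the skb at that point, hitting zero would indicate a bug, hence the WARN_ON(). A self-contained sketch of that pattern, continuing the illustrative userspace model above (assert() standing in for WARN_ON()):

```c
#include <assert.h>
#include <stdatomic.h>

#define REFCOUNT_SATURATED (~0u)

typedef struct { atomic_uint refs; } refcount_t;

/* Illustrative: returns 1 when the subtraction drops the count to zero;
 * saturates on underflow instead of wrapping below zero. */
static int refcount_sub_and_test(unsigned int n, refcount_t *r)
{
	unsigned int old = atomic_load(&r->refs);

	do {
		if (old == REFCOUNT_SATURATED)
			return 0;
		if (old < n) {
			/* would underflow: pin at saturation, never free */
			atomic_store(&r->refs, REFCOUNT_SATURATED);
			return 0;
		}
	} while (!atomic_compare_exchange_weak(&r->refs, &old, old - n));

	return old == n;
}

int main(void)
{
	refcount_t users;
	unsigned int burst = 4;

	/* pktgen-style: one skb with 'burst' references taken up front. */
	atomic_store(&users.refs, burst);

	/* Drop burst - 1 references. The caller still owns the last one,
	 * so reaching zero here would be a bug -- the patch wraps this
	 * call in WARN_ON() for exactly that reason. */
	assert(!refcount_sub_and_test(burst - 1, &users));
	assert(atomic_load(&users.refs) == 1);
	return 0;
}
```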