Diffstat (limited to 'net')
-rw-r--r-- | net/core/datagram.c    |  6
-rw-r--r-- | net/core/neighbour.c   |  1
-rw-r--r-- | net/core/rtnetlink.c   | 26
-rw-r--r-- | net/core/sock.c        | 33
-rw-r--r-- | net/ipv4/ipmr.c        |  2
-rw-r--r-- | net/ipv4/udp.c         | 14
-rw-r--r-- | net/ipv6/ip6_output.c  |  2
-rw-r--r-- | net/ipv6/ip6mr.c       |  2
-rw-r--r-- | net/ipv6/udp.c         |  5
-rw-r--r-- | net/iucv/af_iucv.c     |  2
-rw-r--r-- | net/netfilter/xt_TEE.c |  4
11 files changed, 70 insertions, 27 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e0097531..f5b6f43 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -229,15 +229,17 @@ EXPORT_SYMBOL(skb_free_datagram);
 
 void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
 {
+	bool slow;
+
 	if (likely(atomic_read(&skb->users) == 1))
 		smp_rmb();
 	else if (likely(!atomic_dec_and_test(&skb->users)))
 		return;
 
-	lock_sock_bh(sk);
+	slow = lock_sock_fast(sk);
 	skb_orphan(skb);
 	sk_mem_reclaim_partial(sk);
-	unlock_sock_bh(sk);
+	unlock_sock_fast(sk, slow);
 
 	/* skb is now orphaned, can be freed outside of locked section */
 	__kfree_skb(skb);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index bff3790..6ba1c0e 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -934,6 +934,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 				kfree_skb(buff);
 				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
 			}
+			skb_dst_force(skb);
 			__skb_queue_tail(&neigh->arp_queue, skb);
 		}
 		rc = 1;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 7ab86f3..1a2af24 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -650,11 +650,12 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev)
 	if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
 
 		int num_vfs = dev_num_vf(dev->dev.parent);
-		size_t size = nlmsg_total_size(sizeof(struct nlattr));
-		size += nlmsg_total_size(num_vfs * sizeof(struct nlattr));
-		size += num_vfs * (sizeof(struct ifla_vf_mac) +
-				   sizeof(struct ifla_vf_vlan) +
-				   sizeof(struct ifla_vf_tx_rate));
+		size_t size = nla_total_size(sizeof(struct nlattr));
+		size += nla_total_size(num_vfs * sizeof(struct nlattr));
+		size += num_vfs *
+			(nla_total_size(sizeof(struct ifla_vf_mac)) +
+			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
+			 nla_total_size(sizeof(struct ifla_vf_tx_rate)));
 		return size;
 	} else
 		return 0;
@@ -722,14 +723,13 @@ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
 
 	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
 		vf_port = nla_nest_start(skb, IFLA_VF_PORT);
-		if (!vf_port) {
-			nla_nest_cancel(skb, vf_ports);
-			return -EMSGSIZE;
-		}
+		if (!vf_port)
+			goto nla_put_failure;
 		NLA_PUT_U32(skb, IFLA_PORT_VF, vf);
 		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
+		if (err == -EMSGSIZE)
+			goto nla_put_failure;
 		if (err) {
-nla_put_failure:
 			nla_nest_cancel(skb, vf_port);
 			continue;
 		}
@@ -739,6 +739,10 @@ nla_put_failure:
 	nla_nest_end(skb, vf_ports);
 
 	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, vf_ports);
+	return -EMSGSIZE;
 }
 
 static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
@@ -753,7 +757,7 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
 	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
 	if (err) {
 		nla_nest_cancel(skb, port_self);
-		return err;
+		return (err == -EMSGSIZE) ? err : 0;
 	}
 
 	nla_nest_end(skb, port_self);
diff --git a/net/core/sock.c b/net/core/sock.c
index 37fe9b6..2cf7f9f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2007,6 +2007,39 @@ void release_sock(struct sock *sk)
 }
 EXPORT_SYMBOL(release_sock);
 
+/**
+ * lock_sock_fast - fast version of lock_sock
+ * @sk: socket
+ *
+ * This version should be used for very small section, where process wont block
+ * return false if fast path is taken
+ * sk_lock.slock locked, owned = 0, BH disabled
+ * return true if slow path is taken
+ * sk_lock.slock unlocked, owned = 1, BH enabled
+ */
+bool lock_sock_fast(struct sock *sk)
+{
+	might_sleep();
+	spin_lock_bh(&sk->sk_lock.slock);
+
+	if (!sk->sk_lock.owned)
+		/*
+		 * Note : We must disable BH
+		 */
+		return false;
+
+	__lock_sock(sk);
+	sk->sk_lock.owned = 1;
+	spin_unlock(&sk->sk_lock.slock);
+	/*
+	 * The sk_lock has mutex_lock() semantics here:
+	 */
+	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+	local_bh_enable();
+	return true;
+}
+EXPORT_SYMBOL(lock_sock_fast);
+
 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
 {
 	struct timeval tv;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 4588910..856123f 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1911,7 +1911,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 	struct rtattr *mp_head;
 
 	/* If cache is unresolved, don't try to parse IIF and OIF */
-	if (c->mfc_parent > MAXVIFS)
+	if (c->mfc_parent >= MAXVIFS)
 		return -ENOENT;
 
 	if (VIF_EXISTS(mrt, c->mfc_parent))
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index baeec29..5858574 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1063,10 +1063,11 @@ static unsigned int first_packet_length(struct sock *sk)
 	spin_unlock_bh(&rcvq->lock);
 
 	if (!skb_queue_empty(&list_kill)) {
-		lock_sock_bh(sk);
+		bool slow = lock_sock_fast(sk);
+
 		__skb_queue_purge(&list_kill);
 		sk_mem_reclaim_partial(sk);
-		unlock_sock_bh(sk);
+		unlock_sock_fast(sk, slow);
 	}
 	return res;
 }
@@ -1123,6 +1124,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	int peeked;
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
+	bool slow;
 
 	/*
 	 *	Check any passed addresses
@@ -1197,10 +1199,10 @@ out:
 	return err;
 
 csum_copy_err:
-	lock_sock_bh(sk);
+	slow = lock_sock_fast(sk);
 	if (!skb_kill_datagram(sk, skb, flags))
 		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-	unlock_sock_bh(sk);
+	unlock_sock_fast(sk, slow);
 
 	if (noblock)
 		return -EAGAIN;
@@ -1625,9 +1627,9 @@ int udp_rcv(struct sk_buff *skb)
 
 void udp_destroy_sock(struct sock *sk)
 {
-	lock_sock_bh(sk);
+	bool slow = lock_sock_fast(sk);
 	udp_flush_pending_frames(sk);
-	unlock_sock_bh(sk);
+	unlock_sock_fast(sk, slow);
 }
 
 /*
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index cd963f6..89425af 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -507,7 +507,7 @@ int ip6_forward(struct sk_buff *skb)
 	if (mtu < IPV6_MIN_MTU)
 		mtu = IPV6_MIN_MTU;
 
-	if (skb->len > mtu) {
+	if (skb->len > mtu && !skb_is_gso(skb)) {
 		/* Again, force OUTPUT device used as source address */
 		skb->dev = dst->dev;
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index bd9e7d3..073071f 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2017,7 +2017,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
 	struct rtattr *mp_head;
 
 	/* If cache is unresolved, don't try to parse IIF and OIF */
-	if (c->mf6c_parent > MAXMIFS)
+	if (c->mf6c_parent >= MAXMIFS)
 		return -ENOENT;
 
 	if (MIF_EXISTS(mrt, c->mf6c_parent))
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3d7a2c0..87be586 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -328,6 +328,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
 	int is_udp4;
+	bool slow;
 
 	if (addr_len)
 		*addr_len=sizeof(struct sockaddr_in6);
@@ -424,7 +425,7 @@ out:
 	return err;
 
 csum_copy_err:
-	lock_sock_bh(sk);
+	slow = lock_sock_fast(sk);
 	if (!skb_kill_datagram(sk, skb, flags)) {
 		if (is_udp4)
 			UDP_INC_STATS_USER(sock_net(sk),
@@ -433,7 +434,7 @@ csum_copy_err:
 			UDP6_INC_STATS_USER(sock_net(sk),
 					UDP_MIB_INERRORS, is_udplite);
 	}
-	unlock_sock_bh(sk);
+	unlock_sock_fast(sk, slow);
 
 	if (flags & MSG_DONTWAIT)
 		return -EAGAIN;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index c8b4599..9637e45 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1619,7 +1619,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
 save_message:
 	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
 	if (!save_msg)
-		return;
+		goto out_unlock;
 	save_msg->path = path;
 	save_msg->msg = *msg;
 
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index d7920d9f..859d9fd 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -76,7 +76,7 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
 	if (ip_route_output_key(net, &rt, &fl) != 0)
 		return false;
 
-	dst_release(skb_dst(skb));
+	skb_dst_drop(skb);
 	skb_dst_set(skb, &rt->u.dst);
 	skb->dev = rt->u.dst.dev;
 	skb->protocol = htons(ETH_P_IP);
@@ -157,7 +157,7 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
 	if (dst == NULL)
 		return false;
 
-	dst_release(skb_dst(skb));
+	skb_dst_drop(skb);
 	skb_dst_set(skb, dst);
 	skb->dev = dst->dev;
 	skb->protocol = htons(ETH_P_IPV6);
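
The lock_sock_fast()/unlock_sock_fast() pair added to net/core/sock.c above is what the UDP and datagram call sites are converted to use: the bool returned by lock_sock_fast() records whether the slow path (full lock_sock() semantics) was taken and must be handed back to unlock_sock_fast(). As a reference, here is a minimal caller sketch; the function name is hypothetical and not part of this commit, and it simply mirrors the skb_free_datagram_locked() conversion in net/core/datagram.c, assuming the declarations from include/net/sock.h.

/* Hypothetical caller sketch - not part of the commit above. */
#include <net/sock.h>

static void example_reclaim(struct sock *sk, struct sk_buff *skb)
{
	bool slow;

	/* Fast path keeps sk_lock.slock held with BH disabled;
	 * slow path takes the socket lock like lock_sock() would. */
	slow = lock_sock_fast(sk);
	skb_orphan(skb);
	sk_mem_reclaim_partial(sk);
	unlock_sock_fast(sk, slow);	/* spin_unlock_bh() or release_sock() */

	/* skb is orphaned, so it can be freed outside the locked section */
	__kfree_skb(skb);
}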