Diffstat (limited to 'net')
86 files changed, 1479 insertions, 498 deletions
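Two changes recur throughout this diff: pointer-clearing sites are converted from rcu_assign_pointer() to RCU_INIT_POINTER(), and the redundant .ndo_set_multicast_list callback is removed in favour of .ndo_set_rx_mode. As a reading aid, the sketch below (not taken from the patch; struct foo and global_foo are invented names) shows when each RCU assignment primitive applies: rcu_assign_pointer() carries the publish barrier needed when readers may immediately dereference a freshly initialised object, while RCU_INIT_POINTER() skips that barrier and is sufficient when storing NULL or when no reader can yet reach the pointer.

```c
/*
 * Illustrative sketch only (not part of the patch); struct foo and
 * global_foo are invented names used to show the conversion pattern.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;
};

static struct foo __rcu *global_foo;

static void foo_publish(struct foo *f)
{
	/* Readers may dereference right away, so the publish barrier
	 * in rcu_assign_pointer() is required here. */
	rcu_assign_pointer(global_foo, f);
}

static void foo_unpublish(void)
{
	struct foo *f = rcu_dereference_protected(global_foo, 1);

	/* Storing NULL publishes no data, so no barrier is needed and
	 * RCU_INIT_POINTER() suffices; the same holds for pointers no
	 * reader can reach yet. */
	RCU_INIT_POINTER(global_foo, NULL);
	if (f)
		kfree_rcu(f, rcu);
}
```

The other recurring change, dropping .ndo_set_multicast_list and keying __dev_set_rx_mode() off IFF_UNICAST_FLT, means drivers now express both unicast and multicast filtering through the single .ndo_set_rx_mode callback, as the hunks below show.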
diff --git a/net/802/garp.c b/net/802/garp.c index 1610295..070bf44 100644 --- a/net/802/garp.c +++ b/net/802/garp.c @@ -553,7 +553,7 @@ static void garp_release_port(struct net_device *dev) if (rtnl_dereference(port->applicants[i])) return; } - rcu_assign_pointer(dev->garp_port, NULL); + RCU_INIT_POINTER(dev->garp_port, NULL); kfree_rcu(port, rcu); } @@ -605,7 +605,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl ASSERT_RTNL(); - rcu_assign_pointer(port->applicants[appl->type], NULL); + RCU_INIT_POINTER(port->applicants[appl->type], NULL); /* Delete timer and generate a final TRANSMIT_PDU event to flush out * all pending messages before the applicant is gone. */ diff --git a/net/802/stp.c b/net/802/stp.c index 978c30b..0e136ef 100644 --- a/net/802/stp.c +++ b/net/802/stp.c @@ -88,9 +88,9 @@ void stp_proto_unregister(const struct stp_proto *proto) { mutex_lock(&stp_proto_mutex); if (is_zero_ether_addr(proto->group_address)) - rcu_assign_pointer(stp_proto, NULL); + RCU_INIT_POINTER(stp_proto, NULL); else - rcu_assign_pointer(garp_protos[proto->group_address[5] - + RCU_INIT_POINTER(garp_protos[proto->group_address[5] - GARP_ADDR_MIN], NULL); synchronize_rcu(); diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 8970ba1..5471628 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -133,7 +133,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) if (grp->nr_vlans == 0) { vlan_gvrp_uninit_applicant(real_dev); - rcu_assign_pointer(real_dev->vlgrp, NULL); + RCU_INIT_POINTER(real_dev->vlgrp, NULL); /* Free the group, after all cpu's are done. */ call_rcu(&grp->rcu, vlan_rcu_free); diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 9d40a07..eba705b 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -674,7 +674,6 @@ static const struct net_device_ops vlan_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = vlan_dev_set_mac_address, .ndo_set_rx_mode = vlan_dev_set_rx_mode, - .ndo_set_multicast_list = vlan_dev_set_rx_mode, .ndo_change_rx_flags = vlan_dev_change_rx_flags, .ndo_do_ioctl = vlan_dev_ioctl, .ndo_neigh_setup = vlan_dev_neigh_setup, diff --git a/net/atm/lec.c b/net/atm/lec.c index 215c9fa..f1964ca 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -643,7 +643,7 @@ static const struct net_device_ops lec_netdev_ops = { .ndo_start_xmit = lec_start_xmit, .ndo_change_mtu = lec_change_mtu, .ndo_tx_timeout = lec_tx_timeout, - .ndo_set_multicast_list = lec_set_multicast_list, + .ndo_set_rx_mode = lec_set_multicast_list, }; static const unsigned char lec_ctrl_magic[] = { diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c index d4f5dff..bc40864 100644 --- a/net/bluetooth/bnep/netdev.c +++ b/net/bluetooth/bnep/netdev.c @@ -217,7 +217,7 @@ static const struct net_device_ops bnep_netdev_ops = { .ndo_stop = bnep_net_close, .ndo_start_xmit = bnep_net_xmit, .ndo_validate_addr = eth_validate_addr, - .ndo_set_multicast_list = bnep_net_set_mc_list, + .ndo_set_rx_mode = bnep_net_set_mc_list, .ndo_set_mac_address = bnep_net_set_mac_addr, .ndo_tx_timeout = bnep_net_timeout, .ndo_change_mtu = eth_change_mtu, diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 32b8f9f..ee68eee 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -304,7 +304,7 @@ static const struct net_device_ops br_netdev_ops = { .ndo_start_xmit = br_dev_xmit, .ndo_get_stats64 = br_get_stats64, .ndo_set_mac_address = br_set_mac_address, - .ndo_set_multicast_list = 
br_dev_set_multicast_list, + .ndo_set_rx_mode = br_dev_set_multicast_list, .ndo_change_mtu = br_change_mtu, .ndo_do_ioctl = br_dev_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c index 1bcaf36a..40d8258 100644 --- a/net/bridge/netfilter/ebtable_broute.c +++ b/net/bridge/netfilter/ebtable_broute.c @@ -87,14 +87,14 @@ static int __init ebtable_broute_init(void) if (ret < 0) return ret; /* see br_input.c */ - rcu_assign_pointer(br_should_route_hook, + RCU_INIT_POINTER(br_should_route_hook, (br_should_route_hook_t *)ebt_broute); return 0; } static void __exit ebtable_broute_fini(void) { - rcu_assign_pointer(br_should_route_hook, NULL); + RCU_INIT_POINTER(br_should_route_hook, NULL); synchronize_net(); unregister_pernet_subsys(&broute_net_ops); } diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c index c23979e..b36f24a 100644 --- a/net/caif/cfmuxl.c +++ b/net/caif/cfmuxl.c @@ -108,7 +108,7 @@ struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid) int idx = phyid % DN_CACHE_SIZE; spin_lock_bh(&muxl->transmit_lock); - rcu_assign_pointer(muxl->dn_cache[idx], NULL); + RCU_INIT_POINTER(muxl->dn_cache[idx], NULL); dn = get_from_id(&muxl->frml_list, phyid); if (dn == NULL) goto out; @@ -164,7 +164,7 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id) if (up == NULL) goto out; - rcu_assign_pointer(muxl->up_cache[idx], NULL); + RCU_INIT_POINTER(muxl->up_cache[idx], NULL); list_del_rcu(&up->node); out: spin_unlock_bh(&muxl->receive_lock); @@ -261,7 +261,7 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, idx = layer->id % UP_CACHE_SIZE; spin_lock_bh(&muxl->receive_lock); - rcu_assign_pointer(muxl->up_cache[idx], NULL); + RCU_INIT_POINTER(muxl->up_cache[idx], NULL); list_del_rcu(&layer->node); spin_unlock_bh(&muxl->receive_lock); } diff --git a/net/can/af_can.c b/net/can/af_can.c index 8ce926d..b9efa94 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -719,7 +719,7 @@ int can_proto_register(const struct can_proto *cp) proto); err = -EBUSY; } else - rcu_assign_pointer(proto_tab[proto], cp); + RCU_INIT_POINTER(proto_tab[proto], cp); mutex_unlock(&proto_tab_lock); @@ -740,7 +740,7 @@ void can_proto_unregister(const struct can_proto *cp) mutex_lock(&proto_tab_lock); BUG_ON(proto_tab[proto] != cp); - rcu_assign_pointer(proto_tab[proto], NULL); + RCU_INIT_POINTER(proto_tab[proto], NULL); mutex_unlock(&proto_tab_lock); synchronize_rcu(); diff --git a/net/core/dev.c b/net/core/dev.c index 17d67b5..c2442b4 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -133,6 +133,8 @@ #include <linux/pci.h> #include <linux/inetdevice.h> #include <linux/cpu_rmap.h> +#include <linux/if_tunnel.h> +#include <linux/if_pppox.h> #include "net-sysfs.h" @@ -2519,24 +2521,29 @@ static inline void ____napi_schedule(struct softnet_data *sd, /* * __skb_get_rxhash: calculate a flow hash based on src/dst addresses - * and src/dst port numbers. Returns a non-zero hash number on success - * and 0 on failure. + * and src/dst port numbers. Sets rxhash in skb to non-zero hash value + * on success, zero indicates no valid hash. Also, sets l4_rxhash in skb + * if hash is a canonical 4-tuple hash over transport ports. 
*/ -__u32 __skb_get_rxhash(struct sk_buff *skb) +void __skb_get_rxhash(struct sk_buff *skb) { int nhoff, hash = 0, poff; const struct ipv6hdr *ip6; const struct iphdr *ip; + const struct vlan_hdr *vlan; u8 ip_proto; - u32 addr1, addr2, ihl; + u32 addr1, addr2; + u16 proto; union { u32 v32; u16 v16[2]; } ports; nhoff = skb_network_offset(skb); + proto = skb->protocol; - switch (skb->protocol) { +again: + switch (proto) { case __constant_htons(ETH_P_IP): if (!pskb_may_pull(skb, sizeof(*ip) + nhoff)) goto done; @@ -2548,7 +2555,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb) ip_proto = ip->protocol; addr1 = (__force u32) ip->saddr; addr2 = (__force u32) ip->daddr; - ihl = ip->ihl; + nhoff += ip->ihl * 4; break; case __constant_htons(ETH_P_IPV6): if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff)) @@ -2558,20 +2565,62 @@ __u32 __skb_get_rxhash(struct sk_buff *skb) ip_proto = ip6->nexthdr; addr1 = (__force u32) ip6->saddr.s6_addr32[3]; addr2 = (__force u32) ip6->daddr.s6_addr32[3]; - ihl = (40 >> 2); + nhoff += 40; break; + case __constant_htons(ETH_P_8021Q): + if (!pskb_may_pull(skb, sizeof(*vlan) + nhoff)) + goto done; + vlan = (const struct vlan_hdr *) (skb->data + nhoff); + proto = vlan->h_vlan_encapsulated_proto; + nhoff += sizeof(*vlan); + goto again; + case __constant_htons(ETH_P_PPP_SES): + if (!pskb_may_pull(skb, PPPOE_SES_HLEN + nhoff)) + goto done; + proto = *((__be16 *) (skb->data + nhoff + + sizeof(struct pppoe_hdr))); + nhoff += PPPOE_SES_HLEN; + goto again; default: goto done; } + switch (ip_proto) { + case IPPROTO_GRE: + if (pskb_may_pull(skb, nhoff + 16)) { + u8 *h = skb->data + nhoff; + __be16 flags = *(__be16 *)h; + + /* + * Only look inside GRE if version zero and no + * routing + */ + if (!(flags & (GRE_VERSION|GRE_ROUTING))) { + proto = *(__be16 *)(h + 2); + nhoff += 4; + if (flags & GRE_CSUM) + nhoff += 4; + if (flags & GRE_KEY) + nhoff += 4; + if (flags & GRE_SEQ) + nhoff += 4; + goto again; + } + } + break; + default: + break; + } + ports.v32 = 0; poff = proto_ports_offset(ip_proto); if (poff >= 0) { - nhoff += ihl * 4 + poff; + nhoff += poff; if (pskb_may_pull(skb, nhoff + 4)) { ports.v32 = * (__force u32 *) (skb->data + nhoff); if (ports.v16[1] < ports.v16[0]) swap(ports.v16[0], ports.v16[1]); + skb->l4_rxhash = 1; } } @@ -2584,7 +2633,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb) hash = 1; done: - return hash; + skb->rxhash = hash; } EXPORT_SYMBOL(__skb_get_rxhash); @@ -2673,13 +2722,13 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, map = rcu_dereference(rxqueue->rps_map); if (map) { if (map->len == 1 && - !rcu_dereference_raw(rxqueue->rps_flow_table)) { + !rcu_access_pointer(rxqueue->rps_flow_table)) { tcpu = map->cpus[0]; if (cpu_online(tcpu)) cpu = tcpu; goto done; } - } else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) { + } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) { goto done; } @@ -3094,8 +3143,8 @@ void netdev_rx_handler_unregister(struct net_device *dev) { ASSERT_RTNL(); - rcu_assign_pointer(dev->rx_handler, NULL); - rcu_assign_pointer(dev->rx_handler_data, NULL); + RCU_INIT_POINTER(dev->rx_handler, NULL); + RCU_INIT_POINTER(dev->rx_handler_data, NULL); } EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); @@ -4489,9 +4538,7 @@ void __dev_set_rx_mode(struct net_device *dev) if (!netif_device_present(dev)) return; - if (ops->ndo_set_rx_mode) - ops->ndo_set_rx_mode(dev); - else { + if (!(dev->priv_flags & IFF_UNICAST_FLT)) { /* Unicast addresses changes may only happen under the rtnl, * therefore calling 
__dev_set_promiscuity here is safe. */ @@ -4502,10 +4549,10 @@ void __dev_set_rx_mode(struct net_device *dev) __dev_set_promiscuity(dev, -1); dev->uc_promisc = false; } - - if (ops->ndo_set_multicast_list) - ops->ndo_set_multicast_list(dev); } + + if (ops->ndo_set_rx_mode) + ops->ndo_set_rx_mode(dev); } void dev_set_rx_mode(struct net_device *dev) @@ -4855,7 +4902,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) return -EOPNOTSUPP; case SIOCADDMULTI: - if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || + if (!ops->ndo_set_rx_mode || ifr->ifr_hwaddr.sa_family != AF_UNSPEC) return -EINVAL; if (!netif_device_present(dev)) @@ -4863,7 +4910,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data); case SIOCDELMULTI: - if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || + if (!ops->ndo_set_rx_mode || ifr->ifr_hwaddr.sa_family != AF_UNSPEC) return -EINVAL; if (!netif_device_present(dev)) @@ -5727,8 +5774,8 @@ void netdev_run_todo(void) /* paranoia */ BUG_ON(netdev_refcnt_read(dev)); - WARN_ON(rcu_dereference_raw(dev->ip_ptr)); - WARN_ON(rcu_dereference_raw(dev->ip6_ptr)); + WARN_ON(rcu_access_pointer(dev->ip_ptr)); + WARN_ON(rcu_access_pointer(dev->ip6_ptr)); WARN_ON(dev->dn_ptr); if (dev->destructor) @@ -5932,7 +5979,7 @@ void free_netdev(struct net_device *dev) kfree(dev->_rx); #endif - kfree(rcu_dereference_raw(dev->ingress_queue)); + kfree(rcu_dereference_protected(dev->ingress_queue, 1)); /* Flush device addresses */ dev_addr_flush(dev); diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index e2e6693..283d1b8 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -591,8 +591,8 @@ EXPORT_SYMBOL(dev_mc_del_global); * addresses that have no users left. The source device must be * locked by netif_tx_lock_bh. * - * This function is intended to be called from the dev->set_multicast_list - * or dev->set_rx_mode function of layered software devices. + * This function is intended to be called from the ndo_set_rx_mode + * function of layered software devices. 
*/ int dev_mc_sync(struct net_device *to, struct net_device *from) { diff --git a/net/core/dst.c b/net/core/dst.c index 14b33baf..d5e2c4c 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -171,7 +171,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev, dst_init_metrics(dst, dst_default_metrics, true); dst->expires = 0UL; dst->path = dst; - dst->_neighbour = NULL; + RCU_INIT_POINTER(dst->_neighbour, NULL); #ifdef CONFIG_XFRM dst->xfrm = NULL; #endif @@ -229,11 +229,11 @@ struct dst_entry *dst_destroy(struct dst_entry * dst) smp_rmb(); again: - neigh = dst->_neighbour; + neigh = rcu_dereference_protected(dst->_neighbour, 1); child = dst->child; if (neigh) { - dst->_neighbour = NULL; + RCU_INIT_POINTER(dst->_neighbour, NULL); neigh_release(neigh); } @@ -360,14 +360,19 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev, if (!unregister) { dst->input = dst->output = dst_discard; } else { + struct neighbour *neigh; + dst->dev = dev_net(dst->dev)->loopback_dev; dev_hold(dst->dev); dev_put(dev); - if (dst->_neighbour && dst->_neighbour->dev == dev) { - dst->_neighbour->dev = dst->dev; + rcu_read_lock(); + neigh = dst_get_neighbour(dst); + if (neigh && neigh->dev == dev) { + neigh->dev = dst->dev; dev_hold(dst->dev); dev_put(dev); } + rcu_read_unlock(); } } diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index e7ab0c0..67c5c28 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -487,7 +487,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) if (ops->nr_goto_rules > 0) { list_for_each_entry(tmp, &ops->rules_list, list) { if (rtnl_dereference(tmp->ctarget) == rule) { - rcu_assign_pointer(tmp->ctarget, NULL); + RCU_INIT_POINTER(tmp->ctarget, NULL); ops->unresolved_rules++; } } @@ -545,7 +545,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, frh->flags = rule->flags; if (rule->action == FR_ACT_GOTO && - rcu_dereference_raw(rule->ctarget) == NULL) + rcu_access_pointer(rule->ctarget) == NULL) frh->flags |= FIB_RULE_UNRESOLVED; if (rule->iifname[0]) { diff --git a/net/core/filter.c b/net/core/filter.c index 36f975f..8fcc2d7 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -645,7 +645,7 @@ int sk_detach_filter(struct sock *sk) filter = rcu_dereference_protected(sk->sk_filter, sock_owned_by_user(sk)); if (filter) { - rcu_assign_pointer(sk->sk_filter, NULL); + RCU_INIT_POINTER(sk->sk_filter, NULL); sk_filter_uncharge(sk, filter); ret = 0; } diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 8fab9b0..4002261 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -844,6 +844,19 @@ static void neigh_invalidate(struct neighbour *neigh) skb_queue_purge(&neigh->arp_queue); } +static void neigh_probe(struct neighbour *neigh) + __releases(neigh->lock) +{ + struct sk_buff *skb = skb_peek(&neigh->arp_queue); + /* keep skb alive even if arp_queue overflows */ + if (skb) + skb = skb_copy(skb, GFP_ATOMIC); + write_unlock(&neigh->lock); + neigh->ops->solicit(neigh, skb); + atomic_inc(&neigh->probes); + kfree_skb(skb); +} + /* Called when a timer expires for a neighbour entry. 
*/ static void neigh_timer_handler(unsigned long arg) @@ -920,14 +933,7 @@ static void neigh_timer_handler(unsigned long arg) neigh_hold(neigh); } if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) { - struct sk_buff *skb = skb_peek(&neigh->arp_queue); - /* keep skb alive even if arp_queue overflows */ - if (skb) - skb = skb_copy(skb, GFP_ATOMIC); - write_unlock(&neigh->lock); - neigh->ops->solicit(neigh, skb); - atomic_inc(&neigh->probes); - kfree_skb(skb); + neigh_probe(neigh); } else { out: write_unlock(&neigh->lock); @@ -942,7 +948,7 @@ out: int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) { int rc; - unsigned long now; + bool immediate_probe = false; write_lock_bh(&neigh->lock); @@ -950,14 +956,16 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE)) goto out_unlock_bh; - now = jiffies; - if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) { if (neigh->parms->mcast_probes + neigh->parms->app_probes) { + unsigned long next, now = jiffies; + atomic_set(&neigh->probes, neigh->parms->ucast_probes); neigh->nud_state = NUD_INCOMPLETE; - neigh->updated = jiffies; - neigh_add_timer(neigh, now + 1); + neigh->updated = now; + next = now + max(neigh->parms->retrans_time, HZ/2); + neigh_add_timer(neigh, next); + immediate_probe = true; } else { neigh->nud_state = NUD_FAILED; neigh->updated = jiffies; @@ -989,7 +997,11 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) rc = 1; } out_unlock_bh: - write_unlock_bh(&neigh->lock); + if (immediate_probe) + neigh_probe(neigh); + else + write_unlock(&neigh->lock); + local_bh_enable(); return rc; } EXPORT_SYMBOL(__neigh_event_send); diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 1683e5d..56e42ab 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -712,13 +712,13 @@ static void rx_queue_release(struct kobject *kobj) struct rps_dev_flow_table *flow_table; - map = rcu_dereference_raw(queue->rps_map); + map = rcu_dereference_protected(queue->rps_map, 1); if (map) { RCU_INIT_POINTER(queue->rps_map, NULL); kfree_rcu(map, rcu); } - flow_table = rcu_dereference_raw(queue->rps_flow_table); + flow_table = rcu_dereference_protected(queue->rps_flow_table, 1); if (flow_table) { RCU_INIT_POINTER(queue->rps_flow_table, NULL); call_rcu(&flow_table->rcu, rps_dev_flow_table_release); @@ -987,10 +987,10 @@ static ssize_t store_xps_map(struct netdev_queue *queue, } if (nonempty) - rcu_assign_pointer(dev->xps_maps, new_dev_maps); + RCU_INIT_POINTER(dev->xps_maps, new_dev_maps); else { kfree(new_dev_maps); - rcu_assign_pointer(dev->xps_maps, NULL); + RCU_INIT_POINTER(dev->xps_maps, NULL); } if (dev_maps) diff --git a/net/core/netpoll.c b/net/core/netpoll.c index adf84dd..d676a56 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -760,7 +760,7 @@ int __netpoll_setup(struct netpoll *np) } /* last thing to do is link it to the net device structure */ - rcu_assign_pointer(ndev->npinfo, npinfo); + RCU_INIT_POINTER(ndev->npinfo, npinfo); return 0; @@ -901,7 +901,7 @@ void __netpoll_cleanup(struct netpoll *np) if (ops->ndo_netpoll_cleanup) ops->ndo_netpoll_cleanup(np->dev); - rcu_assign_pointer(np->dev->npinfo, NULL); + RCU_INIT_POINTER(np->dev->npinfo, NULL); /* avoid racing with NAPI reading npinfo */ synchronize_rcu_bh(); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 99d9e95..39f8dd6 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1604,7 +1604,6 @@ struct net_device 
*rtnl_create_link(struct net *src_net, struct net *net, dev_net_set(dev, net); dev->rtnl_link_ops = ops; dev->rtnl_link_state = RTNL_LINK_INITIALIZING; - dev->real_num_tx_queues = real_num_queues; if (tb[IFLA_MTU]) dev->mtu = nla_get_u32(tb[IFLA_MTU]); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 27002df..edb66f3 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -529,6 +529,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) new->mac_header = old->mac_header; skb_dst_copy(new, old); new->rxhash = old->rxhash; + new->l4_rxhash = old->l4_rxhash; #ifdef CONFIG_XFRM new->sp = secpath_get(old->sp); #endif diff --git a/net/core/sock.c b/net/core/sock.c index bc745d0..9997026 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -387,7 +387,7 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { sk_tx_queue_clear(sk); - rcu_assign_pointer(sk->sk_dst_cache, NULL); + RCU_INIT_POINTER(sk->sk_dst_cache, NULL); dst_release(dst); return NULL; } @@ -1158,7 +1158,7 @@ static void __sk_free(struct sock *sk) atomic_read(&sk->sk_wmem_alloc) == 0); if (filter) { sk_filter_uncharge(sk, filter); - rcu_assign_pointer(sk->sk_filter, NULL); + RCU_INIT_POINTER(sk->sk_filter, NULL); } sock_disable_timestamp(sk, SOCK_TIMESTAMP); diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index 0462040..67164bb 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c @@ -85,7 +85,6 @@ static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) { - struct dccp_sock *dp = dccp_sk(sk); u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2); /* @@ -98,14 +97,33 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio); val = max_ratio; } - if (val > DCCPF_ACK_RATIO_MAX) - val = DCCPF_ACK_RATIO_MAX; + dccp_feat_signal_nn_change(sk, DCCPF_ACK_RATIO, + min_t(u32, val, DCCPF_ACK_RATIO_MAX)); +} - if (val == dp->dccps_l_ack_ratio) - return; +static void ccid2_check_l_ack_ratio(struct sock *sk) +{ + struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); - ccid2_pr_debug("changing local ack ratio to %u\n", val); - dp->dccps_l_ack_ratio = val; + /* + * After a loss, idle period, application limited period, or RTO we + * need to check that the ack ratio is still less than the congestion + * window. Otherwise, we will send an entire congestion window of + * packets and got no response because we haven't sent ack ratio + * packets yet. + * If the ack ratio does need to be reduced, we reduce it to half of + * the congestion window (or 1 if that's zero) instead of to the + * congestion window. This prevents problems if one ack is lost. + */ + if (dccp_feat_nn_get(sk, DCCPF_ACK_RATIO) > hc->tx_cwnd) + ccid2_change_l_ack_ratio(sk, hc->tx_cwnd/2 ? 
: 1U); +} + +static void ccid2_change_l_seq_window(struct sock *sk, u64 val) +{ + dccp_feat_signal_nn_change(sk, DCCPF_SEQUENCE_WINDOW, + clamp_val(val, DCCPF_SEQ_WMIN, + DCCPF_SEQ_WMAX)); } static void ccid2_hc_tx_rto_expire(unsigned long data) @@ -187,6 +205,8 @@ static void ccid2_cwnd_application_limited(struct sock *sk, const u32 now) } hc->tx_cwnd_used = 0; hc->tx_cwnd_stamp = now; + + ccid2_check_l_ack_ratio(sk); } /* This borrows the code of tcp_cwnd_restart() */ @@ -205,6 +225,8 @@ static void ccid2_cwnd_restart(struct sock *sk, const u32 now) hc->tx_cwnd_stamp = now; hc->tx_cwnd_used = 0; + + ccid2_check_l_ack_ratio(sk); } static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len) @@ -405,17 +427,37 @@ static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp, unsigned int *maxincr) { struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); - - if (hc->tx_cwnd < hc->tx_ssthresh) { - if (*maxincr > 0 && ++hc->tx_packets_acked == 2) { + struct dccp_sock *dp = dccp_sk(sk); + int r_seq_used = hc->tx_cwnd / dp->dccps_l_ack_ratio; + + if (hc->tx_cwnd < dp->dccps_l_seq_win && + r_seq_used < dp->dccps_r_seq_win) { + if (hc->tx_cwnd < hc->tx_ssthresh) { + if (*maxincr > 0 && ++hc->tx_packets_acked >= 2) { + hc->tx_cwnd += 1; + *maxincr -= 1; + hc->tx_packets_acked = 0; + } + } else if (++hc->tx_packets_acked >= hc->tx_cwnd) { hc->tx_cwnd += 1; - *maxincr -= 1; hc->tx_packets_acked = 0; } - } else if (++hc->tx_packets_acked >= hc->tx_cwnd) { - hc->tx_cwnd += 1; - hc->tx_packets_acked = 0; } + + /* + * Adjust the local sequence window and the ack ratio to allow about + * 5 times the number of packets in the network (RFC 4340 7.5.2) + */ + if (r_seq_used * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_r_seq_win) + ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio * 2); + else if (r_seq_used * CCID2_WIN_CHANGE_FACTOR < dp->dccps_r_seq_win/2) + ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio / 2 ? : 1U); + + if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_l_seq_win) + ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win * 2); + else if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR < dp->dccps_l_seq_win/2) + ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win / 2); + /* * FIXME: RTT is sampled several times per acknowledgment (for each * entry in the Ack Vector), instead of once per Ack (as in TCP SACK). @@ -441,9 +483,7 @@ static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp) hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U; hc->tx_ssthresh = max(hc->tx_cwnd, 2U); - /* Avoid spurious timeouts resulting from Ack Ratio > cwnd */ - if (dccp_sk(sk)->dccps_l_ack_ratio > hc->tx_cwnd) - ccid2_change_l_ack_ratio(sk, hc->tx_cwnd); + ccid2_check_l_ack_ratio(sk); } static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type, @@ -494,8 +534,16 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) if (hc->tx_rpdupack >= NUMDUPACK) { hc->tx_rpdupack = -1; /* XXX lame */ hc->tx_rpseq = 0; - +#ifdef __CCID2_COPES_GRACEFULLY_WITH_ACK_CONGESTION_CONTROL__ + /* + * FIXME: Ack Congestion Control is broken; in + * the current state instabilities occurred with + * Ack Ratios greater than 1; causing hang-ups + * and long RTO timeouts. This needs to be fixed + * before opening up dynamic changes. 
-- gerrit + */ ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio); +#endif } } } diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h index f585d33..18c9754 100644 --- a/net/dccp/ccids/ccid2.h +++ b/net/dccp/ccids/ccid2.h @@ -43,6 +43,12 @@ struct ccid2_seq { #define CCID2_SEQBUF_LEN 1024 #define CCID2_SEQBUF_MAX 128 +/* + * Multiple of congestion window to keep the sequence window at + * (RFC 4340 7.5.2) + */ +#define CCID2_WIN_CHANGE_FACTOR 5 + /** * struct ccid2_hc_tx_sock - CCID2 TX half connection * @tx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5 diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 5fdb072..583490a 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -474,6 +474,7 @@ static inline int dccp_ack_pending(const struct sock *sk) return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk); } +extern int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val); extern int dccp_feat_finalise_settings(struct dccp_sock *dp); extern int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq); extern int dccp_feat_insert_opts(struct dccp_sock*, struct dccp_request_sock*, diff --git a/net/dccp/feat.c b/net/dccp/feat.c index 568def9..23cea0e 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c @@ -12,6 +12,7 @@ * ----------- * o Feature negotiation is coordinated with connection setup (as in TCP), wild * changes of parameters of an established connection are not supported. + * o Changing non-negotiable (NN) values is supported in state OPEN/PARTOPEN. * o All currently known SP features have 1-byte quantities. If in the future * extensions of RFCs 4340..42 define features with item lengths larger than * one byte, a feature-specific extension of the code will be required. @@ -343,6 +344,20 @@ static int __dccp_feat_activate(struct sock *sk, const int idx, return dccp_feat_table[idx].activation_hdlr(sk, val, rx); } +/** + * dccp_feat_activate - Activate feature value on socket + * @sk: fully connected DCCP socket (after handshake is complete) + * @feat_num: feature to activate, one of %dccp_feature_numbers + * @local: whether local (1) or remote (0) @feat_num is meant + * @fval: the value (SP or NN) to activate, or NULL to use the default value + * For general use this function is preferable over __dccp_feat_activate(). + */ +static int dccp_feat_activate(struct sock *sk, u8 feat_num, bool local, + dccp_feat_val const *fval) +{ + return __dccp_feat_activate(sk, dccp_feat_index(feat_num), local, fval); +} + /* Test for "Req'd" feature (RFC 4340, 6.4) */ static inline int dccp_feat_must_be_understood(u8 feat_num) { @@ -650,11 +665,22 @@ int dccp_feat_insert_opts(struct dccp_sock *dp, struct dccp_request_sock *dreq, return -1; if (pos->needs_mandatory && dccp_insert_option_mandatory(skb)) return -1; - /* - * Enter CHANGING after transmitting the Change option (6.6.2). - */ - if (pos->state == FEAT_INITIALISING) - pos->state = FEAT_CHANGING; + + if (skb->sk->sk_state == DCCP_OPEN && + (opt == DCCPO_CONFIRM_R || opt == DCCPO_CONFIRM_L)) { + /* + * Confirms don't get retransmitted (6.6.3) once the + * connection is in state OPEN + */ + dccp_feat_list_pop(pos); + } else { + /* + * Enter CHANGING after transmitting the Change + * option (6.6.2). 
+ */ + if (pos->state == FEAT_INITIALISING) + pos->state = FEAT_CHANGING; + } } return 0; } @@ -730,6 +756,70 @@ int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local, 0, list, len); } +/** + * dccp_feat_nn_get - Query current/pending value of NN feature + * @sk: DCCP socket of an established connection + * @feat: NN feature number from %dccp_feature_numbers + * For a known NN feature, returns value currently being negotiated, or + * current (confirmed) value if no negotiation is going on. + */ +u64 dccp_feat_nn_get(struct sock *sk, u8 feat) +{ + if (dccp_feat_type(feat) == FEAT_NN) { + struct dccp_sock *dp = dccp_sk(sk); + struct dccp_feat_entry *entry; + + entry = dccp_feat_list_lookup(&dp->dccps_featneg, feat, 1); + if (entry != NULL) + return entry->val.nn; + + switch (feat) { + case DCCPF_ACK_RATIO: + return dp->dccps_l_ack_ratio; + case DCCPF_SEQUENCE_WINDOW: + return dp->dccps_l_seq_win; + } + } + DCCP_BUG("attempt to look up unsupported feature %u", feat); + return 0; +} +EXPORT_SYMBOL_GPL(dccp_feat_nn_get); + +/** + * dccp_feat_signal_nn_change - Update NN values for an established connection + * @sk: DCCP socket of an established connection + * @feat: NN feature number from %dccp_feature_numbers + * @nn_val: the new value to use + * This function is used to communicate NN updates out-of-band. + */ +int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val) +{ + struct list_head *fn = &dccp_sk(sk)->dccps_featneg; + dccp_feat_val fval = { .nn = nn_val }; + struct dccp_feat_entry *entry; + + if (sk->sk_state != DCCP_OPEN && sk->sk_state != DCCP_PARTOPEN) + return 0; + + if (dccp_feat_type(feat) != FEAT_NN || + !dccp_feat_is_valid_nn_val(feat, nn_val)) + return -EINVAL; + + if (nn_val == dccp_feat_nn_get(sk, feat)) + return 0; /* already set or negotiation under way */ + + entry = dccp_feat_list_lookup(fn, feat, 1); + if (entry != NULL) { + dccp_pr_debug("Clobbering existing NN entry %llu -> %llu\n", + (unsigned long long)entry->val.nn, + (unsigned long long)nn_val); + dccp_feat_list_pop(entry); + } + + inet_csk_schedule_ack(sk); + return dccp_feat_push_change(fn, feat, 1, 0, &fval); +} +EXPORT_SYMBOL_GPL(dccp_feat_signal_nn_change); /* * Tracking features whose value depend on the choice of CCID @@ -1187,6 +1277,100 @@ confirmation_failed: } /** + * dccp_feat_handle_nn_established - Fast-path reception of NN options + * @sk: socket of an established DCCP connection + * @mandatory: whether @opt was preceded by a Mandatory option + * @opt: %DCCPO_CHANGE_L | %DCCPO_CONFIRM_R (NN only) + * @feat: NN number, one of %dccp_feature_numbers + * @val: NN value + * @len: length of @val in bytes + * This function combines the functionality of change_recv/confirm_recv, with + * the following differences (reset codes are the same): + * - cleanup after receiving the Confirm; + * - values are directly activated after successful parsing; + * - deliberately restricted to NN features. + * The restriction to NN features is essential since SP features can have non- + * predictable outcomes (depending on the remote configuration), and are inter- + * dependent (CCIDs for instance cause further dependencies). 
+ */ +static u8 dccp_feat_handle_nn_established(struct sock *sk, u8 mandatory, u8 opt, + u8 feat, u8 *val, u8 len) +{ + struct list_head *fn = &dccp_sk(sk)->dccps_featneg; + const bool local = (opt == DCCPO_CONFIRM_R); + struct dccp_feat_entry *entry; + u8 type = dccp_feat_type(feat); + dccp_feat_val fval; + + dccp_feat_print_opt(opt, feat, val, len, mandatory); + + /* Ignore non-mandatory unknown and non-NN features */ + if (type == FEAT_UNKNOWN) { + if (local && !mandatory) + return 0; + goto fast_path_unknown; + } else if (type != FEAT_NN) { + return 0; + } + + /* + * We don't accept empty Confirms, since in fast-path feature + * negotiation the values are enabled immediately after sending + * the Change option. + * Empty Changes on the other hand are invalid (RFC 4340, 6.1). + */ + if (len == 0 || len > sizeof(fval.nn)) + goto fast_path_unknown; + + if (opt == DCCPO_CHANGE_L) { + fval.nn = dccp_decode_value_var(val, len); + if (!dccp_feat_is_valid_nn_val(feat, fval.nn)) + goto fast_path_unknown; + + if (dccp_feat_push_confirm(fn, feat, local, &fval) || + dccp_feat_activate(sk, feat, local, &fval)) + return DCCP_RESET_CODE_TOO_BUSY; + + /* set the `Ack Pending' flag to piggyback a Confirm */ + inet_csk_schedule_ack(sk); + + } else if (opt == DCCPO_CONFIRM_R) { + entry = dccp_feat_list_lookup(fn, feat, local); + if (entry == NULL || entry->state != FEAT_CHANGING) + return 0; + + fval.nn = dccp_decode_value_var(val, len); + /* + * Just ignore a value that doesn't match our current value. + * If the option changes twice within two RTTs, then at least + * one CONFIRM will be received for the old value after a + * new CHANGE was sent. + */ + if (fval.nn != entry->val.nn) + return 0; + + /* Only activate after receiving the Confirm option (6.6.1). */ + dccp_feat_activate(sk, feat, local, &fval); + + /* It has been confirmed - so remove the entry */ + dccp_feat_list_pop(entry); + + } else { + DCCP_WARN("Received illegal option %u\n", opt); + goto fast_path_failed; + } + return 0; + +fast_path_unknown: + if (!mandatory) + return dccp_push_empty_confirm(fn, feat, local); + +fast_path_failed: + return mandatory ? DCCP_RESET_CODE_MANDATORY_ERROR + : DCCP_RESET_CODE_OPTION_ERROR; +} + +/** * dccp_feat_parse_options - Process Feature-Negotiation Options * @sk: for general use and used by the client during connection setup * @dreq: used by the server during connection setup @@ -1221,6 +1405,14 @@ int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq, return dccp_feat_confirm_recv(fn, mandatory, opt, feat, val, len, server); } + break; + /* + * Support for exchanging NN options on an established connection. 
+ */ + case DCCP_OPEN: + case DCCP_PARTOPEN: + return dccp_feat_handle_nn_established(sk, mandatory, opt, feat, + val, len); } return 0; /* ignore FN options in all other states */ } diff --git a/net/dccp/feat.h b/net/dccp/feat.h index e56a4e5..90b957d 100644 --- a/net/dccp/feat.h +++ b/net/dccp/feat.h @@ -129,6 +129,7 @@ extern int dccp_feat_clone_list(struct list_head const *, struct list_head *); extern void dccp_encode_value_var(const u64 value, u8 *to, const u8 len); extern u64 dccp_decode_value_var(const u8 *bf, const u8 len); +extern u64 dccp_feat_nn_get(struct sock *sk, u8 feat); extern int dccp_insert_option_mandatory(struct sk_buff *skb); extern int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat, diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 152975d..e742f90 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -184,7 +184,6 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) dp->dccps_rate_last = jiffies; dp->dccps_role = DCCP_ROLE_UNDEFINED; dp->dccps_service = DCCP_SERVICE_CODE_IS_ABSENT; - dp->dccps_l_ack_ratio = dp->dccps_r_ack_ratio = 1; dp->dccps_tx_qlen = sysctl_dccp_tx_qlen; dccp_init_xmit_timers(sk); diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index ba4face..2ab16e1 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -388,7 +388,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa) } ifa->ifa_next = dn_db->ifa_list; - rcu_assign_pointer(dn_db->ifa_list, ifa); + RCU_INIT_POINTER(dn_db->ifa_list, ifa); dn_ifaddr_notify(RTM_NEWADDR, ifa); blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa); @@ -1093,7 +1093,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err) memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms)); - rcu_assign_pointer(dev->dn_ptr, dn_db); + RCU_INIT_POINTER(dev->dn_ptr, dn_db); dn_db->dev = dev; init_timer(&dn_db->timer); @@ -1101,7 +1101,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err) dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table); if (!dn_db->neigh_parms) { - rcu_assign_pointer(dev->dn_ptr, NULL); + RCU_INIT_POINTER(dev->dn_ptr, NULL); kfree(dn_db); return NULL; } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 0a47b6c..56cf9b8 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -301,7 +301,6 @@ static const struct net_device_ops dsa_netdev_ops = { .ndo_start_xmit = dsa_xmit, .ndo_change_rx_flags = dsa_slave_change_rx_flags, .ndo_set_rx_mode = dsa_slave_set_rx_mode, - .ndo_set_multicast_list = dsa_slave_set_rx_mode, .ndo_set_mac_address = dsa_slave_set_mac_address, .ndo_do_ioctl = dsa_slave_ioctl, }; @@ -314,7 +313,6 @@ static const struct net_device_ops edsa_netdev_ops = { .ndo_start_xmit = edsa_xmit, .ndo_change_rx_flags = dsa_slave_change_rx_flags, .ndo_set_rx_mode = dsa_slave_set_rx_mode, - .ndo_set_multicast_list = dsa_slave_set_rx_mode, .ndo_set_mac_address = dsa_slave_set_mac_address, .ndo_do_ioctl = dsa_slave_ioctl, }; @@ -327,7 +325,6 @@ static const struct net_device_ops trailer_netdev_ops = { .ndo_start_xmit = trailer_xmit, .ndo_change_rx_flags = dsa_slave_change_rx_flags, .ndo_set_rx_mode = dsa_slave_set_rx_mode, - .ndo_set_multicast_list = dsa_slave_set_rx_mode, .ndo_set_mac_address = dsa_slave_set_mac_address, .ndo_do_ioctl = dsa_slave_ioctl, }; diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index bc19bd0..c6b5092 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -258,7 +258,7 @@ static struct in_device *inetdev_init(struct net_device *dev) ip_mc_up(in_dev); /* we 
can receive as soon as ip_ptr is set -- do this last */ - rcu_assign_pointer(dev->ip_ptr, in_dev); + RCU_INIT_POINTER(dev->ip_ptr, in_dev); out: return in_dev; out_kfree: @@ -291,7 +291,7 @@ static void inetdev_destroy(struct in_device *in_dev) inet_free_ifa(ifa); } - rcu_assign_pointer(dev->ip_ptr, NULL); + RCU_INIT_POINTER(dev->ip_ptr, NULL); devinet_sysctl_unregister(in_dev); neigh_parms_release(&arp_tbl, in_dev->arp_parms); @@ -1175,7 +1175,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, switch (event) { case NETDEV_REGISTER: printk(KERN_DEBUG "inetdev_event: bug\n"); - rcu_assign_pointer(dev->ip_ptr, NULL); + RCU_INIT_POINTER(dev->ip_ptr, NULL); break; case NETDEV_UP: if (!inetdev_valid_mtu(dev->mtu)) diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index de9e297..89d6f71 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -204,7 +204,7 @@ static inline struct tnode *node_parent_rcu(const struct rt_trie_node *node) return (struct tnode *)(parent & ~NODE_TYPE_MASK); } -/* Same as rcu_assign_pointer +/* Same as RCU_INIT_POINTER * but that macro() assumes that value is a pointer. */ static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr) @@ -528,7 +528,7 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node * if (n) node_set_parent(n, tn); - rcu_assign_pointer(tn->child[i], n); + RCU_INIT_POINTER(tn->child[i], n); } #define MAX_WORK 10 @@ -1014,7 +1014,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn) tp = node_parent((struct rt_trie_node *) tn); if (!tp) - rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn); + RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn); tnode_free_flush(); if (!tp) @@ -1026,7 +1026,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn) if (IS_TNODE(tn)) tn = (struct tnode *)resize(t, (struct tnode *)tn); - rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn); + RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn); tnode_free_flush(); } @@ -1163,7 +1163,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen) put_child(t, (struct tnode *)tp, cindex, (struct rt_trie_node *)tn); } else { - rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn); + RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn); tp = tn; } } @@ -1621,7 +1621,7 @@ static void trie_leaf_remove(struct trie *t, struct leaf *l) put_child(t, (struct tnode *)tp, cindex, NULL); trie_rebalance(t, tp); } else - rcu_assign_pointer(t->trie, NULL); + RCU_INIT_POINTER(t->trie, NULL); free_leaf(l); } diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c index dbfc21d..8cb1ebb 100644 --- a/net/ipv4/gre.c +++ b/net/ipv4/gre.c @@ -34,7 +34,7 @@ int gre_add_protocol(const struct gre_protocol *proto, u8 version) if (gre_proto[version]) goto err_out_unlock; - rcu_assign_pointer(gre_proto[version], proto); + RCU_INIT_POINTER(gre_proto[version], proto); spin_unlock(&gre_proto_lock); return 0; @@ -54,7 +54,7 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version) if (rcu_dereference_protected(gre_proto[version], lockdep_is_held(&gre_proto_lock)) != proto) goto err_out_unlock; - rcu_assign_pointer(gre_proto[version], NULL); + RCU_INIT_POINTER(gre_proto[version], NULL); spin_unlock(&gre_proto_lock); synchronize_rcu(); return 0; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 283c0a2..ce57bde 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1009,7 +1009,7 @@ static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr) /* Checking for 
IFF_MULTICAST here is WRONG-WRONG-WRONG. We will get multicast token leakage, when IFF_MULTICAST - is changed. This check should be done in dev->set_multicast_list + is changed. This check should be done in ndo_set_rx_mode routine. Something sort of: if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; } --ANK @@ -1242,7 +1242,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) im->next_rcu = in_dev->mc_list; in_dev->mc_count++; - rcu_assign_pointer(in_dev->mc_list, im); + RCU_INIT_POINTER(in_dev->mc_list, im); #ifdef CONFIG_IP_MULTICAST igmpv3_del_delrec(in_dev, im->multiaddr); @@ -1813,7 +1813,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr) iml->next_rcu = inet->mc_list; iml->sflist = NULL; iml->sfmode = MCAST_EXCLUDE; - rcu_assign_pointer(inet->mc_list, iml); + RCU_INIT_POINTER(inet->mc_list, iml); ip_mc_inc_group(in_dev, addr); err = 0; done: @@ -1835,7 +1835,7 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, } err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr, iml->sfmode, psf->sl_count, psf->sl_addr, 0); - rcu_assign_pointer(iml->sflist, NULL); + RCU_INIT_POINTER(iml->sflist, NULL); /* decrease mem now to avoid the memleak warning */ atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc); kfree_rcu(psf, rcu); @@ -2000,7 +2000,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc); kfree_rcu(psl, rcu); } - rcu_assign_pointer(pmc->sflist, newpsl); + RCU_INIT_POINTER(pmc->sflist, newpsl); psl = newpsl; } rv = 1; /* > 0 for insert logic below if sl_count is 0 */ @@ -2103,7 +2103,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex) } else (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, 0, NULL, 0); - rcu_assign_pointer(pmc->sflist, newpsl); + RCU_INIT_POINTER(pmc->sflist, newpsl); pmc->sfmode = msf->imsf_fmode; err = 0; done: diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 378b20b..065effd 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -231,7 +231,7 @@ static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t) (iter = rtnl_dereference(*tp)) != NULL; tp = &iter->next) { if (t == iter) { - rcu_assign_pointer(*tp, t->next); + RCU_INIT_POINTER(*tp, t->next); break; } } @@ -241,8 +241,8 @@ static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t) { struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t); - rcu_assign_pointer(t->next, rtnl_dereference(*tp)); - rcu_assign_pointer(*tp, t); + RCU_INIT_POINTER(t->next, rtnl_dereference(*tp)); + RCU_INIT_POINTER(*tp, t); } static struct ip_tunnel * ipip_tunnel_locate(struct net *net, @@ -301,7 +301,7 @@ static void ipip_tunnel_uninit(struct net_device *dev) struct ipip_net *ipn = net_generic(net, ipip_net_id); if (dev == ipn->fb_tunnel_dev) - rcu_assign_pointer(ipn->tunnels_wc[0], NULL); + RCU_INIT_POINTER(ipn->tunnels_wc[0], NULL); else ipip_tunnel_unlink(ipn, netdev_priv(dev)); dev_put(dev); @@ -791,7 +791,7 @@ static int __net_init ipip_fb_tunnel_init(struct net_device *dev) return -ENOMEM; dev_hold(dev); - rcu_assign_pointer(ipn->tunnels_wc[0], tunnel); + RCU_INIT_POINTER(ipn->tunnels_wc[0], tunnel); return 0; } diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 58e8791..6164e98 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1176,7 +1176,7 @@ static void mrtsock_destruct(struct sock *sk) ipmr_for_each_table(mrt, net) { if (sk == rtnl_dereference(mrt->mroute_sk)) { IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; - 
rcu_assign_pointer(mrt->mroute_sk, NULL); + RCU_INIT_POINTER(mrt->mroute_sk, NULL); mroute_clean_tables(mrt); } } @@ -1203,7 +1203,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi return -ENOENT; if (optname != MRT_INIT) { - if (sk != rcu_dereference_raw(mrt->mroute_sk) && + if (sk != rcu_access_pointer(mrt->mroute_sk) && !capable(CAP_NET_ADMIN)) return -EACCES; } @@ -1224,13 +1224,13 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi ret = ip_ra_control(sk, 1, mrtsock_destruct); if (ret == 0) { - rcu_assign_pointer(mrt->mroute_sk, sk); + RCU_INIT_POINTER(mrt->mroute_sk, sk); IPV4_DEVCONF_ALL(net, MC_FORWARDING)++; } rtnl_unlock(); return ret; case MRT_DONE: - if (sk != rcu_dereference_raw(mrt->mroute_sk)) + if (sk != rcu_access_pointer(mrt->mroute_sk)) return -EACCES; return ip_ra_control(sk, 0, NULL); case MRT_ADD_VIF: diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c index 703f366f..7b22382 100644 --- a/net/ipv4/netfilter/nf_nat_amanda.c +++ b/net/ipv4/netfilter/nf_nat_amanda.c @@ -70,14 +70,14 @@ static unsigned int help(struct sk_buff *skb, static void __exit nf_nat_amanda_fini(void) { - rcu_assign_pointer(nf_nat_amanda_hook, NULL); + RCU_INIT_POINTER(nf_nat_amanda_hook, NULL); synchronize_rcu(); } static int __init nf_nat_amanda_init(void) { BUG_ON(nf_nat_amanda_hook != NULL); - rcu_assign_pointer(nf_nat_amanda_hook, help); + RCU_INIT_POINTER(nf_nat_amanda_hook, help); return 0; } diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index 3346de5..447bc5c 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c @@ -514,7 +514,7 @@ int nf_nat_protocol_register(const struct nf_nat_protocol *proto) ret = -EBUSY; goto out; } - rcu_assign_pointer(nf_nat_protos[proto->protonum], proto); + RCU_INIT_POINTER(nf_nat_protos[proto->protonum], proto); out: spin_unlock_bh(&nf_nat_lock); return ret; @@ -525,7 +525,7 @@ EXPORT_SYMBOL(nf_nat_protocol_register); void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto) { spin_lock_bh(&nf_nat_lock); - rcu_assign_pointer(nf_nat_protos[proto->protonum], + RCU_INIT_POINTER(nf_nat_protos[proto->protonum], &nf_nat_unknown_protocol); spin_unlock_bh(&nf_nat_lock); synchronize_rcu(); @@ -736,10 +736,10 @@ static int __init nf_nat_init(void) /* Sew in builtin protocols. 
*/ spin_lock_bh(&nf_nat_lock); for (i = 0; i < MAX_IP_NAT_PROTO; i++) - rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol); - rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp); - rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp); - rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp); + RCU_INIT_POINTER(nf_nat_protos[i], &nf_nat_unknown_protocol); + RCU_INIT_POINTER(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp); + RCU_INIT_POINTER(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp); + RCU_INIT_POINTER(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp); spin_unlock_bh(&nf_nat_lock); /* Initialize fake conntrack so that NAT will skip it */ @@ -748,12 +748,12 @@ static int __init nf_nat_init(void) l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET); BUG_ON(nf_nat_seq_adjust_hook != NULL); - rcu_assign_pointer(nf_nat_seq_adjust_hook, nf_nat_seq_adjust); + RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust); BUG_ON(nfnetlink_parse_nat_setup_hook != NULL); - rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, + RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, nfnetlink_parse_nat_setup); BUG_ON(nf_ct_nat_offset != NULL); - rcu_assign_pointer(nf_ct_nat_offset, nf_nat_get_offset); + RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset); return 0; cleanup_extend: @@ -766,9 +766,9 @@ static void __exit nf_nat_cleanup(void) unregister_pernet_subsys(&nf_nat_net_ops); nf_ct_l3proto_put(l3proto); nf_ct_extend_unregister(&nat_extend); - rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL); - rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, NULL); - rcu_assign_pointer(nf_ct_nat_offset, NULL); + RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL); + RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL); + RCU_INIT_POINTER(nf_ct_nat_offset, NULL); synchronize_net(); } diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/ipv4/netfilter/nf_nat_ftp.c index dc73abb..e462a95 100644 --- a/net/ipv4/netfilter/nf_nat_ftp.c +++ b/net/ipv4/netfilter/nf_nat_ftp.c @@ -113,14 +113,14 @@ out: static void __exit nf_nat_ftp_fini(void) { - rcu_assign_pointer(nf_nat_ftp_hook, NULL); + RCU_INIT_POINTER(nf_nat_ftp_hook, NULL); synchronize_rcu(); } static int __init nf_nat_ftp_init(void) { BUG_ON(nf_nat_ftp_hook != NULL); - rcu_assign_pointer(nf_nat_ftp_hook, nf_nat_ftp); + RCU_INIT_POINTER(nf_nat_ftp_hook, nf_nat_ftp); return 0; } diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c index 790f316..b9a1136 100644 --- a/net/ipv4/netfilter/nf_nat_h323.c +++ b/net/ipv4/netfilter/nf_nat_h323.c @@ -581,30 +581,30 @@ static int __init init(void) BUG_ON(nat_callforwarding_hook != NULL); BUG_ON(nat_q931_hook != NULL); - rcu_assign_pointer(set_h245_addr_hook, set_h245_addr); - rcu_assign_pointer(set_h225_addr_hook, set_h225_addr); - rcu_assign_pointer(set_sig_addr_hook, set_sig_addr); - rcu_assign_pointer(set_ras_addr_hook, set_ras_addr); - rcu_assign_pointer(nat_rtp_rtcp_hook, nat_rtp_rtcp); - rcu_assign_pointer(nat_t120_hook, nat_t120); - rcu_assign_pointer(nat_h245_hook, nat_h245); - rcu_assign_pointer(nat_callforwarding_hook, nat_callforwarding); - rcu_assign_pointer(nat_q931_hook, nat_q931); + RCU_INIT_POINTER(set_h245_addr_hook, set_h245_addr); + RCU_INIT_POINTER(set_h225_addr_hook, set_h225_addr); + RCU_INIT_POINTER(set_sig_addr_hook, set_sig_addr); + RCU_INIT_POINTER(set_ras_addr_hook, set_ras_addr); + RCU_INIT_POINTER(nat_rtp_rtcp_hook, nat_rtp_rtcp); + RCU_INIT_POINTER(nat_t120_hook, nat_t120); + RCU_INIT_POINTER(nat_h245_hook, 
nat_h245); + RCU_INIT_POINTER(nat_callforwarding_hook, nat_callforwarding); + RCU_INIT_POINTER(nat_q931_hook, nat_q931); return 0; } /****************************************************************************/ static void __exit fini(void) { - rcu_assign_pointer(set_h245_addr_hook, NULL); - rcu_assign_pointer(set_h225_addr_hook, NULL); - rcu_assign_pointer(set_sig_addr_hook, NULL); - rcu_assign_pointer(set_ras_addr_hook, NULL); - rcu_assign_pointer(nat_rtp_rtcp_hook, NULL); - rcu_assign_pointer(nat_t120_hook, NULL); - rcu_assign_pointer(nat_h245_hook, NULL); - rcu_assign_pointer(nat_callforwarding_hook, NULL); - rcu_assign_pointer(nat_q931_hook, NULL); + RCU_INIT_POINTER(set_h245_addr_hook, NULL); + RCU_INIT_POINTER(set_h225_addr_hook, NULL); + RCU_INIT_POINTER(set_sig_addr_hook, NULL); + RCU_INIT_POINTER(set_ras_addr_hook, NULL); + RCU_INIT_POINTER(nat_rtp_rtcp_hook, NULL); + RCU_INIT_POINTER(nat_t120_hook, NULL); + RCU_INIT_POINTER(nat_h245_hook, NULL); + RCU_INIT_POINTER(nat_callforwarding_hook, NULL); + RCU_INIT_POINTER(nat_q931_hook, NULL); synchronize_rcu(); } diff --git a/net/ipv4/netfilter/nf_nat_irc.c b/net/ipv4/netfilter/nf_nat_irc.c index 535e1a8..979ae16 100644 --- a/net/ipv4/netfilter/nf_nat_irc.c +++ b/net/ipv4/netfilter/nf_nat_irc.c @@ -75,14 +75,14 @@ static unsigned int help(struct sk_buff *skb, static void __exit nf_nat_irc_fini(void) { - rcu_assign_pointer(nf_nat_irc_hook, NULL); + RCU_INIT_POINTER(nf_nat_irc_hook, NULL); synchronize_rcu(); } static int __init nf_nat_irc_init(void) { BUG_ON(nf_nat_irc_hook != NULL); - rcu_assign_pointer(nf_nat_irc_hook, help); + RCU_INIT_POINTER(nf_nat_irc_hook, help); return 0; } diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c index 4c06003..3e8284b 100644 --- a/net/ipv4/netfilter/nf_nat_pptp.c +++ b/net/ipv4/netfilter/nf_nat_pptp.c @@ -282,25 +282,25 @@ static int __init nf_nat_helper_pptp_init(void) nf_nat_need_gre(); BUG_ON(nf_nat_pptp_hook_outbound != NULL); - rcu_assign_pointer(nf_nat_pptp_hook_outbound, pptp_outbound_pkt); + RCU_INIT_POINTER(nf_nat_pptp_hook_outbound, pptp_outbound_pkt); BUG_ON(nf_nat_pptp_hook_inbound != NULL); - rcu_assign_pointer(nf_nat_pptp_hook_inbound, pptp_inbound_pkt); + RCU_INIT_POINTER(nf_nat_pptp_hook_inbound, pptp_inbound_pkt); BUG_ON(nf_nat_pptp_hook_exp_gre != NULL); - rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, pptp_exp_gre); + RCU_INIT_POINTER(nf_nat_pptp_hook_exp_gre, pptp_exp_gre); BUG_ON(nf_nat_pptp_hook_expectfn != NULL); - rcu_assign_pointer(nf_nat_pptp_hook_expectfn, pptp_nat_expected); + RCU_INIT_POINTER(nf_nat_pptp_hook_expectfn, pptp_nat_expected); return 0; } static void __exit nf_nat_helper_pptp_fini(void) { - rcu_assign_pointer(nf_nat_pptp_hook_expectfn, NULL); - rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, NULL); - rcu_assign_pointer(nf_nat_pptp_hook_inbound, NULL); - rcu_assign_pointer(nf_nat_pptp_hook_outbound, NULL); + RCU_INIT_POINTER(nf_nat_pptp_hook_expectfn, NULL); + RCU_INIT_POINTER(nf_nat_pptp_hook_exp_gre, NULL); + RCU_INIT_POINTER(nf_nat_pptp_hook_inbound, NULL); + RCU_INIT_POINTER(nf_nat_pptp_hook_outbound, NULL); synchronize_rcu(); } diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c index e40cf78..78844d9 100644 --- a/net/ipv4/netfilter/nf_nat_sip.c +++ b/net/ipv4/netfilter/nf_nat_sip.c @@ -528,13 +528,13 @@ err1: static void __exit nf_nat_sip_fini(void) { - rcu_assign_pointer(nf_nat_sip_hook, NULL); - rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, NULL); - rcu_assign_pointer(nf_nat_sip_expect_hook, NULL); - 
rcu_assign_pointer(nf_nat_sdp_addr_hook, NULL); - rcu_assign_pointer(nf_nat_sdp_port_hook, NULL); - rcu_assign_pointer(nf_nat_sdp_session_hook, NULL); - rcu_assign_pointer(nf_nat_sdp_media_hook, NULL); + RCU_INIT_POINTER(nf_nat_sip_hook, NULL); + RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, NULL); + RCU_INIT_POINTER(nf_nat_sip_expect_hook, NULL); + RCU_INIT_POINTER(nf_nat_sdp_addr_hook, NULL); + RCU_INIT_POINTER(nf_nat_sdp_port_hook, NULL); + RCU_INIT_POINTER(nf_nat_sdp_session_hook, NULL); + RCU_INIT_POINTER(nf_nat_sdp_media_hook, NULL); synchronize_rcu(); } @@ -547,13 +547,13 @@ static int __init nf_nat_sip_init(void) BUG_ON(nf_nat_sdp_port_hook != NULL); BUG_ON(nf_nat_sdp_session_hook != NULL); BUG_ON(nf_nat_sdp_media_hook != NULL); - rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip); - rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust); - rcu_assign_pointer(nf_nat_sip_expect_hook, ip_nat_sip_expect); - rcu_assign_pointer(nf_nat_sdp_addr_hook, ip_nat_sdp_addr); - rcu_assign_pointer(nf_nat_sdp_port_hook, ip_nat_sdp_port); - rcu_assign_pointer(nf_nat_sdp_session_hook, ip_nat_sdp_session); - rcu_assign_pointer(nf_nat_sdp_media_hook, ip_nat_sdp_media); + RCU_INIT_POINTER(nf_nat_sip_hook, ip_nat_sip); + RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust); + RCU_INIT_POINTER(nf_nat_sip_expect_hook, ip_nat_sip_expect); + RCU_INIT_POINTER(nf_nat_sdp_addr_hook, ip_nat_sdp_addr); + RCU_INIT_POINTER(nf_nat_sdp_port_hook, ip_nat_sdp_port); + RCU_INIT_POINTER(nf_nat_sdp_session_hook, ip_nat_sdp_session); + RCU_INIT_POINTER(nf_nat_sdp_media_hook, ip_nat_sdp_media); return 0; } diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index 076b7c8..d1cb412 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c @@ -1310,7 +1310,7 @@ static int __init nf_nat_snmp_basic_init(void) int ret = 0; BUG_ON(nf_nat_snmp_hook != NULL); - rcu_assign_pointer(nf_nat_snmp_hook, help); + RCU_INIT_POINTER(nf_nat_snmp_hook, help); ret = nf_conntrack_helper_register(&snmp_trap_helper); if (ret < 0) { @@ -1322,7 +1322,7 @@ static int __init nf_nat_snmp_basic_init(void) static void __exit nf_nat_snmp_basic_fini(void) { - rcu_assign_pointer(nf_nat_snmp_hook, NULL); + RCU_INIT_POINTER(nf_nat_snmp_hook, NULL); nf_conntrack_helper_unregister(&snmp_trap_helper); } diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c index a6e606e..9290048 100644 --- a/net/ipv4/netfilter/nf_nat_standalone.c +++ b/net/ipv4/netfilter/nf_nat_standalone.c @@ -284,7 +284,7 @@ static int __init nf_nat_standalone_init(void) #ifdef CONFIG_XFRM BUG_ON(ip_nat_decode_session != NULL); - rcu_assign_pointer(ip_nat_decode_session, nat_decode_session); + RCU_INIT_POINTER(ip_nat_decode_session, nat_decode_session); #endif ret = nf_nat_rule_init(); if (ret < 0) { @@ -302,7 +302,7 @@ static int __init nf_nat_standalone_init(void) nf_nat_rule_cleanup(); cleanup_decode_session: #ifdef CONFIG_XFRM - rcu_assign_pointer(ip_nat_decode_session, NULL); + RCU_INIT_POINTER(ip_nat_decode_session, NULL); synchronize_net(); #endif return ret; @@ -313,7 +313,7 @@ static void __exit nf_nat_standalone_fini(void) nf_unregister_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops)); nf_nat_rule_cleanup(); #ifdef CONFIG_XFRM - rcu_assign_pointer(ip_nat_decode_session, NULL); + RCU_INIT_POINTER(ip_nat_decode_session, NULL); synchronize_net(); #endif /* Conntrack caches are unregistered in nf_conntrack_cleanup */ diff --git 
a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c index 7274a43..a2901bf 100644 --- a/net/ipv4/netfilter/nf_nat_tftp.c +++ b/net/ipv4/netfilter/nf_nat_tftp.c @@ -36,14 +36,14 @@ static unsigned int help(struct sk_buff *skb, static void __exit nf_nat_tftp_fini(void) { - rcu_assign_pointer(nf_nat_tftp_hook, NULL); + RCU_INIT_POINTER(nf_nat_tftp_hook, NULL); synchronize_rcu(); } static int __init nf_nat_tftp_init(void) { BUG_ON(nf_nat_tftp_hook != NULL); - rcu_assign_pointer(nf_nat_tftp_hook, help); + RCU_INIT_POINTER(nf_nat_tftp_hook, help); return 0; } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 075212e..2c21d3b 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -324,7 +324,7 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq) struct rtable *r = NULL; for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) { - if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain)) + if (!rcu_access_pointer(rt_hash_table[st->bucket].chain)) continue; rcu_read_lock_bh(); r = rcu_dereference_bh(rt_hash_table[st->bucket].chain); @@ -350,7 +350,7 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq, do { if (--st->bucket < 0) return NULL; - } while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain)); + } while (!rcu_access_pointer(rt_hash_table[st->bucket].chain)); rcu_read_lock_bh(); r = rcu_dereference_bh(rt_hash_table[st->bucket].chain); } @@ -761,7 +761,7 @@ static void rt_do_flush(struct net *net, int process_context) if (process_context && need_resched()) cond_resched(); - rth = rcu_dereference_raw(rt_hash_table[i].chain); + rth = rcu_access_pointer(rt_hash_table[i].chain); if (!rth) continue; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 1c12b8e..b3f2611 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1578,7 +1578,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) #endif if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ - sock_rps_save_rxhash(sk, skb->rxhash); + sock_rps_save_rxhash(sk, skb); if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) { rsk = sk; goto reset; @@ -1595,7 +1595,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) goto discard; if (nsk != sk) { - sock_rps_save_rxhash(nsk, skb->rxhash); + sock_rps_save_rxhash(nsk, skb); if (tcp_child_process(sk, nsk, skb)) { rsk = nsk; goto reset; @@ -1603,7 +1603,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) return 0; } } else - sock_rps_save_rxhash(sk, skb->rxhash); + sock_rps_save_rxhash(sk, skb); if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) { rsk = sk; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 1b5a193..ebaa96b 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1267,7 +1267,7 @@ int udp_disconnect(struct sock *sk, int flags) sk->sk_state = TCP_CLOSE; inet->inet_daddr = 0; inet->inet_dport = 0; - sock_rps_save_rxhash(sk, 0); + sock_rps_reset_rxhash(sk); sk->sk_bound_dev_if = 0; if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) inet_reset_saddr(sk); @@ -1355,7 +1355,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) int rc; if (inet_sk(sk)->inet_daddr) - sock_rps_save_rxhash(sk, skb->rxhash); + sock_rps_save_rxhash(sk, skb); rc = ip_queue_rcv_skb(sk, skb); if (rc < 0) { @@ -1461,10 +1461,9 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) } } - if (rcu_dereference_raw(sk->sk_filter)) { - if (udp_lib_checksum_complete(skb)) - goto drop; - } + if (rcu_access_pointer(sk->sk_filter) && + udp_lib_checksum_complete(skb)) + goto drop; if 
(sk_rcvqueues_full(sk, skb)) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index f012ebd..8f1e5be 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -428,7 +428,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) ndev->tstamp = jiffies; addrconf_sysctl_register(ndev); /* protected by rtnl_lock */ - rcu_assign_pointer(dev->ip6_ptr, ndev); + RCU_INIT_POINTER(dev->ip6_ptr, ndev); /* Join all-node multicast group */ ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes); @@ -824,12 +824,13 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i { struct inet6_dev *idev = ifp->idev; struct in6_addr addr, *tmpaddr; - unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp, age; + unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_tstamp, age; unsigned long regen_advance; int tmp_plen; int ret = 0; int max_addresses; u32 addr_flags; + unsigned long now = jiffies; write_lock(&idev->lock); if (ift) { @@ -874,7 +875,7 @@ retry: goto out; } memcpy(&addr.s6_addr[8], idev->rndid, 8); - age = (jiffies - ifp->tstamp) / HZ; + age = (now - ifp->tstamp) / HZ; tmp_valid_lft = min_t(__u32, ifp->valid_lft, idev->cnf.temp_valid_lft + age); @@ -884,7 +885,6 @@ retry: idev->cnf.max_desync_factor); tmp_plen = ifp->prefix_len; max_addresses = idev->cnf.max_addresses; - tmp_cstamp = ifp->cstamp; tmp_tstamp = ifp->tstamp; spin_unlock_bh(&ifp->lock); @@ -929,7 +929,7 @@ retry: ift->ifpub = ifp; ift->valid_lft = tmp_valid_lft; ift->prefered_lft = tmp_prefered_lft; - ift->cstamp = tmp_cstamp; + ift->cstamp = now; ift->tstamp = tmp_tstamp; spin_unlock_bh(&ift->lock); @@ -1999,25 +1999,50 @@ ok: #ifdef CONFIG_IPV6_PRIVACY read_lock_bh(&in6_dev->lock); /* update all temporary addresses in the list */ - list_for_each_entry(ift, &in6_dev->tempaddr_list, tmp_list) { - /* - * When adjusting the lifetimes of an existing - * temporary address, only lower the lifetimes. - * Implementations must not increase the - * lifetimes of an existing temporary address - * when processing a Prefix Information Option. - */ + list_for_each_entry(ift, &in6_dev->tempaddr_list, + tmp_list) { + int age, max_valid, max_prefered; + if (ifp != ift->ifpub) continue; + /* + * RFC 4941 section 3.3: + * If a received option will extend the lifetime + * of a public address, the lifetimes of + * temporary addresses should be extended, + * subject to the overall constraint that no + * temporary addresses should ever remain + * "valid" or "preferred" for a time longer than + * (TEMP_VALID_LIFETIME) or + * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), + * respectively. 
+ */ + age = (now - ift->cstamp) / HZ; + max_valid = in6_dev->cnf.temp_valid_lft - age; + if (max_valid < 0) + max_valid = 0; + + max_prefered = in6_dev->cnf.temp_prefered_lft - + in6_dev->cnf.max_desync_factor - + age; + if (max_prefered < 0) + max_prefered = 0; + + if (valid_lft > max_valid) + valid_lft = max_valid; + + if (prefered_lft > max_prefered) + prefered_lft = max_prefered; + spin_lock(&ift->lock); flags = ift->flags; - if (ift->valid_lft > valid_lft && - ift->valid_lft - valid_lft > (jiffies - ift->tstamp) / HZ) - ift->valid_lft = valid_lft + (jiffies - ift->tstamp) / HZ; - if (ift->prefered_lft > prefered_lft && - ift->prefered_lft - prefered_lft > (jiffies - ift->tstamp) / HZ) - ift->prefered_lft = prefered_lft + (jiffies - ift->tstamp) / HZ; + ift->valid_lft = valid_lft; + ift->prefered_lft = prefered_lft; + ift->tstamp = now; + if (prefered_lft > 0) + ift->flags &= ~IFA_F_DEPRECATED; + spin_unlock(&ift->lock); if (!(flags&IFA_F_TENTATIVE)) ipv6_ifa_notify(0, ift); @@ -2025,9 +2050,11 @@ ok: if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) { /* - * When a new public address is created as described in [ADDRCONF], - * also create a new temporary address. Also create a temporary - * address if it's enabled but no temporary address currently exists. + * When a new public address is created as + * described in [ADDRCONF], also create a new + * temporary address. Also create a temporary + * address if it's enabled but no temporary + * address currently exists. */ read_unlock_bh(&in6_dev->lock); ipv6_create_tempaddr(ifp, NULL); @@ -2706,7 +2733,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) idev->dead = 1; /* protected by rtnl_lock */ - rcu_assign_pointer(dev->ip6_ptr, NULL); + RCU_INIT_POINTER(dev->ip6_ptr, NULL); /* Step 1.5: remove snmp6 entry */ snmp6_unregister_dev(idev); diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 79a485e..1318de4 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -273,12 +273,12 @@ static int ipv6_destopt_rcv(struct sk_buff *skb) #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) __u16 dstbuf; #endif - struct dst_entry *dst; + struct dst_entry *dst = skb_dst(skb); if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || !pskb_may_pull(skb, (skb_transport_offset(skb) + ((skb_transport_header(skb)[1] + 1) << 3)))) { - IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), + IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -1; @@ -289,9 +289,7 @@ static int ipv6_destopt_rcv(struct sk_buff *skb) dstbuf = opt->dst1; #endif - dst = dst_clone(skb_dst(skb)); if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) { - dst_release(dst); skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; opt = IP6CB(skb); #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) @@ -304,7 +302,6 @@ static int ipv6_destopt_rcv(struct sk_buff *skb) IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); - dst_release(dst); return -1; } diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 1190041..2b59154 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -490,7 +490,8 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) goto out_dst_release; } - idev = in6_dev_get(skb->dev); + rcu_read_lock(); + idev = __in6_dev_get(skb->dev); err = ip6_append_data(sk, icmpv6_getfrag, &msg, len + sizeof(struct icmp6hdr), @@ -500,19 +501,16 @@ void icmpv6_send(struct sk_buff *skb, u8 
type, u8 code, __u32 info) if (err) { ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); - goto out_put; + } else { + err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, + len + sizeof(struct icmp6hdr)); } - err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, len + sizeof(struct icmp6hdr)); - -out_put: - if (likely(idev != NULL)) - in6_dev_put(idev); + rcu_read_unlock(); out_dst_release: dst_release(dst); out: icmpv6_xmit_unlock(sk); } - EXPORT_SYMBOL(icmpv6_send); static void icmpv6_echo_reply(struct sk_buff *skb) @@ -569,7 +567,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) if (hlimit < 0) hlimit = ip6_dst_hoplimit(dst); - idev = in6_dev_get(skb->dev); + idev = __in6_dev_get(skb->dev); msg.skb = skb; msg.offset = 0; @@ -583,13 +581,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb) if (err) { ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); - goto out_put; + } else { + err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, + skb->len + sizeof(struct icmp6hdr)); } - err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, skb->len + sizeof(struct icmp6hdr)); - -out_put: - if (likely(idev != NULL)) - in6_dev_put(idev); dst_release(dst); out: icmpv6_xmit_unlock(sk); diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 8a58e8c..2916200 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -211,6 +211,7 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused) struct flowi6 fl6; struct dst_entry *dst; struct in6_addr *final_p, final; + int res; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = sk->sk_protocol; @@ -241,12 +242,14 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused) __inet6_csk_dst_store(sk, dst, NULL, NULL); } - skb_dst_set(skb, dst_clone(dst)); + rcu_read_lock(); + skb_dst_set_noref(skb, dst); /* Restore final destination back after routing done */ ipv6_addr_copy(&fl6.daddr, &np->daddr); - return ip6_xmit(sk, skb, &fl6, np->opt); + res = ip6_xmit(sk, skb, &fl6, np->opt); + rcu_read_unlock(); + return res; } - EXPORT_SYMBOL_GPL(inet6_csk_xmit); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 0bc9888..694d70a 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -218,8 +218,8 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t) { struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms); - rcu_assign_pointer(t->next , rtnl_dereference(*tp)); - rcu_assign_pointer(*tp, t); + RCU_INIT_POINTER(t->next , rtnl_dereference(*tp)); + RCU_INIT_POINTER(*tp, t); } /** @@ -237,7 +237,7 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t) (iter = rtnl_dereference(*tp)) != NULL; tp = &iter->next) { if (t == iter) { - rcu_assign_pointer(*tp, t->next); + RCU_INIT_POINTER(*tp, t->next); break; } } @@ -350,7 +350,7 @@ ip6_tnl_dev_uninit(struct net_device *dev) struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); if (dev == ip6n->fb_tnl_dev) - rcu_assign_pointer(ip6n->tnls_wc[0], NULL); + RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL); else ip6_tnl_unlink(ip6n, t); ip6_tnl_dst_reset(t); @@ -889,7 +889,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, struct net_device_stats *stats = &t->dev->stats; struct ipv6hdr *ipv6h = ipv6_hdr(skb); struct ipv6_tel_txoption opt; - struct dst_entry *dst; + struct dst_entry *dst, *ndst = NULL; struct net_device *tdev; int mtu; unsigned int max_headroom = sizeof(struct ipv6hdr); @@ -897,19 +897,19 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, 
int err = -1; int pkt_len; - if ((dst = ip6_tnl_dst_check(t)) != NULL) - dst_hold(dst); - else { - dst = ip6_route_output(net, NULL, fl6); + dst = ip6_tnl_dst_check(t); + if (!dst) { + ndst = ip6_route_output(net, NULL, fl6); - if (dst->error) + if (ndst->error) goto tx_err_link_failure; - dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0); - if (IS_ERR(dst)) { - err = PTR_ERR(dst); - dst = NULL; + ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0); + if (IS_ERR(ndst)) { + err = PTR_ERR(ndst); + ndst = NULL; goto tx_err_link_failure; } + dst = ndst; } tdev = dst->dev; @@ -955,7 +955,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, skb = new_skb; } skb_dst_drop(skb); - skb_dst_set(skb, dst_clone(dst)); + skb_dst_set_noref(skb, dst); skb->transport_header = skb->network_header; @@ -987,13 +987,14 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, stats->tx_errors++; stats->tx_aborted_errors++; } - ip6_tnl_dst_store(t, dst); + if (ndst) + ip6_tnl_dst_store(t, ndst); return 0; tx_err_link_failure: stats->tx_carrier_errors++; dst_link_failure(skb); tx_err_dst_release: - dst_release(dst); + dst_release(ndst); return err; } @@ -1439,7 +1440,7 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev) t->parms.proto = IPPROTO_IPV6; dev_hold(dev); - rcu_assign_pointer(ip6n->tnls_wc[0], t); + RCU_INIT_POINTER(ip6n->tnls_wc[0], t); return 0; } diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 9da6e02..1f52dd2 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -533,7 +533,8 @@ void ndisc_send_skb(struct sk_buff *skb, skb_dst_set(skb, dst); - idev = in6_dev_get(dst->dev); + rcu_read_lock(); + idev = __in6_dev_get(dst->dev); IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev, @@ -543,8 +544,7 @@ void ndisc_send_skb(struct sk_buff *skb, ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); } - if (likely(idev != NULL)) - in6_dev_put(idev); + rcu_read_unlock(); } EXPORT_SYMBOL(ndisc_send_skb); @@ -1039,7 +1039,7 @@ static void ndisc_recv_rs(struct sk_buff *skb) if (skb->len < sizeof(*rs_msg)) return; - idev = in6_dev_get(skb->dev); + idev = __in6_dev_get(skb->dev); if (!idev) { if (net_ratelimit()) ND_PRINTK1("ICMP6 RS: can't find in6 device\n"); @@ -1080,7 +1080,7 @@ static void ndisc_recv_rs(struct sk_buff *skb) neigh_release(neigh); } out: - in6_dev_put(idev); + return; } static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt) @@ -1179,7 +1179,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) * set the RA_RECV flag in the interface */ - in6_dev = in6_dev_get(skb->dev); + in6_dev = __in6_dev_get(skb->dev); if (in6_dev == NULL) { ND_PRINTK0(KERN_ERR "ICMPv6 RA: can't find inet6 device for %s.\n", @@ -1188,7 +1188,6 @@ static void ndisc_router_discovery(struct sk_buff *skb) } if (!ndisc_parse_options(opt, optlen, &ndopts)) { - in6_dev_put(in6_dev); ND_PRINTK2(KERN_WARNING "ICMP6 RA: invalid ND options\n"); return; @@ -1255,7 +1254,6 @@ static void ndisc_router_discovery(struct sk_buff *skb) ND_PRINTK0(KERN_ERR "ICMPv6 RA: %s() failed to add default route.\n", __func__); - in6_dev_put(in6_dev); return; } @@ -1265,7 +1263,6 @@ static void ndisc_router_discovery(struct sk_buff *skb) "ICMPv6 RA: %s() got default router without neighbour.\n", __func__); dst_release(&rt->dst); - in6_dev_put(in6_dev); return; } neigh->flags |= NTF_ROUTER; @@ -1422,7 +1419,6 @@ out: dst_release(&rt->dst); else if (neigh) neigh_release(neigh); - in6_dev_put(in6_dev); } static void 
ndisc_redirect_rcv(struct sk_buff *skb) @@ -1481,13 +1477,11 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) return; } - in6_dev = in6_dev_get(skb->dev); + in6_dev = __in6_dev_get(skb->dev); if (!in6_dev) return; - if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects) { - in6_dev_put(in6_dev); + if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects) return; - } /* RFC2461 8.1: * The IP source address of the Redirect MUST be the same as the current @@ -1497,7 +1491,6 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) { ND_PRINTK2(KERN_WARNING "ICMPv6 Redirect: invalid ND options\n"); - in6_dev_put(in6_dev); return; } if (ndopts.nd_opts_tgt_lladdr) { @@ -1506,7 +1499,6 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) if (!lladdr) { ND_PRINTK2(KERN_WARNING "ICMPv6 Redirect: invalid link-layer address length\n"); - in6_dev_put(in6_dev); return; } } @@ -1518,7 +1510,6 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) on_link); neigh_release(neigh); } - in6_dev_put(in6_dev); } void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, @@ -1651,7 +1642,8 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, csum_partial(icmph, len, 0)); skb_dst_set(buff, dst); - idev = in6_dev_get(dst->dev); + rcu_read_lock(); + idev = __in6_dev_get(dst->dev); IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev, dst_output); @@ -1660,8 +1652,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); } - if (likely(idev != NULL)) - in6_dev_put(idev); + rcu_read_unlock(); return; release: diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 6a79f308..f34902f 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -130,14 +130,14 @@ static mh_filter_t __rcu *mh_filter __read_mostly; int rawv6_mh_filter_register(mh_filter_t filter) { - rcu_assign_pointer(mh_filter, filter); + RCU_INIT_POINTER(mh_filter, filter); return 0; } EXPORT_SYMBOL(rawv6_mh_filter_register); int rawv6_mh_filter_unregister(mh_filter_t filter) { - rcu_assign_pointer(mh_filter, NULL); + RCU_INIT_POINTER(mh_filter, NULL); synchronize_rcu(); return 0; } @@ -372,9 +372,9 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr, read_unlock(&raw_v6_hashinfo.lock); } -static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb) +static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb) { - if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) && + if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) && skb_checksum_complete(skb)) { atomic_inc(&sk->sk_drops); kfree_skb(skb); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 00b15ac..a7a1860 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -182,7 +182,7 @@ static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t) (iter = rtnl_dereference(*tp)) != NULL; tp = &iter->next) { if (t == iter) { - rcu_assign_pointer(*tp, t->next); + RCU_INIT_POINTER(*tp, t->next); break; } } @@ -192,8 +192,8 @@ static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t) { struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t); - rcu_assign_pointer(t->next, rtnl_dereference(*tp)); - rcu_assign_pointer(*tp, t); + RCU_INIT_POINTER(t->next, rtnl_dereference(*tp)); + RCU_INIT_POINTER(*tp, t); } static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn) @@ -391,7 +391,7 @@ 
ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg) p->addr = a->addr; p->flags = a->flags; t->prl_count++; - rcu_assign_pointer(t->prl, p); + RCU_INIT_POINTER(t->prl, p); out: return err; } @@ -474,7 +474,7 @@ static void ipip6_tunnel_uninit(struct net_device *dev) struct sit_net *sitn = net_generic(net, sit_net_id); if (dev == sitn->fb_tunnel_dev) { - rcu_assign_pointer(sitn->tunnels_wc[0], NULL); + RCU_INIT_POINTER(sitn->tunnels_wc[0], NULL); } else { ipip6_tunnel_unlink(sitn, netdev_priv(dev)); ipip6_tunnel_del_prl(netdev_priv(dev), NULL); @@ -1176,7 +1176,7 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev) if (!dev->tstats) return -ENOMEM; dev_hold(dev); - rcu_assign_pointer(sitn->tunnels_wc[0], tunnel); + RCU_INIT_POINTER(sitn->tunnels_wc[0], tunnel); return 0; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index d1fb63f..44a5859 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1628,7 +1628,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) opt_skb = skb_clone(skb, GFP_ATOMIC); if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ - sock_rps_save_rxhash(sk, skb->rxhash); + sock_rps_save_rxhash(sk, skb); if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) goto reset; if (opt_skb) @@ -1650,7 +1650,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) * the new socket.. */ if(nsk != sk) { - sock_rps_save_rxhash(nsk, skb->rxhash); + sock_rps_save_rxhash(nsk, skb); if (tcp_child_process(sk, nsk, skb)) goto reset; if (opt_skb) @@ -1658,7 +1658,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) return 0; } } else - sock_rps_save_rxhash(sk, skb->rxhash); + sock_rps_save_rxhash(sk, skb); if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) goto reset; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 29213b5..35bbdc4 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -509,7 +509,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) int is_udplite = IS_UDPLITE(sk); if (!ipv6_addr_any(&inet6_sk(sk)->daddr)) - sock_rps_save_rxhash(sk, skb->rxhash); + sock_rps_save_rxhash(sk, skb); if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) goto drop; @@ -533,7 +533,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) } } - if (rcu_dereference_raw(sk->sk_filter)) { + if (rcu_access_pointer(sk->sk_filter)) { if (udp_lib_checksum_complete(skb)) goto drop; } diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c index e8d5f44..d14152e 100644 --- a/net/irda/irlan/irlan_eth.c +++ b/net/irda/irlan/irlan_eth.c @@ -50,7 +50,7 @@ static const struct net_device_ops irlan_eth_netdev_ops = { .ndo_open = irlan_eth_open, .ndo_stop = irlan_eth_close, .ndo_start_xmit = irlan_eth_xmit, - .ndo_set_multicast_list = irlan_eth_set_multicast_list, + .ndo_set_rx_mode = irlan_eth_set_multicast_list, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, }; diff --git a/net/iucv/Kconfig b/net/iucv/Kconfig index 16ce9cd..497fbe7 100644 --- a/net/iucv/Kconfig +++ b/net/iucv/Kconfig @@ -1,15 +1,17 @@ config IUCV - tristate "IUCV support (S390 - z/VM only)" depends on S390 + def_tristate y if S390 + prompt "IUCV support (S390 - z/VM only)" help Select this option if you want to use inter-user communication under VM or VIF. If you run on z/VM, say "Y" to enable a fast communication link between VM guests. 
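The Kconfig rework above drops the hard dependency of AF_IUCV on z/VM IUCV because the af_iucv changes that follow add a second transport over HiperSockets (qeth layer-3) devices while keeping the socket API unchanged. For orientation, a minimal userspace client could look like the sketch below; the sockaddr_iucv layout and the AF_IUCV family number are assumptions reconstructed from the bind()/connect() paths in this patch (8-byte, blank-padded user id and application name), not copied from an exported header.

/* Hypothetical AF_IUCV client.  The sockaddr_iucv layout and the AF_IUCV
 * family number below are assumptions inferred from the bind()/connect()
 * code in this patch; check them against the af_iucv headers of the
 * target kernel before relying on them.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

#ifndef AF_IUCV
#define AF_IUCV 32                       /* assumed address family number */
#endif

struct sockaddr_iucv {                   /* assumed layout, see note above */
	sa_family_t    siucv_family;
	unsigned short siucv_port;       /* reserved */
	unsigned int   siucv_addr;       /* reserved */
	char           siucv_nodeid[8];  /* reserved */
	char           siucv_user_id[8]; /* peer user id, blank padded */
	char           siucv_name[8];    /* application name, blank padded */
};

int main(void)
{
	struct sockaddr_iucv peer;
	int fd = socket(AF_IUCV, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket(AF_IUCV)");   /* fails on non-s390 or without af_iucv */
		return 1;
	}

	memset(&peer, 0, sizeof(peer));
	peer.siucv_family = AF_IUCV;
	memcpy(peer.siucv_user_id, "LNXPEER ", 8);   /* hypothetical peer guest   */
	memcpy(peer.siucv_name,    "APPSRV  ", 8);   /* hypothetical service name */

	if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0) {
		perror("connect");
		close(fd);
		return 1;
	}

	/* from here on, plain send()/recv() run over z/VM IUCV or HiperSockets */
	close(fd);
	return 0;
}

Note that on the new HiperSockets transport the patched iucv_sock_connect() refuses to autobind (it returns -EBADFD for an unbound socket), so an application using that transport would first bind() to its own siucv_user_id/siucv_name before issuing the connect() shown here; the classic z/VM IUCV transport keeps the old autobind behaviour.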
config AFIUCV - tristate "AF_IUCV support (S390 - z/VM only)" - depends on IUCV + depends on S390 + def_tristate m if QETH_L3 || IUCV + prompt "AF_IUCV Socket support (S390 - z/VM and HiperSockets transport)" help - Select this option if you want to use inter-user communication under - VM or VIF sockets. If you run on z/VM, say "Y" to enable a fast - communication link between VM guests. + Select this option if you want to use AF_IUCV socket applications + based on z/VM inter-user communication vehicle or based on + HiperSockets. diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index e2013e4..c39f3a4 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -27,10 +27,9 @@ #include <asm/cpcmd.h> #include <linux/kmod.h> -#include <net/iucv/iucv.h> #include <net/iucv/af_iucv.h> -#define VERSION "1.1" +#define VERSION "1.2" static char iucv_userid[80]; @@ -42,6 +41,8 @@ static struct proto iucv_proto = { .obj_size = sizeof(struct iucv_sock), }; +static struct iucv_interface *pr_iucv; + /* special AF_IUCV IPRM messages */ static const u8 iprm_shutdown[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}; @@ -90,6 +91,12 @@ do { \ static void iucv_sock_kill(struct sock *sk); static void iucv_sock_close(struct sock *sk); +static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev); +static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, + struct sk_buff *skb, u8 flags); +static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify); + /* Call Back functions */ static void iucv_callback_rx(struct iucv_path *, struct iucv_message *); static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *); @@ -165,7 +172,7 @@ static int afiucv_pm_freeze(struct device *dev) case IUCV_CLOSING: case IUCV_CONNECTED: if (iucv->path) { - err = iucv_path_sever(iucv->path, NULL); + err = pr_iucv->path_sever(iucv->path, NULL); iucv_path_free(iucv->path); iucv->path = NULL; } @@ -229,7 +236,7 @@ static const struct dev_pm_ops afiucv_pm_ops = { static struct device_driver af_iucv_driver = { .owner = THIS_MODULE, .name = "afiucv", - .bus = &iucv_bus, + .bus = NULL, .pm = &afiucv_pm_ops, }; @@ -294,7 +301,11 @@ static inline int iucv_below_msglim(struct sock *sk) if (sk->sk_state != IUCV_CONNECTED) return 1; - return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim); + if (iucv->transport == AF_IUCV_TRANS_IUCV) + return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim); + else + return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) && + (atomic_read(&iucv->pendings) <= 0)); } /** @@ -312,6 +323,79 @@ static void iucv_sock_wake_msglim(struct sock *sk) rcu_read_unlock(); } +/** + * afiucv_hs_send() - send a message through HiperSockets transport + */ +static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, + struct sk_buff *skb, u8 flags) +{ + struct net *net = sock_net(sock); + struct iucv_sock *iucv = iucv_sk(sock); + struct af_iucv_trans_hdr *phs_hdr; + struct sk_buff *nskb; + int err, confirm_recv = 0; + + memset(skb->head, 0, ETH_HLEN); + phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb, + sizeof(struct af_iucv_trans_hdr)); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_push(skb, ETH_HLEN); + skb_reset_mac_header(skb); + memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr)); + + phs_hdr->magic = ETH_P_AF_IUCV; + phs_hdr->version = 1; + phs_hdr->flags = flags; + if (flags == AF_IUCV_FLAG_SYN) + phs_hdr->window = iucv->msglimit; + else if ((flags 
== AF_IUCV_FLAG_WIN) || !flags) { + confirm_recv = atomic_read(&iucv->msg_recv); + phs_hdr->window = confirm_recv; + if (confirm_recv) + phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN; + } + memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8); + memcpy(phs_hdr->destAppName, iucv->dst_name, 8); + memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8); + memcpy(phs_hdr->srcAppName, iucv->src_name, 8); + ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID)); + ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName)); + ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID)); + ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName)); + if (imsg) + memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message)); + + rcu_read_lock(); + skb->dev = dev_get_by_index_rcu(net, sock->sk_bound_dev_if); + rcu_read_unlock(); + if (!skb->dev) + return -ENODEV; + if (!(skb->dev->flags & IFF_UP)) + return -ENETDOWN; + if (skb->len > skb->dev->mtu) { + if (sock->sk_type == SOCK_SEQPACKET) + return -EMSGSIZE; + else + skb_trim(skb, skb->dev->mtu); + } + skb->protocol = ETH_P_AF_IUCV; + skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF; + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + return -ENOMEM; + skb_queue_tail(&iucv->send_skb_q, nskb); + err = dev_queue_xmit(skb); + if (err) { + skb_unlink(nskb, &iucv->send_skb_q); + kfree_skb(nskb); + } else { + atomic_sub(confirm_recv, &iucv->msg_recv); + WARN_ON(atomic_read(&iucv->msg_recv) < 0); + } + return err; +} + /* Timers */ static void iucv_sock_timeout(unsigned long arg) { @@ -380,6 +464,8 @@ static void iucv_sock_close(struct sock *sk) unsigned char user_data[16]; struct iucv_sock *iucv = iucv_sk(sk); unsigned long timeo; + int err, blen; + struct sk_buff *skb; iucv_sock_clear_timer(sk); lock_sock(sk); @@ -390,6 +476,20 @@ static void iucv_sock_close(struct sock *sk) break; case IUCV_CONNECTED: + if (iucv->transport == AF_IUCV_TRANS_HIPER) { + /* send fin */ + blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN; + skb = sock_alloc_send_skb(sk, blen, 1, &err); + if (skb) { + skb_reserve(skb, + sizeof(struct af_iucv_trans_hdr) + + ETH_HLEN); + err = afiucv_hs_send(NULL, sk, skb, + AF_IUCV_FLAG_FIN); + } + sk->sk_state = IUCV_DISCONN; + sk->sk_state_change(sk); + } case IUCV_DISCONN: sk->sk_state = IUCV_CLOSING; sk->sk_state_change(sk); @@ -412,7 +512,7 @@ static void iucv_sock_close(struct sock *sk) low_nmcpy(user_data, iucv->src_name); high_nmcpy(user_data, iucv->dst_name); ASCEBC(user_data, sizeof(user_data)); - iucv_path_sever(iucv->path, user_data); + pr_iucv->path_sever(iucv->path, user_data); iucv_path_free(iucv->path); iucv->path = NULL; } @@ -444,23 +544,33 @@ static void iucv_sock_init(struct sock *sk, struct sock *parent) static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio) { struct sock *sk; + struct iucv_sock *iucv; sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto); if (!sk) return NULL; + iucv = iucv_sk(sk); sock_init_data(sock, sk); - INIT_LIST_HEAD(&iucv_sk(sk)->accept_q); - spin_lock_init(&iucv_sk(sk)->accept_q_lock); - skb_queue_head_init(&iucv_sk(sk)->send_skb_q); - INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list); - spin_lock_init(&iucv_sk(sk)->message_q.lock); - skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q); - iucv_sk(sk)->send_tag = 0; - iucv_sk(sk)->flags = 0; - iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT; - iucv_sk(sk)->path = NULL; - memset(&iucv_sk(sk)->src_user_id , 0, 32); + INIT_LIST_HEAD(&iucv->accept_q); + spin_lock_init(&iucv->accept_q_lock); + skb_queue_head_init(&iucv->send_skb_q); + 
INIT_LIST_HEAD(&iucv->message_q.list); + spin_lock_init(&iucv->message_q.lock); + skb_queue_head_init(&iucv->backlog_skb_q); + iucv->send_tag = 0; + atomic_set(&iucv->pendings, 0); + iucv->flags = 0; + iucv->msglimit = 0; + atomic_set(&iucv->msg_sent, 0); + atomic_set(&iucv->msg_recv, 0); + iucv->path = NULL; + iucv->sk_txnotify = afiucv_hs_callback_txnotify; + memset(&iucv->src_user_id , 0, 32); + if (pr_iucv) + iucv->transport = AF_IUCV_TRANS_IUCV; + else + iucv->transport = AF_IUCV_TRANS_HIPER; sk->sk_destruct = iucv_sock_destruct; sk->sk_sndtimeo = IUCV_CONN_TIMEOUT; @@ -591,7 +701,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr, struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; struct sock *sk = sock->sk; struct iucv_sock *iucv; - int err; + int err = 0; + struct net_device *dev; + char uid[9]; /* Verify the input sockaddr */ if (!addr || addr->sa_family != AF_IUCV) @@ -610,19 +722,46 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr, err = -EADDRINUSE; goto done_unlock; } - if (iucv->path) { - err = 0; + if (iucv->path) goto done_unlock; - } /* Bind the socket */ - memcpy(iucv->src_name, sa->siucv_name, 8); - /* Copy the user id */ - memcpy(iucv->src_user_id, iucv_userid, 8); - sk->sk_state = IUCV_BOUND; - err = 0; + if (pr_iucv) + if (!memcmp(sa->siucv_user_id, iucv_userid, 8)) + goto vm_bind; /* VM IUCV transport */ + /* try hiper transport */ + memcpy(uid, sa->siucv_user_id, sizeof(uid)); + ASCEBC(uid, 8); + rcu_read_lock(); + for_each_netdev_rcu(&init_net, dev) { + if (!memcmp(dev->perm_addr, uid, 8)) { + memcpy(iucv->src_name, sa->siucv_name, 8); + memcpy(iucv->src_user_id, sa->siucv_user_id, 8); + sock->sk->sk_bound_dev_if = dev->ifindex; + sk->sk_state = IUCV_BOUND; + iucv->transport = AF_IUCV_TRANS_HIPER; + if (!iucv->msglimit) + iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT; + rcu_read_unlock(); + goto done_unlock; + } + } + rcu_read_unlock(); +vm_bind: + if (pr_iucv) { + /* use local userid for backward compat */ + memcpy(iucv->src_name, sa->siucv_name, 8); + memcpy(iucv->src_user_id, iucv_userid, 8); + sk->sk_state = IUCV_BOUND; + iucv->transport = AF_IUCV_TRANS_IUCV; + if (!iucv->msglimit) + iucv->msglimit = IUCV_QUEUELEN_DEFAULT; + goto done_unlock; + } + /* found no dev to bind */ + err = -ENODEV; done_unlock: /* Release the socket list lock */ write_unlock_bh(&iucv_sk_list.lock); @@ -658,45 +797,44 @@ static int iucv_sock_autobind(struct sock *sk) memcpy(&iucv->src_name, name, 8); + if (!iucv->msglimit) + iucv->msglimit = IUCV_QUEUELEN_DEFAULT; + return err; } -/* Connect an unconnected socket */ -static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr, - int alen, int flags) +static int afiucv_hs_connect(struct socket *sock) { - struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; struct sock *sk = sock->sk; - struct iucv_sock *iucv; - unsigned char user_data[16]; - int err; - - if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv)) - return -EINVAL; - - if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND) - return -EBADFD; - - if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET) - return -EINVAL; + struct sk_buff *skb; + int blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN; + int err = 0; - if (sk->sk_state == IUCV_OPEN) { - err = iucv_sock_autobind(sk); - if (unlikely(err)) - return err; + /* send syn */ + skb = sock_alloc_send_skb(sk, blen, 1, &err); + if (!skb) { + err = -ENOMEM; + goto done; } + skb->dev = NULL; + skb_reserve(skb, blen); + err = 
afiucv_hs_send(NULL, sk, skb, AF_IUCV_FLAG_SYN); +done: + return err; +} - lock_sock(sk); - - /* Set the destination information */ - memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8); - memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8); +static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr) +{ + struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; + struct sock *sk = sock->sk; + struct iucv_sock *iucv = iucv_sk(sk); + unsigned char user_data[16]; + int err; high_nmcpy(user_data, sa->siucv_name); - low_nmcpy(user_data, iucv_sk(sk)->src_name); + low_nmcpy(user_data, iucv->src_name); ASCEBC(user_data, sizeof(user_data)); - iucv = iucv_sk(sk); /* Create path. */ iucv->path = iucv_path_alloc(iucv->msglimit, IUCV_IPRMDATA, GFP_KERNEL); @@ -704,8 +842,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr, err = -ENOMEM; goto done; } - err = iucv_path_connect(iucv->path, &af_iucv_handler, - sa->siucv_user_id, NULL, user_data, sk); + err = pr_iucv->path_connect(iucv->path, &af_iucv_handler, + sa->siucv_user_id, NULL, user_data, + sk); if (err) { iucv_path_free(iucv->path); iucv->path = NULL; @@ -724,21 +863,62 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr, err = -ECONNREFUSED; break; } - goto done; } +done: + return err; +} - if (sk->sk_state != IUCV_CONNECTED) { +/* Connect an unconnected socket */ +static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr, + int alen, int flags) +{ + struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; + struct sock *sk = sock->sk; + struct iucv_sock *iucv = iucv_sk(sk); + int err; + + if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv)) + return -EINVAL; + + if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND) + return -EBADFD; + + if (sk->sk_state == IUCV_OPEN && + iucv->transport == AF_IUCV_TRANS_HIPER) + return -EBADFD; /* explicit bind required */ + + if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET) + return -EINVAL; + + if (sk->sk_state == IUCV_OPEN) { + err = iucv_sock_autobind(sk); + if (unlikely(err)) + return err; + } + + lock_sock(sk); + + /* Set the destination information */ + memcpy(iucv->dst_user_id, sa->siucv_user_id, 8); + memcpy(iucv->dst_name, sa->siucv_name, 8); + + if (iucv->transport == AF_IUCV_TRANS_HIPER) + err = afiucv_hs_connect(sock); + else + err = afiucv_path_connect(sock, addr); + if (err) + goto done; + + if (sk->sk_state != IUCV_CONNECTED) err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED, IUCV_DISCONN), sock_sndtimeo(sk, flags & O_NONBLOCK)); - } - if (sk->sk_state == IUCV_DISCONN) { + if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED) err = -ECONNREFUSED; - } - if (err) { - iucv_path_sever(iucv->path, NULL); + if (err && iucv->transport == AF_IUCV_TRANS_IUCV) { + pr_iucv->path_sever(iucv->path, NULL); iucv_path_free(iucv->path); iucv->path = NULL; } @@ -833,20 +1013,21 @@ static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr, { struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr; struct sock *sk = sock->sk; + struct iucv_sock *iucv = iucv_sk(sk); addr->sa_family = AF_IUCV; *len = sizeof(struct sockaddr_iucv); if (peer) { - memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8); - memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8); + memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8); + memcpy(siucv->siucv_name, iucv->dst_name, 8); } else { - memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8); - memcpy(siucv->siucv_name, 
iucv_sk(sk)->src_name, 8); + memcpy(siucv->siucv_user_id, iucv->src_user_id, 8); + memcpy(siucv->siucv_name, iucv->src_name, 8); } memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port)); memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr)); - memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid)); + memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid)); return 0; } @@ -871,7 +1052,7 @@ static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg, memcpy(prmdata, (void *) skb->data, skb->len); prmdata[7] = 0xff - (u8) skb->len; - return iucv_message_send(path, msg, IUCV_IPRMDATA, 0, + return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0, (void *) prmdata, 8); } @@ -960,9 +1141,16 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, * this is fine for SOCK_SEQPACKET (unless we want to support * segmented records using the MSG_EOR flag), but * for SOCK_STREAM we might want to improve it in future */ - skb = sock_alloc_send_skb(sk, len, noblock, &err); + if (iucv->transport == AF_IUCV_TRANS_HIPER) + skb = sock_alloc_send_skb(sk, + len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN, + noblock, &err); + else + skb = sock_alloc_send_skb(sk, len, noblock, &err); if (!skb) goto out; + if (iucv->transport == AF_IUCV_TRANS_HIPER) + skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN); if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { err = -EFAULT; goto fail; @@ -983,6 +1171,15 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, /* increment and save iucv message tag for msg_completion cbk */ txmsg.tag = iucv->send_tag++; memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); + if (iucv->transport == AF_IUCV_TRANS_HIPER) { + atomic_inc(&iucv->msg_sent); + err = afiucv_hs_send(&txmsg, sk, skb, 0); + if (err) { + atomic_dec(&iucv->msg_sent); + goto fail; + } + goto release; + } skb_queue_tail(&iucv->send_skb_q, skb); if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) @@ -999,13 +1196,13 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, /* this error should never happen since the * IUCV_IPRMDATA path flag is set... 
sever path */ if (err == 0x15) { - iucv_path_sever(iucv->path, NULL); + pr_iucv->path_sever(iucv->path, NULL); skb_unlink(skb, &iucv->send_skb_q); err = -EPIPE; goto fail; } } else - err = iucv_message_send(iucv->path, &txmsg, 0, 0, + err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0, (void *) skb->data, skb->len); if (err) { if (err == 3) { @@ -1023,6 +1220,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, goto fail; } +release: release_sock(sk); return len; @@ -1095,8 +1293,9 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb, skb->len = 0; } } else { - rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA, - skb->data, len, NULL); + rc = pr_iucv->message_receive(path, msg, + msg->flags & IUCV_IPRMDATA, + skb->data, len, NULL); if (rc) { kfree_skb(skb); return; @@ -1110,7 +1309,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb, kfree_skb(skb); skb = NULL; if (rc) { - iucv_path_sever(path, NULL); + pr_iucv->path_sever(path, NULL); return; } skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q); @@ -1154,7 +1353,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sock *sk = sock->sk; struct iucv_sock *iucv = iucv_sk(sk); unsigned int copied, rlen; - struct sk_buff *skb, *rskb, *cskb; + struct sk_buff *skb, *rskb, *cskb, *sskb; + int blen; int err = 0; if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) && @@ -1179,7 +1379,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, copied = min_t(unsigned int, rlen, len); cskb = skb; - if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) { + if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) { if (!(flags & MSG_PEEK)) skb_queue_head(&sk->sk_receive_queue, skb); return -EFAULT; @@ -1217,6 +1417,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, } kfree_skb(skb); + atomic_inc(&iucv->msg_recv); /* Queue backlog skbs */ spin_lock_bh(&iucv->message_q.lock); @@ -1233,6 +1434,24 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, if (skb_queue_empty(&iucv->backlog_skb_q)) { if (!list_empty(&iucv->message_q.list)) iucv_process_message_q(sk); + if (atomic_read(&iucv->msg_recv) >= + iucv->msglimit / 2) { + /* send WIN to peer */ + blen = sizeof(struct af_iucv_trans_hdr) + + ETH_HLEN; + sskb = sock_alloc_send_skb(sk, blen, 1, &err); + if (sskb) { + skb_reserve(sskb, + sizeof(struct af_iucv_trans_hdr) + + ETH_HLEN); + err = afiucv_hs_send(NULL, sk, sskb, + AF_IUCV_FLAG_WIN); + } + if (err) { + sk->sk_state = IUCV_DISCONN; + sk->sk_state_change(sk); + } + } } spin_unlock_bh(&iucv->message_q.lock); } @@ -1327,8 +1546,8 @@ static int iucv_sock_shutdown(struct socket *sock, int how) if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) { txmsg.class = 0; txmsg.tag = 0; - err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0, - (void *) iprm_shutdown, 8); + err = pr_iucv->message_send(iucv->path, &txmsg, IUCV_IPRMDATA, + 0, (void *) iprm_shutdown, 8); if (err) { switch (err) { case 1: @@ -1345,7 +1564,7 @@ static int iucv_sock_shutdown(struct socket *sock, int how) } if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) { - err = iucv_path_quiesce(iucv_sk(sk)->path, NULL); + err = pr_iucv->path_quiesce(iucv->path, NULL); if (err) err = -ENOTCONN; @@ -1372,7 +1591,7 @@ static int iucv_sock_release(struct socket *sock) /* Unregister with IUCV base support */ if (iucv_sk(sk)->path) { - iucv_path_sever(iucv_sk(sk)->path, NULL); + pr_iucv->path_sever(iucv_sk(sk)->path, 
NULL); iucv_path_free(iucv_sk(sk)->path); iucv_sk(sk)->path = NULL; } @@ -1514,14 +1733,14 @@ static int iucv_callback_connreq(struct iucv_path *path, high_nmcpy(user_data, iucv->dst_name); ASCEBC(user_data, sizeof(user_data)); if (sk->sk_state != IUCV_LISTEN) { - err = iucv_path_sever(path, user_data); + err = pr_iucv->path_sever(path, user_data); iucv_path_free(path); goto fail; } /* Check for backlog size */ if (sk_acceptq_is_full(sk)) { - err = iucv_path_sever(path, user_data); + err = pr_iucv->path_sever(path, user_data); iucv_path_free(path); goto fail; } @@ -1529,7 +1748,7 @@ static int iucv_callback_connreq(struct iucv_path *path, /* Create the new socket */ nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC); if (!nsk) { - err = iucv_path_sever(path, user_data); + err = pr_iucv->path_sever(path, user_data); iucv_path_free(path); goto fail; } @@ -1553,9 +1772,9 @@ static int iucv_callback_connreq(struct iucv_path *path, /* set message limit for path based on msglimit of accepting socket */ niucv->msglimit = iucv->msglimit; path->msglim = iucv->msglimit; - err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk); + err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk); if (err) { - err = iucv_path_sever(path, user_data); + err = pr_iucv->path_sever(path, user_data); iucv_path_free(path); iucv_sock_kill(nsk); goto fail; @@ -1589,7 +1808,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) int len; if (sk->sk_shutdown & RCV_SHUTDOWN) { - iucv_message_reject(path, msg); + pr_iucv->message_reject(path, msg); return; } @@ -1692,6 +1911,389 @@ static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16]) bh_unlock_sock(sk); } +/***************** HiperSockets transport callbacks ********************/ +static void afiucv_swap_src_dest(struct sk_buff *skb) +{ + struct af_iucv_trans_hdr *trans_hdr = + (struct af_iucv_trans_hdr *)skb->data; + char tmpID[8]; + char tmpName[8]; + + ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID)); + ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName)); + ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID)); + ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName)); + memcpy(tmpID, trans_hdr->srcUserID, 8); + memcpy(tmpName, trans_hdr->srcAppName, 8); + memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8); + memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8); + memcpy(trans_hdr->destUserID, tmpID, 8); + memcpy(trans_hdr->destAppName, tmpName, 8); + skb_push(skb, ETH_HLEN); + memset(skb->data, 0, ETH_HLEN); +} + +/** + * afiucv_hs_callback_syn - react on received SYN + **/ +static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb) +{ + struct sock *nsk; + struct iucv_sock *iucv, *niucv; + struct af_iucv_trans_hdr *trans_hdr; + int err; + + iucv = iucv_sk(sk); + trans_hdr = (struct af_iucv_trans_hdr *)skb->data; + if (!iucv) { + /* no sock - connection refused */ + afiucv_swap_src_dest(skb); + trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN; + err = dev_queue_xmit(skb); + goto out; + } + + nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC); + bh_lock_sock(sk); + if ((sk->sk_state != IUCV_LISTEN) || + sk_acceptq_is_full(sk) || + !nsk) { + /* error on server socket - connection refused */ + if (nsk) + sk_free(nsk); + afiucv_swap_src_dest(skb); + trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN; + err = dev_queue_xmit(skb); + bh_unlock_sock(sk); + goto out; + } + + niucv = iucv_sk(nsk); + iucv_sock_init(nsk, sk); + niucv->transport = 
AF_IUCV_TRANS_HIPER; + niucv->msglimit = iucv->msglimit; + if (!trans_hdr->window) + niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT; + else + niucv->msglimit_peer = trans_hdr->window; + memcpy(niucv->dst_name, trans_hdr->srcAppName, 8); + memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8); + memcpy(niucv->src_name, iucv->src_name, 8); + memcpy(niucv->src_user_id, iucv->src_user_id, 8); + nsk->sk_bound_dev_if = sk->sk_bound_dev_if; + afiucv_swap_src_dest(skb); + trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK; + trans_hdr->window = niucv->msglimit; + /* if receiver acks the xmit connection is established */ + err = dev_queue_xmit(skb); + if (!err) { + iucv_accept_enqueue(sk, nsk); + nsk->sk_state = IUCV_CONNECTED; + sk->sk_data_ready(sk, 1); + } else + iucv_sock_kill(nsk); + bh_unlock_sock(sk); + +out: + return NET_RX_SUCCESS; +} + +/** + * afiucv_hs_callback_synack() - react on received SYN-ACK + **/ +static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb) +{ + struct iucv_sock *iucv = iucv_sk(sk); + struct af_iucv_trans_hdr *trans_hdr = + (struct af_iucv_trans_hdr *)skb->data; + + if (!iucv) + goto out; + if (sk->sk_state != IUCV_BOUND) + goto out; + bh_lock_sock(sk); + iucv->msglimit_peer = trans_hdr->window; + sk->sk_state = IUCV_CONNECTED; + sk->sk_state_change(sk); + bh_unlock_sock(sk); +out: + kfree_skb(skb); + return NET_RX_SUCCESS; +} + +/** + * afiucv_hs_callback_synfin() - react on received SYN_FIN + **/ +static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb) +{ + struct iucv_sock *iucv = iucv_sk(sk); + + if (!iucv) + goto out; + if (sk->sk_state != IUCV_BOUND) + goto out; + bh_lock_sock(sk); + sk->sk_state = IUCV_DISCONN; + sk->sk_state_change(sk); + bh_unlock_sock(sk); +out: + kfree_skb(skb); + return NET_RX_SUCCESS; +} + +/** + * afiucv_hs_callback_fin() - react on received FIN + **/ +static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb) +{ + struct iucv_sock *iucv = iucv_sk(sk); + + /* other end of connection closed */ + if (iucv) { + bh_lock_sock(sk); + if (!list_empty(&iucv->accept_q)) + sk->sk_state = IUCV_SEVERED; + else + sk->sk_state = IUCV_DISCONN; + sk->sk_state_change(sk); + bh_unlock_sock(sk); + } + kfree_skb(skb); + return NET_RX_SUCCESS; +} + +/** + * afiucv_hs_callback_win() - react on received WIN + **/ +static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb) +{ + struct iucv_sock *iucv = iucv_sk(sk); + struct af_iucv_trans_hdr *trans_hdr = + (struct af_iucv_trans_hdr *)skb->data; + + if (!iucv) + return NET_RX_SUCCESS; + + if (sk->sk_state != IUCV_CONNECTED) + return NET_RX_SUCCESS; + + atomic_sub(trans_hdr->window, &iucv->msg_sent); + iucv_sock_wake_msglim(sk); + return NET_RX_SUCCESS; +} + +/** + * afiucv_hs_callback_rx() - react on received data + **/ +static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) +{ + struct iucv_sock *iucv = iucv_sk(sk); + + if (!iucv) { + kfree_skb(skb); + return NET_RX_SUCCESS; + } + + if (sk->sk_state != IUCV_CONNECTED) { + kfree_skb(skb); + return NET_RX_SUCCESS; + } + + /* write stuff from iucv_msg to skb cb */ + if (skb->len <= sizeof(struct af_iucv_trans_hdr)) { + kfree_skb(skb); + return NET_RX_SUCCESS; + } + skb_pull(skb, sizeof(struct af_iucv_trans_hdr)); + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + spin_lock(&iucv->message_q.lock); + if (skb_queue_empty(&iucv->backlog_skb_q)) { + if (sock_queue_rcv_skb(sk, skb)) { + /* handle rcv queue full */ + skb_queue_tail(&iucv->backlog_skb_q, skb); + } + } 
else + skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb); + spin_unlock(&iucv->message_q.lock); + return NET_RX_SUCCESS; +} + +/** + * afiucv_hs_rcv() - base function for arriving data through HiperSockets + * transport + * called from netif RX softirq + **/ +static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev) +{ + struct hlist_node *node; + struct sock *sk; + struct iucv_sock *iucv; + struct af_iucv_trans_hdr *trans_hdr; + char nullstring[8]; + int err = 0; + + skb_pull(skb, ETH_HLEN); + trans_hdr = (struct af_iucv_trans_hdr *)skb->data; + EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName)); + EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID)); + EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName)); + EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID)); + memset(nullstring, 0, sizeof(nullstring)); + iucv = NULL; + sk = NULL; + read_lock(&iucv_sk_list.lock); + sk_for_each(sk, node, &iucv_sk_list.head) { + if (trans_hdr->flags == AF_IUCV_FLAG_SYN) { + if ((!memcmp(&iucv_sk(sk)->src_name, + trans_hdr->destAppName, 8)) && + (!memcmp(&iucv_sk(sk)->src_user_id, + trans_hdr->destUserID, 8)) && + (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) && + (!memcmp(&iucv_sk(sk)->dst_user_id, + nullstring, 8))) { + iucv = iucv_sk(sk); + break; + } + } else { + if ((!memcmp(&iucv_sk(sk)->src_name, + trans_hdr->destAppName, 8)) && + (!memcmp(&iucv_sk(sk)->src_user_id, + trans_hdr->destUserID, 8)) && + (!memcmp(&iucv_sk(sk)->dst_name, + trans_hdr->srcAppName, 8)) && + (!memcmp(&iucv_sk(sk)->dst_user_id, + trans_hdr->srcUserID, 8))) { + iucv = iucv_sk(sk); + break; + } + } + } + read_unlock(&iucv_sk_list.lock); + if (!iucv) + sk = NULL; + + /* no sock + how should we send with no sock + 1) send without sock no send rc checking? + 2) introduce default sock to handle this cases + + SYN -> send SYN|ACK in good case, send SYN|FIN in bad case + data -> send FIN + SYN|ACK, SYN|FIN, FIN -> no action? 
*/ + + switch (trans_hdr->flags) { + case AF_IUCV_FLAG_SYN: + /* connect request */ + err = afiucv_hs_callback_syn(sk, skb); + break; + case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK): + /* connect request confirmed */ + err = afiucv_hs_callback_synack(sk, skb); + break; + case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN): + /* connect request refused */ + err = afiucv_hs_callback_synfin(sk, skb); + break; + case (AF_IUCV_FLAG_FIN): + /* close request */ + err = afiucv_hs_callback_fin(sk, skb); + break; + case (AF_IUCV_FLAG_WIN): + err = afiucv_hs_callback_win(sk, skb); + if (skb->len > sizeof(struct af_iucv_trans_hdr)) + err = afiucv_hs_callback_rx(sk, skb); + else + kfree(skb); + break; + case 0: + /* plain data frame */ + err = afiucv_hs_callback_rx(sk, skb); + break; + default: + ; + } + + return err; +} + +/** + * afiucv_hs_callback_txnotify() - handle send notifcations from HiperSockets + * transport + **/ +static void afiucv_hs_callback_txnotify(struct sk_buff *skb, + enum iucv_tx_notify n) +{ + struct sock *isk = skb->sk; + struct sock *sk = NULL; + struct iucv_sock *iucv = NULL; + struct sk_buff_head *list; + struct sk_buff *list_skb; + struct sk_buff *this = NULL; + unsigned long flags; + struct hlist_node *node; + + read_lock(&iucv_sk_list.lock); + sk_for_each(sk, node, &iucv_sk_list.head) + if (sk == isk) { + iucv = iucv_sk(sk); + break; + } + read_unlock(&iucv_sk_list.lock); + + if (!iucv) + return; + + bh_lock_sock(sk); + list = &iucv->send_skb_q; + list_skb = list->next; + if (skb_queue_empty(list)) + goto out_unlock; + + spin_lock_irqsave(&list->lock, flags); + while (list_skb != (struct sk_buff *)list) { + if (skb_shinfo(list_skb) == skb_shinfo(skb)) { + this = list_skb; + switch (n) { + case TX_NOTIFY_OK: + __skb_unlink(this, list); + iucv_sock_wake_msglim(sk); + kfree_skb(this); + break; + case TX_NOTIFY_PENDING: + atomic_inc(&iucv->pendings); + break; + case TX_NOTIFY_DELAYED_OK: + __skb_unlink(this, list); + atomic_dec(&iucv->pendings); + if (atomic_read(&iucv->pendings) <= 0) + iucv_sock_wake_msglim(sk); + kfree_skb(this); + break; + case TX_NOTIFY_UNREACHABLE: + case TX_NOTIFY_DELAYED_UNREACHABLE: + case TX_NOTIFY_TPQFULL: /* not yet used */ + case TX_NOTIFY_GENERALERROR: + case TX_NOTIFY_DELAYED_GENERALERROR: + __skb_unlink(this, list); + kfree_skb(this); + if (!list_empty(&iucv->accept_q)) + sk->sk_state = IUCV_SEVERED; + else + sk->sk_state = IUCV_DISCONN; + sk->sk_state_change(sk); + break; + } + break; + } + list_skb = list_skb->next; + } + spin_unlock_irqrestore(&list->lock, flags); + +out_unlock: + bh_unlock_sock(sk); +} static const struct proto_ops iucv_sock_ops = { .family = PF_IUCV, .owner = THIS_MODULE, @@ -1718,71 +2320,104 @@ static const struct net_proto_family iucv_sock_family_ops = { .create = iucv_sock_create, }; -static int __init afiucv_init(void) +static struct packet_type iucv_packet_type = { + .type = cpu_to_be16(ETH_P_AF_IUCV), + .func = afiucv_hs_rcv, +}; + +static int afiucv_iucv_init(void) { int err; - if (!MACHINE_IS_VM) { - pr_err("The af_iucv module cannot be loaded" - " without z/VM\n"); - err = -EPROTONOSUPPORT; - goto out; - } - cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err); - if (unlikely(err)) { - WARN_ON(err); - err = -EPROTONOSUPPORT; - goto out; - } - - err = iucv_register(&af_iucv_handler, 0); + err = pr_iucv->iucv_register(&af_iucv_handler, 0); if (err) goto out; - err = proto_register(&iucv_proto, 0); - if (err) - goto out_iucv; - err = sock_register(&iucv_sock_family_ops); - if (err) - goto out_proto; /* establish dummy 
device */ + af_iucv_driver.bus = pr_iucv->bus; err = driver_register(&af_iucv_driver); if (err) - goto out_sock; + goto out_iucv; af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL); if (!af_iucv_dev) { err = -ENOMEM; goto out_driver; } dev_set_name(af_iucv_dev, "af_iucv"); - af_iucv_dev->bus = &iucv_bus; - af_iucv_dev->parent = iucv_root; + af_iucv_dev->bus = pr_iucv->bus; + af_iucv_dev->parent = pr_iucv->root; af_iucv_dev->release = (void (*)(struct device *))kfree; af_iucv_dev->driver = &af_iucv_driver; err = device_register(af_iucv_dev); if (err) goto out_driver; - return 0; out_driver: driver_unregister(&af_iucv_driver); +out_iucv: + pr_iucv->iucv_unregister(&af_iucv_handler, 0); +out: + return err; +} + +static int __init afiucv_init(void) +{ + int err; + + if (MACHINE_IS_VM) { + cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err); + if (unlikely(err)) { + WARN_ON(err); + err = -EPROTONOSUPPORT; + goto out; + } + + pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv"); + if (!pr_iucv) { + printk(KERN_WARNING "iucv_if lookup failed\n"); + memset(&iucv_userid, 0, sizeof(iucv_userid)); + } + } else { + memset(&iucv_userid, 0, sizeof(iucv_userid)); + pr_iucv = NULL; + } + + err = proto_register(&iucv_proto, 0); + if (err) + goto out; + err = sock_register(&iucv_sock_family_ops); + if (err) + goto out_proto; + + if (pr_iucv) { + err = afiucv_iucv_init(); + if (err) + goto out_sock; + } + dev_add_pack(&iucv_packet_type); + return 0; + out_sock: sock_unregister(PF_IUCV); out_proto: proto_unregister(&iucv_proto); -out_iucv: - iucv_unregister(&af_iucv_handler, 0); out: + if (pr_iucv) + symbol_put(iucv_if); return err; } static void __exit afiucv_exit(void) { - device_unregister(af_iucv_dev); - driver_unregister(&af_iucv_driver); + if (pr_iucv) { + device_unregister(af_iucv_dev); + driver_unregister(&af_iucv_driver); + pr_iucv->iucv_unregister(&af_iucv_handler, 0); + symbol_put(iucv_if); + } + dev_remove_pack(&iucv_packet_type); sock_unregister(PF_IUCV); proto_unregister(&iucv_proto); - iucv_unregister(&af_iucv_handler, 0); } module_init(afiucv_init); @@ -1793,3 +2428,4 @@ MODULE_DESCRIPTION("IUCV Sockets ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_IUCV); + diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 075a380..403be43 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -1974,6 +1974,27 @@ out: return rc; } +struct iucv_interface iucv_if = { + .message_receive = iucv_message_receive, + .__message_receive = __iucv_message_receive, + .message_reply = iucv_message_reply, + .message_reject = iucv_message_reject, + .message_send = iucv_message_send, + .__message_send = __iucv_message_send, + .message_send2way = iucv_message_send2way, + .message_purge = iucv_message_purge, + .path_accept = iucv_path_accept, + .path_connect = iucv_path_connect, + .path_quiesce = iucv_path_quiesce, + .path_resume = iucv_path_resume, + .path_sever = iucv_path_sever, + .iucv_register = iucv_register, + .iucv_unregister = iucv_unregister, + .bus = NULL, + .root = NULL, +}; +EXPORT_SYMBOL(iucv_if); + /** * iucv_init * @@ -2038,6 +2059,8 @@ static int __init iucv_init(void) rc = bus_register(&iucv_bus); if (rc) goto out_reboot; + iucv_if.root = iucv_root; + iucv_if.bus = &iucv_bus; return 0; out_reboot: diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index fd1aaf2..9b5bd8c 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -69,7 +69,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, if (!tid_rx) 
return; - rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], NULL); + RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL); #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n", @@ -340,7 +340,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, status = WLAN_STATUS_SUCCESS; /* activate it for RX */ - rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx); + RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx); if (timeout) mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout)); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 3d1b091..86f8f49 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -62,7 +62,7 @@ static int ieee80211_change_iface(struct wiphy *wiphy, if (type == NL80211_IFTYPE_AP_VLAN && params && params->use_4addr == 0) - rcu_assign_pointer(sdata->u.vlan.sta, NULL); + RCU_INIT_POINTER(sdata->u.vlan.sta, NULL); else if (type == NL80211_IFTYPE_STATION && params && params->use_4addr >= 0) sdata->u.mgd.use_4addr = params->use_4addr; @@ -542,7 +542,7 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata, sdata->vif.bss_conf.dtim_period = new->dtim_period; - rcu_assign_pointer(sdata->u.ap.beacon, new); + RCU_INIT_POINTER(sdata->u.ap.beacon, new); synchronize_rcu(); @@ -594,7 +594,7 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev) if (!old) return -ENOENT; - rcu_assign_pointer(sdata->u.ap.beacon, NULL); + RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); synchronize_rcu(); kfree(old); @@ -857,7 +857,7 @@ static int ieee80211_change_station(struct wiphy *wiphy, return -EBUSY; } - rcu_assign_pointer(vlansdata->u.vlan.sta, sta); + RCU_INIT_POINTER(vlansdata->u.vlan.sta, sta); } sta->sdata = vlansdata; diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 56c24ca..4f9235b 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -84,7 +84,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, drv_reset_tsf(local); skb = ifibss->skb; - rcu_assign_pointer(ifibss->presp, NULL); + RCU_INIT_POINTER(ifibss->presp, NULL); synchronize_rcu(); skb->data = skb->head; skb->len = 0; @@ -184,7 +184,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, *pos++ = 0; /* U-APSD no in use */ } - rcu_assign_pointer(ifibss->presp, skb); + RCU_INIT_POINTER(ifibss->presp, skb); sdata->vif.bss_conf.beacon_int = beacon_int; sdata->vif.bss_conf.basic_rates = basic_rates; @@ -995,7 +995,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) kfree(sdata->u.ibss.ie); skb = rcu_dereference_protected(sdata->u.ibss.presp, lockdep_is_held(&sdata->u.ibss.mtx)); - rcu_assign_pointer(sdata->u.ibss.presp, NULL); + RCU_INIT_POINTER(sdata->u.ibss.presp, NULL); sdata->vif.bss_conf.ibss_joined = false; ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_IBSS); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 556e7e6..d10dc4d 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -456,7 +456,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, BSS_CHANGED_BEACON_ENABLED); /* remove beacon */ - rcu_assign_pointer(sdata->u.ap.beacon, NULL); + RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); synchronize_rcu(); kfree(old_beacon); @@ -645,7 +645,7 @@ static const struct net_device_ops ieee80211_dataif_ops = { .ndo_stop = ieee80211_stop, .ndo_uninit = ieee80211_teardown_sdata, .ndo_start_xmit = ieee80211_subif_start_xmit, - .ndo_set_multicast_list = 
ieee80211_set_multicast_list, + .ndo_set_rx_mode = ieee80211_set_multicast_list, .ndo_change_mtu = ieee80211_change_mtu, .ndo_set_mac_address = ieee80211_change_mac, .ndo_select_queue = ieee80211_netdev_select_queue, @@ -689,7 +689,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { .ndo_stop = ieee80211_stop, .ndo_uninit = ieee80211_teardown_sdata, .ndo_start_xmit = ieee80211_monitor_start_xmit, - .ndo_set_multicast_list = ieee80211_set_multicast_list, + .ndo_set_rx_mode = ieee80211_set_multicast_list, .ndo_change_mtu = ieee80211_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_select_queue = ieee80211_monitor_select_queue, diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 068ee651..dc7ae8d 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -843,6 +843,6 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata) void mesh_pathtbl_unregister(void) { /* no need for locking during exit path */ - mesh_table_free(rcu_dereference_raw(mesh_paths), true); - mesh_table_free(rcu_dereference_raw(mpp_paths), true); + mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true); + mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true); } diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 3db78b6..8b6ebee 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -72,7 +72,7 @@ static int sta_info_hash_del(struct ieee80211_local *local, if (!s) return -ENOENT; if (s == sta) { - rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], + RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)], s->hnext); return 0; } @@ -82,7 +82,7 @@ static int sta_info_hash_del(struct ieee80211_local *local, s = rcu_dereference_protected(s->hnext, lockdep_is_held(&local->sta_lock)); if (rcu_access_pointer(s->hnext)) { - rcu_assign_pointer(s->hnext, sta->hnext); + RCU_INIT_POINTER(s->hnext, sta->hnext); return 0; } @@ -184,7 +184,7 @@ static void sta_info_hash_add(struct ieee80211_local *local, struct sta_info *sta) { sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)]; - rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta); + RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)], sta); } static void sta_unblock(struct work_struct *wk) @@ -672,7 +672,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta) local->sta_generation++; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) - rcu_assign_pointer(sdata->u.vlan.sta, NULL); + RCU_INIT_POINTER(sdata->u.vlan.sta, NULL); if (sta->uploaded) { if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 899b71c..3346829 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -37,7 +37,7 @@ int nf_register_afinfo(const struct nf_afinfo *afinfo) err = mutex_lock_interruptible(&afinfo_mutex); if (err < 0) return err; - rcu_assign_pointer(nf_afinfo[afinfo->family], afinfo); + RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo); mutex_unlock(&afinfo_mutex); return 0; } @@ -46,7 +46,7 @@ EXPORT_SYMBOL_GPL(nf_register_afinfo); void nf_unregister_afinfo(const struct nf_afinfo *afinfo) { mutex_lock(&afinfo_mutex); - rcu_assign_pointer(nf_afinfo[afinfo->family], NULL); + RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL); mutex_unlock(&afinfo_mutex); synchronize_rcu(); } diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index f7af8b8..5acfaf5 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -779,7 +779,7 
@@ init_conntrack(struct net *net, struct nf_conn *tmpl, if (exp->helper) { help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); if (help) - rcu_assign_pointer(help->helper, exp->helper); + RCU_INIT_POINTER(help->helper, exp->helper); } #ifdef CONFIG_NF_CONNTRACK_MARK @@ -1317,7 +1317,7 @@ static void nf_conntrack_cleanup_net(struct net *net) void nf_conntrack_cleanup(struct net *net) { if (net_eq(net, &init_net)) - rcu_assign_pointer(ip_ct_attach, NULL); + RCU_INIT_POINTER(ip_ct_attach, NULL); /* This makes sure all current packets have passed through netfilter framework. Roll on, two-stage module @@ -1327,7 +1327,7 @@ void nf_conntrack_cleanup(struct net *net) nf_conntrack_cleanup_net(net); if (net_eq(net, &init_net)) { - rcu_assign_pointer(nf_ct_destroy, NULL); + RCU_INIT_POINTER(nf_ct_destroy, NULL); nf_conntrack_cleanup_init_net(); } } @@ -1576,11 +1576,11 @@ int nf_conntrack_init(struct net *net) if (net_eq(net, &init_net)) { /* For use by REJECT target */ - rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach); - rcu_assign_pointer(nf_ct_destroy, destroy_conntrack); + RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach); + RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack); /* Howto get NAT offsets */ - rcu_assign_pointer(nf_ct_nat_offset, NULL); + RCU_INIT_POINTER(nf_ct_nat_offset, NULL); } return 0; diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c index 63a1b91..3add994 100644 --- a/net/netfilter/nf_conntrack_ecache.c +++ b/net/netfilter/nf_conntrack_ecache.c @@ -94,7 +94,7 @@ int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new) ret = -EBUSY; goto out_unlock; } - rcu_assign_pointer(nf_conntrack_event_cb, new); + RCU_INIT_POINTER(nf_conntrack_event_cb, new); mutex_unlock(&nf_ct_ecache_mutex); return ret; @@ -112,7 +112,7 @@ void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new) notify = rcu_dereference_protected(nf_conntrack_event_cb, lockdep_is_held(&nf_ct_ecache_mutex)); BUG_ON(notify != new); - rcu_assign_pointer(nf_conntrack_event_cb, NULL); + RCU_INIT_POINTER(nf_conntrack_event_cb, NULL); mutex_unlock(&nf_ct_ecache_mutex); } EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier); @@ -129,7 +129,7 @@ int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new) ret = -EBUSY; goto out_unlock; } - rcu_assign_pointer(nf_expect_event_cb, new); + RCU_INIT_POINTER(nf_expect_event_cb, new); mutex_unlock(&nf_ct_ecache_mutex); return ret; @@ -147,7 +147,7 @@ void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new) notify = rcu_dereference_protected(nf_expect_event_cb, lockdep_is_held(&nf_ct_ecache_mutex)); BUG_ON(notify != new); - rcu_assign_pointer(nf_expect_event_cb, NULL); + RCU_INIT_POINTER(nf_expect_event_cb, NULL); mutex_unlock(&nf_ct_ecache_mutex); } EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier); diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c index 05ecdc2..4605c94 100644 --- a/net/netfilter/nf_conntrack_extend.c +++ b/net/netfilter/nf_conntrack_extend.c @@ -169,7 +169,7 @@ int nf_ct_extend_register(struct nf_ct_ext_type *type) before updating alloc_size */ type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align) + type->len; - rcu_assign_pointer(nf_ct_ext_types[type->id], type); + RCU_INIT_POINTER(nf_ct_ext_types[type->id], type); update_alloc_size(type); out: mutex_unlock(&nf_ct_ext_type_mutex); @@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(nf_ct_extend_register); void nf_ct_extend_unregister(struct nf_ct_ext_type *type) { 
mutex_lock(&nf_ct_ext_type_mutex); - rcu_assign_pointer(nf_ct_ext_types[type->id], NULL); + RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL); update_alloc_size(type); mutex_unlock(&nf_ct_ext_type_mutex); rcu_barrier(); /* Wait for completion of call_rcu()'s */ diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 1bdfea3..93c4bdb 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -131,7 +131,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); if (helper == NULL) { if (help) - rcu_assign_pointer(help->helper, NULL); + RCU_INIT_POINTER(help->helper, NULL); goto out; } @@ -145,7 +145,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, memset(&help->help, 0, sizeof(help->help)); } - rcu_assign_pointer(help->helper, helper); + RCU_INIT_POINTER(help->helper, helper); out: return ret; } @@ -162,7 +162,7 @@ static inline int unhelp(struct nf_conntrack_tuple_hash *i, lockdep_is_held(&nf_conntrack_lock) ) == me) { nf_conntrack_event(IPCT_HELPER, ct); - rcu_assign_pointer(help->helper, NULL); + RCU_INIT_POINTER(help->helper, NULL); } return 0; } diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 7dec88a..e58aa9b 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1125,7 +1125,7 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[]) if (help && help->helper) { /* we had a helper before ... */ nf_ct_remove_expectations(ct); - rcu_assign_pointer(help->helper, NULL); + RCU_INIT_POINTER(help->helper, NULL); } return 0; @@ -1163,7 +1163,7 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[]) return -EOPNOTSUPP; } - rcu_assign_pointer(help->helper, helper); + RCU_INIT_POINTER(help->helper, helper); return 0; } @@ -1386,7 +1386,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone, } /* not in hash table yet so not strictly necessary */ - rcu_assign_pointer(help->helper, helper); + RCU_INIT_POINTER(help->helper, helper); } } else { /* try an implicit helper assignation */ diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 20714ed..ce0c406 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -55,7 +55,7 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger) llog = rcu_dereference_protected(nf_loggers[pf], lockdep_is_held(&nf_log_mutex)); if (llog == NULL) - rcu_assign_pointer(nf_loggers[pf], logger); + RCU_INIT_POINTER(nf_loggers[pf], logger); } mutex_unlock(&nf_log_mutex); @@ -74,7 +74,7 @@ void nf_log_unregister(struct nf_logger *logger) c_logger = rcu_dereference_protected(nf_loggers[i], lockdep_is_held(&nf_log_mutex)); if (c_logger == logger) - rcu_assign_pointer(nf_loggers[i], NULL); + RCU_INIT_POINTER(nf_loggers[i], NULL); list_del(&logger->list[i]); } mutex_unlock(&nf_log_mutex); @@ -92,7 +92,7 @@ int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger) mutex_unlock(&nf_log_mutex); return -ENOENT; } - rcu_assign_pointer(nf_loggers[pf], logger); + RCU_INIT_POINTER(nf_loggers[pf], logger); mutex_unlock(&nf_log_mutex); return 0; } @@ -103,7 +103,7 @@ void nf_log_unbind_pf(u_int8_t pf) if (pf >= ARRAY_SIZE(nf_loggers)) return; mutex_lock(&nf_log_mutex); - rcu_assign_pointer(nf_loggers[pf], NULL); + RCU_INIT_POINTER(nf_loggers[pf], NULL); mutex_unlock(&nf_log_mutex); } EXPORT_SYMBOL(nf_log_unbind_pf); @@ -250,7 +250,7 
@@ static int nf_log_proc_dostring(ctl_table *table, int write, mutex_unlock(&nf_log_mutex); return -ENOENT; } - rcu_assign_pointer(nf_loggers[tindex], logger); + RCU_INIT_POINTER(nf_loggers[tindex], logger); mutex_unlock(&nf_log_mutex); } else { mutex_lock(&nf_log_mutex); diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index 84d0fd4..99ffd28 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -40,7 +40,7 @@ int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh) else if (old) ret = -EBUSY; else { - rcu_assign_pointer(queue_handler[pf], qh); + RCU_INIT_POINTER(queue_handler[pf], qh); ret = 0; } mutex_unlock(&queue_handler_mutex); @@ -65,7 +65,7 @@ int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh) return -EINVAL; } - rcu_assign_pointer(queue_handler[pf], NULL); + RCU_INIT_POINTER(queue_handler[pf], NULL); mutex_unlock(&queue_handler_mutex); synchronize_rcu(); @@ -84,7 +84,7 @@ void nf_unregister_queue_handlers(const struct nf_queue_handler *qh) queue_handler[pf], lockdep_is_held(&queue_handler_mutex) ) == qh) - rcu_assign_pointer(queue_handler[pf], NULL); + RCU_INIT_POINTER(queue_handler[pf], NULL); } mutex_unlock(&queue_handler_mutex); diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 1905976..c879c1a 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -59,7 +59,7 @@ int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n) nfnl_unlock(); return -EBUSY; } - rcu_assign_pointer(subsys_table[n->subsys_id], n); + RCU_INIT_POINTER(subsys_table[n->subsys_id], n); nfnl_unlock(); return 0; @@ -210,7 +210,7 @@ static int __net_init nfnetlink_net_init(struct net *net) if (!nfnl) return -ENOMEM; net->nfnl_stash = nfnl; - rcu_assign_pointer(net->nfnl, nfnl); + RCU_INIT_POINTER(net->nfnl, nfnl); return 0; } @@ -219,7 +219,7 @@ static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list) struct net *net; list_for_each_entry(net, net_exit_list, exit_list) - rcu_assign_pointer(net->nfnl, NULL); + RCU_INIT_POINTER(net->nfnl, NULL); synchronize_net(); list_for_each_entry(net, net_exit_list, exit_list) netlink_kernel_release(net->nfnl_stash); diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index 7d8083c..3f905e5 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c @@ -282,7 +282,7 @@ int __init netlbl_domhsh_init(u32 size) INIT_LIST_HEAD(&hsh_tbl->tbl[iter]); spin_lock(&netlbl_domhsh_lock); - rcu_assign_pointer(netlbl_domhsh, hsh_tbl); + RCU_INIT_POINTER(netlbl_domhsh, hsh_tbl); spin_unlock(&netlbl_domhsh_lock); return 0; @@ -330,7 +330,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, &rcu_dereference(netlbl_domhsh)->tbl[bkt]); } else { INIT_LIST_HEAD(&entry->list); - rcu_assign_pointer(netlbl_domhsh_def, entry); + RCU_INIT_POINTER(netlbl_domhsh_def, entry); } if (entry->type == NETLBL_NLTYPE_ADDRSELECT) { @@ -451,7 +451,7 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, if (entry != rcu_dereference(netlbl_domhsh_def)) list_del_rcu(&entry->list); else - rcu_assign_pointer(netlbl_domhsh_def, NULL); + RCU_INIT_POINTER(netlbl_domhsh_def, NULL); } else ret_val = -ENOENT; spin_unlock(&netlbl_domhsh_lock); diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index e6e8236..e251c2c 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -354,7 +354,7 @@ static struct netlbl_unlhsh_iface 
*netlbl_unlhsh_add_iface(int ifindex) INIT_LIST_HEAD(&iface->list); if (netlbl_unlhsh_rcu_deref(netlbl_unlhsh_def) != NULL) goto add_iface_failure; - rcu_assign_pointer(netlbl_unlhsh_def, iface); + RCU_INIT_POINTER(netlbl_unlhsh_def, iface); } spin_unlock(&netlbl_unlhsh_lock); @@ -621,7 +621,7 @@ static void netlbl_unlhsh_condremove_iface(struct netlbl_unlhsh_iface *iface) if (iface->ifindex > 0) list_del_rcu(&iface->list); else - rcu_assign_pointer(netlbl_unlhsh_def, NULL); + RCU_INIT_POINTER(netlbl_unlhsh_def, NULL); spin_unlock(&netlbl_unlhsh_lock); call_rcu(&iface->rcu, netlbl_unlhsh_free_iface); @@ -1449,7 +1449,7 @@ int __init netlbl_unlabel_init(u32 size) rcu_read_lock(); spin_lock(&netlbl_unlhsh_lock); - rcu_assign_pointer(netlbl_unlhsh, hsh_tbl); + RCU_INIT_POINTER(netlbl_unlhsh, hsh_tbl); spin_unlock(&netlbl_unlhsh_lock); rcu_read_unlock(); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 0a4db02..4330db9 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1578,7 +1578,7 @@ int __netlink_change_ngroups(struct sock *sk, unsigned int groups) new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC); if (!new) return -ENOMEM; - old = rcu_dereference_raw(tbl->listeners); + old = rcu_dereference_protected(tbl->listeners, 1); memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups)); rcu_assign_pointer(tbl->listeners, new); diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c index c6fffd9..bf10ea8 100644 --- a/net/phonet/af_phonet.c +++ b/net/phonet/af_phonet.c @@ -480,7 +480,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol, if (proto_tab[protocol]) err = -EBUSY; else - rcu_assign_pointer(proto_tab[protocol], pp); + RCU_INIT_POINTER(proto_tab[protocol], pp); mutex_unlock(&proto_tab_lock); return err; @@ -491,7 +491,7 @@ void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp) { mutex_lock(&proto_tab_lock); BUG_ON(proto_tab[protocol] != pp); - rcu_assign_pointer(proto_tab[protocol], NULL); + RCU_INIT_POINTER(proto_tab[protocol], NULL); mutex_unlock(&proto_tab_lock); synchronize_rcu(); proto_unregister(pp->prot); diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index d2df8f3..c582761 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c @@ -276,7 +276,7 @@ static void phonet_route_autodel(struct net_device *dev) mutex_lock(&pnn->routes.lock); for (i = 0; i < 64; i++) if (dev == pnn->routes.table[i]) { - rcu_assign_pointer(pnn->routes.table[i], NULL); + RCU_INIT_POINTER(pnn->routes.table[i], NULL); set_bit(i, deleted); } mutex_unlock(&pnn->routes.lock); @@ -390,7 +390,7 @@ int phonet_route_add(struct net_device *dev, u8 daddr) daddr = daddr >> 2; mutex_lock(&routes->lock); if (routes->table[daddr] == NULL) { - rcu_assign_pointer(routes->table[daddr], dev); + RCU_INIT_POINTER(routes->table[daddr], dev); dev_hold(dev); err = 0; } @@ -406,7 +406,7 @@ int phonet_route_del(struct net_device *dev, u8 daddr) daddr = daddr >> 2; mutex_lock(&routes->lock); if (dev == routes->table[daddr]) - rcu_assign_pointer(routes->table[daddr], NULL); + RCU_INIT_POINTER(routes->table[daddr], NULL); else dev = NULL; mutex_unlock(&routes->lock); diff --git a/net/phonet/socket.c b/net/phonet/socket.c index ab07711..676d18d 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c @@ -679,7 +679,7 @@ int pn_sock_bind_res(struct sock *sk, u8 res) mutex_lock(&resource_mutex); if (pnres.sk[res] == NULL) { sock_hold(sk); - rcu_assign_pointer(pnres.sk[res], sk); + RCU_INIT_POINTER(pnres.sk[res], sk); ret = 0; } 
mutex_unlock(&resource_mutex); @@ -695,7 +695,7 @@ int pn_sock_unbind_res(struct sock *sk, u8 res) mutex_lock(&resource_mutex); if (pnres.sk[res] == sk) { - rcu_assign_pointer(pnres.sk[res], NULL); + RCU_INIT_POINTER(pnres.sk[res], NULL); ret = 0; } mutex_unlock(&resource_mutex); @@ -714,7 +714,7 @@ void pn_sock_unbind_all_res(struct sock *sk) mutex_lock(&resource_mutex); for (res = 0; res < 256; res++) { if (pnres.sk[res] == sk) { - rcu_assign_pointer(pnres.sk[res], NULL); + RCU_INIT_POINTER(pnres.sk[res], NULL); match++; } } diff --git a/net/socket.c b/net/socket.c index 24a7740..2517e11 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2470,7 +2470,7 @@ int sock_register(const struct net_proto_family *ops) lockdep_is_held(&net_family_lock))) err = -EEXIST; else { - rcu_assign_pointer(net_families[ops->family], ops); + RCU_INIT_POINTER(net_families[ops->family], ops); err = 0; } spin_unlock(&net_family_lock); @@ -2498,7 +2498,7 @@ void sock_unregister(int family) BUG_ON(family < 0 || family >= NPROTO); spin_lock(&net_family_lock); - rcu_assign_pointer(net_families[family], NULL); + RCU_INIT_POINTER(net_families[family], NULL); spin_unlock(&net_family_lock); synchronize_rcu(); diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 364eb45..d413275 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -122,7 +122,7 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags)) return; gss_get_ctx(ctx); - rcu_assign_pointer(gss_cred->gc_ctx, ctx); + RCU_INIT_POINTER(gss_cred->gc_ctx, ctx); set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); smp_mb__before_clear_bit(); clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags); @@ -970,7 +970,7 @@ gss_destroy_nullcred(struct rpc_cred *cred) struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); struct gss_cl_ctx *ctx = gss_cred->gc_ctx; - rcu_assign_pointer(gss_cred->gc_ctx, NULL); + RCU_INIT_POINTER(gss_cred->gc_ctx, NULL); call_rcu(&cred->cr_rcu, gss_free_cred_callback); if (ctx) gss_put_ctx(ctx); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 0256b8a..d0a42df 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -2927,7 +2927,7 @@ static int __net_init xfrm_user_net_init(struct net *net) if (nlsk == NULL) return -ENOMEM; net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */ - rcu_assign_pointer(net->xfrm.nlsk, nlsk); + RCU_INIT_POINTER(net->xfrm.nlsk, nlsk); return 0; } @@ -2935,7 +2935,7 @@ static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list) { struct net *net; list_for_each_entry(net, net_exit_list, exit_list) - rcu_assign_pointer(net->xfrm.nlsk, NULL); + RCU_INIT_POINTER(net->xfrm.nlsk, NULL); synchronize_net(); list_for_each_entry(net, net_exit_list, exit_list) netlink_kernel_release(net->xfrm.nlsk_stash); |
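Most of the hunks above replace rcu_assign_pointer(p, v) with RCU_INIT_POINTER(p, v) at sites where v is NULL or where the pointer is not yet reachable by readers; in those cases the write memory barrier implied by rcu_assign_pointer() buys nothing. A minimal sketch of the division of labour, using a hypothetical struct foo / foo_ptr / foo_mutex that are not taken from this patch:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int val;
	struct rcu_head rcu;
};

static struct foo __rcu *foo_ptr;
static DEFINE_MUTEX(foo_mutex);

static int foo_publish(int val)
{
	struct foo *new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (!new)
		return -ENOMEM;
	new->val = val;

	mutex_lock(&foo_mutex);
	/* publishing an initialized object to readers: barrier required */
	rcu_assign_pointer(foo_ptr, new);
	mutex_unlock(&foo_mutex);
	return 0;
}

static void foo_unpublish(void)
{
	struct foo *old;

	mutex_lock(&foo_mutex);
	old = rcu_dereference_protected(foo_ptr, lockdep_is_held(&foo_mutex));
	/* storing NULL publishes nothing, so no barrier is needed */
	RCU_INIT_POINTER(foo_ptr, NULL);
	mutex_unlock(&foo_mutex);

	if (old)
		kfree_rcu(old, rcu);	/* free once all readers have finished */
}

The conversions in net/ above fall into the second case (clearing a pointer under the relevant lock, or filling a slot before the structure becomes globally visible), which is why the barrier can be dropped without affecting readers.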
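The af_iucv changes also stop calling the z/VM IUCV functions directly: iucv.ko now exports a single struct iucv_interface (iucv_if), and afiucv_init() binds to it at runtime through pr_iucv, falling back to the HiperSockets transport alone when the backend is absent. A sketch of that optional-provider pattern follows; backend_interface, backend_if and the module name "backend" are illustrative stand-ins, not names from the patch:

#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>

/* the provider module exports one ops struct instead of many symbols */
struct backend_interface {
	int (*start)(void);
	void (*stop)(void);
};

extern struct backend_interface backend_if;	/* EXPORT_SYMBOL()ed by the provider */

static struct backend_interface *backend;	/* NULL when the provider is absent */

static int __init consumer_init(void)
{
	/* try to load the provider on demand; take a reference if it is there */
	backend = try_then_request_module(symbol_get(backend_if), "backend");
	if (backend)
		return backend->start();
	return 0;	/* keep going in reduced mode */
}

static void __exit consumer_exit(void)
{
	if (backend) {
		backend->stop();
		symbol_put(backend_if);	/* drop the module reference */
	}
}

module_init(consumer_init);
module_exit(consumer_exit);
MODULE_LICENSE("GPL");

symbol_get() pins the provider module for as long as the consumer holds the pointer, which is why the exit and error paths above pair it with symbol_put(iucv_if).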