author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-12 18:07:07 -0800 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-12 18:07:07 -0800 |
commit | 6be35c700f742e911ecedd07fcc43d4439922334 (patch) | |
tree | ca9f37214d204465fcc2d79c82efd291e357c53c /net/bridge/br_mdb.c | |
parent | e37aa63e87bd581f9be5555ed0ba83f5295c92fc (diff) | |
parent | 520dfe3a3645257bf83660f672c47f8558f3d4c4 (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking changes from David Miller:
1) Allow dumping, monitoring, and changing the bridge multicast database
using netlink. From Cong Wang.
2) RFC 5961 TCP blind data injection attack mitigation, from Eric
Dumazet.
3) Networking user namespace support from Eric W. Biederman.
4) tuntap/virtio-net multiqueue support by Jason Wang.
5) Support for checksum offload of encapsulated packets (basically,
tunneled traffic can still be checksummed by HW). From Joseph
Gasparakis.
6) Allow BPF filter access to VLAN tags, from Eric Dumazet and
Daniel Borkmann (a short classic-BPF sketch follows this list).
7) Bridge port parameters over netlink and BPDU blocking support
from Stephen Hemminger.
8) Improve data access patterns during inet socket demux by rearranging
socket layout, from Eric Dumazet.
9) TIPC protocol updates and cleanups from Ying Xue, Paul Gortmaker, and
Jon Maloy.
10) Update TCP socket hash sizing to be more in line with current-day
realities. The existing heuristics were chosen a decade ago.
From Eric Dumazet.
11) Fix races, queue bloat, and excessive wakeups in ATM and
associated drivers, from Krzysztof Mazur and David Woodhouse.
12) Support DOVE (Distributed Overlay Virtual Ethernet) extensions
in VXLAN driver, from David Stevens.
13) Add "oops_only" mode to netconsole, from Amerigo Wang.
14) Support set and query of VEB/VEPA bridge mode via PF_BRIDGE, also
allow DCB netlink to work on namespaces other than the initial
namespace. From John Fastabend.
15) Support PTP in the Tigon3 driver, from Matt Carlson.
16) tun/vhost zero copy fixes and improvements, plus turn it on
by default, from Michael S. Tsirkin.
17) Support per-association statistics in SCTP, from Michele
Baldessari.
And many, many driver updates, cleanups, and improvements. Too
numerous to mention individually.
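To make item 6 concrete: the new ancillary load offsets SKF_AD_VLAN_TAG and SKF_AD_VLAN_TAG_PRESENT in <linux/filter.h> let a classic BPF program read a frame's VLAN tag even though the tag is not part of the packet bytes the filter normally sees. The sketch below is illustrative only and is not code from this pull: it attaches a socket filter that keeps just the frames tagged with VLAN ID 100 (an arbitrary example value); the packet socket needs CAP_NET_RAW.

#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/filter.h>
#include <linux/if_ether.h>

int main(void)
{
	/* Classic BPF: accept only frames carrying a VLAN tag with ID 100. */
	struct sock_filter code[] = {
		/* A = 1 if the skb has a VLAN tag (ancillary load) */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 4, 0),	/* untagged -> drop */
		/* A = VLAN TCI of the skb */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_VLAN_TAG),
		BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0x0fff),	/* keep only the VLAN ID bits */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 100, 0, 1),	/* not VLAN 100 -> drop */
		BPF_STMT(BPF_RET | BPF_K, 0xffff),		/* accept */
		BPF_STMT(BPF_RET | BPF_K, 0),			/* drop */
	};
	struct sock_fprog prog = {
		.len = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};
	int fd;

	fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	if (fd < 0) {
		perror("socket(AF_PACKET)");
		return 1;
	}
	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)) < 0) {
		perror("SO_ATTACH_FILTER");
		return 1;
	}
	/* recvfrom(fd, ...) now only returns frames tagged with VLAN 100. */
	return 0;
}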
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1722 commits)
net/mlx4_en: Add support for destination MAC in steering rules
net/mlx4_en: Use generic etherdevice.h functions.
net: ethtool: Add destination MAC address to flow steering API
bridge: add support of adding and deleting mdb entries
bridge: notify mdb changes via netlink
ndisc: Unexport ndisc_{build,send}_skb().
uapi: add missing netconf.h to export list
pkt_sched: avoid requeues if possible
solos-pci: fix double-free of TX skb in DMA mode
bnx2: Fix accidental reversions.
bna: Driver Version Updated to 3.1.2.1
bna: Firmware update
bna: Add RX State
bna: Rx Page Based Allocation
bna: TX Intr Coalescing Fix
bna: Tx and Rx Optimizations
bna: Code Cleanup and Enhancements
ath9k: check pdata variable before dereferencing it
ath5k: RX timestamp is reported at end of frame
ath9k_htc: RX timestamp is reported at end of frame
...
Diffstat (limited to 'net/bridge/br_mdb.c')
-rw-r--r-- | net/bridge/br_mdb.c | 481 |
1 file changed, 481 insertions, 0 deletions
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
new file mode 100644
index 0000000..6f0a2ee
--- /dev/null
+++ b/net/bridge/br_mdb.c
@@ -0,0 +1,481 @@
+#include <linux/err.h>
+#include <linux/igmp.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/rculist.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <net/ip.h>
+#include <net/netlink.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#endif
+
+#include "br_private.h"
+
+static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
+			       struct net_device *dev)
+{
+	struct net_bridge *br = netdev_priv(dev);
+	struct net_bridge_port *p;
+	struct hlist_node *n;
+	struct nlattr *nest;
+
+	if (!br->multicast_router || hlist_empty(&br->router_list))
+		return 0;
+
+	nest = nla_nest_start(skb, MDBA_ROUTER);
+	if (nest == NULL)
+		return -EMSGSIZE;
+
+	hlist_for_each_entry_rcu(p, n, &br->router_list, rlist) {
+		if (p && nla_put_u32(skb, MDBA_ROUTER_PORT, p->dev->ifindex))
+			goto fail;
+	}
+
+	nla_nest_end(skb, nest);
+	return 0;
+fail:
+	nla_nest_cancel(skb, nest);
+	return -EMSGSIZE;
+}
+
+static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
+			    struct net_device *dev)
+{
+	struct net_bridge *br = netdev_priv(dev);
+	struct net_bridge_mdb_htable *mdb;
+	struct nlattr *nest, *nest2;
+	int i, err = 0;
+	int idx = 0, s_idx = cb->args[1];
+
+	if (br->multicast_disabled)
+		return 0;
+
+	mdb = rcu_dereference(br->mdb);
+	if (!mdb)
+		return 0;
+
+	nest = nla_nest_start(skb, MDBA_MDB);
+	if (nest == NULL)
+		return -EMSGSIZE;
+
+	for (i = 0; i < mdb->max; i++) {
+		struct hlist_node *h;
+		struct net_bridge_mdb_entry *mp;
+		struct net_bridge_port_group *p, **pp;
+		struct net_bridge_port *port;
+
+		hlist_for_each_entry_rcu(mp, h, &mdb->mhash[i], hlist[mdb->ver]) {
+			if (idx < s_idx)
+				goto skip;
+
+			nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
+			if (nest2 == NULL) {
+				err = -EMSGSIZE;
+				goto out;
+			}
+
+			for (pp = &mp->ports;
+			     (p = rcu_dereference(*pp)) != NULL;
+			     pp = &p->next) {
+				port = p->port;
+				if (port) {
+					struct br_mdb_entry e;
+					e.ifindex = port->dev->ifindex;
+					e.addr.u.ip4 = p->addr.u.ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+					e.addr.u.ip6 = p->addr.u.ip6;
+#endif
+					e.addr.proto = p->addr.proto;
+					if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(e), &e)) {
+						nla_nest_cancel(skb, nest2);
+						err = -EMSGSIZE;
+						goto out;
+					}
+				}
+			}
+			nla_nest_end(skb, nest2);
+		skip:
+			idx++;
+		}
+	}
+
+out:
+	cb->args[1] = idx;
+	nla_nest_end(skb, nest);
+	return err;
+}
+
+static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct net_device *dev;
+	struct net *net = sock_net(skb->sk);
+	struct nlmsghdr *nlh = NULL;
+	int idx = 0, s_idx;
+
+	s_idx = cb->args[0];
+
+	rcu_read_lock();
+
+	/* In theory this could be wrapped to 0... */
+	cb->seq = net->dev_base_seq + br_mdb_rehash_seq;
+
+	for_each_netdev_rcu(net, dev) {
+		if (dev->priv_flags & IFF_EBRIDGE) {
+			struct br_port_msg *bpm;
+
+			if (idx < s_idx)
+				goto skip;
+
+			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
+					cb->nlh->nlmsg_seq, RTM_GETMDB,
+					sizeof(*bpm), NLM_F_MULTI);
+			if (nlh == NULL)
+				break;
+
+			bpm = nlmsg_data(nlh);
+			bpm->ifindex = dev->ifindex;
+			if (br_mdb_fill_info(skb, cb, dev) < 0)
+				goto out;
+			if (br_rports_fill_info(skb, cb, dev) < 0)
+				goto out;
+
+			cb->args[1] = 0;
+			nlmsg_end(skb, nlh);
+		skip:
+			idx++;
+		}
+	}
+
+out:
+	if (nlh)
+		nlmsg_end(skb, nlh);
+	rcu_read_unlock();
+	cb->args[0] = idx;
+	return skb->len;
+}
+
+static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
+				   struct net_device *dev,
+				   struct br_mdb_entry *entry, u32 pid,
+				   u32 seq, int type, unsigned int flags)
+{
+	struct nlmsghdr *nlh;
+	struct br_port_msg *bpm;
+	struct nlattr *nest, *nest2;
+
+	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
+	if (!nlh)
+		return -EMSGSIZE;
+
+	bpm = nlmsg_data(nlh);
+	bpm->family = AF_BRIDGE;
+	bpm->ifindex = dev->ifindex;
+	nest = nla_nest_start(skb, MDBA_MDB);
+	if (nest == NULL)
+		goto cancel;
+	nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
+	if (nest2 == NULL)
+		goto end;
+
+	if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
+		goto end;
+
+	nla_nest_end(skb, nest2);
+	nla_nest_end(skb, nest);
+	return nlmsg_end(skb, nlh);
+
+end:
+	nla_nest_end(skb, nest);
+cancel:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
+static inline size_t rtnl_mdb_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct br_port_msg))
+		+ nla_total_size(sizeof(struct br_mdb_entry));
+}
+
+static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
+			    int type)
+{
+	struct net *net = dev_net(dev);
+	struct sk_buff *skb;
+	int err = -ENOBUFS;
+
+	skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
+	if (!skb)
+		goto errout;
+
+	err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
+	if (err < 0) {
+		kfree_skb(skb);
+		goto errout;
+	}
+
+	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
+	return;
+errout:
+	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
+}
+
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
+		   struct br_ip *group, int type)
+{
+	struct br_mdb_entry entry;
+
+	entry.ifindex = port->dev->ifindex;
+	entry.addr.proto = group->proto;
+	entry.addr.u.ip4 = group->u.ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+	entry.addr.u.ip6 = group->u.ip6;
+#endif
+	__br_mdb_notify(dev, &entry, type);
+}
+
+static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
+{
+	if (entry->ifindex == 0)
+		return false;
+
+	if (entry->addr.proto == htons(ETH_P_IP)) {
+		if (!ipv4_is_multicast(entry->addr.u.ip4))
+			return false;
+		if (ipv4_is_local_multicast(entry->addr.u.ip4))
+			return false;
+#if IS_ENABLED(CONFIG_IPV6)
+	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
+		if (!ipv6_is_transient_multicast(&entry->addr.u.ip6))
+			return false;
+#endif
+	} else
+		return false;
+
+	return true;
+}
+
+static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
+			struct net_device **pdev, struct br_mdb_entry **pentry)
+{
+	struct net *net = sock_net(skb->sk);
+	struct br_mdb_entry *entry;
+	struct br_port_msg *bpm;
+	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
+	struct net_device *dev;
+	int err;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY, NULL);
+	if (err < 0)
+		return err;
+
+	bpm = nlmsg_data(nlh);
+	if (bpm->ifindex == 0) {
+		pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
+		return -EINVAL;
+	}
+
+	dev = __dev_get_by_index(net, bpm->ifindex);
+	if (dev == NULL) {
+		pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
+		return -ENODEV;
+	}
+
+	if (!(dev->priv_flags & IFF_EBRIDGE)) {
+		pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
+		return -EOPNOTSUPP;
+	}
+
+	*pdev = dev;
+
+	if (!tb[MDBA_SET_ENTRY] ||
+	    nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
+		pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
+		return -EINVAL;
+	}
+
+	entry = nla_data(tb[MDBA_SET_ENTRY]);
+	if (!is_valid_mdb_entry(entry)) {
+		pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
+		return -EINVAL;
+	}
+
+	*pentry = entry;
+	return 0;
+}
+
+static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
+			    struct br_ip *group)
+{
+	struct net_bridge_mdb_entry *mp;
+	struct net_bridge_port_group *p;
+	struct net_bridge_port_group __rcu **pp;
+	struct net_bridge_mdb_htable *mdb;
+	int err;
+
+	mdb = mlock_dereference(br->mdb, br);
+	mp = br_mdb_ip_get(mdb, group);
+	if (!mp) {
+		mp = br_multicast_new_group(br, port, group);
+		err = PTR_ERR(mp);
+		if (IS_ERR(mp))
+			return err;
+	}
+
+	for (pp = &mp->ports;
+	     (p = mlock_dereference(*pp, br)) != NULL;
+	     pp = &p->next) {
+		if (p->port == port)
+			return -EEXIST;
+		if ((unsigned long)p->port < (unsigned long)port)
+			break;
+	}
+
+	p = br_multicast_new_port_group(port, group, *pp);
+	if (unlikely(!p))
+		return -ENOMEM;
+	rcu_assign_pointer(*pp, p);
+
+	br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
+	return 0;
+}
+
+static int __br_mdb_add(struct net *net, struct net_bridge *br,
+			struct br_mdb_entry *entry)
+{
+	struct br_ip ip;
+	struct net_device *dev;
+	struct net_bridge_port *p;
+	int ret;
+
+	if (!netif_running(br->dev) || br->multicast_disabled)
+		return -EINVAL;
+
+	dev = __dev_get_by_index(net, entry->ifindex);
+	if (!dev)
+		return -ENODEV;
+
+	p = br_port_get_rtnl(dev);
+	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
+		return -EINVAL;
+
+	ip.proto = entry->addr.proto;
+	if (ip.proto == htons(ETH_P_IP))
+		ip.u.ip4 = entry->addr.u.ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+	else
+		ip.u.ip6 = entry->addr.u.ip6;
+#endif
+
+	spin_lock_bh(&br->multicast_lock);
+	ret = br_mdb_add_group(br, p, &ip);
+	spin_unlock_bh(&br->multicast_lock);
+	return ret;
+}
+
+static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+	struct net *net = sock_net(skb->sk);
+	struct br_mdb_entry *entry;
+	struct net_device *dev;
+	struct net_bridge *br;
+	int err;
+
+	err = br_mdb_parse(skb, nlh, &dev, &entry);
+	if (err < 0)
+		return err;
+
+	br = netdev_priv(dev);
+
+	err = __br_mdb_add(net, br, entry);
+	if (!err)
+		__br_mdb_notify(dev, entry, RTM_NEWMDB);
+	return err;
+}
+
+static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
+{
+	struct net_bridge_mdb_htable *mdb;
+	struct net_bridge_mdb_entry *mp;
+	struct net_bridge_port_group *p;
+	struct net_bridge_port_group __rcu **pp;
+	struct br_ip ip;
+	int err = -EINVAL;
+
+	if (!netif_running(br->dev) || br->multicast_disabled)
+		return -EINVAL;
+
+	if (timer_pending(&br->multicast_querier_timer))
+		return -EBUSY;
+
+	ip.proto = entry->addr.proto;
+	if (ip.proto == htons(ETH_P_IP))
+		ip.u.ip4 = entry->addr.u.ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+	else
+		ip.u.ip6 = entry->addr.u.ip6;
+#endif
+
+	spin_lock_bh(&br->multicast_lock);
+	mdb = mlock_dereference(br->mdb, br);
+
+	mp = br_mdb_ip_get(mdb, &ip);
+	if (!mp)
+		goto unlock;
+
+	for (pp = &mp->ports;
+	     (p = mlock_dereference(*pp, br)) != NULL;
+	     pp = &p->next) {
+		if (!p->port || p->port->dev->ifindex != entry->ifindex)
+			continue;
+
+		if (p->port->state == BR_STATE_DISABLED)
+			goto unlock;
+
+		rcu_assign_pointer(*pp, p->next);
+		hlist_del_init(&p->mglist);
+		del_timer(&p->timer);
+		call_rcu_bh(&p->rcu, br_multicast_free_pg);
+		err = 0;
+
+		if (!mp->ports && !mp->mglist &&
+		    netif_running(br->dev))
+			mod_timer(&mp->timer, jiffies);
+		break;
+	}
+
+unlock:
+	spin_unlock_bh(&br->multicast_lock);
+	return err;
+}
+
+static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+	struct net_device *dev;
+	struct br_mdb_entry *entry;
+	struct net_bridge *br;
+	int err;
+
+	err = br_mdb_parse(skb, nlh, &dev, &entry);
+	if (err < 0)
+		return err;
+
+	br = netdev_priv(dev);
+
+	err = __br_mdb_del(br, entry);
+	if (!err)
+		__br_mdb_notify(dev, entry, RTM_DELMDB);
+	return err;
+}
+
+void br_mdb_init(void)
+{
+	rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, NULL);
+	rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, NULL);
+	rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, NULL);
+}
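For reference, the netlink surface added above is small: br_mdb_parse() expects an RTM_NEWMDB or RTM_DELMDB request whose payload is a struct br_port_msg naming the bridge, followed by a single MDBA_SET_ENTRY attribute carrying a struct br_mdb_entry (port ifindex plus group address), and br_mdb_dump() answers RTM_GETMDB dump requests. The userspace sketch below is illustrative only; it assumes the uapi definitions exported alongside this series in <linux/if_bridge.h>, the BR_IFINDEX/PORT_IFINDEX values and the group address are made-up placeholders, and the request needs CAP_NET_ADMIN.

/* Illustrative only: add an IPv4 group to a bridge port with RTM_NEWMDB. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>

#define BR_IFINDEX   3	/* placeholder: ifindex of the bridge device (e.g. br0) */
#define PORT_IFINDEX 4	/* placeholder: ifindex of the bridge port (e.g. eth0)  */

int main(void)
{
	struct {
		struct nlmsghdr    nlh;
		struct br_port_msg bpm;		/* header checked by br_mdb_parse() */
		char               attrs[256];
	} req;
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct br_mdb_entry entry;
	struct rtattr *rta;
	int fd;

	memset(&req, 0, sizeof(req));
	memset(&entry, 0, sizeof(entry));

	req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(req.bpm));
	req.nlh.nlmsg_type  = RTM_NEWMDB;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.bpm.family      = AF_BRIDGE;
	req.bpm.ifindex     = BR_IFINDEX;

	/* MDBA_SET_ENTRY attribute: which port joins which group. */
	entry.ifindex    = PORT_IFINDEX;
	entry.addr.proto = htons(ETH_P_IP);
	entry.addr.u.ip4 = inet_addr("239.1.2.3");

	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = MDBA_SET_ENTRY;
	rta->rta_len  = RTA_LENGTH(sizeof(entry));
	memcpy(RTA_DATA(rta), &entry, sizeof(entry));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + RTA_ALIGN(rta->rta_len);

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0) {
		perror("socket(AF_NETLINK)");
		return 1;
	}
	/* The kernel replies with an ACK/error message; reading it is omitted here. */
	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&kernel, sizeof(kernel)) < 0) {
		perror("RTM_NEWMDB");
		return 1;
	}
	close(fd);
	return 0;
}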