path: root/net/tipc/node.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2016-01-12 18:57:02 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-01-12 18:57:02 -0800
commit    aee3bfa3307cd0da2126bdc0ea359dabea5ee8f7 (patch)
tree      3d35c69e8fa835098bb90f77f30abed120681651 /net/tipc/node.c
parent    c597b6bcd5c624534afc3df65cdc42bb05173bca (diff)
parent    415b6f19e87e350b13585591859d4fdf50772229 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Support busy polling generically, for all NAPI drivers. From Eric Dumazet.
 2) Add byte/packet counter support to nft_ct, from Florian Westphal.
 3) Add RSS/XPS support to mvneta driver, from Gregory Clement.
 4) Implement IPV6_HDRINCL socket option for raw sockets, from Hannes Frederic Sowa.
 5) Add support for T6 adapter to cxgb4 driver, from Hariprasad Shenai.
 6) Add support for VLAN device bridging to mlxsw switch driver, from Ido Schimmel.
 7) Add driver for Netronome NFP4000/NFP6000, from Jakub Kicinski.
 8) Provide hwmon interface to mlxsw switch driver, from Jiri Pirko.
 9) Reorganize wireless drivers into per-vendor directories just like we do for ethernet drivers. From Kalle Valo.
10) Provide a way for administrators to "destroy" connected sockets via the SOCK_DESTROY socket netlink diag operation. From Lorenzo Colitti.
11) Add support to add/remove multicast routes via netlink, from Nikolay Aleksandrov.
12) Make TCP keepalive settings per-namespace, from Nikolay Borisov.
13) Add forwarding and packet duplication facilities to nf_tables, from Pablo Neira Ayuso.
14) Dead route support in MPLS, from Roopa Prabhu.
15) TSO support for thunderx chips, from Sunil Goutham.
16) Add driver for IBM's System i/p VNIC protocol, from Thomas Falcon.
17) Rationalize, consolidate, and more completely document the checksum offloading facilities in the networking stack. From Tom Herbert.
18) Support aborting an ongoing scan in mac80211/cfg80211, from Vidyullatha Kanchanapally.
19) Use per-bucket spinlock for bpf hash facility, from Tom Leiming.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1375 commits)
  net: bnxt: always return values from _bnxt_get_max_rings
  net: bpf: reject invalid shifts
  phonet: properly unshare skbs in phonet_rcv()
  dwc_eth_qos: Fix dma address for multi-fragment skbs
  phy: remove an unneeded condition
  mdio: remove an unneed condition
  mdio_bus: NULL dereference on allocation error
  net: Fix typo in netdev_intersect_features
  net: freescale: mac-fec: Fix build error from phy_device API change
  net: freescale: ucc_geth: Fix build error from phy_device API change
  bonding: Prevent IPv6 link local address on enslaved devices
  IB/mlx5: Add flow steering support
  net/mlx5_core: Export flow steering API
  net/mlx5_core: Make ipv4/ipv6 location more clear
  net/mlx5_core: Enable flow steering support for the IB driver
  net/mlx5_core: Initialize namespaces only when supported by device
  net/mlx5_core: Set priority attributes
  net/mlx5_core: Connect flow tables
  net/mlx5_core: Introduce modify flow table command
  net/mlx5_core: Managing root flow table
  ...
Diffstat (limited to 'net/tipc/node.c')
-rw-r--r--  net/tipc/node.c | 875
1 file changed, 687 insertions(+), 188 deletions(-)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 20cddec..fa97d96 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -42,6 +42,84 @@
#include "bcast.h"
#include "discover.h"
+#define INVALID_NODE_SIG 0x10000
+
+/* Flags used to take different actions according to flag type
+ * TIPC_NOTIFY_NODE_DOWN: notify node is down
+ * TIPC_NOTIFY_NODE_UP: notify node is up
+ * TIPC_NOTIFY_LINK_UP: notify link is up
+ * TIPC_NOTIFY_LINK_DOWN: notify link is down
+ */
+enum {
+ TIPC_NOTIFY_NODE_DOWN = (1 << 3),
+ TIPC_NOTIFY_NODE_UP = (1 << 4),
+ TIPC_NOTIFY_LINK_UP = (1 << 6),
+ TIPC_NOTIFY_LINK_DOWN = (1 << 7)
+};
+
+struct tipc_link_entry {
+ struct tipc_link *link;
+ spinlock_t lock; /* per link */
+ u32 mtu;
+ struct sk_buff_head inputq;
+ struct tipc_media_addr maddr;
+};
+
+struct tipc_bclink_entry {
+ struct tipc_link *link;
+ struct sk_buff_head inputq1;
+ struct sk_buff_head arrvq;
+ struct sk_buff_head inputq2;
+ struct sk_buff_head namedq;
+};
+
+/**
+ * struct tipc_node - TIPC node structure
+ * @addr: network address of node
+ * @ref: reference counter to node object
+ * @lock: rwlock governing access to structure
+ * @net: the applicable net namespace
+ * @hash: links to adjacent nodes in unsorted hash chain
+ * @inputq: pointer to input queue containing messages for msg event
+ * @namedq: pointer to name table input queue with name table messages
+ * @active_links: bearer ids of active links, used as index into links[] array
+ * @links: array containing references to all links to node
+ * @action_flags: bit mask of different types of node actions
+ * @state: connectivity state vs peer node
+ * @sync_point: sequence number where synch/failover is finished
+ * @list: links to adjacent nodes in sorted list of cluster's nodes
+ * @working_links: number of working links to node (both active and standby)
+ * @link_cnt: number of links to node
+ * @capabilities: bitmap, indicating peer node's functional capabilities
+ * @signature: node instance identifier
+ * @link_id: local and remote bearer ids of changing link, if any
+ * @publ_list: list of publications
+ * @rcu: rcu struct for tipc_node
+ */
+struct tipc_node {
+ u32 addr;
+ struct kref kref;
+ rwlock_t lock;
+ struct net *net;
+ struct hlist_node hash;
+ int active_links[2];
+ struct tipc_link_entry links[MAX_BEARERS];
+ struct tipc_bclink_entry bc_entry;
+ int action_flags;
+ struct list_head list;
+ int state;
+ u16 sync_point;
+ int link_cnt;
+ u16 working_links;
+ u16 capabilities;
+ u32 signature;
+ u32 link_id;
+ struct list_head publ_list;
+ struct list_head conn_sks;
+ unsigned long keepalive_intv;
+ struct timer_list timer;
+ struct rcu_head rcu;
+};
+
/* Node FSM states and events:
*/
enum {
@@ -75,6 +153,9 @@ static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(unsigned long data);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
+static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
+static void tipc_node_put(struct tipc_node *node);
+static bool tipc_node_is_up(struct tipc_node *n);
struct tipc_sock_conn {
u32 port;
@@ -83,12 +164,54 @@ struct tipc_sock_conn {
struct list_head list;
};
+static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
+ [TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_LINK_NAME] = {
+ .type = NLA_STRING,
+ .len = TIPC_MAX_LINK_NAME
+ },
+ [TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
+ [TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
+ [TIPC_NLA_LINK_UP] = { .type = NLA_FLAG },
+ [TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG },
+ [TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED },
+ [TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED },
+ [TIPC_NLA_LINK_RX] = { .type = NLA_U32 },
+ [TIPC_NLA_LINK_TX] = { .type = NLA_U32 }
+};
+
static const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = {
[TIPC_NLA_NODE_UNSPEC] = { .type = NLA_UNSPEC },
[TIPC_NLA_NODE_ADDR] = { .type = NLA_U32 },
[TIPC_NLA_NODE_UP] = { .type = NLA_FLAG }
};
+static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
+{
+ int bearer_id = n->active_links[sel & 1];
+
+ if (unlikely(bearer_id == INVALID_BEARER_ID))
+ return NULL;
+
+ return n->links[bearer_id].link;
+}
+
+int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
+{
+ struct tipc_node *n;
+ int bearer_id;
+ unsigned int mtu = MAX_MSG_SIZE;
+
+ n = tipc_node_find(net, addr);
+ if (unlikely(!n))
+ return mtu;
+
+ bearer_id = n->active_links[sel & 1];
+ if (likely(bearer_id != INVALID_BEARER_ID))
+ mtu = n->links[bearer_id].mtu;
+ tipc_node_put(n);
+ return mtu;
+}
/*
* A trivial power-of-two bitmask technique is used for speed, since this
* operation is done for every incoming TIPC packet. The number of hash table
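The hash function itself falls outside this hunk's context. A minimal sketch of the bitmask technique the comment describes, assuming NODE_HTABLE_SIZE is a power of two as in the mainline source:

static unsigned int tipc_hashfn(u32 addr)
{
	/* With a power-of-two table size, reduction modulo the
	 * table size collapses to a single AND instruction --
	 * cheap enough to run on every received packet.
	 */
	return addr & (NODE_HTABLE_SIZE - 1);
}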
@@ -107,7 +230,7 @@ static void tipc_node_kref_release(struct kref *kref)
tipc_node_delete(node);
}
-void tipc_node_put(struct tipc_node *node)
+static void tipc_node_put(struct tipc_node *node)
{
kref_put(&node->kref, tipc_node_kref_release);
}
@@ -120,7 +243,7 @@ static void tipc_node_get(struct tipc_node *node)
/*
* tipc_node_find - locate specified node object, if it exists
*/
-struct tipc_node *tipc_node_find(struct net *net, u32 addr)
+static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_node *node;
@@ -141,66 +264,122 @@ struct tipc_node *tipc_node_find(struct net *net, u32 addr)
return NULL;
}
+static void tipc_node_read_lock(struct tipc_node *n)
+{
+ read_lock_bh(&n->lock);
+}
+
+static void tipc_node_read_unlock(struct tipc_node *n)
+{
+ read_unlock_bh(&n->lock);
+}
+
+static void tipc_node_write_lock(struct tipc_node *n)
+{
+ write_lock_bh(&n->lock);
+}
+
+static void tipc_node_write_unlock(struct tipc_node *n)
+{
+ struct net *net = n->net;
+ u32 addr = 0;
+ u32 flags = n->action_flags;
+ u32 link_id = 0;
+ struct list_head *publ_list;
+
+ if (likely(!flags)) {
+ write_unlock_bh(&n->lock);
+ return;
+ }
+
+ addr = n->addr;
+ link_id = n->link_id;
+ publ_list = &n->publ_list;
+
+ n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
+ TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
+
+ write_unlock_bh(&n->lock);
+
+ if (flags & TIPC_NOTIFY_NODE_DOWN)
+ tipc_publ_notify(net, publ_list, addr);
+
+ if (flags & TIPC_NOTIFY_NODE_UP)
+ tipc_named_node_up(net, addr);
+
+ if (flags & TIPC_NOTIFY_LINK_UP)
+ tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
+ TIPC_NODE_SCOPE, link_id, addr);
+
+ if (flags & TIPC_NOTIFY_LINK_DOWN)
+ tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
+ link_id, addr);
+}
+
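Note the pattern in tipc_node_write_unlock() above: the pending action flags are snapshotted and cleared while the write lock is held, and the publication/name-table notifications run only after the lock is dropped, so those calls can take their own locks without deadlocking against n->lock. A sketch of the intended caller pattern (example_mark_node_up() is hypothetical, for illustration only):

static void example_mark_node_up(struct tipc_node *n)
{
	tipc_node_write_lock(n);
	n->action_flags |= TIPC_NOTIFY_NODE_UP;	/* record, do not notify yet */
	tipc_node_write_unlock(n);	/* tipc_named_node_up() fires here */
}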
struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_node *n_ptr, *temp_node;
+ struct tipc_node *n, *temp_node;
+ int i;
spin_lock_bh(&tn->node_list_lock);
- n_ptr = tipc_node_find(net, addr);
- if (n_ptr)
+ n = tipc_node_find(net, addr);
+ if (n)
goto exit;
- n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
- if (!n_ptr) {
+ n = kzalloc(sizeof(*n), GFP_ATOMIC);
+ if (!n) {
pr_warn("Node creation failed, no memory\n");
goto exit;
}
- n_ptr->addr = addr;
- n_ptr->net = net;
- n_ptr->capabilities = capabilities;
- kref_init(&n_ptr->kref);
- spin_lock_init(&n_ptr->lock);
- INIT_HLIST_NODE(&n_ptr->hash);
- INIT_LIST_HEAD(&n_ptr->list);
- INIT_LIST_HEAD(&n_ptr->publ_list);
- INIT_LIST_HEAD(&n_ptr->conn_sks);
- skb_queue_head_init(&n_ptr->bc_entry.namedq);
- skb_queue_head_init(&n_ptr->bc_entry.inputq1);
- __skb_queue_head_init(&n_ptr->bc_entry.arrvq);
- skb_queue_head_init(&n_ptr->bc_entry.inputq2);
- hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
+ n->addr = addr;
+ n->net = net;
+ n->capabilities = capabilities;
+ kref_init(&n->kref);
+ rwlock_init(&n->lock);
+ INIT_HLIST_NODE(&n->hash);
+ INIT_LIST_HEAD(&n->list);
+ INIT_LIST_HEAD(&n->publ_list);
+ INIT_LIST_HEAD(&n->conn_sks);
+ skb_queue_head_init(&n->bc_entry.namedq);
+ skb_queue_head_init(&n->bc_entry.inputq1);
+ __skb_queue_head_init(&n->bc_entry.arrvq);
+ skb_queue_head_init(&n->bc_entry.inputq2);
+ for (i = 0; i < MAX_BEARERS; i++)
+ spin_lock_init(&n->links[i].lock);
+ hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
- if (n_ptr->addr < temp_node->addr)
+ if (n->addr < temp_node->addr)
break;
}
- list_add_tail_rcu(&n_ptr->list, &temp_node->list);
- n_ptr->state = SELF_DOWN_PEER_LEAVING;
- n_ptr->signature = INVALID_NODE_SIG;
- n_ptr->active_links[0] = INVALID_BEARER_ID;
- n_ptr->active_links[1] = INVALID_BEARER_ID;
- if (!tipc_link_bc_create(net, tipc_own_addr(net), n_ptr->addr,
- U16_MAX, tipc_bc_sndlink(net)->window,
- n_ptr->capabilities,
- &n_ptr->bc_entry.inputq1,
- &n_ptr->bc_entry.namedq,
+ list_add_tail_rcu(&n->list, &temp_node->list);
+ n->state = SELF_DOWN_PEER_LEAVING;
+ n->signature = INVALID_NODE_SIG;
+ n->active_links[0] = INVALID_BEARER_ID;
+ n->active_links[1] = INVALID_BEARER_ID;
+ if (!tipc_link_bc_create(net, tipc_own_addr(net), n->addr,
+ U16_MAX,
+ tipc_link_window(tipc_bc_sndlink(net)),
+ n->capabilities,
+ &n->bc_entry.inputq1,
+ &n->bc_entry.namedq,
tipc_bc_sndlink(net),
- &n_ptr->bc_entry.link)) {
+ &n->bc_entry.link)) {
pr_warn("Broadcast rcv link creation failed, no memory\n");
- kfree(n_ptr);
- n_ptr = NULL;
+ kfree(n);
+ n = NULL;
goto exit;
}
- tipc_node_get(n_ptr);
- setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
- n_ptr->keepalive_intv = U32_MAX;
+ tipc_node_get(n);
+ setup_timer(&n->timer, tipc_node_timeout, (unsigned long)n);
+ n->keepalive_intv = U32_MAX;
exit:
spin_unlock_bh(&tn->node_list_lock);
- return n_ptr;
+ return n;
}
static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
- unsigned long tol = l->tolerance;
+ unsigned long tol = tipc_link_tolerance(l);
unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
unsigned long keepalive_intv = msecs_to_jiffies(intv);
@@ -209,7 +388,7 @@ static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
n->keepalive_intv = keepalive_intv;
/* Ensure link's abort limit corresponds to current interval */
- l->abort_limit = l->tolerance / jiffies_to_msecs(n->keepalive_intv);
+ tipc_link_set_abort_limit(l, tol / jiffies_to_msecs(n->keepalive_intv));
}
static void tipc_node_delete(struct tipc_node *node)
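A worked example of the interval arithmetic above, assuming TIPC's default link tolerance of 1500 ms:

/* tol            = 1500 ms
 * intv           = min(tol / 4, 500)     = 375 ms
 * keepalive_intv = msecs_to_jiffies(375)
 * abort_limit    = tol / 375             = 4
 *
 * i.e. the link is aborted after four silent probe intervals,
 * which adds up to roughly the configured tolerance.
 */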
@@ -234,6 +413,42 @@ void tipc_node_stop(struct net *net)
spin_unlock_bh(&tn->node_list_lock);
}
+void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
+{
+ struct tipc_node *n;
+
+ if (in_own_node(net, addr))
+ return;
+
+ n = tipc_node_find(net, addr);
+ if (!n) {
+ pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
+ return;
+ }
+ tipc_node_write_lock(n);
+ list_add_tail(subscr, &n->publ_list);
+ tipc_node_write_unlock(n);
+ tipc_node_put(n);
+}
+
+void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
+{
+ struct tipc_node *n;
+
+ if (in_own_node(net, addr))
+ return;
+
+ n = tipc_node_find(net, addr);
+ if (!n) {
+ pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
+ return;
+ }
+ tipc_node_write_lock(n);
+ list_del_init(subscr);
+ tipc_node_write_unlock(n);
+ tipc_node_put(n);
+}
+
int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
struct tipc_node *node;
@@ -257,9 +472,9 @@ int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
conn->port = port;
conn->peer_port = peer_port;
- tipc_node_lock(node);
+ tipc_node_write_lock(node);
list_add_tail(&conn->list, &node->conn_sks);
- tipc_node_unlock(node);
+ tipc_node_write_unlock(node);
exit:
tipc_node_put(node);
return err;
@@ -277,14 +492,14 @@ void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
if (!node)
return;
- tipc_node_lock(node);
+ tipc_node_write_lock(node);
list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
if (port != conn->port)
continue;
list_del(&conn->list);
kfree(conn);
}
- tipc_node_unlock(node);
+ tipc_node_write_unlock(node);
tipc_node_put(node);
}
@@ -301,14 +516,16 @@ static void tipc_node_timeout(unsigned long data)
__skb_queue_head_init(&xmitq);
for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
- tipc_node_lock(n);
+ tipc_node_read_lock(n);
le = &n->links[bearer_id];
+ spin_lock_bh(&le->lock);
if (le->link) {
/* Link tolerance may change asynchronously: */
tipc_node_calculate_timer(n, le->link);
rc = tipc_link_timeout(le->link, &xmitq);
}
- tipc_node_unlock(n);
+ spin_unlock_bh(&le->lock);
+ tipc_node_read_unlock(n);
tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
if (rc & TIPC_LINK_DOWN_EVT)
tipc_node_link_down(n, bearer_id, false);
@@ -340,16 +557,16 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
n->working_links++;
n->action_flags |= TIPC_NOTIFY_LINK_UP;
- n->link_id = nl->peer_bearer_id << 16 | bearer_id;
+ n->link_id = tipc_link_id(nl);
/* Leave room for tunnel header when returning 'mtu' to users: */
- n->links[bearer_id].mtu = nl->mtu - INT_H_SIZE;
+ n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE;
tipc_bearer_add_dest(n->net, bearer_id, n->addr);
tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);
pr_debug("Established link <%s> on network plane %c\n",
- nl->name, nl->net_plane);
+ tipc_link_name(nl), tipc_link_plane(nl));
/* First link? => give it both slots */
if (!ol) {
@@ -362,17 +579,17 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
}
/* Second link => redistribute slots */
- if (nl->priority > ol->priority) {
- pr_debug("Old link <%s> becomes standby\n", ol->name);
+ if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
+ pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
*slot0 = bearer_id;
*slot1 = bearer_id;
tipc_link_set_active(nl, true);
tipc_link_set_active(ol, false);
- } else if (nl->priority == ol->priority) {
+ } else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
tipc_link_set_active(nl, true);
*slot1 = bearer_id;
} else {
- pr_debug("New link <%s> is standby\n", nl->name);
+ pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
}
/* Prepare synchronization with first link */
@@ -387,9 +604,9 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
struct sk_buff_head *xmitq)
{
- tipc_node_lock(n);
+ tipc_node_write_lock(n);
__tipc_node_link_up(n, bearer_id, xmitq);
- tipc_node_unlock(n);
+ tipc_node_write_unlock(n);
}
/**
@@ -402,7 +619,7 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
struct tipc_link_entry *le = &n->links[*bearer_id];
int *slot0 = &n->active_links[0];
int *slot1 = &n->active_links[1];
- int i, highest = 0;
+ int i, highest = 0, prio;
struct tipc_link *l, *_l, *tnl;
l = n->links[*bearer_id].link;
@@ -411,12 +628,12 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
n->working_links--;
n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
- n->link_id = l->peer_bearer_id << 16 | *bearer_id;
+ n->link_id = tipc_link_id(l);
tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);
pr_debug("Lost link <%s> on network plane %c\n",
- l->name, l->net_plane);
+ tipc_link_name(l), tipc_link_plane(l));
/* Select new active link if any available */
*slot0 = INVALID_BEARER_ID;
@@ -427,10 +644,11 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
continue;
if (_l == l)
continue;
- if (_l->priority < highest)
+ prio = tipc_link_prio(_l);
+ if (prio < highest)
continue;
- if (_l->priority > highest) {
- highest = _l->priority;
+ if (prio > highest) {
+ highest = prio;
*slot0 = i;
*slot1 = i;
continue;
@@ -453,17 +671,17 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
/* There is still a working link => initiate failover */
- tnl = node_active_link(n, 0);
+ *bearer_id = n->active_links[0];
+ tnl = n->links[*bearer_id].link;
tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
- n->sync_point = tnl->rcv_nxt + (U16_MAX / 2 - 1);
+ n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
tipc_link_reset(l);
tipc_link_fsm_evt(l, LINK_RESET_EVT);
tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
- *maddr = &n->links[tnl->bearer_id].maddr;
- *bearer_id = tnl->bearer_id;
+ *maddr = &n->links[*bearer_id].maddr;
}
static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
@@ -478,7 +696,7 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
__skb_queue_head_init(&xmitq);
- tipc_node_lock(n);
+ tipc_node_write_lock(n);
if (!tipc_link_is_establishing(l)) {
__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
if (delete) {
@@ -490,12 +708,12 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
/* Defuse pending tipc_node_link_up() */
tipc_link_fsm_evt(l, LINK_RESET_EVT);
}
- tipc_node_unlock(n);
+ tipc_node_write_unlock(n);
tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
tipc_sk_rcv(n->net, &le->inputq);
}
-bool tipc_node_is_up(struct tipc_node *n)
+static bool tipc_node_is_up(struct tipc_node *n)
{
return n->active_links[0] != INVALID_BEARER_ID;
}
@@ -523,7 +741,7 @@ void tipc_node_check_dest(struct net *net, u32 onode,
if (!n)
return;
- tipc_node_lock(n);
+ tipc_node_write_lock(n);
le = &n->links[b->identity];
@@ -626,7 +844,7 @@ void tipc_node_check_dest(struct net *net, u32 onode,
}
memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
- tipc_node_unlock(n);
+ tipc_node_write_unlock(n);
if (reset && !tipc_link_is_reset(l))
tipc_node_link_down(n, b->identity, false);
tipc_node_put(n);
@@ -834,24 +1052,6 @@ illegal_evt:
pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
}
-bool tipc_node_filter_pkt(struct tipc_node *n, struct tipc_msg *hdr)
-{
- int state = n->state;
-
- if (likely(state == SELF_UP_PEER_UP))
- return true;
-
- if (state == SELF_LEAVING_PEER_DOWN)
- return false;
-
- if (state == SELF_DOWN_PEER_LEAVING) {
- if (msg_peer_node_is_up(hdr))
- return false;
- }
-
- return true;
-}
-
static void node_lost_contact(struct tipc_node *n,
struct sk_buff_head *inputq)
{
@@ -913,56 +1113,18 @@ int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
if (bearer_id >= MAX_BEARERS)
goto exit;
- tipc_node_lock(node);
+ tipc_node_read_lock(node);
link = node->links[bearer_id].link;
if (link) {
- strncpy(linkname, link->name, len);
+ strncpy(linkname, tipc_link_name(link), len);
err = 0;
}
exit:
- tipc_node_unlock(node);
+ tipc_node_read_unlock(node);
tipc_node_put(node);
return err;
}
-void tipc_node_unlock(struct tipc_node *node)
-{
- struct net *net = node->net;
- u32 addr = 0;
- u32 flags = node->action_flags;
- u32 link_id = 0;
- struct list_head *publ_list;
-
- if (likely(!flags)) {
- spin_unlock_bh(&node->lock);
- return;
- }
-
- addr = node->addr;
- link_id = node->link_id;
- publ_list = &node->publ_list;
-
- node->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
- TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
-
- spin_unlock_bh(&node->lock);
-
- if (flags & TIPC_NOTIFY_NODE_DOWN)
- tipc_publ_notify(net, publ_list, addr);
-
- if (flags & TIPC_NOTIFY_NODE_UP)
- tipc_named_node_up(net, addr);
-
- if (flags & TIPC_NOTIFY_LINK_UP)
- tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
- TIPC_NODE_SCOPE, link_id, addr);
-
- if (flags & TIPC_NOTIFY_LINK_DOWN)
- tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
- link_id, addr);
-
-}
-
/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
@@ -997,20 +1159,6 @@ msg_full:
return -EMSGSIZE;
}
-static struct tipc_link *tipc_node_select_link(struct tipc_node *n, int sel,
- int *bearer_id,
- struct tipc_media_addr **maddr)
-{
- int id = n->active_links[sel & 1];
-
- if (unlikely(id < 0))
- return NULL;
-
- *bearer_id = id;
- *maddr = &n->links[id].maddr;
- return n->links[id].link;
-}
-
/**
* tipc_node_xmit() is the general link level function for message sending
* @net: the applicable net namespace
@@ -1023,29 +1171,32 @@ static struct tipc_link *tipc_node_select_link(struct tipc_node *n, int sel,
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
u32 dnode, int selector)
{
- struct tipc_link *l = NULL;
+ struct tipc_link_entry *le = NULL;
struct tipc_node *n;
struct sk_buff_head xmitq;
- struct tipc_media_addr *maddr;
- int bearer_id;
+ int bearer_id = -1;
int rc = -EHOSTUNREACH;
__skb_queue_head_init(&xmitq);
n = tipc_node_find(net, dnode);
if (likely(n)) {
- tipc_node_lock(n);
- l = tipc_node_select_link(n, selector, &bearer_id, &maddr);
- if (likely(l))
- rc = tipc_link_xmit(l, list, &xmitq);
- tipc_node_unlock(n);
- if (unlikely(rc == -ENOBUFS))
+ tipc_node_read_lock(n);
+ bearer_id = n->active_links[selector & 1];
+ if (bearer_id >= 0) {
+ le = &n->links[bearer_id];
+ spin_lock_bh(&le->lock);
+ rc = tipc_link_xmit(le->link, list, &xmitq);
+ spin_unlock_bh(&le->lock);
+ }
+ tipc_node_read_unlock(n);
+ if (likely(!rc))
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
+ else if (rc == -ENOBUFS)
tipc_node_link_down(n, bearer_id, false);
tipc_node_put(n);
+ return rc;
}
- if (likely(!rc)) {
- tipc_bearer_xmit(net, bearer_id, &xmitq, maddr);
- return 0;
- }
+
if (likely(in_own_node(net, dnode))) {
tipc_sk_rcv(net, list);
return 0;
@@ -1075,6 +1226,30 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
return 0;
}
+void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
+{
+ struct sk_buff *txskb;
+ struct tipc_node *n;
+ u32 dst;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(n, tipc_nodes(net), list) {
+ dst = n->addr;
+ if (in_own_node(net, dst))
+ continue;
+ if (!tipc_node_is_up(n))
+ continue;
+ txskb = pskb_copy(skb, GFP_ATOMIC);
+ if (!txskb)
+ break;
+ msg_set_destnode(buf_msg(txskb), dst);
+ tipc_node_xmit_skb(net, txskb, dst, 0);
+ }
+ rcu_read_unlock();
+
+ kfree_skb(skb);
+}
+
/**
* tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
* @net: the applicable net namespace
@@ -1116,9 +1291,9 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
/* Broadcast ACKs are sent on a unicast link */
if (rc & TIPC_LINK_SND_BC_ACK) {
- tipc_node_lock(n);
+ tipc_node_read_lock(n);
tipc_link_build_ack_msg(le->link, &xmitq);
- tipc_node_unlock(n);
+ tipc_node_read_unlock(n);
}
if (!skb_queue_empty(&xmitq))
@@ -1151,30 +1326,30 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
u16 oseqno = msg_seqno(hdr);
u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
u16 exp_pkts = msg_msgcnt(hdr);
- u16 rcv_nxt, syncpt, dlv_nxt;
+ u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
int state = n->state;
struct tipc_link *l, *tnl, *pl = NULL;
struct tipc_media_addr *maddr;
- int i, pb_id;
+ int pb_id;
l = n->links[bearer_id].link;
if (!l)
return false;
- rcv_nxt = l->rcv_nxt;
+ rcv_nxt = tipc_link_rcv_nxt(l);
if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
return true;
/* Find parallel link, if any */
- for (i = 0; i < MAX_BEARERS; i++) {
- if ((i != bearer_id) && n->links[i].link) {
- pl = n->links[i].link;
+ for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
+ if ((pb_id != bearer_id) && n->links[pb_id].link) {
+ pl = n->links[pb_id].link;
break;
}
}
- /* Update node accessibility if applicable */
+ /* Check and update node accessibility if applicable */
if (state == SELF_UP_PEER_COMING) {
if (!tipc_link_is_up(l))
return true;
@@ -1187,8 +1362,12 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
if (msg_peer_node_is_up(hdr))
return false;
tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
+ return true;
}
+ if (state == SELF_LEAVING_PEER_DOWN)
+ return false;
+
/* Ignore duplicate packets */
if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
return true;
@@ -1197,9 +1376,9 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
syncpt = oseqno + exp_pkts - 1;
if (pl && tipc_link_is_up(pl)) {
- pb_id = pl->bearer_id;
__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
- tipc_skb_queue_splice_tail_init(pl->inputq, l->inputq);
+ tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
+ tipc_link_inputq(l));
}
/* If pkts arrive out of order, use lowest calculated syncpt */
if (less(syncpt, n->sync_point))
@@ -1232,19 +1411,18 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
}
- if (less(syncpt, n->sync_point))
- n->sync_point = syncpt;
}
/* Open tunnel link when parallel link reaches synch point */
- if ((n->state == NODE_SYNCHING) && tipc_link_is_synching(l)) {
+ if (n->state == NODE_SYNCHING) {
if (tipc_link_is_synching(l)) {
tnl = l;
} else {
tnl = pl;
pl = l;
}
- dlv_nxt = pl->rcv_nxt - mod(skb_queue_len(pl->inputq));
+ inputq_len = skb_queue_len(tipc_link_inputq(pl));
+ dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
if (more(dlv_nxt, n->sync_point)) {
tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
@@ -1304,22 +1482,32 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
/* Ensure broadcast reception is in synch with peer's send state */
if (unlikely(usr == LINK_PROTOCOL))
tipc_bcast_sync_rcv(net, n->bc_entry.link, hdr);
- else if (unlikely(n->bc_entry.link->acked != bc_ack))
+ else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
tipc_bcast_ack_rcv(net, n->bc_entry.link, bc_ack);
- tipc_node_lock(n);
-
- /* Is reception permitted at the moment ? */
- if (!tipc_node_filter_pkt(n, hdr))
- goto unlock;
-
- /* Check and if necessary update node state */
- if (likely(tipc_node_check_state(n, skb, bearer_id, &xmitq))) {
- rc = tipc_link_rcv(le->link, skb, &xmitq);
- skb = NULL;
+ /* Receive packet directly if conditions permit */
+ tipc_node_read_lock(n);
+ if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
+ spin_lock_bh(&le->lock);
+ if (le->link) {
+ rc = tipc_link_rcv(le->link, skb, &xmitq);
+ skb = NULL;
+ }
+ spin_unlock_bh(&le->lock);
+ }
+ tipc_node_read_unlock(n);
+
+ /* Check/update node state before receiving */
+ if (unlikely(skb)) {
+ tipc_node_write_lock(n);
+ if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
+ if (le->link) {
+ rc = tipc_link_rcv(le->link, skb, &xmitq);
+ skb = NULL;
+ }
+ }
+ tipc_node_write_unlock(n);
}
-unlock:
- tipc_node_unlock(n);
if (unlikely(rc & TIPC_LINK_UP_EVT))
tipc_node_link_up(n, bearer_id, &xmitq);
@@ -1384,15 +1572,15 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
continue;
}
- tipc_node_lock(node);
+ tipc_node_read_lock(node);
err = __tipc_nl_add_node(&msg, node);
if (err) {
last_addr = node->addr;
- tipc_node_unlock(node);
+ tipc_node_read_unlock(node);
goto out;
}
- tipc_node_unlock(node);
+ tipc_node_read_unlock(node);
}
done = 1;
out:
@@ -1402,3 +1590,314 @@ out:
return skb->len;
}
+
+/* tipc_node_find_by_name - locate owner node of link by link's name
+ * @net: the applicable net namespace
+ * @link_name: pointer to link name string
+ * @bearer_id: pointer to index in 'node->links' array where the link was found.
+ *
+ * Returns pointer to node owning the link, or NULL if no matching link is found.
+ */
+static struct tipc_node *tipc_node_find_by_name(struct net *net,
+ const char *link_name,
+ unsigned int *bearer_id)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *l;
+ struct tipc_node *n;
+ struct tipc_node *found_node = NULL;
+ int i;
+
+ *bearer_id = 0;
+ rcu_read_lock();
+ list_for_each_entry_rcu(n, &tn->node_list, list) {
+ tipc_node_read_lock(n);
+ for (i = 0; i < MAX_BEARERS; i++) {
+ l = n->links[i].link;
+ if (l && !strcmp(tipc_link_name(l), link_name)) {
+ *bearer_id = i;
+ found_node = n;
+ break;
+ }
+ }
+ tipc_node_read_unlock(n);
+ if (found_node)
+ break;
+ }
+ rcu_read_unlock();
+
+ return found_node;
+}
+
+int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ int res = 0;
+ int bearer_id;
+ char *name;
+ struct tipc_link *link;
+ struct tipc_node *node;
+ struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
+ struct net *net = sock_net(skb->sk);
+
+ if (!info->attrs[TIPC_NLA_LINK])
+ return -EINVAL;
+
+ err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
+ info->attrs[TIPC_NLA_LINK],
+ tipc_nl_link_policy);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_LINK_NAME])
+ return -EINVAL;
+
+ name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
+
+ if (strcmp(name, tipc_bclink_name) == 0)
+ return tipc_nl_bc_link_set(net, attrs);
+
+ node = tipc_node_find_by_name(net, name, &bearer_id);
+ if (!node)
+ return -EINVAL;
+
+ tipc_node_read_lock(node);
+
+ link = node->links[bearer_id].link;
+ if (!link) {
+ res = -EINVAL;
+ goto out;
+ }
+
+ if (attrs[TIPC_NLA_LINK_PROP]) {
+ struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+
+ err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
+ props);
+ if (err) {
+ res = err;
+ goto out;
+ }
+
+ if (props[TIPC_NLA_PROP_TOL]) {
+ u32 tol;
+
+ tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
+ tipc_link_set_tolerance(link, tol);
+ }
+ if (props[TIPC_NLA_PROP_PRIO]) {
+ u32 prio;
+
+ prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
+ tipc_link_set_prio(link, prio);
+ }
+ if (props[TIPC_NLA_PROP_WIN]) {
+ u32 win;
+
+ win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ tipc_link_set_queue_limits(link, win);
+ }
+ }
+
+out:
+ tipc_node_read_unlock(node);
+
+ return res;
+}
+
+int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ struct tipc_nl_msg msg;
+ char *name;
+ int err;
+
+ msg.portid = info->snd_portid;
+ msg.seq = info->snd_seq;
+
+ if (!info->attrs[TIPC_NLA_LINK_NAME])
+ return -EINVAL;
+ name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
+
+ msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg.skb)
+ return -ENOMEM;
+
+ if (strcmp(name, tipc_bclink_name) == 0) {
+ err = tipc_nl_add_bc_link(net, &msg);
+ if (err) {
+ nlmsg_free(msg.skb);
+ return err;
+ }
+ } else {
+ int bearer_id;
+ struct tipc_node *node;
+ struct tipc_link *link;
+
+ node = tipc_node_find_by_name(net, name, &bearer_id);
+ if (!node)
+ return -EINVAL;
+
+ tipc_node_read_lock(node);
+ link = node->links[bearer_id].link;
+ if (!link) {
+ tipc_node_read_unlock(node);
+ nlmsg_free(msg.skb);
+ return -EINVAL;
+ }
+
+ err = __tipc_nl_add_link(net, &msg, link, 0);
+ tipc_node_read_unlock(node);
+ if (err) {
+ nlmsg_free(msg.skb);
+ return err;
+ }
+ }
+
+ return genlmsg_reply(msg.skb, info);
+}
+
+int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ char *link_name;
+ unsigned int bearer_id;
+ struct tipc_link *link;
+ struct tipc_node *node;
+ struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
+ struct net *net = sock_net(skb->sk);
+ struct tipc_link_entry *le;
+
+ if (!info->attrs[TIPC_NLA_LINK])
+ return -EINVAL;
+
+ err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
+ info->attrs[TIPC_NLA_LINK],
+ tipc_nl_link_policy);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_LINK_NAME])
+ return -EINVAL;
+
+ link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
+
+ if (strcmp(link_name, tipc_bclink_name) == 0) {
+ err = tipc_bclink_reset_stats(net);
+ if (err)
+ return err;
+ return 0;
+ }
+
+ node = tipc_node_find_by_name(net, link_name, &bearer_id);
+ if (!node)
+ return -EINVAL;
+
+ le = &node->links[bearer_id];
+ tipc_node_read_lock(node);
+ spin_lock_bh(&le->lock);
+ link = node->links[bearer_id].link;
+ if (!link) {
+ spin_unlock_bh(&le->lock);
+ tipc_node_read_unlock(node);
+ return -EINVAL;
+ }
+ tipc_link_reset_stats(link);
+ spin_unlock_bh(&le->lock);
+ tipc_node_read_unlock(node);
+ return 0;
+}
+
+/* Caller should hold node lock */
+static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
+ struct tipc_node *node, u32 *prev_link)
+{
+ u32 i;
+ int err;
+
+ for (i = *prev_link; i < MAX_BEARERS; i++) {
+ *prev_link = i;
+
+ if (!node->links[i].link)
+ continue;
+
+ err = __tipc_nl_add_link(net, msg,
+ node->links[i].link, NLM_F_MULTI);
+ if (err)
+ return err;
+ }
+ *prev_link = 0;
+
+ return 0;
+}
+
+int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_node *node;
+ struct tipc_nl_msg msg;
+ u32 prev_node = cb->args[0];
+ u32 prev_link = cb->args[1];
+ int done = cb->args[2];
+ int err;
+
+ if (done)
+ return 0;
+
+ msg.skb = skb;
+ msg.portid = NETLINK_CB(cb->skb).portid;
+ msg.seq = cb->nlh->nlmsg_seq;
+
+ rcu_read_lock();
+ if (prev_node) {
+ node = tipc_node_find(net, prev_node);
+ if (!node) {
+ /* We never set seq or call nl_dump_check_consistent(),
+ * which means that setting prev_seq here will cause the
+ * consistency check to fail in the netlink callback
+ * handler, resulting in the last NLMSG_DONE message
+ * having the NLM_F_DUMP_INTR flag set.
+ */
+ cb->prev_seq = 1;
+ goto out;
+ }
+ tipc_node_put(node);
+
+ list_for_each_entry_continue_rcu(node, &tn->node_list,
+ list) {
+ tipc_node_read_lock(node);
+ err = __tipc_nl_add_node_links(net, &msg, node,
+ &prev_link);
+ tipc_node_read_unlock(node);
+ if (err)
+ goto out;
+
+ prev_node = node->addr;
+ }
+ } else {
+ err = tipc_nl_add_bc_link(net, &msg);
+ if (err)
+ goto out;
+
+ list_for_each_entry_rcu(node, &tn->node_list, list) {
+ tipc_node_read_lock(node);
+ err = __tipc_nl_add_node_links(net, &msg, node,
+ &prev_link);
+ tipc_node_read_unlock(node);
+ if (err)
+ goto out;
+
+ prev_node = node->addr;
+ }
+ }
+ done = 1;
+out:
+ rcu_read_unlock();
+
+ cb->args[0] = prev_node;
+ cb->args[1] = prev_link;
+ cb->args[2] = done;
+
+ return skb->len;
+}