From 5405ff6e15f40f2f53e37d2dcd7de521e2b7a96f Mon Sep 17 00:00:00 2001
From: Jon Paul Maloy
Date: Thu, 19 Nov 2015 14:30:44 -0500
Subject: tipc: convert node lock to rwlock

According to the node FSM, a node in state SELF_UP_PEER_UP cannot
change state inside a lock context, except when a TUNNEL_PROTOCOL
(SYNCH or FAILOVER) packet arrives. However, the node's individual
links may still change state.

Since each link is now protected by its own spinlock, we finally have
the conditions in place to convert the node spinlock to an rwlock_t.
If the node state and the arriving packet type are right, we can let
the link receive the packet directly, under protection of its own
spinlock and the node lock in read mode. In all other cases we use the
node lock in write mode. This enables fully concurrent execution
between parallel links during steady-state traffic situations, i.e.,
99+% of the time.

This commit implements this change.

Reviewed-by: Ying Xue
Signed-off-by: Jon Maloy
Signed-off-by: David S. Miller
---
 net/tipc/link.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

(limited to 'net/tipc/link.c')

diff --git a/net/tipc/link.c b/net/tipc/link.c
index b5e895c..1dda46e 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1547,7 +1547,7 @@ static struct tipc_node *tipc_link_find_owner(struct net *net,
 	*bearer_id = 0;
 	rcu_read_lock();
 	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
-		tipc_node_lock(n_ptr);
+		tipc_node_read_lock(n_ptr);
 		for (i = 0; i < MAX_BEARERS; i++) {
 			l_ptr = n_ptr->links[i].link;
 			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
@@ -1556,7 +1556,7 @@ static struct tipc_node *tipc_link_find_owner(struct net *net,
 				break;
 			}
 		}
-		tipc_node_unlock(n_ptr);
+		tipc_node_read_unlock(n_ptr);
 		if (found_node)
 			break;
 	}
@@ -1658,7 +1658,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
 	if (!node)
 		return -EINVAL;
 
-	tipc_node_lock(node);
+	tipc_node_read_lock(node);
 
 	link = node->links[bearer_id].link;
 	if (!link) {
@@ -1699,7 +1699,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
 	}
 
 out:
-	tipc_node_unlock(node);
+	tipc_node_read_unlock(node);
 
 	return res;
 }
@@ -1898,10 +1898,10 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
 		list_for_each_entry_continue_rcu(node, &tn->node_list,
 						 list) {
-			tipc_node_lock(node);
+			tipc_node_read_lock(node);
 			err = __tipc_nl_add_node_links(net, &msg, node,
 						       &prev_link);
-			tipc_node_unlock(node);
+			tipc_node_read_unlock(node);
 			if (err)
 				goto out;
 
@@ -1913,10 +1913,10 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
 			goto out;
 
 		list_for_each_entry_rcu(node, &tn->node_list, list) {
-			tipc_node_lock(node);
+			tipc_node_read_lock(node);
 			err = __tipc_nl_add_node_links(net, &msg, node,
 						       &prev_link);
-			tipc_node_unlock(node);
+			tipc_node_read_unlock(node);
 			if (err)
 				goto out;
 
@@ -1967,16 +1967,16 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
 		if (!node)
 			return -EINVAL;
 
-		tipc_node_lock(node);
+		tipc_node_read_lock(node);
 		link = node->links[bearer_id].link;
 		if (!link) {
-			tipc_node_unlock(node);
+			tipc_node_read_unlock(node);
 			nlmsg_free(msg.skb);
 			return -EINVAL;
 		}
 
 		err = __tipc_nl_add_link(net, &msg, link, 0);
-		tipc_node_unlock(node);
+		tipc_node_read_unlock(node);
 		if (err) {
 			nlmsg_free(msg.skb);
 			return err;
@@ -2021,18 +2021,18 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
 	node = tipc_link_find_owner(net, link_name, &bearer_id);
 	if (!node)
 		return -EINVAL;
+	le = &node->links[bearer_id];
 
-	tipc_node_lock(node);
+	tipc_node_read_lock(node);
 	spin_lock_bh(&le->lock);
 	link = le->link;
 	if (!link) {
-		tipc_node_unlock(node);
+		spin_unlock_bh(&le->lock);
+		tipc_node_read_unlock(node);
 		return -EINVAL;
 	}
-
 	link_reset_statistics(link);
 	spin_unlock_bh(&le->lock);
-	tipc_node_unlock(node);
-
+	tipc_node_read_unlock(node);
 	return 0;
 }
--
cgit v1.1
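For illustration only (this sketch is not part of the patch): the locking pattern described in
the changelog and visible in the tipc_nl_link_reset_stats() hunk above looks roughly like the
code below. The struct layouts, the sketch_* names and the read_lock_bh()/read_unlock_bh()
bodies of the wrappers are assumptions made for this example; only the nesting order (node
rwlock in read mode, then the per-link spinlock) is taken from the diff.

/*
 * Illustrative sketch of the locking scheme this commit introduces.
 * All identifiers prefixed with "sketch_" are stand-ins; the real
 * definitions live in net/tipc/node.[ch], which are not shown in this
 * excerpt.
 */
#include <linux/errno.h>
#include <linux/spinlock.h>	/* rwlock_t, spinlock_t and the *_bh() helpers */

#define SKETCH_MAX_BEARERS 3	/* stand-in for TIPC's MAX_BEARERS */

struct sketch_link_entry {
	spinlock_t lock;	/* per-link spinlock, like le->lock in the diff */
	void *link;		/* stand-in for struct tipc_link * */
};

struct sketch_node {
	rwlock_t lock;		/* node lock; a spinlock_t before this commit */
	struct sketch_link_entry links[SKETCH_MAX_BEARERS];
};

/* Assumed shape of the wrappers named in the diff. */
static void sketch_node_read_lock(struct sketch_node *n)
{
	read_lock_bh(&n->lock);
}

static void sketch_node_read_unlock(struct sketch_node *n)
{
	read_unlock_bh(&n->lock);
}

/*
 * Read-mostly pattern used by the converted netlink handlers: take the
 * node lock in read mode so the link array stays stable, then the
 * per-link spinlock to serialize work on that one link. Two links on
 * the same node can now be serviced in parallel.
 */
static int sketch_link_op(struct sketch_node *n, int bearer_id)
{
	struct sketch_link_entry *le = &n->links[bearer_id];
	int err = 0;

	sketch_node_read_lock(n);
	spin_lock_bh(&le->lock);
	if (!le->link)
		err = -EINVAL;
	/* ...otherwise operate on le->link, e.g. reset its statistics... */
	spin_unlock_bh(&le->lock);
	sketch_node_read_unlock(n);
	return err;
}

/*
 * Paths that may change node or link state (e.g. an arriving SYNCH or
 * FAILOVER packet) would instead take the node lock exclusively with
 * write_lock_bh(&n->lock) / write_unlock_bh(&n->lock).
 */

This mirrors the reset-stats hunk above: the read lock only has to exclude writers that change
node or link state, so netlink queries and steady-state traffic on different links of the same
node no longer contend on a single node spinlock.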