author     Jon Paul Maloy <jon.maloy@ericsson.com>   2014-08-22 18:09:07 -0400
committer  David S. Miller <davem@davemloft.net>     2014-08-23 11:18:33 -0700
commit     50100a5e39461b2a61d6040e73c384766c29975d (patch)
tree       7f632d0f22af7f38c282603e5c9f4de3139bce59 /net/tipc/node.c
parent     1dd0bd2b14032037d40a316dd52370f1713fa62b (diff)
download   op-kernel-dev-50100a5e39461b2a61d6040e73c384766c29975d.zip
           op-kernel-dev-50100a5e39461b2a61d6040e73c384766c29975d.tar.gz
tipc: use pseudo message to wake up sockets after link congestion
The current link implementation keeps a linked list of blocked ports/
sockets that is populated when there is link congestion. The purpose
of this is to let the link know which users to wake up when the
congestion abates.
This adds unnecessary complexity to the data structure and the code,
since it forces us to involve the link each time we want to delete
a socket. It also forces us to grab the spinlock port_lock within
the scope of node_lock. We want to get rid of this direct dependence,
as well as the deadlock hazard resulting from the usage of port_lock.
In this commit, we instead let the link keep a list of "wakeup" pseudo
messages for use in such situations. Those messages are sent to the
pending sockets via the ordinary message reception path, and wake up
each socket's owner when they are received.
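
For illustration, below is a minimal user-space model of this mechanism.
It is only a sketch, not the kernel code: the struct and function names
(pseudo_msg, link_schedule_wakeup, sk_rcv, link_congestion_abated) are
invented for the example, a plain malloc'd list stands in for TIPC's
sk_buff queues, and WAKEUP_MSG stands in for the SOCK_WAKEUP message user.

#include <stdio.h>
#include <stdlib.h>

/* Simplified model only: WAKEUP_MSG stands in for TIPC's SOCK_WAKEUP user. */
enum msg_type { DATA_MSG, WAKEUP_MSG };

struct pseudo_msg {
	enum msg_type type;
	int dest_port;                  /* the socket that was blocked */
	struct pseudo_msg *next;
};

struct link {
	struct pseudo_msg *wakeup_head; /* populated while the link is congested */
	struct pseudo_msg **wakeup_tail;
};

/* A sender hit congestion: queue a wakeup pseudo message for it. */
static void link_schedule_wakeup(struct link *l, int port)
{
	struct pseudo_msg *m = malloc(sizeof(*m));

	m->type = WAKEUP_MSG;
	m->dest_port = port;
	m->next = NULL;
	*l->wakeup_tail = m;
	l->wakeup_tail = &m->next;
}

/* Ordinary reception path: a wakeup message simply wakes the socket owner. */
static void sk_rcv(struct pseudo_msg *m)
{
	if (m->type == WAKEUP_MSG)
		printf("wake up sender blocked on port %d\n", m->dest_port);
	else
		printf("deliver data to port %d\n", m->dest_port);
	free(m);
}

/* Congestion abated: feed the queued pseudo messages into the normal path. */
static void link_congestion_abated(struct link *l)
{
	struct pseudo_msg *m = l->wakeup_head;

	l->wakeup_head = NULL;
	l->wakeup_tail = &l->wakeup_head;
	while (m) {
		struct pseudo_msg *next = m->next;

		sk_rcv(m);
		m = next;
	}
}

int main(void)
{
	struct link l = { NULL, &l.wakeup_head };

	link_schedule_wakeup(&l, 1001);
	link_schedule_wakeup(&l, 1002);
	link_congestion_abated(&l);
	return 0;
}
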
This enables us to get rid of the 'waiting_ports' linked lists in struct
tipc_port that manifest this direct reference. As a consequence, we can
eliminate another BH entry into the socket, and hence the need to grab
port_lock. This is a further step in our effort to remove port_lock
altogether.
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Reviewed-by: Erik Hugne <erik.hugne@ericsson.com>
Reviewed-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tipc/node.c')
-rw-r--r--   net/tipc/node.c   12
1 file changed, 12 insertions, 0 deletions
diff --git a/net/tipc/node.c b/net/tipc/node.c
index f706929..6ea2c15 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -38,6 +38,7 @@
 #include "config.h"
 #include "node.h"
 #include "name_distr.h"
+#include "socket.h"
 
 #define NODE_HTABLE_SIZE 512
 
@@ -100,6 +101,7 @@ struct tipc_node *tipc_node_create(u32 addr)
 	INIT_HLIST_NODE(&n_ptr->hash);
 	INIT_LIST_HEAD(&n_ptr->list);
 	INIT_LIST_HEAD(&n_ptr->nsub);
+	__skb_queue_head_init(&n_ptr->waiting_sks);
 
 	hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
 
@@ -474,6 +476,7 @@ int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
 void tipc_node_unlock(struct tipc_node *node)
 {
 	LIST_HEAD(nsub_list);
+	struct sk_buff_head waiting_sks;
 	u32 addr = 0;
 
 	if (likely(!node->action_flags)) {
@@ -481,6 +484,11 @@ void tipc_node_unlock(struct tipc_node *node)
 		return;
 	}
 
+	__skb_queue_head_init(&waiting_sks);
+	if (node->action_flags & TIPC_WAKEUP_USERS) {
+		skb_queue_splice_init(&node->waiting_sks, &waiting_sks);
+		node->action_flags &= ~TIPC_WAKEUP_USERS;
+	}
 	if (node->action_flags & TIPC_NOTIFY_NODE_DOWN) {
 		list_replace_init(&node->nsub, &nsub_list);
 		node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN;
@@ -491,8 +499,12 @@ void tipc_node_unlock(struct tipc_node *node)
 	}
 	spin_unlock_bh(&node->lock);
 
+	while (!skb_queue_empty(&waiting_sks))
+		tipc_sk_rcv(__skb_dequeue(&waiting_sks));
+
 	if (!list_empty(&nsub_list))
 		tipc_nodesub_notify(&nsub_list);
+
 	if (addr)
 		tipc_named_node_up(addr);
 }
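
Note how the node.c hunk avoids calling into the socket layer while
node->lock is held: pending wakeup buffers are spliced from
node->waiting_sks into a local queue under the lock, and only after
spin_unlock_bh() does tipc_sk_rcv() deliver them. The following is a
hedged, user-space illustration of that splice-then-deliver pattern; the
pthread mutex, the hand-rolled list and the function names are stand-ins
for node->lock and the kernel's sk_buff_head helpers, not TIPC code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	int port;
	struct item *next;
};

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *waiting;            /* protected by node_lock */

/* Record a blocked sender (models queuing onto node->waiting_sks). */
static void add_waiting(int port)
{
	struct item *it = malloc(sizeof(*it));

	it->port = port;
	pthread_mutex_lock(&node_lock);
	it->next = waiting;
	waiting = it;
	pthread_mutex_unlock(&node_lock);
}

/* Splice under the lock, deliver after dropping it. */
static void node_unlock_and_deliver(void)
{
	struct item *local;

	pthread_mutex_lock(&node_lock);
	local = waiting;                /* take the whole pending list ... */
	waiting = NULL;                 /* ... leaving an empty one behind */
	pthread_mutex_unlock(&node_lock);

	while (local) {                 /* no lock held during "reception" */
		struct item *next = local->next;

		printf("deliver wakeup to port %d\n", local->port);
		free(local);
		local = next;
	}
}

int main(void)
{
	add_waiting(1001);
	add_waiting(1002);
	node_unlock_and_deliver();
	return 0;
}

Because delivery happens with no node-side lock held, the receive path is
free to take its own socket-side lock without the nested node_lock ->
port_lock ordering that the commit message calls out as a deadlock hazard.
(Build the sketch with cc -pthread if trying it out.)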