Diffstat (limited to 'net/rxrpc')
 net/rxrpc/af_rxrpc.c     | 15
 net/rxrpc/ar-internal.h  | 77
 net/rxrpc/call_accept.c  | 27
 net/rxrpc/call_event.c   |  5
 net/rxrpc/call_object.c  | 32
 net/rxrpc/conn_client.c  |  3
 net/rxrpc/conn_event.c   |  6
 net/rxrpc/conn_object.c  | 10
 net/rxrpc/conn_service.c |  1
 net/rxrpc/input.c        | 23
 net/rxrpc/local_object.c | 65
 net/rxrpc/net_ns.c       | 24
 net/rxrpc/output.c       | 59
 net/rxrpc/peer_event.c   | 98
 net/rxrpc/peer_object.c  | 93
 net/rxrpc/proc.c         |  6
 net/rxrpc/recvmsg.c      |  2
 net/rxrpc/rxkad.c        |  2
 net/rxrpc/security.c     |  3
 net/rxrpc/sendmsg.c      | 10
 20 files changed, 464 insertions(+), 97 deletions(-)
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 0c9c18a..9a2c8e7 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -32,7 +32,7 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_RXRPC);
unsigned int rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
-module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
+module_param_named(debug, rxrpc_debug, uint, 0644);
MODULE_PARM_DESC(debug, "RxRPC debugging mask");
static struct proto rxrpc_proto;
@@ -40,6 +40,7 @@ static const struct proto_ops rxrpc_rpc_ops;
/* current debugging ID */
atomic_t rxrpc_debug_id;
+EXPORT_SYMBOL(rxrpc_debug_id);
/* count of skbs currently in use */
atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
@@ -267,6 +268,7 @@ static int rxrpc_listen(struct socket *sock, int backlog)
* @gfp: The allocation constraints
* @notify_rx: Where to send notifications instead of socket queue
* @upgrade: Request service upgrade for call
+ * @debug_id: The debug ID for tracing to be assigned to the call
*
* Allow a kernel service to begin a call on the nominated socket. This just
* sets up all the internal tracking structures and allocates connection and
@@ -282,7 +284,8 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
s64 tx_total_len,
gfp_t gfp,
rxrpc_notify_rx_t notify_rx,
- bool upgrade)
+ bool upgrade,
+ unsigned int debug_id)
{
struct rxrpc_conn_parameters cp;
struct rxrpc_call_params p;
@@ -314,13 +317,14 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
cp.exclusive = false;
cp.upgrade = upgrade;
cp.service_id = srx->srx_service;
- call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp);
+ call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp, debug_id);
/* The socket has been unlocked. */
if (!IS_ERR(call)) {
call->notify_rx = notify_rx;
mutex_unlock(&call->user_mutex);
}
+ rxrpc_put_peer(cp.peer);
_leave(" = %p", call);
return call;
}
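
Since callers now mint the debug ID themselves (which is why rxrpc_debug_id gains an EXPORT_SYMBOL above), a kernel-service caller ends up looking roughly like the hedged sketch below; the wrapper name and its argument choices are illustrative, not part of this patch.

/* Hypothetical caller: allocate the debug ID first so that tracepoints
 * emitted before the call exists can be tied to the call about to be
 * created, then hand the same ID to rxrpc.
 */
struct rxrpc_call *example_begin_call(struct socket *sock,
				      struct sockaddr_rxrpc *srx,
				      struct key *key)
{
	unsigned int debug_id = atomic_inc_return(&rxrpc_debug_id);

	return rxrpc_kernel_begin_call(sock, srx, key,
				       0,		/* user_call_ID */
				       -1,		/* tx_total_len unknown */
				       GFP_KERNEL,
				       NULL,		/* notify_rx: use socket queue */
				       false,		/* no service upgrade */
				       debug_id);
}
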
@@ -444,6 +448,7 @@ int rxrpc_kernel_retry_call(struct socket *sock, struct rxrpc_call *call,
ret = rxrpc_retry_client_call(rx, call, &cp, srx, GFP_KERNEL);
mutex_unlock(&call->user_mutex);
+ rxrpc_put_peer(cp.peer);
_leave(" = %d", ret);
return ret;
}
@@ -759,6 +764,7 @@ static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
+ struct rxrpc_net *rxnet;
struct rxrpc_sock *rx;
struct sock *sk;
@@ -798,6 +804,9 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
rwlock_init(&rx->call_lock);
memset(&rx->srx, 0, sizeof(rx->srx));
+ rxnet = rxrpc_net(sock_net(&rx->sk));
+ timer_reduce(&rxnet->peer_keepalive_timer, jiffies + 1);
+
_leave(" = 0 [%p]", rx);
return 0;
}
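
Arming the keepalive timer with timer_reduce() rather than mod_timer() means socket creation can only pull the next keepalive pass earlier, never postpone one already due sooner. A runnable userspace model of that semantic (ignoring the not-yet-armed case, which timer_reduce() also handles):

#include <stdio.h>

static unsigned long expires = 1000;	/* pretend jiffies expiry */

static void timer_reduce_model(unsigned long new_expiry)
{
	/* take the new expiry only if it is earlier (wrap-safe compare) */
	if ((long)(new_expiry - expires) < 0)
		expires = new_expiry;
}

int main(void)
{
	timer_reduce_model(1500);		/* later: ignored */
	timer_reduce_model(400);		/* earlier: taken */
	printf("expires = %lu\n", expires);	/* prints 400 */
	return 0;
}
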
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 4166883..90d7079 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -75,7 +75,9 @@ struct rxrpc_net {
u32 epoch; /* Local epoch for detecting local-end reset */
struct list_head calls; /* List of calls active in this namespace */
rwlock_t call_lock; /* Lock for ->calls */
+ atomic_t nr_calls; /* Count of allocated calls */
+ atomic_t nr_conns;
struct list_head conn_proc_list; /* List of conns in this namespace for proc */
struct list_head service_conns; /* Service conns in this namespace */
rwlock_t conn_lock; /* Lock for ->conn_proc_list, ->service_conns */
@@ -97,8 +99,16 @@ struct rxrpc_net {
struct list_head local_endpoints;
struct mutex local_mutex; /* Lock for ->local_endpoints */
- spinlock_t peer_hash_lock; /* Lock for ->peer_hash */
DECLARE_HASHTABLE (peer_hash, 10);
+ spinlock_t peer_hash_lock; /* Lock for ->peer_hash */
+
+#define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */
+ u8 peer_keepalive_cursor;
+ ktime_t peer_keepalive_base;
+ struct hlist_head peer_keepalive[RXRPC_KEEPALIVE_TIME + 1];
+ struct hlist_head peer_keepalive_new;
+ struct timer_list peer_keepalive_timer;
+ struct work_struct peer_keepalive_work;
};
/*
@@ -285,6 +295,8 @@ struct rxrpc_peer {
struct hlist_head error_targets; /* targets for net error distribution */
struct work_struct error_distributor;
struct rb_root service_conns; /* Service connections */
+ struct hlist_node keepalive_link; /* Link in net->peer_keepalive[] */
+ ktime_t last_tx_at; /* Last time packet sent here */
seqlock_t service_conn_lock;
spinlock_t lock; /* access lock */
unsigned int if_mtu; /* interface MTU for this peer */
@@ -518,6 +530,7 @@ struct rxrpc_call {
struct rxrpc_connection *conn; /* connection carrying call */
struct rxrpc_peer *peer; /* Peer record for remote address */
struct rxrpc_sock __rcu *socket; /* socket responsible */
+ struct rxrpc_net *rxnet; /* Network namespace to which call belongs */
struct mutex user_mutex; /* User access mutex */
unsigned long ack_at; /* When deferred ACK needs to happen */
unsigned long ack_lost_at; /* When ACK is figured as lost */
@@ -691,7 +704,6 @@ struct rxrpc_send_params {
* af_rxrpc.c
*/
extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
-extern atomic_t rxrpc_debug_id;
extern struct workqueue_struct *rxrpc_workqueue;
/*
@@ -732,11 +744,12 @@ extern unsigned int rxrpc_max_call_lifetime;
extern struct kmem_cache *rxrpc_call_jar;
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
-struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t);
+struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t, unsigned int);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
struct rxrpc_conn_parameters *,
struct sockaddr_rxrpc *,
- struct rxrpc_call_params *, gfp_t);
+ struct rxrpc_call_params *, gfp_t,
+ unsigned int);
int rxrpc_retry_client_call(struct rxrpc_sock *,
struct rxrpc_call *,
struct rxrpc_conn_parameters *,
@@ -778,6 +791,7 @@ static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call,
call->error = error;
call->completion = compl,
call->state = RXRPC_CALL_COMPLETE;
+ trace_rxrpc_call_complete(call);
wake_up(&call->waitq);
return true;
}
@@ -822,7 +836,7 @@ static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
rxrpc_seq_t seq,
u32 abort_code, int error)
{
- trace_rxrpc_abort(why, call->cid, call->call_id, seq,
+ trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq,
abort_code, error);
return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
abort_code, error);
@@ -968,31 +982,12 @@ extern void rxrpc_process_local_events(struct rxrpc_local *);
* local_object.c
*/
struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
-void __rxrpc_put_local(struct rxrpc_local *);
+struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
+struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
+void rxrpc_put_local(struct rxrpc_local *);
+void rxrpc_queue_local(struct rxrpc_local *);
void rxrpc_destroy_all_locals(struct rxrpc_net *);
-static inline void rxrpc_get_local(struct rxrpc_local *local)
-{
- atomic_inc(&local->usage);
-}
-
-static inline
-struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
-{
- return atomic_inc_not_zero(&local->usage) ? local : NULL;
-}
-
-static inline void rxrpc_put_local(struct rxrpc_local *local)
-{
- if (local && atomic_dec_and_test(&local->usage))
- __rxrpc_put_local(local);
-}
-
-static inline void rxrpc_queue_local(struct rxrpc_local *local)
-{
- rxrpc_queue_work(&local->processor);
-}
-
/*
* misc.c
*/
@@ -1025,6 +1020,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *);
int rxrpc_send_abort_packet(struct rxrpc_call *);
int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
void rxrpc_reject_packets(struct rxrpc_local *);
+void rxrpc_send_keepalive(struct rxrpc_peer *);
/*
* peer_event.c
@@ -1033,6 +1029,7 @@ void rxrpc_error_report(struct sock *);
void rxrpc_peer_error_distributor(struct work_struct *);
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
+void rxrpc_peer_keepalive_worker(struct work_struct *);
/*
* peer_object.c
@@ -1044,25 +1041,11 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
struct rxrpc_peer *);
-
-static inline struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
-{
- atomic_inc(&peer->usage);
- return peer;
-}
-
-static inline
-struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
-{
- return atomic_inc_not_zero(&peer->usage) ? peer : NULL;
-}
-
-extern void __rxrpc_put_peer(struct rxrpc_peer *peer);
-static inline void rxrpc_put_peer(struct rxrpc_peer *peer)
-{
- if (peer && atomic_dec_and_test(&peer->usage))
- __rxrpc_put_peer(peer);
-}
+void rxrpc_destroy_all_peers(struct rxrpc_net *);
+struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
+struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
+void rxrpc_put_peer(struct rxrpc_peer *);
+void __rxrpc_queue_peer_error(struct rxrpc_peer *);
/*
* proc.c
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 3028298..a9a9be5 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -34,7 +34,8 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
struct rxrpc_backlog *b,
rxrpc_notify_rx_t notify_rx,
rxrpc_user_attach_call_t user_attach_call,
- unsigned long user_call_ID, gfp_t gfp)
+ unsigned long user_call_ID, gfp_t gfp,
+ unsigned int debug_id)
{
const void *here = __builtin_return_address(0);
struct rxrpc_call *call;
@@ -94,7 +95,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
/* Now it gets complicated, because calls get registered with the
* socket here, particularly if a user ID is preassigned by the user.
*/
- call = rxrpc_alloc_call(rx, gfp);
+ call = rxrpc_alloc_call(rx, gfp, debug_id);
if (!call)
return -ENOMEM;
call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
@@ -137,6 +138,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
write_unlock(&rx->call_lock);
+ rxnet = call->rxnet;
write_lock(&rxnet->call_lock);
list_add_tail(&call->link, &rxnet->calls);
write_unlock(&rxnet->call_lock);
@@ -174,7 +176,8 @@ int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
if (rx->discard_new_call)
return 0;
- while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp) == 0)
+ while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
+ atomic_inc_return(&rxrpc_debug_id)) == 0)
;
return 0;
@@ -216,6 +219,8 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
list_del(&conn->proc_link);
write_unlock(&rxnet->conn_lock);
kfree(conn);
+ if (atomic_dec_and_test(&rxnet->nr_conns))
+ wake_up_var(&rxnet->nr_conns);
tail = (tail + 1) & (size - 1);
}
@@ -223,7 +228,7 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
tail = b->call_backlog_tail;
while (CIRC_CNT(head, tail, size) > 0) {
struct rxrpc_call *call = b->call_backlog[tail];
- call->socket = rx;
+ rcu_assign_pointer(call->socket, rx);
if (rx->discard_new_call) {
_debug("discard %lx", call->user_call_ID);
rx->discard_new_call(call, call->user_call_ID);
@@ -293,8 +298,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
b->conn_backlog[conn_tail] = NULL;
smp_store_release(&b->conn_backlog_tail,
(conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
- rxrpc_get_local(local);
- conn->params.local = local;
+ conn->params.local = rxrpc_get_local(local);
conn->params.peer = peer;
rxrpc_see_connection(conn);
rxrpc_new_incoming_connection(rx, conn, skb);
@@ -347,7 +351,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
service_id == rx->second_service))
goto found_service;
- trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RX_INVALID_OPERATION, EOPNOTSUPP);
skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
skb->priority = RX_INVALID_OPERATION;
@@ -358,7 +362,7 @@ found_service:
spin_lock(&rx->incoming_lock);
if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
rx->sk.sk_state == RXRPC_CLOSE) {
- trace_rxrpc_abort("CLS", sp->hdr.cid, sp->hdr.callNumber,
+ trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
skb->priority = RX_INVALID_OPERATION;
@@ -454,6 +458,7 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
unsigned long user_call_ID,
rxrpc_notify_rx_t notify_rx)
__releases(&rx->sk.sk_lock.slock)
+ __acquires(call->user_mutex)
{
struct rxrpc_call *call;
struct rb_node *parent, **pp;
@@ -635,6 +640,7 @@ out_discard:
* @user_attach_call: Func to attach call to user_call_ID
* @user_call_ID: The tag to attach to the preallocated call
* @gfp: The allocation conditions.
+ * @debug_id: The tracing debug ID.
*
* Charge up the socket with preallocated calls, each with a user ID. A
* function should be provided to effect the attachment from the user's side.
@@ -645,7 +651,8 @@ out_discard:
int rxrpc_kernel_charge_accept(struct socket *sock,
rxrpc_notify_rx_t notify_rx,
rxrpc_user_attach_call_t user_attach_call,
- unsigned long user_call_ID, gfp_t gfp)
+ unsigned long user_call_ID, gfp_t gfp,
+ unsigned int debug_id)
{
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
struct rxrpc_backlog *b = rx->backlog;
@@ -655,6 +662,6 @@ int rxrpc_kernel_charge_accept(struct socket *sock,
return rxrpc_service_prealloc_one(rx, b, notify_rx,
user_attach_call, user_call_ID,
- gfp);
+ gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
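
A hedged sketch of how a kernel service might now recharge its accept backlog; the callback names and tag counter are hypothetical, but the signatures follow the rxrpc_notify_rx_t and rxrpc_user_attach_call_t typedefs and the loop mirrors rxrpc_service_prealloc() above.

/* Hypothetical service-side callbacks, for illustration only. */
static void example_notify_rx(struct sock *sk, struct rxrpc_call *call,
			      unsigned long user_call_ID);
static void example_attach_call(struct rxrpc_call *call,
				unsigned long user_call_ID);
static unsigned long example_next_tag;

static void example_recharge_backlog(struct socket *sock)
{
	while (rxrpc_kernel_charge_accept(sock,
					  example_notify_rx,
					  example_attach_call,
					  example_next_tag++,
					  GFP_KERNEL,
					  atomic_inc_return(&rxrpc_debug_id)) == 0)
		;
}
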
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index ad2ab1103..6e0d788 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -195,6 +195,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
* the packets in the Tx buffer we're going to resend and what the new
* resend timeout will be.
*/
+ trace_rxrpc_resend(call, (cursor + 1) & RXRPC_RXTX_BUFF_MASK);
oldest = now;
for (seq = cursor + 1; before_eq(seq, top); seq++) {
ix = seq & RXRPC_RXTX_BUFF_MASK;
@@ -225,7 +226,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
}
- resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(oldest, now)));
+ resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
resend_at += jiffies + rxrpc_resend_timeout;
WRITE_ONCE(call->resend_at, resend_at);
@@ -237,7 +238,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
* retransmitting data.
*/
if (!retrans) {
- rxrpc_reduce_call_timer(call, resend_at, now,
+ rxrpc_reduce_call_timer(call, resend_at, now_j,
rxrpc_timer_set_for_resend);
spin_unlock_bh(&call->lock);
ack_ts = ktime_sub(now, call->acks_latest_ts);
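
Both hunks above are operand fixes: ktime_sub(oldest, now) is negative whenever the oldest unacknowledged packet was transmitted in the past, and nsecs_to_jiffies() takes a u64, so the negative difference became an enormous resend delay; likewise rxrpc_reduce_call_timer() expects the current time in jiffies (now_j), not the ktime now. A runnable illustration of the sign error, with made-up timestamps:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t now    = 5000000000LL;	/* "current time" in ns (made up) */
	int64_t oldest = 3500000000LL;	/* Tx time of oldest unacked packet */

	int64_t old_way = oldest - now;	/* negative interval */
	int64_t new_way = now - oldest;	/* actual age of the packet */

	printf("old order: %lld ns, reinterpreted as u64: %llu\n",
	       (long long)old_way, (unsigned long long)old_way);
	printf("new order: %lld ns\n", (long long)new_way);
	return 0;
}
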
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 0b2db38..f6734d8 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -99,9 +99,11 @@ found_extant_call:
/*
* allocate a new call
*/
-struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp)
+struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
+ unsigned int debug_id)
{
struct rxrpc_call *call;
+ struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
if (!call)
@@ -138,7 +140,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp)
spin_lock_init(&call->notify_lock);
rwlock_init(&call->state_lock);
atomic_set(&call->usage, 1);
- call->debug_id = atomic_inc_return(&rxrpc_debug_id);
+ call->debug_id = debug_id;
call->tx_total_len = -1;
call->next_rx_timo = 20 * HZ;
call->next_req_timo = 1 * HZ;
@@ -152,6 +154,9 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp)
call->cong_cwnd = 2;
call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
+
+ call->rxnet = rxnet;
+ atomic_inc(&rxnet->nr_calls);
return call;
nomem_2:
@@ -166,14 +171,15 @@ nomem:
*/
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
struct sockaddr_rxrpc *srx,
- gfp_t gfp)
+ gfp_t gfp,
+ unsigned int debug_id)
{
struct rxrpc_call *call;
ktime_t now;
_enter("");
- call = rxrpc_alloc_call(rx, gfp);
+ call = rxrpc_alloc_call(rx, gfp, debug_id);
if (!call)
return ERR_PTR(-ENOMEM);
call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
@@ -214,18 +220,20 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
struct rxrpc_conn_parameters *cp,
struct sockaddr_rxrpc *srx,
struct rxrpc_call_params *p,
- gfp_t gfp)
+ gfp_t gfp,
+ unsigned int debug_id)
__releases(&rx->sk.sk_lock.slock)
+ __acquires(&call->user_mutex)
{
struct rxrpc_call *call, *xcall;
- struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
+ struct rxrpc_net *rxnet;
struct rb_node *parent, **pp;
const void *here = __builtin_return_address(0);
int ret;
_enter("%p,%lx", rx, p->user_call_ID);
- call = rxrpc_alloc_client_call(rx, srx, gfp);
+ call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
if (IS_ERR(call)) {
release_sock(&rx->sk);
_leave(" = %ld", PTR_ERR(call));
@@ -268,6 +276,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
write_unlock(&rx->call_lock);
+ rxnet = call->rxnet;
write_lock(&rxnet->call_lock);
list_add_tail(&call->link, &rxnet->calls);
write_unlock(&rxnet->call_lock);
@@ -613,7 +622,7 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
*/
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
- struct rxrpc_net *rxnet;
+ struct rxrpc_net *rxnet = call->rxnet;
const void *here = __builtin_return_address(0);
int n;
@@ -627,7 +636,6 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
if (!list_empty(&call->link)) {
- rxnet = rxrpc_net(sock_net(&call->socket->sk));
write_lock(&rxnet->call_lock);
list_del_init(&call->link);
write_unlock(&rxnet->call_lock);
@@ -643,11 +651,14 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
+ struct rxrpc_net *rxnet = call->rxnet;
rxrpc_put_peer(call->peer);
kfree(call->rxtx_buffer);
kfree(call->rxtx_annotations);
kmem_cache_free(rxrpc_call_jar, call);
+ if (atomic_dec_and_test(&rxnet->nr_calls))
+ wake_up_var(&rxnet->nr_calls);
}
/*
@@ -712,4 +723,7 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
}
write_unlock(&rxnet->call_lock);
+
+ atomic_dec(&rxnet->nr_calls);
+ wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
}
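
The counter starts at 1 in rxrpc_init_net() (see the net_ns.c hunk below): that bias reference belongs to the namespace itself, so nr_calls can only hit zero after teardown drops the bias and the last call has passed through its RCU destructor. A runnable userspace model of the scheme, with a spin-wait standing in for wait_var_event():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int nr_calls = 1;		/* bias ref held by the namespace */

static void call_alloc(void)
{
	atomic_fetch_add(&nr_calls, 1);
}

static void call_rcu_free(void)
{
	/* the kernel does wake_up_var() when this drops to zero */
	atomic_fetch_sub(&nr_calls, 1);
}

int main(void)
{
	call_alloc();
	call_alloc();
	call_rcu_free();
	call_rcu_free();

	atomic_fetch_sub(&nr_calls, 1);	/* teardown drops the bias */
	while (atomic_load(&nr_calls))
		;			/* stand-in for wait_var_event() */
	puts("no calls left; namespace state can be freed");
	return 0;
}
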
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 0641750..5736f64 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -207,6 +207,7 @@ rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
if (ret < 0)
goto error_2;
+ atomic_inc(&rxnet->nr_conns);
write_lock(&rxnet->conn_lock);
list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
write_unlock(&rxnet->conn_lock);
@@ -776,7 +777,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
unsigned int channel = call->cid & RXRPC_CHANNELMASK;
struct rxrpc_connection *conn = call->conn;
struct rxrpc_channel *chan = &conn->channels[channel];
- struct rxrpc_net *rxnet = rxrpc_net(sock_net(&call->socket->sk));
+ struct rxrpc_net *rxnet = conn->params.local->rxnet;
trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
call->conn = NULL;
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index b1dfae1..c717152 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -136,6 +136,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
}
kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
+ conn->params.peer->last_tx_at = ktime_get_real();
_leave("");
return;
}
@@ -160,7 +161,8 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
lockdep_is_held(&conn->channel_lock));
if (call) {
if (compl == RXRPC_CALL_LOCALLY_ABORTED)
- trace_rxrpc_abort("CON", call->cid,
+ trace_rxrpc_abort(call->debug_id,
+ "CON", call->cid,
call->call_id, 0,
abort_code, error);
if (rxrpc_set_call_completion(call, compl,
@@ -238,6 +240,8 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
return -EAGAIN;
}
+ conn->params.peer->last_tx_at = ktime_get_real();
+
_leave(" = 0");
return 0;
}
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index ccbac19..4c77a78 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -365,6 +365,9 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu)
key_put(conn->params.key);
key_put(conn->server_key);
rxrpc_put_peer(conn->params.peer);
+
+ if (atomic_dec_and_test(&conn->params.local->rxnet->nr_conns))
+ wake_up_var(&conn->params.local->rxnet->nr_conns);
rxrpc_put_local(conn->params.local);
kfree(conn);
@@ -418,7 +421,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
*/
if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
continue;
- trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, 0);
+ trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, NULL);
if (rxrpc_conn_is_client(conn))
BUG();
@@ -458,6 +461,7 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
_enter("");
+ atomic_dec(&rxnet->nr_conns);
rxrpc_destroy_all_client_connections(rxnet);
del_timer_sync(&rxnet->service_conn_reap_timer);
@@ -475,5 +479,9 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
ASSERT(list_empty(&rxnet->conn_proc_list));
+ /* We need to wait for the connections to be destroyed by RCU as they
+ * pin things that we still need to get rid of.
+ */
+ wait_var_event(&rxnet->nr_conns, !atomic_read(&rxnet->nr_conns));
_leave("");
}
diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
index f6fcdb3..80773a5 100644
--- a/net/rxrpc/conn_service.c
+++ b/net/rxrpc/conn_service.c
@@ -132,6 +132,7 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
conn->state = RXRPC_CONN_SERVICE_PREALLOC;
atomic_set(&conn->usage, 2);
+ atomic_inc(&rxnet->nr_conns);
write_lock(&rxnet->conn_lock);
list_add_tail(&conn->link, &rxnet->service_conns);
list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 6fc61400..21800e6 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -1183,6 +1183,8 @@ void rxrpc_data_ready(struct sock *udp_sk)
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_VERSION:
+ if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED))
+ goto discard;
rxrpc_post_packet_to_local(local, skb);
goto out;
@@ -1240,16 +1242,19 @@ void rxrpc_data_ready(struct sock *udp_sk)
goto discard_unlock;
if (sp->hdr.callNumber == chan->last_call) {
- /* For the previous service call, if completed successfully, we
- * discard all further packets.
+ if (chan->call ||
+ sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
+ goto discard_unlock;
+
+ /* For the previous service call, if completed
+ * successfully, we discard all further packets.
*/
if (rxrpc_conn_is_service(conn) &&
- (chan->last_type == RXRPC_PACKET_TYPE_ACK ||
- sp->hdr.type == RXRPC_PACKET_TYPE_ABORT))
+ chan->last_type == RXRPC_PACKET_TYPE_ACK)
goto discard_unlock;
- /* But otherwise we need to retransmit the final packet from
- * data cached in the connection record.
+ /* But otherwise we need to retransmit the final packet
+ * from data cached in the connection record.
*/
rxrpc_post_packet_to_conn(conn, skb);
goto out_unlock;
@@ -1307,21 +1312,21 @@ out_unlock:
wrong_security:
rcu_read_unlock();
- trace_rxrpc_abort("SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RXKADINCONSISTENCY, EBADMSG);
skb->priority = RXKADINCONSISTENCY;
goto post_abort;
reupgrade:
rcu_read_unlock();
- trace_rxrpc_abort("UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RX_PROTOCOL_ERROR, EBADMSG);
goto protocol_error;
bad_message_unlock:
rcu_read_unlock();
bad_message:
- trace_rxrpc_abort("BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RX_PROTOCOL_ERROR, EBADMSG);
protocol_error:
skb->priority = RX_PROTOCOL_ERROR;
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 38b99db..8b54e95 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -95,6 +95,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
local->debug_id = atomic_inc_return(&rxrpc_debug_id);
memcpy(&local->srx, srx, sizeof(*srx));
local->srx.srx_service = 0;
+ trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
}
_leave(" = %p", local);
@@ -257,15 +258,74 @@ addr_in_use:
}
/*
+ * Get a ref on a local endpoint.
+ */
+struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
+{
+ const void *here = __builtin_return_address(0);
+ int n;
+
+ n = atomic_inc_return(&local->usage);
+ trace_rxrpc_local(local, rxrpc_local_got, n, here);
+ return local;
+}
+
+/*
+ * Get a ref on a local endpoint unless its usage has already reached 0.
+ */
+struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
+{
+ const void *here = __builtin_return_address(0);
+
+ if (local) {
+ int n = __atomic_add_unless(&local->usage, 1, 0);
+ if (n > 0)
+ trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
+ else
+ local = NULL;
+ }
+ return local;
+}
+
+/*
+ * Queue a local endpoint.
+ */
+void rxrpc_queue_local(struct rxrpc_local *local)
+{
+ const void *here = __builtin_return_address(0);
+
+ if (rxrpc_queue_work(&local->processor))
+ trace_rxrpc_local(local, rxrpc_local_queued,
+ atomic_read(&local->usage), here);
+}
+
+/*
* A local endpoint reached its end of life.
*/
-void __rxrpc_put_local(struct rxrpc_local *local)
+static void __rxrpc_put_local(struct rxrpc_local *local)
{
_enter("%d", local->debug_id);
rxrpc_queue_work(&local->processor);
}
/*
+ * Drop a ref on a local endpoint.
+ */
+void rxrpc_put_local(struct rxrpc_local *local)
+{
+ const void *here = __builtin_return_address(0);
+ int n;
+
+ if (local) {
+ n = atomic_dec_return(&local->usage);
+ trace_rxrpc_local(local, rxrpc_local_put, n, here);
+
+ if (n == 0)
+ __rxrpc_put_local(local);
+ }
+}
+
+/*
* Destroy a local endpoint's socket and then hand the record to RCU to dispose
* of.
*
@@ -322,7 +382,8 @@ static void rxrpc_local_processor(struct work_struct *work)
container_of(work, struct rxrpc_local, processor);
bool again;
- _enter("%d", local->debug_id);
+ trace_rxrpc_local(local, rxrpc_local_processing,
+ atomic_read(&local->usage), NULL);
do {
again = false;
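
Taking these helpers out of line lets each one emit a tracepoint carrying the caller's return address. The subtle one is rxrpc_get_local_maybe(), which must not resurrect an endpoint whose usage count has already reached zero; below is a runnable C11 model of that increment-unless-zero primitive (__atomic_add_unless() in kernels of this vintage):

#include <stdatomic.h>
#include <stdio.h>

/* Take a reference only if the object is not already being destroyed. */
static int get_maybe(atomic_int *usage)
{
	int n = atomic_load(usage);

	do {
		if (n == 0)
			return 0;	/* too late: destruction has begun */
	} while (!atomic_compare_exchange_weak(usage, &n, n + 1));
	return 1;
}

int main(void)
{
	atomic_int live = 2, dying = 0;

	printf("live:  got=%d usage=%d\n", get_maybe(&live), atomic_load(&live));
	printf("dying: got=%d usage=%d\n", get_maybe(&dying), atomic_load(&dying));
	return 0;
}
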
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
index f18c924..c7a023f 100644
--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -32,13 +32,22 @@ static void rxrpc_service_conn_reap_timeout(struct timer_list *timer)
rxrpc_queue_work(&rxnet->service_conn_reaper);
}
+static void rxrpc_peer_keepalive_timeout(struct timer_list *timer)
+{
+ struct rxrpc_net *rxnet =
+ container_of(timer, struct rxrpc_net, peer_keepalive_timer);
+
+ if (rxnet->live)
+ rxrpc_queue_work(&rxnet->peer_keepalive_work);
+}
+
/*
* Initialise a per-network namespace record.
*/
static __net_init int rxrpc_init_net(struct net *net)
{
struct rxrpc_net *rxnet = rxrpc_net(net);
- int ret;
+ int ret, i;
rxnet->live = true;
get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch));
@@ -46,7 +55,9 @@ static __net_init int rxrpc_init_net(struct net *net)
INIT_LIST_HEAD(&rxnet->calls);
rwlock_init(&rxnet->call_lock);
+ atomic_set(&rxnet->nr_calls, 1);
+ atomic_set(&rxnet->nr_conns, 1);
INIT_LIST_HEAD(&rxnet->conn_proc_list);
INIT_LIST_HEAD(&rxnet->service_conns);
rwlock_init(&rxnet->conn_lock);
@@ -70,8 +81,16 @@ static __net_init int rxrpc_init_net(struct net *net)
INIT_LIST_HEAD(&rxnet->local_endpoints);
mutex_init(&rxnet->local_mutex);
+
hash_init(rxnet->peer_hash);
spin_lock_init(&rxnet->peer_hash_lock);
+ for (i = 0; i < ARRAY_SIZE(rxnet->peer_keepalive); i++)
+ INIT_HLIST_HEAD(&rxnet->peer_keepalive[i]);
+ INIT_HLIST_HEAD(&rxnet->peer_keepalive_new);
+ timer_setup(&rxnet->peer_keepalive_timer,
+ rxrpc_peer_keepalive_timeout, 0);
+ INIT_WORK(&rxnet->peer_keepalive_work, rxrpc_peer_keepalive_worker);
+ rxnet->peer_keepalive_base = ktime_add(ktime_get_real(), NSEC_PER_SEC);
ret = -ENOMEM;
rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net);
@@ -95,8 +114,11 @@ static __net_exit void rxrpc_exit_net(struct net *net)
struct rxrpc_net *rxnet = rxrpc_net(net);
rxnet->live = false;
+ del_timer_sync(&rxnet->peer_keepalive_timer);
+ cancel_work_sync(&rxnet->peer_keepalive_work);
rxrpc_destroy_all_calls(rxnet);
rxrpc_destroy_all_connections(rxnet);
+ rxrpc_destroy_all_peers(rxnet);
rxrpc_destroy_all_locals(rxnet);
proc_remove(rxnet->proc_net);
}
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index cf73dc0..7f1fc04 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -32,6 +32,8 @@ struct rxrpc_abort_buffer {
__be32 abort_code;
};
+static const char rxrpc_keepalive_string[] = "";
+
/*
* Arrange for a keepalive ping a certain time after we last transmitted. This
* lets the far side know we're still interested in this call and helps keep
@@ -122,6 +124,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
struct kvec iov[2];
rxrpc_serial_t serial;
rxrpc_seq_t hard_ack, top;
+ ktime_t now;
size_t len, n;
int ret;
u8 reason;
@@ -203,8 +206,10 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
}
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
+ now = ktime_get_real();
if (ping)
- call->ping_time = ktime_get_real();
+ call->ping_time = now;
+ conn->params.peer->last_tx_at = ktime_get_real();
if (call->state < RXRPC_CALL_COMPLETE) {
if (ret < 0) {
@@ -288,6 +293,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
ret = kernel_sendmsg(conn->params.local->socket,
&msg, iov, 1, sizeof(pkt));
+ conn->params.peer->last_tx_at = ktime_get_real();
rxrpc_put_connection(conn);
return ret;
@@ -378,6 +384,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
* message and update the peer record
*/
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
+ conn->params.peer->last_tx_at = ktime_get_real();
up_read(&conn->params.local->defrag_sem);
if (ret == -EMSGSIZE)
@@ -429,6 +436,7 @@ send_fragmentable:
if (ret == 0) {
ret = kernel_sendmsg(conn->params.local->socket, &msg,
iov, 2, len);
+ conn->params.peer->last_tx_at = ktime_get_real();
opt = IP_PMTUDISC_DO;
kernel_setsockopt(conn->params.local->socket, SOL_IP,
@@ -446,6 +454,7 @@ send_fragmentable:
if (ret == 0) {
ret = kernel_sendmsg(conn->params.local->socket, &msg,
iov, 2, len);
+ conn->params.peer->last_tx_at = ktime_get_real();
opt = IPV6_PMTUDISC_DO;
kernel_setsockopt(conn->params.local->socket,
@@ -515,3 +524,51 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
_leave("");
}
+
+/*
+ * Send a VERSION reply to a peer as a keepalive.
+ */
+void rxrpc_send_keepalive(struct rxrpc_peer *peer)
+{
+ struct rxrpc_wire_header whdr;
+ struct msghdr msg;
+ struct kvec iov[2];
+ size_t len;
+ int ret;
+
+ _enter("");
+
+ msg.msg_name = &peer->srx.transport;
+ msg.msg_namelen = peer->srx.transport_len;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = 0;
+
+ whdr.epoch = htonl(peer->local->rxnet->epoch);
+ whdr.cid = 0;
+ whdr.callNumber = 0;
+ whdr.seq = 0;
+ whdr.serial = 0;
+ whdr.type = RXRPC_PACKET_TYPE_VERSION; /* Not client-initiated */
+ whdr.flags = RXRPC_LAST_PACKET;
+ whdr.userStatus = 0;
+ whdr.securityIndex = 0;
+ whdr._rsvd = 0;
+ whdr.serviceId = 0;
+
+ iov[0].iov_base = &whdr;
+ iov[0].iov_len = sizeof(whdr);
+ iov[1].iov_base = (char *)rxrpc_keepalive_string;
+ iov[1].iov_len = sizeof(rxrpc_keepalive_string);
+
+ len = iov[0].iov_len + iov[1].iov_len;
+
+ _proto("Tx VERSION (keepalive)");
+
+ ret = kernel_sendmsg(peer->local->socket, &msg, iov, 2, len);
+ if (ret < 0)
+ _debug("sendmsg failed: %d", ret);
+
+ peer->last_tx_at = ktime_get_real();
+ _leave("");
+}
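
Note the interlock with the input.c hunk earlier: the keepalive deliberately leaves RXRPC_CLIENT_INITIATED out of whdr.flags, and receivers now discard VERSION packets lacking that flag, so two patched peers cannot ping-pong keepalive replies at each other forever. A minimal model of the receive-side filter (the bit values here are illustrative, not copied from protocol.h):

#include <stdio.h>

#define RXRPC_CLIENT_INITIATED	0x01	/* illustrative */
#define RXRPC_LAST_PACKET	0x04	/* illustrative */

static int version_packet_wants_reply(unsigned char flags)
{
	return (flags & RXRPC_CLIENT_INITIATED) != 0;
}

int main(void)
{
	printf("keepalive:    reply=%d\n",
	       version_packet_wants_reply(RXRPC_LAST_PACKET));
	printf("client query: reply=%d\n",
	       version_packet_wants_reply(RXRPC_CLIENT_INITIATED));
	return 0;
}
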
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 7f74950..78c2f95 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -192,7 +192,7 @@ void rxrpc_error_report(struct sock *sk)
rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
/* The ref we obtained is passed off to the work item */
- rxrpc_queue_work(&peer->error_distributor);
+ __rxrpc_queue_peer_error(peer);
_leave("");
}
@@ -348,3 +348,99 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
usage, avg);
}
+
+/*
+ * Perform keep-alive pings with VERSION packets to keep any NAT alive.
+ */
+void rxrpc_peer_keepalive_worker(struct work_struct *work)
+{
+ struct rxrpc_net *rxnet =
+ container_of(work, struct rxrpc_net, peer_keepalive_work);
+ struct rxrpc_peer *peer;
+ unsigned long delay;
+ ktime_t base, now = ktime_get_real();
+ s64 diff;
+ u8 cursor, slot;
+
+ base = rxnet->peer_keepalive_base;
+ cursor = rxnet->peer_keepalive_cursor;
+
+ _enter("%u,%lld", cursor, ktime_sub(now, base));
+
+next_bucket:
+ diff = ktime_to_ns(ktime_sub(now, base));
+ if (diff < 0)
+ goto resched;
+
+ _debug("at %u", cursor);
+ spin_lock_bh(&rxnet->peer_hash_lock);
+next_peer:
+ if (!rxnet->live) {
+ spin_unlock_bh(&rxnet->peer_hash_lock);
+ goto out;
+ }
+
+ /* Everything in the bucket at the cursor is processed this second; the
+ * bucket at cursor + 1 goes now + 1s and so on...
+ */
+ if (hlist_empty(&rxnet->peer_keepalive[cursor])) {
+ if (hlist_empty(&rxnet->peer_keepalive_new)) {
+ spin_unlock_bh(&rxnet->peer_hash_lock);
+ goto emptied_bucket;
+ }
+
+ hlist_move_list(&rxnet->peer_keepalive_new,
+ &rxnet->peer_keepalive[cursor]);
+ }
+
+ peer = hlist_entry(rxnet->peer_keepalive[cursor].first,
+ struct rxrpc_peer, keepalive_link);
+ hlist_del_init(&peer->keepalive_link);
+ if (!rxrpc_get_peer_maybe(peer))
+ goto next_peer;
+
+ spin_unlock_bh(&rxnet->peer_hash_lock);
+
+ _debug("peer %u {%pISp}", peer->debug_id, &peer->srx.transport);
+
+recalc:
+ diff = ktime_divns(ktime_sub(peer->last_tx_at, base), NSEC_PER_SEC);
+ if (diff < -30 || diff > 30)
+ goto send; /* LSW of 64-bit time probably wrapped on 32-bit */
+ diff += RXRPC_KEEPALIVE_TIME - 1;
+ if (diff < 0)
+ goto send;
+
+ slot = (diff > RXRPC_KEEPALIVE_TIME - 1) ? RXRPC_KEEPALIVE_TIME - 1 : diff;
+ if (slot == 0)
+ goto send;
+
+ /* A transmission to this peer occurred since last we examined it so
+ * put it into the appropriate future bucket.
+ */
+ slot = (slot + cursor) % ARRAY_SIZE(rxnet->peer_keepalive);
+ spin_lock_bh(&rxnet->peer_hash_lock);
+ hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive[slot]);
+ rxrpc_put_peer(peer);
+ goto next_peer;
+
+send:
+ rxrpc_send_keepalive(peer);
+ now = ktime_get_real();
+ goto recalc;
+
+emptied_bucket:
+ cursor++;
+ if (cursor >= ARRAY_SIZE(rxnet->peer_keepalive))
+ cursor = 0;
+ base = ktime_add_ns(base, NSEC_PER_SEC);
+ goto next_bucket;
+
+resched:
+ rxnet->peer_keepalive_base = base;
+ rxnet->peer_keepalive_cursor = cursor;
+ delay = nsecs_to_jiffies(-diff) + 1;
+ timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
+out:
+ _leave("");
+}
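
The bucket arithmetic is easier to see in isolation: each entry of peer_keepalive[] holds the peers due for examination in one particular second, and a peer that transmitted recently is parked in a future bucket instead of being pinged. A runnable model of the slot computation above, omitting the ±30 s guard against wrapped 32-bit time:

#include <stdio.h>
#include <stdint.h>

#define RXRPC_KEEPALIVE_TIME	20
#define NBUCKETS		(RXRPC_KEEPALIVE_TIME + 1)

/* Returns -1 if a keepalive must be sent now, otherwise the bucket in
 * which to park the peer for a later pass.
 */
static int pick_slot(int64_t last_tx_minus_base_secs, unsigned int cursor)
{
	int64_t diff = last_tx_minus_base_secs + RXRPC_KEEPALIVE_TIME - 1;

	if (diff <= 0)
		return -1;		/* idle too long: ping it now */
	if (diff > RXRPC_KEEPALIVE_TIME - 1)
		diff = RXRPC_KEEPALIVE_TIME - 1;
	return (int)((diff + cursor) % NBUCKETS);
}

int main(void)
{
	/* Last Tx was 25s before the wheel base: overdue, send now. */
	printf("%d\n", pick_slot(-25, 5));
	/* Last Tx at the base second: parked in bucket (19 + 5) % 21 = 3. */
	printf("%d\n", pick_slot(0, 5));
	return 0;
}
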
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index d02a99f..1b7e810 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -322,6 +322,7 @@ struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
if (!peer) {
peer = prealloc;
hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
+ hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive_new);
}
spin_unlock(&rxnet->peer_hash_lock);
@@ -363,9 +364,12 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
if (peer && !rxrpc_get_peer_maybe(peer))
peer = NULL;
- if (!peer)
+ if (!peer) {
hash_add_rcu(rxnet->peer_hash,
&candidate->hash_link, hash_key);
+ hlist_add_head(&candidate->keepalive_link,
+ &rxnet->peer_keepalive_new);
+ }
spin_unlock_bh(&rxnet->peer_hash_lock);
@@ -382,9 +386,54 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
}
/*
- * Discard a ref on a remote peer record.
+ * Get a ref on a peer record.
+ */
+struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
+{
+ const void *here = __builtin_return_address(0);
+ int n;
+
+ n = atomic_inc_return(&peer->usage);
+ trace_rxrpc_peer(peer, rxrpc_peer_got, n, here);
+ return peer;
+}
+
+/*
+ * Get a ref on a peer record unless its usage has already reached 0.
+ */
+struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
+{
+ const void *here = __builtin_return_address(0);
+
+ if (peer) {
+ int n = __atomic_add_unless(&peer->usage, 1, 0);
+ if (n > 0)
+ trace_rxrpc_peer(peer, rxrpc_peer_got, n + 1, here);
+ else
+ peer = NULL;
+ }
+ return peer;
+}
+
+/*
+ * Queue a peer record. This passes the caller's ref to the workqueue.
+ */
+void __rxrpc_queue_peer_error(struct rxrpc_peer *peer)
+{
+ const void *here = __builtin_return_address(0);
+ int n;
+
+ n = atomic_read(&peer->usage);
+ if (rxrpc_queue_work(&peer->error_distributor))
+ trace_rxrpc_peer(peer, rxrpc_peer_queued_error, n, here);
+ else
+ rxrpc_put_peer(peer);
+}
+
+/*
+ * Discard a peer record.
*/
-void __rxrpc_put_peer(struct rxrpc_peer *peer)
+static void __rxrpc_put_peer(struct rxrpc_peer *peer)
{
struct rxrpc_net *rxnet = peer->local->rxnet;
@@ -392,11 +441,49 @@ void __rxrpc_put_peer(struct rxrpc_peer *peer)
spin_lock_bh(&rxnet->peer_hash_lock);
hash_del_rcu(&peer->hash_link);
+ hlist_del_init(&peer->keepalive_link);
spin_unlock_bh(&rxnet->peer_hash_lock);
kfree_rcu(peer, rcu);
}
+/*
+ * Drop a ref on a peer record.
+ */
+void rxrpc_put_peer(struct rxrpc_peer *peer)
+{
+ const void *here = __builtin_return_address(0);
+ int n;
+
+ if (peer) {
+ n = atomic_dec_return(&peer->usage);
+ trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
+ if (n == 0)
+ __rxrpc_put_peer(peer);
+ }
+}
+
+/*
+ * Make sure all peer records have been discarded.
+ */
+void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
+{
+ struct rxrpc_peer *peer;
+ int i;
+
+ for (i = 0; i < HASH_SIZE(rxnet->peer_hash); i++) {
+ if (hlist_empty(&rxnet->peer_hash[i]))
+ continue;
+
+ hlist_for_each_entry(peer, &rxnet->peer_hash[i], hash_link) {
+ pr_err("Leaked peer %u {%u} %pISp\n",
+ peer->debug_id,
+ atomic_read(&peer->usage),
+ &peer->srx.transport);
+ }
+ }
+}
+
/**
* rxrpc_kernel_get_peer - Get the peer address of a call
* @sock: The socket on which the call is in progress.
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index f79f260..7e45db0 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -29,6 +29,8 @@ static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
* generate a list of extant and dead calls in /proc/net/rxrpc_calls
*/
static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
+ __acquires(rcu)
+ __acquires(rxnet->call_lock)
{
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
@@ -45,6 +47,8 @@ static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
+ __releases(rxnet->call_lock)
+ __releases(rcu)
{
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
@@ -135,6 +139,7 @@ const struct file_operations rxrpc_call_seq_fops = {
* generate a list of extant virtual connections in /proc/net/rxrpc_conns
*/
static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
+ __acquires(rxnet->conn_lock)
{
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
@@ -151,6 +156,7 @@ static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
}
static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
+ __releases(rxnet->conn_lock)
{
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 9d45d8b..7bff716 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -272,7 +272,7 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
unsigned int *_offset, unsigned int *_len)
{
unsigned int offset = sizeof(struct rxrpc_wire_header);
- unsigned int len = *_len;
+ unsigned int len;
int ret;
u8 annotation = *_annotation;
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 77cb23c..588fea0 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -668,6 +668,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
return -EAGAIN;
}
+ conn->params.peer->last_tx_at = ktime_get_real();
_leave(" = 0");
return 0;
}
@@ -722,6 +723,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
return -EAGAIN;
}
+ conn->params.peer->last_tx_at = ktime_get_real();
_leave(" = 0");
return 0;
}
diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c
index e9f4283..c4479af 100644
--- a/net/rxrpc/security.c
+++ b/net/rxrpc/security.c
@@ -19,9 +19,6 @@
#include <keys/rxrpc-type.h>
#include "ar-internal.h"
-static LIST_HEAD(rxrpc_security_methods);
-static DECLARE_RWSEM(rxrpc_security_sem);
-
static const struct rxrpc_security *rxrpc_security_types[] = {
[RXRPC_SECURITY_NONE] = &rxrpc_no_security,
#ifdef CONFIG_RXKAD
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 09f2a3e..206e802 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -130,7 +130,9 @@ static inline void rxrpc_instant_resend(struct rxrpc_call *call, int ix)
spin_lock_bh(&call->lock);
if (call->state < RXRPC_CALL_COMPLETE) {
- call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS;
+ call->rxtx_annotations[ix] =
+ (call->rxtx_annotations[ix] & RXRPC_TX_ANNO_LAST) |
+ RXRPC_TX_ANNO_RETRANS;
if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
rxrpc_queue_call(call);
}
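
The hunk above is the substantive fix in sendmsg.c: overwriting the whole annotation also wiped RXRPC_TX_ANNO_LAST, so an instant resend after an initial transmission failure could lose the marker identifying the final packet in the Tx ring. A runnable demonstration of the masked update (bit values are illustrative, not taken from the source):

#include <stdio.h>

#define RXRPC_TX_ANNO_RETRANS	0x02	/* illustrative */
#define RXRPC_TX_ANNO_LAST	0x40	/* illustrative */

int main(void)
{
	unsigned char anno = 0x01 | RXRPC_TX_ANNO_LAST;	/* state + LAST */

	/* old: anno = RXRPC_TX_ANNO_RETRANS;  -- LAST bit vanished */
	anno = (anno & RXRPC_TX_ANNO_LAST) | RXRPC_TX_ANNO_RETRANS;

	printf("LAST preserved: %s\n",
	       (anno & RXRPC_TX_ANNO_LAST) ? "yes" : "no");
	return 0;
}
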
@@ -554,6 +556,7 @@ static struct rxrpc_call *
rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
struct rxrpc_send_params *p)
__releases(&rx->sk.sk_lock.slock)
+ __acquires(&call->user_mutex)
{
struct rxrpc_conn_parameters cp;
struct rxrpc_call *call;
@@ -579,9 +582,11 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
cp.exclusive = rx->exclusive | p->exclusive;
cp.upgrade = p->upgrade;
cp.service_id = srx->srx_service;
- call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL);
+ call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL,
+ atomic_inc_return(&rxrpc_debug_id));
/* The socket is now unlocked */
+ rxrpc_put_peer(cp.peer);
_leave(" = %p\n", call);
return call;
}
@@ -593,6 +598,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
*/
int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
__releases(&rx->sk.sk_lock.slock)
+ __releases(&call->user_mutex)
{
enum rxrpc_call_state state;
struct rxrpc_call *call;