Diffstat (limited to 'net/dccp/input.c')
-rw-r--r--	net/dccp/input.c	155
1 file changed, 116 insertions, 39 deletions
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 1ce1010..08392ed 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -22,26 +22,77 @@
/* rate-limit for syncs in reply to sequence-invalid packets; RFC 4340, 7.5.4 */
int sysctl_dccp_sync_ratelimit __read_mostly = HZ / 8;
-static void dccp_fin(struct sock *sk, struct sk_buff *skb)
+static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb)
{
- sk->sk_shutdown |= RCV_SHUTDOWN;
- sock_set_flag(sk, SOCK_DONE);
__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
__skb_queue_tail(&sk->sk_receive_queue, skb);
skb_set_owner_r(skb, sk);
sk->sk_data_ready(sk, 0);
}
-static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
+static void dccp_fin(struct sock *sk, struct sk_buff *skb)
{
- dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
- dccp_fin(sk, skb);
- dccp_set_state(sk, DCCP_CLOSED);
- sk_wake_async(sk, 1, POLL_HUP);
+ /*
+ * On receiving Close/CloseReq, both RD/WR shutdown are performed.
+ * RFC 4340, 8.3 says that we MAY send further Data/DataAcks after
+ * receiving the closing segment, but there is no guarantee that such
+ * data will be processed at all.
+ */
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ sock_set_flag(sk, SOCK_DONE);
+ dccp_enqueue_skb(sk, skb);
+}
+
+static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
+{
+ int queued = 0;
+
+ switch (sk->sk_state) {
+ /*
+ * We ignore Close when received in one of the following states:
+ * - CLOSED (may be a late or duplicate packet)
+ * - PASSIVE_CLOSEREQ (the peer has sent a CloseReq earlier)
+ * - RESPOND (already handled by dccp_check_req)
+ */
+ case DCCP_CLOSING:
+ /*
+ * Simultaneous-close: receiving a Close after sending one. This
+ * can happen if both client and server perform active-close and
+ * will result in an endless ping-pong of crossing and retrans-
+ * mitted Close packets, which only terminates when one of the
+ * nodes times out (min. 64 seconds). Quicker convergence can be
+ * achieved when one of the nodes acts as tie-breaker.
+ * This is ok as both ends are done with data transfer and each
+ * end is just waiting for the other to acknowledge termination.
+ */
+ if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT)
+ break;
+ /* fall through */
+ case DCCP_REQUESTING:
+ case DCCP_ACTIVE_CLOSEREQ:
+ dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
+ dccp_done(sk);
+ break;
+ case DCCP_OPEN:
+ case DCCP_PARTOPEN:
+ /* Give waiting application a chance to read pending data */
+ queued = 1;
+ dccp_fin(sk, skb);
+ dccp_set_state(sk, DCCP_PASSIVE_CLOSE);
+ /* fall through */
+ case DCCP_PASSIVE_CLOSE:
+ /*
+ * Retransmitted Close: we have already enqueued the first one.
+ */
+ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
+ }
+ return queued;
}
-static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
+static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
{
+ int queued = 0;
+
/*
* Step 7: Check for unexpected packet types
* If (S.is_server and P.type == CloseReq)
@@ -50,12 +101,26 @@ static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
*/
if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
- return;
+ return queued;
}
- if (sk->sk_state != DCCP_CLOSING)
+ /* Step 13: process relevant Client states < CLOSEREQ */
+ switch (sk->sk_state) {
+ case DCCP_REQUESTING:
+ dccp_send_close(sk, 0);
dccp_set_state(sk, DCCP_CLOSING);
- dccp_send_close(sk, 0);
+ break;
+ case DCCP_OPEN:
+ case DCCP_PARTOPEN:
+ /* Give waiting application a chance to read pending data */
+ queued = 1;
+ dccp_fin(sk, skb);
+ dccp_set_state(sk, DCCP_PASSIVE_CLOSEREQ);
+ /* fall through */
+ case DCCP_PASSIVE_CLOSEREQ:
+ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
+ }
+ return queued;
}
static u8 dccp_reset_code_convert(const u8 code)
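
The two handlers above carry the patch's central change: a Close or CloseReq received in OPEN/PARTOPEN now moves the socket into a passive-close state and enqueues the closing segment, instead of resetting the connection at once. A minimal userspace sketch of how this surfaces to an application as a clean EOF (not part of this patch; address, port and service code are arbitrary example values):

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef SOCK_DCCP
#define SOCK_DCCP		6	/* linux/net.h */
#endif
#ifndef IPPROTO_DCCP
#define IPPROTO_DCCP		33	/* linux/in.h */
#endif
#ifndef SOL_DCCP
#define SOL_DCCP		269	/* linux/socket.h */
#endif
#ifndef DCCP_SOCKOPT_SERVICE
#define DCCP_SOCKOPT_SERVICE	2	/* linux/dccp.h */
#endif

int main(void)
{
	struct sockaddr_in sa = { .sin_family = AF_INET,
				  .sin_port   = htons(4242) };
	uint32_t service = htonl(42);	/* service code announced on the
					 * Request (RFC 4340, 8.1.2) */
	char buf[1500];
	ssize_t n;
	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);

	if (fd < 0)
		return 1;
	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
		   &service, sizeof(service));
	inet_pton(AF_INET, "192.0.2.1", &sa.sin_addr);
	if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		perror("connect");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		;			/* drain queued data */
	if (n == 0)			/* peer sent Close/CloseReq */
		puts("EOF: peer closed the connection");
	return close(fd);
}

Because dccp_fin() now sets SHUTDOWN_MASK and enqueues the closing skb, the final read() returns 0 rather than failing with ECONNRESET.
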
@@ -90,7 +155,7 @@ static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
dccp_fin(sk, skb);
if (err && !sock_flag(sk, SOCK_DEAD))
- sk_wake_async(sk, 0, POLL_ERR);
+ sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
}
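
The POLL_ERR hunk above belongs to a tree-wide cleanup that replaced the magic `how' argument of sk_wake_async() with named constants. For orientation (not part of this diff), the enum as defined in include/linux/net.h of the same kernel series:

/* include/linux/net.h (same series): callers previously passed 0 or 1 */
enum {
	SOCK_WAKE_IO,		/* was 0: I/O readiness, e.g. POLL_OUT/POLL_ERR */
	SOCK_WAKE_WAITD,	/* was 1: waiting-for-data, e.g. POLL_HUP above */
	SOCK_WAKE_SPACE,
	SOCK_WAKE_URG,
};
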
@@ -103,6 +168,21 @@ static void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
DCCP_SKB_CB(skb)->dccpd_ack_seq);
}
+static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb)
+{
+ const struct dccp_sock *dp = dccp_sk(sk);
+
+ /* Don't deliver to RX CCID when node has shut down read end. */
+ if (!(sk->sk_shutdown & RCV_SHUTDOWN))
+ ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
+ /*
+ * Until the TX queue has been drained, we can not honour SHUT_WR, since
+ * we need received feedback as input to adjust congestion control.
+ */
+ if (sk->sk_write_queue.qlen > 0 || !(sk->sk_shutdown & SEND_SHUTDOWN))
+ ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
+}
+
static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
{
const struct dccp_hdr *dh = dccp_hdr(skb);
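
The new dccp_deliver_input_to_ccids() consults the shutdown flags that shutdown(2) sets on the socket. A hypothetical sender-side sketch of how an application half-close meshes with this gating (the helper is illustrative; shutdown() and SHUT_WR are standard POSIX):

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

/* Illustrative: half-close after the final write. SEND_SHUTDOWN gets
 * set, yet per dccp_deliver_input_to_ccids() the TX CCID keeps seeing
 * ACK feedback until sk_write_queue drains, so congestion control
 * stays accurate for data still in flight. SHUT_RD would set
 * RCV_SHUTDOWN and cut off delivery to the RX CCID immediately.
 */
static void finish_sending(int fd, const char *msg)
{
	write(fd, msg, strlen(msg));	/* last application data */
	shutdown(fd, SHUT_WR);		/* no more sends; reads stay open */
}
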
@@ -209,13 +289,11 @@ static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
case DCCP_PKT_DATAACK:
case DCCP_PKT_DATA:
/*
- * FIXME: check if sk_receive_queue is full, schedule DATA_DROPPED
- * option if it is.
+ * FIXME: schedule DATA_DROPPED (RFC 4340, 11.7.2) if and when
+ * - sk_shutdown == RCV_SHUTDOWN, use Code 1, "Not Listening"
+ * - sk_receive_queue is full, use Code 2, "Receive Buffer"
*/
- __skb_pull(skb, dh->dccph_doff * 4);
- __skb_queue_tail(&sk->sk_receive_queue, skb);
- skb_set_owner_r(skb, sk);
- sk->sk_data_ready(sk, 0);
+ dccp_enqueue_skb(sk, skb);
return 0;
case DCCP_PKT_ACK:
goto discard;
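
The rewritten FIXME names two Drop Codes from RFC 4340, 11.7.2; this patch defines no constants for them. A hypothetical rendering of the cited values:

/* Hypothetical constants for the Drop Codes cited above (values per
 * RFC 4340, 11.7.2); not defined anywhere in this patch:
 */
enum dccp_drop_code {
	DCCP_DROP_PROTO_CONSTRAINTS	= 0,	/* protocol constraints */
	DCCP_DROP_NOT_LISTENING		= 1,	/* application not listening */
	DCCP_DROP_RECV_BUFFER		= 2,	/* receive buffer too small */
};
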
@@ -231,11 +309,13 @@ static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
dccp_rcv_reset(sk, skb);
return 0;
case DCCP_PKT_CLOSEREQ:
- dccp_rcv_closereq(sk, skb);
+ if (dccp_rcv_closereq(sk, skb))
+ return 0;
goto discard;
case DCCP_PKT_CLOSE:
- dccp_rcv_close(sk, skb);
- return 0;
+ if (dccp_rcv_close(sk, skb))
+ return 0;
+ goto discard;
case DCCP_PKT_REQUEST:
/* Step 7
* or (S.is_server and P.type == Response)
@@ -289,7 +369,7 @@ int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
if (dccp_check_seqno(sk, skb))
goto discard;
- if (dccp_parse_options(sk, skb))
+ if (dccp_parse_options(sk, NULL, skb))
goto discard;
if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
@@ -300,9 +380,7 @@ int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
DCCP_SKB_CB(skb)->dccpd_seq,
DCCP_ACKVEC_STATE_RECEIVED))
goto discard;
-
- ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
- ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
+ dccp_deliver_input_to_ccids(sk, skb);
return __dccp_rcv_established(sk, skb, dh, len);
discard:
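
For orientation, a comment-only sketch of the established-state receive path as it stands after this patch (names from the hunks above; call order inferred from the code):

/*
 * dccp_rcv_established(sk, skb, dh, len)
 *   -> dccp_check_seqno(sk, skb)             sequence validity, RFC 4340, 7.5
 *   -> dccp_parse_options(sk, NULL, skb)     note the new middle argument
 *   -> dccp_event_ack_recv(sk, skb)          only if the packet carries an Ack
 *   -> dccp_ackvec_add(...)                  ack-vector bookkeeping
 *   -> dccp_deliver_input_to_ccids(sk, skb)  honours RCV/SEND_SHUTDOWN
 *   -> __dccp_rcv_established(sk, skb, dh, len)  per-packet-type dispatch
 */
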
@@ -349,7 +427,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
goto out_invalid_packet;
}
- if (dccp_parse_options(sk, skb))
+ if (dccp_parse_options(sk, NULL, skb))
goto out_invalid_packet;
/* Obtain usec RTT sample from SYN exchange (used by CCID 3) */
@@ -402,7 +480,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
- sk_wake_async(sk, 0, POLL_OUT);
+ sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
}
if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||
@@ -531,7 +609,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
/*
* Step 8: Process options and mark acknowledgeable
*/
- if (dccp_parse_options(sk, skb))
+ if (dccp_parse_options(sk, NULL, skb))
goto discard;
if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
@@ -543,8 +621,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
DCCP_ACKVEC_STATE_RECEIVED))
goto discard;
- ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
- ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
+ dccp_deliver_input_to_ccids(sk, skb);
}
/*
@@ -560,16 +637,14 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
return 0;
/*
* Step 7: Check for unexpected packet types
- * If (S.is_server and P.type == CloseReq)
- * or (S.is_server and P.type == Response)
+ * If (S.is_server and P.type == Response)
* or (S.is_client and P.type == Request)
* or (S.state == RESPOND and P.type == Data),
* Send Sync packet acknowledging P.seqno
* Drop packet and return
*/
} else if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
- (dh->dccph_type == DCCP_PKT_RESPONSE ||
- dh->dccph_type == DCCP_PKT_CLOSEREQ)) ||
+ dh->dccph_type == DCCP_PKT_RESPONSE) ||
(dp->dccps_role == DCCP_ROLE_CLIENT &&
dh->dccph_type == DCCP_PKT_REQUEST) ||
(sk->sk_state == DCCP_RESPOND &&
@@ -577,11 +652,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
goto discard;
} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {
- dccp_rcv_closereq(sk, skb);
+ if (dccp_rcv_closereq(sk, skb))
+ return 0;
goto discard;
} else if (dh->dccph_type == DCCP_PKT_CLOSE) {
- dccp_rcv_close(sk, skb);
- return 0;
+ if (dccp_rcv_close(sk, skb))
+ return 0;
+ goto discard;
}
switch (sk->sk_state) {
@@ -611,7 +688,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
switch (old_state) {
case DCCP_PARTOPEN:
sk->sk_state_change(sk);
- sk_wake_async(sk, 0, POLL_OUT);
+ sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
break;
}
} else if (unlikely(dh->dccph_type == DCCP_PKT_SYNC)) {