path: root/sys/netinet/sctp_timer.c
Diffstat (limited to 'sys/netinet/sctp_timer.c')
-rw-r--r--  sys/netinet/sctp_timer.c  | 112
1 file changed, 34 insertions(+), 78 deletions(-)
diff --git a/sys/netinet/sctp_timer.c b/sys/netinet/sctp_timer.c
index 7e9dd29..c0a76e6 100644
--- a/sys/netinet/sctp_timer.c
+++ b/sys/netinet/sctp_timer.c
@@ -1,5 +1,7 @@
/*-
* Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -57,7 +59,7 @@ sctp_early_fr_timer(struct sctp_inpcb *inp,
struct sctp_tcb *stcb,
struct sctp_nets *net)
{
- struct sctp_tmit_chunk *chk, *tp2;
+ struct sctp_tmit_chunk *chk, *pchk;
struct timeval now, min_wait, tv;
unsigned int cur_rtt, cnt = 0, cnt_resend = 0;
@@ -88,9 +90,7 @@ sctp_early_fr_timer(struct sctp_inpcb *inp,
*/
min_wait.tv_sec = min_wait.tv_usec = 0;
}
- chk = TAILQ_LAST(&stcb->asoc.sent_queue, sctpchunk_listhead);
- for (; chk != NULL; chk = tp2) {
- tp2 = TAILQ_PREV(chk, sctpchunk_listhead, sctp_next);
+ TAILQ_FOREACH_REVERSE_SAFE(chk, &stcb->asoc.sent_queue, sctpchunk_listhead, sctp_next, pchk) {
if (chk->whoTo != net) {
continue;
}
@@ -215,7 +215,7 @@ sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
* not in PF state.
*/
/* Stop any running T3 timers here? */
- if ((stcb->asoc.sctp_cmt_on_off == 1) &&
+ if ((stcb->asoc.sctp_cmt_on_off > 0) &&
(stcb->asoc.sctp_cmt_pf > 0)) {
net->dest_state &= ~SCTP_ADDR_PF;
SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
@@ -574,20 +574,14 @@ sctp_backoff_on_timeout(struct sctp_tcb *stcb,
static void
sctp_recover_sent_list(struct sctp_tcb *stcb)
{
- struct sctp_tmit_chunk *chk, *tp2;
+ struct sctp_tmit_chunk *chk, *nchk;
struct sctp_association *asoc;
asoc = &stcb->asoc;
- chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
- for (; chk != NULL; chk = tp2) {
- tp2 = TAILQ_NEXT(chk, sctp_next);
- if ((compare_with_wrap(stcb->asoc.last_acked_seq,
- chk->rec.data.TSN_seq,
- MAX_TSN)) ||
- (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
-
+ TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
+ if (SCTP_TSN_GE(asoc->last_acked_seq, chk->rec.data.TSN_seq)) {
SCTP_PRINTF("Found chk:%p tsn:%x <= last_acked_seq:%x\n",
- chk, chk->rec.data.TSN_seq, stcb->asoc.last_acked_seq);
+ chk, chk->rec.data.TSN_seq, asoc->last_acked_seq);
TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
if (chk->pr_sctp_on) {
if (asoc->pr_sctp_cnt != 0)
@@ -597,19 +591,17 @@ sctp_recover_sent_list(struct sctp_tcb *stcb)
/* sa_ignore NO_NULL_CHK */
sctp_free_bufspace(stcb, asoc, chk, 1);
sctp_m_freem(chk->data);
+ chk->data = NULL;
if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(chk->flags)) {
asoc->sent_queue_cnt_removeable--;
}
}
- chk->data = NULL;
asoc->sent_queue_cnt--;
sctp_free_a_chunk(stcb, chk);
}
}
SCTP_PRINTF("after recover order is as follows\n");
- chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
- for (; chk != NULL; chk = tp2) {
- tp2 = TAILQ_NEXT(chk, sctp_next);
+ TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
SCTP_PRINTF("chk:%p TSN:%x\n", chk, chk->rec.data.TSN_seq);
}
}
@@ -631,7 +623,7 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
* We only mark chunks that have been outstanding long enough to
* have received feed-back.
*/
- struct sctp_tmit_chunk *chk, *tp2;
+ struct sctp_tmit_chunk *chk, *nchk;
struct sctp_nets *lnets;
struct timeval now, min_wait, tv;
int cur_rtt;
@@ -697,13 +689,8 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
#ifndef INVARIANTS
start_again:
#endif
- chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
- for (; chk != NULL; chk = tp2) {
- tp2 = TAILQ_NEXT(chk, sctp_next);
- if ((compare_with_wrap(stcb->asoc.last_acked_seq,
- chk->rec.data.TSN_seq,
- MAX_TSN)) ||
- (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
+ TAILQ_FOREACH_SAFE(chk, &stcb->asoc.sent_queue, sctp_next, nchk) {
+ if (SCTP_TSN_GE(stcb->asoc.last_acked_seq, chk->rec.data.TSN_seq)) {
/* Strange case our list got out of order? */
SCTP_PRINTF("Our list is out of order? last_acked:%x chk:%x",
(unsigned int)stcb->asoc.last_acked_seq, (unsigned int)chk->rec.data.TSN_seq);
@@ -850,7 +837,7 @@ start_again:
/*
* CMT: Do not allow FRs on retransmitted TSNs.
*/
- if (stcb->asoc.sctp_cmt_on_off == 1) {
+ if (stcb->asoc.sctp_cmt_on_off > 0) {
chk->no_fr_allowed = 1;
}
#ifdef THIS_SHOULD_NOT_BE_DONE
@@ -949,19 +936,6 @@ start_again:
}
}
}
- /*
- * Setup the ecn nonce re-sync point. We do this since
- * retranmissions are NOT setup for ECN. This means that do to
- * Karn's rule, we don't know the total of the peers ecn bits.
- */
- chk = TAILQ_FIRST(&stcb->asoc.send_queue);
- if (chk == NULL) {
- stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
- } else {
- stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq;
- }
- stcb->asoc.nonce_wait_for_ecne = 0;
- stcb->asoc.nonce_sum_check = 0;
/* We return 1 if we only have a window probe outstanding */
return (0);
}
@@ -1005,7 +979,7 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp,
* addition, find an alternate destination with PF-based
* find_alt_net().
*/
- if ((stcb->asoc.sctp_cmt_on_off == 1) &&
+ if ((stcb->asoc.sctp_cmt_on_off > 0) &&
(stcb->asoc.sctp_cmt_pf > 0)) {
if ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF) {
net->dest_state |= SCTP_ADDR_PF;
@@ -1014,7 +988,7 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp,
net);
}
alt = sctp_find_alternate_net(stcb, net, 2);
- } else if (stcb->asoc.sctp_cmt_on_off == 1) {
+ } else if (stcb->asoc.sctp_cmt_on_off > 0) {
/*
* CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being
* used, then pick dest with largest ssthresh for any
@@ -1129,7 +1103,7 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp,
net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
}
}
- } else if ((stcb->asoc.sctp_cmt_on_off == 1) &&
+ } else if ((stcb->asoc.sctp_cmt_on_off > 0) &&
(stcb->asoc.sctp_cmt_pf > 0) &&
((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
/*
@@ -1158,13 +1132,7 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp,
lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
/* C3. See if we need to send a Fwd-TSN */
- if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point,
- stcb->asoc.last_acked_seq, MAX_TSN)) {
- /*
- * ISSUE with ECN, see FWD-TSN processing for notes
- * on issues that will occur when the ECN NONCE
- * stuff is put into SCTP for cross checking.
- */
+ if (SCTP_TSN_GT(stcb->asoc.advanced_peer_ack_point, stcb->asoc.last_acked_seq)) {
send_forward_tsn(stcb, &stcb->asoc);
if (lchk) {
/* Assure a timer is up */
@@ -1369,7 +1337,7 @@ sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
struct sctp_nets *net)
{
struct sctp_nets *alt;
- struct sctp_tmit_chunk *asconf, *chk, *nchk;
+ struct sctp_tmit_chunk *asconf, *chk;
/* is this a first send, or a retransmission? */
if (TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) {
@@ -1427,8 +1395,7 @@ sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
atomic_add_int(&alt->ref_count, 1);
}
}
- for (chk = asconf; chk; chk = nchk) {
- nchk = TAILQ_NEXT(chk, sctp_next);
+ TAILQ_FOREACH(chk, &stcb->asoc.asconf_send_queue, sctp_next) {
if (chk->whoTo != alt) {
sctp_free_remote_addr(chk->whoTo);
chk->whoTo = alt;
@@ -1530,9 +1497,8 @@ static void
sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
struct sctp_tcb *stcb)
{
- struct sctp_stream_out *outs;
struct sctp_stream_queue_pending *sp;
- unsigned int chks_in_queue = 0;
+ unsigned int i, chks_in_queue = 0;
int being_filled = 0;
/*
@@ -1546,32 +1512,21 @@ sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
stcb->asoc.sent_queue_retran_cnt);
stcb->asoc.sent_queue_retran_cnt = 0;
}
- SCTP_TCB_SEND_LOCK(stcb);
- if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
- int i, cnt = 0;
-
- /* Check to see if a spoke fell off the wheel */
- for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
- if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
- sctp_insert_on_wheel(stcb, &stcb->asoc, &stcb->asoc.strmout[i], 1);
- cnt++;
- }
- }
- if (cnt) {
- /* yep, we lost a spoke or two */
- SCTP_PRINTF("Found an additional %d streams NOT on outwheel, corrected\n", cnt);
+ if (stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) {
+ /* No stream scheduler information, initialize scheduler */
+ stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 0);
+ if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) {
+ /* yep, we lost a stream or two */
+ SCTP_PRINTF("Found additional streams NOT managed by scheduler, corrected\n");
} else {
- /* no spokes lost, */
+ /* no streams lost */
stcb->asoc.total_output_queue_size = 0;
}
- SCTP_TCB_SEND_UNLOCK(stcb);
- return;
}
- SCTP_TCB_SEND_UNLOCK(stcb);
/* Check to see if some data queued, if so report it */
- TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
- if (!TAILQ_EMPTY(&outs->outqueue)) {
- TAILQ_FOREACH(sp, &outs->outqueue, next) {
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
+ TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
if (sp->msg_is_complete)
being_filled++;
chks_in_queue++;
@@ -1662,7 +1617,8 @@ sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
else if (ret == 0) {
break;
}
- if (cnt_sent >= SCTP_BASE_SYSCTL(sctp_hb_maxburst))
+ if (SCTP_BASE_SYSCTL(sctp_hb_maxburst) &&
+ (cnt_sent >= SCTP_BASE_SYSCTL(sctp_hb_maxburst)))
break;
}
}
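
The recurring pattern in this change is replacing hand-rolled TAILQ_FIRST()/TAILQ_NEXT() walks with the *_SAFE traversal macros, and replacing compare_with_wrap() with the SCTP_TSN_GE()/SCTP_TSN_GT() serial-number helpers. The standalone sketch below is illustrative only and is not taken from the tree: the chunk structure and the tsn_ge() helper are hypothetical, and it assumes a BSD-style <sys/queue.h> that provides TAILQ_FOREACH_SAFE. It shows why the _SAFE form is needed when the loop body may unlink the current element, mirroring the sent_queue cleanup in sctp_recover_sent_list() above.

/* Illustrative userspace sketch, not FreeBSD kernel code. */
#include <sys/queue.h>	/* assumes a BSD sys/queue.h with TAILQ_FOREACH_SAFE */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct chunk {
	uint32_t tsn;
	TAILQ_ENTRY(chunk) entries;
};
TAILQ_HEAD(chunk_head, chunk);

/*
 * Serial-number "greater or equal", in the spirit of SCTP_TSN_GE():
 * handles 32-bit TSN wrap by looking at the (mod 2^32) difference.
 */
static int
tsn_ge(uint32_t a, uint32_t b)
{
	return ((uint32_t)(a - b) < (1U << 31));
}

int
main(void)
{
	struct chunk_head head = TAILQ_HEAD_INITIALIZER(head);
	struct chunk *chk, *nchk;
	uint32_t tsn, last_acked = 3;

	/* Queue five dummy chunks with TSNs 1..5. */
	for (tsn = 1; tsn <= 5; tsn++) {
		chk = malloc(sizeof(*chk));
		if (chk == NULL)
			return (1);
		chk->tsn = tsn;
		TAILQ_INSERT_TAIL(&head, chk, entries);
	}
	/*
	 * Drop everything at or below the cumulative ack.  The _SAFE
	 * variant caches the next element in nchk before the body runs,
	 * so removing and freeing chk does not break the traversal.
	 */
	TAILQ_FOREACH_SAFE(chk, &head, entries, nchk) {
		if (tsn_ge(last_acked, chk->tsn)) {
			TAILQ_REMOVE(&head, chk, entries);
			free(chk);
		}
	}
	TAILQ_FOREACH(chk, &head, entries)
		printf("remaining tsn %u\n", (unsigned int)chk->tsn);
	return (0);
}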