summaryrefslogtreecommitdiffstats
path: root/sys/netinet
diff options
context:
space:
mode:
authortuexen <tuexen@FreeBSD.org>2010-01-17 21:00:28 +0000
committertuexen <tuexen@FreeBSD.org>2010-01-17 21:00:28 +0000
commit5aaf03563a1c2dbe84b8801d89df0650ed6b4391 (patch)
tree6d50c63f4b91e2148b599c7400e63d422d71f45c /sys/netinet
parente0b03cdcce8c6f73185aabff2687bd9540f20989 (diff)
downloadFreeBSD-src-5aaf03563a1c2dbe84b8801d89df0650ed6b4391.zip
FreeBSD-src-5aaf03563a1c2dbe84b8801d89df0650ed6b4391.tar.gz
Get rid of a lot of duplicated code for NR-SACK handling.
Generalize the SACK code to also handle NR-SACKs.
Diffstat (limited to 'sys/netinet')
-rw-r--r--sys/netinet/sctp_header.h8
-rw-r--r--sys/netinet/sctp_indata.c2010
-rw-r--r--sys/netinet/sctp_indata.h17
-rw-r--r--sys/netinet/sctp_input.c127
-rw-r--r--sys/netinet/sctp_output.c22
-rw-r--r--sys/netinet/sctp_structs.h2
6 files changed, 241 insertions, 1945 deletions
diff --git a/sys/netinet/sctp_header.h b/sys/netinet/sctp_header.h
index c551e53..e4ad773 100644
--- a/sys/netinet/sctp_header.h
+++ b/sys/netinet/sctp_header.h
@@ -284,13 +284,6 @@ struct sctp_sack_chunk {
struct sctp_sack sack;
} SCTP_PACKED;
-
-/* EY Following 3 structs define NR Selective Ack (NR_SACK) chunk */
-struct sctp_nr_gap_ack_block {
- uint16_t start; /* NR Gap Ack block start */
- uint16_t end; /* NR Gap Ack block end */
-} SCTP_PACKED;
-
struct sctp_nr_sack {
uint32_t cum_tsn_ack; /* cumulative TSN Ack */
uint32_t a_rwnd; /* updated a_rwnd of sender */
@@ -299,7 +292,6 @@ struct sctp_nr_sack {
uint16_t num_dup_tsns; /* number of duplicate TSNs */
uint16_t reserved; /* not currently used */
/* struct sctp_gap_ack_block's follow */
- /* struct sctp_nr_gap_ack_block's follow */
/* uint32_t duplicate_tsn's follow */
} SCTP_PACKED;
diff --git a/sys/netinet/sctp_indata.c b/sys/netinet/sctp_indata.c
index 9174522..56d174c 100644
--- a/sys/netinet/sctp_indata.c
+++ b/sys/netinet/sctp_indata.c
@@ -439,12 +439,11 @@ abandon:
* EY!-TODO- this tsn should be tagged nr only if it is
* out-of-order, the if statement should be modified
*/
- if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
-
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
+ asoc->peer_supports_nr_sack) {
nr_tsn = chk->rec.data.TSN_seq;
SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
- if ((nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3)) ||
- (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
+ if ((nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
/*
* EY The 1st should never happen, as in
* process_a_data_chunk method this check
@@ -525,11 +524,11 @@ abandon:
* nr_gap and tag this tsn
* NR
*/
- if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
+ asoc->peer_supports_nr_sack) {
SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
(nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
- printf("Impossible NR gap calculation?\n");
/*
* EY The
* 1st
@@ -694,7 +693,8 @@ protocol_error:
* calculate the gap and such then tag this TSN nr
* chk->rec.data.TSN_seq
*/
- if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
+ asoc->peer_supports_nr_sack) {
SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
(nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
@@ -750,11 +750,11 @@ protocol_error:
* such then tag this TSN nr
* chk->rec.data.TSN_seq
*/
- if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
+ asoc->peer_supports_nr_sack) {
SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
(nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
- printf("Impossible nr TSN set 3?\n");
/*
* EY The 1st should never
* happen, as in
@@ -773,7 +773,8 @@ protocol_error:
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
- if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map,
+ if (compare_with_wrap(nr_tsn,
+ asoc->highest_tsn_inside_nr_map,
MAX_TSN))
asoc->highest_tsn_inside_nr_map = nr_tsn;
}
@@ -1759,7 +1760,8 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
/* EY set this tsn present in nr_sack's nr_mapping_array */
- if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
+ asoc->peer_supports_nr_sack) {
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
SCTP_REVERSE_OUT_TSN_PRES(gap, tsn, asoc);
@@ -2015,7 +2017,8 @@ failed_express_del:
* block here I should check if this delivered tsn
* is out_of_order, if yes then update the nr_map
*/
- if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
+ asoc->peer_supports_nr_sack) {
/* EY debugging block */
{
/*
@@ -2260,13 +2263,13 @@ failed_pdapi_express_del:
sctp_add_to_readq(stcb->sctp_ep, stcb,
control,
&stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
-
/*
* EY It is added to the read queue in prev if block
* here I should check if this delivered tsn is
* out_of_order, if yes then update the nr_map
*/
- if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
+ asoc->peer_supports_nr_sack) {
/*
* EY check if the mapping_array and
* nr_mapping array are consistent
@@ -2412,6 +2415,10 @@ finish_express_del:
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
+ /*
+ * EY - set tsn present in nr-map if doing nr-sacks and the tsn is
+ * non-renegable
+ */
if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
asoc->peer_supports_nr_sack &&
(SCTP_BASE_SYSCTL(sctp_do_drain) == 0)) {
@@ -3416,49 +3423,37 @@ sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1
}
-static void
+static int
sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
- struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
+ uint32_t last_tsn, uint32_t * biggest_tsn_acked,
uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
- int num_seg, int *ecn_seg_sums)
+ int num_seg, int num_nr_seg, int *ecn_seg_sums)
{
- /************************************************/
- /* process fragments and update sendqueue */
- /************************************************/
- struct sctp_sack *sack;
struct sctp_gap_ack_block *frag, block;
struct sctp_tmit_chunk *tp1;
int i;
int num_frs = 0;
+ int chunk_freed;
+ int non_revocable;
+ uint16_t frag_strt, frag_end;
+ uint32_t last_frag_high;
- uint16_t frag_strt, frag_end, primary_flag_set;
- u_long last_frag_high;
-
- /*
- * @@@ JRI : TODO: This flag is not used anywhere .. remove?
- */
- if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
- primary_flag_set = 1;
- } else {
- primary_flag_set = 0;
- }
- sack = &ch->sack;
-
- frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
- sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
- *offset += sizeof(block);
- if (frag == NULL) {
- return;
- }
tp1 = NULL;
last_frag_high = 0;
- for (i = 0; i < num_seg; i++) {
+ chunk_freed = 0;
+
+ for (i = 0; i < (num_seg + num_nr_seg); i++) {
+ frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
+ *offset += sizeof(block);
+ if (frag == NULL) {
+ return (chunk_freed);
+ }
frag_strt = ntohs(frag->start);
frag_end = ntohs(frag->end);
/* some sanity checks on the fragment offsets */
if (frag_strt > frag_end) {
/* this one is malformed, skip */
- frag++;
continue;
}
if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
@@ -3468,7 +3463,6 @@ sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct
/* mark acked dgs and find out the highestTSN being acked */
if (tp1 == NULL) {
tp1 = TAILQ_FIRST(&asoc->sent_queue);
-
/* save the locations of the last frags */
last_frag_high = frag_end + last_tsn;
} else {
@@ -3496,14 +3490,15 @@ sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct
}
last_frag_high = frag_end + last_tsn;
}
- sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
- 0, &num_frs, biggest_newly_acked_tsn,
- this_sack_lowest_newack, ecn_seg_sums);
- frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
- sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
- *offset += sizeof(block);
- if (frag == NULL) {
- break;
+ if (i < num_seg) {
+ non_revocable = 0;
+ } else {
+ non_revocable = 1;
+ }
+ if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
+ non_revocable, &num_frs, biggest_newly_acked_tsn,
+ this_sack_lowest_newack, ecn_seg_sums)) {
+ chunk_freed = 1;
}
}
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
@@ -3512,6 +3507,7 @@ sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct
*biggest_newly_acked_tsn,
last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
}
+ return (chunk_freed);
}
static void
@@ -4450,7 +4446,6 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
struct socket *so;
#endif
-
SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
/* sa_ignore NO_NULL_CHK */
@@ -4525,10 +4520,10 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
asoc->nonce_sum_check = 1;
/*
- * now we must calculate what the base is.
+ * Now we must calculate what the base is.
* We do this based on two things, we know
* the total's for all the segments
- * gap-acked in the SACK (none), We also
+ * gap-acked in the SACK (none). We also
* know the SACK's nonce sum, its in
* nonce_sum_flag. So we can build a truth
* table to back-calculate the new value of
@@ -4567,6 +4562,7 @@ again:
/* sa_ignore FREED_MEMORY */
TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
if (tp1->window_probe) {
+ /* move back to data send queue */
sctp_window_probe_recovery(stcb, asoc, net, tp1);
break;
}
@@ -4620,9 +4616,7 @@ again:
*/
if (sctp_fs_audit(asoc)) {
TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
- if (net->flight_size) {
- net->flight_size = 0;
- }
+ net->flight_size = 0;
}
asoc->total_flight = 0;
asoc->total_flight_count = 0;
@@ -4782,20 +4776,62 @@ again:
}
}
+/* EY- nr_sack */
+/* Identifies the non-renegable tsns that are revoked*/
+static void
+sctp_check_for_nr_revoked(struct sctp_tcb *stcb,
+ struct sctp_association *asoc, uint32_t cumack,
+ u_long biggest_tsn_acked)
+{
+ struct sctp_tmit_chunk *tp1;
+
+ for (tp1 = TAILQ_FIRST(&asoc->sent_queue); tp1; tp1 = TAILQ_NEXT(tp1, sctp_next)) {
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
+ MAX_TSN)) {
+ /*
+ * ok this guy is either ACK or MARKED. If it is
+ * ACKED it has been previously acked but not this
+ * time i.e. revoked. If it is MARKED it was ACK'ed
+ * again.
+ */
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
+ MAX_TSN))
+ break;
+
+
+ if (tp1->sent == SCTP_DATAGRAM_NR_ACKED) {
+ /*
+ * EY! a non-renegable TSN is revoked, need
+ * to abort the association
+ */
+ /*
+ * EY TODO: put in the code to abort the
+ * assoc.
+ */
+ return;
+ } else if (tp1->sent == SCTP_DATAGRAM_NR_MARKED) {
+ /* it has been re-acked in this SACK */
+ tp1->sent = SCTP_DATAGRAM_NR_ACKED;
+ }
+ }
+ if (tp1->sent == SCTP_DATAGRAM_UNSENT)
+ break;
+ }
+ return;
+}
+
void
-sctp_handle_sack(struct mbuf *m, int offset,
- struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
- struct sctp_nets *net_from, int *abort_now, int sack_len, uint32_t rwnd)
+sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
+ struct sctp_tcb *stcb, struct sctp_nets *net_from,
+ uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
+ int *abort_now, uint8_t flags,
+ uint32_t cum_ack, uint32_t rwnd)
{
struct sctp_association *asoc;
- struct sctp_sack *sack;
struct sctp_tmit_chunk *tp1, *tp2;
- uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
- this_sack_lowest_newack;
+ uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
uint32_t sav_cum_ack;
- uint16_t num_seg, num_dup;
uint16_t wake_him = 0;
- unsigned int sack_length;
uint32_t send_s = 0;
long j;
int accum_moved = 0;
@@ -4831,15 +4867,13 @@ sctp_handle_sack(struct mbuf *m, int offset,
* if in shutdown_recv state.
*/
SCTP_TCB_LOCK_ASSERT(stcb);
- sack = &ch->sack;
/* CMT DAC algo */
this_sack_lowest_newack = 0;
j = 0;
- sack_length = (unsigned int)sack_len;
- /* ECN Nonce */
SCTP_STAT_INCR(sctps_slowpath_sack);
- nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
- cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
+ last_tsn = cum_ack;
+ nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
+ cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
#ifdef SCTP_ASOCLOG_OF_TSNS
stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
stcb->asoc.cumack_log_at++;
@@ -4847,13 +4881,12 @@ sctp_handle_sack(struct mbuf *m, int offset,
stcb->asoc.cumack_log_at = 0;
}
#endif
- num_seg = ntohs(sack->num_gap_ack_blks);
a_rwnd = rwnd;
- /* CMT DAC algo */
- cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
- num_dup = ntohs(sack->num_dup_tsns);
-
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
+ sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
+ rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
+ }
old_rwnd = stcb->asoc.peers_rwnd;
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
@@ -4873,27 +4906,16 @@ sctp_handle_sack(struct mbuf *m, int offset,
SCTP_LOG_NEW_SACK);
}
if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
- int off_to_dup, iii;
+ uint16_t i;
uint32_t *dupdata, dblock;
- off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk);
- if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) {
- dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
+ for (i = 0; i < num_dup; i++) {
+ dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
sizeof(uint32_t), (uint8_t *) & dblock);
- off_to_dup += sizeof(uint32_t);
- if (dupdata) {
- for (iii = 0; iii < num_dup; iii++) {
- sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
- dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
- sizeof(uint32_t), (uint8_t *) & dblock);
- if (dupdata == NULL)
- break;
- off_to_dup += sizeof(uint32_t);
- }
+ if (dupdata == NULL) {
+ break;
}
- } else {
- SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n",
- off_to_dup, num_dup, sack_length, num_seg);
+ sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
}
}
if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
@@ -4915,8 +4937,6 @@ sctp_handle_sack(struct mbuf *m, int offset,
hopeless_peer:
panic("Impossible sack 1");
#else
-
-
/*
* no way, we have not even sent this TSN out yet.
* Peer is hopelessly messed up with us.
@@ -4956,8 +4976,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
/* update the Rwnd of the peer */
if (TAILQ_EMPTY(&asoc->sent_queue) &&
TAILQ_EMPTY(&asoc->send_queue) &&
- (asoc->stream_queue_cnt == 0)
- ) {
+ (asoc->stream_queue_cnt == 0)) {
/* nothing left on send/sent and strmq */
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
@@ -5128,14 +5147,7 @@ sctp_handle_sack(struct mbuf *m, int offset,
/* always set this up to cum-ack */
asoc->this_sack_highest_gap = last_tsn;
- /* Move offset up to point to gaps/dups */
- offset += sizeof(struct sctp_sack_chunk);
- if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) {
-
- /* skip corrupt segments */
- goto skip_segments;
- }
- if (num_seg > 0) {
+ if ((num_seg > 0) || (num_nr_seg > 0)) {
/*
* CMT: SFR algo (and HTNA) - this_sack_highest_newack has
@@ -5153,10 +5165,11 @@ sctp_handle_sack(struct mbuf *m, int offset,
* handling NEWLY ACKED chunks. this_sack_lowest_newack is
* used for CMT DAC algo. saw_newack will also change.
*/
- sctp_handle_segments(m, &offset, stcb, asoc, ch, last_tsn,
- &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
- num_seg, &ecn_seg_sums);
-
+ if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
+ &biggest_tsn_newly_acked, &this_sack_lowest_newack,
+ num_seg, num_nr_seg, &ecn_seg_sums)) {
+ wake_him++;
+ }
if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
/*
* validate the biggest_tsn_acked in the gap acks if
@@ -5172,7 +5185,6 @@ sctp_handle_sack(struct mbuf *m, int offset,
}
}
}
-skip_segments:
/*******************************************/
/* cancel ALL T3-send timer if accum moved */
/*******************************************/
@@ -5310,6 +5322,19 @@ done_with_it:
if (tp1 != NULL) {
/* Peer revoked all dg's marked or acked */
TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ /*
+ * EY- maybe check only if it is nr_acked
+ * nr_marked may not be possible
+ */
+ if ((tp1->sent == SCTP_DATAGRAM_NR_ACKED) ||
+ (tp1->sent == SCTP_DATAGRAM_NR_MARKED)) {
+ /*
+ * EY! - TODO: Something previously
+ * nr_gapped is reneged, abort the
+ * association
+ */
+ return;
+ }
if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
(tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
tp1->sent = SCTP_DATAGRAM_SENT;
@@ -5345,6 +5370,10 @@ done_with_it:
else
asoc->saw_sack_with_frags = 0;
+ /* EY! - not sure about if there should be an IF */
+ if (num_nr_seg > 0)
+ sctp_check_for_nr_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
+
/* JRS - Use the congestion control given in the CC module */
asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
@@ -5480,7 +5509,7 @@ done_with_it:
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
this_sack_lowest_newack = cum_ack;
}
- if (num_seg > 0) {
+ if ((num_seg > 0) || (num_nr_seg > 0)) {
sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
}
@@ -5603,9 +5632,12 @@ again:
}
if (net->flight_size) {
j++;
- sctp_timer_start(SCTP_TIMER_TYPE_SEND,
- stcb->sctp_ep, stcb, net);
+ if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net);
+ }
if (net->window_probe) {
+ net->window_probe = 0;
}
} else {
if (net->window_probe) {
@@ -5613,7 +5645,6 @@ again:
* In window probes we must assure a timer
* is still running there
*/
-
if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
sctp_timer_start(SCTP_TIMER_TYPE_SEND,
stcb->sctp_ep, stcb, net);
@@ -5661,7 +5692,11 @@ again:
done_once = 1;
goto again;
}
- /* Fix up the a-p-a-p for future PR-SCTP sends */
+ /*********************************************/
+ /* Here we perform PR-SCTP procedures */
+ /* (section 4.2) */
+ /*********************************************/
+ /* C1. update advancedPeerAckPoint */
if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
asoc->advanced_peer_ack_point = cum_ack;
}
@@ -5773,8 +5808,8 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
* such then tag this TSN nr
* chk->rec.data.TSN_seq
*/
- if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
-
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
+ asoc->peer_supports_nr_sack) {
SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
(nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
@@ -5874,7 +5909,8 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
* such then tag this TSN nr
* chk->rec.data.TSN_seq
*/
- if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
+ asoc->peer_supports_nr_sack) {
SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
(nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
@@ -6359,1743 +6395,3 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
sctp_deliver_reasm_check(stcb, &stcb->asoc);
}
}
-
-/* EY fully identical to sctp_express_handle_sack, duplicated for only naming convention */
-void
-sctp_express_handle_nr_sack(struct sctp_tcb *stcb, uint32_t cumack,
- uint32_t rwnd, int nonce_sum_flag, int *abort_now)
-{
- struct sctp_nets *net;
- struct sctp_association *asoc;
- struct sctp_tmit_chunk *tp1, *tp2;
- uint32_t old_rwnd;
- int win_probe_recovery = 0;
- int win_probe_recovered = 0;
- int j, done_once = 0;
-
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
- sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
- rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
- }
- SCTP_TCB_LOCK_ASSERT(stcb);
-#ifdef SCTP_ASOCLOG_OF_TSNS
- stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
- stcb->asoc.cumack_log_at++;
- if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
- stcb->asoc.cumack_log_at = 0;
- }
-#endif
- asoc = &stcb->asoc;
- old_rwnd = asoc->peers_rwnd;
- if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
- /* old ack */
- return;
- } else if (asoc->last_acked_seq == cumack) {
- /* Window update sack */
- asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
- (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
- if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
- /* SWS sender side engages */
- asoc->peers_rwnd = 0;
- }
- if (asoc->peers_rwnd > old_rwnd) {
- goto again;
- }
- return;
- }
- /* First setup for CC stuff */
- TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
- net->prev_cwnd = net->cwnd;
- net->net_ack = 0;
- net->net_ack2 = 0;
-
- /*
- * CMT: Reset CUC and Fast recovery algo variables before
- * SACK processing
- */
- net->new_pseudo_cumack = 0;
- net->will_exit_fast_recovery = 0;
- }
- if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
- uint32_t send_s;
-
- if (!TAILQ_EMPTY(&asoc->sent_queue)) {
- tp1 = TAILQ_LAST(&asoc->sent_queue,
- sctpchunk_listhead);
- send_s = tp1->rec.data.TSN_seq + 1;
- } else {
- send_s = asoc->sending_seq;
- }
- if ((cumack == send_s) ||
- compare_with_wrap(cumack, send_s, MAX_TSN)) {
-#ifndef INVARIANTS
- struct mbuf *oper;
-
-#endif
-#ifdef INVARIANTS
- panic("Impossible sack 1");
-#else
- *abort_now = 1;
- /* XXX */
- oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
- 0, M_DONTWAIT, 1, MT_DATA);
- if (oper) {
- struct sctp_paramhdr *ph;
- uint32_t *ippp;
-
- SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
- sizeof(uint32_t);
- ph = mtod(oper, struct sctp_paramhdr *);
- ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
- ph->param_length = htons(SCTP_BUF_LEN(oper));
- ippp = (uint32_t *) (ph + 1);
- *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
- }
- stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
- sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
- return;
-#endif
- }
- }
- asoc->this_sack_highest_gap = cumack;
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
- sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
- stcb->asoc.overall_error_count,
- 0,
- SCTP_FROM_SCTP_INDATA,
- __LINE__);
- }
- stcb->asoc.overall_error_count = 0;
- if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
- /* process the new consecutive TSN first */
- tp1 = TAILQ_FIRST(&asoc->sent_queue);
- while (tp1) {
- tp2 = TAILQ_NEXT(tp1, sctp_next);
- if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
- MAX_TSN) ||
- cumack == tp1->rec.data.TSN_seq) {
- if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
- printf("Warning, an unsent is now acked?\n");
- }
- /*
- * ECN Nonce: Add the nonce to the sender's
- * nonce sum
- */
- asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
- if (tp1->sent < SCTP_DATAGRAM_ACKED) {
- /*
- * If it is less than ACKED, it is
- * now no-longer in flight. Higher
- * values may occur during marking
- */
- if (tp1->sent < SCTP_DATAGRAM_RESEND) {
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
- sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
- tp1->whoTo->flight_size,
- tp1->book_size,
- (uintptr_t) tp1->whoTo,
- tp1->rec.data.TSN_seq);
- }
- sctp_flight_size_decrease(tp1);
- /* sa_ignore NO_NULL_CHK */
- sctp_total_flight_decrease(stcb, tp1);
- }
- tp1->whoTo->net_ack += tp1->send_size;
- if (tp1->snd_count < 2) {
- /*
- * True non-retransmited
- * chunk
- */
- tp1->whoTo->net_ack2 +=
- tp1->send_size;
-
- /* update RTO too? */
- if (tp1->do_rtt) {
- tp1->whoTo->RTO =
- /*
- * sa_ignore
- * NO_NULL_CHK
- */
- sctp_calculate_rto(stcb,
- asoc, tp1->whoTo,
- &tp1->sent_rcv_time,
- sctp_align_safe_nocopy);
- tp1->do_rtt = 0;
- }
- }
- /*
- * CMT: CUCv2 algorithm. From the
- * cumack'd TSNs, for each TSN being
- * acked for the first time, set the
- * following variables for the
- * corresp destination.
- * new_pseudo_cumack will trigger a
- * cwnd update.
- * find_(rtx_)pseudo_cumack will
- * trigger search for the next
- * expected (rtx-)pseudo-cumack.
- */
- tp1->whoTo->new_pseudo_cumack = 1;
- tp1->whoTo->find_pseudo_cumack = 1;
- tp1->whoTo->find_rtx_pseudo_cumack = 1;
-
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
- /* sa_ignore NO_NULL_CHK */
- sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
- }
- }
- if (tp1->sent == SCTP_DATAGRAM_RESEND) {
- sctp_ucount_decr(asoc->sent_queue_retran_cnt);
- }
- if (tp1->rec.data.chunk_was_revoked) {
- /* deflate the cwnd */
- tp1->whoTo->cwnd -= tp1->book_size;
- tp1->rec.data.chunk_was_revoked = 0;
- }
- tp1->sent = SCTP_DATAGRAM_ACKED;
- TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
- if (tp1->data) {
- /* sa_ignore NO_NULL_CHK */
- sctp_free_bufspace(stcb, asoc, tp1, 1);
- sctp_m_freem(tp1->data);
- }
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
- sctp_log_sack(asoc->last_acked_seq,
- cumack,
- tp1->rec.data.TSN_seq,
- 0,
- 0,
- SCTP_LOG_FREE_SENT);
- }
- tp1->data = NULL;
- asoc->sent_queue_cnt--;
- sctp_free_a_chunk(stcb, tp1);
- tp1 = tp2;
- } else {
- break;
- }
- }
-
- }
- /* sa_ignore NO_NULL_CHK */
- if (stcb->sctp_socket) {
-#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
- struct socket *so;
-
-#endif
-
- SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
- /* sa_ignore NO_NULL_CHK */
- sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
- }
-#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
- so = SCTP_INP_SO(stcb->sctp_ep);
- atomic_add_int(&stcb->asoc.refcnt, 1);
- SCTP_TCB_UNLOCK(stcb);
- SCTP_SOCKET_LOCK(so, 1);
- SCTP_TCB_LOCK(stcb);
- atomic_subtract_int(&stcb->asoc.refcnt, 1);
- if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
- /* assoc was freed while we were unlocked */
- SCTP_SOCKET_UNLOCK(so, 1);
- return;
- }
-#endif
- sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
-#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
- SCTP_SOCKET_UNLOCK(so, 1);
-#endif
- } else {
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
- sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
- }
- }
-
- /* JRS - Use the congestion control given in the CC module */
- if (asoc->last_acked_seq != cumack)
- asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
-
- asoc->last_acked_seq = cumack;
-
- if (TAILQ_EMPTY(&asoc->sent_queue)) {
- /* nothing left in-flight */
- TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
- net->flight_size = 0;
- net->partial_bytes_acked = 0;
- }
- asoc->total_flight = 0;
- asoc->total_flight_count = 0;
- }
- /* Fix up the a-p-a-p for future PR-SCTP sends */
- if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
- asoc->advanced_peer_ack_point = cumack;
- }
- /* ECN Nonce updates */
- if (asoc->ecn_nonce_allowed) {
- if (asoc->nonce_sum_check) {
- if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
- if (asoc->nonce_wait_for_ecne == 0) {
- struct sctp_tmit_chunk *lchk;
-
- lchk = TAILQ_FIRST(&asoc->send_queue);
- asoc->nonce_wait_for_ecne = 1;
- if (lchk) {
- asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
- } else {
- asoc->nonce_wait_tsn = asoc->sending_seq;
- }
- } else {
- if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
- (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
- /*
- * Misbehaving peer. We need
- * to react to this guy
- */
- asoc->ecn_allowed = 0;
- asoc->ecn_nonce_allowed = 0;
- }
- }
- }
- } else {
- /* See if Resynchronization Possible */
- if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
- asoc->nonce_sum_check = 1;
- /*
- * now we must calculate what the base is.
- * We do this based on two things, we know
- * the total's for all the segments
- * gap-acked in the SACK (none), We also
- * know the SACK's nonce sum, its in
- * nonce_sum_flag. So we can build a truth
- * table to back-calculate the new value of
- * asoc->nonce_sum_expect_base:
- *
- * SACK-flag-Value Seg-Sums Base 0 0 0
- * 1 0 1 0 1 1 1 1 0
- */
- asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
- }
- }
- }
- /* RWND update */
- asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
- (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
- if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
- /* SWS sender side engages */
- asoc->peers_rwnd = 0;
- }
- if (asoc->peers_rwnd > old_rwnd) {
- win_probe_recovery = 1;
- }
- /* Now assure a timer where data is queued at */
-again:
- j = 0;
- TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
- int to_ticks;
-
- if (win_probe_recovery && (net->window_probe)) {
- win_probe_recovered = 1;
- /*
- * Find first chunk that was used with window probe
- * and clear the sent
- */
- /* sa_ignore FREED_MEMORY */
- TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
- if (tp1->window_probe) {
- /* move back to data send queue */
- sctp_window_probe_recovery(stcb, asoc, net, tp1);
- break;
- }
- }
- }
- if (net->RTO == 0) {
- to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
- } else {
- to_ticks = MSEC_TO_TICKS(net->RTO);
- }
- if (net->flight_size) {
-
- j++;
- (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
- sctp_timeout_handler, &net->rxt_timer);
- if (net->window_probe) {
- net->window_probe = 0;
- }
- } else {
- if (net->window_probe) {
- /*
- * In window probes we must assure a timer
- * is still running there
- */
- net->window_probe = 0;
- (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
- sctp_timeout_handler, &net->rxt_timer);
- } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
- sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
- stcb, net,
- SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
- }
- if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
- if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
- SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
- sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
- SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
- }
- }
- }
- }
- if ((j == 0) &&
- (!TAILQ_EMPTY(&asoc->sent_queue)) &&
- (asoc->sent_queue_retran_cnt == 0) &&
- (win_probe_recovered == 0) &&
- (done_once == 0)) {
- /*
- * huh, this should not happen unless all packets are
- * PR-SCTP and marked to skip of course.
- */
- if (sctp_fs_audit(asoc)) {
- TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
- net->flight_size = 0;
- }
- asoc->total_flight = 0;
- asoc->total_flight_count = 0;
- asoc->sent_queue_retran_cnt = 0;
- TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
- if (tp1->sent < SCTP_DATAGRAM_RESEND) {
- sctp_flight_size_increase(tp1);
- sctp_total_flight_increase(stcb, tp1);
- } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
- asoc->sent_queue_retran_cnt++;
- }
- }
- }
- done_once = 1;
- goto again;
- }
- /**********************************/
- /* Now what about shutdown issues */
- /**********************************/
- if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
- /* nothing left on sendqueue.. consider done */
- /* clean up */
- if ((asoc->stream_queue_cnt == 1) &&
- ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
- (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
- (asoc->locked_on_sending)
- ) {
- struct sctp_stream_queue_pending *sp;
-
- /*
- * I may be in a state where we got all across.. but
- * cannot write more due to a shutdown... we abort
- * since the user did not indicate EOR in this case.
- * The sp will be cleaned during free of the asoc.
- */
- sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
- sctp_streamhead);
- if ((sp) && (sp->length == 0)) {
- /* Let cleanup code purge it */
- if (sp->msg_is_complete) {
- asoc->stream_queue_cnt--;
- } else {
- asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
- asoc->locked_on_sending = NULL;
- asoc->stream_queue_cnt--;
- }
- }
- }
- if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
- (asoc->stream_queue_cnt == 0)) {
- if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
- /* Need to abort here */
- struct mbuf *oper;
-
- abort_out_now:
- *abort_now = 1;
- /* XXX */
- oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
- 0, M_DONTWAIT, 1, MT_DATA);
- if (oper) {
- struct sctp_paramhdr *ph;
- uint32_t *ippp;
-
- SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
- sizeof(uint32_t);
- ph = mtod(oper, struct sctp_paramhdr *);
- ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
- ph->param_length = htons(SCTP_BUF_LEN(oper));
- ippp = (uint32_t *) (ph + 1);
- *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
- }
- stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
- sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
- } else {
- if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
- (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
- SCTP_STAT_DECR_GAUGE32(sctps_currestab);
- }
- SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
- SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
- sctp_stop_timers_for_shutdown(stcb);
- sctp_send_shutdown(stcb,
- stcb->asoc.primary_destination);
- sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
- stcb->sctp_ep, stcb, asoc->primary_destination);
- sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
- stcb->sctp_ep, stcb, asoc->primary_destination);
- }
- } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
- (asoc->stream_queue_cnt == 0)) {
- if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
- goto abort_out_now;
- }
- SCTP_STAT_DECR_GAUGE32(sctps_currestab);
- SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
- SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
- sctp_send_shutdown_ack(stcb,
- stcb->asoc.primary_destination);
-
- sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
- stcb->sctp_ep, stcb, asoc->primary_destination);
- }
- }
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
- sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
- rwnd,
- stcb->asoc.peers_rwnd,
- stcb->asoc.total_flight,
- stcb->asoc.total_output_queue_size);
- }
-}
-
-/* EY! nr_sack version of sctp_handle_segments, nr-gapped TSNs get removed from RtxQ in this method*/
-static void
-sctp_handle_nr_sack_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
- struct sctp_nr_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
- uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
- uint32_t num_seg, uint32_t num_nr_seg, int *ecn_seg_sums)
-{
- /************************************************/
- /* process fragments and update sendqueue */
- /************************************************/
- struct sctp_nr_sack *nr_sack;
- struct sctp_gap_ack_block *frag, block;
- struct sctp_nr_gap_ack_block *nr_frag, nr_block;
- struct sctp_tmit_chunk *tp1;
- uint32_t i;
- int wake_him = 0;
- int num_frs = 0;
-
- uint16_t frag_strt, frag_end, primary_flag_set;
- uint16_t nr_frag_strt, nr_frag_end;
-
- uint32_t last_frag_high;
- uint32_t last_nr_frag_high;
-
- /*
- * @@@ JRI : TODO: This flag is not used anywhere .. remove?
- */
- if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
- primary_flag_set = 1;
- } else {
- primary_flag_set = 0;
- }
- nr_sack = &ch->nr_sack;
-
- /*
- * EY! - I will process nr_gaps similarly,by going to this position
- * again if All bit is set
- */
- frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
- sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
- *offset += sizeof(block);
- if (frag == NULL) {
- return;
- }
- tp1 = NULL;
- last_frag_high = 0;
- for (i = 0; i < num_seg; i++) {
- frag_strt = ntohs(frag->start);
- frag_end = ntohs(frag->end);
- /* some sanity checks on the fargment offsets */
- if (frag_strt > frag_end) {
- /* this one is malformed, skip */
- frag++;
- continue;
- }
- if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
- MAX_TSN))
- *biggest_tsn_acked = frag_end + last_tsn;
-
- /* mark acked dgs and find out the highestTSN being acked */
- if (tp1 == NULL) {
- tp1 = TAILQ_FIRST(&asoc->sent_queue);
-
- /* save the locations of the last frags */
- last_frag_high = frag_end + last_tsn;
- } else {
- /*
- * now lets see if we need to reset the queue due to
- * a out-of-order SACK fragment
- */
- if (compare_with_wrap(frag_strt + last_tsn,
- last_frag_high, MAX_TSN)) {
- /*
- * if the new frag starts after the last TSN
- * frag covered, we are ok and this one is
- * beyond the last one
- */
- ;
- } else {
- /*
- * ok, they have reset us, so we need to
- * reset the queue this will cause extra
- * hunting but hey, they chose the
- * performance hit when they failed to order
- * there gaps..
- */
- tp1 = TAILQ_FIRST(&asoc->sent_queue);
- }
- last_frag_high = frag_end + last_tsn;
- }
- sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
- 0, &num_frs, biggest_newly_acked_tsn,
- this_sack_lowest_newack, ecn_seg_sums);
- frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
- sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
- *offset += sizeof(block);
- if (frag == NULL) {
- break;
- }
- }
-
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
- if (num_frs)
- sctp_log_fr(*biggest_tsn_acked,
- *biggest_newly_acked_tsn,
- last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
- }
- nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
- sizeof(struct sctp_nr_gap_ack_block), (uint8_t *) & nr_block);
- *offset += sizeof(nr_block);
-
-
-
- if (nr_frag == NULL) {
- return;
- }
- tp1 = NULL;
- last_nr_frag_high = 0;
- /* Reset to beginning for the nr_sack section */
- tp1 = TAILQ_FIRST(&asoc->sent_queue);
-
- for (i = 0; i < num_nr_seg; i++) {
-
- nr_frag_strt = ntohs(nr_frag->start);
- nr_frag_end = ntohs(nr_frag->end);
-
- /* some sanity checks on the nr fargment offsets */
- if (nr_frag_strt > nr_frag_end) {
- /* this one is malformed, skip */
- nr_frag++;
- continue;
- }
- /* mark acked dgs and find out the highestTSN being acked */
- if (tp1 == NULL) {
- tp1 = TAILQ_FIRST(&asoc->sent_queue);
-
- /* save the locations of the last frags */
- last_nr_frag_high = nr_frag_end + last_tsn;
- } else {
- /*
- * now lets see if we need to reset the queue due to
- * a out-of-order SACK fragment
- */
- if (compare_with_wrap(nr_frag_strt + last_tsn,
- last_nr_frag_high, MAX_TSN)) {
- /*
- * if the new frag starts after the last TSN
- * frag covered, we are ok and this one is
- * beyond the last one
- */
- ;
- } else {
- /*
- * ok, they have reset us, so we need to
- * reset the queue this will cause extra
- * hunting but hey, they chose the
- * performance hit when they failed to order
- * there gaps..
- */
- tp1 = TAILQ_FIRST(&asoc->sent_queue);
- }
- last_nr_frag_high = nr_frag_end + last_tsn;
- }
- num_frs = 0;
- wake_him = sctp_process_segment_range(stcb, &tp1, last_tsn,
- nr_frag_strt, nr_frag_end, 1,
- &num_frs, biggest_newly_acked_tsn,
- this_sack_lowest_newack, ecn_seg_sums);
-
- nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
- sizeof(struct sctp_nr_gap_ack_block),
- (uint8_t *) & nr_block);
- *offset += sizeof(nr_block);
- if (nr_frag == NULL) {
- break;
- }
- }
-
- /*
- * EY- wake up the socket if things have been removed from the sent
- * queue
- */
- if ((wake_him) && (stcb->sctp_socket)) {
-#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
- struct socket *so;
-
-#endif
- SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
- /*
- * if (SCTP_BASE_SYSCTL(sctp_logging_level) &
- * SCTP_WAKE_LOGGING_ENABLE) { sctp_wakeup_log(stcb,
- * cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);}
- */
-#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
- so = SCTP_INP_SO(stcb->sctp_ep);
- atomic_add_int(&stcb->asoc.refcnt, 1);
- SCTP_TCB_UNLOCK(stcb);
- SCTP_SOCKET_LOCK(so, 1);
- SCTP_TCB_LOCK(stcb);
- atomic_subtract_int(&stcb->asoc.refcnt, 1);
- if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
- /* assoc was freed while we were unlocked */
- SCTP_SOCKET_UNLOCK(so, 1);
- return;
- }
-#endif
- sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
-#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
- SCTP_SOCKET_UNLOCK(so, 1);
-#endif
- } /* else { if
- * (SCTP_BASE_SYSCTL(sctp_logging_level) &
- * SCTP_WAKE_LOGGING_ENABLE) {
- * sctp_wakeup_log(stcb, cum_ack, wake_him,
- * SCTP_NOWAKE_FROM_SACK); } } */
-}
-
-/* EY- nr_sack */
-/* Identifies the non-renegable tsns that are revoked*/
-static void
-sctp_check_for_nr_revoked(struct sctp_tcb *stcb,
- struct sctp_association *asoc, uint32_t cumack,
- u_long biggest_tsn_acked)
-{
- struct sctp_tmit_chunk *tp1;
-
- tp1 = TAILQ_FIRST(&asoc->sent_queue);
- while (tp1) {
- if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
- MAX_TSN)) {
- /*
- * ok this guy is either ACK or MARKED. If it is
- * ACKED it has been previously acked but not this
- * time i.e. revoked. If it is MARKED it was ACK'ed
- * again.
- */
- if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
- MAX_TSN))
- break;
-
-
- if (tp1->sent == SCTP_DATAGRAM_NR_ACKED) {
- /*
- * EY! a non-renegable TSN is revoked, need
- * to abort the association
- */
- /*
- * EY TODO: put in the code to abort the
- * assoc.
- */
- return;
- } else if (tp1->sent == SCTP_DATAGRAM_NR_MARKED) {
- /* it has been re-acked in this SACK */
- tp1->sent = SCTP_DATAGRAM_NR_ACKED;
- }
- }
- if (tp1->sent == SCTP_DATAGRAM_UNSENT)
- break;
- tp1 = TAILQ_NEXT(tp1, sctp_next);
- }
-}
-
-/* EY! nr_sack version of sctp_handle_sack, nr_gap_ack processing should be added to this method*/
-void
-sctp_handle_nr_sack(struct mbuf *m, int offset,
- struct sctp_nr_sack_chunk *ch, struct sctp_tcb *stcb,
- struct sctp_nets *net_from, int *abort_now, int nr_sack_len, uint32_t rwnd)
-{
- struct sctp_association *asoc;
-
- /* EY sack */
- struct sctp_nr_sack *nr_sack;
- struct sctp_tmit_chunk *tp1, *tp2;
- uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
- this_sack_lowest_newack;
- uint32_t sav_cum_ack;
-
- /* EY num_seg */
- uint16_t num_seg, num_nr_seg, num_dup;
- uint16_t wake_him = 0;
- unsigned int nr_sack_length;
- uint32_t send_s = 0;
- long j;
- int accum_moved = 0;
- int will_exit_fast_recovery = 0;
- uint32_t a_rwnd, old_rwnd;
- int win_probe_recovery = 0;
- int win_probe_recovered = 0;
- struct sctp_nets *net = NULL;
- int nonce_sum_flag, ecn_seg_sums = 0;
- int done_once;
- uint8_t reneged_all = 0;
- uint8_t cmt_dac_flag;
-
- /*
- * we take any chance we can to service our queues since we cannot
- * get awoken when the socket is read from :<
- */
- /*
- * Now perform the actual SACK handling: 1) Verify that it is not an
- * old sack, if so discard. 2) If there is nothing left in the send
- * queue (cum-ack is equal to last acked) then you have a duplicate
- * too, update any rwnd change and verify no timers are running.
- * then return. 3) Process any new consequtive data i.e. cum-ack
- * moved process these first and note that it moved. 4) Process any
- * sack blocks. 5) Drop any acked from the queue. 6) Check for any
- * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
- * sync up flightsizes and things, stop all timers and also check
- * for shutdown_pending state. If so then go ahead and send off the
- * shutdown. If in shutdown recv, send off the shutdown-ack and
- * start that timer, Ret. 9) Strike any non-acked things and do FR
- * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
- * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
- * if in shutdown_recv state.
- */
- SCTP_TCB_LOCK_ASSERT(stcb);
- nr_sack = &ch->nr_sack;
- /* CMT DAC algo */
- this_sack_lowest_newack = 0;
- j = 0;
- nr_sack_length = (unsigned int)nr_sack_len;
- /* ECN Nonce */
- SCTP_STAT_INCR(sctps_slowpath_sack);
- nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
- cum_ack = last_tsn = ntohl(nr_sack->cum_tsn_ack);
-#ifdef SCTP_ASOCLOG_OF_TSNS
- stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
- stcb->asoc.cumack_log_at++;
- if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
- stcb->asoc.cumack_log_at = 0;
- }
-#endif
- num_seg = ntohs(nr_sack->num_gap_ack_blks);
- num_nr_seg = ntohs(nr_sack->num_nr_gap_ack_blks);
- a_rwnd = rwnd;
-
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
- sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
- rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
- }
- /* CMT DAC algo */
- cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
- num_dup = ntohs(nr_sack->num_dup_tsns);
-
- old_rwnd = stcb->asoc.peers_rwnd;
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
- sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
- stcb->asoc.overall_error_count,
- 0,
- SCTP_FROM_SCTP_INDATA,
- __LINE__);
- }
- stcb->asoc.overall_error_count = 0;
- asoc = &stcb->asoc;
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
- sctp_log_sack(asoc->last_acked_seq,
- cum_ack,
- 0,
- num_seg,
- num_dup,
- SCTP_LOG_NEW_SACK);
- }
- if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
- int off_to_dup, iii;
- uint32_t *dupdata, dblock;
-
- off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) +
- (num_nr_seg * sizeof(struct sctp_nr_gap_ack_block)) + sizeof(struct sctp_nr_sack_chunk);
- if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= nr_sack_length) {
- dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
- sizeof(uint32_t), (uint8_t *) & dblock);
- off_to_dup += sizeof(uint32_t);
- if (dupdata) {
- for (iii = 0; iii < num_dup; iii++) {
- sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
- dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
- sizeof(uint32_t), (uint8_t *) & dblock);
- if (dupdata == NULL)
- break;
- off_to_dup += sizeof(uint32_t);
- }
- }
- } else {
- SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d nr_sack_len:%d num gaps:%d num nr_gaps:%d\n",
- off_to_dup, num_dup, nr_sack_length, num_seg, num_nr_seg);
- }
- }
- if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
- /* reality check */
- if (!TAILQ_EMPTY(&asoc->sent_queue)) {
- tp1 = TAILQ_LAST(&asoc->sent_queue,
- sctpchunk_listhead);
- send_s = tp1->rec.data.TSN_seq + 1;
- } else {
- send_s = asoc->sending_seq;
- }
- if (cum_ack == send_s ||
- compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
-#ifndef INVARIANTS
- struct mbuf *oper;
-
-#endif
-#ifdef INVARIANTS
- hopeless_peer:
- panic("Impossible sack 1");
-#else
-
-
- /*
- * no way, we have not even sent this TSN out yet.
- * Peer is hopelessly messed up with us.
- */
- hopeless_peer:
- *abort_now = 1;
- /* XXX */
- oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
- 0, M_DONTWAIT, 1, MT_DATA);
- if (oper) {
- struct sctp_paramhdr *ph;
- uint32_t *ippp;
-
- SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
- sizeof(uint32_t);
- ph = mtod(oper, struct sctp_paramhdr *);
- ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
- ph->param_length = htons(SCTP_BUF_LEN(oper));
- ippp = (uint32_t *) (ph + 1);
- *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
- }
- stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
- sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
- return;
-#endif
- }
- }
- /**********************/
- /* 1) check the range */
- /**********************/
- if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
- /* acking something behind */
- return;
- }
- sav_cum_ack = asoc->last_acked_seq;
-
- /* update the Rwnd of the peer */
- if (TAILQ_EMPTY(&asoc->sent_queue) &&
- TAILQ_EMPTY(&asoc->send_queue) &&
- (asoc->stream_queue_cnt == 0)
- ) {
- /* nothing left on send/sent and strmq */
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
- sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
- asoc->peers_rwnd, 0, 0, a_rwnd);
- }
- asoc->peers_rwnd = a_rwnd;
- if (asoc->sent_queue_retran_cnt) {
- asoc->sent_queue_retran_cnt = 0;
- }
- if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
- /* SWS sender side engages */
- asoc->peers_rwnd = 0;
- }
- /* stop any timers */
- TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
- sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
- stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
- if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
- if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
- SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
- sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
- SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
- }
- }
- net->partial_bytes_acked = 0;
- net->flight_size = 0;
- }
- asoc->total_flight = 0;
- asoc->total_flight_count = 0;
- return;
- }
- /*
- * We init netAckSz and netAckSz2 to 0. These are used to track 2
- * things. The total byte count acked is tracked in netAckSz AND
- * netAck2 is used to track the total bytes acked that are un-
- * amibguious and were never retransmitted. We track these on a per
- * destination address basis.
- */
- TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
- net->prev_cwnd = net->cwnd;
- net->net_ack = 0;
- net->net_ack2 = 0;
-
- /*
- * CMT: Reset CUC and Fast recovery algo variables before
- * SACK processing
- */
- net->new_pseudo_cumack = 0;
- net->will_exit_fast_recovery = 0;
- }
- /* process the new consecutive TSN first */
- tp1 = TAILQ_FIRST(&asoc->sent_queue);
- while (tp1) {
- if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
- MAX_TSN) ||
- last_tsn == tp1->rec.data.TSN_seq) {
- if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
- /*
- * ECN Nonce: Add the nonce to the sender's
- * nonce sum
- */
- asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
- accum_moved = 1;
- if (tp1->sent < SCTP_DATAGRAM_ACKED) {
- /*
- * If it is less than ACKED, it is
- * now no-longer in flight. Higher
- * values may occur during marking
- */
- if ((tp1->whoTo->dest_state &
- SCTP_ADDR_UNCONFIRMED) &&
- (tp1->snd_count < 2)) {
- /*
- * If there was no retran
- * and the address is
- * un-confirmed and we sent
- * there and are now
- * sacked.. its confirmed,
- * mark it so.
- */
- tp1->whoTo->dest_state &=
- ~SCTP_ADDR_UNCONFIRMED;
- }
- if (tp1->sent < SCTP_DATAGRAM_RESEND) {
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
- sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
- tp1->whoTo->flight_size,
- tp1->book_size,
- (uintptr_t) tp1->whoTo,
- tp1->rec.data.TSN_seq);
- }
- sctp_flight_size_decrease(tp1);
- sctp_total_flight_decrease(stcb, tp1);
- }
- tp1->whoTo->net_ack += tp1->send_size;
-
- /* CMT SFR and DAC algos */
- this_sack_lowest_newack = tp1->rec.data.TSN_seq;
- tp1->whoTo->saw_newack = 1;
-
- if (tp1->snd_count < 2) {
- /*
- * True non-retransmited
- * chunk
- */
- tp1->whoTo->net_ack2 +=
- tp1->send_size;
-
- /* update RTO too? */
- if (tp1->do_rtt) {
- tp1->whoTo->RTO =
- sctp_calculate_rto(stcb,
- asoc, tp1->whoTo,
- &tp1->sent_rcv_time,
- sctp_align_safe_nocopy);
- tp1->do_rtt = 0;
- }
- }
- /*
- * CMT: CUCv2 algorithm. From the
- * cumack'd TSNs, for each TSN being
- * acked for the first time, set the
- * following variables for the
- * corresp destination.
- * new_pseudo_cumack will trigger a
- * cwnd update.
- * find_(rtx_)pseudo_cumack will
- * trigger search for the next
- * expected (rtx-)pseudo-cumack.
- */
- tp1->whoTo->new_pseudo_cumack = 1;
- tp1->whoTo->find_pseudo_cumack = 1;
- tp1->whoTo->find_rtx_pseudo_cumack = 1;
-
-
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
- sctp_log_sack(asoc->last_acked_seq,
- cum_ack,
- tp1->rec.data.TSN_seq,
- 0,
- 0,
- SCTP_LOG_TSN_ACKED);
- }
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
- sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
- }
- }
- if (tp1->sent == SCTP_DATAGRAM_RESEND) {
- sctp_ucount_decr(asoc->sent_queue_retran_cnt);
-#ifdef SCTP_AUDITING_ENABLED
- sctp_audit_log(0xB3,
- (asoc->sent_queue_retran_cnt & 0x000000ff));
-#endif
- }
- if (tp1->rec.data.chunk_was_revoked) {
- /* deflate the cwnd */
- tp1->whoTo->cwnd -= tp1->book_size;
- tp1->rec.data.chunk_was_revoked = 0;
- }
- tp1->sent = SCTP_DATAGRAM_ACKED;
- }
- } else {
- break;
- }
- tp1 = TAILQ_NEXT(tp1, sctp_next);
- }
- biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
- /* always set this up to cum-ack */
- asoc->this_sack_highest_gap = last_tsn;
-
- /* Move offset up to point to gaps/dups */
- offset += sizeof(struct sctp_nr_sack_chunk);
- if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_nr_sack_chunk)) > nr_sack_length) {
-
- /* skip corrupt segments */
- goto skip_segments;
- }
- if (num_seg > 0) {
-
- /*
- * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
- * to be greater than the cumack. Also reset saw_newack to 0
- * for all dests.
- */
- TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
- net->saw_newack = 0;
- net->this_sack_highest_newack = last_tsn;
- }
-
- /*
- * thisSackHighestGap will increase while handling NEW
- * segments this_sack_highest_newack will increase while
- * handling NEWLY ACKED chunks. this_sack_lowest_newack is
- * used for CMT DAC algo. saw_newack will also change.
- */
-
- sctp_handle_nr_sack_segments(m, &offset, stcb, asoc, ch, last_tsn,
- &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
- num_seg, num_nr_seg, &ecn_seg_sums);
-
-
- if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
- /*
- * validate the biggest_tsn_acked in the gap acks if
- * strict adherence is wanted.
- */
- if ((biggest_tsn_acked == send_s) ||
- (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
- /*
- * peer is either confused or we are under
- * attack. We must abort.
- */
- goto hopeless_peer;
- }
- }
- }
-skip_segments:
- /*******************************************/
- /* cancel ALL T3-send timer if accum moved */
- /*******************************************/
- if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
- TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
- if (net->new_pseudo_cumack)
- sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
- stcb, net,
- SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
-
- }
- } else {
- if (accum_moved) {
- TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
- sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
- stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
- }
- }
- }
- /********************************************/
- /* drop the acked chunks from the sendqueue */
- /********************************************/
- asoc->last_acked_seq = cum_ack;
-
- tp1 = TAILQ_FIRST(&asoc->sent_queue);
- if (tp1 == NULL)
- goto done_with_it;
- do {
- if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
- MAX_TSN)) {
- break;
- }
- if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
- /* no more sent on list */
- printf("Warning, tp1->sent == %d and its now acked?\n",
- tp1->sent);
- }
- tp2 = TAILQ_NEXT(tp1, sctp_next);
- TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
- if (tp1->pr_sctp_on) {
- if (asoc->pr_sctp_cnt != 0)
- asoc->pr_sctp_cnt--;
- }
- if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
- (asoc->total_flight > 0)) {
-#ifdef INVARIANTS
- panic("Warning flight size is postive and should be 0");
-#else
- SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
- asoc->total_flight);
-#endif
- asoc->total_flight = 0;
- }
- if (tp1->data) {
- /* sa_ignore NO_NULL_CHK */
- sctp_free_bufspace(stcb, asoc, tp1, 1);
- sctp_m_freem(tp1->data);
- if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
- asoc->sent_queue_cnt_removeable--;
- }
- }
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
- sctp_log_sack(asoc->last_acked_seq,
- cum_ack,
- tp1->rec.data.TSN_seq,
- 0,
- 0,
- SCTP_LOG_FREE_SENT);
- }
- tp1->data = NULL;
- asoc->sent_queue_cnt--;
- sctp_free_a_chunk(stcb, tp1);
- wake_him++;
- tp1 = tp2;
- } while (tp1 != NULL);
-
-done_with_it:
- /* sa_ignore NO_NULL_CHK */
- if ((wake_him) && (stcb->sctp_socket)) {
-#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
- struct socket *so;
-
-#endif
- SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
- sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
- }
-#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
- so = SCTP_INP_SO(stcb->sctp_ep);
- atomic_add_int(&stcb->asoc.refcnt, 1);
- SCTP_TCB_UNLOCK(stcb);
- SCTP_SOCKET_LOCK(so, 1);
- SCTP_TCB_LOCK(stcb);
- atomic_subtract_int(&stcb->asoc.refcnt, 1);
- if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
- /* assoc was freed while we were unlocked */
- SCTP_SOCKET_UNLOCK(so, 1);
- return;
- }
-#endif
- sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
-#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
- SCTP_SOCKET_UNLOCK(so, 1);
-#endif
- } else {
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
- sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
- }
- }
-
- if (asoc->fast_retran_loss_recovery && accum_moved) {
- if (compare_with_wrap(asoc->last_acked_seq,
- asoc->fast_recovery_tsn, MAX_TSN) ||
- asoc->last_acked_seq == asoc->fast_recovery_tsn) {
- /* Setup so we will exit RFC2582 fast recovery */
- will_exit_fast_recovery = 1;
- }
- }
- /*
- * Check for revoked fragments:
- *
- * if Previous sack - Had no frags then we can't have any revoked if
- * Previous sack - Had frag's then - If we now have frags aka
- * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
- * some of them. else - The peer revoked all ACKED fragments, since
- * we had some before and now we have NONE.
- */
-
- if (num_seg)
- sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
-
- else if (asoc->saw_sack_with_frags) {
- int cnt_revoked = 0;
-
- tp1 = TAILQ_FIRST(&asoc->sent_queue);
- if (tp1 != NULL) {
- /* Peer revoked all dg's marked or acked */
- TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
- /*
- * EY- maybe check only if it is nr_acked
- * nr_marked may not be possible
- */
- if ((tp1->sent == SCTP_DATAGRAM_NR_ACKED) ||
- (tp1->sent == SCTP_DATAGRAM_NR_MARKED)) {
- /*
- * EY! - TODO: Something previously
- * nr_gapped is reneged, abort the
- * association
- */
- return;
- }
- if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
- (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
- tp1->sent = SCTP_DATAGRAM_SENT;
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
- sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
- tp1->whoTo->flight_size,
- tp1->book_size,
- (uintptr_t) tp1->whoTo,
- tp1->rec.data.TSN_seq);
- }
- sctp_flight_size_increase(tp1);
- sctp_total_flight_increase(stcb, tp1);
- tp1->rec.data.chunk_was_revoked = 1;
- /*
- * To ensure that this increase in
- * flightsize, which is artificial,
- * does not throttle the sender, we
- * also increase the cwnd
- * artificially.
- */
- tp1->whoTo->cwnd += tp1->book_size;
- cnt_revoked++;
- }
- }
- if (cnt_revoked) {
- reneged_all = 1;
- }
- }
- asoc->saw_sack_with_frags = 0;
- }
- if (num_seg)
- asoc->saw_sack_with_frags = 1;
- else
- asoc->saw_sack_with_frags = 0;
-
- /* EY! - not sure about if there should be an IF */
- if (num_nr_seg)
- sctp_check_for_nr_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
- else if (asoc->saw_sack_with_nr_frags) {
- /*
- * EY!- TODO: all previously nr_gapped chunks have been
- * reneged abort the association
- */
- asoc->saw_sack_with_nr_frags = 0;
- }
- if (num_nr_seg)
- asoc->saw_sack_with_nr_frags = 1;
- else
- asoc->saw_sack_with_nr_frags = 0;
- /* JRS - Use the congestion control given in the CC module */
- asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
-
- if (TAILQ_EMPTY(&asoc->sent_queue)) {
- /* nothing left in-flight */
- TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
- /* stop all timers */
- if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
- if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
- SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
- sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
- SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
- }
- }
- sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
- stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
- net->flight_size = 0;
- net->partial_bytes_acked = 0;
- }
- asoc->total_flight = 0;
- asoc->total_flight_count = 0;
- }
- /**********************************/
- /* Now what about shutdown issues */
- /**********************************/
- if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
- /* nothing left on sendqueue.. consider done */
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
- sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
- asoc->peers_rwnd, 0, 0, a_rwnd);
- }
- asoc->peers_rwnd = a_rwnd;
- if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
- /* SWS sender side engages */
- asoc->peers_rwnd = 0;
- }
- /* clean up */
- if ((asoc->stream_queue_cnt == 1) &&
- ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
- (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
- (asoc->locked_on_sending)
- ) {
- struct sctp_stream_queue_pending *sp;
-
- /*
- * I may be in a state where we got all across.. but
- * cannot write more due to a shutdown... we abort
- * since the user did not indicate EOR in this case.
- */
- sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
- sctp_streamhead);
- if ((sp) && (sp->length == 0)) {
- asoc->locked_on_sending = NULL;
- if (sp->msg_is_complete) {
- asoc->stream_queue_cnt--;
- } else {
- asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
- asoc->stream_queue_cnt--;
- }
- }
- }
- if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
- (asoc->stream_queue_cnt == 0)) {
- if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
- /* Need to abort here */
- struct mbuf *oper;
-
- abort_out_now:
- *abort_now = 1;
- /* XXX */
- oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
- 0, M_DONTWAIT, 1, MT_DATA);
- if (oper) {
- struct sctp_paramhdr *ph;
- uint32_t *ippp;
-
- SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
- sizeof(uint32_t);
- ph = mtod(oper, struct sctp_paramhdr *);
- ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
- ph->param_length = htons(SCTP_BUF_LEN(oper));
- ippp = (uint32_t *) (ph + 1);
- *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
- }
- stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
- sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
- return;
- } else {
- if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
- (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
- SCTP_STAT_DECR_GAUGE32(sctps_currestab);
- }
- SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
- SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
- sctp_stop_timers_for_shutdown(stcb);
- sctp_send_shutdown(stcb,
- stcb->asoc.primary_destination);
- sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
- stcb->sctp_ep, stcb, asoc->primary_destination);
- sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
- stcb->sctp_ep, stcb, asoc->primary_destination);
- }
- return;
- } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
- (asoc->stream_queue_cnt == 0)) {
- if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
- goto abort_out_now;
- }
- SCTP_STAT_DECR_GAUGE32(sctps_currestab);
- SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
- SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
- sctp_send_shutdown_ack(stcb,
- stcb->asoc.primary_destination);
-
- sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
- stcb->sctp_ep, stcb, asoc->primary_destination);
- return;
- }
- }
- /*
- * Now here we are going to recycle net_ack for a different use...
- * HEADS UP.
- */
- TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
- net->net_ack = 0;
- }
-
- /*
- * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
- * to be done. Setting this_sack_lowest_newack to the cum_ack will
- * automatically ensure that.
- */
- if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
- this_sack_lowest_newack = cum_ack;
- }
- if (num_seg > 0) {
- sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
- biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
- }
- /* JRS - Use the congestion control given in the CC module */
- asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
-
- /******************************************************************
- * Here we do the stuff with ECN Nonce checking.
- * We basically check to see if the nonce sum flag was incorrect
- * or if resynchronization needs to be done. Also if we catch a
- * misbehaving receiver we give him the kick.
- ******************************************************************/
-
- if (asoc->ecn_nonce_allowed) {
- if (asoc->nonce_sum_check) {
- if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
- if (asoc->nonce_wait_for_ecne == 0) {
- struct sctp_tmit_chunk *lchk;
-
- lchk = TAILQ_FIRST(&asoc->send_queue);
- asoc->nonce_wait_for_ecne = 1;
- if (lchk) {
- asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
- } else {
- asoc->nonce_wait_tsn = asoc->sending_seq;
- }
- } else {
- if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
- (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
- /*
- * Misbehaving peer. We need
- * to react to this guy
- */
- asoc->ecn_allowed = 0;
- asoc->ecn_nonce_allowed = 0;
- }
- }
- }
- } else {
- /* See if Resynchronization Possible */
- if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
- asoc->nonce_sum_check = 1;
- /*
- * now we must calculate what the base is.
- * We do this based on two things, we know
- * the total's for all the segments
- * gap-acked in the SACK, its stored in
- * ecn_seg_sums. We also know the SACK's
- * nonce sum, its in nonce_sum_flag. So we
- * can build a truth table to back-calculate
- * the new value of
- * asoc->nonce_sum_expect_base:
- *
- * SACK-flag-Value Seg-Sums Base 0 0 0
- * 1 0 1 0 1 1 1 1 0
- */
- asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
- }
- }
- }
- /* Now are we exiting loss recovery ? */
- if (will_exit_fast_recovery) {
- /* Ok, we must exit fast recovery */
- asoc->fast_retran_loss_recovery = 0;
- }
- if ((asoc->sat_t3_loss_recovery) &&
- ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
- MAX_TSN) ||
- (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
- /* end satellite t3 loss recovery */
- asoc->sat_t3_loss_recovery = 0;
- }
- /*
- * CMT Fast recovery
- */
- TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
- if (net->will_exit_fast_recovery) {
- /* Ok, we must exit fast recovery */
- net->fast_retran_loss_recovery = 0;
- }
- }
-
- /* Adjust and set the new rwnd value */
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
- sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
- asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
- }
- asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
- (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
- if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
- /* SWS sender side engages */
- asoc->peers_rwnd = 0;
- }
- if (asoc->peers_rwnd > old_rwnd) {
- win_probe_recovery = 1;
- }
- /*
- * Now we must setup so we have a timer up for anyone with
- * outstanding data.
- */
- done_once = 0;
-again:
- j = 0;
- TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
- if (win_probe_recovery && (net->window_probe)) {
- win_probe_recovered = 1;
- /*-
- * Find first chunk that was used with
- * window probe and clear the event. Put
- * it back into the send queue as if has
- * not been sent.
- */
- TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
- if (tp1->window_probe) {
- sctp_window_probe_recovery(stcb, asoc, net, tp1);
- break;
- }
- }
- }
- if (net->flight_size) {
- j++;
- sctp_timer_start(SCTP_TIMER_TYPE_SEND,
- stcb->sctp_ep, stcb, net);
- if (net->window_probe) {
- net->window_probe = 0;
- }
- } else {
- if (net->window_probe) {
- net->window_probe = 0;
- sctp_timer_start(SCTP_TIMER_TYPE_SEND,
- stcb->sctp_ep, stcb, net);
- } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
- sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
- stcb, net,
- SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
- }
- if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
- if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
- SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
- sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
- SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
- }
- }
- }
- }
- if ((j == 0) &&
- (!TAILQ_EMPTY(&asoc->sent_queue)) &&
- (asoc->sent_queue_retran_cnt == 0) &&
- (win_probe_recovered == 0) &&
- (done_once == 0)) {
- /*
- * huh, this should not happen unless all packets are
- * PR-SCTP and marked to skip of course.
- */
- if (sctp_fs_audit(asoc)) {
- TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
- net->flight_size = 0;
- }
- asoc->total_flight = 0;
- asoc->total_flight_count = 0;
- asoc->sent_queue_retran_cnt = 0;
- TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
- if (tp1->sent < SCTP_DATAGRAM_RESEND) {
- sctp_flight_size_increase(tp1);
- sctp_total_flight_increase(stcb, tp1);
- } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
- asoc->sent_queue_retran_cnt++;
- }
- }
- }
- done_once = 1;
- goto again;
- }
- /*********************************************/
- /* Here we perform PR-SCTP procedures */
- /* (section 4.2) */
- /*********************************************/
- /* C1. update advancedPeerAckPoint */
- if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
- asoc->advanced_peer_ack_point = cum_ack;
- }
- /* C2. try to further move advancedPeerAckPoint ahead */
- if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
- struct sctp_tmit_chunk *lchk;
- uint32_t old_adv_peer_ack_point;
-
- old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
- lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
- /* C3. See if we need to send a Fwd-TSN */
- if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
- MAX_TSN)) {
- /*
- * ISSUE with ECN, see FWD-TSN processing for notes
- * on issues that will occur when the ECN NONCE
- * stuff is put into SCTP for cross checking.
- */
- if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
- MAX_TSN)) {
- send_forward_tsn(stcb, asoc);
- /*
- * ECN Nonce: Disable Nonce Sum check when
- * FWD TSN is sent and store resync tsn
- */
- asoc->nonce_sum_check = 0;
- asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
- } else if (lchk) {
- /* try to FR fwd-tsn's that get lost too */
- lchk->rec.data.fwd_tsn_cnt++;
- if (lchk->rec.data.fwd_tsn_cnt > 3) {
- send_forward_tsn(stcb, asoc);
- lchk->rec.data.fwd_tsn_cnt = 0;
- }
- }
- }
- if (lchk) {
- /* Assure a timer is up */
- sctp_timer_start(SCTP_TIMER_TYPE_SEND,
- stcb->sctp_ep, stcb, lchk->whoTo);
- }
- }
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
- sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
- a_rwnd,
- stcb->asoc.peers_rwnd,
- stcb->asoc.total_flight,
- stcb->asoc.total_output_queue_size);
- }
-}
diff --git a/sys/netinet/sctp_indata.h b/sys/netinet/sctp_indata.h
index 76fa946..b6a8323 100644
--- a/sys/netinet/sctp_indata.h
+++ b/sys/netinet/sctp_indata.h
@@ -96,18 +96,11 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
uint32_t rwnd, int nonce_sum_flag, int *abort_now);
void
-sctp_handle_sack(struct mbuf *m, int offset, struct sctp_sack_chunk *, struct sctp_tcb *,
- struct sctp_nets *, int *, int, uint32_t);
-
-/* EY does "exactly" the same as sctp_express_handle_sack */
-void
-sctp_express_handle_nr_sack(struct sctp_tcb *stcb, uint32_t cumack,
- uint32_t rwnd, int nonce_sum_flag, int *abort_now);
-
-/* EY nr_sack version of sctp_handle_sack */
-void
-sctp_handle_nr_sack(struct mbuf *m, int offset, struct sctp_nr_sack_chunk *, struct sctp_tcb *,
- struct sctp_nets *, int *, int, uint32_t);
+sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
+ struct sctp_tcb *stcb, struct sctp_nets *net_from,
+ uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
+ int *abort_now, uint8_t flags,
+ uint32_t cum_ack, uint32_t rwnd);
/* draft-ietf-tsvwg-usctp */
void
diff --git a/sys/netinet/sctp_input.c b/sys/netinet/sctp_input.c
index 18b7d55..89a7a53 100644
--- a/sys/netinet/sctp_input.c
+++ b/sys/netinet/sctp_input.c
@@ -4599,16 +4599,18 @@ process_control_chunks:
struct sctp_sack_chunk *sack;
int abort_now = 0;
uint32_t a_rwnd, cum_ack;
- uint16_t num_seg;
+ uint16_t num_seg, num_dup;
+ uint8_t flags;
+ int offset_seg, offset_dup;
int nonce_sum_flag;
- if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) {
- SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n");
- *offset = length;
- if (locked_tcb) {
- SCTP_TCB_UNLOCK(locked_tcb);
- }
- return (NULL);
+ if (stcb == NULL) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing SACK chunk\n");
+ break;
+ }
+ if (chk_length < sizeof(struct sctp_sack_chunk)) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n");
+ break;
}
if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
/*-
@@ -4619,15 +4621,22 @@ process_control_chunks:
break;
}
sack = (struct sctp_sack_chunk *)ch;
- nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
+ flags = ch->chunk_flags;
+ nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
cum_ack = ntohl(sack->sack.cum_tsn_ack);
num_seg = ntohs(sack->sack.num_gap_ack_blks);
+ num_dup = ntohs(sack->sack.num_dup_tsns);
a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
+ if (sizeof(struct sctp_sack_chunk) +
+ num_seg * sizeof(struct sctp_gap_ack_block) +
+ num_dup * sizeof(uint32_t) != chk_length) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n");
+ break;
+ }
+ offset_seg = *offset + sizeof(struct sctp_sack_chunk);
+ offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
- cum_ack,
- num_seg,
- a_rwnd
- );
+ cum_ack, num_seg, a_rwnd);
stcb->asoc.seen_a_sack_this_pkt = 1;
if ((stcb->asoc.pr_sctp_cnt == 0) &&
(num_seg == 0) &&
@@ -4649,19 +4658,21 @@ process_control_chunks:
&abort_now);
} else {
if (netp && *netp)
- sctp_handle_sack(m, *offset,
- sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
- }
- if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
- TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
- (stcb->asoc.stream_queue_cnt == 0)) {
- sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+ sctp_handle_sack(m, offset_seg, offset_dup,
+ stcb, *netp,
+ num_seg, 0, num_dup, &abort_now, flags,
+ cum_ack, a_rwnd);
}
if (abort_now) {
/* ABORT signal from sack processing */
*offset = length;
return (NULL);
}
+ if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
+ TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
+ (stcb->asoc.stream_queue_cnt == 0)) {
+ sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+ }
}
break;
/*
@@ -4675,80 +4686,90 @@ process_control_chunks:
struct sctp_nr_sack_chunk *nr_sack;
int abort_now = 0;
uint32_t a_rwnd, cum_ack;
- uint16_t num_seg, num_nr_seg;
+ uint16_t num_seg, num_nr_seg, num_dup;
+ uint8_t flags;
+ int offset_seg, offset_dup;
int nonce_sum_flag;
- if ((stcb == NULL) || (chk_length < sizeof(struct sctp_nr_sack_chunk))) {
- SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on nr_sack chunk, too small\n");
- ignore_nr_sack:
- *offset = length;
- if (locked_tcb) {
- SCTP_TCB_UNLOCK(locked_tcb);
- }
- return (NULL);
- }
/*
* EY nr_sacks have not been negotiated but
* the peer end sent an nr_sack, silently
* discard the chunk
*/
- if (!(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)) {
+ if (!(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
+ stcb->asoc.peer_supports_nr_sack)) {
goto unknown_chunk;
}
+ if (stcb == NULL) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing NR-SACK chunk\n");
+ break;
+ }
+ if (chk_length < sizeof(struct sctp_nr_sack_chunk)) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR-SACK chunk, too small\n");
+ break;
+ }
if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
/*-
* If we have sent a shutdown-ack, we will pay no
* attention to a sack sent in to us since
* we don't care anymore.
*/
- goto ignore_nr_sack;
+ break;
}
nr_sack = (struct sctp_nr_sack_chunk *)ch;
- nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
+ flags = ch->chunk_flags;
+ nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
+ num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns);
a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd);
+ if (sizeof(struct sctp_nr_sack_chunk) +
+ (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) +
+ num_dup * sizeof(uint32_t) != chk_length) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n");
+ break;
+ }
+ offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk);
+ offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
- cum_ack,
- num_seg,
- a_rwnd
- );
+ cum_ack, num_seg, a_rwnd);
stcb->asoc.seen_a_sack_this_pkt = 1;
if ((stcb->asoc.pr_sctp_cnt == 0) &&
- (num_seg == 0) &&
+ (num_seg == 0) && (num_nr_seg == 0) &&
((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
(cum_ack == stcb->asoc.last_acked_seq)) &&
(stcb->asoc.saw_sack_with_frags == 0) &&
- (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
- ) {
+ (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
/*
* We have a SIMPLE sack having no
* prior segments and data on sent
- * queue to be acked.. Use the
- * faster path sack processing. We
- * also allow window update sacks
- * with no missing segments to go
- * this way too.
+ * queue to be acked. Use the faster
+ * path sack processing. We also
+ * allow window update sacks with no
+ * missing segments to go this way
+ * too.
*/
- sctp_express_handle_nr_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
+ sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
&abort_now);
} else {
if (netp && *netp)
- sctp_handle_nr_sack(m, *offset,
- nr_sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
- }
- if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
- TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
- (stcb->asoc.stream_queue_cnt == 0)) {
- sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+ sctp_handle_sack(m, offset_seg, offset_dup,
+ stcb, *netp,
+ num_seg, num_nr_seg, num_dup, &abort_now, flags,
+ cum_ack, a_rwnd);
}
if (abort_now) {
/* ABORT signal from sack processing */
*offset = length;
return (NULL);
}
+ if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
+ TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
+ (stcb->asoc.stream_queue_cnt == 0)) {
+ sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+ }
}
break;
diff --git a/sys/netinet/sctp_output.c b/sys/netinet/sctp_output.c
index af0e488..4afe009 100644
--- a/sys/netinet/sctp_output.c
+++ b/sys/netinet/sctp_output.c
@@ -10172,7 +10172,6 @@ sctp_send_nr_sack(struct sctp_tcb *stcb)
struct sctp_nr_sack_chunk *nr_sack;
struct sctp_gap_ack_block *gap_descriptor;
- struct sctp_nr_gap_ack_block *nr_gap_descriptor;
struct sack_track *selector;
struct sack_track *nr_selector;
@@ -10433,8 +10432,6 @@ sctp_send_nr_sack(struct sctp_tcb *stcb)
}
/*---------------------------------------------------------filling the nr_gap_ack blocks----------------------------------------------------*/
- nr_gap_descriptor = (struct sctp_nr_gap_ack_block *)gap_descriptor;
-
/* EY - there will be gaps + nr_gaps if draining is possible */
if ((SCTP_BASE_SYSCTL(sctp_do_drain)) && (limit_reached == 0)) {
@@ -10470,7 +10467,7 @@ sctp_send_nr_sack(struct sctp_tcb *stcb)
* ok to merge.
*/
num_nr_gap_blocks--;
- nr_gap_descriptor--;
+ gap_descriptor--;
}
if (nr_selector->num_entries == 0)
mergeable = 0;
@@ -10489,12 +10486,12 @@ sctp_send_nr_sack(struct sctp_tcb *stcb)
* left side
*/
mergeable = 0;
- nr_gap_descriptor->start = htons((nr_selector->gaps[j].start + offset));
+ gap_descriptor->start = htons((nr_selector->gaps[j].start + offset));
}
- nr_gap_descriptor->end = htons((nr_selector->gaps[j].end + offset));
+ gap_descriptor->end = htons((nr_selector->gaps[j].end + offset));
num_nr_gap_blocks++;
- nr_gap_descriptor++;
- if (((caddr_t)nr_gap_descriptor + sizeof(struct sctp_nr_gap_ack_block)) > limit) {
+ gap_descriptor++;
+ if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
/* no more room */
limit_reached = 1;
break;
@@ -10517,7 +10514,7 @@ sctp_send_nr_sack(struct sctp_tcb *stcb)
/* now we must add any dups we are going to report. */
if ((limit_reached == 0) && (asoc->numduptsns)) {
- dup = (uint32_t *) nr_gap_descriptor;
+ dup = (uint32_t *) gap_descriptor;
for (i = 0; i < asoc->numduptsns; i++) {
*dup = htonl(asoc->dup_tsns[i]);
dup++;
@@ -10537,10 +10534,9 @@ sctp_send_nr_sack(struct sctp_tcb *stcb)
num_nr_gap_blocks = num_gap_blocks;
num_gap_blocks = 0;
}
- a_chk->send_size = (sizeof(struct sctp_nr_sack_chunk) +
- (num_gap_blocks * sizeof(struct sctp_gap_ack_block)) +
- (num_nr_gap_blocks * sizeof(struct sctp_nr_gap_ack_block)) +
- (num_dups * sizeof(int32_t)));
+ a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
+ (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
+ num_dups * sizeof(int32_t);
SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
diff --git a/sys/netinet/sctp_structs.h b/sys/netinet/sctp_structs.h
index b1a0f13..9b952fd 100644
--- a/sys/netinet/sctp_structs.h
+++ b/sys/netinet/sctp_structs.h
@@ -1043,8 +1043,6 @@ struct sctp_association {
uint8_t delayed_connection;
uint8_t ifp_had_enobuf;
uint8_t saw_sack_with_frags;
- /* EY */
- uint8_t saw_sack_with_nr_frags;
uint8_t in_asocid_hash;
uint8_t assoc_up_sent;
uint8_t adaptation_needed;
OpenPOWER on IntegriCloud