path: root/sys/netinet/sctp_indata.c
author		rrs <rrs@FreeBSD.org>	2008-12-06 13:19:54 +0000
committer	rrs <rrs@FreeBSD.org>	2008-12-06 13:19:54 +0000
commit		0f2b9dafa358fb3b64efed4842cd31650ceea672 (patch)
tree		773b646ae7f5af08e1421071f2aa493698537042 /sys/netinet/sctp_indata.c
parent		f4594595d3de298d45caf389d9cee0d893cedfc0 (diff)
Code from the hack-session known as the IETF (and a bit of debugging afterwards):
- Fix protection code for notification generation.
- Decouple associd from vtag.
- Allow vtags to have less stringent requirements in non-uniqueness.
  o Don't pre-hash them when you issue one in a cookie.
  o Allow duplicates and use addresses and ports to discriminate amongst the
    duplicates during lookup.
- Add support for the NAT draft draft-ietf-behave-sctpnat-00; this is still
  experimental and needs more extensive testing with the Jason Butt ipfw changes.
- Support for the SENDER_DRY event to get DTLS in OpenSSL working, with a set of
  patches from Michael Tuexen (hopefully heading to OpenSSL soon).
- Update the support of SCTP-AUTH by Peter Lei.
- Use macros for refcounting.
- Fix MTU for UDP encapsulation.
- Fix reporting back of unsent data.
- Update assoc send counter handling to be consistent with endpoint sent counter.
- Fix a bug in PR-SCTP.
- Fix so we only send another FWD-TSN when a SACK arrives if and only if the
  adv-peer-ack point progressed. However, we still make sure a timer is running
  if we do have an adv_peer_ack point.
- Fix PR-SCTP bug where chunks were retransmitted if they were sent unreliably
  but not abandoned yet.

With the help of: Michael Tuexen and Peter Lei :-)
MFC after: 4 weeks
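The FWD-TSN change noted above (send another FWD-TSN on SACK arrival only when the
advanced peer-ack point actually progressed, while keeping a retransmission timer
armed) is easiest to read in isolation. A minimal sketch of that check, using the
same helpers the hunks below rely on; illustrative only, not the committed hunk:

	/* Sketch: PR-SCTP FWD-TSN decision when a SACK is processed. */
	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
		struct sctp_tmit_chunk *lchk;
		uint32_t old_adv_peer_ack_point;

		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack, MAX_TSN) &&
		    compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point, MAX_TSN)) {
			/* the ack point moved past the cum-ack: tell the peer */
			send_forward_tsn(stcb, asoc);
			/* (ECN nonce resync bookkeeping omitted in this sketch) */
		}
		if (lchk) {
			/* a timer stays armed as long as an adv_peer_ack point exists */
			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
			    stcb->sctp_ep, stcb, lchk->whoTo);
		}
	}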
Diffstat (limited to 'sys/netinet/sctp_indata.c')
-rw-r--r--	sys/netinet/sctp_indata.c	2796
1 file changed, 2717 insertions(+), 79 deletions(-)
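Most of the added hunks below repeat one idiom: mapping a TSN onto a bit offset
("gap") in the nr_mapping_array relative to the array's base TSN, allowing for
32-bit wrap-around. A self-contained sketch of that calculation; the helper name
is illustrative and not part of the commit:

	#include <stdint.h>

	#define MAX_TSN 0xffffffff

	/*
	 * Bit offset of 'tsn' from the array's base TSN, accounting for
	 * wrap past 0xffffffff (mirrors the nr_gap computation in the diff).
	 */
	static uint32_t
	tsn_to_gap(uint32_t tsn, uint32_t base_tsn)
	{
		if (tsn >= base_tsn)
			return (tsn - base_tsn);
		return ((MAX_TSN - base_tsn) + tsn + 1);
	}

In the patch, the resulting gap is range-checked against the array size (in bits,
i.e. nr_mapping_array_size << 3) before SCTP_SET_TSN_PRESENT() marks the bit and
highest_tsn_inside_nr_map is advanced.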
diff --git a/sys/netinet/sctp_indata.c b/sys/netinet/sctp_indata.c
index 20d153e..a1a8d7a 100644
--- a/sys/netinet/sctp_indata.c
+++ b/sys/netinet/sctp_indata.c
@@ -300,6 +300,10 @@ sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
uint16_t stream_no;
int end = 0;
int cntDel;
+
+ /* EY if any out-of-order delivered, then tag it nr on nr_map */
+ uint32_t nr_tsn, nr_gap;
+
struct sctp_queued_to_read *control, *ctl, *ctlat;
if (stcb == NULL)
@@ -407,6 +411,42 @@ abandon:
}
/* pull it we did it */
TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
+ /*
+ * EY this is the chunk that should be tagged nr gapped
+ * calculate the gap and such then tag this TSN nr
+ * chk->rec.data.TSN_seq
+ */
+ /*
+ * EY!-TODO- this tsn should be tagged nr only if it is
+ * out-of-order, the if statement should be modified
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+
+ nr_tsn = chk->rec.data.TSN_seq;
+ if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
+ nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
+ } else {
+ nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
+ }
+ if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
+ (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
+ /*
+ * EY The 1st should never happen, as in
+ * process_a_data_chunk method this check
+ * should be done
+ */
+ /*
+ * EY The 2nd should never happen, because
+ * nr_mapping_array is always expanded when
+ * mapping_array is expanded
+ */
+ } else {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
+ if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ asoc->highest_tsn_inside_nr_map = nr_tsn;
+ }
+ }
if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
asoc->fragmented_delivery_inprogress = 0;
if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
@@ -453,9 +493,67 @@ abandon:
asoc->size_on_all_streams -= ctl->length;
sctp_ucount_decr(asoc->cnt_on_all_streams);
strm->last_sequence_delivered++;
+ /*
+ * EY will be used to
+ * calculate nr-gap
+ */
+ nr_tsn = ctl->sinfo_tsn;
sctp_add_to_readq(stcb->sctp_ep, stcb,
ctl,
&stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
+ /*
+ * EY -now something is
+ * delivered, calculate
+ * nr_gap and tag this tsn
+ * NR
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+
+ if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
+ nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
+ } else {
+ nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
+ }
+ if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
+ (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
+					/*
+					 * EY The 1st should never happen, as in
+					 * process_a_data_chunk method this check
+					 * should be done
+					 */
+					/*
+					 * EY The 2nd should never happen, because
+					 * nr_mapping_array is always expanded when
+					 * mapping_array is expanded
+					 */
+ } else {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
+ if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ asoc->highest_tsn_inside_nr_map = nr_tsn;
+ }
+ }
ctl = ctlat;
} else {
break;
@@ -504,6 +602,9 @@ sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
uint16_t nxt_todel;
struct mbuf *oper;
+ /* EY- will be used to calculate nr-gap for a tsn */
+ uint32_t nr_tsn, nr_gap;
+
queue_needed = 1;
asoc->size_on_all_streams += control->length;
sctp_ucount_incr(asoc->cnt_on_all_streams);
@@ -560,13 +661,48 @@ protocol_error:
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
}
+ /* EY it wont be queued if it could be delivered directly */
queue_needed = 0;
asoc->size_on_all_streams -= control->length;
sctp_ucount_decr(asoc->cnt_on_all_streams);
strm->last_sequence_delivered++;
+ /* EY will be used to calculate nr-gap */
+ nr_tsn = control->sinfo_tsn;
sctp_add_to_readq(stcb->sctp_ep, stcb,
control,
&stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
+
+ /*
+ * EY this is the chunk that should be tagged nr gapped
+ * calculate the gap and such then tag this TSN nr
+ * chk->rec.data.TSN_seq
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+
+ if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
+ nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
+ } else {
+ nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
+ }
+ if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
+ (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
+ /*
+ * EY The 1st should never happen, as in
+ * process_a_data_chunk method this check
+ * should be done
+ */
+ /*
+ * EY The 2nd should never happen, because
+ * nr_mapping_array is always expanded when
+ * mapping_array is expanded
+ */
+ } else {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
+ if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ asoc->highest_tsn_inside_nr_map = nr_tsn;
+ }
+ }
control = TAILQ_FIRST(&strm->inqueue);
while (control != NULL) {
/* all delivered */
@@ -587,9 +723,47 @@ protocol_error:
sctp_log_strm_del(control, NULL,
SCTP_STR_LOG_FROM_IMMED_DEL);
}
+ /* EY will be used to calculate nr-gap */
+ nr_tsn = control->sinfo_tsn;
sctp_add_to_readq(stcb->sctp_ep, stcb,
control,
&stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
+ /*
+ * EY this is the chunk that should be
+ * tagged nr gapped calculate the gap and
+ * such then tag this TSN nr
+ * chk->rec.data.TSN_seq
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+
+ if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
+ nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
+ } else {
+ nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
+ }
+ if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
+ (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
+ /*
+ * EY The 1st should never
+ * happen, as in
+ * process_a_data_chunk
+ * method this check should
+ * be done
+ */
+ /*
+ * EY The 2nd should never
+ * happen, because
+ * nr_mapping_array is
+ * always expanded when
+ * mapping_array is expanded
+ */
+ } else {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
+ if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ asoc->highest_tsn_inside_nr_map = nr_tsn;
+ }
+ }
control = at;
continue;
}
@@ -1392,6 +1566,9 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
/* struct sctp_tmit_chunk *chk; */
struct sctp_tmit_chunk *chk;
uint32_t tsn, gap;
+
+ /* EY - for nr_sack */
+ uint32_t nr_gap;
struct mbuf *dmbuf;
int indx, the_len;
int need_reasm_check = 0;
@@ -1447,6 +1624,9 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
return (0);
}
}
+ /* EY - for nr_sack */
+ nr_gap = gap;
+
if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
*high_tsn = tsn;
}
@@ -1563,9 +1743,17 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
SCTP_STAT_INCR(sctps_badsid);
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
+ /* EY set this tsn present in nr_sack's nr_mapping_array */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+ }
if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
/* we have a new high score */
asoc->highest_tsn_inside_map = tsn;
+ /* EY nr_sack version of the above */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
+ asoc->highest_tsn_inside_nr_map = tsn;
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
@@ -1716,6 +1904,48 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
goto failed_express_del;
}
sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
+
+ /*
+ * EY here I should check if this delivered tsn is
+ * out_of_order, if yes then update the nr_map
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+ /*
+ * EY check if the mapping_array and nr_mapping
+ * array are consistent
+ */
+ if (asoc->mapping_array_base_tsn != asoc->nr_mapping_array_base_tsn)
+ /*
+ * printf("EY-IN
+ * sctp_process_a_data_chunk(5): Something
+ * is wrong the map base tsn" "\nEY-and
+ * nr_map base tsn should be equal.");
+ */
+ /* EY debugging block */
+ {
+ /*
+ * printf("\nEY-Calculating an
+ * nr_gap!!\nmapping_array_size = %d
+ * nr_mapping_array_size = %d"
+ * "\nEY-mapping_array_base = %d
+ * nr_mapping_array_base =
+ * %d\nEY-highest_tsn_inside_map = %d"
+ * "highest_tsn_inside_nr_map = %d\nEY-TSN =
+ * %d nr_gap = %d",asoc->mapping_array_size,
+ * asoc->nr_mapping_array_size,
+ * asoc->mapping_array_base_tsn,
+ * asoc->nr_mapping_array_base_tsn,
+ * asoc->highest_tsn_inside_map,
+ * asoc->highest_tsn_inside_nr_map,tsn,nr_gap
+ * );
+ */
+ }
+		/* EY - not 100% sure about the lock thing */
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
+ if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
+ asoc->highest_tsn_inside_nr_map = tsn;
+ }
if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
/* for ordered, bump what we delivered */
asoc->strmin[strmno].last_sequence_delivered++;
@@ -1761,6 +1991,38 @@ failed_express_del:
SCTP_PRINTF("Append fails end:%d\n", end);
goto failed_pdapi_express_del;
}
+ /*
+ * EY It is appended to the read queue in prev if
+ * block here I should check if this delivered tsn
+ * is out_of_order, if yes then update the nr_map
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+ /* EY debugging block */
+ {
+			/*
+			 * printf("\nEY-Calculating an nr_gap!!\nEY-mapping_array_size = %d
+			 * nr_mapping_array_size = %d" "\nEY-mapping_array_base = %d
+			 * nr_mapping_array_base = %d\nEY-highest_tsn_inside_map = %d"
+			 * "highest_tsn_inside_nr_map = %d\nEY-TSN = %d nr_gap = %d",
+			 * asoc->mapping_array_size, asoc->nr_mapping_array_size,
+			 * asoc->mapping_array_base_tsn, asoc->nr_mapping_array_base_tsn,
+			 * asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map,
+			 * tsn, nr_gap);
+			 */
+ }
+		/* EY - not 100% sure about the lock thing */
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
+ if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
+ asoc->highest_tsn_inside_nr_map = tsn;
+ }
SCTP_STAT_INCR(sctps_recvexpressm);
control->sinfo_tsn = tsn;
asoc->tsn_last_delivered = tsn;
@@ -1978,6 +2240,55 @@ failed_pdapi_express_del:
sctp_add_to_readq(stcb->sctp_ep, stcb,
control,
&stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
+
+ /*
+ * EY It is added to the read queue in prev if block
+ * here I should check if this delivered tsn is
+ * out_of_order, if yes then update the nr_map
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+ /*
+ * EY check if the mapping_array and
+ * nr_mapping array are consistent
+ */
+ if (asoc->mapping_array_base_tsn != asoc->nr_mapping_array_base_tsn)
+ /*
+ * printf("EY-IN
+ * sctp_process_a_data_chunk(6):
+ * Something is wrong the map base
+ * tsn" "\nEY-and nr_map base tsn
+ * should be equal.");
+ */
+			/*
+			 * EY - not 100% sure about the lock thing;
+			 * I think we don't need the below.
+			 */
+ /* SCTP_TCB_LOCK_ASSERT(stcb); */
+ {
+			/*
+			 * printf("\nEY-Calculating an nr_gap!!\nEY-mapping_array_size = %d
+			 * nr_mapping_array_size = %d" "\nEY-mapping_array_base = %d
+			 * nr_mapping_array_base = %d\nEY-highest_tsn_inside_map = %d"
+			 * "highest_tsn_inside_nr_map = %d\nEY-TSN = %d nr_gap = %d",
+			 * asoc->mapping_array_size, asoc->nr_mapping_array_size,
+			 * asoc->mapping_array_base_tsn, asoc->nr_mapping_array_base_tsn,
+			 * asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map,
+			 * tsn, nr_gap);
+			 */
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
+ if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
+ asoc->highest_tsn_inside_nr_map = tsn;
+ }
} else {
/*
* Special check for when streams are resetting. We
@@ -2185,9 +2496,20 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
int at;
int last_all_ones = 0;
int slide_from, slide_end, lgap, distance;
+
+ /* EY nr_mapping array variables */
+ int nr_at;
+ int nr_last_all_ones = 0;
+ int nr_slide_from, nr_slide_end, nr_lgap, nr_distance;
+
uint32_t old_cumack, old_base, old_highest;
unsigned char aux_array[64];
+ /*
+	 * EY! Don't think this is required but I am imitating the code for
+	 * the map just to make sure
+ */
+ unsigned char nr_aux_array[64];
asoc = &stcb->asoc;
at = 0;
@@ -2200,7 +2522,29 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
asoc->mapping_array_size);
else
memcpy(aux_array, asoc->mapping_array, 64);
+ /* EY do the same for nr_mapping_array */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+ if (asoc->nr_mapping_array_size != asoc->mapping_array_size) {
+ /*
+ * printf("\nEY-IN sack_check method: \nEY-" "The
+ * size of map and nr_map are inconsitent")
+ */ ;
+ }
+ if (asoc->nr_mapping_array_base_tsn != asoc->mapping_array_base_tsn) {
+ /*
+ * printf("\nEY-IN sack_check method VERY CRUCIAL
+ * error: \nEY-" "The base tsns of map and nr_map
+			 * are inconsistent")
+ */ ;
+ }
+		/* EY! just imitating the above code */
+ if (asoc->nr_mapping_array_size < 64)
+ memcpy(nr_aux_array, asoc->nr_mapping_array,
+ asoc->nr_mapping_array_size);
+ else
+ memcpy(aux_array, asoc->nr_mapping_array, 64);
+ }
/*
* We could probably improve this a small bit by calculating the
* offset of the current cum-ack as the starting point.
@@ -2235,6 +2579,7 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
+ asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
#endif
}
if ((asoc->cumulative_tsn == asoc->highest_tsn_inside_map) && (at >= 8)) {
@@ -2251,6 +2596,17 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
memset(asoc->mapping_array, 0, clr);
/* base becomes one ahead of the cum-ack */
asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
+
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+
+ if (clr > asoc->nr_mapping_array_size)
+ clr = asoc->nr_mapping_array_size;
+
+ memset(asoc->nr_mapping_array, 0, clr);
+ /* base becomes one ahead of the cum-ack */
+ asoc->nr_mapping_array_base_tsn = asoc->cumulative_tsn + 1;
+ asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
+ }
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(old_base, old_cumack, old_highest,
SCTP_MAP_PREPARE_SLIDE);
@@ -2330,6 +2686,103 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
}
}
/*
+ * EY if doing nr_sacks then slide the nr_mapping_array accordingly
+ * please
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+
+ nr_at = 0;
+ for (nr_slide_from = 0; nr_slide_from < stcb->asoc.nr_mapping_array_size; nr_slide_from++) {
+
+ if (asoc->nr_mapping_array[nr_slide_from] == 0xff) {
+ nr_at += 8;
+ nr_last_all_ones = 1;
+ } else {
+ /* there is a 0 bit */
+ nr_at += sctp_map_lookup_tab[asoc->nr_mapping_array[nr_slide_from]];
+ nr_last_all_ones = 0;
+ break;
+ }
+ }
+
+ nr_at++;
+
+ if (compare_with_wrap(asoc->cumulative_tsn,
+ asoc->highest_tsn_inside_nr_map, MAX_TSN) && (at >= 8)) {
+ /* The complete array was completed by a single FR */
+			/* highest becomes the cum-ack */
+ int clr;
+
+ clr = (nr_at >> 3) + 1;
+
+ if (clr > asoc->nr_mapping_array_size)
+ clr = asoc->nr_mapping_array_size;
+
+ memset(asoc->nr_mapping_array, 0, clr);
+ /* base becomes one ahead of the cum-ack */
+ asoc->nr_mapping_array_base_tsn = asoc->cumulative_tsn + 1;
+ asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
+
+ } else if (nr_at >= 8) {
+ /* we can slide the mapping array down */
+			/* Calculate the new byte position we can move down */
+
+ /*
+ * now calculate the ceiling of the move using our
+ * highest TSN value
+ */
+ if (asoc->highest_tsn_inside_nr_map >= asoc->nr_mapping_array_base_tsn) {
+ nr_lgap = asoc->highest_tsn_inside_nr_map -
+ asoc->nr_mapping_array_base_tsn;
+ } else {
+ nr_lgap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) +
+ asoc->highest_tsn_inside_nr_map + 1;
+ }
+ nr_slide_end = nr_lgap >> 3;
+ if (nr_slide_end < nr_slide_from) {
+#ifdef INVARIANTS
+ panic("impossible slide");
+#else
+ printf("impossible slide?\n");
+ return;
+#endif
+ }
+ if (nr_slide_end > asoc->nr_mapping_array_size) {
+#ifdef INVARIANTS
+ panic("would overrun buffer");
+#else
+ printf("Gak, would have overrun map end:%d nr_slide_end:%d\n",
+ asoc->nr_mapping_array_size, nr_slide_end);
+ nr_slide_end = asoc->nr_mapping_array_size;
+#endif
+ }
+ nr_distance = (nr_slide_end - nr_slide_from) + 1;
+
+ if (nr_distance + nr_slide_from > asoc->nr_mapping_array_size ||
+ nr_distance < 0) {
+ /*
+ * Here we do NOT slide forward the array so
+ * that hopefully when more data comes in to
+ * fill it up we will be able to slide it
+ * forward. Really I don't think this should
+ * happen :-0
+ */
+ ;
+ } else {
+ int ii;
+
+ for (ii = 0; ii < nr_distance; ii++) {
+ asoc->nr_mapping_array[ii] =
+ asoc->nr_mapping_array[nr_slide_from + ii];
+ }
+ for (ii = nr_distance; ii <= nr_slide_end; ii++) {
+ asoc->nr_mapping_array[ii] = 0;
+ }
+ asoc->nr_mapping_array_base_tsn += (nr_slide_from << 3);
+ }
+ }
+ }
+ /*
* Now we need to see if we need to queue a sack or just start the
* timer (if allowed).
*/
@@ -2345,7 +2798,14 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
}
sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
- sctp_send_sack(stcb);
+		/*
+		 * EY if nr_sacks are used then send an nr-sack,
+		 * otherwise a sack
+		 */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
+ sctp_send_nr_sack(stcb);
+ else
+ sctp_send_sack(stcb);
} else {
int is_a_gap;
@@ -2398,7 +2858,14 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
* duplicates.
*/
(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
- sctp_send_sack(stcb);
+				/*
+				 * EY if nr_sacks are used then send an
+				 * nr-sack, otherwise a sack
+				 */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
+ sctp_send_nr_sack(stcb);
+ else
+ sctp_send_sack(stcb);
}
} else {
if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
@@ -2608,6 +3075,7 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
case SCTP_INITIATION:
case SCTP_INITIATION_ACK:
case SCTP_SELECTIVE_ACK:
+ case SCTP_NR_SELECTIVE_ACK: /* EY */
case SCTP_HEARTBEAT_REQUEST:
case SCTP_HEARTBEAT_ACK:
case SCTP_ABORT_ASSOCIATION:
@@ -2738,7 +3206,14 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
}
- sctp_send_sack(stcb);
+		/*
+		 * EY if nr_sacks are used then send an nr-sack,
+		 * otherwise a sack
+		 */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
+ sctp_send_nr_sack(stcb);
+ else
+ sctp_send_sack(stcb);
} else {
if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
sctp_timer_start(SCTP_TIMER_TYPE_RECV,
@@ -2794,7 +3269,7 @@ sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct
for (i = 0; i < num_seg; i++) {
frag_strt = ntohs(frag->start);
frag_end = ntohs(frag->end);
- /* some sanity checks on the fargment offsets */
+ /* some sanity checks on the fragment offsets */
if (frag_strt > frag_end) {
/* this one is malformed, skip */
frag++;
@@ -2829,7 +3304,7 @@ sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct
* reset the queue this will cause extra
* hunting but hey, they chose the
* performance hit when they failed to order
- * there gaps..
+ * their gaps
*/
tp1 = TAILQ_FIRST(&asoc->sent_queue);
}
@@ -3136,6 +3611,7 @@ sctp_check_for_revoked(struct sctp_tcb *stcb,
}
}
+
static void
sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved)
@@ -3650,51 +4126,6 @@ sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
/* advance PeerAckPoint goes forward */
asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
a_adv = tp1;
- /*
- * we don't want to de-queue it here. Just wait for
- * the next peer SACK to come with a new cumTSN and
- * then the chunk will be droped in the normal
- * fashion.
- */
- if (tp1->data) {
- sctp_free_bufspace(stcb, asoc, tp1, 1);
- /*
- * Maybe there should be another
- * notification type
- */
- sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
- (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
- tp1, SCTP_SO_NOT_LOCKED);
- sctp_m_freem(tp1->data);
- tp1->data = NULL;
- if (stcb->sctp_socket) {
-#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
- struct socket *so;
-
- so = SCTP_INP_SO(stcb->sctp_ep);
- atomic_add_int(&stcb->asoc.refcnt, 1);
- SCTP_TCB_UNLOCK(stcb);
- SCTP_SOCKET_LOCK(so, 1);
- SCTP_TCB_LOCK(stcb);
- atomic_subtract_int(&stcb->asoc.refcnt, 1);
- if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
- /*
- * assoc was freed while we
- * were unlocked
- */
- SCTP_SOCKET_UNLOCK(so, 1);
- return (NULL);
- }
-#endif
- sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
-#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
- SCTP_SOCKET_UNLOCK(so, 1);
-#endif
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
- sctp_wakeup_log(stcb, tp1->rec.data.TSN_seq, 1, SCTP_WAKESND_FROM_FWDTSN);
- }
- }
- }
} else {
/*
* If it is still in RESEND we can advance no
@@ -4268,6 +4699,38 @@ again:
stcb->sctp_ep, stcb, asoc->primary_destination);
}
}
+ /* PR-Sctp issues need to be addressed too */
+ if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
+ struct sctp_tmit_chunk *lchk;
+ uint32_t old_adv_peer_ack_point;
+
+ old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
+ lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
+ /* C3. See if we need to send a Fwd-TSN */
+ if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
+ MAX_TSN)) {
+ /*
+ * ISSUE with ECN, see FWD-TSN processing for notes
+ * on issues that will occur when the ECN NONCE
+ * stuff is put into SCTP for cross checking.
+ */
+ if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
+ MAX_TSN)) {
+ send_forward_tsn(stcb, asoc);
+ /*
+ * ECN Nonce: Disable Nonce Sum check when
+ * FWD TSN is sent and store resync tsn
+ */
+ asoc->nonce_sum_check = 0;
+ asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
+ }
+ }
+ if (lchk) {
+ /* Assure a timer is up */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, lchk->whoTo);
+ }
+ }
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
rwnd,
@@ -4991,34 +5454,6 @@ done_with_it:
if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
asoc->advanced_peer_ack_point = cum_ack;
}
- /* C2. try to further move advancedPeerAckPoint ahead */
- if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
- struct sctp_tmit_chunk *lchk;
-
- lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
- /* C3. See if we need to send a Fwd-TSN */
- if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
- MAX_TSN)) {
- /*
- * ISSUE with ECN, see FWD-TSN processing for notes
- * on issues that will occur when the ECN NONCE
- * stuff is put into SCTP for cross checking.
- */
- send_forward_tsn(stcb, asoc);
-
- /*
- * ECN Nonce: Disable Nonce Sum check when FWD TSN
- * is sent and store resync tsn
- */
- asoc->nonce_sum_check = 0;
- asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
- if (lchk) {
- /* Assure a timer is up */
- sctp_timer_start(SCTP_TIMER_TYPE_SEND,
- stcb->sctp_ep, stcb, lchk->whoTo);
- }
- }
- }
/* JRS - Use the congestion control given in the CC module */
asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
@@ -5180,6 +5615,38 @@ again:
done_once = 1;
goto again;
}
+ /* C2. try to further move advancedPeerAckPoint ahead */
+ if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
+ struct sctp_tmit_chunk *lchk;
+ uint32_t old_adv_peer_ack_point;
+
+ old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
+ lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
+ /* C3. See if we need to send a Fwd-TSN */
+ if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
+ MAX_TSN)) {
+ /*
+ * ISSUE with ECN, see FWD-TSN processing for notes
+ * on issues that will occur when the ECN NONCE
+ * stuff is put into SCTP for cross checking.
+ */
+ if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
+ MAX_TSN)) {
+ send_forward_tsn(stcb, asoc);
+ /*
+ * ECN Nonce: Disable Nonce Sum check when
+ * FWD TSN is sent and store resync tsn
+ */
+ asoc->nonce_sum_check = 0;
+ asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
+ }
+ }
+ if (lchk) {
+ /* Assure a timer is up */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, lchk->whoTo);
+ }
+ }
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
a_rwnd,
@@ -5212,6 +5679,9 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
struct sctp_association *asoc;
int tt;
+ /* EY -used to calculate nr_gap information */
+ uint32_t nr_tsn, nr_gap;
+
asoc = &stcb->asoc;
tt = strmin->last_sequence_delivered;
/*
@@ -5230,9 +5700,85 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
sctp_ucount_decr(asoc->cnt_on_all_streams);
/* deliver it to at least the delivery-q */
if (stcb->sctp_socket) {
+ /* EY need the tsn info for calculating nr */
+ nr_tsn = ctl->sinfo_tsn;
sctp_add_to_readq(stcb->sctp_ep, stcb,
ctl,
&stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
+ /*
+ * EY this is the chunk that should be
+ * tagged nr gapped calculate the gap and
+ * such then tag this TSN nr
+ * chk->rec.data.TSN_seq
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+
+ if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
+ nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
+ } else {
+ nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
+ }
+ if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
+ (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
+ /*
+ * EY These should never
+ * happen- explained before
+ */
+ } else {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
+ if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ asoc->highest_tsn_inside_nr_map = nr_tsn;
+ }
+
+ if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap))
+				/*
+				 * printf("In sctp_kick_prsctp_reorder_queue(7): Something wrong,
+				 * the TSN to be tagged" "\nas NR is not even in the mapping_array,
+				 * or map and nr_map are inconsistent");
+				 */
+				/*
+				 * EY - not 100% sure about the lock thing,
+				 * don't think it's required
+				 */
+				/* SCTP_TCB_LOCK_ASSERT(stcb); */
+				{
+					/*
+					 * printf("\nCalculating an nr_gap!!\nmapping_array_size = %d
+					 * nr_mapping_array_size = %d" "\nmapping_array_base = %d
+					 * nr_mapping_array_base = %d\nhighest_tsn_inside_map = %d"
+					 * "highest_tsn_inside_nr_map = %d\nTSN = %d nr_gap = %d",
+					 * asoc->mapping_array_size, asoc->nr_mapping_array_size,
+					 * asoc->mapping_array_base_tsn, asoc->nr_mapping_array_base_tsn,
+					 * asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map,
+					 * tsn, nr_gap);
+					 */
+				}
+ }
}
} else {
/* no more delivery now. */
@@ -5257,9 +5803,86 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
/* deliver it to at least the delivery-q */
strmin->last_sequence_delivered = ctl->sinfo_ssn;
if (stcb->sctp_socket) {
+ /* EY */
+ nr_tsn = ctl->sinfo_tsn;
sctp_add_to_readq(stcb->sctp_ep, stcb,
ctl,
&stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
+ /*
+ * EY this is the chunk that should be
+ * tagged nr gapped calculate the gap and
+ * such then tag this TSN nr
+ * chk->rec.data.TSN_seq
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+
+ if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
+ nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
+ } else {
+ nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
+ }
+ if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
+ (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
+ /*
+ * EY These should never
+ * happen, explained before
+ */
+ } else {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
+ if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ asoc->highest_tsn_inside_nr_map = nr_tsn;
+ }
+
+
+ if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap))
+				/*
+				 * printf("In sctp_kick_prsctp_reorder_queue(8): Something wrong,
+				 * the TSN to be tagged" "\nas NR is not even in the mapping_array,
+				 * or map and nr_map are inconsistent");
+				 */
+				/*
+				 * EY - not 100% sure about the lock thing,
+				 * don't think it's required
+				 */
+				/* SCTP_TCB_LOCK_ASSERT(stcb); */
+				{
+					/*
+					 * printf("\nCalculating an nr_gap!!\nmapping_array_size = %d
+					 * nr_mapping_array_size = %d" "\nmapping_array_base = %d
+					 * nr_mapping_array_base = %d\nhighest_tsn_inside_map = %d"
+					 * "highest_tsn_inside_nr_map = %d\nTSN = %d nr_gap = %d",
+					 * asoc->mapping_array_size, asoc->nr_mapping_array_size,
+					 * asoc->mapping_array_base_tsn, asoc->nr_mapping_array_base_tsn,
+					 * asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map,
+					 * tsn, nr_gap);
+					 */
+				}
+ }
}
tt = strmin->last_sequence_delivered + 1;
} else {
@@ -5324,6 +5947,12 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
MAX_TSN)) {
asoc->highest_tsn_inside_map = new_cum_tsn;
+ /* EY nr_mapping_array version of the above */
+ /*
+ * if(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
+ * asoc->peer_supports_nr_sack)
+ */
+ asoc->highest_tsn_inside_nr_map = new_cum_tsn;
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
@@ -5382,7 +6011,19 @@ slide_out:
cumack_set_flag = 1;
asoc->mapping_array_base_tsn = new_cum_tsn + 1;
asoc->cumulative_tsn = asoc->highest_tsn_inside_map = new_cum_tsn;
-
+ /* EY - nr_sack: nr_mapping_array version of the above */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+ memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
+ asoc->nr_mapping_array_base_tsn = new_cum_tsn + 1;
+ asoc->highest_tsn_inside_nr_map = new_cum_tsn;
+ if (asoc->nr_mapping_array_size != asoc->mapping_array_size) {
+ /*
+ * printf("IN sctp_handle_forward_tsn:
+ * Something is wrong the size of" "map and
+ * nr_map should be equal!")
+ */ ;
+ }
+ }
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
@@ -5569,3 +6210,2000 @@ slide_out:
sctp_deliver_reasm_check(stcb, &stcb->asoc);
}
}
+
+/* EY fully identical to sctp_express_handle_sack, duplicated only for naming convention */
+void
+sctp_express_handle_nr_sack(struct sctp_tcb *stcb, uint32_t cumack,
+ uint32_t rwnd, int nonce_sum_flag, int *abort_now)
+{
+ struct sctp_nets *net;
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *tp1, *tp2;
+ uint32_t old_rwnd;
+ int win_probe_recovery = 0;
+ int win_probe_recovered = 0;
+ int j, done_once = 0;
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
+ sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
+ rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
+ stcb->asoc.cumack_log_at++;
+ if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
+ stcb->asoc.cumack_log_at = 0;
+ }
+#endif
+ asoc = &stcb->asoc;
+ old_rwnd = asoc->peers_rwnd;
+ if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
+ /* old ack */
+ return;
+ } else if (asoc->last_acked_seq == cumack) {
+ /* Window update sack */
+ asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
+ (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ if (asoc->peers_rwnd > old_rwnd) {
+ goto again;
+ }
+ return;
+ }
+ /* First setup for CC stuff */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->prev_cwnd = net->cwnd;
+ net->net_ack = 0;
+ net->net_ack2 = 0;
+
+ /*
+ * CMT: Reset CUC and Fast recovery algo variables before
+ * SACK processing
+ */
+ net->new_pseudo_cumack = 0;
+ net->will_exit_fast_recovery = 0;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
+ uint32_t send_s;
+
+ if (!TAILQ_EMPTY(&asoc->sent_queue)) {
+ tp1 = TAILQ_LAST(&asoc->sent_queue,
+ sctpchunk_listhead);
+ send_s = tp1->rec.data.TSN_seq + 1;
+ } else {
+ send_s = asoc->sending_seq;
+ }
+ if ((cumack == send_s) ||
+ compare_with_wrap(cumack, send_s, MAX_TSN)) {
+#ifndef INVARIANTS
+ struct mbuf *oper;
+
+#endif
+#ifdef INVARIANTS
+ panic("Impossible sack 1");
+#else
+ *abort_now = 1;
+ /* XXX */
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+ return;
+#endif
+ }
+ }
+ asoc->this_sack_highest_gap = cumack;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INDATA,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
+ /* process the new consecutive TSN first */
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ while (tp1) {
+ tp2 = TAILQ_NEXT(tp1, sctp_next);
+ if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
+ MAX_TSN) ||
+ cumack == tp1->rec.data.TSN_seq) {
+ if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
+ printf("Warning, an unsent is now acked?\n");
+ }
+ /*
+ * ECN Nonce: Add the nonce to the sender's
+ * nonce sum
+ */
+ asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
+ if (tp1->sent < SCTP_DATAGRAM_ACKED) {
+ /*
+ * If it is less than ACKED, it is
+ * now no-longer in flight. Higher
+ * values may occur during marking
+ */
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uintptr_t) tp1->whoTo,
+ tp1->rec.data.TSN_seq);
+ }
+ sctp_flight_size_decrease(tp1);
+ /* sa_ignore NO_NULL_CHK */
+ sctp_total_flight_decrease(stcb, tp1);
+ }
+ tp1->whoTo->net_ack += tp1->send_size;
+ if (tp1->snd_count < 2) {
+					/* True non-retransmitted chunk */
+ tp1->whoTo->net_ack2 +=
+ tp1->send_size;
+
+ /* update RTO too? */
+ if (tp1->do_rtt) {
+ tp1->whoTo->RTO =
+ /*
+ * sa_ignore
+ * NO_NULL_CHK
+ */
+ sctp_calculate_rto(stcb,
+ asoc, tp1->whoTo,
+ &tp1->sent_rcv_time,
+ sctp_align_safe_nocopy);
+ tp1->do_rtt = 0;
+ }
+ }
+ /*
+ * CMT: CUCv2 algorithm. From the
+ * cumack'd TSNs, for each TSN being
+ * acked for the first time, set the
+ * following variables for the
+ * corresp destination.
+ * new_pseudo_cumack will trigger a
+ * cwnd update.
+ * find_(rtx_)pseudo_cumack will
+ * trigger search for the next
+ * expected (rtx-)pseudo-cumack.
+ */
+ tp1->whoTo->new_pseudo_cumack = 1;
+ tp1->whoTo->find_pseudo_cumack = 1;
+ tp1->whoTo->find_rtx_pseudo_cumack = 1;
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ /* sa_ignore NO_NULL_CHK */
+ sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
+ }
+ }
+ if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_decr(asoc->sent_queue_retran_cnt);
+ }
+ if (tp1->rec.data.chunk_was_revoked) {
+ /* deflate the cwnd */
+ tp1->whoTo->cwnd -= tp1->book_size;
+ tp1->rec.data.chunk_was_revoked = 0;
+ }
+ tp1->sent = SCTP_DATAGRAM_ACKED;
+ TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
+ if (tp1->data) {
+ /* sa_ignore NO_NULL_CHK */
+ sctp_free_bufspace(stcb, asoc, tp1, 1);
+ sctp_m_freem(tp1->data);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(asoc->last_acked_seq,
+ cumack,
+ tp1->rec.data.TSN_seq,
+ 0,
+ 0,
+ SCTP_LOG_FREE_SENT);
+ }
+ tp1->data = NULL;
+ asoc->sent_queue_cnt--;
+ sctp_free_a_chunk(stcb, tp1);
+ tp1 = tp2;
+ } else {
+ break;
+ }
+ }
+
+ }
+ /* sa_ignore NO_NULL_CHK */
+ if (stcb->sctp_socket) {
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+
+ SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
+ /* sa_ignore NO_NULL_CHK */
+ sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
+ }
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ /* assoc was freed while we were unlocked */
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+#endif
+ sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
+ sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
+ }
+ }
+
+ /* JRS - Use the congestion control given in the CC module */
+ if (asoc->last_acked_seq != cumack)
+ asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
+
+ asoc->last_acked_seq = cumack;
+
+ if (TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* nothing left in-flight */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->flight_size = 0;
+ net->partial_bytes_acked = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ }
+ /* Fix up the a-p-a-p for future PR-SCTP sends */
+ if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
+ asoc->advanced_peer_ack_point = cumack;
+ }
+ /* ECN Nonce updates */
+ if (asoc->ecn_nonce_allowed) {
+ if (asoc->nonce_sum_check) {
+ if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
+ if (asoc->nonce_wait_for_ecne == 0) {
+ struct sctp_tmit_chunk *lchk;
+
+ lchk = TAILQ_FIRST(&asoc->send_queue);
+ asoc->nonce_wait_for_ecne = 1;
+ if (lchk) {
+ asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
+ } else {
+ asoc->nonce_wait_tsn = asoc->sending_seq;
+ }
+ } else {
+ if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
+ (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
+ /*
+ * Misbehaving peer. We need
+ * to react to this guy
+ */
+ asoc->ecn_allowed = 0;
+ asoc->ecn_nonce_allowed = 0;
+ }
+ }
+ }
+ } else {
+ /* See if Resynchronization Possible */
+ if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
+ asoc->nonce_sum_check = 1;
+ /*
+ * now we must calculate what the base is.
+ * We do this based on two things, we know
+ * the total's for all the segments
+ * gap-acked in the SACK (none), We also
+ * know the SACK's nonce sum, its in
+ * nonce_sum_flag. So we can build a truth
+ * table to back-calculate the new value of
+ * asoc->nonce_sum_expect_base:
+ *
+			 * SACK-flag-Value   Seg-Sums   Base
+			 *        0              0        0
+			 *        1              0        1
+			 *        0              1        1
+			 *        1              1        0
+ */
+ asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
+ }
+ }
+ }
+ /* RWND update */
+ asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
+ (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ if (asoc->peers_rwnd > old_rwnd) {
+ win_probe_recovery = 1;
+ }
+ /* Now assure a timer where data is queued at */
+again:
+ j = 0;
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (win_probe_recovery && (net->window_probe)) {
+ net->window_probe = 0;
+ win_probe_recovered = 1;
+ /*
+ * Find first chunk that was used with window probe
+ * and clear the sent
+ */
+ /* sa_ignore FREED_MEMORY */
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if (tp1->window_probe) {
+ /* move back to data send queue */
+ sctp_window_probe_recovery(stcb, asoc, net, tp1);
+ break;
+ }
+ }
+ }
+ if (net->flight_size) {
+ int to_ticks;
+
+ if (net->RTO == 0) {
+ to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = MSEC_TO_TICKS(net->RTO);
+ }
+ j++;
+ (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
+ sctp_timeout_handler, &net->rxt_timer);
+ } else {
+ if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
+ if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
+ }
+ }
+ }
+ }
+ if ((j == 0) &&
+ (!TAILQ_EMPTY(&asoc->sent_queue)) &&
+ (asoc->sent_queue_retran_cnt == 0) &&
+ (win_probe_recovered == 0) &&
+ (done_once == 0)) {
+ /* huh, this should not happen */
+ sctp_fs_audit(asoc);
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->flight_size = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ asoc->sent_queue_retran_cnt = 0;
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ sctp_flight_size_increase(tp1);
+ sctp_total_flight_increase(stcb, tp1);
+ } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ asoc->sent_queue_retran_cnt++;
+ }
+ }
+ done_once = 1;
+ goto again;
+ }
+ /**********************************/
+ /* Now what about shutdown issues */
+ /**********************************/
+ if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* nothing left on sendqueue.. consider done */
+ /* clean up */
+ if ((asoc->stream_queue_cnt == 1) &&
+ ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
+ (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
+ (asoc->locked_on_sending)
+ ) {
+ struct sctp_stream_queue_pending *sp;
+
+ /*
+ * I may be in a state where we got all across.. but
+ * cannot write more due to a shutdown... we abort
+ * since the user did not indicate EOR in this case.
+ * The sp will be cleaned during free of the asoc.
+ */
+ sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
+ sctp_streamhead);
+ if ((sp) && (sp->length == 0)) {
+ /* Let cleanup code purge it */
+ if (sp->msg_is_complete) {
+ asoc->stream_queue_cnt--;
+ } else {
+ asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+ asoc->locked_on_sending = NULL;
+ asoc->stream_queue_cnt--;
+ }
+ }
+ }
+ if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
+ /* Need to abort here */
+ struct mbuf *oper;
+
+ abort_out_now:
+ *abort_now = 1;
+ /* XXX */
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
+ } else {
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_stop_timers_for_shutdown(stcb);
+ sctp_send_shutdown(stcb,
+ stcb->asoc.primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ }
+ } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
+ goto abort_out_now;
+ }
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_send_shutdown_ack(stcb,
+ stcb->asoc.primary_destination);
+
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
+ rwnd,
+ stcb->asoc.peers_rwnd,
+ stcb->asoc.total_flight,
+ stcb->asoc.total_output_queue_size);
+ }
+}
+
+/* EY! nr_sack version of sctp_handle_segments; nr-gapped TSNs get removed from the RtxQ in this method */
+static void
+sctp_handle_nr_sack_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_nr_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
+ uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
+ uint32_t num_seg, uint32_t num_nr_seg, int *ecn_seg_sums)
+{
+ /************************************************/
+ /* process fragments and update sendqueue */
+ /************************************************/
+ struct sctp_nr_sack *nr_sack;
+ struct sctp_gap_ack_block *frag, block;
+ struct sctp_nr_gap_ack_block *nr_frag, nr_block;
+ struct sctp_tmit_chunk *tp1;
+ uint32_t i, j, all_bit;
+ int wake_him = 0;
+ uint32_t theTSN;
+ int num_frs = 0;
+
+ uint16_t frag_strt, frag_end, primary_flag_set;
+ uint16_t nr_frag_strt, nr_frag_end;
+
+ uint32_t last_frag_high;
+ uint32_t last_nr_frag_high;
+
+ all_bit = ch->ch.chunk_flags & SCTP_NR_SACK_ALL_BIT;
+
+ /*
+ * @@@ JRI : TODO: This flag is not used anywhere .. remove?
+ */
+ if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
+ primary_flag_set = 1;
+ } else {
+ primary_flag_set = 0;
+ }
+ nr_sack = &ch->nr_sack;
+
+ /*
+	 * EY! - I will process nr_gaps similarly, by going to this position
+	 * again if the All bit is set
+ */
+ frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
+ *offset += sizeof(block);
+ if (frag == NULL) {
+ return;
+ }
+ tp1 = NULL;
+ last_frag_high = 0;
+ for (i = 0; i < num_seg; i++) {
+ frag_strt = ntohs(frag->start);
+ frag_end = ntohs(frag->end);
+		/* some sanity checks on the fragment offsets */
+ if (frag_strt > frag_end) {
+ /* this one is malformed, skip */
+ frag++;
+ continue;
+ }
+ if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
+ MAX_TSN))
+ *biggest_tsn_acked = frag_end + last_tsn;
+
+ /* mark acked dgs and find out the highestTSN being acked */
+ if (tp1 == NULL) {
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+
+ /* save the locations of the last frags */
+ last_frag_high = frag_end + last_tsn;
+ } else {
+ /*
+ * now lets see if we need to reset the queue due to
+ * a out-of-order SACK fragment
+ */
+ if (compare_with_wrap(frag_strt + last_tsn,
+ last_frag_high, MAX_TSN)) {
+ /*
+ * if the new frag starts after the last TSN
+ * frag covered, we are ok and this one is
+ * beyond the last one
+ */
+ ;
+ } else {
+ /*
+ * ok, they have reset us, so we need to
+ * reset the queue this will cause extra
+ * hunting but hey, they chose the
+ * performance hit when they failed to order
+				 * their gaps..
+ */
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ }
+ last_frag_high = frag_end + last_tsn;
+ }
+ for (j = frag_strt; j <= frag_end; j++) {
+ theTSN = j + last_tsn;
+ while (tp1) {
+ if (tp1->rec.data.doing_fast_retransmit)
+ num_frs++;
+
+ /*
+ * CMT: CUCv2 algorithm. For each TSN being
+ * processed from the sent queue, track the
+ * next expected pseudo-cumack, or
+ * rtx_pseudo_cumack, if required. Separate
+ * cumack trackers for first transmissions,
+ * and retransmissions.
+ */
+ if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
+ (tp1->snd_count == 1)) {
+ tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
+ tp1->whoTo->find_pseudo_cumack = 0;
+ }
+ if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
+ (tp1->snd_count > 1)) {
+ tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
+ tp1->whoTo->find_rtx_pseudo_cumack = 0;
+ }
+ if (tp1->rec.data.TSN_seq == theTSN) {
+ if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
+ /*
+ * must be held until
+ * cum-ack passes
+ */
+ /*
+ * ECN Nonce: Add the nonce
+ * value to the sender's
+ * nonce sum
+ */
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ /*-
+ * If it is less than RESEND, it is
+ * now no-longer in flight.
+ * Higher values may already be set
+ * via previous Gap Ack Blocks...
+ * i.e. ACKED or RESEND.
+ */
+ if (compare_with_wrap(tp1->rec.data.TSN_seq,
+ *biggest_newly_acked_tsn, MAX_TSN)) {
+ *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
+ }
+						/*
+						 * CMT: SFR algo (and HTNA) - set saw_newack
+						 * to 1 for dest being newly acked. update
+						 * this_sack_highest_newack if appropriate.
+						 */
+ if (tp1->rec.data.chunk_was_revoked == 0)
+ tp1->whoTo->saw_newack = 1;
+
+ if (compare_with_wrap(tp1->rec.data.TSN_seq,
+ tp1->whoTo->this_sack_highest_newack,
+ MAX_TSN)) {
+ tp1->whoTo->this_sack_highest_newack =
+ tp1->rec.data.TSN_seq;
+ }
+						/*
+						 * CMT DAC algo: also update
+						 * this_sack_lowest_newack
+						 */
+ if (*this_sack_lowest_newack == 0) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(*this_sack_lowest_newack,
+ last_tsn,
+ tp1->rec.data.TSN_seq,
+ 0,
+ 0,
+ SCTP_LOG_TSN_ACKED);
+ }
+ *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
+ }
+						/*
+						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack
+						 * for corresp dest is being acked, then we have
+						 * a new (rtx-)pseudo-cumack. Set
+						 * new_(rtx_)pseudo_cumack to TRUE so that the
+						 * cwnd for this dest can be updated. Also
+						 * trigger search for the next expected
+						 * (rtx-)pseudo-cumack. Separate pseudo_cumack
+						 * trackers for first transmissions and
+						 * retransmissions.
+						 */
+ if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
+ if (tp1->rec.data.chunk_was_revoked == 0) {
+ tp1->whoTo->new_pseudo_cumack = 1;
+ }
+ tp1->whoTo->find_pseudo_cumack = 1;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
+ }
+ if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
+ if (tp1->rec.data.chunk_was_revoked == 0) {
+ tp1->whoTo->new_pseudo_cumack = 1;
+ }
+ tp1->whoTo->find_rtx_pseudo_cumack = 1;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(*biggest_newly_acked_tsn,
+ last_tsn,
+ tp1->rec.data.TSN_seq,
+ frag_strt,
+ frag_end,
+ SCTP_LOG_TSN_ACKED);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uintptr_t) tp1->whoTo,
+ tp1->rec.data.TSN_seq);
+ }
+ sctp_flight_size_decrease(tp1);
+ sctp_total_flight_decrease(stcb, tp1);
+
+ tp1->whoTo->net_ack += tp1->send_size;
+ if (tp1->snd_count < 2) {
+						/* True non-retransmitted chunk */
+ tp1->whoTo->net_ack2 += tp1->send_size;
+
+						/* update RTO too? */
+ if (tp1->do_rtt) {
+ tp1->whoTo->RTO =
+ sctp_calculate_rto(stcb,
+ asoc,
+ tp1->whoTo,
+ &tp1->sent_rcv_time,
+ sctp_align_safe_nocopy);
+ tp1->do_rtt = 0;
+ }
+ }
+ }
+ if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
+ (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
+ (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
+ if (compare_with_wrap(tp1->rec.data.TSN_seq,
+ asoc->this_sack_highest_gap,
+ MAX_TSN)) {
+ asoc->this_sack_highest_gap =
+ tp1->rec.data.TSN_seq;
+ }
+ if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_decr(asoc->sent_queue_retran_cnt);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xB2,
+ (asoc->sent_queue_retran_cnt & 0x000000ff));
+#endif
+ }
+ }
+ /*
+ * All chunks NOT UNSENT
+ * fall through here and are
+ * marked
+ */
+ tp1->sent = SCTP_DATAGRAM_MARKED;
+ if (tp1->rec.data.chunk_was_revoked) {
+ /* deflate the cwnd */
+ tp1->whoTo->cwnd -= tp1->book_size;
+ tp1->rec.data.chunk_was_revoked = 0;
+ }
+ /*
+ * EY - if all bit is set
+ * then this TSN is
+ * nr_marked
+ */
+ if (all_bit) {
+ tp1->sent = SCTP_DATAGRAM_NR_MARKED;
+					/*
+					 * TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
+					 */
+					if (tp1->data) {
+						/* sa_ignore NO_NULL_CHK */
+						sctp_free_bufspace(stcb, asoc, tp1, 1);
+						sctp_m_freem(tp1->data);
+					}
+					tp1->data = NULL;
+					/* asoc->sent_queue_cnt--; */
+					/* sctp_free_a_chunk(stcb, tp1); */
+ wake_him++;
+ }
+ }
+ break;
+ } /* if (tp1->TSN_seq == theTSN) */
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
+ MAX_TSN))
+ break;
+
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ } /* end while (tp1) */
+ } /* end for (j = fragStart */
+ frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
+ *offset += sizeof(block);
+ if (frag == NULL) {
+ break;
+ }
+ }
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ if (num_frs)
+ sctp_log_fr(*biggest_tsn_acked,
+ *biggest_newly_acked_tsn,
+ last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
+ }
+ /*
+ * EY - if all bit is not set then there should be other loops to
+ * identify nr TSNs
+ */
+ if (!all_bit) {
+
+ nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_nr_gap_ack_block), (uint8_t *) & nr_block);
+ *offset += sizeof(nr_block);
+
+
+
+ if (nr_frag == NULL) {
+ return;
+ }
+ tp1 = NULL;
+ last_nr_frag_high = 0;
+
+ for (i = 0; i < num_nr_seg; i++) {
+
+ nr_frag_strt = ntohs(nr_frag->start);
+ nr_frag_end = ntohs(nr_frag->end);
+
+ /* some sanity checks on the nr fragment offsets */
+ if (nr_frag_strt > nr_frag_end) {
+ /* this one is malformed, skip */
+ nr_frag++;
+ continue;
+ }
+ /*
+ * mark acked dgs and find out the highest TSN being
+ * acked
+ */
+ if (tp1 == NULL) {
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+
+ /* save the locations of the last frags */
+ last_nr_frag_high = nr_frag_end + last_tsn;
+ } else {
+ /*
+ * now let's see if we need to reset the
+ * queue due to an out-of-order SACK fragment
+ */
+ if (compare_with_wrap(nr_frag_strt + last_tsn,
+ last_nr_frag_high, MAX_TSN)) {
+ /*
+ * if the new frag starts after the
+ * last TSN frag covered, we are ok
+ * and this one is beyond the last
+ * one
+ */
+ ;
+ } else {
+ /*
+ * ok, they have reset us, so we
+ * need to reset the queue. This will
+ * cause extra hunting but hey, they
+ * chose the performance hit when
+ * they failed to order their gaps.
+ */
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ }
+ last_nr_frag_high = nr_frag_end + last_tsn;
+ }
+
+ for (j = nr_frag_strt + last_tsn; (compare_with_wrap((nr_frag_end + last_tsn), j, MAX_TSN)); j++) {
+ while (tp1) {
+ if (tp1->rec.data.TSN_seq == j) {
+ if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
+ tp1->sent = SCTP_DATAGRAM_NR_MARKED;
+ /* TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); */
+ if (tp1->data) {
+ /* sa_ignore NO_NULL_CHK */
+ sctp_free_bufspace(stcb, asoc, tp1, 1);
+ sctp_m_freem(tp1->data);
+ }
+ tp1->data = NULL;
+ /* asoc->sent_queue_cnt--; */
+ /* sctp_free_a_chunk(stcb, tp1); */
+ wake_him++;
+ }
+ break;
+ } /* if (tp1->TSN_seq == j) */
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
+ MAX_TSN))
+ break;
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ } /* end while (tp1) */
+
+ } /* end for (j = nrFragStart */
+
+ nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_nr_gap_ack_block), (uint8_t *) & nr_block);
+ *offset += sizeof(nr_block);
+ if (nr_frag == NULL) {
+ break;
+ }
+ } /* end of if(!all_bit) */
+ }
+ /*
+ * EY- wake up the socket if things have been removed from the sent
+ * queue
+ */
+ if ((wake_him) && (stcb->sctp_socket)) {
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+ SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
+ /*
+ * if (SCTP_BASE_SYSCTL(sctp_logging_level) &
+ * SCTP_WAKE_LOGGING_ENABLE) { sctp_wakeup_log(stcb,
+ * cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);}
+ */
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ /* assoc was freed while we were unlocked */
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+#endif
+ sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ } /* else { if
+ * (SCTP_BASE_SYSCTL(sctp_logging_level) &
+ * SCTP_WAKE_LOGGING_ENABLE) {
+ * sctp_wakeup_log(stcb, cum_ack, wake_him,
+ * SCTP_NOWAKE_FROM_SACK); } } */
+}
+
+/* EY- nr_sack */
+/* Identifies the non-renegable TSNs that are revoked */
+static void
+sctp_check_for_nr_revoked(struct sctp_tcb *stcb,
+ struct sctp_association *asoc, uint32_t cumack,
+ u_long biggest_tsn_acked)
+{
+ struct sctp_tmit_chunk *tp1;
+
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ while (tp1) {
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
+ MAX_TSN)) {
+ /*
+ * ok this guy is either ACK or MARKED. If it is
+ * ACKED it has been previously acked but not this
+ * time i.e. revoked. If it is MARKED it was ACK'ed
+ * again.
+ */
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
+ MAX_TSN))
+ break;
+
+
+ if (tp1->sent == SCTP_DATAGRAM_NR_ACKED) {
+ /*
+ * EY! a non-renegable TSN is revoked, need
+ * to abort the association
+ */
+ /*
+ * EY TODO: put in the code to abort the
+ * assoc.
+ */
+ return;
+ } else if (tp1->sent == SCTP_DATAGRAM_NR_MARKED) {
+ /* it has been re-acked in this SACK */
+ tp1->sent = SCTP_DATAGRAM_NR_ACKED;
+ }
+ }
+ if (tp1->sent == SCTP_DATAGRAM_UNSENT)
+ break;
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ }
+}
+
+/* EY! nr_sack version of sctp_handle_sack, nr_gap_ack processing should be added to this method */
+void
+sctp_handle_nr_sack(struct mbuf *m, int offset,
+ struct sctp_nr_sack_chunk *ch, struct sctp_tcb *stcb,
+ struct sctp_nets *net_from, int *abort_now, int nr_sack_len, uint32_t rwnd)
+{
+ struct sctp_association *asoc;
+
+ /* EY sack */
+ struct sctp_nr_sack *nr_sack;
+ struct sctp_tmit_chunk *tp1, *tp2;
+ uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
+ this_sack_lowest_newack;
+ uint32_t sav_cum_ack;
+
+ /* EY num_seg */
+ uint16_t num_seg, num_nr_seg, num_dup;
+ uint16_t wake_him = 0;
+ unsigned int nr_sack_length;
+ uint32_t send_s = 0;
+ long j;
+ int accum_moved = 0;
+ int will_exit_fast_recovery = 0;
+ uint32_t a_rwnd, old_rwnd;
+ int win_probe_recovery = 0;
+ int win_probe_recovered = 0;
+ struct sctp_nets *net = NULL;
+ int nonce_sum_flag, ecn_seg_sums = 0, all_bit;
+ int done_once;
+ uint8_t reneged_all = 0;
+ uint8_t cmt_dac_flag;
+
+ /*
+ * we take any chance we can to service our queues since we cannot
+ * get awoken when the socket is read from :<
+ */
+ /*
+ * Now perform the actual SACK handling: 1) Verify that it is not an
+ * old sack, if so discard. 2) If there is nothing left in the send
+ * queue (cum-ack is equal to last acked) then you have a duplicate
+ * too, update any rwnd change and verify no timers are running.
+ * then return. 3) Process any new consecutive data i.e. cum-ack
+ * moved process these first and note that it moved. 4) Process any
+ * sack blocks. 5) Drop any acked from the queue. 6) Check for any
+ * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
+ * sync up flightsizes and things, stop all timers and also check
+ * for shutdown_pending state. If so then go ahead and send off the
+ * shutdown. If in shutdown recv, send off the shutdown-ack and
+ * start that timer, Ret. 9) Strike any non-acked things and do FR
+ * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
+ * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
+ * if in shutdown_recv state.
+ */
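+	/*
+	 * This mirrors the regular sctp_handle_sack() flow; the NR-SACK
+	 * specific work is the additional nr gap-ack block pass, which marks
+	 * chunks SCTP_DATAGRAM_NR_MARKED/NR_ACKED so their data can be freed
+	 * early, plus the check that no non-renegable TSN gets revoked.
+	 */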
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ nr_sack = &ch->nr_sack;
+ /* CMT DAC algo */
+ this_sack_lowest_newack = 0;
+ j = 0;
+ nr_sack_length = (unsigned int)nr_sack_len;
+ /* ECN Nonce */
+ SCTP_STAT_INCR(sctps_slowpath_sack);
+ nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
+ cum_ack = last_tsn = ntohl(nr_sack->cum_tsn_ack);
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
+ stcb->asoc.cumack_log_at++;
+ if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
+ stcb->asoc.cumack_log_at = 0;
+ }
+#endif
+ all_bit = ch->ch.chunk_flags & SCTP_NR_SACK_ALL_BIT;
+ num_seg = ntohs(nr_sack->num_gap_ack_blks);
+ num_nr_seg = ntohs(nr_sack->num_nr_gap_ack_blks);
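+	/*
+	 * When the ALL bit is set, every reported gap-ack block is
+	 * non-renegable, so the nr block count doubles as the gap-ack block
+	 * count (see also the duplicate-offset computation below).
+	 */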
+ if (all_bit)
+ num_seg = num_nr_seg;
+ a_rwnd = rwnd;
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
+ sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
+ rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
+ }
+ /* CMT DAC algo */
+ cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
+ num_dup = ntohs(nr_sack->num_dup_tsns);
+
+ old_rwnd = stcb->asoc.peers_rwnd;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INDATA,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ asoc = &stcb->asoc;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(asoc->last_acked_seq,
+ cum_ack,
+ 0,
+ num_seg,
+ num_dup,
+ SCTP_LOG_NEW_SACK);
+ }
+ if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
+ int off_to_dup, iii;
+ uint32_t *dupdata, dblock;
+
+ /* EY! gotta be careful here */
+ if (all_bit) {
+ off_to_dup = (num_nr_seg * sizeof(struct sctp_nr_gap_ack_block)) +
+ sizeof(struct sctp_nr_sack_chunk);
+ } else {
+ off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) +
+ (num_nr_seg * sizeof(struct sctp_nr_gap_ack_block)) + sizeof(struct sctp_nr_sack_chunk);
+ }
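+		/*
+		 * The duplicate-TSN list follows the gap-ack and nr gap-ack
+		 * block arrays in the NR-SACK chunk, so off_to_dup skips past
+		 * both arrays (with the ALL bit set there is only the single,
+		 * all non-renegable, block array to skip).
+		 */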
+ if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= nr_sack_length) {
+ dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
+ sizeof(uint32_t), (uint8_t *) & dblock);
+ off_to_dup += sizeof(uint32_t);
+ if (dupdata) {
+ for (iii = 0; iii < num_dup; iii++) {
+ sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
+ dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
+ sizeof(uint32_t), (uint8_t *) & dblock);
+ if (dupdata == NULL)
+ break;
+ off_to_dup += sizeof(uint32_t);
+ }
+ }
+ } else {
+ SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d nr_sack_len:%d num gaps:%d num nr_gaps:%d\n",
+ off_to_dup, num_dup, nr_sack_length, num_seg, num_nr_seg);
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
+ /* reality check */
+ if (!TAILQ_EMPTY(&asoc->sent_queue)) {
+ tp1 = TAILQ_LAST(&asoc->sent_queue,
+ sctpchunk_listhead);
+ send_s = tp1->rec.data.TSN_seq + 1;
+ } else {
+ send_s = asoc->sending_seq;
+ }
+ if (cum_ack == send_s ||
+ compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
+#ifndef INVARIANTS
+ struct mbuf *oper;
+
+#endif
+#ifdef INVARIANTS
+ hopeless_peer:
+ panic("Impossible sack 1");
+#else
+
+
+ /*
+ * no way, we have not even sent this TSN out yet.
+ * Peer is hopelessly messed up with us.
+ */
+ hopeless_peer:
+ *abort_now = 1;
+ /* XXX */
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+ return;
+#endif
+ }
+ }
+ /**********************/
+ /* 1) check the range */
+ /**********************/
+ if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
+ /* acking something behind */
+ return;
+ }
+ sav_cum_ack = asoc->last_acked_seq;
+
+ /* update the Rwnd of the peer */
+ if (TAILQ_EMPTY(&asoc->sent_queue) &&
+ TAILQ_EMPTY(&asoc->send_queue) &&
+ (asoc->stream_queue_cnt == 0)
+ ) {
+ /* nothing left on send/sent and strmq */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+ sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
+ asoc->peers_rwnd, 0, 0, a_rwnd);
+ }
+ asoc->peers_rwnd = a_rwnd;
+ if (asoc->sent_queue_retran_cnt) {
+ asoc->sent_queue_retran_cnt = 0;
+ }
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ /* stop any timers */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
+ if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
+ if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
+ }
+ }
+ net->partial_bytes_acked = 0;
+ net->flight_size = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ return;
+ }
+	/*
+	 * We init net_ack and net_ack2 to 0. These are used to track 2
+	 * things. The total byte count acked is tracked in net_ack AND
+	 * net_ack2 is used to track the total bytes acked that are
+	 * unambiguous and were never retransmitted. We track these on a per
+	 * destination address basis.
+	 */
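+	/*
+	 * For example, a 1000-byte chunk acked on its first transmission
+	 * adds 1000 to both net_ack and net_ack2 of its destination, while a
+	 * chunk that has been retransmitted (snd_count >= 2) only adds to
+	 * net_ack, since its ack is ambiguous.
+	 */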
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->prev_cwnd = net->cwnd;
+ net->net_ack = 0;
+ net->net_ack2 = 0;
+
+ /*
+ * CMT: Reset CUC and Fast recovery algo variables before
+ * SACK processing
+ */
+ net->new_pseudo_cumack = 0;
+ net->will_exit_fast_recovery = 0;
+ }
+ /* process the new consecutive TSN first */
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ while (tp1) {
+ if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
+ MAX_TSN) ||
+ last_tsn == tp1->rec.data.TSN_seq) {
+ if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
+ /*
+ * ECN Nonce: Add the nonce to the sender's
+ * nonce sum
+ */
+ asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
+ accum_moved = 1;
+ if (tp1->sent < SCTP_DATAGRAM_ACKED) {
+ /*
+ * If it is less than ACKED, it is
+ * now no-longer in flight. Higher
+ * values may occur during marking
+ */
+ if ((tp1->whoTo->dest_state &
+ SCTP_ADDR_UNCONFIRMED) &&
+ (tp1->snd_count < 2)) {
+ /*
+ * If there was no retran
+ * and the address is
+ * un-confirmed and we sent
+ * there and are now
+ * sacked.. it's confirmed,
+ * mark it so.
+ */
+ tp1->whoTo->dest_state &=
+ ~SCTP_ADDR_UNCONFIRMED;
+ }
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uintptr_t) tp1->whoTo,
+ tp1->rec.data.TSN_seq);
+ }
+ sctp_flight_size_decrease(tp1);
+ sctp_total_flight_decrease(stcb, tp1);
+ }
+ tp1->whoTo->net_ack += tp1->send_size;
+
+ /* CMT SFR and DAC algos */
+ this_sack_lowest_newack = tp1->rec.data.TSN_seq;
+ tp1->whoTo->saw_newack = 1;
+
+ if (tp1->snd_count < 2) {
+ /*
+ * True non-retransmitted
+ * chunk
+ */
+ tp1->whoTo->net_ack2 +=
+ tp1->send_size;
+
+ /* update RTO too? */
+ if (tp1->do_rtt) {
+ tp1->whoTo->RTO =
+ sctp_calculate_rto(stcb,
+ asoc, tp1->whoTo,
+ &tp1->sent_rcv_time,
+ sctp_align_safe_nocopy);
+ tp1->do_rtt = 0;
+ }
+ }
+ /*
+ * CMT: CUCv2 algorithm. From the
+ * cumack'd TSNs, for each TSN being
+ * acked for the first time, set the
+ * following variables for the
+ * corresp destination.
+ * new_pseudo_cumack will trigger a
+ * cwnd update.
+ * find_(rtx_)pseudo_cumack will
+ * trigger search for the next
+ * expected (rtx-)pseudo-cumack.
+ */
+ tp1->whoTo->new_pseudo_cumack = 1;
+ tp1->whoTo->find_pseudo_cumack = 1;
+ tp1->whoTo->find_rtx_pseudo_cumack = 1;
+
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(asoc->last_acked_seq,
+ cum_ack,
+ tp1->rec.data.TSN_seq,
+ 0,
+ 0,
+ SCTP_LOG_TSN_ACKED);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
+ }
+ }
+ if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_decr(asoc->sent_queue_retran_cnt);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xB3,
+ (asoc->sent_queue_retran_cnt & 0x000000ff));
+#endif
+ }
+ if (tp1->rec.data.chunk_was_revoked) {
+ /* deflate the cwnd */
+ tp1->whoTo->cwnd -= tp1->book_size;
+ tp1->rec.data.chunk_was_revoked = 0;
+ }
+ tp1->sent = SCTP_DATAGRAM_ACKED;
+ }
+ } else {
+ break;
+ }
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ }
+ biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
+ /* always set this up to cum-ack */
+ asoc->this_sack_highest_gap = last_tsn;
+
+ /* Move offset up to point to gaps/dups */
+ offset += sizeof(struct sctp_nr_sack_chunk);
+ if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_nr_sack_chunk)) > nr_sack_length) {
+
+ /* skip corrupt segments */
+ goto skip_segments;
+ }
+ if (num_seg > 0) {
+
+ /*
+ * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
+ * to be greater than the cumack. Also reset saw_newack to 0
+ * for all dests.
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->saw_newack = 0;
+ net->this_sack_highest_newack = last_tsn;
+ }
+
+		/*
+		 * this_sack_highest_gap will increase while handling NEW
+		 * segments; this_sack_highest_newack will increase while
+		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
+		 * used for the CMT DAC algo. saw_newack will also change.
+		 */
+
+ sctp_handle_nr_sack_segments(m, &offset, stcb, asoc, ch, last_tsn,
+ &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
+ num_seg, num_nr_seg, &ecn_seg_sums);
+
+
+ if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
+ /*
+ * validate the biggest_tsn_acked in the gap acks if
+ * strict adherence is wanted.
+ */
+ if ((biggest_tsn_acked == send_s) ||
+ (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
+ /*
+ * peer is either confused or we are under
+ * attack. We must abort.
+ */
+ goto hopeless_peer;
+ }
+ }
+ }
+skip_segments:
+ /*******************************************/
+ /* cancel ALL T3-send timer if accum moved */
+ /*******************************************/
+ if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (net->new_pseudo_cumack)
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
+
+ }
+ } else {
+ if (accum_moved) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
+ }
+ }
+ }
+ /********************************************/
+ /* drop the acked chunks from the sendqueue */
+ /********************************************/
+ asoc->last_acked_seq = cum_ack;
+
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ if (tp1 == NULL)
+ goto done_with_it;
+ do {
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
+ MAX_TSN)) {
+ break;
+ }
+ if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
+ /* no more sent on list */
+ printf("Warning, tp1->sent == %d and its now acked?\n",
+ tp1->sent);
+ }
+ tp2 = TAILQ_NEXT(tp1, sctp_next);
+ TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
+ if (tp1->pr_sctp_on) {
+ if (asoc->pr_sctp_cnt != 0)
+ asoc->pr_sctp_cnt--;
+ }
+ if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
+ (asoc->total_flight > 0)) {
+#ifdef INVARIANTS
+ panic("Warning flight size is postive and should be 0");
+#else
+ SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
+ asoc->total_flight);
+#endif
+ asoc->total_flight = 0;
+ }
+ if (tp1->data) {
+ /* sa_ignore NO_NULL_CHK */
+ sctp_free_bufspace(stcb, asoc, tp1, 1);
+ sctp_m_freem(tp1->data);
+ if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
+ asoc->sent_queue_cnt_removeable--;
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(asoc->last_acked_seq,
+ cum_ack,
+ tp1->rec.data.TSN_seq,
+ 0,
+ 0,
+ SCTP_LOG_FREE_SENT);
+ }
+ tp1->data = NULL;
+ asoc->sent_queue_cnt--;
+ sctp_free_a_chunk(stcb, tp1);
+ wake_him++;
+ tp1 = tp2;
+ } while (tp1 != NULL);
+
+done_with_it:
+ /* sa_ignore NO_NULL_CHK */
+ if ((wake_him) && (stcb->sctp_socket)) {
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+ SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
+ sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
+ }
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ /* assoc was freed while we were unlocked */
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+#endif
+ sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
+ sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
+ }
+ }
+
+ if (asoc->fast_retran_loss_recovery && accum_moved) {
+ if (compare_with_wrap(asoc->last_acked_seq,
+ asoc->fast_recovery_tsn, MAX_TSN) ||
+ asoc->last_acked_seq == asoc->fast_recovery_tsn) {
+ /* Setup so we will exit RFC2582 fast recovery */
+ will_exit_fast_recovery = 1;
+ }
+ }
+	/*
+	 * Check for revoked fragments:
+	 *
+	 * if Previous sack - Had no frags, then we can't have any revoked.
+	 * if Previous sack - Had frags, then
+	 *   - If we now have frags aka num_seg > 0, call
+	 *     sctp_check_for_revoked() to tell if the peer revoked some of them.
+	 *   - else the peer revoked all ACKED fragments, since we had some
+	 *     before and now we have NONE.
+	 */
+
+ if (num_seg)
+ sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
+
+ else if (asoc->saw_sack_with_frags) {
+ int cnt_revoked = 0;
+
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ if (tp1 != NULL) {
+ /* Peer revoked all dg's marked or acked */
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ /*
+ * EY- maybe check only if it is nr_acked
+ * nr_marked may not be possible
+ */
+ if ((tp1->sent == SCTP_DATAGRAM_NR_ACKED) ||
+ (tp1->sent == SCTP_DATAGRAM_NR_MARKED)) {
+ /*
+ * EY! - TODO: Something previously
+ * nr_gapped is reneged, abort the
+ * association
+ */
+ return;
+ }
+ if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
+ (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
+ tp1->sent = SCTP_DATAGRAM_SENT;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uintptr_t) tp1->whoTo,
+ tp1->rec.data.TSN_seq);
+ }
+ sctp_flight_size_increase(tp1);
+ sctp_total_flight_increase(stcb, tp1);
+ tp1->rec.data.chunk_was_revoked = 1;
+ /*
+ * To ensure that this increase in
+ * flightsize, which is artificial,
+ * does not throttle the sender, we
+ * also increase the cwnd
+ * artificially.
+ */
+ tp1->whoTo->cwnd += tp1->book_size;
+ cnt_revoked++;
+ }
+ }
+ if (cnt_revoked) {
+ reneged_all = 1;
+ }
+ }
+ asoc->saw_sack_with_frags = 0;
+ }
+ if (num_seg)
+ asoc->saw_sack_with_frags = 1;
+ else
+ asoc->saw_sack_with_frags = 0;
+
+ /* EY! - not sure about if there should be an IF */
+ if (num_nr_seg)
+ sctp_check_for_nr_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
+ else if (asoc->saw_sack_with_nr_frags) {
+ /*
+ * EY!- TODO: all previously nr_gapped chunks have been
+ * reneged abort the association
+ */
+ asoc->saw_sack_with_nr_frags = 0;
+ }
+ if (num_nr_seg)
+ asoc->saw_sack_with_nr_frags = 1;
+ else
+ asoc->saw_sack_with_nr_frags = 0;
+ /* JRS - Use the congestion control given in the CC module */
+ asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
+
+ if (TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* nothing left in-flight */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ /* stop all timers */
+ if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
+ if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
+ }
+ }
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
+ net->flight_size = 0;
+ net->partial_bytes_acked = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ }
+ /**********************************/
+ /* Now what about shutdown issues */
+ /**********************************/
+ if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* nothing left on sendqueue.. consider done */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+ sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
+ asoc->peers_rwnd, 0, 0, a_rwnd);
+ }
+ asoc->peers_rwnd = a_rwnd;
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ /* clean up */
+ if ((asoc->stream_queue_cnt == 1) &&
+ ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
+ (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
+ (asoc->locked_on_sending)
+ ) {
+ struct sctp_stream_queue_pending *sp;
+
+ /*
+ * I may be in a state where we got all across.. but
+ * cannot write more due to a shutdown... we abort
+ * since the user did not indicate EOR in this case.
+ */
+ sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
+ sctp_streamhead);
+ if ((sp) && (sp->length == 0)) {
+ asoc->locked_on_sending = NULL;
+ if (sp->msg_is_complete) {
+ asoc->stream_queue_cnt--;
+ } else {
+ asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+ asoc->stream_queue_cnt--;
+ }
+ }
+ }
+ if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
+ /* Need to abort here */
+ struct mbuf *oper;
+
+ abort_out_now:
+ *abort_now = 1;
+ /* XXX */
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
+ return;
+ } else {
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_stop_timers_for_shutdown(stcb);
+ sctp_send_shutdown(stcb,
+ stcb->asoc.primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ }
+ return;
+ } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
+ goto abort_out_now;
+ }
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_send_shutdown_ack(stcb,
+ stcb->asoc.primary_destination);
+
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ return;
+ }
+ }
+ /*
+ * Now here we are going to recycle net_ack for a different use...
+ * HEADS UP.
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->net_ack = 0;
+ }
+
+ /*
+ * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
+ * to be done. Setting this_sack_lowest_newack to the cum_ack will
+ * automatically ensure that.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
+ this_sack_lowest_newack = cum_ack;
+ }
+ if (num_seg > 0) {
+ sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
+ biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
+ }
+ /* JRS - Use the congestion control given in the CC module */
+ asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
+
+ /******************************************************************
+ * Here we do the stuff with ECN Nonce checking.
+ * We basically check to see if the nonce sum flag was incorrect
+ * or if resynchronization needs to be done. Also if we catch a
+ * misbehaving receiver we give him the kick.
+ ******************************************************************/
+
+ if (asoc->ecn_nonce_allowed) {
+ if (asoc->nonce_sum_check) {
+ if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
+ if (asoc->nonce_wait_for_ecne == 0) {
+ struct sctp_tmit_chunk *lchk;
+
+ lchk = TAILQ_FIRST(&asoc->send_queue);
+ asoc->nonce_wait_for_ecne = 1;
+ if (lchk) {
+ asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
+ } else {
+ asoc->nonce_wait_tsn = asoc->sending_seq;
+ }
+ } else {
+ if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
+ (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
+ /*
+ * Misbehaving peer. We need
+ * to react to this guy
+ */
+ asoc->ecn_allowed = 0;
+ asoc->ecn_nonce_allowed = 0;
+ }
+ }
+ }
+ } else {
+ /* See if Resynchronization Possible */
+ if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
+ asoc->nonce_sum_check = 1;
+ /*
+				 * now we must calculate what the base is.
+				 * We do this based on two things, we know
+				 * the totals for all the segments
+				 * gap-acked in the SACK, stored in
+				 * ecn_seg_sums. We also know the SACK's
+				 * nonce sum, in nonce_sum_flag. So we
+				 * can build a truth table to back-calculate
+				 * the new value of
+				 * asoc->nonce_sum_expect_base:
+				 *
+				 *   SACK-flag-Value  Seg-Sums  Base
+				 *          0            0       0
+				 *          1            0       1
+				 *          0            1       1
+				 *          1            1       0
+ */
+ asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
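+				/*
+				 * e.g. if this SACK's nonce-sum flag is 1 and
+				 * the gap-acked segments sum to 0 (mod 2), the
+				 * recovered base is 1 ^ 0 = 1, per the truth
+				 * table above.
+				 */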
+ }
+ }
+ }
+ /* Now are we exiting loss recovery ? */
+ if (will_exit_fast_recovery) {
+ /* Ok, we must exit fast recovery */
+ asoc->fast_retran_loss_recovery = 0;
+ }
+ if ((asoc->sat_t3_loss_recovery) &&
+ ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
+ MAX_TSN) ||
+ (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
+ /* end satellite t3 loss recovery */
+ asoc->sat_t3_loss_recovery = 0;
+ }
+ /*
+ * CMT Fast recovery
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (net->will_exit_fast_recovery) {
+ /* Ok, we must exit fast recovery */
+ net->fast_retran_loss_recovery = 0;
+ }
+ }
+
+ /* Adjust and set the new rwnd value */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+ sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
+ asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
+ }
+ asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
+ (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
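+	/*
+	 * The usable peer rwnd is the advertised window minus the total in
+	 * flight and a per-chunk overhead allowance (sctp_peer_chunk_oh per
+	 * queued chunk); sctp_sbspace_sub() keeps the result from
+	 * underflowing past zero.
+	 */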
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ if (asoc->peers_rwnd > old_rwnd) {
+ win_probe_recovery = 1;
+ }
+ /*
+ * Now we must setup so we have a timer up for anyone with
+ * outstanding data.
+ */
+ done_once = 0;
+again:
+ j = 0;
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (win_probe_recovery && (net->window_probe)) {
+ net->window_probe = 0;
+ win_probe_recovered = 1;
+ /*-
+ * Find first chunk that was used with
+ * window probe and clear the event. Put
+			 * it back into the send queue as if it has
+			 * not been sent.
+ */
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if (tp1->window_probe) {
+ sctp_window_probe_recovery(stcb, asoc, net, tp1);
+ break;
+ }
+ }
+ }
+ if (net->flight_size) {
+ j++;
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net);
+ } else {
+ if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
+ if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
+ }
+ }
+ }
+ }
+ if ((j == 0) &&
+ (!TAILQ_EMPTY(&asoc->sent_queue)) &&
+ (asoc->sent_queue_retran_cnt == 0) &&
+ (win_probe_recovered == 0) &&
+ (done_once == 0)) {
+ /* huh, this should not happen */
+ sctp_fs_audit(asoc);
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->flight_size = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ asoc->sent_queue_retran_cnt = 0;
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ sctp_flight_size_increase(tp1);
+ sctp_total_flight_increase(stcb, tp1);
+ } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ asoc->sent_queue_retran_cnt++;
+ }
+ }
+ done_once = 1;
+ goto again;
+ }
+ /*********************************************/
+ /* Here we perform PR-SCTP procedures */
+ /* (section 4.2) */
+ /*********************************************/
+ /* C1. update advancedPeerAckPoint */
+ if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
+ asoc->advanced_peer_ack_point = cum_ack;
+ }
+ /* C2. try to further move advancedPeerAckPoint ahead */
+ if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
+ struct sctp_tmit_chunk *lchk;
+ uint32_t old_adv_peer_ack_point;
+
+ old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
+ lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
+ /* C3. See if we need to send a Fwd-TSN */
+ if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
+ MAX_TSN)) {
+ /*
+ * ISSUE with ECN, see FWD-TSN processing for notes
+ * on issues that will occur when the ECN NONCE
+ * stuff is put into SCTP for cross checking.
+ */
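+			/*
+			 * Only send another FWD-TSN if the advanced peer ack
+			 * point actually moved beyond the previous one;
+			 * otherwise the same FWD-TSN would be re-sent on every
+			 * SACK arrival.
+			 */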
+ if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
+ MAX_TSN)) {
+ send_forward_tsn(stcb, asoc);
+ /*
+ * ECN Nonce: Disable Nonce Sum check when
+ * FWD TSN is sent and store resync tsn
+ */
+ asoc->nonce_sum_check = 0;
+ asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
+ }
+ }
+ if (lchk) {
+ /* Assure a timer is up */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, lchk->whoTo);
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
+ a_rwnd,
+ stcb->asoc.peers_rwnd,
+ stcb->asoc.total_flight,
+ stcb->asoc.total_output_queue_size);
+ }
+}