author    rrs <rrs@FreeBSD.org>    2009-04-04 11:43:32 +0000
committer rrs <rrs@FreeBSD.org>    2009-04-04 11:43:32 +0000
commit    f72ef579b2bcf1c9f2abd735735fe92b2f8ada15 (patch)
tree      aa1f0e7c08d66758879376d7bd3ed3ff5a7c7bb3 /sys/netinet/sctp_indata.c
parent    3503cf99e3ac0111488d2a57341ab1b1088e2436 (diff)
Many bug fixes (from the IETF hack-fest):
- PR-SCTP had major issues when skipping through a multi-part message:
  o Did not look at the socket buffer.
  o Did not properly handle the reassembly queue.
  o The MARKED segments could interfere and un-skip a chunk, causing a
    problem with the proper FWD-TSN.
  o No FR of FWD-TSNs was being done.
- NR-SACK code was basically disabled. It needed fixes that never got
  into the real code.
- CMT code had issues when the two paths did NOT have the same bandwidth.
  We found a few small bugs, but the critical one here was not dividing
  the rwnd amongst the paths.

Obtained from: Michael Tuexen and myself at the IETF hack-fest ;-)
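Many hunks in this diff replace plain ">" / ">=" comparisons on TSNs with
compare_with_wrap(), which treats the 32-bit TSN space as circular
(serial-number arithmetic) so that ordering stays correct after the counter
wraps past MAX_TSN. The following is a minimal sketch of that idiom, not the
exact macro from sctp_constants.h:

#include <stdint.h>

/*
 * Sketch of a wrap-aware "is tsn_a newer than tsn_b" test over a
 * 32-bit serial-number space (RFC 1982 style). It mirrors the intent
 * of compare_with_wrap(a, b, MAX_TSN); the real kernel macro may
 * differ in detail.
 */
static int
tsn_newer(uint32_t tsn_a, uint32_t tsn_b)
{
	/*
	 * The unsigned difference reinterpreted as signed is positive
	 * exactly when tsn_a is at most 2^31 - 1 ahead of tsn_b on the
	 * circle, even across a wrap.
	 */
	return ((int32_t)(tsn_a - tsn_b)) > 0;
}

For example, with tsn_b = 0xFFFFFFF0 and tsn_a = 0x00000010, tsn_newer()
returns true while a plain ">" would not -- exactly the wrap case that the
highest_tsn_inside_nr_map updates below guard against.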
Diffstat (limited to 'sys/netinet/sctp_indata.c')
-rw-r--r--  sys/netinet/sctp_indata.c | 332
1 file changed, 189 insertions(+), 143 deletions(-)
diff --git a/sys/netinet/sctp_indata.c b/sys/netinet/sctp_indata.c
index 0075007..a9f315c 100644
--- a/sys/netinet/sctp_indata.c
+++ b/sys/netinet/sctp_indata.c
@@ -423,12 +423,13 @@ abandon:
if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
nr_tsn = chk->rec.data.TSN_seq;
- if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
+ if ((compare_with_wrap(nr_tsn, asoc->nr_mapping_array_base_tsn, MAX_TSN)) ||
+ (nr_tsn == asoc->nr_mapping_array_base_tsn)) {
nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
} else {
nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
}
- if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
+ if ((nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3)) ||
(nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
/*
* EY The 1st should never happen, as in
@@ -440,10 +441,11 @@ abandon:
* nr_mapping_array is always expanded when
* mapping_array is expanded
*/
+ printf("Impossible nr_gap ack range failed\n");
} else {
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
- if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
asoc->highest_tsn_inside_nr_map = nr_tsn;
}
}
@@ -550,7 +552,9 @@ abandon:
} else {
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
- if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ if (compare_with_wrap(nr_tsn,
+ asoc->highest_tsn_inside_nr_map,
+ MAX_TSN))
asoc->highest_tsn_inside_nr_map = nr_tsn;
}
}
@@ -699,7 +703,7 @@ protocol_error:
} else {
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
- if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
asoc->highest_tsn_inside_nr_map = nr_tsn;
}
}
@@ -760,7 +764,8 @@ protocol_error:
} else {
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
- if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map,
+ MAX_TSN))
asoc->highest_tsn_inside_nr_map = nr_tsn;
}
}
@@ -2390,6 +2395,15 @@ finish_express_del:
}
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
+
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
+ asoc->peer_supports_nr_sack &&
+ (SCTP_BASE_SYSCTL(sctp_do_drain) == 0)) {
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+ if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
+ asoc->highest_tsn_inside_nr_map = tsn;
+ }
+ }
/* check the special flag for stream resets */
if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
@@ -2498,9 +2512,9 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
int slide_from, slide_end, lgap, distance;
/* EY nr_mapping array variables */
- int nr_at;
- int nr_last_all_ones = 0;
- int nr_slide_from, nr_slide_end, nr_lgap, nr_distance;
+ /* int nr_at; */
+ /* int nr_last_all_ones = 0; */
+ /* int nr_slide_from, nr_slide_end, nr_lgap, nr_distance; */
uint32_t old_cumack, old_base, old_highest;
unsigned char aux_array[64];
@@ -2683,102 +2697,19 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
SCTP_MAP_SLIDE_RESULT);
}
- }
- }
- /*
- * EY if doing nr_sacks then slide the nr_mapping_array accordingly
- * please
- */
- if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
-
- nr_at = 0;
- for (nr_slide_from = 0; nr_slide_from < stcb->asoc.nr_mapping_array_size; nr_slide_from++) {
-
- if (asoc->nr_mapping_array[nr_slide_from] == 0xff) {
- nr_at += 8;
- nr_last_all_ones = 1;
- } else {
- /* there is a 0 bit */
- nr_at += sctp_map_lookup_tab[asoc->nr_mapping_array[nr_slide_from]];
- nr_last_all_ones = 0;
- break;
- }
- }
-
- nr_at++;
-
- if (compare_with_wrap(asoc->cumulative_tsn,
- asoc->highest_tsn_inside_nr_map, MAX_TSN) && (at >= 8)) {
- /* The complete array was completed by a single FR */
- /* higest becomes the cum-ack */
- int clr;
-
- clr = (nr_at >> 3) + 1;
-
- if (clr > asoc->nr_mapping_array_size)
- clr = asoc->nr_mapping_array_size;
-
- memset(asoc->nr_mapping_array, 0, clr);
- /* base becomes one ahead of the cum-ack */
- asoc->nr_mapping_array_base_tsn = asoc->cumulative_tsn + 1;
- asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
-
- } else if (nr_at >= 8) {
- /* we can slide the mapping array down */
- /* Calculate the new byte postion we can move down */
-
/*
- * now calculate the ceiling of the move using our
- * highest TSN value
+ * EY if doing nr_sacks then slide the
+ * nr_mapping_array accordingly please
*/
- if (asoc->highest_tsn_inside_nr_map >= asoc->nr_mapping_array_base_tsn) {
- nr_lgap = asoc->highest_tsn_inside_nr_map -
- asoc->nr_mapping_array_base_tsn;
- } else {
- nr_lgap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) +
- asoc->highest_tsn_inside_nr_map + 1;
- }
- nr_slide_end = nr_lgap >> 3;
- if (nr_slide_end < nr_slide_from) {
-#ifdef INVARIANTS
- panic("impossible slide");
-#else
- printf("impossible slide?\n");
- return;
-#endif
- }
- if (nr_slide_end > asoc->nr_mapping_array_size) {
-#ifdef INVARIANTS
- panic("would overrun buffer");
-#else
- printf("Gak, would have overrun map end:%d nr_slide_end:%d\n",
- asoc->nr_mapping_array_size, nr_slide_end);
- nr_slide_end = asoc->nr_mapping_array_size;
-#endif
- }
- nr_distance = (nr_slide_end - nr_slide_from) + 1;
-
- if (nr_distance + nr_slide_from > asoc->nr_mapping_array_size ||
- nr_distance < 0) {
- /*
- * Here we do NOT slide forward the array so
- * that hopefully when more data comes in to
- * fill it up we will be able to slide it
- * forward. Really I don't think this should
- * happen :-0
- */
- ;
- } else {
- int ii;
-
- for (ii = 0; ii < nr_distance; ii++) {
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+ for (ii = 0; ii < distance; ii++) {
asoc->nr_mapping_array[ii] =
- asoc->nr_mapping_array[nr_slide_from + ii];
+ asoc->nr_mapping_array[slide_from + ii];
}
- for (ii = nr_distance; ii <= nr_slide_end; ii++) {
+ for (ii = distance; ii <= slide_end; ii++) {
asoc->nr_mapping_array[ii] = 0;
}
- asoc->nr_mapping_array_base_tsn += (nr_slide_from << 3);
+ asoc->nr_mapping_array_base_tsn += (slide_from << 3);
}
}
}
@@ -2802,7 +2733,7 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
* EY if nr_sacks used then send an nr-sack , a sack
* otherwise
*/
- if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
sctp_send_nr_sack(stcb);
else
sctp_send_sack(stcb);
@@ -3496,9 +3427,13 @@ sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct
/*
* All chunks NOT UNSENT
* fall through here and are
- * marked
+ * marked (leave PR-SCTP
+ * ones that are to skip
+ * alone though)
*/
- tp1->sent = SCTP_DATAGRAM_MARKED;
+ if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
+ tp1->sent = SCTP_DATAGRAM_MARKED;
+
if (tp1->rec.data.chunk_was_revoked) {
/* deflate the cwnd */
tp1->whoTo->cwnd -= tp1->book_size;
@@ -5798,7 +5733,9 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
} else {
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
- if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ if (compare_with_wrap(nr_tsn,
+ asoc->highest_tsn_inside_nr_map,
+ MAX_TSN))
asoc->highest_tsn_inside_nr_map = nr_tsn;
}
@@ -5901,7 +5838,8 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
} else {
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
- if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map,
+ MAX_TSN))
asoc->highest_tsn_inside_nr_map = nr_tsn;
}
@@ -5963,6 +5901,91 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
}
}
+static void
+sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ uint16_t stream, uint16_t seq)
+{
+ struct sctp_tmit_chunk *chk, *at;
+
+ if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
+ /* For each one on here see if we need to toss it */
+ /*
+ * For now large messages held on the reasmqueue that are
+ * complete will be tossed too. We could in theory do more
+ * work to spin through and stop after dumping one msg aka
+ * seeing the start of a new msg at the head, and call the
+ * delivery function... to see if it can be delivered... But
+ * for now we just dump everything on the queue.
+ */
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ while (chk) {
+ at = TAILQ_NEXT(chk, sctp_next);
+ if (chk->rec.data.stream_number != stream) {
+ chk = at;
+ continue;
+ }
+ if (chk->rec.data.stream_seq == seq) {
+ /* It needs to be tossed */
+ TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
+ if (compare_with_wrap(chk->rec.data.TSN_seq,
+ asoc->tsn_last_delivered, MAX_TSN)) {
+ asoc->tsn_last_delivered =
+ chk->rec.data.TSN_seq;
+ asoc->str_of_pdapi =
+ chk->rec.data.stream_number;
+ asoc->ssn_of_pdapi =
+ chk->rec.data.stream_seq;
+ asoc->fragment_flags =
+ chk->rec.data.rcv_flags;
+ }
+ asoc->size_on_reasm_queue -= chk->send_size;
+ sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+
+ /* Clear up any stream problem */
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
+ SCTP_DATA_UNORDERED &&
+ (compare_with_wrap(chk->rec.data.stream_seq,
+ asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
+ MAX_SEQ))) {
+ /*
+ * We must dump forward this streams
+ * sequence number if the chunk is
+ * not unordered that is being
+ * skipped. There is a chance that
+ * if the peer does not include the
+ * last fragment in its FWD-TSN we
+ * WILL have a problem here since
+ * you would have a partial chunk in
+ * queue that may not be
+ * deliverable. Also if a Partial
+ * delivery API as started the user
+ * may get a partial chunk. The next
+ * read returning a new chunk...
+ * really ugly but I see no way
+ * around it! Maybe a notify??
+ */
+ asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
+ chk->rec.data.stream_seq;
+ }
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_a_chunk(stcb, chk);
+ } else if (compare_with_wrap(chk->rec.data.stream_seq, seq, MAX_SEQ)) {
+ /*
+ * If the stream_seq is > than the purging
+ * one, we are done
+ */
+ break;
+ }
+ chk = at;
+ }
+ }
+}
+
+
void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
struct sctp_forward_tsn_chunk *fwd, int *abort_flag, struct mbuf *m, int offset)
@@ -5992,13 +6015,14 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
*/
struct sctp_association *asoc;
uint32_t new_cum_tsn, gap;
- unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
+ unsigned int i, fwd_sz, cumack_set_flag, m_size;
+ uint32_t str_seq;
struct sctp_stream_in *strm;
struct sctp_tmit_chunk *chk, *at;
+ struct sctp_queued_to_read *ctl, *sv;
cumack_set_flag = 0;
asoc = &stcb->asoc;
- cnt_gone = 0;
if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
SCTPDBG(SCTP_DEBUG_INDATA1,
"Bad size too small/big fwd-tsn\n");
@@ -6102,6 +6126,14 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
SCTP_TCB_LOCK_ASSERT(stcb);
for (i = 0; i <= gap; i++) {
SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
+ /*
+ * EY if drain is off then every gap-ack is an
+ * nr-gap-ack
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack
+ && SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
+ }
}
/*
* Now after marking all, slide thing forward but no sack
@@ -6152,7 +6184,6 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
}
asoc->size_on_reasm_queue -= chk->send_size;
sctp_ucount_decr(asoc->cnt_on_reasm_queue);
- cnt_gone++;
/* Clear up any stream problem */
if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
@@ -6188,45 +6219,17 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
} else {
/*
* Ok we have gone beyond the end of the
- * fwd-tsn's mark. Some checks...
+ * fwd-tsn's mark.
*/
- if ((asoc->fragmented_delivery_inprogress) &&
- (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
- uint32_t str_seq;
-
- /*
- * Special case PD-API is up and
- * what we fwd-tsn' over includes
- * one that had the LAST_FRAG. We no
- * longer need to do the PD-API.
- */
- asoc->fragmented_delivery_inprogress = 0;
-
- str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
- sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
- stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq, SCTP_SO_NOT_LOCKED);
-
- }
break;
}
chk = at;
}
}
- if (asoc->fragmented_delivery_inprogress) {
- /*
- * Ok we removed cnt_gone chunks in the PD-API queue that
- * were being delivered. So now we must turn off the flag.
- */
- uint32_t str_seq;
-
- str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
- sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
- stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq, SCTP_SO_NOT_LOCKED);
- asoc->fragmented_delivery_inprogress = 0;
- }
- /*************************************************************/
- /* 3. Update the PR-stream re-ordering queues */
- /*************************************************************/
+ /*******************************************************/
+ /* 3. Update the PR-stream re-ordering queues and fix */
+ /* delivery issues as needed. */
+ /*******************************************************/
fwd_sz -= sizeof(*fwd);
if (m && fwd_sz) {
/* New method. */
@@ -6235,6 +6238,7 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
offset += sizeof(*fwd);
+ SCTP_INP_READ_LOCK(stcb->sctp_ep);
num_str = fwd_sz / sizeof(struct sctp_strseq);
for (i = 0; i < num_str; i++) {
uint16_t st;
@@ -6251,11 +6255,49 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
stseq->stream = st;
st = ntohs(stseq->sequence);
stseq->sequence = st;
+
/* now process */
+
+ /*
+ * Ok we now look for the stream/seq on the read
+ * queue where its not all delivered. If we find it
+ * we transmute the read entry into a PDI_ABORTED.
+ */
if (stseq->stream >= asoc->streamincnt) {
/* screwed up streams, stop! */
break;
}
+ if ((asoc->str_of_pdapi == stseq->stream) &&
+ (asoc->ssn_of_pdapi == stseq->sequence)) {
+ /*
+ * If this is the one we were partially
+ * delivering now then we no longer are.
+ * Note this will change with the reassembly
+ * re-write.
+ */
+ asoc->fragmented_delivery_inprogress = 0;
+ }
+ sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
+ TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
+ if ((ctl->sinfo_stream == stseq->stream) &&
+ (ctl->sinfo_ssn == stseq->sequence)) {
+ str_seq = (stseq->stream << 16) | stseq->sequence;
+ ctl->end_added = 1;
+ ctl->pdapi_aborted = 1;
+ sv = stcb->asoc.control_pdapi;
+ stcb->asoc.control_pdapi = ctl;
+ sctp_notify_partial_delivery_indication(stcb,
+ SCTP_PARTIAL_DELIVERY_ABORTED,
+ SCTP_HOLDS_LOCK,
+ str_seq);
+ stcb->asoc.control_pdapi = sv;
+ break;
+ } else if ((ctl->sinfo_stream == stseq->stream) &&
+ (compare_with_wrap(ctl->sinfo_ssn, stseq->sequence, MAX_SEQ))) {
+ /* We are past our victim SSN */
+ break;
+ }
+ }
strm = &asoc->strmin[stseq->stream];
if (compare_with_wrap(stseq->sequence,
strm->last_sequence_delivered, MAX_SEQ)) {
@@ -6267,6 +6309,7 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
/* sa_ignore NO_NULL_CHK */
sctp_kick_prsctp_reorder_queue(stcb, strm);
}
+ SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
}
if (TAILQ_FIRST(&asoc->reasmqueue)) {
/* now lets kick out and check for more fragmented delivery */
@@ -7067,7 +7110,8 @@ sctp_handle_nr_sack_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb,
* fall through here and are
* marked
*/
- tp1->sent = SCTP_DATAGRAM_MARKED;
+ if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
+ tp1->sent = SCTP_DATAGRAM_NR_MARKED;
if (tp1->rec.data.chunk_was_revoked) {
/* deflate the cwnd */
tp1->whoTo->cwnd -= tp1->book_size;
@@ -7079,7 +7123,8 @@ sctp_handle_nr_sack_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb,
* nr_marked
*/
if (all_bit) {
- tp1->sent = SCTP_DATAGRAM_NR_MARKED;
+ if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
+ tp1->sent = SCTP_DATAGRAM_NR_MARKED;
/*
* TAILQ_REMOVE(&asoc
* ->sent_queue,
@@ -7198,7 +7243,8 @@ sctp_handle_nr_sack_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb,
while (tp1) {
if (tp1->rec.data.TSN_seq == j) {
if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
- tp1->sent = SCTP_DATAGRAM_NR_MARKED;
+ if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
+ tp1->sent = SCTP_DATAGRAM_NR_MARKED;
/*
* TAILQ_REMOVE(&asoc
* ->sent_queue,
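The gap computation in the first hunk (deriving nr_gap from nr_tsn and
nr_mapping_array_base_tsn) is the pattern used throughout this file: turn a
TSN into a bit offset within a mapping array, then set that bit. Below is a
self-contained sketch of the pattern, with hypothetical helper names standing
in for the kernel's SCTP_SET_TSN_PRESENT machinery:

#include <stdint.h>

#define MAX_TSN 0xFFFFFFFFU

/*
 * Hypothetical stand-in for SCTP_SET_TSN_PRESENT: set bit 'gap' in a
 * byte-addressed bitmap.
 */
static void
map_set_bit(uint8_t *map, uint32_t gap)
{
	map[gap >> 3] |= (uint8_t)(1 << (gap & 0x07));
}

/*
 * Offset of 'tsn' from 'base_tsn', written with the same explicit
 * wrap branch as the patched hunk. The signed-difference test stands
 * in for compare_with_wrap(tsn, base_tsn, MAX_TSN).
 */
static uint32_t
tsn_gap(uint32_t tsn, uint32_t base_tsn)
{
	if (((int32_t)(tsn - base_tsn)) >= 0)
		return (tsn - base_tsn);
	return ((MAX_TSN - base_tsn) + tsn + 1);
}

Note that because the arithmetic is unsigned modulo 2^32, both branches of
tsn_gap() compute the same value; the explicit branch mirrors the structure
of the kernel code rather than being required for correctness. The patched
code also bounds-checks the resulting gap against
(nr_mapping_array_size << 3) before setting the bit, which is the check the
first hunk corrects.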