summaryrefslogtreecommitdiffstats
path: root/sys/netinet/sctp_output.c
diff options
context:
space:
mode:
Diffstat (limited to 'sys/netinet/sctp_output.c')
-rw-r--r--sys/netinet/sctp_output.c219
1 files changed, 168 insertions, 51 deletions
diff --git a/sys/netinet/sctp_output.c b/sys/netinet/sctp_output.c
index 8eda473..4e683a2 100644
--- a/sys/netinet/sctp_output.c
+++ b/sys/netinet/sctp_output.c
@@ -3326,10 +3326,11 @@ sctp_source_address_selection(struct sctp_inpcb *inp,
#endif
/**
- * Rules: - Find the route if needed, cache if I can. - Look at
- * interface address in route, Is it in the bound list. If so we
- * have the best source. - If not we must rotate amongst the
- * addresses.
+ * Rules:
+ * - Find the route if needed, cache if I can.
+ * - Look at interface address in route, Is it in the bound list. If so we
+ * have the best source.
+ * - If not we must rotate amongst the addresses.
*
* Cavets and issues
*
@@ -4796,6 +4797,9 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
if (stcb->asoc.prsctp_supported == 1) {
pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
+ if (stcb->asoc.idata_supported) {
+ pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
+ }
}
if (stcb->asoc.auth_supported == 1) {
pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
@@ -4807,6 +4811,9 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
if (stcb->asoc.reconfig_supported == 1) {
pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
}
+ if (stcb->asoc.idata_supported) {
+ pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
+ }
if (stcb->asoc.nrsack_supported == 1) {
pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
}
@@ -5951,6 +5958,10 @@ do_a_abort:
if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
((asoc == NULL) && (inp->prsctp_supported == 1))) {
pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
+ if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
+ ((asoc == NULL) && (inp->idata_supported == 1))) {
+ pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
+ }
}
if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
((asoc == NULL) && (inp->auth_supported == 1))) {
@@ -5965,6 +5976,10 @@ do_a_abort:
((asoc == NULL) && (inp->reconfig_supported == 1))) {
pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
}
+ if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
+ ((asoc == NULL) && (inp->idata_supported == 1))) {
+ pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
+ }
if (((asoc != NULL) && (asoc->nrsack_supported == 1)) ||
((asoc == NULL) && (inp->nrsack_supported == 1))) {
pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
@@ -6232,11 +6247,15 @@ sctp_get_frag_point(struct sctp_tcb *stcb,
* we use a larger frag point.
*/
if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
- ovh = SCTP_MED_OVERHEAD;
+ ovh = SCTP_MIN_OVERHEAD;
} else {
- ovh = SCTP_MED_V4_OVERHEAD;
+ ovh = SCTP_MIN_V4_OVERHEAD;
+ }
+ if (stcb->asoc.idata_supported) {
+ ovh += sizeof(struct sctp_idata_chunk);
+ } else {
+ ovh += sizeof(struct sctp_data_chunk);
}
-
if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
siz = asoc->smallest_mtu - ovh;
else
@@ -6361,6 +6380,8 @@ sctp_msg_append(struct sctp_tcb *stcb,
sp->timetolive = srcv->sinfo_timetolive;
sp->ppid = srcv->sinfo_ppid;
sp->context = srcv->sinfo_context;
+ sp->fsn = 0;
+ sp->msg_id = atomic_fetchadd_int(&stcb->asoc.assoc_msg_id, 1);
if (sp->sinfo_flags & SCTP_ADDR_OVER) {
sp->net = net;
atomic_add_int(&sp->net->ref_count, 1);
@@ -7163,8 +7184,9 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb,
struct sctp_association *asoc;
struct sctp_stream_queue_pending *sp;
struct sctp_tmit_chunk *chk;
- struct sctp_data_chunk *dchkh;
- uint32_t to_move, length;
+ struct sctp_data_chunk *dchkh = NULL;
+ struct sctp_idata_chunk *ndchkh = NULL;
+ uint32_t to_move, length, leading;
uint8_t rcv_flags = 0;
uint8_t some_taken;
uint8_t send_lock_up = 0;
@@ -7173,6 +7195,7 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb,
asoc = &stcb->asoc;
one_more_time:
/* sa_ignore FREED_MEMORY */
+ *locked = 0;
sp = TAILQ_FIRST(&strq->outqueue);
if (sp == NULL) {
*locked = 0;
@@ -7184,7 +7207,9 @@ one_more_time:
if (sp) {
goto one_more_time;
}
- if (strq->last_msg_incomplete) {
+ if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_EXPLICIT_EOR) == 0) &&
+ (stcb->asoc.idata_supported == 0) &&
+ (strq->last_msg_incomplete)) {
SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
strq->stream_no,
strq->last_msg_incomplete);
@@ -7248,7 +7273,8 @@ one_more_time:
* sender just finished this but still holds a
* reference
*/
- *locked = 1;
+ if (stcb->asoc.idata_supported == 0)
+ *locked = 1;
*giveup = 1;
to_move = 0;
goto out_of;
@@ -7257,7 +7283,8 @@ one_more_time:
/* is there some to get */
if (sp->length == 0) {
/* no */
- *locked = 1;
+ if (stcb->asoc.idata_supported == 0)
+ *locked = 1;
*giveup = 1;
to_move = 0;
goto out_of;
@@ -7280,7 +7307,8 @@ one_more_time:
}
sp->length = 0;
sp->some_taken = 1;
- *locked = 1;
+ if (stcb->asoc.idata_supported == 0)
+ *locked = 1;
*giveup = 1;
to_move = 0;
goto out_of;
@@ -7342,7 +7370,8 @@ re_look:
}
} else {
/* Nothing to take. */
- if (sp->some_taken) {
+ if ((sp->some_taken) &&
+ (stcb->asoc.idata_supported == 0)) {
*locked = 1;
}
*giveup = 1;
@@ -7461,7 +7490,12 @@ dont_do_it:
} else {
atomic_subtract_int(&sp->length, to_move);
}
- if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
+ if (stcb->asoc.idata_supported == 0) {
+ leading = (int)sizeof(struct sctp_data_chunk);
+ } else {
+ leading = (int)sizeof(struct sctp_idata_chunk);
+ }
+ if (M_LEADINGSPACE(chk->data) < leading) {
/* Not enough room for a chunk header, get some */
struct mbuf *m;
@@ -7502,7 +7536,11 @@ dont_do_it:
M_ALIGN(chk->data, 4);
}
}
- SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_NOWAIT);
+ if (stcb->asoc.idata_supported == 0) {
+ SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_NOWAIT);
+ } else {
+ SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_idata_chunk), M_NOWAIT);
+ }
if (chk->data == NULL) {
/* HELP, TSNH since we assured it would not above? */
#ifdef INVARIANTS
@@ -7515,8 +7553,13 @@ dont_do_it:
to_move = 0;
goto out_of;
}
- sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
- chk->book_size = chk->send_size = (uint16_t) (to_move + sizeof(struct sctp_data_chunk));
+ if (stcb->asoc.idata_supported == 0) {
+ sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
+ chk->book_size = chk->send_size = (uint16_t) (to_move + sizeof(struct sctp_data_chunk));
+ } else {
+ sctp_snd_sb_alloc(stcb, sizeof(struct sctp_idata_chunk));
+ chk->book_size = chk->send_size = (uint16_t) (to_move + sizeof(struct sctp_idata_chunk));
+ }
chk->book_size_scale = 0;
chk->sent = SCTP_DATAGRAM_UNSENT;
@@ -7555,7 +7598,11 @@ dont_do_it:
(uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
chk->rec.data.TSN_seq);
}
- dchkh = mtod(chk->data, struct sctp_data_chunk *);
+ if (stcb->asoc.idata_supported == 0) {
+ dchkh = mtod(chk->data, struct sctp_data_chunk *);
+ } else {
+ ndchkh = mtod(chk->data, struct sctp_idata_chunk *);
+ }
/*
* Put the rest of the things in place now. Size was done earlier in
* previous loop prior to padding.
@@ -7577,14 +7624,29 @@ dont_do_it:
asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
asoc->tsn_out_at++;
#endif
-
- dchkh->ch.chunk_type = SCTP_DATA;
- dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
- dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
- dchkh->dp.stream_id = htons(strq->stream_no);
- dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
- dchkh->dp.protocol_id = chk->rec.data.payloadtype;
- dchkh->ch.chunk_length = htons(chk->send_size);
+ if (stcb->asoc.idata_supported == 0) {
+ dchkh->ch.chunk_type = SCTP_DATA;
+ dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
+ dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
+ dchkh->dp.stream_id = htons((strq->stream_no & 0x0000ffff));
+ dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
+ dchkh->dp.protocol_id = chk->rec.data.payloadtype;
+ dchkh->ch.chunk_length = htons(chk->send_size);
+ } else {
+ ndchkh->ch.chunk_type = SCTP_IDATA;
+ ndchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
+ ndchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
+ ndchkh->dp.stream_id = htons(strq->stream_no);
+ /* WHAT DO WE DO HERE??? */
+ ndchkh->dp.reserved = htons(0);
+ ndchkh->dp.msg_id = htonl(sp->msg_id);
+ if (sp->fsn == 0)
+ ndchkh->dp.protocol_id = chk->rec.data.payloadtype;
+ else
+ ndchkh->dp.fsn = htonl(sp->fsn);
+ sp->fsn++;
+ ndchkh->ch.chunk_length = htons(chk->send_size);
+ }
/* Now advance the chk->send_size by the actual pad needed. */
if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
/* need a pad */
@@ -7640,7 +7702,8 @@ dont_do_it:
stcb->asoc.locked_on_sending = NULL;
} else {
/* more to go, we are locked */
- *locked = 1;
+ if (stcb->asoc.idata_supported == 0)
+ *locked = 1;
}
asoc->chunks_on_out_queue++;
strq->chunks_on_queues++;
@@ -7686,7 +7749,11 @@ sctp_fill_outqueue(struct sctp_tcb *stcb,
break;
}
/* Need an allowance for the data chunk header too */
- goal_mtu -= sizeof(struct sctp_data_chunk);
+ if (stcb->asoc.idata_supported == 0) {
+ goal_mtu -= sizeof(struct sctp_data_chunk);
+ } else {
+ goal_mtu -= sizeof(struct sctp_idata_chunk);
+ }
/* must make even word boundary */
goal_mtu &= 0xfffffffc;
@@ -7797,12 +7864,15 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
{
/**
* Ok this is the generic chunk service queue. we must do the
- * following: - Service the stream queue that is next, moving any
- * message (note I must get a complete message i.e. FIRST/MIDDLE and
- * LAST to the out queue in one pass) and assigning TSN's - Check to
- * see if the cwnd/rwnd allows any output, if so we go ahead and
- * fomulate and send the low level chunks. Making sure to combine
- * any control in the control chunk queue also.
+ * following:
+ * - Service the stream queue that is next, moving any
+ * message (note I must get a complete message i.e. FIRST/MIDDLE and
+ * LAST to the out queue in one pass) and assigning TSN's. This
+ * only applies though if the peer does not support NDATA. For NDATA
+ * chunks it's OK to not send the entire message ;-)
+ * - Check to see if the cwnd/rwnd allows any output, if so we go ahead and
+ * formulate and send the low level chunks. Making sure to combine
+ * any control in the control chunk queue also.
*/
struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
struct mbuf *outchain, *endoutchain;
@@ -10225,7 +10295,13 @@ send_forward_tsn(struct sctp_tcb *stcb,
struct sctp_tmit_chunk *chk;
struct sctp_forward_tsn_chunk *fwdtsn;
uint32_t advance_peer_ack_point;
+ int old;
+ if (asoc->idata_supported) {
+ old = 0;
+ } else {
+ old = 1;
+ }
SCTP_TCB_LOCK_ASSERT(stcb);
TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
@@ -10247,6 +10323,11 @@ send_forward_tsn(struct sctp_tcb *stcb,
}
asoc->fwd_tsn_cnt++;
chk->copy_by_ref = 0;
+ /*
+ * We don't do the old thing here since this is not used on-wire
+ * but only to tell if we are sending a fwd-tsn by the stack during
+ * output. Whether it is an IFORWARD or a FORWARD, it is a fwd-tsn.
+ */
chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
chk->rec.chunk_id.can_take_data = 0;
chk->flags = 0;
@@ -10271,6 +10352,7 @@ sctp_fill_in_rest:
{
struct sctp_tmit_chunk *at, *tp1, *last;
struct sctp_strseq *strseq;
+ struct sctp_strseq_mid *strseq_m;
unsigned int cnt_of_space, i, ovh;
unsigned int space_needed;
unsigned int cnt_of_skipped = 0;
@@ -10287,9 +10369,13 @@ sctp_fill_in_rest:
}
cnt_of_skipped++;
}
- space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
- (cnt_of_skipped * sizeof(struct sctp_strseq)));
-
+ if (old) {
+ space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
+ (cnt_of_skipped * sizeof(struct sctp_strseq)));
+ } else {
+ space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
+ (cnt_of_skipped * sizeof(struct sctp_strseq_mid)));
+ }
cnt_of_space = (unsigned int)M_TRAILINGSPACE(chk->data);
if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
@@ -10318,8 +10404,13 @@ sctp_fill_in_rest:
0xff, 0xff, cnt_of_space,
space_needed);
}
- cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
- cnt_of_skipped /= sizeof(struct sctp_strseq);
+ if (old) {
+ cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
+ cnt_of_skipped /= sizeof(struct sctp_strseq);
+ } else {
+ cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
+ cnt_of_skipped /= sizeof(struct sctp_strseq_mid);
+ }
/*-
* Go through and find the TSN that will be the one
* we report.
@@ -10346,15 +10437,24 @@ sctp_fill_in_rest:
*/
if (last)
advance_peer_ack_point = last->rec.data.TSN_seq;
- space_needed = sizeof(struct sctp_forward_tsn_chunk) +
- cnt_of_skipped * sizeof(struct sctp_strseq);
+ if (old) {
+ space_needed = sizeof(struct sctp_forward_tsn_chunk) +
+ cnt_of_skipped * sizeof(struct sctp_strseq);
+ } else {
+ space_needed = sizeof(struct sctp_forward_tsn_chunk) +
+ cnt_of_skipped * sizeof(struct sctp_strseq_mid);
+ }
}
chk->send_size = space_needed;
/* Setup the chunk */
fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
fwdtsn->ch.chunk_length = htons(chk->send_size);
fwdtsn->ch.chunk_flags = 0;
- fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
+ if (old) {
+ fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
+ } else {
+ fwdtsn->ch.chunk_type = SCTP_IFORWARD_CUM_TSN;
+ }
fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
SCTP_BUF_LEN(chk->data) = chk->send_size;
fwdtsn++;
@@ -10362,7 +10462,11 @@ sctp_fill_in_rest:
* Move pointer to after the fwdtsn and transfer to the
* strseq pointer.
*/
- strseq = (struct sctp_strseq *)fwdtsn;
+ if (old) {
+ strseq = (struct sctp_strseq *)fwdtsn;
+ } else {
+ strseq_m = (struct sctp_strseq_mid *)fwdtsn;
+ }
/*-
* Now populate the strseq list. This is done blindly
* without pulling out duplicate stream info. This is
@@ -10389,9 +10493,16 @@ sctp_fill_in_rest:
if (at->rec.data.TSN_seq == advance_peer_ack_point) {
at->rec.data.fwd_tsn_cnt = 0;
}
- strseq->stream = ntohs(at->rec.data.stream_number);
- strseq->sequence = ntohs(at->rec.data.stream_seq);
- strseq++;
+ if (old) {
+ strseq->stream = ntohs(at->rec.data.stream_number);
+ strseq->sequence = ntohs(at->rec.data.stream_seq);
+ strseq++;
+ } else {
+ strseq_m->stream = ntohs(at->rec.data.stream_number);
+ strseq_m->reserved = ntohs(0);
+ strseq_m->msg_id = ntohl(at->rec.data.stream_seq);
+ strseq_m++;
+ }
at = tp1;
}
}
@@ -12312,6 +12423,8 @@ sctp_copy_it_in(struct sctp_tcb *stcb,
sp->timetolive = srcv->sinfo_timetolive;
sp->ppid = srcv->sinfo_ppid;
sp->context = srcv->sinfo_context;
+ sp->fsn = 0;
+ sp->msg_id = atomic_fetchadd_int(&stcb->asoc.assoc_msg_id, 1);
(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
sp->stream = srcv->sinfo_stream;
@@ -13065,8 +13178,10 @@ skip_preblock:
* interrupt.
*/
strm->last_msg_incomplete = 1;
- asoc->stream_locked = 1;
- asoc->stream_locked_on = srcv->sinfo_stream;
+ if (stcb->asoc.idata_supported == 0) {
+ asoc->stream_locked = 1;
+ asoc->stream_locked_on = srcv->sinfo_stream;
+ }
sp->sender_all_done = 0;
}
sctp_snd_sb_alloc(stcb, sp->length);
@@ -13325,8 +13440,10 @@ skip_preblock:
if (sp) {
if (sp->msg_is_complete == 0) {
strm->last_msg_incomplete = 1;
- asoc->stream_locked = 1;
- asoc->stream_locked_on = srcv->sinfo_stream;
+ if (stcb->asoc.idata_supported == 0) {
+ asoc->stream_locked = 1;
+ asoc->stream_locked_on = srcv->sinfo_stream;
+ }
} else {
sp->sender_all_done = 1;
strm->last_msg_incomplete = 0;
OpenPOWER on IntegriCloud