author    rrs <rrs@FreeBSD.org>  2008-05-20 13:47:46 +0000
committer rrs <rrs@FreeBSD.org>  2008-05-20 13:47:46 +0000
commit    8a663465644dd0d19159348afb69bb2462822ed1 (patch)
tree      2eb09e824f0a51e8694f455f2aa95ba0da7b95d8 /sys
parent    2f427637e0af90ccec0c813c87cd3ae5404cedc3 (diff)
- Adds support for the multi-asconf (from Kozuka-san).
- Adds some prep work (not all yet) for vimage, in particular support for deleting the sctppcbinfo.xx structs. There is still a leak in here if it were to be called, plus we still need the regrouping. (From me and Michael Tuexen)
- Adds support for UDP tunneling. For BSD there is no socket set up yet, so it is disabled, but major argument changes are in here to encompass the passing of the port number (zero when you don't have a UDP tunnel, the default for BSD). Will add some hooks in UDP here shortly (discussed with Robert) that will allow easy tunneling. (Mainly from Peter Lei and Michael Tuexen, with some BSD work from me :-D)
- Some ease for Windows: evidently "leave" is reserved by their compiler, so move the label leave: -> out:.
MFC after: 1 week
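For orientation before reading the diff: the UDP-tunneling part threads a 16-bit port through the input and output paths. When the port is non-zero, the output path switches the IP protocol to IPPROTO_UDP and lays a UDP header (source port from the sctp_udp_tunneling_port sysctl, destination port learned from the peer) in front of the SCTP packet. A minimal stand-alone sketch of that framing follows; it is an illustration only, not the kernel code — the helper name, the host-byte-order port arguments, and the zeroed checksum are assumptions of the sketch.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Illustrative only: place a UDP header in front of an already-built
 * SCTP packet (common header + chunks) for SCTP-over-UDP encapsulation.
 * Ports are taken in host byte order here; the zero checksum simply
 * defers the UDP checksum to whoever transmits the datagram. */
struct encap_udphdr {
	uint16_t uh_sport;
	uint16_t uh_dport;
	uint16_t uh_ulen;
	uint16_t uh_sum;
};

static size_t
encapsulate_sctp(uint8_t *out, const uint8_t *sctp_pkt, size_t sctp_len,
    uint16_t src_port, uint16_t dst_port)
{
	struct encap_udphdr udp;

	udp.uh_sport = htons(src_port);
	udp.uh_dport = htons(dst_port);
	udp.uh_ulen  = htons((uint16_t)(sizeof(udp) + sctp_len));
	udp.uh_sum   = 0;
	memcpy(out, &udp, sizeof(udp));
	memcpy(out + sizeof(udp), sctp_pkt, sctp_len);
	return (sizeof(udp) + sctp_len);
}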
Diffstat (limited to 'sys')
-rw-r--r--  sys/netinet/sctp.h               |   1
-rw-r--r--  sys/netinet/sctp_asconf.c        | 251
-rw-r--r--  sys/netinet/sctp_asconf.h        |   3
-rw-r--r--  sys/netinet/sctp_cc_functions.c  | 432
-rw-r--r--  sys/netinet/sctp_constants.h     |   6
-rw-r--r--  sys/netinet/sctp_indata.c        |   4
-rw-r--r--  sys/netinet/sctp_input.c         | 104
-rw-r--r--  sys/netinet/sctp_input.h         |   2
-rw-r--r--  sys/netinet/sctp_lock_bsd.h      |   8
-rw-r--r--  sys/netinet/sctp_os_bsd.h        |   6
-rw-r--r--  sys/netinet/sctp_output.c        | 658
-rw-r--r--  sys/netinet/sctp_output.h        |   8
-rw-r--r--  sys/netinet/sctp_pcb.c           | 174
-rw-r--r--  sys/netinet/sctp_pcb.h           |   3
-rw-r--r--  sys/netinet/sctp_structs.h       |  18
-rw-r--r--  sys/netinet/sctp_sysctl.c        |  37
-rw-r--r--  sys/netinet/sctp_sysctl.h        |  22
-rw-r--r--  sys/netinet/sctp_timer.c         |  65
-rw-r--r--  sys/netinet/sctp_usrreq.c        |   2
-rw-r--r--  sys/netinet/sctp_var.h           |   2
-rw-r--r--  sys/netinet/sctputil.c           |  40
-rw-r--r--  sys/netinet/sctputil.h           |   7
-rw-r--r--  sys/netinet6/sctp6_usrreq.c      |   6
23 files changed, 1304 insertions, 555 deletions
diff --git a/sys/netinet/sctp.h b/sys/netinet/sctp.h
index 959865e..295204a 100644
--- a/sys/netinet/sctp.h
+++ b/sys/netinet/sctp.h
@@ -472,6 +472,7 @@ __attribute__((packed));
#define SCTP_PCB_FLAGS_NO_FRAGMENT 0x00100000
#define SCTP_PCB_FLAGS_EXPLICIT_EOR 0x00400000
#define SCTP_PCB_FLAGS_NEEDS_MAPPED_V4 0x00800000
+#define SCTP_PCB_FLAGS_MULTIPLE_ASCONFS 0x01000000
/*-
* mobility_features parameters (by micchie).Note
diff --git a/sys/netinet/sctp_asconf.c b/sys/netinet/sctp_asconf.c
index e3bb19f..a1df7e6 100644
--- a/sys/netinet/sctp_asconf.c
+++ b/sys/netinet/sctp_asconf.c
@@ -931,6 +931,56 @@ sctp_asconf_addr_match(struct sctp_asconf_addr *aa, struct sockaddr *sa)
}
/*
+ * does the address match? returns 0 if not, 1 if so
+ */
+static uint32_t
+sctp_addr_match(
+#ifdef INET6
+ struct sctp_ipv6addr_param *v6addr,
+#else
+ struct sctp_ipv4addr_param *v4addr,
+#endif
+ struct sockaddr *sa)
+{
+ uint16_t param_type, param_length;
+
+#ifdef INET6
+ struct sctp_ipv4addr_param *v4addr = (struct sctp_ipv4addr_param *)v6addr;
+
+ if (sa->sa_family == AF_INET6) {
+ /* IPv6 sa address */
+ /* XXX scopeid */
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
+
+ param_type = ntohs(v6addr->ph.param_type);
+ param_length = ntohs(v6addr->ph.param_length);
+
+ if ((param_type == SCTP_IPV6_ADDRESS) &&
+ param_length == sizeof(struct sctp_ipv6addr_param) &&
+ (memcmp(&v6addr->addr, &sin6->sin6_addr,
+ sizeof(struct in6_addr)) == 0)) {
+ return (1);
+ }
+ } else
+#endif /* INET6 */
+ if (sa->sa_family == AF_INET) {
+ /* IPv4 sa address */
+ struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+
+ param_type = ntohs(v4addr->ph.param_type);
+ param_length = ntohs(v4addr->ph.param_length);
+
+ if ((param_type == SCTP_IPV4_ADDRESS) &&
+ param_length == sizeof(struct sctp_ipv4addr_param) &&
+ (memcmp(&v4addr->addr, &sin->sin_addr,
+ sizeof(struct in_addr)) == 0)) {
+ return (1);
+ }
+ }
+ return (0);
+}
+
+/*
* Cleanup for non-responded/OP ERR'd ASCONF
*/
void
@@ -943,7 +993,7 @@ sctp_asconf_cleanup(struct sctp_tcb *stcb, struct sctp_nets *net)
*/
sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, net,
SCTP_FROM_SCTP_ASCONF + SCTP_LOC_2);
- stcb->asoc.asconf_seq_out++;
+ stcb->asoc.asconf_seq_out_acked = stcb->asoc.asconf_seq_out;
/* remove the old ASCONF on our outbound queue */
sctp_toss_old_asconf(stcb);
}
@@ -1259,8 +1309,14 @@ sctp_asconf_queue_mgmt(struct sctp_tcb *stcb, struct sctp_ifa *ifa,
/* address match? */
if (sctp_asconf_addr_match(aa, &ifa->address.sa) == 0)
continue;
- /* is the request already in queue (sent or not) */
- if (aa->ap.aph.ph.param_type == type) {
+ /*
+ * is the request already in queue but not sent? pass the
+ * request already sent in order to resolve the following
+ * case: 1. arrival of ADD, then sent 2. arrival of DEL. we
+ * can't remove the ADD request already sent 3. arrival of
+ * ADD
+ */
+ if (aa->ap.aph.ph.param_type == type && aa->sent == 0) {
return (-1);
}
/* is the negative request already in queue, and not sent */
@@ -1334,31 +1390,21 @@ sctp_asconf_queue_mgmt(struct sctp_tcb *stcb, struct sctp_ifa *ifa,
}
aa->sent = 0; /* clear sent flag */
- /*
- * if we are deleting an address it should go out last otherwise,
- * add it to front of the pending queue
- */
- if (type == SCTP_ADD_IP_ADDRESS) {
- /* add goes to the front of the queue */
- TAILQ_INSERT_HEAD(&stcb->asoc.asconf_queue, aa, next);
- SCTPDBG(SCTP_DEBUG_ASCONF2,
- "asconf_queue_mgmt: inserted asconf ADD_IP_ADDRESS: ");
- SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, sa);
- } else {
- /* delete and set primary goes to the back of the queue */
- TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
#ifdef SCTP_DEBUG
- if (sctp_debug_on && SCTP_DEBUG_ASCONF2) {
- if (type == SCTP_DEL_IP_ADDRESS) {
- SCTP_PRINTF("asconf_queue_mgmt: appended asconf DEL_IP_ADDRESS: ");
- SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, sa);
- } else {
- SCTP_PRINTF("asconf_queue_mgmt: appended asconf SET_PRIM_ADDR: ");
- SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, sa);
- }
+ if (sctp_debug_on && SCTP_DEBUG_ASCONF2) {
+ if (type == SCTP_ADD_IP_ADDRESS) {
+ SCTP_PRINTF("asconf_queue_mgmt: inserted asconf ADD_IP_ADDRESS: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, sa);
+ } else if (type == SCTP_DEL_IP_ADDRESS) {
+ SCTP_PRINTF("asconf_queue_mgmt: appended asconf DEL_IP_ADDRESS: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, sa);
+ } else {
+ SCTP_PRINTF("asconf_queue_mgmt: appended asconf SET_PRIM_ADDR: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, sa);
}
-#endif
}
+#endif
return (0);
}
@@ -1395,12 +1441,15 @@ sctp_asconf_queue_add(struct sctp_tcb *stcb, struct sctp_ifa *ifa,
"asconf_queue_add: mark delete last address pending\n");
return (-1);
}
+ /* queue an asconf parameter */
+ status = sctp_asconf_queue_mgmt(stcb, ifa, type);
+
/*
* if this is an add, and there is a delete also pending (i.e. the
* last local address is being changed), queue the pending delete
* too.
*/
- if ((type == SCTP_ADD_IP_ADDRESS) && stcb->asoc.asconf_del_pending) {
+ if ((type == SCTP_ADD_IP_ADDRESS) && stcb->asoc.asconf_del_pending && (status == 0)) {
/* queue in the pending delete */
if (sctp_asconf_queue_mgmt(stcb,
stcb->asoc.asconf_addr_del_pending,
@@ -1413,10 +1462,7 @@ sctp_asconf_queue_add(struct sctp_tcb *stcb, struct sctp_ifa *ifa,
stcb->asoc.asconf_addr_del_pending = NULL;
}
}
- /* queue an asconf parameter */
- status = sctp_asconf_queue_mgmt(stcb, ifa, type);
-
- if (pending_delete_queued && (status == 0)) {
+ if (pending_delete_queued) {
struct sctp_nets *net;
/*
@@ -1652,6 +1698,8 @@ sctp_asconf_process_param_ack(struct sctp_tcb *stcb,
/* nothing really to do... lists already updated */
break;
case SCTP_SET_PRIM_ADDR:
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_param_ack: set primary IP address\n");
/* nothing to do... peer may start using this addr */
if (flag == 0)
stcb->asoc.peer_supports_asconf = 0;
@@ -1725,23 +1773,17 @@ sctp_handle_asconf_ack(struct mbuf *m, int offset,
*abort_no_unlock = 1;
return;
}
- if (serial_num != asoc->asconf_seq_out) {
+ if (serial_num != asoc->asconf_seq_out_acked + 1) {
/* got a duplicate/unexpected ASCONF-ACK */
SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf_ack: got duplicate/unexpected serial number = %xh (expected = %xh)\n",
- serial_num, asoc->asconf_seq_out);
+ serial_num, asoc->asconf_seq_out_acked + 1);
return;
}
- if (stcb->asoc.asconf_sent == 0) {
- /* got a unexpected ASCONF-ACK for serial not in flight */
- SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf_ack: got serial number = %xh but not in flight\n",
- serial_num);
- /* nothing to do... duplicate ACK received */
- return;
+ if (serial_num == asoc->asconf_seq_out - 1) {
+ /* stop our timer */
+ sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_ASCONF + SCTP_LOC_3);
}
- /* stop our timer */
- sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, net,
- SCTP_FROM_SCTP_ASCONF + SCTP_LOC_3);
-
/* process the ASCONF-ACK contents */
ack_length = ntohs(cp->ch.chunk_length) -
sizeof(struct sctp_asconf_ack_chunk);
@@ -1855,11 +1897,9 @@ sctp_handle_asconf_ack(struct mbuf *m, int offset,
}
/* update the next sequence number to use */
- asoc->asconf_seq_out++;
+ asoc->asconf_seq_out_acked++;
/* remove the old ASCONF on our outbound queue */
sctp_toss_old_asconf(stcb);
- /* clear the sent flag to allow new ASCONFs */
- asoc->asconf_sent = 0;
if (!TAILQ_EMPTY(&stcb->asoc.asconf_queue)) {
#ifdef SCTP_TIMER_BASED_ASCONF
/* we have more params, so restart our timer */
@@ -2379,6 +2419,102 @@ sctp_set_primary_ip_address(struct sctp_ifa *ifa)
} /* for each inp */
}
+int
+sctp_is_addr_pending(struct sctp_tcb *stcb, struct sctp_ifa *sctp_ifa)
+{
+ struct sctp_tmit_chunk *chk, *nchk;
+ unsigned int offset, asconf_limit;
+ struct sctp_asconf_chunk *acp;
+ struct sctp_asconf_paramhdr *aph;
+ uint8_t aparam_buf[SCTP_PARAM_BUFFER_SIZE];
+ struct sctp_ipv6addr_param *p_addr;
+ int add_cnt, del_cnt;
+ uint16_t last_param_type;
+
+ add_cnt = del_cnt = 0;
+ last_param_type = 0;
+ for (chk = TAILQ_FIRST(&stcb->asoc.asconf_send_queue); chk != NULL;
+ chk = nchk) {
+ /* get next chk */
+ nchk = TAILQ_NEXT(chk, sctp_next);
+
+ if (chk->data == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: No mbuf data?\n");
+ continue;
+ }
+ offset = 0;
+ acp = mtod(chk->data, struct sctp_asconf_chunk *);
+ offset += sizeof(struct sctp_asconf_chunk);
+ asconf_limit = ntohs(acp->ch.chunk_length);
+ p_addr = (struct sctp_ipv6addr_param *)sctp_m_getptr(chk->data, offset, sizeof(struct sctp_paramhdr), aparam_buf);
+ if (p_addr == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: couldn't get lookup addr!\n");
+ continue;
+ }
+ offset += ntohs(p_addr->ph.param_length);
+
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(chk->data, offset, sizeof(struct sctp_asconf_paramhdr), aparam_buf);
+ if (aph == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: Empty ASCONF will be sent?\n");
+ continue;
+ }
+ while (aph != NULL) {
+ unsigned int param_length, param_type;
+
+ param_type = ntohs(aph->ph.param_type);
+ param_length = ntohs(aph->ph.param_length);
+ if (offset + param_length > asconf_limit) {
+ /* parameter goes beyond end of chunk! */
+ break;
+ }
+ if (param_length > sizeof(aparam_buf)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: param length (%u) larger than buffer size!\n", param_length);
+ break;
+ }
+ if (param_length <= sizeof(struct sctp_paramhdr)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: param length(%u) too short\n", param_length);
+ break;
+ }
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(chk->data, offset, param_length, aparam_buf);
+ if (aph == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: couldn't get entire param\n");
+ break;
+ }
+ p_addr = (struct sctp_ipv6addr_param *)(aph + 1);
+ if (sctp_addr_match(p_addr, &sctp_ifa->address.sa) != 0) {
+ switch (param_type) {
+ case SCTP_ADD_IP_ADDRESS:
+ add_cnt++;
+ break;
+ case SCTP_DEL_IP_ADDRESS:
+ del_cnt++;
+ break;
+ default:
+ break;
+ }
+ last_param_type = param_type;
+ }
+ offset += SCTP_SIZE32(param_length);
+ if (offset >= asconf_limit) {
+ /* no more data in the mbuf chain */
+ break;
+ }
+ /* get pointer to next asconf param */
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(chk->data, offset, sizeof(struct sctp_asconf_paramhdr), aparam_buf);
+ }
+ }
+
+ /*
+ * we want to find the sequences which consist of ADD -> DEL -> ADD
+ * or DEL -> ADD
+ */
+ if (add_cnt > del_cnt ||
+ (add_cnt == del_cnt && last_param_type == SCTP_ADD_IP_ADDRESS)) {
+ return 1;
+ }
+ return 0;
+}
+
static struct sockaddr *
sctp_find_valid_localaddr(struct sctp_tcb *stcb, int addr_locked)
{
@@ -2414,7 +2550,8 @@ sctp_find_valid_localaddr(struct sctp_tcb *stcb, int addr_locked)
IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))
continue;
- if (sctp_is_addr_restricted(stcb, sctp_ifa))
+ if (sctp_is_addr_restricted(stcb, sctp_ifa) &&
+ (!sctp_is_addr_pending(stcb, sctp_ifa)))
continue;
/* found a valid local v4 address to use */
if (addr_locked == SCTP_ADDR_NOT_LOCKED)
@@ -2439,6 +2576,9 @@ sctp_find_valid_localaddr(struct sctp_tcb *stcb, int addr_locked)
IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))
continue;
+ if (sctp_is_addr_restricted(stcb, sctp_ifa) &&
+ (!sctp_is_addr_pending(stcb, sctp_ifa)))
+ continue;
/* found a valid local v6 address to use */
if (addr_locked == SCTP_ADDR_NOT_LOCKED)
SCTP_IPI_ADDR_RUNLOCK();
@@ -2462,7 +2602,8 @@ sctp_find_valid_localaddr_ep(struct sctp_tcb *stcb)
continue;
}
/* is the address restricted ? */
- if (sctp_is_addr_restricted(stcb, laddr->ifa))
+ if (sctp_is_addr_restricted(stcb, laddr->ifa) &&
+ (!sctp_is_addr_pending(stcb, laddr->ifa)))
continue;
/* found a valid local address to use */
@@ -2490,13 +2631,13 @@ sctp_compose_asconf(struct sctp_tcb *stcb, int *retlen, int addr_locked)
uint8_t lookup_used = 0;
/* are there any asconf params to send? */
- if (TAILQ_EMPTY(&stcb->asoc.asconf_queue)) {
- return (NULL);
+ TAILQ_FOREACH(aa, &stcb->asoc.asconf_queue, next) {
+ if (aa->sent == 0)
+ break;
}
- /* can't send a new one if there is one in flight already */
- if (stcb->asoc.asconf_sent > 0) {
+ if (aa == NULL)
return (NULL);
- }
+
/*
* get a chunk header mbuf and a cluster for the asconf params since
* it's simpler to fill in the asconf chunk header lookup address on
@@ -2529,9 +2670,12 @@ sctp_compose_asconf(struct sctp_tcb *stcb, int *retlen, int addr_locked)
acp->ch.chunk_type = SCTP_ASCONF;
acp->ch.chunk_flags = 0;
acp->serial_number = htonl(stcb->asoc.asconf_seq_out);
+ stcb->asoc.asconf_seq_out++;
/* add parameters... up to smallest MTU allowed */
TAILQ_FOREACH(aa, &stcb->asoc.asconf_queue, next) {
+ if (aa->sent)
+ continue;
/* get the parameter length */
p_length = SCTP_SIZE32(aa->ap.aph.ph.param_length);
/* will it fit in current chunk? */
@@ -2647,9 +2791,6 @@ sctp_compose_asconf(struct sctp_tcb *stcb, int *retlen, int addr_locked)
*retlen = SCTP_BUF_LEN(m_asconf_chk) + SCTP_BUF_LEN(m_asconf);
acp->ch.chunk_length = ntohs(*retlen);
- /* update "sent" flag */
- stcb->asoc.asconf_sent++;
-
return (m_asconf_chk);
}
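The net effect of the sctp_asconf.c changes above: the single asconf_sent in-flight flag is gone. sctp_compose_asconf() now picks up any queued-but-unsent parameters, stamps the chunk with asconf_seq_out and increments it, and the ACK handler tracks a separate asconf_seq_out_acked counter, stopping the timer only once the most recently sent serial is acknowledged. A rough sketch of that bookkeeping, using illustrative names rather than the real sctp_association fields:

#include <stdbool.h>
#include <stdint.h>

struct asconf_counters {
	uint32_t seq_out;        /* next serial number to place in an ASCONF  */
	uint32_t seq_out_acked;  /* highest serial number acknowledged so far */
};

/* An ASCONF-ACK is processed only when it acknowledges the next
 * unacknowledged serial; anything else is a duplicate or unexpected. */
static bool
ack_in_order(const struct asconf_counters *c, uint32_t serial)
{
	return (serial == c->seq_out_acked + 1);
}

/* The retransmission timer is stopped only when the ACK covers the
 * most recently sent ASCONF (serial seq_out - 1), i.e. nothing is
 * left outstanding. */
static bool
ack_covers_last_sent(const struct asconf_counters *c, uint32_t serial)
{
	return (serial == c->seq_out - 1);
}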
diff --git a/sys/netinet/sctp_asconf.h b/sys/netinet/sctp_asconf.h
index ca57c00..12f1281 100644
--- a/sys/netinet/sctp_asconf.h
+++ b/sys/netinet/sctp_asconf.h
@@ -86,6 +86,9 @@ extern void
extern void
sctp_net_immediate_retrans(struct sctp_tcb *, struct sctp_nets *);
+extern int
+ sctp_is_addr_pending(struct sctp_tcb *, struct sctp_ifa *);
+
#endif /* _KERNEL */
#endif /* !_NETINET_SCTP_ASCONF_H_ */
diff --git a/sys/netinet/sctp_cc_functions.c b/sys/netinet/sctp_cc_functions.c
index 3e22a54..a4ada89 100644
--- a/sys/netinet/sctp_cc_functions.c
+++ b/sys/netinet/sctp_cc_functions.c
@@ -49,13 +49,10 @@ sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
{
/*
* We take the max of the burst limit times a MTU or the
- * INITIAL_CWND. We then limit this to 4 MTU's of sending.
+ * INITIAL_CWND. We then limit this to 4 MTU's of sending. cwnd must
+ * be at least 2 MTU.
*/
net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
- /* we always get at LEAST 2 MTU's */
- if (net->cwnd < (2 * net->mtu)) {
- net->cwnd = 2 * net->mtu;
- }
net->ssthresh = stcb->asoc.peers_rwnd;
if (sctp_logging_level & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
@@ -277,8 +274,7 @@ sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
/* If the cumulative ack moved we can proceed */
if (net->cwnd <= net->ssthresh) {
/* We are in slow start */
- if (net->flight_size + net->net_ack >=
- net->cwnd) {
+ if (net->flight_size + net->net_ack >= net->cwnd) {
if (net->net_ack > (net->mtu * sctp_L2_abc_variable)) {
net->cwnd += (net->mtu * sctp_L2_abc_variable);
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
@@ -293,10 +289,6 @@ sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
}
}
} else {
- unsigned int dif;
-
- dif = net->cwnd - (net->flight_size +
- net->net_ack);
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, net->net_ack,
SCTP_CWND_LOG_NOADV_SS);
@@ -307,8 +299,7 @@ sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
/*
* Add to pba
*/
- net->partial_bytes_acked +=
- net->net_ack;
+ net->partial_bytes_acked += net->net_ack;
if ((net->flight_size + net->net_ack >= net->cwnd) &&
(net->partial_bytes_acked >= net->cwnd)) {
@@ -352,23 +343,182 @@ skip_cwnd_update:
}
void
-sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
- struct sctp_nets *net)
+sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net)
{
int old_cwnd = net->cwnd;
- net->ssthresh = net->cwnd >> 1;
- if (net->ssthresh < (net->mtu << 1)) {
- net->ssthresh = (net->mtu << 1);
- }
+ net->ssthresh = max(net->cwnd / 2, 2 * net->mtu);
net->cwnd = net->mtu;
- /* floor of 1 mtu */
- if (net->cwnd < net->mtu)
- net->cwnd = net->mtu;
+ net->partial_bytes_acked = 0;
+
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
}
- net->partial_bytes_acked = 0;
+}
+
+void
+sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ int old_cwnd = net->cwnd;
+
+ SCTP_STAT_INCR(sctps_ecnereducedcwnd);
+ net->ssthresh = net->cwnd / 2;
+ if (net->ssthresh < net->mtu) {
+ net->ssthresh = net->mtu;
+ /* here back off the timer as well, to slow us down */
+ net->RTO <<= 1;
+ }
+ net->cwnd = net->ssthresh;
+ if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
+ }
+}
+
+void
+sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
+ struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
+ uint32_t * bottle_bw, uint32_t * on_queue)
+{
+ uint32_t bw_avail;
+ int rtt, incr;
+ int old_cwnd = net->cwnd;
+
+ /* need real RTT for this calc */
+ rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
+ /* get bottle neck bw */
+ *bottle_bw = ntohl(cp->bottle_bw);
+ /* and whats on queue */
+ *on_queue = ntohl(cp->current_onq);
+ /*
+ * adjust the on-queue if our flight is more it could be that the
+ * router has not yet gotten data "in-flight" to it
+ */
+ if (*on_queue < net->flight_size)
+ *on_queue = net->flight_size;
+ /* calculate the available space */
+ bw_avail = (*bottle_bw * rtt) / 1000;
+ if (bw_avail > *bottle_bw) {
+ /*
+ * Cap the growth to no more than the bottle neck. This can
+ * happen as RTT slides up due to queues. It also means if
+ * you have more than a 1 second RTT with a empty queue you
+ * will be limited to the bottle_bw per second no matter if
+ * other points have 1/2 the RTT and you could get more
+ * out...
+ */
+ bw_avail = *bottle_bw;
+ }
+ if (*on_queue > bw_avail) {
+ /*
+ * No room for anything else don't allow anything else to be
+ * "added to the fire".
+ */
+ int seg_inflight, seg_onqueue, my_portion;
+
+ net->partial_bytes_acked = 0;
+
+ /* how much are we over queue size? */
+ incr = *on_queue - bw_avail;
+ if (stcb->asoc.seen_a_sack_this_pkt) {
+ /*
+ * undo any cwnd adjustment that the sack might have
+ * made
+ */
+ net->cwnd = net->prev_cwnd;
+ }
+ /* Now how much of that is mine? */
+ seg_inflight = net->flight_size / net->mtu;
+ seg_onqueue = *on_queue / net->mtu;
+ my_portion = (incr * seg_inflight) / seg_onqueue;
+
+ /* Have I made an adjustment already */
+ if (net->cwnd > net->flight_size) {
+ /*
+ * for this flight I made an adjustment we need to
+ * decrease the portion by a share our previous
+ * adjustment.
+ */
+ int diff_adj;
+
+ diff_adj = net->cwnd - net->flight_size;
+ if (diff_adj > my_portion)
+ my_portion = 0;
+ else
+ my_portion -= diff_adj;
+ }
+ /*
+ * back down to the previous cwnd (assume we have had a sack
+ * before this packet). minus what ever portion of the
+ * overage is my fault.
+ */
+ net->cwnd -= my_portion;
+
+ /* we will NOT back down more than 1 MTU */
+ if (net->cwnd <= net->mtu) {
+ net->cwnd = net->mtu;
+ }
+ /* force into CA */
+ net->ssthresh = net->cwnd - 1;
+ } else {
+ /*
+ * Take 1/4 of the space left or max burst up .. whichever
+ * is less.
+ */
+ incr = min((bw_avail - *on_queue) >> 2,
+ stcb->asoc.max_burst * net->mtu);
+ net->cwnd += incr;
+ }
+ if (net->cwnd > bw_avail) {
+ /* We can't exceed the pipe size */
+ net->cwnd = bw_avail;
+ }
+ if (net->cwnd < net->mtu) {
+ /* We always have 1 MTU */
+ net->cwnd = net->mtu;
+ }
+ if (net->cwnd - old_cwnd != 0) {
+ /* log only changes */
+ if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
+ SCTP_CWND_LOG_FROM_SAT);
+ }
+ }
+}
+
+void
+sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
+ struct sctp_nets *net, int burst_limit)
+{
+ int old_cwnd = net->cwnd;
+
+ if (net->ssthresh < net->cwnd)
+ net->ssthresh = net->cwnd;
+ net->cwnd = (net->flight_size + (burst_limit * net->mtu));
+
+ if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
+ }
+}
+
+void
+sctp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ int old_cwnd = net->cwnd;
+
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
+ /*
+ * make a small adjustment to cwnd and force to CA.
+ */
+ if (net->cwnd > net->mtu)
+ /* drop down one MTU after sending */
+ net->cwnd -= net->mtu;
+ if (net->cwnd < net->ssthresh)
+ /* still in SS move to CA */
+ net->ssthresh = net->cwnd - 1;
+ if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
+ }
}
struct sctp_hs_raise_drop {
@@ -741,16 +891,11 @@ sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
/* If the cumulative ack moved we can proceed */
if (net->cwnd <= net->ssthresh) {
/* We are in slow start */
- if (net->flight_size + net->net_ack >=
- net->cwnd) {
+ if (net->flight_size + net->net_ack >= net->cwnd) {
sctp_hs_cwnd_increase(stcb, net);
} else {
- unsigned int dif;
-
- dif = net->cwnd - (net->flight_size +
- net->net_ack);
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, net->net_ack,
SCTP_CWND_LOG_NOADV_SS);
@@ -758,50 +903,20 @@ sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
}
} else {
/* We are in congestion avoidance */
- if (net->flight_size + net->net_ack >=
- net->cwnd) {
- /*
- * add to pba only if we had a
- * cwnd's worth (or so) in flight OR
- * the burst limit was applied.
- */
- net->partial_bytes_acked +=
- net->net_ack;
-
- /*
- * Do we need to increase (if pba is
- * > cwnd)?
- */
- if (net->partial_bytes_acked >=
- net->cwnd) {
- if (net->cwnd <
- net->partial_bytes_acked) {
- net->partial_bytes_acked -=
- net->cwnd;
- } else {
- net->partial_bytes_acked =
- 0;
- }
- net->cwnd += net->mtu;
- if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
- sctp_log_cwnd(stcb, net, net->mtu,
- SCTP_CWND_LOG_FROM_CA);
- }
- } else {
- if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
- sctp_log_cwnd(stcb, net, net->net_ack,
- SCTP_CWND_LOG_NOADV_CA);
- }
+ net->partial_bytes_acked += net->net_ack;
+ if ((net->flight_size + net->net_ack >= net->cwnd) &&
+ (net->partial_bytes_acked >= net->cwnd)) {
+ net->partial_bytes_acked -= net->cwnd;
+ net->cwnd += net->mtu;
+ if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->mtu,
+ SCTP_CWND_LOG_FROM_CA);
}
} else {
- unsigned int dif;
-
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, net->net_ack,
SCTP_CWND_LOG_NOADV_CA);
}
- dif = net->cwnd - (net->flight_size +
- net->net_ack);
}
}
} else {
@@ -830,176 +945,6 @@ skip_cwnd_update:
}
}
-void
-sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
- struct sctp_nets *net)
-{
- int old_cwnd;
-
- old_cwnd = net->cwnd;
-
- SCTP_STAT_INCR(sctps_ecnereducedcwnd);
- net->ssthresh = net->cwnd / 2;
- if (net->ssthresh < net->mtu) {
- net->ssthresh = net->mtu;
- /* here back off the timer as well, to slow us down */
- net->RTO <<= 1;
- }
- net->cwnd = net->ssthresh;
- if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
- sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
- }
-}
-
-void
-sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
- struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
- uint32_t * bottle_bw, uint32_t * on_queue)
-{
- uint32_t bw_avail;
- int rtt, incr;
- int old_cwnd = net->cwnd;
-
- /* need real RTT for this calc */
- rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
- /* get bottle neck bw */
- *bottle_bw = ntohl(cp->bottle_bw);
- /* and whats on queue */
- *on_queue = ntohl(cp->current_onq);
- /*
- * adjust the on-queue if our flight is more it could be that the
- * router has not yet gotten data "in-flight" to it
- */
- if (*on_queue < net->flight_size)
- *on_queue = net->flight_size;
- /* calculate the available space */
- bw_avail = (*bottle_bw * rtt) / 1000;
- if (bw_avail > *bottle_bw) {
- /*
- * Cap the growth to no more than the bottle neck. This can
- * happen as RTT slides up due to queues. It also means if
- * you have more than a 1 second RTT with a empty queue you
- * will be limited to the bottle_bw per second no matter if
- * other points have 1/2 the RTT and you could get more
- * out...
- */
- bw_avail = *bottle_bw;
- }
- if (*on_queue > bw_avail) {
- /*
- * No room for anything else don't allow anything else to be
- * "added to the fire".
- */
- int seg_inflight, seg_onqueue, my_portion;
-
- net->partial_bytes_acked = 0;
-
- /* how much are we over queue size? */
- incr = *on_queue - bw_avail;
- if (stcb->asoc.seen_a_sack_this_pkt) {
- /*
- * undo any cwnd adjustment that the sack might have
- * made
- */
- net->cwnd = net->prev_cwnd;
- }
- /* Now how much of that is mine? */
- seg_inflight = net->flight_size / net->mtu;
- seg_onqueue = *on_queue / net->mtu;
- my_portion = (incr * seg_inflight) / seg_onqueue;
-
- /* Have I made an adjustment already */
- if (net->cwnd > net->flight_size) {
- /*
- * for this flight I made an adjustment we need to
- * decrease the portion by a share our previous
- * adjustment.
- */
- int diff_adj;
-
- diff_adj = net->cwnd - net->flight_size;
- if (diff_adj > my_portion)
- my_portion = 0;
- else
- my_portion -= diff_adj;
- }
- /*
- * back down to the previous cwnd (assume we have had a sack
- * before this packet). minus what ever portion of the
- * overage is my fault.
- */
- net->cwnd -= my_portion;
-
- /* we will NOT back down more than 1 MTU */
- if (net->cwnd <= net->mtu) {
- net->cwnd = net->mtu;
- }
- /* force into CA */
- net->ssthresh = net->cwnd - 1;
- } else {
- /*
- * Take 1/4 of the space left or max burst up .. whichever
- * is less.
- */
- incr = min((bw_avail - *on_queue) >> 2,
- stcb->asoc.max_burst * net->mtu);
- net->cwnd += incr;
- }
- if (net->cwnd > bw_avail) {
- /* We can't exceed the pipe size */
- net->cwnd = bw_avail;
- }
- if (net->cwnd < net->mtu) {
- /* We always have 1 MTU */
- net->cwnd = net->mtu;
- }
- if (net->cwnd - old_cwnd != 0) {
- /* log only changes */
- if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
- sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
- SCTP_CWND_LOG_FROM_SAT);
- }
- }
-}
-
-void
-sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
- struct sctp_nets *net, int burst_limit)
-{
- int old_cwnd;
-
- if (net->ssthresh < net->cwnd)
- net->ssthresh = net->cwnd;
- old_cwnd = net->cwnd;
- net->cwnd = (net->flight_size + (burst_limit * net->mtu));
-
- if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
- sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
- }
-}
-
-void
-sctp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
- struct sctp_tcb *stcb, struct sctp_nets *net)
-{
- int old_cwnd;
-
- old_cwnd = net->cwnd;
-
- sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
- /*
- * make a small adjustment to cwnd and force to CA.
- */
- if (net->cwnd > net->mtu)
- /* drop down one MTU after sending */
- net->cwnd -= net->mtu;
- if (net->cwnd < net->ssthresh)
- /* still in SS move to CA */
- net->ssthresh = net->cwnd - 1;
- if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
- sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
- }
-}
/*
* H-TCP congestion control. The algorithm is detailed in:
@@ -1220,10 +1165,6 @@ htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
}
}
} else {
- unsigned int dif;
-
- dif = net->cwnd - (net->flight_size +
- net->net_ack);
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, net->net_ack,
SCTP_CWND_LOG_NOADV_SS);
@@ -1289,10 +1230,6 @@ sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
* INITIAL_CWND. We then limit this to 4 MTU's of sending.
*/
net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
- /* we always get at LEAST 2 MTU's */
- if (net->cwnd < (2 * net->mtu)) {
- net->cwnd = 2 * net->mtu;
- }
net->ssthresh = stcb->asoc.peers_rwnd;
htcp_init(stcb, net);
@@ -1549,13 +1486,10 @@ sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
htcp_reset(&net->htcp_ca);
net->ssthresh = htcp_recalc_ssthresh(stcb, net);
net->cwnd = net->mtu;
- /* floor of 1 mtu */
- if (net->cwnd < net->mtu)
- net->cwnd = net->mtu;
+ net->partial_bytes_acked = 0;
if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
}
- net->partial_bytes_acked = 0;
}
void
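The congestion-control edits above mostly fold the separate floor checks into single expressions: the initial cwnd becomes min(4*MTU, max(2*MTU, SCTP_INITIAL_CWND)), and ssthresh after a retransmission timeout becomes max(cwnd/2, 2*MTU) with cwnd itself reset to one MTU. A small stand-alone check of the arithmetic; the 4380-byte figure is the RFC 4960 initial-window constant and is assumed here to match SCTP_INITIAL_CWND:

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Initial congestion window: min(4*MTU, max(2*MTU, 4380 bytes)). */
static uint32_t
initial_cwnd(uint32_t mtu)
{
	return MIN(4 * mtu, MAX(2 * mtu, 4380));
}

/* ssthresh after a retransmission timeout: half the old cwnd,
 * floored at two MTUs (cwnd itself drops back to a single MTU). */
static uint32_t
ssthresh_after_rto(uint32_t cwnd, uint32_t mtu)
{
	return MAX(cwnd / 2, 2 * mtu);
}

int
main(void)
{
	printf("initial cwnd, MTU 1500:                %u\n", initial_cwnd(1500));             /* 4380  */
	printf("ssthresh after RTO, cwnd 20000/MTU 1500: %u\n", ssthresh_after_rto(20000, 1500)); /* 10000 */
	return (0);
}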
diff --git a/sys/netinet/sctp_constants.h b/sys/netinet/sctp_constants.h
index 268ff68..9f628b8 100644
--- a/sys/netinet/sctp_constants.h
+++ b/sys/netinet/sctp_constants.h
@@ -36,6 +36,9 @@ __FBSDID("$FreeBSD$");
#ifndef __sctp_constants_h__
#define __sctp_constants_h__
+/* IANA assigned port number for SCTP over UDP encapsulation */
+#define SCTP_OVER_UDP_TUNNELING_PORT 9899
+
/* Number of packets to get before sack sent by default */
#define SCTP_DEFAULT_SACK_FREQ 2
@@ -268,6 +271,9 @@ __FBSDID("$FreeBSD$");
#define SCTP_DEFAULT_AUTO_ASCONF 1
#endif
+/* default MULTIPLE_ASCONF mode enable(1)/disable(0) value (sysctl) */
+#define SCTP_DEFAULT_MULTIPLE_ASCONFS 0
+
/* default MOBILITY_BASE mode enable(1)/disable(0) value (sysctl) */
#if defined (__APPLE__) && !defined(SCTP_APPLE_MOBILITY_BASE)
#define SCTP_DEFAULT_MOBILITY_BASE 0
diff --git a/sys/netinet/sctp_indata.c b/sys/netinet/sctp_indata.c
index bb74b7b..424d54c 100644
--- a/sys/netinet/sctp_indata.c
+++ b/sys/netinet/sctp_indata.c
@@ -2620,7 +2620,7 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
}
stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
sctp_abort_association(inp, stcb, m, iphlen, sh,
- op_err, 0);
+ op_err, 0, net->port);
return (2);
}
#ifdef SCTP_AUDITING_ENABLED
@@ -2683,7 +2683,7 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
struct mbuf *op_err;
op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
- sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0);
+ sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
return (2);
}
break;
diff --git a/sys/netinet/sctp_input.c b/sys/netinet/sctp_input.c
index 4c515f6..694d8ca 100644
--- a/sys/netinet/sctp_input.c
+++ b/sys/netinet/sctp_input.c
@@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_timer.h>
+#include <netinet/udp.h>
@@ -79,7 +80,7 @@ sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
static void
sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
- struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
+ struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id, uint16_t port)
{
struct sctp_init *init;
struct mbuf *op_err;
@@ -113,7 +114,7 @@ sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
* state :-)
*/
sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
- vrf_id);
+ vrf_id, port);
if (stcb)
*abort_no_unlock = 1;
goto outnow;
@@ -122,7 +123,7 @@ sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
/* Invalid length */
op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
- vrf_id);
+ vrf_id, port);
if (stcb)
*abort_no_unlock = 1;
goto outnow;
@@ -132,7 +133,7 @@ sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
/* protocol error... send abort */
op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
- vrf_id);
+ vrf_id, port);
if (stcb)
*abort_no_unlock = 1;
goto outnow;
@@ -141,7 +142,7 @@ sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
/* invalid parameter... send abort */
op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
- vrf_id);
+ vrf_id, port);
if (stcb)
*abort_no_unlock = 1;
goto outnow;
@@ -150,7 +151,7 @@ sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
/* protocol error... send abort */
op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
- vrf_id);
+ vrf_id, port);
if (stcb)
*abort_no_unlock = 1;
goto outnow;
@@ -159,7 +160,7 @@ sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
/* protocol error... send abort */
op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
- vrf_id);
+ vrf_id, port);
if (stcb)
*abort_no_unlock = 1;
goto outnow;
@@ -168,14 +169,14 @@ sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
init_limit)) {
/* auth parameter(s) error... send abort */
- sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id);
+ sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id, port);
if (stcb)
*abort_no_unlock = 1;
goto outnow;
}
/* send an INIT-ACK w/cookie */
SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
- sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id,
+ sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id, port,
((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED));
outnow:
if (stcb == NULL) {
@@ -422,7 +423,7 @@ sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
"Load addresses from INIT causes an abort %d\n",
retval);
sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
- NULL, 0);
+ NULL, 0, net->port);
*abort_no_unlock = 1;
return (-1);
}
@@ -497,7 +498,7 @@ sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
mp->resv = 0;
}
sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
- sh, op_err, 0);
+ sh, op_err, 0, net->port);
*abort_no_unlock = 1;
}
return (retval);
@@ -1105,7 +1106,7 @@ sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
/* Invalid length */
op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
- op_err, 0);
+ op_err, 0, net->port);
*abort_no_unlock = 1;
return (-1);
}
@@ -1115,7 +1116,7 @@ sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
/* protocol error... send an abort */
op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
- op_err, 0);
+ op_err, 0, net->port);
*abort_no_unlock = 1;
return (-1);
}
@@ -1123,7 +1124,7 @@ sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
/* protocol error... send an abort */
op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
- op_err, 0);
+ op_err, 0, net->port);
*abort_no_unlock = 1;
return (-1);
}
@@ -1131,7 +1132,7 @@ sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
/* protocol error... send an abort */
op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
- op_err, 0);
+ op_err, 0, net->port);
*abort_no_unlock = 1;
return (-1);
}
@@ -1139,7 +1140,7 @@ sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
/* protocol error... send an abort */
op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
- op_err, 0);
+ op_err, 0, net->port);
*abort_no_unlock = 1;
return (-1);
}
@@ -1269,7 +1270,7 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
ph->param_length = htons(sizeof(struct sctp_paramhdr));
sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
- vrf_id);
+ vrf_id, net->port);
if (how_indx < sizeof(asoc->cookie_how))
asoc->cookie_how[how_indx] = 2;
return (NULL);
@@ -1646,6 +1647,7 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
ntohs(initack_cp->init.num_outbound_streams);
asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
+ asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
asoc->last_cwr_tsn = asoc->init_seq_number - 1;
asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
@@ -1749,7 +1751,7 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
struct sctp_inpcb *inp, struct sctp_nets **netp,
struct sockaddr *init_src, int *notification,
int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
- uint32_t vrf_id)
+ uint32_t vrf_id, uint16_t port)
{
struct sctp_tcb *stcb;
struct sctp_init_chunk *init_cp, init_buf;
@@ -1841,7 +1843,7 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
- sh, op_err, vrf_id);
+ sh, op_err, vrf_id, port);
return (NULL);
}
/* get the correct sctp_nets */
@@ -1867,7 +1869,7 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
atomic_add_int(&stcb->asoc.refcnt, 1);
op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
- sh, op_err, vrf_id);
+ sh, op_err, vrf_id, port);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
SCTP_TCB_UNLOCK(stcb);
SCTP_SOCKET_LOCK(so, 1);
@@ -1888,6 +1890,7 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
+ asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
asoc->last_cwr_tsn = asoc->init_seq_number - 1;
asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
asoc->str_reset_seq_in = asoc->init_seq_number;
@@ -2095,7 +2098,7 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
- struct sctp_tcb **locked_tcb, uint32_t vrf_id)
+ struct sctp_tcb **locked_tcb, uint32_t vrf_id, uint16_t port)
{
struct sctp_state_cookie *cookie;
struct sockaddr_in6 sin6;
@@ -2329,7 +2332,7 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
tim = now.tv_usec - cookie->time_entered.tv_usec;
scm->time_usec = htonl(tim);
sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
- vrf_id);
+ vrf_id, port);
return (NULL);
}
/*
@@ -2409,7 +2412,7 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
/* this is the "normal" case... get a new TCB */
*stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie,
cookie_len, *inp_p, netp, to, &notification,
- auth_skipped, auth_offset, auth_len, vrf_id);
+ auth_skipped, auth_offset, auth_len, vrf_id, port);
} else {
/* this is abnormal... cookie-echo on existing TCB */
had_a_existing_tcb = 1;
@@ -2489,7 +2492,7 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
sctp_abort_association(*inp_p, NULL, m, iphlen,
- sh, op_err, vrf_id);
+ sh, op_err, vrf_id, port);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
pcb_so = SCTP_INP_SO(*inp_p);
atomic_add_int(&(*stcb)->asoc.refcnt, 1);
@@ -3814,7 +3817,7 @@ __attribute__((noinline))
sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
- uint32_t vrf_id)
+ uint32_t vrf_id, uint16_t port)
{
struct sctp_association *asoc;
uint32_t vtag_in;
@@ -3968,7 +3971,7 @@ __attribute__((noinline))
if (stcb == NULL) {
/* no association, so it's out of the blue... */
sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
- vrf_id);
+ vrf_id, port);
*offset = length;
if (locked_tcb) {
SCTP_TCB_UNLOCK(locked_tcb);
@@ -4005,7 +4008,7 @@ __attribute__((noinline))
SCTP_TCB_UNLOCK(locked_tcb);
}
sctp_handle_ootb(m, iphlen, *offset, sh, inp,
- NULL, vrf_id);
+ NULL, vrf_id, port);
return (NULL);
}
} else {
@@ -4182,7 +4185,7 @@ process_control_chunks:
if (netp) {
sctp_handle_init(m, iphlen, *offset, sh,
(struct sctp_init_chunk *)ch, inp,
- stcb, *netp, &abort_no_unlock, vrf_id);
+ stcb, *netp, &abort_no_unlock, vrf_id, port);
}
if (abort_no_unlock)
return (NULL);
@@ -4448,7 +4451,7 @@ process_control_chunks:
htons(sizeof(struct sctp_paramhdr));
}
sctp_abort_association(inp, stcb, m,
- iphlen, sh, oper, vrf_id);
+ iphlen, sh, oper, vrf_id, port);
}
*offset = length;
return (NULL);
@@ -4475,7 +4478,8 @@ process_control_chunks:
auth_offset,
auth_len,
&locked_tcb,
- vrf_id);
+ vrf_id,
+ port);
} else {
ret_buf = NULL;
}
@@ -4984,7 +4988,7 @@ void
sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
- uint8_t ecn_bits, uint32_t vrf_id)
+ uint8_t ecn_bits, uint32_t vrf_id, uint16_t port)
{
/*
* Control chunk processing
@@ -5020,7 +5024,7 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
*/
SCTP_TCB_UNLOCK(stcb);
sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
- vrf_id);
+ vrf_id, port);
goto out_now;
}
}
@@ -5028,7 +5032,7 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
/* process the control portion of the SCTP packet */
/* sa_ignore NO_NULL_CHK */
stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
- inp, stcb, &net, &fwd_tsn_seen, vrf_id);
+ inp, stcb, &net, &fwd_tsn_seen, vrf_id, port);
if (stcb) {
/*
* This covers us if the cookie-echo was there and
@@ -5058,7 +5062,7 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
if (stcb == NULL) {
/* out of the blue DATA chunk */
sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
- vrf_id);
+ vrf_id, port);
goto out_now;
}
if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
@@ -5126,7 +5130,7 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
* We consider OOTB any data sent during asoc setup.
*/
sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
- vrf_id);
+ vrf_id, port);
SCTP_TCB_UNLOCK(stcb);
goto out_now;
/* sa_ignore NOTREACHED */
@@ -5221,12 +5225,11 @@ out_now:
}
-
void
-sctp_input(i_pak, off)
+sctp_input_with_port(i_pak, off, port)
struct mbuf *i_pak;
int off;
-
+ uint16_t port;
{
#ifdef SCTP_MBUF_LOGGING
struct mbuf *mat;
@@ -5329,6 +5332,12 @@ sctp_input(i_pak, off)
offset - sizeof(*ch),
sh, ch, &inp, &net,
vrf_id);
+ if ((net) && (port)) {
+ if (net->port == 0) {
+ sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
+ }
+ net->port = port;
+ }
if ((inp) && (stcb)) {
sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
@@ -5357,6 +5366,12 @@ sctp_skip_csum_4:
*/
stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
sh, ch, &inp, &net, vrf_id);
+ if ((net) && (port)) {
+ if (net->port == 0) {
+ sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
+ }
+ net->port = port;
+ }
/* inp's ref-count increased && stcb locked */
if (inp == NULL) {
struct sctp_init_chunk *init_chk, chunk_buf;
@@ -5386,14 +5401,14 @@ sctp_skip_csum_4:
sh->v_tag = init_chk->init.initiate_tag;
}
if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
- sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id);
+ sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
goto bad;
}
if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
goto bad;
}
if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
- sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id);
+ sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id, port);
goto bad;
} else if (stcb == NULL) {
refcount_up = 1;
@@ -5420,7 +5435,7 @@ sctp_skip_csum_4:
/* sa_ignore NO_NULL_CHK */
sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
- inp, stcb, net, ecn_bits, vrf_id);
+ inp, stcb, net, ecn_bits, vrf_id, port);
/* inp's ref-count reduced && stcb unlocked */
if (m) {
sctp_m_freem(m);
@@ -5443,3 +5458,10 @@ bad:
}
return;
}
+void
+sctp_input(i_pak, off)
+ struct mbuf *i_pak;
+ int off;
+{
+ sctp_input_with_port(i_pak, off, 0);
+}
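The final hunk above makes sctp_input_with_port() the real entry point and keeps sctp_input() as a thin wrapper passing port 0. When a packet arrives through the UDP tunnel for a destination not previously marked as tunneled, the cached path MTU for that net is shrunk by the size of a UDP header. The space left for chunks then works out roughly as below — an illustration assuming a 1500-byte Ethernet MTU and option-less IPv4 headers, not kernel code:

#include <stdint.h>
#include <stdio.h>

#define IP_HDR_LEN    20 /* struct ip without options */
#define UDP_HDR_LEN    8 /* struct udphdr             */
#define SCTP_HDR_LEN  12 /* SCTP common header        */

int
main(void)
{
	uint32_t link_mtu = 1500;
	uint32_t native   = link_mtu - IP_HDR_LEN - SCTP_HDR_LEN;  /* room for chunks  */
	uint32_t tunneled = native - UDP_HDR_LEN;                  /* minus UDP header */

	printf("chunk space, native SCTP:   %u bytes\n", native);    /* 1468 */
	printf("chunk space, SCTP-over-UDP: %u bytes\n", tunneled);  /* 1460 */
	return (0);
}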
diff --git a/sys/netinet/sctp_input.h b/sys/netinet/sctp_input.h
index 01a67c6..2a28970 100644
--- a/sys/netinet/sctp_input.h
+++ b/sys/netinet/sctp_input.h
@@ -40,7 +40,7 @@ __FBSDID("$FreeBSD$");
void
sctp_common_input_processing(struct mbuf **, int, int, int,
struct sctphdr *, struct sctp_chunkhdr *, struct sctp_inpcb *,
- struct sctp_tcb *, struct sctp_nets *, uint8_t, uint32_t);
+ struct sctp_tcb *, struct sctp_nets *, uint8_t, uint32_t, uint16_t);
struct sctp_stream_reset_out_request *
sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq,
diff --git a/sys/netinet/sctp_lock_bsd.h b/sys/netinet/sctp_lock_bsd.h
index eeaca5d..95f3ed4 100644
--- a/sys/netinet/sctp_lock_bsd.h
+++ b/sys/netinet/sctp_lock_bsd.h
@@ -83,10 +83,10 @@ extern int sctp_logoff_stuff;
#define SCTP_STATLOG_DESTROY()
#define SCTP_INP_INFO_LOCK_DESTROY() do { \
- if(rw_wowned(sctppcbinfo.ipi_ep_mtx)) { \
+ if(rw_wowned(&sctppcbinfo.ipi_ep_mtx)) { \
rw_wunlock(&sctppcbinfo.ipi_ep_mtx); \
} \
- rw_destroy(sctppcbinfo.ipi_ep_mtx); \
+ rw_destroy(&sctppcbinfo.ipi_ep_mtx); \
} while (0)
#define SCTP_INP_INFO_LOCK_INIT() \
@@ -111,10 +111,10 @@ extern int sctp_logoff_stuff;
rw_init(&sctppcbinfo.ipi_addr_mtx, "sctp-addr")
#define SCTP_IPI_ADDR_DESTROY() do { \
- if(rw_wowned(sctppcbinfo.ipi_addr_mtx)) { \
+ if(rw_wowned(&sctppcbinfo.ipi_addr_mtx)) { \
rw_wunlock(&sctppcbinfo.ipi_addr_mtx); \
} \
- rw_destroy(&sctppcbinfo.ipi_addr_mtx) \
+ rw_destroy(&sctppcbinfo.ipi_addr_mtx); \
} while (0)
diff --git a/sys/netinet/sctp_os_bsd.h b/sys/netinet/sctp_os_bsd.h
index 01c0fcb..2d01952 100644
--- a/sys/netinet/sctp_os_bsd.h
+++ b/sys/netinet/sctp_os_bsd.h
@@ -202,6 +202,7 @@ MALLOC_DECLARE(SCTP_M_SOCKOPT);
#define SCTP_INIT_VRF_TABLEID(vrf)
#define SCTP_IFN_IS_IFT_LOOP(ifn) ((ifn)->ifn_type == IFT_LOOP)
+#define SCTP_ROUTE_IS_REAL_LOOP(ro) ((ro)->ro_rt && (ro)->ro_rt->rt_ifa && (ro)->ro_rt->rt_ifa->ifa_ifp && (ro)->ro_rt->rt_ifa->ifa_ifp->if_type == IFT_LOOP)
/*
* Access to IFN's to help with src-addr-selection
@@ -234,6 +235,7 @@ MALLOC_DECLARE(SCTP_M_SOCKOPT);
* zone allocation functions
*/
#include <vm/uma.h>
+
/* SCTP_ZONE_INIT: initialize the zone */
typedef struct uma_zone *sctp_zone_t;
@@ -244,6 +246,8 @@ typedef struct uma_zone *sctp_zone_t;
uma_zone_set_max(zone, number); \
}
+#define SCTP_ZONE_DESTROY(zone) uma_zdestroy(zone)
+
/* SCTP_ZONE_GET: allocate element from the zone */
#define SCTP_ZONE_GET(zone, type) \
(type *)uma_zalloc(zone, M_NOWAIT);
@@ -251,6 +255,7 @@ typedef struct uma_zone *sctp_zone_t;
/* SCTP_ZONE_FREE: free element from the zone */
#define SCTP_ZONE_FREE(zone, element) \
uma_zfree(zone, element);
+
#define SCTP_HASH_INIT(size, hashmark) hashinit_flags(size, M_PCB, hashmark, HASH_NOWAIT)
#define SCTP_HASH_FREE(table, hashmark) hashdestroy(table, M_PCB, hashmark)
@@ -262,6 +267,7 @@ typedef struct uma_zone *sctp_zone_t;
#include <sys/callout.h>
typedef struct callout sctp_os_timer_t;
+
#define SCTP_OS_TIMER_INIT(tmr) callout_init(tmr, 1)
#define SCTP_OS_TIMER_START callout_reset
#define SCTP_OS_TIMER_STOP callout_stop
diff --git a/sys/netinet/sctp_output.c b/sys/netinet/sctp_output.c
index 8fa1846..3272ab8 100644
--- a/sys/netinet/sctp_output.c
+++ b/sys/netinet/sctp_output.c
@@ -49,6 +49,7 @@ __FBSDID("$FreeBSD$");
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
+#include <netinet/udp.h>
@@ -2211,12 +2212,21 @@ sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
/* Ok the address may be ok */
if (fam == AF_INET6) {
- /* ok to use deprecated addresses? */
+ /* ok to use deprecated addresses? no lets not! */
if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
return (NULL);
}
- if (ifa->src_is_priv) {
+ if (ifa->src_is_priv && ifa->src_is_loop) {
+ /*
+ * don't allow fe80::1 to be a src on loop ::1, we
+ * don't list it to the peer so we will get an
+ * abort.
+ */
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2a\n");
+ return (NULL);
+ }
+ if (ifa->src_is_priv && !ifa->src_is_loop) {
if (dest_is_loop) {
SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
return (NULL);
@@ -2532,8 +2542,11 @@ sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
if (sifa == NULL)
continue;
- if ((non_asoc_addr_ok == 0) &&
- (sctp_is_addr_restricted(stcb, sifa))) {
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
/* on the no-no list */
continue;
}
@@ -2549,8 +2562,11 @@ sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
if (sifa == NULL)
continue;
- if ((non_asoc_addr_ok == 0) &&
- (sctp_is_addr_restricted(stcb, sifa))) {
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
/* on the no-no list */
continue;
}
@@ -2584,8 +2600,11 @@ sctp_from_the_top:
sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
if (sifa == NULL)
continue;
- if ((non_asoc_addr_ok == 0) &&
- (sctp_is_addr_restricted(stcb, sifa))) {
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
/* on the no-no list */
continue;
}
@@ -2620,8 +2639,11 @@ sctp_from_the_top2:
dest_is_priv, fam);
if (sifa == NULL)
continue;
- if ((non_asoc_addr_ok == 0) &&
- (sctp_is_addr_restricted(stcb, sifa))) {
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
/* on the no-no list */
continue;
}
@@ -2650,6 +2672,14 @@ sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
struct sctp_ifa *ifa, *sifa;
int num_eligible_addr = 0;
+#ifdef INET6
+ struct sockaddr_in6 sin6, lsa6;
+
+ if (fam == AF_INET6) {
+ memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
+ (void)sa6_recoverscope(&sin6);
+ }
+#endif /* INET6 */
LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
(non_asoc_addr_ok == 0))
@@ -2658,6 +2688,29 @@ sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
dest_is_priv, fam);
if (sifa == NULL)
continue;
+#ifdef INET6
+ if (fam == AF_INET6 &&
+ IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
+ IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
+ /*
+ * link-local <-> link-local must belong to the same
+ * scope.
+ */
+ memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
+ (void)sa6_recoverscope(&lsa6);
+ if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
+ continue;
+ }
+ if (dest_is_loop) {
+ /*
+ * we don't give out fe80::1, we must use
+ * ::1
+ */
+ continue;
+ }
+ }
+#endif /* INET6 */
+
/*
* Check if the IPv6 address matches to next-hop. In the
* mobile case, old IPv6 address may be not deleted from the
@@ -2682,8 +2735,11 @@ sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
}
}
if (stcb) {
- if ((non_asoc_addr_ok == 0) &&
- sctp_is_addr_restricted(stcb, sifa)) {
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
/*
* It is restricted for some reason..
* probably not yet added.
@@ -2722,8 +2778,11 @@ sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
continue;
}
if (stcb) {
- if ((non_asoc_addr_ok == 0) &&
- sctp_is_addr_restricted(stcb, sifa)) {
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
/*
* It is restricted for some reason..
* probably not yet added.
@@ -2899,8 +2958,11 @@ bound_all_plan_b:
if (sifa == NULL)
continue;
if (stcb) {
- if ((non_asoc_addr_ok == 0) &&
- sctp_is_addr_restricted(stcb, sifa)) {
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
/*
* It is restricted for some reason..
* probably not yet added.
@@ -2938,8 +3000,11 @@ plan_d:
if (sifa == NULL)
continue;
if (stcb) {
- if ((non_asoc_addr_ok == 0) &&
- sctp_is_addr_restricted(stcb, sifa)) {
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
/*
* It is restricted for some
* reason.. probably not yet added.
@@ -3056,25 +3121,26 @@ sctp_source_address_selection(struct sctp_inpcb *inp,
switch (fam) {
case AF_INET:
/* Scope based on outbound address */
- if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
- dest_is_priv = 1;
- } else if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
+ if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
dest_is_loop = 1;
if (net != NULL) {
/* mark it as local */
net->addr_is_local = 1;
}
+ } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
+ dest_is_priv = 1;
}
break;
#ifdef INET6
case AF_INET6:
/* Scope based on outbound address */
- if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
+ if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
+ SCTP_ROUTE_IS_REAL_LOOP(ro)) {
/*
- * If the route goes to the loopback address OR the
- * address is a loopback address, we are loopback
- * scope. But we don't use dest_is_priv (link local
- * addresses).
+ * If the address is a loopback address, which
+ * consists of "::1" OR "fe80::1%lo0", we are
+ * loopback scope. But we don't use dest_is_priv
+ * (link local addresses).
*/
dest_is_loop = 1;
if (net != NULL) {
@@ -3345,6 +3411,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
int ecn_ok,
struct sctp_tmit_chunk *chk,
int out_of_asoc_ok,
+ uint16_t port,
int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
SCTP_UNUSED
@@ -3371,6 +3438,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
int ret;
uint32_t vrf_id;
sctp_route_t *ro = NULL;
+ struct udphdr *udp;
if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
@@ -3411,15 +3479,25 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
sctp_route_t iproute;
uint8_t tos_value;
- newm = sctp_get_mbuf_for_msg(sizeof(struct ip), 1, M_DONTWAIT, 1, MT_DATA);
+ if (port) {
+ newm = sctp_get_mbuf_for_msg(sizeof(struct ip) + sizeof(struct udphdr), 1, M_DONTWAIT, 1, MT_DATA);
+ } else {
+ newm = sctp_get_mbuf_for_msg(sizeof(struct ip), 1, M_DONTWAIT, 1, MT_DATA);
+ }
if (newm == NULL) {
sctp_m_freem(m);
SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
return (ENOMEM);
}
- SCTP_ALIGN_TO_END(newm, sizeof(struct ip));
- SCTP_BUF_LEN(newm) = sizeof(struct ip);
- packet_length += sizeof(struct ip);
+ if (port) {
+ SCTP_ALIGN_TO_END(newm, sizeof(struct ip) + sizeof(struct udphdr));
+ SCTP_BUF_LEN(newm) = sizeof(struct ip) + sizeof(struct udphdr);
+ packet_length += sizeof(struct ip) + sizeof(struct udphdr);
+ } else {
+ SCTP_ALIGN_TO_END(newm, sizeof(struct ip));
+ SCTP_BUF_LEN(newm) = sizeof(struct ip);
+ packet_length += sizeof(struct ip);
+ }
SCTP_BUF_NEXT(newm) = m;
m = newm;
ip = mtod(m, struct ip *);
@@ -3430,7 +3508,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
} else {
tos_value = inp->ip_inp.inp.inp_ip_tos;
}
- if (nofragment_flag) {
+ if ((nofragment_flag) && (port == 0)) {
#if defined(WITH_CONVERT_IP_OFF) || defined(__FreeBSD__) || defined(__APPLE__)
ip->ip_off = IP_DF;
#else
@@ -3456,7 +3534,11 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
/* no association at all */
ip->ip_tos = (tos_value & 0xfc);
}
- ip->ip_p = IPPROTO_SCTP;
+ if (port) {
+ ip->ip_p = IPPROTO_UDP;
+ } else {
+ ip->ip_p = IPPROTO_SCTP;
+ }
ip->ip_sum = 0;
if (net == NULL) {
ro = &iproute;
@@ -3469,7 +3551,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
/* call the routine to select the src address */
- if (net) {
+ if (net && out_of_asoc_ok == 0) {
if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
sctp_free_ifa(net->ro._s_addr);
net->ro._s_addr = NULL;
@@ -3480,13 +3562,9 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
}
}
if (net->src_addr_selected == 0) {
- if (out_of_asoc_ok) {
- /* do not cache */
- goto temp_v4_src;
- }
/* Cache the source address */
net->ro._s_addr = sctp_source_address_selection(inp, stcb,
- ro, net, out_of_asoc_ok,
+ ro, net, 0,
vrf_id);
net->src_addr_selected = 1;
}
@@ -3499,7 +3577,6 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
} else {
struct sctp_ifa *_lsrc;
- temp_v4_src:
_lsrc = sctp_source_address_selection(inp, stcb, ro,
net,
out_of_asoc_ok,
@@ -3510,7 +3587,13 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
ip->ip_src = _lsrc->address.sin.sin_addr;
sctp_free_ifa(_lsrc);
}
-
+ if (port) {
+ udp = (struct udphdr *)(ip + 1);
+ udp->uh_sport = htons(sctp_udp_tunneling_port);
+ udp->uh_dport = port;
+ udp->uh_ulen = htons(packet_length - sizeof(struct ip));
+ udp->uh_sum = 0;
+ }
/*
* If source address selection fails and we find no route
* then the ip_output should fail as well with a
@@ -3685,15 +3768,25 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
}
- newm = sctp_get_mbuf_for_msg(sizeof(struct ip6_hdr), 1, M_DONTWAIT, 1, MT_DATA);
+ if (port) {
+ newm = sctp_get_mbuf_for_msg(sizeof(struct ip6_hdr) + sizeof(struct udphdr), 1, M_DONTWAIT, 1, MT_DATA);
+ } else {
+ newm = sctp_get_mbuf_for_msg(sizeof(struct ip6_hdr), 1, M_DONTWAIT, 1, MT_DATA);
+ }
if (newm == NULL) {
sctp_m_freem(m);
SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
return (ENOMEM);
}
- SCTP_ALIGN_TO_END(newm, sizeof(struct ip6_hdr));
- SCTP_BUF_LEN(newm) = sizeof(struct ip6_hdr);
- packet_length += sizeof(struct ip6_hdr);
+ if (port) {
+ SCTP_ALIGN_TO_END(newm, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
+ SCTP_BUF_LEN(newm) = sizeof(struct ip6_hdr) + sizeof(struct udphdr);
+ packet_length += sizeof(struct ip6_hdr) + sizeof(struct udphdr);
+ } else {
+ SCTP_ALIGN_TO_END(newm, sizeof(struct ip6_hdr));
+ SCTP_BUF_LEN(newm) = sizeof(struct ip6_hdr);
+ packet_length += sizeof(struct ip6_hdr);
+ }
SCTP_BUF_NEXT(newm) = m;
m = newm;
@@ -3735,7 +3828,11 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
}
ip6h->ip6_flow = htonl(((tosTop << 24) | ((tosBottom | flowTop) << 16) | flowBottom));
- ip6h->ip6_nxt = IPPROTO_SCTP;
+ if (port) {
+ ip6h->ip6_nxt = IPPROTO_UDP;
+ } else {
+ ip6h->ip6_nxt = IPPROTO_SCTP;
+ }
ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
ip6h->ip6_dst = sin6->sin6_addr;
@@ -3748,7 +3845,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
lsa6_tmp.sin6_family = AF_INET6;
lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
lsa6 = &lsa6_tmp;
- if (net) {
+ if (net && out_of_asoc_ok == 0) {
if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
sctp_free_ifa(net->ro._s_addr);
net->ro._s_addr = NULL;
@@ -3759,17 +3856,20 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
}
}
if (net->src_addr_selected == 0) {
- if (out_of_asoc_ok) {
- /* do not cache */
- goto temp_v6_src;
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ /* KAME hack: embed scopeid */
+ if (sa6_embedscope(sin6, ip6_use_defzone) != 0) {
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ return (EINVAL);
}
/* Cache the source address */
net->ro._s_addr = sctp_source_address_selection(inp,
stcb,
ro,
net,
- out_of_asoc_ok,
+ 0,
vrf_id);
+ (void)sa6_recoverscope(sin6);
net->src_addr_selected = 1;
}
if (net->ro._s_addr == NULL) {
@@ -3781,11 +3881,17 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
} else {
struct sctp_ifa *_lsrc;
- temp_v6_src:
+ sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
+ /* KAME hack: embed scopeid */
+ if (sa6_embedscope(sin6, ip6_use_defzone) != 0) {
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ return (EINVAL);
+ }
_lsrc = sctp_source_address_selection(inp, stcb, ro,
net,
out_of_asoc_ok,
vrf_id);
+ (void)sa6_recoverscope(sin6);
if (_lsrc == NULL) {
goto no_route;
}
@@ -3809,6 +3915,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
bzero(&lsa6_storage, sizeof(lsa6_storage));
lsa6_storage.sin6_family = AF_INET6;
lsa6_storage.sin6_len = sizeof(lsa6_storage);
+ lsa6_storage.sin6_addr = lsa6->sin6_addr;
if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
sctp_m_freem(m);
@@ -3820,6 +3927,13 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
lsa6 = &lsa6_storage;
ip6h->ip6_src = lsa6->sin6_addr;
+ if (port) {
+ udp = (struct udphdr *)(ip6h + 1);
+ udp->uh_sport = htons(sctp_udp_tunneling_port);
+ udp->uh_dport = port;
+ udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
+ udp->uh_sum = 0;
+ }
/*
* We set the hop limit now since there is a good chance
* that our ro pointer is now filled
@@ -4208,7 +4322,7 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
ret = sctp_lowlevel_chunk_output(inp, stcb, net,
(struct sockaddr *)&net->ro._l_addr,
- m, 0, NULL, 0, 0, NULL, 0, so_locked);
+ m, 0, NULL, 0, 0, NULL, 0, net->port, so_locked);
SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
@@ -4629,8 +4743,8 @@ sctp_are_there_new_addresses(struct sctp_association *asoc,
#ifdef INET6
if (sa->sa_family == AF_INET6) {
sa6 = (struct sockaddr_in6 *)sa;
- if (SCTP6_ARE_ADDR_EQUAL(&sa6->sin6_addr,
- &sin6.sin6_addr)) {
+ if (SCTP6_ARE_ADDR_EQUAL(sa6,
+ &sin6)) {
fnd = 1;
break;
}
@@ -4699,7 +4813,7 @@ sctp_are_there_new_addresses(struct sctp_association *asoc,
if (sa->sa_family == AF_INET6) {
sa6 = (struct sockaddr_in6 *)sa;
if (SCTP6_ARE_ADDR_EQUAL(
- &sa6->sin6_addr, &sin6.sin6_addr)) {
+ sa6, &sin6)) {
fnd = 1;
break;
}
@@ -4726,7 +4840,7 @@ sctp_are_there_new_addresses(struct sctp_association *asoc,
void
sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh,
- struct sctp_init_chunk *init_chk, uint32_t vrf_id, int hold_inp_lock)
+ struct sctp_init_chunk *init_chk, uint32_t vrf_id, uint16_t port, int hold_inp_lock)
{
struct sctp_association *asoc;
struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last;
@@ -4774,7 +4888,7 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
* though we even set the T bit and copy in the 0 tag.. this
* looks no different than if no listener was present.
*/
- sctp_send_abort(init_pkt, iphlen, sh, 0, NULL, vrf_id);
+ sctp_send_abort(init_pkt, iphlen, sh, 0, NULL, vrf_id, port);
return;
}
abort_flag = 0;
@@ -4783,7 +4897,7 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
&abort_flag, (struct sctp_chunkhdr *)init_chk);
if (abort_flag) {
sctp_send_abort(init_pkt, iphlen, sh,
- init_chk->init.initiate_tag, op_err, vrf_id);
+ init_chk->init.initiate_tag, op_err, vrf_id, port);
return;
}
m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
@@ -4926,6 +5040,13 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
stc.addr_type = SCTP_IPV6_ADDRESS;
stc.scope_id = 0;
if (sctp_is_address_on_local_host((struct sockaddr *)sin6, vrf_id)) {
+ /*
+ * FIX ME: does this have scope from
+ * rcvif?
+ */
+ (void)sa6_recoverscope(sin6);
+ stc.scope_id = sin6->sin6_scope_id;
+ sa6_embedscope(sin6, ip6_use_defzone);
stc.loopback_scope = 1;
stc.local_scope = 0;
stc.site_scope = 1;
@@ -4960,9 +5081,8 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
* rcvif?
*/
(void)sa6_recoverscope(sin6);
-
- sa6_embedscope(sin6, ip6_use_defzone);
stc.scope_id = sin6->sin6_scope_id;
+ sa6_embedscope(sin6, ip6_use_defzone);
} else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
/*
* If the new destination is
@@ -5374,7 +5494,7 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
p_len += padval;
}
(void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
- NULL, 0, SCTP_SO_NOT_LOCKED);
+ NULL, 0, port, SCTP_SO_NOT_LOCKED);
SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}
@@ -6243,15 +6363,23 @@ sctp_toss_old_asconf(struct sctp_tcb *stcb)
{
struct sctp_association *asoc;
struct sctp_tmit_chunk *chk, *chk_tmp;
+ struct sctp_asconf_chunk *acp;
asoc = &stcb->asoc;
- for (chk = TAILQ_FIRST(&asoc->control_send_queue); chk != NULL;
+ for (chk = TAILQ_FIRST(&asoc->asconf_send_queue); chk != NULL;
chk = chk_tmp) {
/* get next chk */
chk_tmp = TAILQ_NEXT(chk, sctp_next);
- /* find SCTP_ASCONF chunk in queue (only one ever in queue) */
+ /* find SCTP_ASCONF chunk in queue */
if (chk->rec.chunk_id.id == SCTP_ASCONF) {
- TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
+ if (chk->data) {
+ acp = mtod(chk->data, struct sctp_asconf_chunk *);
+ if (compare_with_wrap(ntohl(acp->serial_number), stcb->asoc.asconf_seq_out_acked, MAX_SEQ)) {
+ /* Not Acked yet */
+ break;
+ }
+ }
+ TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
if (chk->data) {
sctp_m_freem(chk->data);
chk->data = NULL;
@@ -7191,6 +7319,7 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
/* Nothing to possible to send? */
if (TAILQ_EMPTY(&asoc->control_send_queue) &&
+ TAILQ_EMPTY(&asoc->asconf_send_queue) &&
TAILQ_EMPTY(&asoc->send_queue) &&
TAILQ_EMPTY(&asoc->out_wheel)) {
*reason_code = 9;
@@ -7305,12 +7434,15 @@ skip_the_fill_from_streams:
/* now service each destination and send out what we can for it */
/* Nothing to send? */
if ((TAILQ_FIRST(&asoc->control_send_queue) == NULL) &&
+ (TAILQ_FIRST(&asoc->asconf_send_queue) == NULL) &&
(TAILQ_FIRST(&asoc->send_queue) == NULL)) {
*reason_code = 8;
return (0);
}
if (no_data_chunks) {
- chk = TAILQ_FIRST(&asoc->control_send_queue);
+ chk = TAILQ_FIRST(&asoc->asconf_send_queue);
+ if (chk == NULL)
+ chk = TAILQ_FIRST(&asoc->control_send_queue);
} else {
chk = TAILQ_FIRST(&asoc->send_queue);
}
@@ -7390,6 +7522,194 @@ again_one_more_time:
r_mtu = mtu;
}
/************************/
+ /* ASCONF transmission */
+ /************************/
+ /* Now first lets go through the asconf queue */
+ for (chk = TAILQ_FIRST(&asoc->asconf_send_queue);
+ chk; chk = nchk) {
+ nchk = TAILQ_NEXT(chk, sctp_next);
+ if (chk->rec.chunk_id.id != SCTP_ASCONF) {
+ continue;
+ }
+ if (chk->whoTo != net) {
+ /*
+ * No, not sent to the network we are
+ * looking at
+ */
+ break;
+ }
+ if (chk->data == NULL) {
+ break;
+ }
+ if (chk->sent != SCTP_DATAGRAM_UNSENT &&
+ chk->sent != SCTP_DATAGRAM_RESEND) {
+ break;
+ }
+ /*
+ * if no AUTH is yet included and this chunk
+ * requires it, make sure to account for it. We
+ * don't apply the size until the AUTH chunk is
+ * actually added below in case there is no room for
+ * this chunk. NOTE: we overload the use of "omtu"
+ * here
+ */
+ if ((auth == NULL) &&
+ sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
+ stcb->asoc.peer_auth_chunks)) {
+ omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+ } else
+ omtu = 0;
+ /* Here we do NOT factor the r_mtu */
+ if ((chk->send_size < (int)(mtu - omtu)) ||
+ (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
+ /*
+ * We probably should glom the mbuf chain
+ * from the chk->data for control but the
+ * problem is it becomes yet one more level
+ * of tracking to do if for some reason
+ * output fails. Then I have got to
+ * reconstruct the merged control chain.. el
+ * yucko.. for now we take the easy way and
+ * do the copy
+ */
+ /*
+ * Add an AUTH chunk, if chunk requires it
+ * save the offset into the chain for AUTH
+ */
+ if ((auth == NULL) &&
+ (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
+ stcb->asoc.peer_auth_chunks))) {
+ outchain = sctp_add_auth_chunk(outchain,
+ &endoutchain,
+ &auth,
+ &auth_offset,
+ stcb,
+ chk->rec.chunk_id.id);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ }
+ outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
+ (int)chk->rec.chunk_id.can_take_data,
+ chk->send_size, chk->copy_by_ref);
+ if (outchain == NULL) {
+ *reason_code = 8;
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ /* update our MTU size */
+ if (mtu > (chk->send_size + omtu))
+ mtu -= (chk->send_size + omtu);
+ else
+ mtu = 0;
+ to_out += (chk->send_size + omtu);
+ /* Do clear IP_DF ? */
+ if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
+ no_fragmentflg = 0;
+ }
+ if (chk->rec.chunk_id.can_take_data)
+ chk->data = NULL;
+ /*
+ * set hb flag since we can use these for
+ * RTO
+ */
+ hbflag = 1;
+ asconf = 1;
+ /*
+ * should sysctl this: don't bundle data
+ * with ASCONF since it requires AUTH
+ */
+ no_data_chunks = 1;
+ chk->sent = SCTP_DATAGRAM_SENT;
+ chk->snd_count++;
+ if (mtu == 0) {
+ /*
+ * Ok we are out of room but we can
+					 * output without affecting the
+ * flight size since this little guy
+ * is a control only packet.
+ */
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
+ /*
+ * do NOT clear the asconf flag as
+ * it is used to do appropriate
+ * source address selection.
+ */
+ SCTP_BUF_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT);
+ if (outchain == NULL) {
+ /* no memory */
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
+ error = ENOBUFS;
+ *reason_code = 7;
+ continue;
+ }
+ shdr = mtod(outchain, struct sctphdr *);
+ shdr->src_port = inp->sctp_lport;
+ shdr->dest_port = stcb->rport;
+ shdr->v_tag = htonl(stcb->asoc.peer_vtag);
+ shdr->checksum = 0;
+ auth_offset += sizeof(struct sctphdr);
+ if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
+ (struct sockaddr *)&net->ro._l_addr,
+ outchain, auth_offset, auth,
+ no_fragmentflg, 0, NULL, asconf, net->port, so_locked))) {
+ if (error == ENOBUFS) {
+ asoc->ifp_had_enobuf = 1;
+ SCTP_STAT_INCR(sctps_lowlevelerr);
+ }
+ if (from_where == 0) {
+ SCTP_STAT_INCR(sctps_lowlevelerrusr);
+ }
+ if (*now_filled == 0) {
+ (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
+ *now_filled = 1;
+ *now = net->last_sent_time;
+ } else {
+ net->last_sent_time = *now;
+ }
+ hbflag = 0;
+ /* error, could not output */
+ if (error == EHOSTUNREACH) {
+ /*
+ * Destination went
+ * unreachable
+ * during this send
+ */
+ sctp_move_to_an_alt(stcb, asoc, net);
+ }
+ *reason_code = 7;
+ continue;
+ } else
+ asoc->ifp_had_enobuf = 0;
+ if (*now_filled == 0) {
+ (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
+ *now_filled = 1;
+ *now = net->last_sent_time;
+ } else {
+ net->last_sent_time = *now;
+ }
+ hbflag = 0;
+ /*
+ * increase the number we sent, if a
+ * cookie is sent we don't tell them
+ * any was sent out.
+ */
+ outchain = endoutchain = NULL;
+ auth = NULL;
+ auth_offset = 0;
+ if (!no_out_cnt)
+ *num_out += ctl_cnt;
+ /* recalc a clean slate and setup */
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ mtu = (net->mtu - SCTP_MIN_OVERHEAD);
+ } else {
+ mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
+ }
+ to_out = 0;
+ no_fragmentflg = 1;
+ }
+ }
+ }
+ /************************/
/* Control transmission */
/************************/
/* Now first lets go through the control queue */
@@ -7510,27 +7830,14 @@ again_one_more_time:
} else {
/*
* Other chunks, since they have
- * timers running (i.e. COOKIE or
- * ASCONF) we just "trust" that it
- * gets sent or retransmitted.
+ * timers running (i.e. COOKIE) we
+ * just "trust" that it gets sent or
+ * retransmitted.
*/
ctl_cnt++;
if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
cookie = 1;
no_out_cnt = 1;
- } else if (chk->rec.chunk_id.id == SCTP_ASCONF) {
- /*
- * set hb flag since we can
- * use these for RTO
- */
- hbflag = 1;
- asconf = 1;
- /*
- * should sysctl this: don't
- * bundle data with ASCONF
- * since it requires AUTH
- */
- no_data_chunks = 1;
}
chk->sent = SCTP_DATAGRAM_SENT;
chk->snd_count++;
@@ -7571,7 +7878,7 @@ again_one_more_time:
if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
(struct sockaddr *)&net->ro._l_addr,
outchain, auth_offset, auth,
- no_fragmentflg, 0, NULL, asconf, so_locked))) {
+ no_fragmentflg, 0, NULL, asconf, net->port, so_locked))) {
if (error == ENOBUFS) {
asoc->ifp_had_enobuf = 1;
SCTP_STAT_INCR(sctps_lowlevelerr);
@@ -7860,7 +8167,7 @@ again_one_more_time:
no_fragmentflg,
bundle_at,
data_list[0],
- asconf, so_locked))) {
+ asconf, net->port, so_locked))) {
/* error, we could not output */
if (error == ENOBUFS) {
SCTP_STAT_INCR(sctps_lowlevelerr);
@@ -8332,16 +8639,20 @@ sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
*/
struct sctp_tmit_chunk *chk;
struct mbuf *m_asconf;
- struct sctp_asconf_chunk *acp;
int len;
SCTP_TCB_LOCK_ASSERT(stcb);
+
+ if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
+ (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
+ /* can't send a new one if there is one in flight already */
+ return;
+ }
/* compose an ASCONF chunk, maximum length is PMTU */
m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
if (m_asconf == NULL) {
return;
}
- acp = mtod(m_asconf, struct sctp_asconf_chunk *);
sctp_alloc_a_chunk(stcb, chk);
if (chk == NULL) {
/* no memory */
@@ -8355,11 +8666,11 @@ sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
chk->rec.chunk_id.can_take_data = 0;
chk->sent = SCTP_DATAGRAM_UNSENT;
chk->snd_count = 0;
- chk->flags = 0;
+ chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
chk->asoc = &stcb->asoc;
- chk->whoTo = chk->asoc->primary_destination;
+ chk->whoTo = net;
atomic_add_int(&chk->whoTo->ref_count, 1);
- TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+ TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
chk->asoc->ctrl_queue_cnt++;
return;
}
@@ -8482,7 +8793,6 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
struct sctp_tmit_chunk *chk, *fwd;
struct mbuf *m, *endofchain;
struct sctphdr *shdr;
- int asconf;
struct sctp_nets *net = NULL;
uint32_t tsns_sent = 0;
int no_fragmentflg, bundle_at, cnt_thru;
@@ -8495,7 +8805,6 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
SCTP_TCB_LOCK_ASSERT(stcb);
tmr_started = ctl_cnt = bundle_at = error = 0;
no_fragmentflg = 1;
- asconf = 0;
fwd_tsn = 0;
*cnt_out = 0;
fwd = NULL;
@@ -8515,7 +8824,6 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
}
TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
- (chk->rec.chunk_id.id == SCTP_ASCONF) ||
(chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
(chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
@@ -8528,10 +8836,6 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
}
}
ctl_cnt++;
- if (chk->rec.chunk_id.id == SCTP_ASCONF) {
- no_fragmentflg = 1;
- asconf = 1;
- }
if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
fwd_tsn = 1;
fwd = chk;
@@ -8577,7 +8881,7 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
(struct sockaddr *)&chk->whoTo->ro._l_addr, m, auth_offset,
- auth, no_fragmentflg, 0, NULL, asconf, so_locked))) {
+ auth, no_fragmentflg, 0, NULL, 0, chk->whoTo->port, so_locked))) {
SCTP_STAT_INCR(sctps_lowlevelerr);
return (error);
}
@@ -8815,7 +9119,7 @@ one_chunk_around:
/* Now lets send it, if there is anything to send :> */
if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
(struct sockaddr *)&net->ro._l_addr, m, auth_offset,
- auth, no_fragmentflg, 0, NULL, asconf, so_locked))) {
+ auth, no_fragmentflg, 0, NULL, 0, net->port, so_locked))) {
/* error, we could not output */
SCTP_STAT_INCR(sctps_lowlevelerr);
return (error);
@@ -9795,7 +10099,7 @@ sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
(void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb,
stcb->asoc.primary_destination,
(struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr,
- m_out, auth_offset, auth, 1, 0, NULL, 0, so_locked);
+ m_out, auth_offset, auth, 1, 0, NULL, 0, stcb->asoc.primary_destination->port, so_locked);
SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}
@@ -9824,19 +10128,20 @@ sctp_send_shutdown_complete(struct sctp_tcb *stcb,
SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_msg);
(void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
(struct sockaddr *)&net->ro._l_addr,
- m_shutdown_comp, 0, NULL, 1, 0, NULL, 0, SCTP_SO_NOT_LOCKED);
+ m_shutdown_comp, 0, NULL, 1, 0, NULL, 0, net->port, SCTP_SO_NOT_LOCKED);
SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
return;
}
void
sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh,
- uint32_t vrf_id)
+ uint32_t vrf_id, uint16_t port)
{
/* formulate and SEND a SHUTDOWN-COMPLETE */
struct mbuf *o_pak;
struct mbuf *mout;
struct ip *iph, *iph_out;
+ struct udphdr *udp;
#ifdef INET6
struct ip6_hdr *ip6, *ip6_out;
@@ -9845,28 +10150,36 @@ sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh,
int offset_out, len, mlen;
struct sctp_shutdown_complete_msg *comp_cp;
- /* Get room for the largest message */
+ iph = mtod(m, struct ip *);
+ switch (iph->ip_v) {
+ case IPVERSION:
+ len = (sizeof(struct ip) + sizeof(struct sctp_shutdown_complete_msg));
+ break;
#ifdef INET6
- len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_shutdown_complete_msg));
-#else
- len = (sizeof(struct ip) + sizeof(struct sctp_shutdown_complete_msg));
+ case IPV6_VERSION >> 4:
+ len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_shutdown_complete_msg));
+ break;
#endif
+ default:
+ return;
+ }
+ if (port) {
+ len += sizeof(struct udphdr);
+ }
mout = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
if (mout == NULL) {
return;
}
SCTP_BUF_LEN(mout) = len;
- iph = mtod(m, struct ip *);
+ SCTP_BUF_NEXT(mout) = NULL;
iph_out = NULL;
#ifdef INET6
ip6_out = NULL;
#endif
offset_out = 0;
+
switch (iph->ip_v) {
case IPVERSION:
- SCTP_BUF_LEN(mout) = sizeof(struct ip) +
- sizeof(struct sctp_shutdown_complete_msg);
- SCTP_BUF_NEXT(mout) = NULL;
iph_out = mtod(mout, struct ip *);
/* Fill in the IP header for the ABORT */
@@ -9876,7 +10189,11 @@ sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh,
iph_out->ip_id = 0;
iph_out->ip_off = 0;
iph_out->ip_ttl = MAXTTL;
- iph_out->ip_p = IPPROTO_SCTP;
+ if (port) {
+ iph_out->ip_p = IPPROTO_UDP;
+ } else {
+ iph_out->ip_p = IPPROTO_SCTP;
+ }
iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
@@ -9889,15 +10206,16 @@ sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh,
#ifdef INET6
case IPV6_VERSION >> 4:
ip6 = (struct ip6_hdr *)iph;
- SCTP_BUF_LEN(mout) = sizeof(struct ip6_hdr) +
- sizeof(struct sctp_shutdown_complete_msg);
- SCTP_BUF_NEXT(mout) = NULL;
ip6_out = mtod(mout, struct ip6_hdr *);
/* Fill in the IPv6 header for the ABORT */
ip6_out->ip6_flow = ip6->ip6_flow;
ip6_out->ip6_hlim = ip6_defhlim;
- ip6_out->ip6_nxt = IPPROTO_SCTP;
+ if (port) {
+ ip6_out->ip6_nxt = IPPROTO_UDP;
+ } else {
+ ip6_out->ip6_nxt = IPPROTO_SCTP;
+ }
ip6_out->ip6_src = ip6->ip6_dst;
ip6_out->ip6_dst = ip6->ip6_src;
/*
@@ -9914,6 +10232,15 @@ sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh,
/* Currently not supported. */
return;
}
+ if (port) {
+ udp = (struct udphdr *)comp_cp;
+ udp->uh_sport = htons(sctp_udp_tunneling_port);
+ udp->uh_dport = port;
+ udp->uh_ulen = htons(sizeof(struct sctp_shutdown_complete_msg) + sizeof(struct udphdr));
+ udp->uh_sum = 0;
+ offset_out += sizeof(struct udphdr);
+ comp_cp = (struct sctp_shutdown_complete_msg *)((caddr_t)comp_cp + sizeof(struct udphdr));
+ }
if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
/* no mbuf's */
sctp_m_freem(mout);
@@ -10758,7 +11085,7 @@ sctp_send_str_reset_req(struct sctp_tcb *stcb,
void
sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
- struct mbuf *err_cause, uint32_t vrf_id)
+ struct mbuf *err_cause, uint32_t vrf_id, uint16_t port)
{
/*-
* Formulate the abort message, and send it back down.
@@ -10767,6 +11094,7 @@ sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
struct mbuf *mout;
struct sctp_abort_msg *abm;
struct ip *iph, *iph_out;
+ struct udphdr *udp;
#ifdef INET6
struct ip6_hdr *ip6, *ip6_out;
@@ -10780,18 +11108,30 @@ sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
sctp_m_freem(err_cause);
return;
}
+ iph = mtod(m, struct ip *);
+ switch (iph->ip_v) {
+ case IPVERSION:
+ len = (sizeof(struct ip) + sizeof(struct sctp_abort_msg));
+ break;
#ifdef INET6
- len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_abort_msg));
-#else
- len = (sizeof(struct ip) + sizeof(struct sctp_abort_msg));
+ case IPV6_VERSION >> 4:
+ len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_abort_msg));
+ break;
#endif
+ default:
+ return;
+ }
+ if (port) {
+ len += sizeof(struct udphdr);
+ }
mout = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
if (mout == NULL) {
if (err_cause)
sctp_m_freem(err_cause);
return;
}
- iph = mtod(m, struct ip *);
+ SCTP_BUF_LEN(mout) = len;
+ SCTP_BUF_NEXT(mout) = err_cause;
iph_out = NULL;
#ifdef INET6
ip6_out = NULL;
@@ -10799,8 +11139,6 @@ sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
switch (iph->ip_v) {
case IPVERSION:
iph_out = mtod(mout, struct ip *);
- SCTP_BUF_LEN(mout) = sizeof(*iph_out) + sizeof(*abm);
- SCTP_BUF_NEXT(mout) = err_cause;
/* Fill in the IP header for the ABORT */
iph_out->ip_v = IPVERSION;
@@ -10809,7 +11147,11 @@ sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
iph_out->ip_id = 0;
iph_out->ip_off = 0;
iph_out->ip_ttl = MAXTTL;
- iph_out->ip_p = IPPROTO_SCTP;
+ if (port) {
+ iph_out->ip_p = IPPROTO_UDP;
+ } else {
+ iph_out->ip_p = IPPROTO_SCTP;
+ }
iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
/* let IP layer calculate this */
@@ -10822,13 +11164,15 @@ sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
case IPV6_VERSION >> 4:
ip6 = (struct ip6_hdr *)iph;
ip6_out = mtod(mout, struct ip6_hdr *);
- SCTP_BUF_LEN(mout) = sizeof(*ip6_out) + sizeof(*abm);
- SCTP_BUF_NEXT(mout) = err_cause;
/* Fill in the IP6 header for the ABORT */
ip6_out->ip6_flow = ip6->ip6_flow;
ip6_out->ip6_hlim = ip6_defhlim;
- ip6_out->ip6_nxt = IPPROTO_SCTP;
+ if (port) {
+ ip6_out->ip6_nxt = IPPROTO_UDP;
+ } else {
+ ip6_out->ip6_nxt = IPPROTO_SCTP;
+ }
ip6_out->ip6_src = ip6->ip6_dst;
ip6_out->ip6_dst = ip6->ip6_src;
@@ -10844,6 +11188,15 @@ sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
return;
}
+ udp = (struct udphdr *)abm;
+ if (port) {
+ udp->uh_sport = htons(sctp_udp_tunneling_port);
+ udp->uh_dport = port;
+ /* set udp->uh_ulen later */
+ udp->uh_sum = 0;
+ iphlen_out += sizeof(struct udphdr);
+ abm = (struct sctp_abort_msg *)((caddr_t)abm + sizeof(struct udphdr));
+ }
abm->sh.src_port = sh->dest_port;
abm->sh.dest_port = sh->src_port;
abm->sh.checksum = 0;
@@ -10895,6 +11248,9 @@ sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
/* zap the stack pointer to the route */
bzero(&ro, sizeof ro);
+ if (port) {
+ udp->uh_ulen = htons(len - sizeof(struct ip));
+ }
SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip_output:\n");
SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, iph_out, &abm->sh);
/* set IPv4 length */
@@ -10920,6 +11276,9 @@ sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
/* zap the stack pointer to the route */
bzero(&ro, sizeof(ro));
+ if (port) {
+ udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
+ }
SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip6_output:\n");
SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, (struct ip *)ip6_out, &abm->sh);
ip6_out->ip6_plen = len - sizeof(*ip6_out);
@@ -10941,7 +11300,7 @@ sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
void
sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
- uint32_t vrf_id)
+ uint32_t vrf_id, uint16_t port)
{
struct mbuf *o_pak;
struct sctphdr *ihdr;
@@ -10949,6 +11308,7 @@ sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
struct sctphdr *ohdr;
struct sctp_chunkhdr *ophdr;
struct ip *iph;
+ struct udphdr *udp;
struct mbuf *mout;
#ifdef SCTP_DEBUG
@@ -10993,9 +11353,17 @@ sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
}
val = sctp_calculate_sum(scm, NULL, 0);
#ifdef INET6
- mout = sctp_get_mbuf_for_msg(sizeof(struct ip6_hdr), 1, M_DONTWAIT, 1, MT_DATA);
+ if (port) {
+ mout = sctp_get_mbuf_for_msg(sizeof(struct ip6_hdr) + sizeof(struct udphdr), 1, M_DONTWAIT, 1, MT_DATA);
+ } else {
+ mout = sctp_get_mbuf_for_msg(sizeof(struct ip6_hdr), 1, M_DONTWAIT, 1, MT_DATA);
+ }
#else
- mout = sctp_get_mbuf_for_msg(sizeof(struct ip), 1, M_DONTWAIT, 1, MT_DATA);
+ if (port) {
+ mout = sctp_get_mbuf_for_msg(sizeof(struct ip) + sizeof(struct udphdr), 1, M_DONTWAIT, 1, MT_DATA);
+ } else {
+ mout = sctp_get_mbuf_for_msg(sizeof(struct ip), 1, M_DONTWAIT, 1, MT_DATA);
+ }
#endif
if (mout == NULL) {
sctp_m_freem(scm);
@@ -11017,7 +11385,10 @@ sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
SCTP_BUF_LEN(mout) = sizeof(struct ip);
len += sizeof(struct ip);
-
+ if (port) {
+ SCTP_BUF_LEN(mout) += sizeof(struct udphdr);
+ len += sizeof(struct udphdr);
+ }
bzero(&ro, sizeof ro);
out = mtod(mout, struct ip *);
out->ip_v = iph->ip_v;
@@ -11026,11 +11397,22 @@ sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
out->ip_id = iph->ip_id;
out->ip_off = 0;
out->ip_ttl = MAXTTL;
- out->ip_p = IPPROTO_SCTP;
+ if (port) {
+ out->ip_p = IPPROTO_UDP;
+ } else {
+ out->ip_p = IPPROTO_SCTP;
+ }
out->ip_sum = 0;
out->ip_src = iph->ip_dst;
out->ip_dst = iph->ip_src;
out->ip_len = len;
+ if (port) {
+ udp = (struct udphdr *)(out + 1);
+ udp->uh_sport = htons(sctp_udp_tunneling_port);
+ udp->uh_dport = port;
+ udp->uh_ulen = htons(len - sizeof(struct ip));
+ udp->uh_sum = 0;
+ }
#ifdef SCTP_PACKET_LOGGING
if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
sctp_packet_log(mout, len);
@@ -11059,15 +11441,29 @@ sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
SCTP_BUF_LEN(mout) = sizeof(struct ip6_hdr);
len += sizeof(struct ip6_hdr);
bzero(&ro, sizeof ro);
+ if (port) {
+ SCTP_BUF_LEN(mout) += sizeof(struct udphdr);
+ len += sizeof(struct udphdr);
+ }
in6 = mtod(m, struct ip6_hdr *);
out6 = mtod(mout, struct ip6_hdr *);
out6->ip6_flow = in6->ip6_flow;
out6->ip6_hlim = ip6_defhlim;
- out6->ip6_nxt = IPPROTO_SCTP;
+ if (port) {
+ out6->ip6_nxt = IPPROTO_UDP;
+ } else {
+ out6->ip6_nxt = IPPROTO_SCTP;
+ }
out6->ip6_src = in6->ip6_dst;
out6->ip6_dst = in6->ip6_src;
out6->ip6_plen = len - sizeof(struct ip6_hdr);
-
+ if (port) {
+ udp = (struct udphdr *)(out6 + 1);
+ udp->uh_sport = htons(sctp_udp_tunneling_port);
+ udp->uh_dport = port;
+ udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
+ udp->uh_sum = 0;
+ }
#ifdef SCTP_DEBUG
bzero(&lsa6, sizeof(lsa6));
lsa6.sin6_len = sizeof(lsa6);
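[Editor's note] The IPv4 and IPv6 branches above share one encapsulation pattern: when a non-zero tunneling port is passed down, room for a struct udphdr is reserved between the IP header and the SCTP common header, the outer protocol field becomes IPPROTO_UDP, and the UDP length is set to everything behind the IP header. A standalone userland sketch of that arithmetic follows; it is not the kernel code, and the ipv4_hdr/udp_hdr layouts and names below are this note's own.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Illustrative on-wire layouts; the field names are this sketch's own. */
struct ipv4_hdr { uint8_t vhl, tos; uint16_t len, id, off; uint8_t ttl, proto; uint16_t sum; uint32_t src, dst; };
struct udp_hdr  { uint16_t sport, dport, ulen, sum; };

/*
 * Wrap an SCTP packet of sctp_len bytes in IPv4 + UDP.  tunnel_sport plays
 * the role of the sctp_udp_tunneling_port sysctl, tunnel_dport the role of
 * net->port (both in host byte order here).  Returns the total length.
 */
static size_t
wrap_sctp_in_udp(struct ipv4_hdr *ip, size_t sctp_len,
                 uint16_t tunnel_sport, uint16_t tunnel_dport)
{
	struct udp_hdr *udp = (struct udp_hdr *)(ip + 1);
	size_t packet_length = sizeof(*ip) + sizeof(*udp) + sctp_len;

	memset(ip, 0, sizeof(*ip) + sizeof(*udp));
	ip->vhl = 0x45;			/* IPv4, 20-byte header */
	ip->ttl = 64;
	ip->proto = 17;			/* outer protocol is UDP, not SCTP */
	ip->len = htons((uint16_t)packet_length);

	udp->sport = htons(tunnel_sport);
	udp->dport = htons(tunnel_dport);
	/* The UDP length covers the UDP header plus the inner SCTP packet. */
	udp->ulen = htons((uint16_t)(packet_length - sizeof(*ip)));
	udp->sum = 0;			/* checksum left to the stack/NIC */
	return (packet_length);
}

int
main(void)
{
	static struct { struct ipv4_hdr ip; struct udp_hdr udp; uint8_t sctp[100]; } pkt;
	size_t total = wrap_sctp_in_udp(&pkt.ip, sizeof(pkt.sctp), 9899, 9899);

	printf("total %zu bytes, UDP length field %u\n", total, ntohs(pkt.udp.ulen));
	return (0);
}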
diff --git a/sys/netinet/sctp_output.h b/sys/netinet/sctp_output.h
index d37c163..f0961b8 100644
--- a/sys/netinet/sctp_output.h
+++ b/sys/netinet/sctp_output.h
@@ -84,7 +84,7 @@ sctp_send_initiate(struct sctp_inpcb *, struct sctp_tcb *, int
void
sctp_send_initiate_ack(struct sctp_inpcb *, struct sctp_tcb *,
struct mbuf *, int, int, struct sctphdr *, struct sctp_init_chunk *,
- uint32_t, int);
+ uint32_t, uint16_t, int);
struct mbuf *
sctp_arethere_unrecognized_parameters(struct mbuf *, int, int *,
@@ -110,7 +110,7 @@ void sctp_send_shutdown_complete(struct sctp_tcb *, struct sctp_nets *);
void
sctp_send_shutdown_complete2(struct mbuf *, int, struct sctphdr *,
- uint32_t);
+ uint32_t, uint16_t);
void sctp_send_asconf(struct sctp_tcb *, struct sctp_nets *, int addr_locked);
@@ -197,9 +197,9 @@ sctp_send_str_reset_req(struct sctp_tcb *stcb,
void
sctp_send_abort(struct mbuf *, int, struct sctphdr *, uint32_t,
- struct mbuf *, uint32_t);
+ struct mbuf *, uint32_t, uint16_t);
-void sctp_send_operr_to(struct mbuf *, int, struct mbuf *, uint32_t, uint32_t);
+void sctp_send_operr_to(struct mbuf *, int, struct mbuf *, uint32_t, uint32_t, uint16_t);
int
sctp_sosend(struct socket *so,
diff --git a/sys/netinet/sctp_pcb.c b/sys/netinet/sctp_pcb.c
index 7ad3f7f..6f540d1 100644
--- a/sys/netinet/sctp_pcb.c
+++ b/sys/netinet/sctp_pcb.c
@@ -45,24 +45,30 @@ __FBSDID("$FreeBSD$");
#include <netinet/sctp_output.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_bsd_addr.h>
+#include <netinet/udp.h>
+void sctp_pcb_finish(void);
+
struct sctp_epinfo sctppcbinfo;
/* FIX: we don't handle multiple link local scopes */
/* "scopeless" replacement IN6_ARE_ADDR_EQUAL */
#ifdef INET6
int
-SCTP6_ARE_ADDR_EQUAL(struct in6_addr *a, struct in6_addr *b)
+SCTP6_ARE_ADDR_EQUAL(struct sockaddr_in6 *a, struct sockaddr_in6 *b)
{
- struct in6_addr tmp_a, tmp_b;
-
- /* use a copy of a and b */
- tmp_a = *a;
- tmp_b = *b;
- in6_clearscope(&tmp_a);
- in6_clearscope(&tmp_b);
- return (IN6_ARE_ADDR_EQUAL(&tmp_a, &tmp_b));
+ struct sockaddr_in6 tmp_a, tmp_b;
+
+ memcpy(&tmp_a, a, sizeof(struct sockaddr_in6));
+ if (sa6_embedscope(&tmp_a, ip6_use_defzone) != 0) {
+ return 0;
+ }
+ memcpy(&tmp_b, b, sizeof(struct sockaddr_in6));
+ if (sa6_embedscope(&tmp_b, ip6_use_defzone) != 0) {
+ return 0;
+ }
+ return (IN6_ARE_ADDR_EQUAL(&tmp_a.sin6_addr, &tmp_b.sin6_addr));
}
#endif
@@ -916,8 +922,8 @@ sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from,
to;
intf_addr6 = &laddr->ifa->address.sin6;
- if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
- &intf_addr6->sin6_addr)) {
+ if (SCTP6_ARE_ADDR_EQUAL(sin6,
+ intf_addr6)) {
match = 1;
break;
}
@@ -990,8 +996,8 @@ sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from,
sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
rsin6 = (struct sockaddr_in6 *)from;
- if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
- &rsin6->sin6_addr)) {
+ if (SCTP6_ARE_ADDR_EQUAL(sin6,
+ rsin6)) {
/* found it */
if (netp != NULL) {
*netp = net;
@@ -1147,8 +1153,8 @@ sctp_findassociation_ep_addr(struct sctp_inpcb **inp_p, struct sockaddr *remote,
sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
rsin6 = (struct sockaddr_in6 *)remote;
- if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
- &rsin6->sin6_addr)) {
+ if (SCTP6_ARE_ADDR_EQUAL(sin6,
+ rsin6)) {
/* found it */
if (netp != NULL) {
*netp = net;
@@ -1245,8 +1251,8 @@ sctp_findassociation_ep_addr(struct sctp_inpcb **inp_p, struct sockaddr *remote,
sin6 = (struct sockaddr_in6 *)
&net->ro._l_addr;
rsin6 = (struct sockaddr_in6 *)remote;
- if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
- &rsin6->sin6_addr)) {
+ if (SCTP6_ARE_ADDR_EQUAL(sin6,
+ rsin6)) {
/* found it */
if (netp != NULL) {
*netp = net;
@@ -1536,8 +1542,8 @@ sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head,
#ifdef INET6
case AF_INET6:
intf_addr6 = &laddr->ifa->address.sin6;
- if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
- &intf_addr6->sin6_addr)) {
+ if (SCTP6_ARE_ADDR_EQUAL(sin6,
+ intf_addr6)) {
SCTP_INP_RUNLOCK(inp);
return (inp);
}
@@ -2775,6 +2781,11 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF);
sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
}
+ if (sctp_multiple_asconfs == 0) {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS);
+ } else {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS);
+ }
/*
* set the automatic mobility_base from kernel flag (by
* micchie)
@@ -3506,6 +3517,7 @@ sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
addr_inscope = 0;
}
}
+#ifdef INET6
} else if (newaddr->sa_family == AF_INET6) {
struct sockaddr_in6 *sin6;
@@ -3553,6 +3565,7 @@ sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
addr_inscope = 0;
}
}
+#endif
} else {
/* not supported family type */
return (-1);
@@ -3599,6 +3612,11 @@ sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
stcb->asoc.numnets++;
*(&net->ref_count) = 1;
net->tos_flowlabel = 0;
+ if (sctp_udp_tunneling_for_client_enable) {
+ net->port = htons(sctp_udp_tunneling_port);
+ } else {
+ net->port = 0;
+ }
#ifdef INET
if (newaddr->sa_family == AF_INET)
net->tos_flowlabel = stcb->asoc.default_tos;
@@ -3625,14 +3643,6 @@ sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
#endif
SCTP_RTALLOC((sctp_route_t *) & net->ro, stcb->asoc.vrf_id);
-#ifdef INET6
- if (newaddr->sa_family == AF_INET6) {
- struct sockaddr_in6 *sin6;
-
- sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
- (void)sa6_recoverscope(sin6);
- }
-#endif
if (SCTP_ROUTE_HAS_VALID_IFN(&net->ro)) {
/* Get source address */
net->ro._s_addr = sctp_source_address_selection(stcb->sctp_ep,
@@ -3685,6 +3695,17 @@ sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
} else {
net->mtu = stcb->asoc.smallest_mtu;
}
+#ifdef INET6
+ if (newaddr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ (void)sa6_recoverscope(sin6);
+ }
+#endif
+ if (net->port) {
+ net->mtu -= sizeof(struct udphdr);
+ }
if (stcb->asoc.smallest_mtu > net->mtu) {
#ifdef SCTP_PRINT_FOR_B_AND_M
SCTP_PRINTF("new address mtu:%d smaller than smallest:%d\n",
@@ -4004,7 +4025,7 @@ sctp_remove_net(struct sctp_tcb *stcb, struct sctp_nets *net)
SCTPDBG(SCTP_DEBUG_ASCONF1, "remove_net: primary dst is deleting\n");
if (asoc->deleted_primary != NULL) {
SCTPDBG(SCTP_DEBUG_ASCONF1, "remove_net: deleted primary may be already stored\n");
- goto leave;
+ goto out;
}
asoc->deleted_primary = net;
atomic_add_int(&net->ref_count, 1);
@@ -4015,7 +4036,7 @@ sctp_remove_net(struct sctp_tcb *stcb, struct sctp_nets *net)
sctp_timer_start(SCTP_TIMER_TYPE_PRIM_DELETED,
stcb->sctp_ep, stcb, NULL);
}
-leave:
+out:
/* Try to find a confirmed primary */
asoc->primary_destination = sctp_find_alternate_net(stcb, lnet, 0);
}
@@ -4625,6 +4646,30 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
ccnt = 0;
}
*/
+
+ /* ASCONF queue MAY not be empty */
+ if (!TAILQ_EMPTY(&asoc->asconf_send_queue)) {
+ chk = TAILQ_FIRST(&asoc->asconf_send_queue);
+ while (chk) {
+ TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ ccnt++;
+ sctp_free_remote_addr(chk->whoTo);
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
+ SCTP_DECR_CHK_COUNT();
+ /* sa_ignore FREED_MEMORY */
+ chk = TAILQ_FIRST(&asoc->asconf_send_queue);
+ }
+ }
+/*
+ if(ccnt) {
+ printf("Freed %d from asconf_queue\n", ccnt);
+ ccnt = 0;
+ }
+*/
if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
chk = TAILQ_FIRST(&asoc->reasmqueue);
while (chk) {
@@ -5239,6 +5284,10 @@ sctp_pcb_init()
sizeof(struct sctp_stream_queue_pending),
(sctp_max_number_of_assoc * sctp_chunkscale));
+ SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_asconf, "sctp_asconf",
+ sizeof(struct sctp_asconf),
+ (sctp_max_number_of_assoc * sctp_chunkscale));
+
SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_asconf_ack, "sctp_asconf_ack",
sizeof(struct sctp_asconf_ack),
(sctp_max_number_of_assoc * sctp_chunkscale));
@@ -5298,6 +5347,73 @@ sctp_pcb_init()
}
+/*
+ * Assumes that the sctppcbinfo lock is NOT held.
+ */
+void
+sctp_pcb_finish(void)
+{
+ struct sctp_vrflist *vrf_bucket;
+ struct sctp_vrf *vrf;
+ struct sctp_ifn *ifn;
+ struct sctp_ifa *ifa;
+
+ /* FIXME MT */
+ /*
+ * free the vrf/ifn/ifa lists and hashes (be sure address monitor is
+ * destroyed first).
+ */
+ vrf_bucket = &sctppcbinfo.sctp_vrfhash[(SCTP_DEFAULT_VRFID & sctppcbinfo.hashvrfmark)];
+ vrf = LIST_FIRST(vrf_bucket);
+ while (vrf) {
+ ifn = LIST_FIRST(&vrf->ifnlist);
+ while (ifn) {
+ ifa = LIST_FIRST(&ifn->ifalist);
+ while (ifa) {
+ /* free the ifa */
+ LIST_REMOVE(ifa, next_bucket);
+ LIST_REMOVE(ifa, next_ifa);
+ SCTP_FREE(ifa, SCTP_M_IFA);
+ ifa = LIST_FIRST(&ifn->ifalist);
+ }
+ /* free the ifn */
+ LIST_REMOVE(ifn, next_bucket);
+ LIST_REMOVE(ifn, next_ifn);
+ SCTP_FREE(ifn, SCTP_M_IFN);
+ ifn = LIST_FIRST(&vrf->ifnlist);
+ }
+ SCTP_HASH_FREE(vrf->vrf_addr_hash, vrf->vrf_addr_hashmark);
+ /* free the vrf */
+ LIST_REMOVE(vrf, next_vrf);
+ SCTP_FREE(vrf, SCTP_M_VRF);
+ vrf = LIST_FIRST(vrf_bucket);
+ }
+ /* free the vrf hashes */
+ SCTP_HASH_FREE(sctppcbinfo.sctp_vrfhash, sctppcbinfo.hashvrfmark);
+ SCTP_HASH_FREE(sctppcbinfo.vrf_ifn_hash, sctppcbinfo.vrf_ifn_hashmark);
+
+ /* free the locks and mutexes */
+#ifdef SCTP_PACKET_LOGGING
+ SCTP_IP_PKTLOG_DESTROY();
+
+#endif
+ SCTP_IPI_ITERATOR_WQ_DESTROY();
+ SCTP_IPI_ADDR_DESTROY();
+ SCTP_ITERATOR_LOCK_DESTROY();
+ SCTP_STATLOG_DESTROY();
+ SCTP_INP_INFO_LOCK_DESTROY();
+
+ SCTP_ZONE_DESTROY(sctppcbinfo.ipi_zone_ep);
+ SCTP_ZONE_DESTROY(sctppcbinfo.ipi_zone_asoc);
+ SCTP_ZONE_DESTROY(sctppcbinfo.ipi_zone_laddr);
+ SCTP_ZONE_DESTROY(sctppcbinfo.ipi_zone_net);
+ SCTP_ZONE_DESTROY(sctppcbinfo.ipi_zone_chunk);
+ SCTP_ZONE_DESTROY(sctppcbinfo.ipi_zone_readq);
+ SCTP_ZONE_DESTROY(sctppcbinfo.ipi_zone_strmoq);
+ SCTP_ZONE_DESTROY(sctppcbinfo.ipi_zone_asconf);
+ SCTP_ZONE_DESTROY(sctppcbinfo.ipi_zone_asconf_ack);
+}
+
int
sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
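[Editor's note] SCTP6_ARE_ADDR_EQUAL now takes full sockaddr_in6 pointers so the KAME scope id can be embedded before the raw address bytes are compared, instead of being cleared away. A rough userland analogue is sketched below; since sa6_embedscope() is kernel-only, the sketch folds the scope id into link-local addresses by hand, which is an approximation rather than the real routine.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/*
 * Scope-aware comparison of two IPv6 socket addresses: copies are taken,
 * the scope id is folded into link-local addresses (a stand-in for the
 * kernel's sa6_embedscope()), and the embedded forms are compared.
 */
static int
sin6_equal_with_scope(const struct sockaddr_in6 *a, const struct sockaddr_in6 *b)
{
	struct sockaddr_in6 ta = *a, tb = *b;

	if (IN6_IS_ADDR_LINKLOCAL(&ta.sin6_addr)) {
		ta.sin6_addr.s6_addr[2] = (uint8_t)(ta.sin6_scope_id >> 8);
		ta.sin6_addr.s6_addr[3] = (uint8_t)(ta.sin6_scope_id & 0xff);
	}
	if (IN6_IS_ADDR_LINKLOCAL(&tb.sin6_addr)) {
		tb.sin6_addr.s6_addr[2] = (uint8_t)(tb.sin6_scope_id >> 8);
		tb.sin6_addr.s6_addr[3] = (uint8_t)(tb.sin6_scope_id & 0xff);
	}
	return (IN6_ARE_ADDR_EQUAL(&ta.sin6_addr, &tb.sin6_addr));
}

int
main(void)
{
	struct sockaddr_in6 x = { .sin6_family = AF_INET6 }, y = x;

	inet_pton(AF_INET6, "fe80::1", &x.sin6_addr);
	inet_pton(AF_INET6, "fe80::1", &y.sin6_addr);
	x.sin6_scope_id = 1;
	y.sin6_scope_id = 2;	/* same bits, different interface */
	printf("%d\n", sin6_equal_with_scope(&x, &y));	/* prints 0 */
	return (0);
}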
diff --git a/sys/netinet/sctp_pcb.h b/sys/netinet/sctp_pcb.h
index 44ac300..9b4df18 100644
--- a/sys/netinet/sctp_pcb.h
+++ b/sys/netinet/sctp_pcb.h
@@ -184,6 +184,7 @@ struct sctp_epinfo {
sctp_zone_t ipi_zone_chunk;
sctp_zone_t ipi_zone_readq;
sctp_zone_t ipi_zone_strmoq;
+ sctp_zone_t ipi_zone_asconf;
sctp_zone_t ipi_zone_asconf_ack;
struct rwlock ipi_ep_mtx;
@@ -435,7 +436,7 @@ struct sctp_tcb {
extern struct sctp_epinfo sctppcbinfo;
#ifdef INET6
-int SCTP6_ARE_ADDR_EQUAL(struct in6_addr *a, struct in6_addr *b);
+int SCTP6_ARE_ADDR_EQUAL(struct sockaddr_in6 *a, struct sockaddr_in6 *b);
#endif
diff --git a/sys/netinet/sctp_structs.h b/sys/netinet/sctp_structs.h
index 69b6f92..8beb3bf 100644
--- a/sys/netinet/sctp_structs.h
+++ b/sys/netinet/sctp_structs.h
@@ -257,6 +257,8 @@ struct sctp_nets {
uint16_t failure_threshold;
/* error stats on destination */
uint16_t error_count;
+ /* UDP port number in case of UDP tunneling */
+ uint16_t port;
uint8_t fast_retran_loss_recovery;
uint8_t will_exit_fast_recovery;
@@ -547,6 +549,16 @@ struct sctp_cc_functions {
struct sctp_tcb *stcb, struct sctp_nets *net);
};
+/* used to save ASCONF chunks for retransmission */
+TAILQ_HEAD(sctp_asconf_head, sctp_asconf);
+struct sctp_asconf {
+ TAILQ_ENTRY(sctp_asconf) next;
+ uint32_t serial_number;
+ uint16_t snd_count;
+ struct mbuf *data;
+ uint16_t len;
+};
+
/* used to save ASCONF-ACK chunks for retransmission */
TAILQ_HEAD(sctp_asconf_ackhead, sctp_asconf_ack);
struct sctp_asconf_ack {
@@ -602,6 +614,9 @@ struct sctp_association {
/* Control chunk queue */
struct sctpchunk_listhead control_send_queue;
+ /* ASCONF chunk queue */
+ struct sctpchunk_listhead asconf_send_queue;
+
/*
* Once a TSN hits the wire it is moved to the sent_queue. We
* maintain two counts here (don't know if any but retran_cnt is
@@ -686,6 +701,7 @@ struct sctp_association {
uint32_t cookie_preserve_req;
/* ASCONF next seq I am sending out, inits at init-tsn */
uint32_t asconf_seq_out;
+ uint32_t asconf_seq_out_acked;
/* ASCONF last received ASCONF from peer, starts at peer's TSN-1 */
uint32_t asconf_seq_in;
@@ -919,8 +935,6 @@ struct sctp_association {
* lock flag: 0 is ok to send, 1+ (duals as a retran count) is
* awaiting ACK
*/
- uint16_t asconf_sent;
-
uint16_t mapping_array_size;
uint16_t last_strm_seq_delivered;
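[Editor's note] sctp_structs.h introduces a dedicated asconf_send_queue next to control_send_queue, so ASCONFs sit in their own FIFO and can be tossed once the peer's ACK covers their serial numbers. A toy version of that queue discipline is sketched below using the same <sys/queue.h> TAILQ macros, but with made-up chunk fields and no serial-number wraparound handling.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* Hypothetical stand-in for a queued ASCONF chunk. */
struct toy_chunk {
	TAILQ_ENTRY(toy_chunk) next;
	uint32_t serial_number;
	int sent;			/* 0 = unsent, 1 = in flight */
};

TAILQ_HEAD(toy_queue, toy_chunk);

/* Queue a new ASCONF; with "multiple" disabled only one may be pending. */
static int
enqueue_asconf(struct toy_queue *q, uint32_t serial, int allow_multiple)
{
	struct toy_chunk *chk;

	if (!TAILQ_EMPTY(q) && !allow_multiple)
		return (-1);		/* one in flight already */
	chk = calloc(1, sizeof(*chk));
	if (chk == NULL)
		return (-1);
	chk->serial_number = serial;
	TAILQ_INSERT_TAIL(q, chk, next);
	return (0);
}

/* Drop everything acked up to the given serial (wraparound ignored here). */
static void
toss_acked_asconfs(struct toy_queue *q, uint32_t acked)
{
	struct toy_chunk *chk;

	while ((chk = TAILQ_FIRST(q)) != NULL && chk->serial_number <= acked) {
		TAILQ_REMOVE(q, chk, next);
		free(chk);
	}
}

int
main(void)
{
	struct toy_queue q = TAILQ_HEAD_INITIALIZER(q);

	enqueue_asconf(&q, 1, 1);
	enqueue_asconf(&q, 2, 1);
	toss_acked_asconfs(&q, 1);
	printf("head serial now %u\n", TAILQ_FIRST(&q)->serial_number);	/* 2 */
	return (0);
}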
diff --git a/sys/netinet/sctp_sysctl.c b/sys/netinet/sctp_sysctl.c
index b428029..abf3f9e 100644
--- a/sys/netinet/sctp_sysctl.c
+++ b/sys/netinet/sctp_sysctl.c
@@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
uint32_t sctp_sendspace = SCTPCTL_MAXDGRAM_DEFAULT;
uint32_t sctp_recvspace = SCTPCTL_RECVSPACE_DEFAULT;
uint32_t sctp_auto_asconf = SCTPCTL_AUTOASCONF_DEFAULT;
+uint32_t sctp_multiple_asconfs = SCTPCTL_MULTIPLEASCONFS_DEFAULT;
uint32_t sctp_ecn_enable = SCTPCTL_ECN_ENABLE_DEFAULT;
uint32_t sctp_ecn_nonce = SCTPCTL_ECN_NONCE_DEFAULT;
uint32_t sctp_strict_sacks = SCTPCTL_STRICT_SACKS_DEFAULT;
@@ -102,6 +103,9 @@ uint32_t sctp_mobility_fasthandoff = SCTPCTL_MOBILITY_FASTHANDOFF_DEFAULT;
struct sctp_log sctp_log;
#endif
+uint32_t sctp_udp_tunneling_for_client_enable = SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_DEFAULT;
+uint32_t sctp_udp_tunneling_port = SCTPCTL_UDP_TUNNELING_PORT_DEFAULT;
+
#ifdef SCTP_DEBUG
uint32_t sctp_debug_on = SCTPCTL_DEBUG_DEFAULT;
@@ -493,11 +497,35 @@ sctp_assoclist(SYSCTL_HANDLER_ARGS)
return error;
}
+
+
#define RANGECHK(var, min, max) \
if ((var) < (min)) { (var) = (min); } \
else if ((var) > (max)) { (var) = (max); }
static int
+sysctl_sctp_udp_tunneling_check(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ uint32_t old_sctp_udp_tunneling_port;
+
+ old_sctp_udp_tunneling_port = sctp_udp_tunneling_port;
+ error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
+ if (error == 0) {
+ RANGECHK(sctp_udp_tunneling_port, SCTPCTL_UDP_TUNNELING_PORT_MIN, SCTPCTL_UDP_TUNNELING_PORT_MAX);
+ if (old_sctp_udp_tunneling_port) {
+ sctp_over_udp_stop();
+ }
+ if (sctp_udp_tunneling_port) {
+ if (sctp_over_udp_start()) {
+ sctp_udp_tunneling_port = 0;
+ }
+ }
+ }
+ return (error);
+}
+
+static int
sysctl_sctp_check(SYSCTL_HANDLER_ARGS)
{
int error;
@@ -565,6 +593,7 @@ sysctl_sctp_check(SYSCTL_HANDLER_ARGS)
#if defined(__FreeBSD__) || defined(SCTP_APPLE_MOBILITY_FASTHANDOFF)
RANGECHK(sctp_mobility_fasthandoff, SCTPCTL_MOBILITY_FASTHANDOFF_MIN, SCTPCTL_MOBILITY_FASTHANDOFF_MAX);
#endif
+ RANGECHK(sctp_udp_tunneling_for_client_enable, SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_MIN, SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_MAX);
#ifdef SCTP_DEBUG
RANGECHK(sctp_debug_on, SCTPCTL_DEBUG_MIN, SCTPCTL_DEBUG_MAX);
#endif
@@ -808,6 +837,14 @@ SYSCTL_STRUCT(_net_inet_sctp, OID_AUTO, log, CTLFLAG_RD,
"SCTP logging (struct sctp_log)");
#endif
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, udp_tunneling_for_client_enable, CTLTYPE_INT | CTLFLAG_RW,
+ &sctp_udp_tunneling_for_client_enable, 0, sysctl_sctp_check, "IU",
+ SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, udp_tunneling_port, CTLTYPE_INT | CTLFLAG_RW,
+ &sctp_udp_tunneling_port, 0, sysctl_sctp_udp_tunneling_check, "IU",
+ SCTPCTL_UDP_TUNNELING_PORT_DESC);
+
#ifdef SCTP_DEBUG
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, debug, CTLTYPE_INT | CTLFLAG_RW,
&sctp_debug_on, 0, sysctl_sctp_check, "IU",
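[Editor's note] sysctl_sctp_udp_tunneling_check clamps the requested port into range, always stops any running tunnel first, and only keeps a non-zero port if the tunnel can actually be restarted. A simplified sketch of that control flow follows, with hypothetical tunnel_stop()/tunnel_start() stubs standing in for sctp_over_udp_stop()/sctp_over_udp_start() (which on FreeBSD currently just fails).

#include <stdint.h>
#include <stdio.h>

static uint16_t tunneling_port;		/* stand-in for the sysctl variable */

/* Hypothetical stubs; the real kernel hooks open/close a UDP socket. */
static void tunnel_stop(void) { /* close the tunnel socket, if any */ }
static int  tunnel_start(void) { return (-1); /* not implemented yet */ }

#define CLAMP(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

/* Apply a port change the way the sysctl handler does. */
static void
set_tunneling_port(uint32_t requested)
{
	uint16_t old_port = tunneling_port;

	tunneling_port = (uint16_t)CLAMP(requested, 0, 65535);
	if (old_port)
		tunnel_stop();		/* always stop the old tunnel first */
	if (tunneling_port && tunnel_start() != 0)
		tunneling_port = 0;	/* could not start: disable tunneling */
}

int
main(void)
{
	set_tunneling_port(9899);
	printf("port after failed start: %u\n", tunneling_port);	/* 0 */
	return (0);
}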
diff --git a/sys/netinet/sctp_sysctl.h b/sys/netinet/sctp_sysctl.h
index 3b93b68..388ebbc 100644
--- a/sys/netinet/sctp_sysctl.h
+++ b/sys/netinet/sctp_sysctl.h
@@ -58,6 +58,12 @@ __FBSDID("$FreeBSD$");
#define SCTPCTL_AUTOASCONF_MAX 1
#define SCTPCTL_AUTOASCONF_DEFAULT SCTP_DEFAULT_AUTO_ASCONF
+/* multiple_asconfs: Enable SCTP Multiple-ASCONFs */
+#define SCTPCTL_MULTIPLEASCONFS_DESC "Enable SCTP Multiple-ASCONFs"
+#define SCTPCTL_MULTIPLEASCONFS_MIN 0
+#define SCTPCTL_MULTIPLEASCONFS_MAX 1
+#define SCTPCTL_MULTIPLEASCONFS_DEFAULT SCTP_DEFAULT_MULTIPLE_ASCONFS
+
/* ecn_enable: Enable SCTP ECN */
#define SCTPCTL_ECN_ENABLE_DESC "Enable SCTP ECN"
#define SCTPCTL_ECN_ENABLE_MIN 0
@@ -370,6 +376,18 @@ __FBSDID("$FreeBSD$");
#define SCTPCTL_MOBILITY_FASTHANDOFF_MAX 1
#define SCTPCTL_MOBILITY_FASTHANDOFF_DEFAULT SCTP_DEFAULT_MOBILITY_FASTHANDOFF
+/* Enable SCTP/UDP tunneling for clients */
+#define SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_DESC "Enable SCTP/UDP tunneling for client"
+#define SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_MIN 0
+#define SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_MAX 1
+#define SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_DEFAULT SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_MIN
+
+/* Set the SCTP/UDP tunneling port */
+#define SCTPCTL_UDP_TUNNELING_PORT_DESC "Set the SCTP/UDP tunneling port"
+#define SCTPCTL_UDP_TUNNELING_PORT_MIN 0
+#define SCTPCTL_UDP_TUNNELING_PORT_MAX 65535
+#define SCTPCTL_UDP_TUNNELING_PORT_DEFAULT SCTP_OVER_UDP_TUNNELING_PORT
+
#if defined(SCTP_DEBUG)
/* debug: Configure debug output */
#define SCTPCTL_DEBUG_DESC "Configure debug output"
@@ -388,6 +406,7 @@ __FBSDID("$FreeBSD$");
extern uint32_t sctp_sendspace;
extern uint32_t sctp_recvspace;
extern uint32_t sctp_auto_asconf;
+extern uint32_t sctp_multiple_asconfs;
extern uint32_t sctp_ecn_enable;
extern uint32_t sctp_ecn_nonce;
extern uint32_t sctp_strict_sacks;
@@ -449,6 +468,9 @@ extern uint32_t sctp_mobility_fasthandoff;
extern struct sctp_log sctp_log;
#endif
+extern uint32_t sctp_udp_tunneling_for_client_enable;
+extern uint32_t sctp_udp_tunneling_port;
+
#if defined(SCTP_DEBUG)
extern uint32_t sctp_debug_on;
diff --git a/sys/netinet/sctp_timer.c b/sys/netinet/sctp_timer.c
index 6f6d188..b6dd318 100644
--- a/sys/netinet/sctp_timer.c
+++ b/sys/netinet/sctp_timer.c
@@ -173,6 +173,11 @@ sctp_audit_retranmission_queue(struct sctp_association *asoc)
sctp_ucount_incr(asoc->sent_queue_retran_cnt);
}
}
+ TAILQ_FOREACH(chk, &asoc->asconf_send_queue, sctp_next) {
+ if (chk->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+ }
+ }
SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n",
asoc->sent_queue_retran_cnt,
asoc->sent_queue_cnt);
@@ -1314,10 +1319,10 @@ sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
struct sctp_nets *net)
{
struct sctp_nets *alt;
- struct sctp_tmit_chunk *asconf, *chk;
+ struct sctp_tmit_chunk *asconf, *chk, *nchk;
/* is this a first send, or a retransmission? */
- if (stcb->asoc.asconf_sent == 0) {
+ if (TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) {
/* compose a new ASCONF chunk and send it */
sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED);
} else {
@@ -1326,12 +1331,7 @@ sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
*/
/* find the existing ASCONF */
- TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
- sctp_next) {
- if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
- break;
- }
- }
+ asconf = TAILQ_FIRST(&stcb->asoc.asconf_send_queue);
if (asconf == NULL) {
return (0);
}
@@ -1359,10 +1359,11 @@ sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
*/
sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0);
alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0);
- sctp_free_remote_addr(asconf->whoTo);
- asconf->whoTo = alt;
- atomic_add_int(&alt->ref_count, 1);
-
+ if (asconf->whoTo != alt) {
+ sctp_free_remote_addr(asconf->whoTo);
+ asconf->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+ }
/* See if an ECN Echo is also stranded */
TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
if ((chk->whoTo == net) &&
@@ -1376,17 +1377,32 @@ sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
atomic_add_int(&alt->ref_count, 1);
}
}
+ for (chk = asconf; chk; chk = nchk) {
+ nchk = TAILQ_NEXT(chk, sctp_next);
+ if (chk->whoTo != alt) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ if (asconf->sent != SCTP_DATAGRAM_RESEND && chk->sent != SCTP_DATAGRAM_UNSENT)
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ }
if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
/*
* If the address went un-reachable, we need to move
* to the alternate for ALL chunks in queue
*/
sctp_move_all_chunks_to_alt(stcb, net, alt);
+ net = alt;
}
/* mark the retran info */
if (asconf->sent != SCTP_DATAGRAM_RESEND)
sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
asconf->sent = SCTP_DATAGRAM_RESEND;
+
+		/* send another ASCONF if any are queued and we are able to */
+ sctp_send_asconf(stcb, alt, SCTP_ADDR_NOT_LOCKED);
}
return (0);
}
@@ -1677,16 +1693,11 @@ sctp_pathmtu_timer(struct sctp_inpcb *inp,
struct sctp_tcb *stcb,
struct sctp_nets *net)
{
- uint32_t next_mtu;
+ uint32_t next_mtu, mtu;
- /* restart the timer in any case */
next_mtu = sctp_getnext_mtu(inp, net->mtu);
- if (next_mtu <= net->mtu) {
- /* nothing to do */
- return;
- } {
- uint32_t mtu;
+ if ((next_mtu > net->mtu) && (net->port == 0)) {
if ((net->src_addr_selected == 0) ||
(net->ro._s_addr == NULL) ||
(net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
@@ -1695,10 +1706,26 @@ sctp_pathmtu_timer(struct sctp_inpcb *inp,
net->ro._s_addr = NULL;
net->src_addr_selected = 0;
} else if (net->ro._s_addr == NULL) {
+#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
+ if (net->ro._l_addr.sa.sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+
+ /* KAME hack: embed scopeid */
+ (void)sa6_embedscope(sin6, ip6_use_defzone);
+ }
+#endif
+
net->ro._s_addr = sctp_source_address_selection(inp,
stcb,
(sctp_route_t *) & net->ro,
net, 0, stcb->asoc.vrf_id);
+#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
+ if (net->ro._l_addr.sa.sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+
+ (void)sa6_recoverscope(sin6);
+ }
+#endif /* INET6 */
}
if (net->ro._s_addr)
net->src_addr_selected = 1;
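[Editor's note] On an ASCONF timeout the timer now walks the whole asconf_send_queue: chunks aimed at the failed destination are re-pointed at the alternate, and everything is marked SCTP_DATAGRAM_RESEND, bumping the retransmit count once per chunk that had actually been sent. A simplified, self-contained version of that loop is below, with toy types and the reference counting and timer restart omitted.

#include <stdio.h>
#include <sys/queue.h>

enum { UNSENT, SENT, RESEND };

struct toy_net { const char *name; };

struct toy_chunk {
	TAILQ_ENTRY(toy_chunk) next;
	struct toy_net *whoTo;
	int sent;
};

TAILQ_HEAD(toy_queue, toy_chunk);

/* Move every queued ASCONF to the alternate path and mark it for resend. */
static int
rehome_asconfs(struct toy_queue *q, struct toy_net *failed, struct toy_net *alt)
{
	struct toy_chunk *chk;
	int retran_bumps = 0;

	TAILQ_FOREACH(chk, q, next) {
		if (chk->whoTo == failed)
			chk->whoTo = alt;	/* real code swaps refcounts too */
		if (chk->sent != RESEND && chk->sent != UNSENT)
			retran_bumps++;		/* count it as a retransmission */
		chk->sent = RESEND;
	}
	return (retran_bumps);
}

int
main(void)
{
	struct toy_net a = { "net-a" }, b = { "net-b" };
	struct toy_chunk c1 = { .whoTo = &a, .sent = SENT };
	struct toy_chunk c2 = { .whoTo = &a, .sent = UNSENT };
	struct toy_queue q = TAILQ_HEAD_INITIALIZER(q);

	TAILQ_INSERT_TAIL(&q, &c1, next);
	TAILQ_INSERT_TAIL(&q, &c2, next);
	printf("retran count bumps: %d, c1 now -> %s\n",
	    rehome_asconfs(&q, &a, &b), c1.whoTo->name);
	return (0);
}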
diff --git a/sys/netinet/sctp_usrreq.c b/sys/netinet/sctp_usrreq.c
index 1be19f9..c1a1acd 100644
--- a/sys/netinet/sctp_usrreq.c
+++ b/sys/netinet/sctp_usrreq.c
@@ -102,7 +102,7 @@ sctp_pcbinfo_cleanup(void)
}
-static void
+void
sctp_pathmtu_adjustment(struct sctp_inpcb *inp,
struct sctp_tcb *stcb,
struct sctp_nets *net,
diff --git a/sys/netinet/sctp_var.h b/sys/netinet/sctp_var.h
index cae00b0..21af7d1 100644
--- a/sys/netinet/sctp_var.h
+++ b/sys/netinet/sctp_var.h
@@ -300,7 +300,9 @@ int sctp_disconnect(struct socket *so);
void sctp_ctlinput __P((int, struct sockaddr *, void *));
int sctp_ctloutput __P((struct socket *, struct sockopt *));
+void sctp_input_with_port __P((struct mbuf *, int, uint16_t));
void sctp_input __P((struct mbuf *, int));
+void sctp_pathmtu_adjustment __P((struct sctp_inpcb *, struct sctp_tcb *, struct sctp_nets *, uint16_t));
void sctp_drain __P((void));
void sctp_init __P((void));
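[Editor's note] sctp_var.h exports a new sctp_input_with_port() entry point so a UDP tunnel socket can hand the decapsulated packet to SCTP together with the peer's UDP source port, which later lands in net->port and is echoed on replies. The helper below is purely illustrative; the names and the userland framing are invented, and it only shows the decapsulate-and-pass-the-port idea.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Minimal UDP header layout, for illustration only. */
struct udp_hdr { uint16_t sport, dport, ulen, sum; };

/* Placeholder for the SCTP input path that also receives the tunnel port. */
static void
sctp_input_with_port_stub(const uint8_t *sctp_pkt, size_t len, uint16_t port)
{
	(void)sctp_pkt;
	printf("inner SCTP packet of %zu bytes, reply via UDP port %u\n", len, port);
}

/*
 * Decapsulate one received UDP datagram: validate the UDP length, remember
 * the sender's source port so replies can be tunneled back, and hand the
 * inner SCTP packet on together with that port.
 */
static int
udp_tunnel_receive(const uint8_t *dgram, size_t len)
{
	struct udp_hdr udp;

	if (len < sizeof(udp))
		return (-1);
	memcpy(&udp, dgram, sizeof(udp));
	if (ntohs(udp.ulen) < sizeof(udp) || ntohs(udp.ulen) > len)
		return (-1);				/* malformed datagram */
	sctp_input_with_port_stub(dgram + sizeof(udp),
	    ntohs(udp.ulen) - sizeof(udp), ntohs(udp.sport));
	return (0);
}

int
main(void)
{
	uint8_t dgram[64] = { 0 };
	struct udp_hdr hdr = { htons(4444), htons(9899), htons(sizeof(dgram)), 0 };

	memcpy(dgram, &hdr, sizeof(hdr));
	return (udp_tunnel_receive(dgram, sizeof(dgram)));
}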
diff --git a/sys/netinet/sctputil.c b/sys/netinet/sctputil.c
index aab1510..05082ac 100644
--- a/sys/netinet/sctputil.c
+++ b/sys/netinet/sctputil.c
@@ -956,6 +956,7 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
asoc->assoc_id = asoc->my_vtag;
asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
sctp_select_initial_TSN(&m->sctp_ep);
+ asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
/* we are optimisitic here */
asoc->peer_supports_pktdrop = 1;
@@ -1151,6 +1152,7 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
TAILQ_INIT(&asoc->free_chunks);
TAILQ_INIT(&asoc->out_wheel);
TAILQ_INIT(&asoc->control_send_queue);
+ TAILQ_INIT(&asoc->asconf_send_queue);
TAILQ_INIT(&asoc->send_queue);
TAILQ_INIT(&asoc->sent_queue);
TAILQ_INIT(&asoc->reasmqueue);
@@ -3792,7 +3794,7 @@ sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
- uint32_t vrf_id)
+ uint32_t vrf_id, uint16_t port)
{
uint32_t vtag;
@@ -3810,7 +3812,7 @@ sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
vrf_id = stcb->asoc.vrf_id;
stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
}
- sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id);
+ sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
if (stcb != NULL) {
/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
@@ -3967,7 +3969,7 @@ sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
- struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id)
+ struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
{
struct sctp_chunkhdr *ch, chunk_buf;
unsigned int chk_length;
@@ -4005,7 +4007,7 @@ sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
*/
return;
case SCTP_SHUTDOWN_ACK:
- sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id);
+ sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
return;
default:
break;
@@ -4014,7 +4016,7 @@ sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
sizeof(*ch), (uint8_t *) & chunk_buf);
}
- sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id);
+ sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
}
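Both of these hunks follow the mechanical pattern used throughout the commit: every function that may emit a reply packet grows a trailing uint16_t port argument and forwards it unchanged to the sender, so the reply can be UDP-encapsulated when the inbound packet arrived that way (0 keeps the classic, non-tunneled path). A hedged illustration of the pass-through with invented names; 9899 is only an example port value:

```c
#include <stdint.h>
#include <stdio.h>

/* Lowest layer: chooses between raw SCTP and SCTP-over-UDP emission. */
static void
emit_reply(const char *what, uint16_t port)
{
	if (port == 0)
		printf("%s sent as plain SCTP\n", what);
	else
		printf("%s sent inside UDP, dest port %u\n", what, port);
}

/* Mid layers just forward the port they were handed. */
static void
send_abort(uint32_t vtag, uint16_t port)
{
	(void)vtag;
	emit_reply("ABORT", port);
}

static void
handle_ootb(uint16_t port)
{
	/* ... chunk inspection elided ... */
	send_abort(0, port);
}

int
main(void)
{
	handle_ootb(0);      /* packet arrived as native SCTP */
	handle_ootb(9899);   /* packet arrived UDP-encapsulated */
	return 0;
}
```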
/*
@@ -4140,8 +4142,8 @@ sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
sin6_1 = (struct sockaddr_in6 *)sa1;
sin6_2 = (struct sockaddr_in6 *)sa2;
- return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr,
- &sin6_2->sin6_addr));
+ return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
+ sin6_2));
}
#endif
case AF_INET:
@@ -4776,8 +4778,8 @@ sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
}
#ifdef INET6
if (addr->sa_family == AF_INET6) {
- if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr,
- &laddr->ifa->address.sin6.sin6_addr)) {
+ if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
+ &laddr->ifa->address.sin6)) {
/* found him. */
if (holds_lock == 0) {
SCTP_INP_RUNLOCK(inp);
@@ -4866,8 +4868,8 @@ sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
}
#ifdef INET6
if (addr->sa_family == AF_INET6) {
- if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr,
- &sctp_ifap->address.sin6.sin6_addr)) {
+ if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
+ &sctp_ifap->address.sin6)) {
/* found him. */
if (holds_lock == 0)
SCTP_IPI_ADDR_RUNLOCK();
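These hunks change SCTP6_ARE_ADDR_EQUAL to take whole sockaddr_in6 structures rather than bare in6_addr fields, which lets the comparison take the scope into account for link-local addresses. A sketch of what such a scope-aware comparison can look like; this is an illustration, not the macro's actual definition:

```c
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
#include <stdio.h>

/* Equal only if the 128-bit addresses match and, for link-local
 * addresses, the scope (interface) also matches. */
static int
addr6_equal(const struct sockaddr_in6 *a, const struct sockaddr_in6 *b)
{
	if (memcmp(&a->sin6_addr, &b->sin6_addr, sizeof(struct in6_addr)) != 0)
		return 0;
	if (IN6_IS_ADDR_LINKLOCAL(&a->sin6_addr) &&
	    a->sin6_scope_id != b->sin6_scope_id)
		return 0;
	return 1;
}

int
main(void)
{
	struct sockaddr_in6 a = { .sin6_family = AF_INET6, .sin6_scope_id = 1 };
	struct sockaddr_in6 b = { .sin6_family = AF_INET6, .sin6_scope_id = 2 };

	inet_pton(AF_INET6, "fe80::1", &a.sin6_addr);
	inet_pton(AF_INET6, "fe80::1", &b.sin6_addr);
	/* Same bits, different links: not equal under a scope-aware compare. */
	printf("equal: %d\n", addr6_equal(&a, &b));
	return 0;
}
```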
@@ -6605,3 +6607,19 @@ sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_
}
#endif
+/* We will need to add support
+ * for binding the ports and such here
+ * so we can do UDP tunneling. In
+ * the meantime, we return an error.
+ */
+
+void
+sctp_over_udp_stop(void)
+{
+ return;
+}
+int
+sctp_over_udp_start(void)
+{
+ return (-1);
+}
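Until the tunneling socket exists, sctp_over_udp_start() fails and callers are expected to treat the feature as unavailable. A hypothetical caller, for example a handler reacting to a tunneling port being configured, might look like the sketch below; set_tunneling_port and its policy are invented for illustration, and only the two stub bodies mirror the patch:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors the stubs added by the patch: no UDP socket support yet. */
static void sctp_over_udp_stop(void) { }
static int  sctp_over_udp_start(void) { return (-1); }

/* Hypothetical configuration hook: a nonzero port asks for tunneling,
 * zero turns it off.  Fails gracefully while the stubs return an error. */
static int
set_tunneling_port(uint16_t port)
{
	sctp_over_udp_stop();              /* drop any previous binding first */
	if (port == 0)
		return 0;                  /* tunneling disabled */
	if (sctp_over_udp_start() < 0) {
		fprintf(stderr, "SCTP over UDP not available yet\n");
		return -1;
	}
	return 0;
}

int
main(void)
{
	return set_tunneling_port(9899) == 0 ? 0 : 1;
}
```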
diff --git a/sys/netinet/sctputil.h b/sys/netinet/sctputil.h
index ec4631c..3fd384e 100644
--- a/sys/netinet/sctputil.h
+++ b/sys/netinet/sctputil.h
@@ -178,7 +178,7 @@ sctp_abort_notification(struct sctp_tcb *, int, int
/* We abort responding to an IP packet for some reason */
void
sctp_abort_association(struct sctp_inpcb *, struct sctp_tcb *,
- struct mbuf *, int, struct sctphdr *, struct mbuf *, uint32_t);
+ struct mbuf *, int, struct sctphdr *, struct mbuf *, uint32_t, uint16_t);
/* We choose to abort via user input */
@@ -192,7 +192,7 @@ sctp_abort_an_association(struct sctp_inpcb *, struct sctp_tcb *, int,
void
sctp_handle_ootb(struct mbuf *, int, int, struct sctphdr *,
- struct sctp_inpcb *, struct mbuf *, uint32_t);
+ struct sctp_inpcb *, struct mbuf *, uint32_t, uint16_t);
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
@@ -314,6 +314,9 @@ do { \
} \
} while (0)
+/* new functions to start/stop udp tunneling */
+void sctp_over_udp_stop(void);
+int sctp_over_udp_start(void);
int
sctp_soreceive(struct socket *so, struct sockaddr **psa,
diff --git a/sys/netinet6/sctp6_usrreq.c b/sys/netinet6/sctp6_usrreq.c
index 65372da..8798092 100644
--- a/sys/netinet6/sctp6_usrreq.c
+++ b/sys/netinet6/sctp6_usrreq.c
@@ -177,14 +177,14 @@ sctp_skip_csum:
sh->v_tag = 0;
}
if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
- sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id);
+ sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, 0);
goto bad;
}
if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
goto bad;
}
if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
- sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id);
+ sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id, 0);
goto bad;
} else if (stcb == NULL) {
refcount_up = 1;
@@ -212,7 +212,7 @@ sctp_skip_csum:
/* sa_ignore NO_NULL_CHK */
sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
- in6p, stcb, net, ecn_bits, vrf_id);
+ in6p, stcb, net, ecn_bits, vrf_id, 0);
/* inp's ref-count reduced && stcb unlocked */
/* XXX this stuff below gets moved to appropriate parts later... */
if (m)