author     rrs <rrs@FreeBSD.org>  2006-11-03 15:23:16 +0000
committer  rrs <rrs@FreeBSD.org>  2006-11-03 15:23:16 +0000
commit     3d3e3f2242423b47549f89486754bc40030fbe9f (patch)
tree       0ec895f64207afbb268edd872d01288ffc058501
parent     d23275fe7d190eab56c82bf462ecb67346e58ab3 (diff)
download   FreeBSD-src-3d3e3f2242423b47549f89486754bc40030fbe9f.zip
           FreeBSD-src-3d3e3f2242423b47549f89486754bc40030fbe9f.tar.gz
Ok, here it is: we finally add SCTP to CURRENT. Note that this work is
not just mine; it is also the work of Peter Lei and Michael Tuexen,
my two key co-developers on the project, and they deserve atta-boys
too:

    peterlei@cisco.com
    tuexen@fh-muenster.de

I did run "make sysent", which updated the syscalls and sysproto; I
hope that is correct. Without it you can't build, since we have new
syscalls for SCTP.

So go look at NOTES, add "options SCTP" (make sure INET and INET6 are
present too), and play with SCTP. I will see about committing some
test tools I have after I figure out where to place them. I also have
a library (libsctp.a) that adds some of the missing socket-API
functions; I need to get it into the libraries and will talk to
George about that :-)

There may still be some 64-bit issues in here; none of us has a
64-bit processor to test with yet. Michael may have a Mac, but that is
another beast. If you have a Mac and want to use SCTP, contact
Michael; he maintains a web site with a loadable module of this code.

Reviewed by:	gnn
Approved by:	gnn
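For the impatient, a minimal kernel-config fragment (a sketch distilled
from the NOTES text added below; the SCTP_DEBUG line is optional):

    options 	INET		# required by SCTP
    options 	INET6		# required by SCTP (the stack is dual-stacked)
    options 	SCTP		# the SCTP transport protocol itself
    #options 	SCTP_DEBUG	# optional: bitmask-gated debug printing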
-rw-r--r--  sys/compat/freebsd32/syscalls.master    10
-rw-r--r--  sys/conf/NOTES                          91
-rw-r--r--  sys/conf/files                          18
-rw-r--r--  sys/conf/options                        27
-rw-r--r--  sys/kern/init_sysent.c                   4
-rw-r--r--  sys/kern/syscalls.c                      4
-rw-r--r--  sys/kern/syscalls.master                10
-rw-r--r--  sys/kern/systrace_args.c                47
-rw-r--r--  sys/kern/uipc_syscalls.c               451
-rw-r--r--  sys/net/rtsock.c                        15
-rw-r--r--  sys/netinet/in_proto.c                  48
-rw-r--r--  sys/netinet/sctp.h                     349
-rw-r--r--  sys/netinet/sctp_asconf.c             2856
-rw-r--r--  sys/netinet/sctp_asconf.h               80
-rw-r--r--  sys/netinet/sctp_auth.c               2389
-rw-r--r--  sys/netinet/sctp_auth.h                262
-rw-r--r--  sys/netinet/sctp_bsd_addr.c           2032
-rw-r--r--  sys/netinet/sctp_bsd_addr.h             70
-rw-r--r--  sys/netinet/sctp_constants.h           903
-rw-r--r--  sys/netinet/sctp_crc32.c               713
-rw-r--r--  sys/netinet/sctp_crc32.h                56
-rw-r--r--  sys/netinet/sctp_header.h              562
-rw-r--r--  sys/netinet/sctp_indata.c             5588
-rw-r--r--  sys/netinet/sctp_indata.h              118
-rw-r--r--  sys/netinet/sctp_input.c              4749
-rw-r--r--  sys/netinet/sctp_input.h                57
-rw-r--r--  sys/netinet/sctp_lock_bsd.h            355
-rw-r--r--  sys/netinet/sctp_os.h                   66
-rw-r--r--  sys/netinet/sctp_os_bsd.h               89
-rw-r--r--  sys/netinet/sctp_output.c            10378
-rw-r--r--  sys/netinet/sctp_output.h              171
-rw-r--r--  sys/netinet/sctp_pcb.c                5283
-rw-r--r--  sys/netinet/sctp_pcb.h                 504
-rw-r--r--  sys/netinet/sctp_peeloff.c             240
-rw-r--r--  sys/netinet/sctp_peeloff.h              56
-rw-r--r--  sys/netinet/sctp_structs.h             892
-rw-r--r--  sys/netinet/sctp_timer.c              1736
-rw-r--r--  sys/netinet/sctp_timer.h                99
-rw-r--r--  sys/netinet/sctp_uio.h                 946
-rw-r--r--  sys/netinet/sctp_usrreq.c             4852
-rw-r--r--  sys/netinet/sctp_var.h                 476
-rw-r--r--  sys/netinet/sctputil.c                5390
-rw-r--r--  sys/netinet/sctputil.h                 314
-rw-r--r--  sys/netinet6/in6_proto.c                48
-rw-r--r--  sys/netinet6/sctp6_usrreq.c           1370
-rw-r--r--  sys/netinet6/sctp6_var.h                52
-rw-r--r--  sys/sys/mbuf.h                           1
-rw-r--r--  sys/sys/socket.h                         1
-rw-r--r--  sys/sys/syscall.h                        6
-rw-r--r--  sys/sys/syscall.mk                       6
-rw-r--r--  sys/sys/sysproto.h                      39
51 files changed, 54873 insertions, 6 deletions
diff --git a/sys/compat/freebsd32/syscalls.master b/sys/compat/freebsd32/syscalls.master
index de2ab5b..c8b7b08 100644
--- a/sys/compat/freebsd32/syscalls.master
+++ b/sys/compat/freebsd32/syscalls.master
@@ -771,3 +771,13 @@
468 AUE_NULL UNIMPL nosys
469 AUE_NULL UNIMPL __getpath_fromfd
470 AUE_NULL UNIMPL __getpath_fromaddr
+471 AUE_NULL STD { int sctp_peeloff(int sd, uint32_t name); }
+472 AUE_NULL STD { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \
+ caddr_t to, __socklen_t tolen, \
+ struct sctp_sndrcvinfo *sinfo, int flags); }
+473 AUE_NULL STD { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \
+ caddr_t to, __socklen_t tolen, \
+ struct sctp_sndrcvinfo *sinfo, int flags); }
+474 AUE_NULL STD { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \
+ struct sockaddr * from, __socklen_t *fromlenaddr, \
+ struct sctp_sndrcvinfo *sinfo, int *msg_flags); }
diff --git a/sys/conf/NOTES b/sys/conf/NOTES
index 35465f7..102d66c 100644
--- a/sys/conf/NOTES
+++ b/sys/conf/NOTES
@@ -506,6 +506,97 @@ options LIBMCHAIN
# libalias library, performing NAT
options LIBALIAS
+#
+# SCTP is a new transport protocol defined by
+# RFC 2960, updated by RFC 3309 and RFC 3758, and
+# soon to have a new base RFC and many more
+# extensions. This release supports all the extensions,
+# including many drafts (most about to become RFCs).
+# It is the premier SCTP implementation on the net
+# and is quite well tested.
+#
+# Note: you MUST have both INET and INET6 defined;
+# you don't have to enable V6, but SCTP is
+# dual-stacked and so far we have not teased apart
+# the V6 and V4 code, since an association can span
+# both a V6 and a V4 address at the SAME time :-)
+#
+options SCTP
+# There are a bunch of options:
+# this one turns on all sorts of
+# nasty debug printing that you can
+# do. It is all controlled by a
+# bit mask (settable by socket option and
+# by sysctl). Including it will not cause
+# logging until you set the bits, but it
+# can be quite verbose, so without this
+# option we don't do any of the tests for
+# bits and prints, which makes the code run
+# faster. If you are not debugging, don't use it.
+options SCTP_DEBUG
+#
+# High speed enables Sally Floyd's HighSpeed TCP option
+# for congestion window increase. Use it only in
+# very high-speed networks, and with caution, since I
+# doubt it will compete fairly with peers. For the big
+# bad Internet it is best NOT to enable it.
+#
+options SCTP_HIGH_SPEED
+#
+# This option turns off the CRC32c checksum. Basically,
+# you will not be able to talk to anyone else who
+# has not done this. It is mostly for experimentation, to
+# see how much CPU the CRC32c really takes. Most new
+# cards for TCP support checksum offload, so this
+# option gives you a "view" into what SCTP would be
+# like with such an offload (which so far only exists
+# in high-end iSCSI boards). With the new
+# splitting-8's algorithm it is not as bad as it used
+# to be, but this option does speed things up; try it
+# only in a captive lab environment :-)
+options SCTP_WITH_NO_CSUM
+#
+# Logging is another debug tool that is way
+# cool, but it does take resources, so it is off
+# by default. To do any logging you must first
+# enable SCTP_STAT_LOGGING. This gets the utilities
+# that actually do the logging into the code base and
+# allocates a huge fixed circular buffer that logging
+# uses (about 80,000 entries that are probably 8 long
+# words or so each, so it does take a LOT of memory).
+# It is cool for real-time debugging though.
+#
+options SCTP_STAT_LOGGING
+#
+# The options after this turn on specific types of
+# logging. You can monitor CWND growth, flight size
+# and all sorts of things. Go look at the code and
+# see. I have used this to produce interesting
+# charts and graphs as well :->
+#
+# I have not yet committed the tools to fetch and print
+# the logs; I will do that eventually. Until then,
+# if you want them, send me an email: rrs@freebsd.org
+#
+options SCTP_LOG_MAXBURST
+options SCTP_LOG_RWND
+options SCTP_CWND_LOGGING
+options SCTP_CWND_MONITOR
+options SCTP_BLK_LOGGING
+options SCTP_STR_LOGGING
+options SCTP_FR_LOGGING
+options SCTP_MAP_LOGGING
+options SCTP_SACK_LOGGING
+options SCTP_LOCK_LOGGING
+options SCTP_RTTVAR_LOGGING
+options SCTP_SB_LOGGING
+options SCTP_EARLYFR_LOGGING
+options SCTP_NAGLE_LOGGING
+options SCTP_WAKE_LOGGING
+options SCTP_RECV_RWND_LOGGING
+options SCTP_SACK_RWND_LOGGING
+options SCTP_MBUF_LOGGING
+
# altq(9). Enable the base part of the hooks with the ALTQ option.
# Individual disciplines must be built into the base system and can not be
# loaded as modules at this point. ALTQ requires a stable TSC so if yours is
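Since the SCTP_DEBUG output above is gated by a runtime bit mask
("settable by socket opt and by sysctl"), a userland sketch for
flipping it could look like the following. It assumes the
implementation-specific SCTP_SET_DEBUG_LEVEL option from
<netinet/sctp.h> is accepted at level IPPROTO_SCTP; the actual mask
bit definitions live in the SCTP headers and are not reproduced here.

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/sctp.h>

    /*
     * Sketch only: enable SCTP debug printing on a kernel built with
     * "options SCTP_DEBUG".  The mask value is a placeholder; the
     * real bit definitions are in the SCTP headers.
     */
    static int
    set_sctp_debug_mask(int sd, uint32_t mask)
    {
    	return (setsockopt(sd, IPPROTO_SCTP, SCTP_SET_DEBUG_LEVEL,
    	    &mask, sizeof(mask)));
    }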
diff --git a/sys/conf/files b/sys/conf/files
index 0af0998..52cd9c1 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -349,8 +349,9 @@ crypto/rijndael/rijndael-alg-fst.c optional crypto | geom_bde | \
crypto/rijndael/rijndael-api-fst.c optional geom_bde | random
crypto/rijndael/rijndael-api.c optional crypto | ipsec | wlan_ccmp
crypto/sha1.c optional carp | crypto | ipsec | \
- netgraph_mppc_encryption
-crypto/sha2/sha2.c optional crypto | geom_bde | ipsec | random
+ netgraph_mppc_encryption | sctp
+crypto/sha2/sha2.c optional crypto | geom_bde | ipsec | random | \
+ sctp
ddb/db_access.c optional ddb
ddb/db_break.c optional ddb
ddb/db_command.c optional ddb
@@ -1748,6 +1749,18 @@ netinet/ip_mroute.c optional mrouting
netinet/ip_options.c optional inet
netinet/ip_output.c optional inet
netinet/raw_ip.c optional inet
+netinet/sctp_usrreq.c optional inet inet6 sctp
+netinet/sctp_pcb.c optional inet inet6 sctp
+netinet/sctputil.c optional inet inet6 sctp
+netinet/sctp_bsd_addr.c optional inet inet6 sctp
+netinet/sctp_timer.c optional inet inet6 sctp
+netinet/sctp_input.c optional inet inet6 sctp
+netinet/sctp_output.c optional inet inet6 sctp
+netinet/sctp_indata.c optional inet inet6 sctp
+netinet/sctp_asconf.c optional inet inet6 sctp
+netinet/sctp_peeloff.c optional inet inet6 sctp
+netinet/sctp_crc32.c optional inet inet6 sctp
+netinet/sctp_auth.c optional inet inet6 sctp
netinet/tcp_debug.c optional tcpdebug
netinet/tcp_hostcache.c optional inet
netinet/tcp_input.c optional inet
@@ -1800,6 +1813,7 @@ netinet6/nd6_rtr.c optional inet6
netinet6/raw_ip6.c optional inet6
netinet6/route6.c optional inet6
netinet6/scope6.c optional inet6
+netinet6/sctp6_usrreq.c optional inet inet6 sctp
netinet6/udp6_output.c optional inet6
netinet6/udp6_usrreq.c optional inet6
netipsec/ipsec.c optional fast_ipsec
diff --git a/sys/conf/options b/sys/conf/options
index fdf0dfe..e0681ea 100644
--- a/sys/conf/options
+++ b/sys/conf/options
@@ -395,6 +395,33 @@ DEV_VLAN opt_vlan.h
VLAN_ARRAY opt_vlan.h
XBONEHACK
+#
+# SCTP
+#
+SCTP opt_sctp.h
+SCTP_DEBUG opt_sctp.h
+SCTP_HIGH_SPEED opt_sctp.h
+SCTP_LOG_MAXBURST opt_sctp.h
+SCTP_LOG_RWND opt_sctp.h
+SCTP_STAT_LOGGING opt_sctp.h
+SCTP_CWND_LOGGING opt_sctp.h
+SCTP_CWND_MONITOR opt_sctp.h
+SCTP_BLK_LOGGING opt_sctp.h
+SCTP_STR_LOGGING opt_sctp.h
+SCTP_FR_LOGGING opt_sctp.h
+SCTP_MAP_LOGGING opt_sctp.h
+SCTP_SACK_LOGGING opt_sctp.h
+SCTP_LOCK_LOGGING opt_sctp.h
+SCTP_RTTVAR_LOGGING opt_sctp.h
+SCTP_SB_LOGGING opt_sctp.h
+SCTP_WITH_NO_CSUM opt_sctp.h
+SCTP_EARLYFR_LOGGING opt_sctp.h
+SCTP_NAGLE_LOGGING opt_sctp.h
+SCTP_WAKE_LOGGING opt_sctp.h
+SCTP_RECV_RWND_LOGGING opt_sctp.h
+SCTP_SACK_RWND_LOGGING opt_sctp.h
+SCTP_MBUF_LOGGING opt_sctp.h
+
# Netgraph(4). Use option NETGRAPH to enable the base netgraph code.
# Each netgraph node type can be either be compiled into the kernel
# or loaded dynamically. To get the former, include the corresponding
diff --git a/sys/kern/init_sysent.c b/sys/kern/init_sysent.c
index b98c6d4..35de9e9 100644
--- a/sys/kern/init_sysent.c
+++ b/sys/kern/init_sysent.c
@@ -500,4 +500,8 @@ struct sysent sysent[] = {
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 468 = nosys */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 469 = __getpath_fromfd */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 470 = __getpath_fromaddr */
+ { AS(sctp_peeloff_args), (sy_call_t *)sctp_peeloff, AUE_NULL, NULL, 0, 0 }, /* 471 = sctp_peeloff */
+ { AS(sctp_generic_sendmsg_args), (sy_call_t *)sctp_generic_sendmsg, AUE_NULL, NULL, 0, 0 }, /* 472 = sctp_generic_sendmsg */
+ { AS(sctp_generic_sendmsg_iov_args), (sy_call_t *)sctp_generic_sendmsg_iov, AUE_NULL, NULL, 0, 0 }, /* 473 = sctp_generic_sendmsg_iov */
+ { AS(sctp_generic_recvmsg_args), (sy_call_t *)sctp_generic_recvmsg, AUE_NULL, NULL, 0, 0 }, /* 474 = sctp_generic_recvmsg */
};
diff --git a/sys/kern/syscalls.c b/sys/kern/syscalls.c
index c72425c..0fa7b5b 100644
--- a/sys/kern/syscalls.c
+++ b/sys/kern/syscalls.c
@@ -478,4 +478,8 @@ const char *syscallnames[] = {
"#468", /* 468 = nosys */
"#469", /* 469 = __getpath_fromfd */
"#470", /* 470 = __getpath_fromaddr */
+ "sctp_peeloff", /* 471 = sctp_peeloff */
+ "sctp_generic_sendmsg", /* 472 = sctp_generic_sendmsg */
+ "sctp_generic_sendmsg_iov", /* 473 = sctp_generic_sendmsg_iov */
+ "sctp_generic_recvmsg", /* 474 = sctp_generic_recvmsg */
};
diff --git a/sys/kern/syscalls.master b/sys/kern/syscalls.master
index 54e7c39..dd9e25f 100644
--- a/sys/kern/syscalls.master
+++ b/sys/kern/syscalls.master
@@ -825,5 +825,15 @@
468 AUE_NULL UNIMPL nosys
469 AUE_NULL UNIMPL __getpath_fromfd
470 AUE_NULL UNIMPL __getpath_fromaddr
+471 AUE_NULL STD { int sctp_peeloff(int sd, uint32_t name); }
+472 AUE_NULL STD { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \
+ caddr_t to, __socklen_t tolen, \
+ struct sctp_sndrcvinfo *sinfo, int flags); }
+473 AUE_NULL STD { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \
+ caddr_t to, __socklen_t tolen, \
+ struct sctp_sndrcvinfo *sinfo, int flags); }
+474 AUE_NULL STD { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \
+ struct sockaddr * from, __socklen_t *fromlenaddr, \
+ struct sctp_sndrcvinfo *sinfo, int *msg_flags); }
; Please copy any additions and changes to the following compatibility tables:
; sys/compat/freebsd32/syscalls.master
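Until the libsctp.a wrappers mentioned in the commit message are
committed, the new entries can only be reached via syscall(2). A
hypothetical wrapper (the name my_sctp_sendmsg and the direct use of
the generated SYS_sctp_generic_sendmsg constant are illustrative, not
committed code):

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <sys/syscall.h>
    #include <netinet/in.h>
    #include <netinet/sctp.h>
    #include <unistd.h>

    /*
     * Hypothetical userland wrapper over the new
     * sctp_generic_sendmsg(2); the argument order follows the
     * syscalls.master entry above.
     */
    static ssize_t
    my_sctp_sendmsg(int sd, void *msg, int mlen,
        struct sockaddr *to, socklen_t tolen,
        struct sctp_sndrcvinfo *sinfo, int flags)
    {
    	return (syscall(SYS_sctp_generic_sendmsg, sd, msg, mlen,
    	    to, tolen, sinfo, flags));
    }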
diff --git a/sys/kern/systrace_args.c b/sys/kern/systrace_args.c
index aa7e726..6aef4a0 100644
--- a/sys/kern/systrace_args.c
+++ b/sys/kern/systrace_args.c
@@ -2758,6 +2758,53 @@ systrace_args(int sysnum, void *params, u_int64_t *uarg, int *n_args)
*n_args = 3;
break;
}
+ /* sctp_peeloff */
+ case 471: {
+ struct sctp_peeloff_args *p = params;
+ iarg[0] = p->sd; /* int */
+ uarg[1] = p->name; /* uint32_t */
+ *n_args = 2;
+ break;
+ }
+ /* sctp_generic_sendmsg */
+ case 472: {
+ struct sctp_generic_sendmsg_args *p = params;
+ iarg[0] = p->sd; /* int */
+ uarg[1] = (intptr_t) p->msg; /* caddr_t */
+ iarg[2] = p->mlen; /* int */
+ uarg[3] = (intptr_t) p->to; /* caddr_t */
+ iarg[4] = p->tolen; /* __socklen_t */
+ uarg[5] = (intptr_t) p->sinfo; /* struct sctp_sndrcvinfo * */
+ iarg[6] = p->flags; /* int */
+ *n_args = 7;
+ break;
+ }
+ /* sctp_generic_sendmsg_iov */
+ case 473: {
+ struct sctp_generic_sendmsg_iov_args *p = params;
+ iarg[0] = p->sd; /* int */
+ uarg[1] = (intptr_t) p->iov; /* struct iovec * */
+ iarg[2] = p->iovlen; /* int */
+ uarg[3] = (intptr_t) p->to; /* caddr_t */
+ iarg[4] = p->tolen; /* __socklen_t */
+ uarg[5] = (intptr_t) p->sinfo; /* struct sctp_sndrcvinfo * */
+ iarg[6] = p->flags; /* int */
+ *n_args = 7;
+ break;
+ }
+ /* sctp_generic_recvmsg */
+ case 474: {
+ struct sctp_generic_recvmsg_args *p = params;
+ iarg[0] = p->sd; /* int */
+ uarg[1] = (intptr_t) p->iov; /* struct iovec * */
+ iarg[2] = p->iovlen; /* int */
+ uarg[3] = (intptr_t) p->from; /* struct sockaddr * */
+ uarg[4] = (intptr_t) p->fromlenaddr; /* __socklen_t * */
+ uarg[5] = (intptr_t) p->sinfo; /* struct sctp_sndrcvinfo * */
+ uarg[6] = (intptr_t) p->msg_flags; /* int * */
+ *n_args = 7;
+ break;
+ }
default:
*n_args = 0;
break;
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index 1636a69..6951f4c 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -35,6 +35,7 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#include "opt_sctp.h"
#include "opt_compat.h"
#include "opt_ktrace.h"
#include "opt_mac.h"
@@ -76,6 +77,11 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
+#ifdef SCTP
+#include <netinet/sctp.h>
+#include <netinet/sctp_peeloff.h>
+#endif /* SCTP */
+
static int sendit(struct thread *td, int s, struct msghdr *mp, int flags);
static int recvit(struct thread *td, int s, struct msghdr *mp, void *namelenp);
@@ -2319,3 +2325,448 @@ done:
return (error);
}
+
+
+int
+sctp_peeloff(td, uap)
+ struct thread *td;
+ register struct sctp_peeloff_args /* {
+ int sd;
+ caddr_t name;
+ } */ *uap;
+{
+#ifdef SCTP
+ struct filedesc *fdp;
+ struct file *nfp = NULL;
+ int error;
+ struct socket *head, *so;
+ int fd;
+ u_int fflag;
+
+ fdp = td->td_proc->p_fd;
+ error = fgetsock(td, uap->sd, &head, &fflag);
+ if (error)
+ goto done2;
+ error = sctp_can_peel_off(head, (sctp_assoc_t)uap->name);
+ if (error)
+ goto done; /* release the socket reference held by fgetsock() */
+ /*
+ * At this point we know we do have a assoc to pull
+ * we proceed to get the fd setup. This may block
+ * but that is ok.
+ */
+
+ error = falloc(td, &nfp, &fd);
+ if (error)
+ goto done;
+ td->td_retval[0] = fd;
+
+ so = sonewconn(head, SS_ISCONNECTED);
+ if (so == NULL) {
+ error = ECONNABORTED; /* as with accept(2) when no connection is available */
+ goto noconnection;
+ }
+ /*
+ * Before changing the flags on the socket, we have to bump the
+ * reference count. Otherwise, if the protocol calls sofree(),
+ * the socket will be released due to a zero refcount.
+ */
+ SOCK_LOCK(so);
+ soref(so); /* file descriptor reference */
+ SOCK_UNLOCK(so);
+
+ ACCEPT_LOCK();
+
+ TAILQ_REMOVE(&head->so_comp, so, so_list);
+ head->so_qlen--;
+ so->so_state |= (head->so_state & SS_NBIO);
+ so->so_state &= ~SS_NOFDREF;
+ so->so_qstate &= ~SQ_COMP;
+ so->so_head = NULL;
+
+ ACCEPT_UNLOCK();
+
+ error = sctp_do_peeloff(head, so, (sctp_assoc_t)uap->name);
+ if (error)
+ goto noconnection;
+ if (head->so_sigio != NULL)
+ fsetown(fgetown(&head->so_sigio), &so->so_sigio);
+
+ FILE_LOCK(nfp);
+ nfp->f_data = so;
+ nfp->f_flag = fflag;
+ nfp->f_ops = &socketops;
+ nfp->f_type = DTYPE_SOCKET;
+ FILE_UNLOCK(nfp);
+
+ noconnection:
+ /*
+ * close the new descriptor, assuming someone hasn't ripped it
+ * out from under us.
+ */
+ if (error)
+ fdclose(fdp, nfp, fd, td);
+
+ /*
+ * Release explicitly held references before returning.
+ */
+ done:
+ if (nfp != NULL)
+ fdrop(nfp, td);
+ fputsock(head);
+ done2:
+ return (error);
+#else
+ return (EOPNOTSUPP);
+#endif
+}
+
+
+int
+sctp_generic_sendmsg(td, uap)
+ struct thread *td;
+ register struct sctp_generic_sendmsg_args /* {
+ int sd,
+ caddr_t msg,
+ int mlen,
+ caddr_t to,
+ __socklen_t tolen,
+ struct sctp_sndrcvinfo *sinfo,
+ int flags
+ } */ *uap;
+{
+#ifdef SCTP
+ struct sctp_sndrcvinfo sinfo, *u_sinfo=NULL;
+ struct socket *so;
+ struct file *fp;
+ int use_rcvinfo=1;
+ int error=0, len;
+ struct sockaddr *to=NULL;
+#ifdef KTRACE
+ struct uio *ktruio = NULL;
+#endif
+ struct uio auio;
+ struct iovec iov[1];
+
+ if (uap->sinfo) {
+ error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
+ if (error)
+ return (error);
+ u_sinfo = &sinfo;
+ }
+
+ if (uap->tolen) {
+ error = getsockaddr(&to, uap->to, uap->tolen);
+ if (error) {
+ to = NULL;
+ goto sctp_bad2;
+ }
+ }
+ error = getsock(td->td_proc->p_fd, uap->sd, &fp, NULL);
+ if (error)
+ goto sctp_bad2; /* fp is not held if getsock() failed */
+
+ iov[0].iov_base = uap->msg;
+ iov[0].iov_len = uap->mlen;
+
+ so = (struct socket *)fp->f_data;
+#ifdef MAC
+ SOCK_LOCK(so);
+ error = mac_check_socket_send(td->td_ucred, so);
+ SOCK_UNLOCK(so);
+ if (error)
+ goto sctp_bad;
+#endif
+
+
+ auio.uio_iov = iov;
+ auio.uio_iovcnt = 1;
+ auio.uio_segflg = UIO_USERSPACE;
+ auio.uio_rw = UIO_WRITE;
+ auio.uio_td = td;
+ auio.uio_offset = 0; /* XXX */
+ auio.uio_resid = 0;
+ len = auio.uio_resid = uap->mlen;
+ error = sctp_lower_sosend(so,
+ to,
+ &auio,
+ (struct mbuf *)NULL,
+ (struct mbuf *)NULL,
+ uap->flags,
+ use_rcvinfo,
+ u_sinfo,
+ td );
+
+ if (error) {
+ if (auio.uio_resid != len && (error == ERESTART ||
+ error == EINTR || error == EWOULDBLOCK))
+ error = 0;
+ /* Generation of SIGPIPE can be controlled per socket */
+ if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
+ !(uap->flags & MSG_NOSIGNAL)) {
+ PROC_LOCK(td->td_proc);
+ psignal(td->td_proc, SIGPIPE);
+ PROC_UNLOCK(td->td_proc);
+ }
+ }
+ if (error == 0)
+ td->td_retval[0] = len - auio.uio_resid;
+#ifdef KTRACE
+ if (ktruio != NULL) {
+ ktruio->uio_resid = td->td_retval[0];
+ ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
+ }
+#endif
+ sctp_bad:
+ fdrop(fp, td);
+ sctp_bad2:
+ if (to)
+ FREE(to, M_SONAME);
+
+ return (error);
+#else
+ return (EOPNOTSUPP);
+#endif
+}
+
+
+int
+sctp_generic_sendmsg_iov(td, uap)
+ struct thread *td;
+ register struct sctp_generic_sendmsg_iov_args /* {
+ int sd,
+ struct iovec *iov,
+ int iovlen,
+ caddr_t to,
+ __socklen_t tolen,
+ struct sctp_sndrcvinfo *sinfo,
+ int flags
+ } */ *uap;
+{
+#ifdef SCTP
+ struct sctp_sndrcvinfo sinfo, *u_sinfo=NULL;
+ struct socket *so;
+ struct file *fp;
+ int use_rcvinfo=1;
+ int error=0, len, i;
+ struct sockaddr *to=NULL;
+#ifdef KTRACE
+ struct uio *ktruio = NULL;
+#endif
+ struct uio auio;
+ struct iovec *iov, *tiov;
+
+ if (uap->sinfo) {
+ error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
+ if (error)
+ return (error);
+ u_sinfo = &sinfo;
+ }
+
+ if (uap->tolen) {
+ error = getsockaddr(&to, uap->to, uap->tolen);
+ if (error) {
+ to = NULL;
+ goto sctp_bad2;
+ }
+ }
+ error = getsock(td->td_proc->p_fd, uap->sd, &fp, NULL);
+ if (error)
+ goto sctp_bad2; /* fp is not held if getsock() failed */
+
+ error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
+ if (error)
+ goto sctp_bad1;
+
+
+ so = (struct socket *)fp->f_data;
+#ifdef MAC
+ SOCK_LOCK(so);
+ error = mac_check_socket_send(td->td_ucred, so);
+ SOCK_UNLOCK(so);
+ if (error)
+ goto sctp_bad;
+#endif
+
+
+ auio.uio_iov = iov;
+ auio.uio_iovcnt = uap->iovlen;
+ auio.uio_segflg = UIO_USERSPACE;
+ auio.uio_rw = UIO_WRITE;
+ auio.uio_td = td;
+ auio.uio_offset = 0; /* XXX */
+ auio.uio_resid = 0;
+ tiov = iov;
+ for (i = 0; i < uap->iovlen; i++, tiov++) {
+ if ((auio.uio_resid += tiov->iov_len) < 0) {
+ error = EINVAL;
+ goto sctp_bad;
+ }
+ }
+ len = auio.uio_resid;
+ error = sctp_lower_sosend(so,
+ to,
+ &auio,
+ (struct mbuf *)NULL,
+ (struct mbuf *)NULL,
+ uap->flags,
+ use_rcvinfo,
+ u_sinfo,
+ td );
+
+ if (error) {
+ if (auio.uio_resid != len && (error == ERESTART ||
+ error == EINTR || error == EWOULDBLOCK))
+ error = 0;
+ /* Generation of SIGPIPE can be controlled per socket */
+ if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
+ !(uap->flags & MSG_NOSIGNAL)) {
+ PROC_LOCK(td->td_proc);
+ psignal(td->td_proc, SIGPIPE);
+ PROC_UNLOCK(td->td_proc);
+ }
+ }
+ if (error == 0)
+ td->td_retval[0] = len - auio.uio_resid;
+#ifdef KTRACE
+ if (ktruio != NULL) {
+ ktruio->uio_resid = td->td_retval[0];
+ ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
+ }
+#endif
+ sctp_bad:
+ free(iov, M_IOV);
+ sctp_bad1:
+ fdrop(fp, td);
+ sctp_bad2:
+ if (to)
+ FREE(to, M_SONAME);
+
+ return (error);
+#else
+ return (EOPNOTSUPP);
+#endif
+}
+
+int
+sctp_generic_recvmsg(td, uap)
+ struct thread *td;
+ register struct sctp_generic_recvmsg_args /* {
+ int sd,
+ struct iovec *iov,
+ int iovlen,
+ struct sockaddr *from,
+ __socklen_t *fromlenaddr,
+ struct sctp_sndrcvinfo *sinfo,
+ int *msg_flags
+ } */ *uap;
+{
+#ifdef SCTP
+ u_int8_t sockbufstore[256];
+ struct uio auio;
+ struct iovec *iov, *tiov;
+ struct sctp_sndrcvinfo sinfo;
+ struct socket *so;
+ struct file *fp;
+ struct sockaddr *fromsa;
+ int fromlen;
+ int len, i, msg_flags=0;
+ int error=0;
+#ifdef KTRACE
+ struct uio *ktruio = NULL;
+#endif
+ error = getsock(td->td_proc->p_fd, uap->sd, &fp, NULL);
+ if (error) {
+ return (error);
+ }
+ error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
+ if (error) {
+ goto out1;
+ }
+ so = fp->f_data;
+#ifdef MAC
+ SOCK_LOCK(so);
+ error = mac_check_socket_receive(td->td_ucred, so);
+ SOCK_UNLOCK(so);
+ if (error) {
+ goto out;
+ }
+#endif
+ if (uap->fromlenaddr) {
+ error = copyin(uap->fromlenaddr,
+ &fromlen, sizeof (fromlen));
+ if (error) {
+ goto out;
+ }
+ } else {
+ fromlen = 0;
+ }
+
+
+ auio.uio_iov = iov;
+ auio.uio_iovcnt = uap->iovlen;
+ auio.uio_segflg = UIO_USERSPACE;
+ auio.uio_rw = UIO_READ;
+ auio.uio_td = td;
+ auio.uio_offset = 0; /* XXX */
+ auio.uio_resid = 0;
+ tiov = iov;
+ for (i = 0; i < uap->iovlen; i++, tiov++) {
+ if ((auio.uio_resid += tiov->iov_len) < 0) {
+ error = EINVAL;
+ goto out;
+ }
+ }
+ len = auio.uio_resid;
+ fromsa = (struct sockaddr *)sockbufstore;
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_GENIO))
+ ktruio = cloneuio(&auio);
+#endif
+ error = sctp_sorecvmsg(so, &auio, (struct mbuf **)NULL,
+ fromsa, fromlen, &msg_flags, (struct sctp_sndrcvinfo *)&sinfo,
+ 1);
+ if (error) {
+ if (auio.uio_resid != (int)len && (error == ERESTART ||
+ error == EINTR || error == EWOULDBLOCK))
+ error = 0;
+ } else {
+ if (uap->sinfo)
+ error = copyout(&sinfo, uap->sinfo, sizeof (sinfo));
+ }
+#ifdef KTRACE
+ if (ktruio != NULL) {
+ ktruio->uio_resid = (int)len - auio.uio_resid;
+ ktrgenio(uap->sd, UIO_READ, ktruio, error);
+ }
+#endif
+ if (error)
+ goto out;
+ td->td_retval[0] = (int)len - auio.uio_resid;
+ if (fromlen && uap->from) {
+ len = fromlen;
+ if (len <= 0 || fromsa == 0)
+ len = 0;
+ else {
+ len = MIN(len, fromsa->sa_len);
+ error = copyout(fromsa, uap->from, (unsigned)len);
+ if (error)
+ goto out;
+ }
+ error = copyout(&len, uap->fromlenaddr, sizeof (socklen_t));
+ if (error) {
+ goto out;
+ }
+ }
+ if (uap->msg_flags) {
+ error = copyout(&msg_flags, uap->msg_flags, sizeof (int));
+ if (error) {
+ goto out;
+ }
+ }
+out:
+ free(iov, M_IOV);
+out1:
+ fdrop(fp, td);
+ return (error);
+#else
+ return (EOPNOTSUPP);
+#endif
+
+}
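The kernel side of sctp_peeloff() above mirrors accept(): it detaches
one association from a one-to-many socket into a fresh descriptor. A
rough userland sketch, again going through syscall(2), using an
association id previously learned from a notification or an
sctp_sndrcvinfo (the helper name is made up):

    #include <sys/types.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /*
     * Made-up helper: peel assoc_id off the one-to-many socket sd.
     * On success the return value is a new one-to-one socket
     * descriptor, just like the fd returned by accept(2).
     */
    static int
    peel_one_assoc(int sd, uint32_t assoc_id)
    {
    	return ((int)syscall(SYS_sctp_peeloff, sd, assoc_id));
    }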
diff --git a/sys/net/rtsock.c b/sys/net/rtsock.c
index 8ab39e4..c4eebf7 100644
--- a/sys/net/rtsock.c
+++ b/sys/net/rtsock.c
@@ -29,7 +29,7 @@
* @(#)rtsock.c 8.7 (Berkeley) 10/12/95
* $FreeBSD$
*/
-
+#include "opt_sctp.h"
#include <sys/param.h>
#include <sys/domain.h>
#include <sys/kernel.h>
@@ -51,6 +51,10 @@
#include <netinet/in.h>
+#ifdef SCTP
+extern void sctp_addr_change(struct ifaddr *ifa, int cmd);
+#endif /* SCTP */
+
MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables");
/* NB: these are not modified */
@@ -879,7 +883,14 @@ rt_newaddrmsg(int cmd, struct ifaddr *ifa, int error, struct rtentry *rt)
KASSERT(cmd == RTM_ADD || cmd == RTM_DELETE,
("unexpected cmd %u", cmd));
-
+#ifdef SCTP
+ /*
+ * Notify the SCTP stack; this will only get called when an
+ * address is added or deleted.
+ * XXX pass the ifaddr struct instead of ifa->ifa_addr...
+ */
+ sctp_addr_change(ifa, cmd);
+#endif /* SCTP */
if (route_cb.any_count == 0)
return;
for (pass = 1; pass < 3; pass++) {
diff --git a/sys/netinet/in_proto.c b/sys/netinet/in_proto.c
index d8caddd..e66f8b4 100644
--- a/sys/netinet/in_proto.c
+++ b/sys/netinet/in_proto.c
@@ -36,6 +36,7 @@
#include "opt_inet6.h"
#include "opt_pf.h"
#include "opt_carp.h"
+#include "opt_sctp.h"
#include <sys/param.h>
#include <sys/systm.h>
@@ -88,6 +89,13 @@ static struct pr_usrreqs nousrreqs;
#include <netipx/ipx_ip.h>
#endif
+#ifdef SCTP
+#include <netinet/in_pcb.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_var.h>
+#endif /* SCTP */
+
#ifdef DEV_PFSYNC
#include <net/pfvar.h>
#include <net/if_pfsync.h>
@@ -141,6 +149,43 @@ struct protosw inetsw[] = {
.pr_drain = tcp_drain,
.pr_usrreqs = &tcp_usrreqs
},
+#ifdef SCTP
+{
+ .pr_type = SOCK_DGRAM,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_SCTP,
+ .pr_flags = PR_WANTRCVD,
+ .pr_input = sctp_input,
+ .pr_ctlinput = sctp_ctlinput,
+ .pr_ctloutput = sctp_ctloutput,
+ .pr_init = sctp_init,
+ .pr_drain = sctp_drain,
+ .pr_usrreqs = &sctp_usrreqs
+},
+{
+ .pr_type = SOCK_SEQPACKET,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_SCTP,
+ .pr_flags = PR_WANTRCVD,
+ .pr_input = sctp_input,
+ .pr_ctlinput = sctp_ctlinput,
+ .pr_ctloutput = sctp_ctloutput,
+ .pr_drain = sctp_drain,
+ .pr_usrreqs = &sctp_usrreqs
+},
+
+{
+ .pr_type = SOCK_STREAM,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_SCTP,
+ .pr_flags = PR_WANTRCVD,
+ .pr_input = sctp_input,
+ .pr_ctlinput = sctp_ctlinput,
+ .pr_ctloutput = sctp_ctloutput,
+ .pr_drain = sctp_drain,
+ .pr_usrreqs = &sctp_usrreqs
+},
+#endif /* SCTP */
{
.pr_type = SOCK_RAW,
.pr_domain = &inetdomain,
@@ -376,6 +421,9 @@ SYSCTL_NODE(_net_inet, IPPROTO_IP, ip, CTLFLAG_RW, 0, "IP");
SYSCTL_NODE(_net_inet, IPPROTO_ICMP, icmp, CTLFLAG_RW, 0, "ICMP");
SYSCTL_NODE(_net_inet, IPPROTO_UDP, udp, CTLFLAG_RW, 0, "UDP");
SYSCTL_NODE(_net_inet, IPPROTO_TCP, tcp, CTLFLAG_RW, 0, "TCP");
+#ifdef SCTP
+SYSCTL_NODE(_net_inet, IPPROTO_SCTP, sctp, CTLFLAG_RW, 0, "SCTP");
+#endif
SYSCTL_NODE(_net_inet, IPPROTO_IGMP, igmp, CTLFLAG_RW, 0, "IGMP");
#ifdef FAST_IPSEC
/* XXX no protocol # to use, pick something "reserved" */
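The three protosw entries registered above make IPPROTO_SCTP reachable
through all three socket types. A quick sketch of what that buys
userland:

    #include <sys/socket.h>
    #include <netinet/in.h>

    static void
    open_sctp_sockets(void)
    {
    	int one_to_one, one_to_many;

    	/* TCP-style one-to-one model (the SOCK_STREAM entry above) */
    	one_to_one = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

    	/*
    	 * UDP-style one-to-many model; the SOCK_SEQPACKET and
    	 * SOCK_DGRAM entries both land in the same SCTP stack.
    	 */
    	one_to_many = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);

    	(void)one_to_one;
    	(void)one_to_many;
    }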
diff --git a/sys/netinet/sctp.h b/sys/netinet/sctp.h
new file mode 100644
index 0000000..6aa7dec
--- /dev/null
+++ b/sys/netinet/sctp.h
@@ -0,0 +1,349 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* $KAME: sctp.h,v 1.18 2005/03/06 16:04:16 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef _NETINET_SCTP_H_
+#define _NETINET_SCTP_H_
+
+#include <sys/types.h>
+
+/*
+ * SCTP protocol - RFC2960.
+ */
+
+struct sctphdr {
+ uint16_t src_port; /* source port */
+ uint16_t dest_port; /* destination port */
+ uint32_t v_tag; /* verification tag of packet */
+ uint32_t checksum; /* CRC32c checksum (per RFC 3309) */
+ /* chunks follow... */
+};
+
+/*
+ * SCTP Chunks
+ */
+struct sctp_chunkhdr {
+ uint8_t chunk_type; /* chunk type */
+ uint8_t chunk_flags; /* chunk flags */
+ uint16_t chunk_length; /* chunk length */
+ /* optional params follow */
+};
+
+/*
+ * SCTP chunk parameters
+ */
+struct sctp_paramhdr {
+ uint16_t param_type; /* parameter type */
+ uint16_t param_length; /* parameter length */
+};
+
+/*
+ * user socket options: socket API defined
+ */
+/*
+ * read-write options
+ */
+#define SCTP_RTOINFO 0x00000001
+#define SCTP_ASSOCINFO 0x00000002
+#define SCTP_INITMSG 0x00000003
+#define SCTP_NODELAY 0x00000004
+#define SCTP_AUTOCLOSE 0x00000005
+#define SCTP_SET_PEER_PRIMARY_ADDR 0x00000006
+#define SCTP_PRIMARY_ADDR 0x00000007
+#define SCTP_ADAPTATION_LAYER 0x00000008
+/* same as above */
+#define SCTP_ADAPTION_LAYER 0x00000008
+#define SCTP_DISABLE_FRAGMENTS 0x00000009
+#define SCTP_PEER_ADDR_PARAMS 0x0000000a
+#define SCTP_DEFAULT_SEND_PARAM 0x0000000b
+/* ancillary data/notification interest options */
+#define SCTP_EVENTS 0x0000000c
+/* Without this applied we will give V4 and V6 addresses on a V6 socket */
+#define SCTP_I_WANT_MAPPED_V4_ADDR 0x0000000d
+#define SCTP_MAXSEG 0x0000000e
+#define SCTP_DELAYED_ACK_TIME 0x0000000f
+#define SCTP_FRAGMENT_INTERLEAVE 0x00000010
+#define SCTP_PARTIAL_DELIVERY_POINT 0x00000011
+/* authentication support */
+#define SCTP_AUTH_CHUNK 0x00000012
+#define SCTP_AUTH_KEY 0x00000013
+#define SCTP_HMAC_IDENT 0x00000014
+#define SCTP_AUTH_ACTIVE_KEY 0x00000015
+#define SCTP_AUTH_DELETE_KEY 0x00000016
+#define SCTP_USE_EXT_RCVINFO 0x00000017
+#define SCTP_AUTO_ASCONF 0x00000018 /* rw */
+#define SCTP_MAXBURST 0x00000019 /* rw */
+/* assoc level context */
+#define SCTP_CONTEXT 0x0000001a /* rw */
+/* explicit EOR signalling */
+#define SCTP_EXPLICIT_EOR 0x0000001b
+
+/*
+ * read-only options
+ */
+#define SCTP_STATUS 0x00000100
+#define SCTP_GET_PEER_ADDR_INFO 0x00000101
+/* authentication support */
+#define SCTP_PEER_AUTH_CHUNKS 0x00000102
+#define SCTP_LOCAL_AUTH_CHUNKS 0x00000103
+
+
+/*
+ * user socket options: BSD implementation specific
+ */
+/*
+ * Blocking I/O is enabled on any TCP-type socket by default. For the UDP
+ * model, if this is turned on then the socket buffer is shared for send
+ * resources amongst all associations. The default for the UDP model is that
+ * SS_NBIO is set, which means all associations have a separate send
+ * limit BUT they will NOT ever BLOCK; instead you get EAGAIN back if you
+ * try to send too much. If you want the blocking semantics you set this
+ * option, at the cost of sharing one socket send buffer amongst all
+ * associations. Peeled-off sockets turn this option off and block. But
+ * since both TCP and peeled-off sockets have only one assoc per socket,
+ * this is fine. It probably does NOT make sense to set SS_NBIO on a TCP-
+ * model or peeled-off UDP-model socket, but we do allow you to do so;
+ * just use the normal syscall to toggle SS_NBIO the way you want.
+ *
+ * Blocking I/O is controlled by the SS_NBIO flag in the socket's so_state
+ * field.
+ */
+
+/* these should probably go into sockets API */
+#define SCTP_RESET_STREAMS 0x00001004 /* wo */
+
+
+/* here on down are more implementation specific */
+#define SCTP_SET_DEBUG_LEVEL 0x00001005
+#define SCTP_CLR_STAT_LOG 0x00001007
+/* CMT ON/OFF socket option */
+#define SCTP_CMT_ON_OFF 0x00001200
+#define SCTP_CMT_USE_DAC 0x00001201
+
+/* read only */
+#define SCTP_GET_SNDBUF_USE 0x00001101
+#define SCTP_GET_STAT_LOG 0x00001103
+#define SCTP_GET_ASOC_ID_LIST 0x00001104 /* ro */
+#define SCTP_PCB_STATUS 0x00001105
+#define SCTP_GET_NONCE_VALUES 0x00001106
+
+
+/*
+ * hidden implementation specific options these are NOT user visible (should
+ * move out of sctp.h)
+ */
+/* sctp_bindx() flags as hidden socket options */
+#define SCTP_BINDX_ADD_ADDR 0x00008001
+#define SCTP_BINDX_REM_ADDR 0x00008002
+/* Hidden socket option that gets the addresses */
+#define SCTP_GET_PEER_ADDRESSES 0x00008003
+#define SCTP_GET_LOCAL_ADDRESSES 0x00008004
+/* return the total count in bytes needed to hold all local addresses bound */
+#define SCTP_GET_LOCAL_ADDR_SIZE 0x00008005
+/* Return the total count in bytes needed to hold the remote address */
+#define SCTP_GET_REMOTE_ADDR_SIZE 0x00008006
+/* hidden option for connectx */
+#define SCTP_CONNECT_X 0x00008007
+/* hidden option for connectx_delayed, part of sendx */
+#define SCTP_CONNECT_X_DELAYED 0x00008008
+#define SCTP_CONNECT_X_COMPLETE 0x00008009
+/* hidden socket option based sctp_peeloff */
+#define SCTP_PEELOFF 0x0000800a
+/* the real worker for sctp_getaddrlen() */
+#define SCTP_GET_ADDR_LEN 0x0000800b
+/* temporary workaround for Apple listen() issue, no args used */
+#define SCTP_LISTEN_FIX 0x0000800c
+/* Debug things that need to be purged */
+#define SCTP_SET_INITIAL_DBG_SEQ 0x00009f00
+
+/*
+ * user state values
+ */
+#define SCTP_CLOSED 0x0000
+#define SCTP_BOUND 0x1000
+#define SCTP_LISTEN 0x2000
+#define SCTP_COOKIE_WAIT 0x0002
+#define SCTP_COOKIE_ECHOED 0x0004
+#define SCTP_ESTABLISHED 0x0008
+#define SCTP_SHUTDOWN_SENT 0x0010
+#define SCTP_SHUTDOWN_RECEIVED 0x0020
+#define SCTP_SHUTDOWN_ACK_SENT 0x0040
+#define SCTP_SHUTDOWN_PENDING 0x0080
+
+/*
+ * SCTP operational error codes (user visible)
+ */
+#define SCTP_CAUSE_NO_ERROR 0x0000
+#define SCTP_CAUSE_INVALID_STREAM 0x0001
+#define SCTP_CAUSE_MISSING_PARAM 0x0002
+#define SCTP_CAUSE_STALE_COOKIE 0x0003
+#define SCTP_CAUSE_OUT_OF_RESC 0x0004
+#define SCTP_CAUSE_UNRESOLVABLE_ADDR 0x0005
+#define SCTP_CAUSE_UNRECOG_CHUNK 0x0006
+#define SCTP_CAUSE_INVALID_PARAM 0x0007
+#define SCTP_CAUSE_UNRECOG_PARAM 0x0008
+#define SCTP_CAUSE_NO_USER_DATA 0x0009
+#define SCTP_CAUSE_COOKIE_IN_SHUTDOWN 0x000a
+#define SCTP_CAUSE_RESTART_W_NEWADDR 0x000b
+#define SCTP_CAUSE_USER_INITIATED_ABT 0x000c
+#define SCTP_CAUSE_PROTOCOL_VIOLATION 0x000d
+
+/* Error causes from draft-ietf-tsvwg-addip-sctp */
+#define SCTP_CAUSE_DELETING_LAST_ADDR 0x0100
+#define SCTP_CAUSE_RESOURCE_SHORTAGE 0x0101
+#define SCTP_CAUSE_DELETING_SRC_ADDR 0x0102
+#define SCTP_CAUSE_ILLEGAL_ASCONF_ACK 0x0103
+#define SCTP_CAUSE_REQUEST_REFUSED 0x0104
+
+/* Error causes from draft-ietf-tsvwg-sctp-auth */
+#define SCTP_CAUSE_UNSUPPORTED_HMACID 0x0105
+
+/*
+ * error cause parameters (user visible)
+ */
+struct sctp_error_cause {
+ uint16_t code;
+ uint16_t length;
+ /* optional cause-specific info may follow */
+};
+
+struct sctp_error_invalid_stream {
+ struct sctp_error_cause cause; /* code=SCTP_ERROR_INVALID_STREAM */
+ uint16_t stream_id; /* stream id of the DATA in error */
+ uint16_t reserved;
+};
+
+struct sctp_error_missing_param {
+ struct sctp_error_cause cause; /* code=SCTP_ERROR_MISSING_PARAM */
+ uint32_t num_missing_params; /* number of missing parameters */
+ /* uint16_t param_type's follow */
+};
+
+struct sctp_error_stale_cookie {
+ struct sctp_error_cause cause; /* code=SCTP_ERROR_STALE_COOKIE */
+ uint32_t stale_time; /* time in usec of staleness */
+};
+
+struct sctp_error_out_of_resource {
+ struct sctp_error_cause cause; /* code=SCTP_ERROR_OUT_OF_RESOURCES */
+};
+
+struct sctp_error_unresolv_addr {
+ struct sctp_error_cause cause; /* code=SCTP_ERROR_UNRESOLVABLE_ADDR */
+
+};
+
+struct sctp_error_unrecognized_chunk {
+ struct sctp_error_cause cause; /* code=SCTP_ERROR_UNRECOG_CHUNK */
+ struct sctp_chunkhdr ch;/* header from chunk in error */
+};
+
+#define HAVE_SCTP 1
+#define HAVE_KERNEL_SCTP 1
+#define HAVE_SCTP_PRSCTP 1
+#define HAVE_SCTP_ADDIP 1
+#define HAVE_SCTP_CANSET_PRIMARY 1
+#define HAVE_SCTP_SAT_CAPABILITY 1
+#define HAVE_SCTP_MULTIBUF 1
+#define HAVE_SCTP_NOCONNECT 0
+#define HAVE_SCTP_ECN_NONCE 1 /* ECN Nonce option */
+#define HAVE_SCTP_AUTH 1
+#define HAVE_SCTP_EXT_RCVINFO 1
+#define HAVE_SCTP_CONNECTX 1
+/*
+ * Main SCTP chunk types. We place these here so natd and firewalls
+ * in userland can find them.
+ */
+/************0x00 series ***********/
+#define SCTP_DATA 0x00
+#define SCTP_INITIATION 0x01
+#define SCTP_INITIATION_ACK 0x02
+#define SCTP_SELECTIVE_ACK 0x03
+#define SCTP_HEARTBEAT_REQUEST 0x04
+#define SCTP_HEARTBEAT_ACK 0x05
+#define SCTP_ABORT_ASSOCIATION 0x06
+#define SCTP_SHUTDOWN 0x07
+#define SCTP_SHUTDOWN_ACK 0x08
+#define SCTP_OPERATION_ERROR 0x09
+#define SCTP_COOKIE_ECHO 0x0a
+#define SCTP_COOKIE_ACK 0x0b
+#define SCTP_ECN_ECHO 0x0c
+#define SCTP_ECN_CWR 0x0d
+#define SCTP_SHUTDOWN_COMPLETE 0x0e
+/* draft-ietf-tsvwg-sctp-auth */
+#define SCTP_AUTHENTICATION 0x0f
+/************0x40 series ***********/
+/************0x80 series ***********/
+/* draft-ietf-tsvwg-addip-sctp */
+#define SCTP_ASCONF_ACK 0x80
+/* draft-ietf-stewart-pktdrpsctp */
+#define SCTP_PACKET_DROPPED 0x81
+/* draft-ietf-stewart-strreset-xxx */
+#define SCTP_STREAM_RESET 0x82
+/************0xc0 series ***********/
+/* RFC3758 */
+#define SCTP_FORWARD_CUM_TSN 0xc0
+/* draft-ietf-tsvwg-addip-sctp */
+#define SCTP_ASCONF 0xc1
+
+
+/* ABORT and SHUTDOWN COMPLETE FLAG */
+#define SCTP_HAD_NO_TCB 0x01
+
+/* Packet dropped flags */
+#define SCTP_FROM_MIDDLE_BOX SCTP_HAD_NO_TCB
+#define SCTP_BADCRC 0x02
+#define SCTP_PACKET_TRUNCATED 0x04
+
+#define SCTP_SAT_NETWORK_MIN 400 /* min ms for RTT to set satellite
+ * time */
+#define SCTP_SAT_NETWORK_BURST_INCR 2 /* how many times to multiply maxburst
+ * in sat */
+
+/* Data Chunk Specific Flags */
+#define SCTP_DATA_FRAG_MASK 0x03
+#define SCTP_DATA_MIDDLE_FRAG 0x00
+#define SCTP_DATA_LAST_FRAG 0x01
+#define SCTP_DATA_FIRST_FRAG 0x02
+#define SCTP_DATA_NOT_FRAG 0x03
+#define SCTP_DATA_UNORDERED 0x04
+
+/* ECN Nonce: SACK Chunk Specific Flags */
+#define SCTP_SACK_NONCE_SUM 0x01
+
+/* CMT DAC algorithm SACK flag */
+#define SCTP_SACK_CMT_DAC 0x80
+
+#include <netinet/sctp_uio.h>
+
+#endif /* !_NETINET_SCTP_H_ */
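As a small usage sketch for the read-write option space defined above
(assuming, as is conventional for these options, that they are issued
at level IPPROTO_SCTP): toggling SCTP_NODELAY, the SCTP analogue of
TCP_NODELAY:

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/sctp.h>

    /* Sketch: disable (or enable) Nagle-style bundling delay. */
    static int
    sctp_set_nodelay(int sd, int on)
    {
    	return (setsockopt(sd, IPPROTO_SCTP, SCTP_NODELAY,
    	    &on, sizeof(on)));
    }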
diff --git a/sys/netinet/sctp_asconf.c b/sys/netinet/sctp_asconf.c
new file mode 100644
index 0000000..729e3e6
--- /dev/null
+++ b/sys/netinet/sctp_asconf.c
@@ -0,0 +1,2856 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_asconf.c,v 1.24 2005/03/06 16:04:16 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+
+#include "opt_ipsec.h"
+#include "opt_compat.h"
+#include "opt_inet6.h"
+#include "opt_inet.h"
+
+#include "opt_sctp.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/route.h>
+
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#include <netinet/in_pcb.h>
+#include <netinet/in_var.h>
+#include <netinet/ip_var.h>
+
+#ifdef INET6
+#include <netinet/ip6.h>
+#include <netinet6/ip6_var.h>
+#include <netinet6/in6_pcb.h>
+#include <netinet/icmp6.h>
+#include <netinet6/nd6.h>
+#include <netinet6/scope6_var.h>
+#endif /* INET6 */
+
+#include <netinet/in_pcb.h>
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_bsd_addr.h>
+#include <netinet/sctp_asconf.h>
+
+/*
+ * debug flags:
+ * SCTP_DEBUG_ASCONF1: protocol info, general info and errors
+ * SCTP_DEBUG_ASCONF2: detailed info
+ */
+#ifdef SCTP_DEBUG
+extern uint32_t sctp_debug_on;
+
+#endif /* SCTP_DEBUG */
+
+
+static int
+sctp_asconf_get_source_ip(struct mbuf *m, struct sockaddr *sa)
+{
+ struct ip *iph;
+ struct sockaddr_in *sin;
+
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+
+#endif
+
+ iph = mtod(m, struct ip *);
+ if (iph->ip_v == IPVERSION) {
+ /* IPv4 source */
+ sin = (struct sockaddr_in *)sa;
+ bzero(sin, sizeof(*sin));
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(struct sockaddr_in);
+ sin->sin_port = 0;
+ sin->sin_addr.s_addr = iph->ip_src.s_addr;
+ return 0;
+ }
+#ifdef INET6
+ else if (iph->ip_v == (IPV6_VERSION >> 4)) {
+ /* IPv6 source */
+ struct ip6_hdr *ip6;
+
+ sin6 = (struct sockaddr_in6 *)sa;
+ bzero(sin6, sizeof(*sin6));
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+ sin6->sin6_port = 0;
+ ip6 = mtod(m, struct ip6_hdr *);
+ sin6->sin6_addr = ip6->ip6_src;
+ return 0;
+ }
+#endif /* INET6 */
+ else
+ return -1;
+}
+
+/*
+ * draft-ietf-tsvwg-addip-sctp
+ *
+ * Only address management is currently supported.
+ *
+ * For the bound-all case: the asoc local addr list is always a
+ * "DO NOT USE" list.
+ * For the subset-bound case:
+ *   If ASCONFs are allowed: the endpoint local addr list is the usable
+ *   address list; the asoc local addr list is the "DO NOT USE" list.
+ *   If ASCONFs are not allowed: the endpoint local addr list is the
+ *   default usable list; the asoc local addr list is the usable
+ *   address list.
+ *
+ * An ASCONF parameter queue exists per asoc which holds the pending address
+ * operations. Lists are updated upon receipt of ASCONF-ACK.
+ *
+ * Deleted addresses are always immediately removed from the lists as they will
+ * (shortly) no longer exist in the kernel. We send ASCONFs as a courtesy,
+ * only if allowed.
+ */
+
+/*
+ * ASCONF parameter processing.
+ * response_required: set if a reply is required (e.g. SUCCESS_REPORT).
+ * Returns an mbuf with an "error" response parameter, or NULL ("success")
+ * if ok.
+ * FIX: allocating this many mbufs on the fly is pretty inefficient...
+ */
+static struct mbuf *
+sctp_asconf_success_response(uint32_t id)
+{
+ struct mbuf *m_reply = NULL;
+ struct sctp_asconf_paramhdr *aph;
+
+ m_reply = sctp_get_mbuf_for_msg(sizeof(struct sctp_asconf_paramhdr),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (m_reply == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("asconf_success_response: couldn't get mbuf!\n");
+ }
+#endif /* SCTP_DEBUG */
+ return NULL;
+ }
+ aph = mtod(m_reply, struct sctp_asconf_paramhdr *);
+ aph->correlation_id = id;
+ aph->ph.param_type = htons(SCTP_SUCCESS_REPORT);
+ aph->ph.param_length = sizeof(struct sctp_asconf_paramhdr);
+ m_reply->m_len = aph->ph.param_length;
+ aph->ph.param_length = htons(aph->ph.param_length);
+
+ return m_reply;
+}
+
+static struct mbuf *
+sctp_asconf_error_response(uint32_t id, uint16_t cause, uint8_t * error_tlv,
+ uint16_t tlv_length)
+{
+ struct mbuf *m_reply = NULL;
+ struct sctp_asconf_paramhdr *aph;
+ struct sctp_error_cause *error;
+ uint8_t *tlv;
+
+ m_reply = sctp_get_mbuf_for_msg((sizeof(struct sctp_asconf_paramhdr) +
+ tlv_length +
+ sizeof(struct sctp_error_cause)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (m_reply == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("asconf_error_response: couldn't get mbuf!\n");
+ }
+#endif /* SCTP_DEBUG */
+ return NULL;
+ }
+ aph = mtod(m_reply, struct sctp_asconf_paramhdr *);
+ error = (struct sctp_error_cause *)(aph + 1);
+
+ aph->correlation_id = id;
+ aph->ph.param_type = htons(SCTP_ERROR_CAUSE_IND);
+ error->code = htons(cause);
+ error->length = tlv_length + sizeof(struct sctp_error_cause);
+ aph->ph.param_length = error->length +
+ sizeof(struct sctp_asconf_paramhdr);
+
+ if (aph->ph.param_length > MLEN) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("asconf_error_response: tlv_length (%xh) too big\n",
+ tlv_length);
+ }
+#endif /* SCTP_DEBUG */
+ sctp_m_freem(m_reply); /* discard */
+ return NULL;
+ }
+ if (error_tlv != NULL) {
+ tlv = (uint8_t *) (error + 1);
+ memcpy(tlv, error_tlv, tlv_length);
+ }
+ m_reply->m_len = aph->ph.param_length;
+ error->length = htons(error->length);
+ aph->ph.param_length = htons(aph->ph.param_length);
+
+ return m_reply;
+}
+
+static struct mbuf *
+sctp_process_asconf_add_ip(struct mbuf *m, struct sctp_asconf_paramhdr *aph,
+ struct sctp_tcb *stcb, int response_required)
+{
+ struct mbuf *m_reply = NULL;
+ struct sockaddr_storage sa_source, sa_store;
+ struct sctp_ipv4addr_param *v4addr;
+ uint16_t param_type, param_length, aparam_length;
+ struct sockaddr *sa;
+ struct sockaddr_in *sin;
+ int zero_address = 0;
+
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+ struct sctp_ipv6addr_param *v6addr;
+
+#endif /* INET6 */
+
+ aparam_length = ntohs(aph->ph.param_length);
+ v4addr = (struct sctp_ipv4addr_param *)(aph + 1);
+#ifdef INET6
+ v6addr = (struct sctp_ipv6addr_param *)(aph + 1);
+#endif /* INET6 */
+ param_type = ntohs(v4addr->ph.param_type);
+ param_length = ntohs(v4addr->ph.param_length);
+
+ sa = (struct sockaddr *)&sa_store;
+ switch (param_type) {
+ case SCTP_IPV4_ADDRESS:
+ if (param_length != sizeof(struct sctp_ipv4addr_param)) {
+ /* invalid param size */
+ return NULL;
+ }
+ sin = (struct sockaddr_in *)&sa_store;
+ bzero(sin, sizeof(*sin));
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(struct sockaddr_in);
+ sin->sin_port = stcb->rport;
+ sin->sin_addr.s_addr = v4addr->addr;
+ if (sin->sin_addr.s_addr == INADDR_ANY)
+ zero_address = 1;
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("process_asconf_add_ip: adding ");
+ sctp_print_address(sa);
+ }
+#endif /* SCTP_DEBUG */
+ break;
+ case SCTP_IPV6_ADDRESS:
+#ifdef INET6
+ if (param_length != sizeof(struct sctp_ipv6addr_param)) {
+ /* invalid param size */
+ return NULL;
+ }
+ sin6 = (struct sockaddr_in6 *)&sa_store;
+ bzero(sin6, sizeof(*sin6));
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+ sin6->sin6_port = stcb->rport;
+ memcpy((caddr_t)&sin6->sin6_addr, v6addr->addr,
+ sizeof(struct in6_addr));
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+ zero_address = 1;
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("process_asconf_add_ip: adding ");
+ sctp_print_address(sa);
+ }
+#endif /* SCTP_DEBUG */
+#else
+ /* IPv6 not enabled! */
+ /* FIX ME: currently sends back an invalid param error */
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_INVALID_PARAM, (uint8_t *) aph, aparam_length);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("process_asconf_add_ip: v6 disabled- skipping ");
+ sctp_print_address(sa);
+ }
+#endif /* SCTP_DEBUG */
+ return m_reply;
+#endif /* INET6 */
+ break;
+ default:
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_UNRESOLVABLE_ADDR, (uint8_t *) aph,
+ aparam_length);
+ return m_reply;
+ } /* end switch */
+
+ /* if 0.0.0.0/::0, add the source address instead */
+ if (zero_address) {
+ sa = (struct sockaddr *)&sa_source;
+ sctp_asconf_get_source_ip(m, sa);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("process_asconf_add_ip: using source addr ");
+ sctp_print_address(sa);
+ }
+#endif /* SCTP_DEBUG */
+ }
+ /* add the address */
+ if (sctp_add_remote_addr(stcb, sa, 0, 6) != 0) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("process_asconf_add_ip: error adding address\n");
+ }
+#endif /* SCTP_DEBUG */
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_RESOURCE_SHORTAGE, (uint8_t *) aph,
+ aparam_length);
+ } else {
+ /* notify upper layer */
+ sctp_ulp_notify(SCTP_NOTIFY_ASCONF_ADD_IP, stcb, 0, sa);
+ if (response_required) {
+ m_reply =
+ sctp_asconf_success_response(aph->correlation_id);
+ }
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, NULL);
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, NULL);
+
+ }
+
+ return m_reply;
+}
+
+static int
+sctp_asconf_del_remote_addrs_except(struct sctp_tcb *stcb,
+ struct sockaddr *src)
+{
+ struct sctp_nets *src_net, *net;
+
+ /* make sure the source address exists as a destination net */
+ src_net = sctp_findnet(stcb, src);
+ if (src_net == NULL) {
+ /* not found */
+ return -1;
+ }
+ /* delete all destination addresses except the source */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (net != src_net) {
+ /* delete this address */
+ sctp_remove_net(stcb, net);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("asconf_del_remote_addrs_except: deleting ");
+ sctp_print_address((struct sockaddr *)&net->ro._l_addr);
+ }
+#endif
+ /* notify upper layer */
+ sctp_ulp_notify(SCTP_NOTIFY_ASCONF_DELETE_IP, stcb, 0,
+ (struct sockaddr *)&net->ro._l_addr);
+ }
+ }
+ return 0;
+}
+
+static struct mbuf *
+sctp_process_asconf_delete_ip(struct mbuf *m, struct sctp_asconf_paramhdr *aph,
+ struct sctp_tcb *stcb, int response_required)
+{
+ struct mbuf *m_reply = NULL;
+ struct sockaddr_storage sa_source, sa_store;
+ struct sctp_ipv4addr_param *v4addr;
+ uint16_t param_type, param_length, aparam_length;
+ struct sockaddr *sa;
+ struct sockaddr_in *sin;
+ int zero_address = 0;
+ int result;
+
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+ struct sctp_ipv6addr_param *v6addr;
+
+#endif /* INET6 */
+
+ /* get the source IP address for src and 0.0.0.0/::0 delete checks */
+ sctp_asconf_get_source_ip(m, (struct sockaddr *)&sa_source);
+
+ aparam_length = ntohs(aph->ph.param_length);
+ v4addr = (struct sctp_ipv4addr_param *)(aph + 1);
+#ifdef INET6
+ v6addr = (struct sctp_ipv6addr_param *)(aph + 1);
+#endif /* INET6 */
+ param_type = ntohs(v4addr->ph.param_type);
+ param_length = ntohs(v4addr->ph.param_length);
+
+ sa = (struct sockaddr *)&sa_store;
+ switch (param_type) {
+ case SCTP_IPV4_ADDRESS:
+ if (param_length != sizeof(struct sctp_ipv4addr_param)) {
+ /* invalid param size */
+ return NULL;
+ }
+ sin = (struct sockaddr_in *)&sa_store;
+ bzero(sin, sizeof(*sin));
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(struct sockaddr_in);
+ sin->sin_port = stcb->rport;
+ sin->sin_addr.s_addr = v4addr->addr;
+ if (sin->sin_addr.s_addr == INADDR_ANY)
+ zero_address = 1;
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("process_asconf_delete_ip: deleting ");
+ sctp_print_address(sa);
+ }
+#endif /* SCTP_DEBUG */
+ break;
+ case SCTP_IPV6_ADDRESS:
+ if (param_length != sizeof(struct sctp_ipv6addr_param)) {
+ /* invalid param size */
+ return NULL;
+ }
+#ifdef INET6
+ sin6 = (struct sockaddr_in6 *)&sa_store;
+ bzero(sin6, sizeof(*sin6));
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+ sin6->sin6_port = stcb->rport;
+ memcpy(&sin6->sin6_addr, v6addr->addr,
+ sizeof(struct in6_addr));
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+ zero_address = 1;
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("process_asconf_delete_ip: deleting ");
+ sctp_print_address(sa);
+ }
+#endif /* SCTP_DEBUG */
+#else
+ /* IPv6 not enabled! No "action" needed; just ack it */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("process_asconf_delete_ip: v6 disabled- ignoring: ");
+ sctp_print_address(sa);
+ }
+#endif /* SCTP_DEBUG */
+ /* just respond with a "success" ASCONF-ACK */
+ return NULL;
+#endif /* INET6 */
+ break;
+ default:
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_UNRESOLVABLE_ADDR, (uint8_t *) aph,
+ aparam_length);
+ return m_reply;
+ }
+
+ /* make sure the source address is not being deleted */
+ if (sctp_cmpaddr(sa, (struct sockaddr *)&sa_source)) {
+ /* trying to delete the source address! */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("process_asconf_delete_ip: tried to delete source addr\n");
+ }
+#endif /* SCTP_DEBUG */
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_DELETING_SRC_ADDR, (uint8_t *) aph,
+ aparam_length);
+ return m_reply;
+ }
+ /* if deleting 0.0.0.0/::0, delete all addresses except src addr */
+ if (zero_address) {
+ result = sctp_asconf_del_remote_addrs_except(stcb,
+ (struct sockaddr *)&sa_source);
+
+ if (result) {
+ /* src address did not exist? */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("process_asconf_delete_ip: src addr does not exist?\n");
+ }
+#endif /* SCTP_DEBUG */
+ /* what error to reply with?? */
+ m_reply =
+ sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_REQUEST_REFUSED, (uint8_t *) aph,
+ aparam_length);
+ } else if (response_required) {
+ m_reply =
+ sctp_asconf_success_response(aph->correlation_id);
+ }
+ return m_reply;
+ }
+ /* delete the address */
+ result = sctp_del_remote_addr(stcb, sa);
+ /*
+ * note if result == -2, the address doesn't exist in the asoc but
+ * since it's being deleted anyway, we just ack the delete -- but
+ * this probably means something has already gone awry
+ */
+ if (result == -1) {
+ /* only one address in the asoc */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("process_asconf_delete_ip: tried to delete last IP addr!\n");
+ }
+#endif /* SCTP_DEBUG */
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_DELETING_LAST_ADDR, (uint8_t *) aph,
+ aparam_length);
+ } else {
+ if (response_required) {
+ m_reply = sctp_asconf_success_response(aph->correlation_id);
+ }
+ /* notify upper layer */
+ sctp_ulp_notify(SCTP_NOTIFY_ASCONF_DELETE_IP, stcb, 0, sa);
+ }
+ return m_reply;
+}
+
+static struct mbuf *
+sctp_process_asconf_set_primary(struct mbuf *m,
+ struct sctp_asconf_paramhdr *aph, struct sctp_tcb *stcb,
+ int response_required)
+{
+ struct mbuf *m_reply = NULL;
+ struct sockaddr_storage sa_source, sa_store;
+ struct sctp_ipv4addr_param *v4addr;
+ uint16_t param_type, param_length, aparam_length;
+ struct sockaddr *sa;
+ struct sockaddr_in *sin;
+ int zero_address = 0;
+
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+ struct sctp_ipv6addr_param *v6addr;
+
+#endif /* INET6 */
+
+ aparam_length = ntohs(aph->ph.param_length);
+ v4addr = (struct sctp_ipv4addr_param *)(aph + 1);
+#ifdef INET6
+ v6addr = (struct sctp_ipv6addr_param *)(aph + 1);
+#endif /* INET6 */
+ param_type = ntohs(v4addr->ph.param_type);
+ param_length = ntohs(v4addr->ph.param_length);
+
+ sa = (struct sockaddr *)&sa_store;
+ switch (param_type) {
+ case SCTP_IPV4_ADDRESS:
+ if (param_length != sizeof(struct sctp_ipv4addr_param)) {
+ /* invalid param size */
+ return NULL;
+ }
+ sin = (struct sockaddr_in *)&sa_store;
+ bzero(sin, sizeof(*sin));
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(struct sockaddr_in);
+ sin->sin_addr.s_addr = v4addr->addr;
+ if (sin->sin_addr.s_addr == INADDR_ANY)
+ zero_address = 1;
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("process_asconf_set_primary: ");
+ sctp_print_address(sa);
+ }
+#endif /* SCTP_DEBUG */
+ break;
+ case SCTP_IPV6_ADDRESS:
+ if (param_length != sizeof(struct sctp_ipv6addr_param)) {
+ /* invalid param size */
+ return NULL;
+ }
+#ifdef INET6
+ sin6 = (struct sockaddr_in6 *)&sa_store;
+ bzero(sin6, sizeof(*sin6));
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+ memcpy((caddr_t)&sin6->sin6_addr, v6addr->addr,
+ sizeof(struct in6_addr));
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+ zero_address = 1;
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("process_asconf_set_primary: ");
+ sctp_print_address(sa);
+ }
+#endif /* SCTP_DEBUG */
+#else
+ /* IPv6 not enabled! No "action" needed; just ack it */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("process_asconf_set_primary: v6 disabled- ignoring: ");
+ sctp_print_address(sa);
+ }
+#endif /* SCTP_DEBUG */
+ /* just respond with a "success" ASCONF-ACK */
+ return NULL;
+#endif /* INET6 */
+ break;
+ default:
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_UNRESOLVABLE_ADDR, (uint8_t *) aph,
+ aparam_length);
+ return m_reply;
+ }
+
+ /* if 0.0.0.0/::0, use the source address instead */
+ if (zero_address) {
+ sa = (struct sockaddr *)&sa_source;
+ sctp_asconf_get_source_ip(m, sa);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("process_asconf_set_primary: using source addr ");
+ sctp_print_address(sa);
+ }
+#endif /* SCTP_DEBUG */
+ }
+ /* set the primary address */
+ if (sctp_set_primary_addr(stcb, sa, NULL) == 0) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("process_asconf_set_primary: primary address set\n");
+ }
+#endif /* SCTP_DEBUG */
+ /* notify upper layer */
+ sctp_ulp_notify(SCTP_NOTIFY_ASCONF_SET_PRIMARY, stcb, 0, sa);
+
+ if (response_required) {
+ m_reply = sctp_asconf_success_response(aph->correlation_id);
+ }
+ } else {
+ /* couldn't set the requested primary address! */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("process_asconf_set_primary: set primary failed!\n");
+ }
+#endif /* SCTP_DEBUG */
+ /* must have been an invalid address, so report */
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_UNRESOLVABLE_ADDR, (uint8_t *) aph,
+ aparam_length);
+ }
+
+ return m_reply;
+}
+
+/*
+ * handles an ASCONF chunk.
+ * if all parameters are processed ok, send a plain (empty) ASCONF-ACK
+ */
+void
+sctp_handle_asconf(struct mbuf *m, unsigned int offset,
+ struct sctp_asconf_chunk *cp, struct sctp_tcb *stcb)
+{
+ struct sctp_association *asoc;
+ uint32_t serial_num;
+ struct mbuf *m_ack, *m_result, *m_tail;
+ struct sctp_asconf_ack_chunk *ack_cp;
+ struct sctp_asconf_paramhdr *aph, *ack_aph;
+ struct sctp_ipv6addr_param *p_addr;
+ unsigned int asconf_limit;
+ int error = 0; /* did an error occur? */
+
+ /* asconf param buffer */
+ static uint8_t aparam_buf[DEFAULT_PARAM_BUFFER];
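+	/*
+	 * note: each param is copied out of the mbuf chain into this
+	 * buffer, so any param larger than sizeof(aparam_buf) is rejected
+	 * below
+	 */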
+
+ /* verify minimum length */
+ if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_asconf_chunk)) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("handle_asconf: chunk too small = %xh\n",
+ ntohs(cp->ch.chunk_length));
+ }
+#endif /* SCTP_DEBUG */
+ return;
+ }
+ asoc = &stcb->asoc;
+ serial_num = ntohl(cp->serial_number);
+
+ if (serial_num == asoc->asconf_seq_in) {
+ /* got a duplicate ASCONF */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("handle_asconf: got duplicate serial number = %xh\n",
+ serial_num);
+ }
+#endif /* SCTP_DEBUG */
+ /* resend last ASCONF-ACK... */
+ sctp_send_asconf_ack(stcb, 1);
+ return;
+ } else if (serial_num != (asoc->asconf_seq_in + 1)) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("handle_asconf: incorrect serial number = %xh (expected next = %xh)\n",
+ serial_num, asoc->asconf_seq_in + 1);
+ }
+#endif /* SCTP_DEBUG */
+ return;
+ }
+ /* it's the expected "next" sequence number, so process it */
+ asoc->asconf_seq_in = serial_num; /* update sequence */
+	/* get length of all the params in the ASCONF */
+ asconf_limit = offset + ntohs(cp->ch.chunk_length);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("handle_asconf: asconf_limit=%u, sequence=%xh\n",
+ asconf_limit, serial_num);
+ }
+#endif /* SCTP_DEBUG */
+ if (asoc->last_asconf_ack_sent != NULL) {
+ /* free last ASCONF-ACK message sent */
+ sctp_m_freem(asoc->last_asconf_ack_sent);
+ asoc->last_asconf_ack_sent = NULL;
+ }
+ m_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_asconf_ack_chunk), 1,
+ M_DONTWAIT, 1, MT_DATA);
+ if (m_ack == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("handle_asconf: couldn't get mbuf!\n");
+ }
+#endif /* SCTP_DEBUG */
+ return;
+ }
+ m_tail = m_ack; /* current reply chain's tail */
+
+ /* fill in ASCONF-ACK header */
+ ack_cp = mtod(m_ack, struct sctp_asconf_ack_chunk *);
+ ack_cp->ch.chunk_type = SCTP_ASCONF_ACK;
+ ack_cp->ch.chunk_flags = 0;
+ ack_cp->serial_number = htonl(serial_num);
+ /* set initial lengths (eg. just an ASCONF-ACK), ntohx at the end! */
+ m_ack->m_len = sizeof(struct sctp_asconf_ack_chunk);
+ ack_cp->ch.chunk_length = sizeof(struct sctp_asconf_ack_chunk);
+ m_ack->m_pkthdr.len = sizeof(struct sctp_asconf_ack_chunk);
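+	/*
+	 * note: the reply lengths above are kept in host order while
+	 * params are appended; chunk_length is converted to network order
+	 * at send_reply
+	 */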
+
+ /* skip the lookup address parameter */
+ offset += sizeof(struct sctp_asconf_chunk);
+ p_addr = (struct sctp_ipv6addr_param *)sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr), (uint8_t *) & aparam_buf);
+ if (p_addr == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("handle_asconf: couldn't get lookup addr!\n");
+ }
+#endif /* SCTP_DEBUG */
+
+ /* respond with a missing/invalid mandatory parameter error */
+ return;
+ }
+ /* param_length is already validated in process_control... */
+ offset += ntohs(p_addr->ph.param_length); /* skip lookup addr */
+
+ /* get pointer to first asconf param in ASCONF-ACK */
+ ack_aph = (struct sctp_asconf_paramhdr *)(mtod(m_ack, caddr_t)+sizeof(struct sctp_asconf_ack_chunk));
+ if (ack_aph == NULL) {
+#ifdef SCTP_DEBUG
+ printf("Gak in asconf2\n");
+#endif
+ return;
+ }
+ /* get pointer to first asconf param in ASCONF */
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset, sizeof(struct sctp_asconf_paramhdr), (uint8_t *) & aparam_buf);
+ if (aph == NULL) {
+#ifdef SCTP_DEBUG
+ printf("Empty ASCONF received?\n");
+#endif
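+		/* an empty ASCONF still gets a plain (empty) ASCONF-ACK */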
+ goto send_reply;
+ }
+ /* process through all parameters */
+ while (aph != NULL) {
+ unsigned int param_length, param_type;
+
+ param_type = ntohs(aph->ph.param_type);
+ param_length = ntohs(aph->ph.param_length);
+ if (offset + param_length > asconf_limit) {
+ /* parameter goes beyond end of chunk! */
+ sctp_m_freem(m_ack);
+ return;
+ }
+ m_result = NULL;
+
+ if (param_length > sizeof(aparam_buf)) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("handle_asconf: param length (%u) larger than buffer size!\n", param_length);
+ }
+#endif /* SCTP_DEBUG */
+ sctp_m_freem(m_ack);
+ return;
+ }
+		if (param_length <= sizeof(struct sctp_paramhdr)) {
+#ifdef SCTP_DEBUG
+			if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+				printf("handle_asconf: param length (%u) too short\n", param_length);
+			}
+#endif /* SCTP_DEBUG */
+			sctp_m_freem(m_ack);
+			return;
+		}
+ /* get the entire parameter */
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset, param_length, aparam_buf);
+ if (aph == NULL) {
+#ifdef SCTP_DEBUG
+ printf("Gag\n");
+#endif
+ sctp_m_freem(m_ack);
+ return;
+ }
+ switch (param_type) {
+ case SCTP_ADD_IP_ADDRESS:
+ asoc->peer_supports_asconf = 1;
+ m_result = sctp_process_asconf_add_ip(m, aph, stcb,
+ error);
+ break;
+ case SCTP_DEL_IP_ADDRESS:
+ asoc->peer_supports_asconf = 1;
+ m_result = sctp_process_asconf_delete_ip(m, aph, stcb,
+ error);
+ break;
+ case SCTP_ERROR_CAUSE_IND:
+ /* not valid in an ASCONF chunk */
+ break;
+ case SCTP_SET_PRIM_ADDR:
+ asoc->peer_supports_asconf = 1;
+ m_result = sctp_process_asconf_set_primary(m, aph,
+ stcb, error);
+ break;
+ case SCTP_SUCCESS_REPORT:
+ /* not valid in an ASCONF chunk */
+ break;
+ case SCTP_ULP_ADAPTATION:
+ /* FIX */
+ break;
+ default:
+ if ((param_type & 0x8000) == 0) {
+ /* Been told to STOP at this param */
+ asconf_limit = offset;
+ /*
+ * FIX FIX - We need to call
+ * sctp_arethere_unrecognized_parameters()
+				 * to get an operr and send it for any
+				 * params with the 0x4000 bit set OR do it
+ * here ourselves... note we still must STOP
+ * if the 0x8000 bit is clear.
+ */
+ }
+ /* unknown/invalid param type */
+ break;
+ } /* switch */
+
+ /* add any (error) result to the reply mbuf chain */
+ if (m_result != NULL) {
+ m_tail->m_next = m_result;
+ m_tail = m_result;
+ /* update lengths, make sure it's aligned too */
+ m_result->m_len = SCTP_SIZE32(m_result->m_len);
+ m_ack->m_pkthdr.len += m_result->m_len;
+ ack_cp->ch.chunk_length += m_result->m_len;
+ /* set flag to force success reports */
+ error = 1;
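+			/*
+			 * note: from here on, "error" is passed as
+			 * response_required, so every later param gets an
+			 * explicit success/error reply
+			 */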
+ }
+ offset += SCTP_SIZE32(param_length);
+ /* update remaining ASCONF message length to process */
+ if (offset >= asconf_limit) {
+ /* no more data in the mbuf chain */
+ break;
+ }
+ /* get pointer to next asconf param */
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_asconf_paramhdr),
+ (uint8_t *) & aparam_buf);
+ if (aph == NULL) {
+ /* can't get an asconf paramhdr */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("handle_asconf: can't get asconf param hdr!\n");
+ }
+#endif /* SCTP_DEBUG */
+ /* FIX ME - add error here... */
+ }
+ } /* while */
+
+send_reply:
+ ack_cp->ch.chunk_length = htons(ack_cp->ch.chunk_length);
+ /* save the ASCONF-ACK reply */
+ asoc->last_asconf_ack_sent = m_ack;
+
+ /* see if last_control_chunk_from is set properly (use IP src addr) */
+ if (stcb->asoc.last_control_chunk_from == NULL) {
+ /*
+ * this could happen if the source address was just newly
+ * added
+ */
+ struct ip *iph;
+ struct sctphdr *sh;
+ struct sockaddr_storage from_store;
+ struct sockaddr *from = (struct sockaddr *)&from_store;
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1)
+ printf("handle_asconf: looking up net for IP source address\n");
+#endif /* SCTP_DEBUG */
+ /* pullup already done, IP options already stripped */
+ iph = mtod(m, struct ip *);
+ sh = (struct sctphdr *)((caddr_t)iph + sizeof(*iph));
+ if (iph->ip_v == IPVERSION) {
+ struct sockaddr_in *from4;
+
+ from4 = (struct sockaddr_in *)&from_store;
+ bzero(from4, sizeof(*from4));
+ from4->sin_family = AF_INET;
+ from4->sin_len = sizeof(struct sockaddr_in);
+ from4->sin_addr.s_addr = iph->ip_src.s_addr;
+ from4->sin_port = sh->src_port;
+ } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
+ struct ip6_hdr *ip6;
+ struct sockaddr_in6 *from6;
+
+ ip6 = mtod(m, struct ip6_hdr *);
+ from6 = (struct sockaddr_in6 *)&from_store;
+ bzero(from6, sizeof(*from6));
+ from6->sin6_family = AF_INET6;
+ from6->sin6_len = sizeof(struct sockaddr_in6);
+ from6->sin6_addr = ip6->ip6_src;
+ from6->sin6_port = sh->src_port;
+ /* Get the scopes in properly to the sin6 addr's */
+ /* we probably don't need these operations */
+ (void)sa6_recoverscope(from6);
+ sa6_embedscope(from6, ip6_use_defzone);
+ } else {
+ /* unknown address type */
+ from = NULL;
+ }
+ if (from != NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("Looking for IP source: ");
+ sctp_print_address(from);
+ }
+#endif /* SCTP_DEBUG */
+ /* look up the from address */
+ stcb->asoc.last_control_chunk_from = sctp_findnet(stcb, from);
+#ifdef SCTP_DEBUG
+ if ((stcb->asoc.last_control_chunk_from == NULL) &&
+ (sctp_debug_on & SCTP_DEBUG_ASCONF1))
+ printf("handle_asconf: IP source address not found?!\n");
+#endif /* SCTP_DEBUG */
+ }
+ }
+ /* and send it (a new one) out... */
+ sctp_send_asconf_ack(stcb, 0);
+}
+
+/*
+ * does the address match? returns 0 if not, 1 if so
+ */
+static uint32_t
+sctp_asconf_addr_match(struct sctp_asconf_addr *aa, struct sockaddr *sa)
+{
+#ifdef INET6
+ if (sa->sa_family == AF_INET6) {
+ /* IPv6 sa address */
+ /* XXX scopeid */
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
+
+ if ((aa->ap.addrp.ph.param_type == SCTP_IPV6_ADDRESS) &&
+ (memcmp(&aa->ap.addrp.addr, &sin6->sin6_addr,
+ sizeof(struct in6_addr)) == 0)) {
+ return (1);
+ }
+ } else
+#endif /* INET6 */
+ if (sa->sa_family == AF_INET) {
+ /* IPv4 sa address */
+ struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+
+ if ((aa->ap.addrp.ph.param_type == SCTP_IPV4_ADDRESS) &&
+ (memcmp(&aa->ap.addrp.addr, &sin->sin_addr,
+ sizeof(struct in_addr)) == 0)) {
+ return (1);
+ }
+ }
+ return (0);
+}
+
+/*
+ * Cleanup for non-responded/OP ERR'd ASCONF
+ */
+void
+sctp_asconf_cleanup(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ /* mark peer as ASCONF incapable */
+ stcb->asoc.peer_supports_asconf = 0;
+ /*
+ * clear out any existing asconfs going out
+ */
+ sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, net);
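+	/* advance the serial so a late ACK for the abandoned ASCONF is ignored */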
+ stcb->asoc.asconf_seq_out++;
+ /* remove the old ASCONF on our outbound queue */
+ sctp_toss_old_asconf(stcb);
+}
+
+/*
+ * process an ADD/DELETE IP ack from peer.
+ * addr: the ifaddr corresponding to the address being added/deleted.
+ * type: SCTP_ADD_IP_ADDRESS or SCTP_DEL_IP_ADDRESS.
+ * flag: 1=success, 0=failure.
+ */
+static void
+sctp_asconf_addr_mgmt_ack(struct sctp_tcb *stcb, struct ifaddr *addr,
+ uint16_t type, uint32_t flag)
+{
+ /*
+	 * do the necessary asoc list work: if we get a failure indication,
+	 * leave the address on the "do not use" asoc list; if we get a
+	 * success indication, remove the address from the list
+ */
+ /*
+ * Note: this will only occur for ADD_IP_ADDRESS, since
+ * DEL_IP_ADDRESS is never actually added to the list...
+ */
+ if (flag) {
+ /* success case, so remove from the list */
+ sctp_del_local_addr_assoc(stcb, addr);
+ }
+ /* else, leave it on the list */
+}
+
+/*
+ * add an asconf add/delete IP address parameter to the queue.
+ * type = SCTP_ADD_IP_ADDRESS, SCTP_DEL_IP_ADDRESS, SCTP_SET_PRIM_ADDR.
+ * returns 0 if completed, non-zero if not completed.
+ * NOTE: if adding, but delete already scheduled (and not yet sent out),
+ * simply remove from queue. Same for deleting an address already scheduled
+ * for add. If a duplicate operation is found, ignore the new one.
+ */
+static uint32_t
+sctp_asconf_queue_add(struct sctp_tcb *stcb, struct ifaddr *ifa, uint16_t type)
+{
+ struct sctp_asconf_addr *aa, *aa_next;
+ struct sockaddr *sa;
+
+ /* see if peer supports ASCONF */
+ if (stcb->asoc.peer_supports_asconf == 0) {
+ return (-1);
+ }
+ /* make sure the request isn't already in the queue */
+ for (aa = TAILQ_FIRST(&stcb->asoc.asconf_queue); aa != NULL;
+ aa = aa_next) {
+ aa_next = TAILQ_NEXT(aa, next);
+ /* address match? */
+ if (sctp_asconf_addr_match(aa, ifa->ifa_addr) == 0)
+ continue;
+ /* is the request already in queue (sent or not) */
+ if (aa->ap.aph.ph.param_type == type) {
+ return (-1);
+ }
+ /* is the negative request already in queue, and not sent */
+ if (aa->sent == 0 &&
+ /* add requested, delete already queued */
+ ((type == SCTP_ADD_IP_ADDRESS &&
+ aa->ap.aph.ph.param_type == SCTP_DEL_IP_ADDRESS) ||
+ /* delete requested, add already queued */
+ (type == SCTP_DEL_IP_ADDRESS &&
+ aa->ap.aph.ph.param_type == SCTP_ADD_IP_ADDRESS))) {
+ /* delete the existing entry in the queue */
+ TAILQ_REMOVE(&stcb->asoc.asconf_queue, aa, next);
+ /* take the entry off the appropriate list */
+ sctp_asconf_addr_mgmt_ack(stcb, aa->ifa, type, 1);
+ /* free the entry */
+ SCTP_FREE(aa);
+ return (-1);
+ }
+ } /* for each aa */
+
+ /* adding new request to the queue */
+ SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa), "AsconfAddr");
+ if (aa == NULL) {
+ /* didn't get memory */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("asconf_queue_add: failed to get memory!\n");
+ }
+#endif /* SCTP_DEBUG */
+ return (-1);
+ }
+ /* fill in asconf address parameter fields */
+ /* top level elements are "networked" during send */
+ aa->ap.aph.ph.param_type = type;
+ aa->ifa = ifa;
+ /* correlation_id filled in during send routine later... */
+ if (ifa->ifa_addr->sa_family == AF_INET6) {
+ /* IPv6 address */
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
+ sa = (struct sockaddr *)sin6;
+ aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
+ aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv6addr_param));
+ aa->ap.aph.ph.param_length =
+ sizeof(struct sctp_asconf_paramhdr) +
+ sizeof(struct sctp_ipv6addr_param);
+ memcpy(&aa->ap.addrp.addr, &sin6->sin6_addr,
+ sizeof(struct in6_addr));
+ } else if (ifa->ifa_addr->sa_family == AF_INET) {
+ /* IPv4 address */
+ struct sockaddr_in *sin = (struct sockaddr_in *)ifa->ifa_addr;
+
+ sa = (struct sockaddr *)sin;
+ aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
+ aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv4addr_param));
+ aa->ap.aph.ph.param_length =
+ sizeof(struct sctp_asconf_paramhdr) +
+ sizeof(struct sctp_ipv4addr_param);
+ memcpy(&aa->ap.addrp.addr, &sin->sin_addr,
+ sizeof(struct in_addr));
+	} else {
+		/* invalid family! */
+		SCTP_FREE(aa);
+		return (-1);
+ }
+ aa->sent = 0; /* clear sent flag */
+
+ /*
+	 * if we are deleting an address, it should go out last; otherwise,
+	 * add it to the front of the pending queue
+ */
+ if (type == SCTP_ADD_IP_ADDRESS) {
+ /* add goes to the front of the queue */
+ TAILQ_INSERT_HEAD(&stcb->asoc.asconf_queue, aa, next);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF2) {
+			printf("asconf_queue_add: inserted asconf ADD_IP_ADDRESS: ");
+ sctp_print_address(sa);
+ }
+#endif /* SCTP_DEBUG */
+ } else {
+ /* delete and set primary goes to the back of the queue */
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF2) {
+ if (type == SCTP_DEL_IP_ADDRESS) {
+ printf("asconf_queue_add: inserted asconf DEL_IP_ADDRESS: ");
+ sctp_print_address(sa);
+ } else {
+ printf("asconf_queue_add: inserted asconf SET_PRIM_ADDR: ");
+ sctp_print_address(sa);
+ }
+ }
+#endif /* SCTP_DEBUG */
+ }
+
+ return (0);
+}
+
+/*
+ * add an asconf add/delete IP address parameter to the queue by addr.
+ * type = SCTP_ADD_IP_ADDRESS, SCTP_DEL_IP_ADDRESS, SCTP_SET_PRIM_ADDR.
+ * returns 0 if completed, non-zero if not completed.
+ * NOTE: if adding, but delete already scheduled (and not yet sent out),
+ * simply remove from queue. Same for deleting an address already scheduled
+ * for add. If a duplicate operation is found, ignore the new one.
+ */
+static uint32_t
+sctp_asconf_queue_add_sa(struct sctp_tcb *stcb, struct sockaddr *sa,
+ uint16_t type)
+{
+ struct sctp_asconf_addr *aa, *aa_next;
+
+ /* see if peer supports ASCONF */
+ if (stcb->asoc.peer_supports_asconf == 0) {
+ return (-1);
+ }
+ /* make sure the request isn't already in the queue */
+ for (aa = TAILQ_FIRST(&stcb->asoc.asconf_queue); aa != NULL;
+ aa = aa_next) {
+ aa_next = TAILQ_NEXT(aa, next);
+ /* address match? */
+ if (sctp_asconf_addr_match(aa, sa) == 0)
+ continue;
+ /* is the request already in queue (sent or not) */
+ if (aa->ap.aph.ph.param_type == type) {
+ return (-1);
+ }
+ /* is the negative request already in queue, and not sent */
+ if (aa->sent == 1)
+ continue;
+ if (type == SCTP_ADD_IP_ADDRESS &&
+ aa->ap.aph.ph.param_type == SCTP_DEL_IP_ADDRESS) {
+ /* add requested, delete already queued */
+
+ /* delete the existing entry in the queue */
+ TAILQ_REMOVE(&stcb->asoc.asconf_queue, aa, next);
+ /* free the entry */
+ SCTP_FREE(aa);
+ return (-1);
+ } else if (type == SCTP_DEL_IP_ADDRESS &&
+ aa->ap.aph.ph.param_type == SCTP_ADD_IP_ADDRESS) {
+ /* delete requested, add already queued */
+
+ /* delete the existing entry in the queue */
+ TAILQ_REMOVE(&stcb->asoc.asconf_queue, aa, next);
+ /* take the entry off the appropriate list */
+ sctp_asconf_addr_mgmt_ack(stcb, aa->ifa, type, 1);
+ /* free the entry */
+ SCTP_FREE(aa);
+ return (-1);
+ }
+ } /* for each aa */
+
+ /* adding new request to the queue */
+ SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa), "AsconfAddr");
+ if (aa == NULL) {
+ /* didn't get memory */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("asconf_queue_add_sa: failed to get memory!\n");
+ }
+#endif /* SCTP_DEBUG */
+ return (-1);
+ }
+ /* fill in asconf address parameter fields */
+ /* top level elements are "networked" during send */
+ aa->ap.aph.ph.param_type = type;
+ aa->ifa = sctp_find_ifa_by_addr(sa);
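+	/* note: this may be NULL if sa is not one of our local addresses */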
+ /* correlation_id filled in during send routine later... */
+ if (sa->sa_family == AF_INET6) {
+ /* IPv6 address */
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)sa;
+ aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
+ aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv6addr_param));
+ aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) + sizeof(struct sctp_ipv6addr_param);
+ memcpy(&aa->ap.addrp.addr, &sin6->sin6_addr,
+ sizeof(struct in6_addr));
+ } else if (sa->sa_family == AF_INET) {
+ /* IPv4 address */
+ struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+
+ aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
+ aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv4addr_param));
+ aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) + sizeof(struct sctp_ipv4addr_param);
+ memcpy(&aa->ap.addrp.addr, &sin->sin_addr,
+ sizeof(struct in_addr));
+	} else {
+		/* invalid family! */
+		SCTP_FREE(aa);
+		return (-1);
+ }
+ aa->sent = 0; /* clear sent flag */
+
+ /*
+	 * if we are deleting an address, it should go out last; otherwise,
+	 * add it to the front of the pending queue
+ */
+ if (type == SCTP_ADD_IP_ADDRESS) {
+ /* add goes to the front of the queue */
+ TAILQ_INSERT_HEAD(&stcb->asoc.asconf_queue, aa, next);
+ } else {
+ /* delete and set primary goes to the back of the queue */
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+ }
+
+ return (0);
+}
+
+/*
+ * find a specific asconf param on our "sent" queue
+ */
+static struct sctp_asconf_addr *
+sctp_asconf_find_param(struct sctp_tcb *stcb, uint32_t correlation_id)
+{
+ struct sctp_asconf_addr *aa;
+
+ TAILQ_FOREACH(aa, &stcb->asoc.asconf_queue, next) {
+ if (aa->ap.aph.correlation_id == correlation_id &&
+ aa->sent == 1) {
+ /* found it */
+ return (aa);
+ }
+ }
+ /* didn't find it */
+ return (NULL);
+}
+
+/*
+ * process an SCTP_ERROR_CAUSE_IND for a ASCONF-ACK parameter and do
+ * notifications based on the error response
+ */
+static void
+sctp_asconf_process_error(struct sctp_tcb *stcb,
+ struct sctp_asconf_paramhdr *aph)
+{
+ struct sctp_error_cause *eh;
+ struct sctp_paramhdr *ph;
+ uint16_t param_type;
+ uint16_t error_code;
+
+ eh = (struct sctp_error_cause *)(aph + 1);
+ ph = (struct sctp_paramhdr *)(eh + 1);
+ /* validate lengths */
+	if (ntohs(eh->length) + sizeof(struct sctp_error_cause) >
+	    ntohs(aph->ph.param_length)) {
+ /* invalid error cause length */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("asconf_process_error: cause element too long\n");
+ }
+#endif /* SCTP_DEBUG */
+ return;
+ }
+	if (ntohs(ph->param_length) + sizeof(struct sctp_paramhdr) >
+	    ntohs(eh->length)) {
+ /* invalid included TLV length */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("asconf_process_error: included TLV too long\n");
+ }
+#endif /* SCTP_DEBUG */
+ return;
+ }
+ /* which error code ? */
+ error_code = ntohs(eh->code);
+ param_type = ntohs(aph->ph.param_type);
+ /* FIX: this should go back up the REMOTE_ERROR ULP notify */
+ switch (error_code) {
+ case SCTP_CAUSE_RESOURCE_SHORTAGE:
+ /* we allow ourselves to "try again" for this error */
+ break;
+ default:
+ /* peer can't handle it... */
+ switch (param_type) {
+ case SCTP_ADD_IP_ADDRESS:
+ case SCTP_DEL_IP_ADDRESS:
+ stcb->asoc.peer_supports_asconf = 0;
+ break;
+ case SCTP_SET_PRIM_ADDR:
+ stcb->asoc.peer_supports_asconf = 0;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/*
+ * process an asconf queue param.
+ * aparam: parameter to process (will be removed from the queue)
+ * flag: 1=success, 0=failure
+ */
+static void
+sctp_asconf_process_param_ack(struct sctp_tcb *stcb,
+ struct sctp_asconf_addr *aparam, uint32_t flag)
+{
+ uint16_t param_type;
+
+ /* process this param */
+ param_type = aparam->ap.aph.ph.param_type;
+ switch (param_type) {
+ case SCTP_ADD_IP_ADDRESS:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("process_param_ack: added IP address\n");
+ }
+#endif /* SCTP_DEBUG */
+ sctp_asconf_addr_mgmt_ack(stcb, aparam->ifa, param_type, flag);
+ break;
+ case SCTP_DEL_IP_ADDRESS:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("process_param_ack: deleted IP address\n");
+ }
+#endif /* SCTP_DEBUG */
+ /* nothing really to do... lists already updated */
+ break;
+ case SCTP_SET_PRIM_ADDR:
+ /* nothing to do... peer may start using this addr */
+ if (flag == 0)
+ stcb->asoc.peer_supports_asconf = 0;
+ break;
+ default:
+ /* should NEVER happen */
+ break;
+ }
+
+ /* remove the param and free it */
+ TAILQ_REMOVE(&stcb->asoc.asconf_queue, aparam, next);
+ SCTP_FREE(aparam);
+}
+
+/*
+ * cleanup from a bad asconf ack parameter
+ */
+static void
+sctp_asconf_ack_clear(struct sctp_tcb *stcb)
+{
+ /* assume peer doesn't really know how to do asconfs */
+ stcb->asoc.peer_supports_asconf = 0;
+ /* XXX we could free the pending queue here */
+}
+
+void
+sctp_handle_asconf_ack(struct mbuf *m, int offset,
+ struct sctp_asconf_ack_chunk *cp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_association *asoc;
+ uint32_t serial_num;
+ uint16_t ack_length;
+ struct sctp_asconf_paramhdr *aph;
+ struct sctp_asconf_addr *aa, *aa_next;
+ uint32_t last_error_id = 0; /* last error correlation id */
+ uint32_t id;
+ struct sctp_asconf_addr *ap;
+
+ /* asconf param buffer */
+ static uint8_t aparam_buf[DEFAULT_PARAM_BUFFER];
+
+ /* verify minimum length */
+ if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_asconf_ack_chunk)) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("handle_asconf_ack: chunk too small = %xh\n",
+ ntohs(cp->ch.chunk_length));
+ }
+#endif /* SCTP_DEBUG */
+ return;
+ }
+ asoc = &stcb->asoc;
+ serial_num = ntohl(cp->serial_number);
+
+ /*
+ * NOTE: we may want to handle this differently- currently, we will
+ * abort when we get an ack for the expected serial number + 1 (eg.
+ * we didn't send it), process an ack normally if it is the expected
+ * serial number, and re-send the previous ack for *ALL* other
+ * serial numbers
+ */
+
+ /*
+ * if the serial number is the next expected, but I didn't send it,
+ * abort the asoc, since someone probably just hijacked us...
+ */
+ if (serial_num == (asoc->asconf_seq_out + 1)) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("handle_asconf_ack: got unexpected next serial number! Aborting asoc!\n");
+ }
+#endif /* SCTP_DEBUG */
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_CAUSE_ILLEGAL_ASCONF_ACK, NULL);
+ return;
+ }
+ if (serial_num != asoc->asconf_seq_out) {
+ /* got a duplicate/unexpected ASCONF-ACK */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("handle_asconf_ack: got duplicate/unexpected serial number = %xh (expected = %xh)\n", serial_num, asoc->asconf_seq_out);
+ }
+#endif /* SCTP_DEBUG */
+ return;
+ }
+ if (stcb->asoc.asconf_sent == 0) {
+		/* got an unexpected ASCONF-ACK for serial not in flight */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("handle_asconf_ack: got serial number = %xh but not in flight\n", serial_num);
+ }
+#endif /* SCTP_DEBUG */
+ /* nothing to do... duplicate ACK received */
+ return;
+ }
+ /* stop our timer */
+ sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, net);
+
+ /* process the ASCONF-ACK contents */
+ ack_length = ntohs(cp->ch.chunk_length) -
+ sizeof(struct sctp_asconf_ack_chunk);
+ offset += sizeof(struct sctp_asconf_ack_chunk);
+ /* process through all parameters */
+ while (ack_length >= sizeof(struct sctp_asconf_paramhdr)) {
+ unsigned int param_length, param_type;
+
+ /* get pointer to next asconf parameter */
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_asconf_paramhdr), aparam_buf);
+ if (aph == NULL) {
+ /* can't get an asconf paramhdr */
+ sctp_asconf_ack_clear(stcb);
+ return;
+ }
+ param_type = ntohs(aph->ph.param_type);
+ param_length = ntohs(aph->ph.param_length);
+ if (param_length > ack_length) {
+ sctp_asconf_ack_clear(stcb);
+ return;
+ }
+ if (param_length < sizeof(struct sctp_paramhdr)) {
+ sctp_asconf_ack_clear(stcb);
+ return;
+ }
+ /* get the complete parameter... */
+ if (param_length > sizeof(aparam_buf)) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("param length (%u) larger than buffer size!\n", param_length);
+ }
+#endif /* SCTP_DEBUG */
+ sctp_asconf_ack_clear(stcb);
+ return;
+ }
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset, param_length, aparam_buf);
+ if (aph == NULL) {
+ sctp_asconf_ack_clear(stcb);
+ return;
+ }
+ /* correlation_id is transparent to peer, no ntohl needed */
+ id = aph->correlation_id;
+
+ switch (param_type) {
+ case SCTP_ERROR_CAUSE_IND:
+ last_error_id = id;
+ /* find the corresponding asconf param in our queue */
+ ap = sctp_asconf_find_param(stcb, id);
+ if (ap == NULL) {
+ /* hmm... can't find this in our queue! */
+ break;
+ }
+ /* process the parameter, failed flag */
+ sctp_asconf_process_param_ack(stcb, ap, 0);
+ /* process the error response */
+ sctp_asconf_process_error(stcb, aph);
+ break;
+ case SCTP_SUCCESS_REPORT:
+ /* find the corresponding asconf param in our queue */
+ ap = sctp_asconf_find_param(stcb, id);
+ if (ap == NULL) {
+ /* hmm... can't find this in our queue! */
+ break;
+ }
+ /* process the parameter, success flag */
+ sctp_asconf_process_param_ack(stcb, ap, 1);
+ break;
+ default:
+ break;
+ } /* switch */
+
+ /* update remaining ASCONF-ACK message length to process */
+ ack_length -= SCTP_SIZE32(param_length);
+ if (ack_length <= 0) {
+ /* no more data in the mbuf chain */
+ break;
+ }
+ offset += SCTP_SIZE32(param_length);
+ } /* while */
+
+ /*
+ * if there are any "sent" params still on the queue, these are
+ * implicitly "success", or "failed" (if we got an error back) ...
+ * so process these appropriately
+ *
+ * we assume that the correlation_id's are monotonically increasing
+ * beginning from 1 and that we don't have *that* many outstanding
+ * at any given time
+ */
+ if (last_error_id == 0)
+ last_error_id--;/* set to "max" value */
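+	/*
+	 * note: with no errors seen, last_error_id wraps to 0xffffffff so
+	 * that every sent param below is treated as an implicit success
+	 */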
+ for (aa = TAILQ_FIRST(&stcb->asoc.asconf_queue); aa != NULL;
+ aa = aa_next) {
+ aa_next = TAILQ_NEXT(aa, next);
+ if (aa->sent == 1) {
+ /*
+			 * implicitly successful or failed: if correlation_id
+			 * < last_error_id, then success; else, failure
+			 */
+			if (aa->ap.aph.correlation_id < last_error_id)
+				sctp_asconf_process_param_ack(stcb, aa, 1);
+			else
+				sctp_asconf_process_param_ack(stcb, aa, 0);
+ } else {
+ /*
+ * since we always process in order (FIFO queue) if
+ * we reach one that hasn't been sent, the rest
+ * should not have been sent either. so, we're
+ * done...
+ */
+ break;
+ }
+ }
+
+ /* update the next sequence number to use */
+ asoc->asconf_seq_out++;
+ /* remove the old ASCONF on our outbound queue */
+ sctp_toss_old_asconf(stcb);
+ /* clear the sent flag to allow new ASCONFs */
+ asoc->asconf_sent = 0;
+ if (!TAILQ_EMPTY(&stcb->asoc.asconf_queue)) {
+ /* we have more params, so restart our timer */
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep,
+ stcb, net);
+ }
+}
+
+/* is this an interface that we care about at all? */
+static uint32_t
+sctp_is_desired_interface_type(struct ifaddr *ifa)
+{
+ int result;
+
+ /* check the interface type to see if it's one we care about */
+ switch (ifa->ifa_ifp->if_type) {
+ case IFT_ETHER:
+ case IFT_ISO88023:
+ case IFT_ISO88025:
+ case IFT_STARLAN:
+ case IFT_P10:
+ case IFT_P80:
+ case IFT_HY:
+ case IFT_FDDI:
+ case IFT_PPP:
+ case IFT_XETHER:
+ case IFT_SLIP:
+ case IFT_GIF:
+ result = 1;
+ break;
+ default:
+ result = 0;
+ }
+
+ return (result);
+}
+
+static uint32_t
+sctp_is_scopeid_in_nets(struct sctp_tcb *stcb, struct sockaddr *sa)
+{
+ struct sockaddr_in6 *sin6, *net6;
+ struct sctp_nets *net;
+
+ if (sa->sa_family != AF_INET6) {
+ /* wrong family */
+ return (0);
+ }
+ sin6 = (struct sockaddr_in6 *)sa;
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) == 0) {
+ /* not link local address */
+ return (0);
+ }
+ /* hunt through our destination nets list for this scope_id */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (((struct sockaddr *)(&net->ro._l_addr))->sa_family !=
+ AF_INET6)
+ continue;
+ net6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ if (IN6_IS_ADDR_LINKLOCAL(&net6->sin6_addr) == 0)
+ continue;
+ if (sctp_is_same_scope(sin6, net6)) {
+ /* found one */
+ return (1);
+ }
+ }
+ /* didn't find one */
+ return (0);
+}
+
+/*
+ * address management functions
+ */
+static void
+sctp_addr_mgmt_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct ifaddr *ifa, uint16_t type)
+{
+ int status;
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0 &&
+ sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF)) {
+ /* subset bound, no ASCONF allowed case, so ignore */
+ return;
+ }
+ /*
+	 * note: we know this is not the "subset bound, no ASCONF" case;
+	 * i.e. this is boundall or subset bound w/ASCONF allowed
+ */
+
+ /* first, make sure it's a good address family */
+ if (ifa->ifa_addr->sa_family != AF_INET6 &&
+ ifa->ifa_addr->sa_family != AF_INET) {
+ return;
+ }
+ /* make sure we're "allowed" to add this type of addr */
+ if (ifa->ifa_addr->sa_family == AF_INET6) {
+ struct in6_ifaddr *ifa6;
+
+ /* invalid if we're not a v6 endpoint */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0)
+ return;
+ /* is the v6 addr really valid ? */
+ ifa6 = (struct in6_ifaddr *)ifa;
+ if (IFA6_IS_DEPRECATED(ifa6) ||
+ (ifa6->ia6_flags &
+ (IN6_IFF_DETACHED | IN6_IFF_ANYCAST | IN6_IFF_NOTREADY))) {
+ /* can't use an invalid address */
+ return;
+ }
+ }
+ /* put this address on the "pending/do not use yet" list */
+ /*
+	 * Note: we do this primarily for the subset bind case. We don't have
+ * scoping flags at the EP level, so we must add link local/site
+ * local addresses to the EP, then need to "negate" them here.
+ * Recall that this routine is only called for the subset bound
+ * w/ASCONF allowed case.
+ */
+
+ /*
+ * do a scope_id check against any link local addresses in the
+ * destination nets list to see if we should put this local address
+ * on the pending list or not eg. don't put on the list if we have a
+ * link local destination with the same scope_id
+ */
+ if (type == SCTP_ADD_IP_ADDRESS) {
+ if (sctp_is_scopeid_in_nets(stcb, ifa->ifa_addr) == 0) {
+ sctp_add_local_addr_assoc(stcb, ifa);
+ }
+ }
+ /*
+	 * check address scope: if the address is out of scope, don't queue
+ * anything... note: this would leave the address on both inp and
+ * asoc lists
+ */
+ if (ifa->ifa_addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+			/* we skip unspecified addresses */
+ return;
+ }
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ if (stcb->asoc.local_scope == 0) {
+ return;
+ }
+ /* is it the right link local scope? */
+ if (sctp_is_scopeid_in_nets(stcb, ifa->ifa_addr) == 0) {
+ return;
+ }
+ }
+ if (stcb->asoc.site_scope == 0 &&
+ IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
+ return;
+ }
+ } else if (ifa->ifa_addr->sa_family == AF_INET) {
+ struct sockaddr_in *sin;
+ struct in6pcb *inp6;
+
+ inp6 = (struct in6pcb *)&inp->ip_inp.inp;
+ /* invalid if we are a v6 only endpoint */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ (inp6->inp_flags & IN6P_IPV6_V6ONLY)
+ )
+ return;
+
+ sin = (struct sockaddr_in *)ifa->ifa_addr;
+ if (sin->sin_addr.s_addr == 0) {
+			/* we skip unspecified addresses */
+ return;
+ }
+ if (stcb->asoc.ipv4_local_scope == 0 &&
+ IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
+ return;
+ }
+ } else {
+ /* else, not AF_INET or AF_INET6, so skip */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("addr_mgmt_assoc: not AF_INET or AF_INET6\n");
+ }
+#endif /* SCTP_DEBUG */
+ return;
+ }
+
+ /* queue an asconf for this address add/delete */
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF)) {
+ /* does the peer do asconf? */
+ if (stcb->asoc.peer_supports_asconf) {
+ /* queue an asconf for this addr */
+ status = sctp_asconf_queue_add(stcb, ifa, type);
+ /*
+ * if queued ok, and in correct state, set the
+ * ASCONF timer if in non-open state, we will set
+ * this timer when the state does go open and do all
+ * the asconf's
+ */
+ if (status == 0 &&
+ SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
+ stcb, stcb->asoc.primary_destination);
+ }
+ }
+ } else {
+ /* this is the boundall, no ASCONF case */
+#if 0
+		/* Peter: Fix me? why the if 0? */
+ /*
+ * assume kernel will delete this very shortly; add done
+ * above
+ */
+ if (type == SCTP_DEL_IP_ADDRESS) {
+ /* if deleting, add this addr to the do not use list */
+ sctp_add_local_addr_assoc(stcb, ifa);
+ }
+#endif
+ }
+}
+
+static void
+sctp_addr_mgmt_ep(struct sctp_inpcb *inp, struct ifaddr *ifa, uint16_t type)
+{
+ struct sctp_tcb *stcb;
+ int s;
+
+ SCTP_INP_WLOCK(inp);
+ /* make sure we're "allowed" to add this type of addr */
+ if (ifa->ifa_addr->sa_family == AF_INET6) {
+ struct in6_ifaddr *ifa6;
+
+ /* invalid if we're not a v6 endpoint */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+ SCTP_INP_WUNLOCK(inp);
+ return;
+ }
+ /* is the v6 addr really valid ? */
+ ifa6 = (struct in6_ifaddr *)ifa;
+ if (IFA6_IS_DEPRECATED(ifa6) ||
+ (ifa6->ia6_flags &
+ (IN6_IFF_DETACHED | IN6_IFF_ANYCAST | IN6_IFF_NOTREADY))) {
+ /* can't use an invalid address */
+ SCTP_INP_WUNLOCK(inp);
+ return;
+ }
+ } else if (ifa->ifa_addr->sa_family == AF_INET) {
+ /* invalid if we are a v6 only endpoint */
+ struct in6pcb *inp6;
+
+ inp6 = (struct in6pcb *)&inp->ip_inp.inp;
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ (inp6->inp_flags & IN6P_IPV6_V6ONLY)
+ ) {
+ SCTP_INP_WUNLOCK(inp);
+ return;
+ }
+ } else {
+ /* invalid address family */
+ SCTP_INP_WUNLOCK(inp);
+ return;
+ }
+ /* is this endpoint subset bound ? */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+ /* subset bound endpoint */
+ if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF)) {
+ /*
+			 * subset bound, but ASCONFs not allowed... if
+			 * adding, nothing to do, since not allowed; if
+			 * deleting, remove the address from the endpoint
+			 * (the peer will have to "timeout" this addr)
+ */
+ if (type == SCTP_DEL_IP_ADDRESS) {
+ sctp_del_local_addr_ep(inp, ifa);
+ }
+ /* no asconfs to queue for this inp... */
+ SCTP_INP_WUNLOCK(inp);
+ return;
+ } else {
+ /*
+			 * subset bound, ASCONFs allowed... if adding, add
+			 * the address to the endpoint list; if deleting,
+			 * remove the address from the endpoint
+ */
+ if (type == SCTP_ADD_IP_ADDRESS) {
+ sctp_add_local_addr_ep(inp, ifa);
+ } else {
+ sctp_del_local_addr_ep(inp, ifa);
+ }
+ /* drop through and notify all asocs */
+ }
+ }
+ s = splnet();
+ /* process for all associations for this endpoint */
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ sctp_addr_mgmt_assoc(inp, stcb, ifa, type);
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ splx(s);
+ SCTP_INP_WUNLOCK(inp);
+}
+
+/*
+ * restrict the use of this address
+ */
+static void
+sctp_addr_mgmt_restrict_ep(struct sctp_inpcb *inp, struct ifaddr *ifa)
+{
+ struct sctp_tcb *stcb;
+ int s;
+
+ /* is this endpoint bound to all? */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+ /*
+ * Nothing to do for subset bound case. Allow sctp_bindx()
+ * to manage the address lists
+ */
+ return;
+ }
+ s = splnet();
+ SCTP_INP_RLOCK(inp);
+ /* process for all associations for this endpoint */
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ /* put this address on the "pending/do not use yet" list */
+ SCTP_TCB_LOCK(stcb);
+ sctp_add_local_addr_assoc(stcb, ifa);
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ splx(s);
+ SCTP_INP_RUNLOCK(inp);
+}
+
+/*
+ * this is only called for kernel initiated address changes eg. it will check
+ * the PCB_FLAGS_AUTO_ASCONF flag
+ */
+static void
+sctp_addr_mgmt(struct ifaddr *ifa, uint16_t type)
+{
+ struct sockaddr *sa;
+ struct sctp_inpcb *inp;
+
+ /* make sure we care about this interface... */
+ if (!sctp_is_desired_interface_type(ifa)) {
+ return;
+ }
+ sa = ifa->ifa_addr;
+ if (sa->sa_family != AF_INET && sa->sa_family != AF_INET6)
+ return;
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ if (type == SCTP_ADD_IP_ADDRESS)
+ printf("sctp_addr_mgmt: kernel adds ");
+ else
+ printf("sctp_addr_mgmt: kernel deletes ");
+ sctp_print_address(sa);
+ }
+#endif /* SCTP_DEBUG */
+
+ /* go through all our PCB's */
+ LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF)) {
+ sctp_addr_mgmt_ep(inp, ifa, type);
+		} else {
+			/* not allowing automatic asconf's on this inp */
+			/* this address is going away anyways... */
+			if (type == SCTP_DEL_IP_ADDRESS)
+				continue;
+			/* (temporarily) restrict this address */
+			sctp_addr_mgmt_restrict_ep(inp, ifa);
+		}
+ }
+}
+
+/*
+ * add/delete IP address requests from the kernel (via routing change).
+ * the address is assumed to be non-broadcast and non-multicast.
+ * addresses are passed from any type of interface, so duplicate
+ * addresses may get requested and need to be filtered
+ */
+
+void
+sctp_add_ip_address(struct ifaddr *ifa)
+{
+ sctp_addr_mgmt(ifa, SCTP_ADD_IP_ADDRESS);
+}
+
+void
+sctp_delete_ip_address(struct ifaddr *ifa)
+{
+ struct sctp_inpcb *inp;
+
+ /* process the delete */
+ sctp_addr_mgmt(ifa, SCTP_DEL_IP_ADDRESS);
+
+ /*
+	 * need to remove this ifaddr from any cached routes and from any
+	 * assoc "restricted/pending" lists
+ */
+ /* make sure we care about this interface... */
+ if (!sctp_is_desired_interface_type(ifa)) {
+ return;
+ }
+ /* go through all our PCB's */
+ SCTP_INP_INFO_RLOCK();
+ LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
+ struct sctp_tcb *stcb;
+ struct sctp_laddr *laddr, *laddr_next;
+
+ /* process for all associations for this endpoint */
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ struct sctp_nets *net;
+
+ /* process through the nets list */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ struct rtentry *rt;
+
+ /* delete this address if cached */
+ rt = net->ro.ro_rt;
+ if (rt != NULL && rt->rt_ifa == ifa) {
+ /* RTFREE(rt); */
+ net->ro.ro_rt = NULL;
+ }
+ } /* for each net */
+ /* process through the asoc "pending" list */
+ laddr = LIST_FIRST(&stcb->asoc.sctp_local_addr_list);
+ while (laddr != NULL) {
+ laddr_next = LIST_NEXT(laddr, sctp_nxt_addr);
+ /* remove if in use */
+ if (laddr->ifa == ifa) {
+ sctp_remove_laddr(laddr);
+ }
+ laddr = laddr_next;
+ } /* while */
+ } /* for each stcb */
+ /* process through the inp bound addr list */
+ laddr = LIST_FIRST(&inp->sctp_addr_list);
+ while (laddr != NULL) {
+ laddr_next = LIST_NEXT(laddr, sctp_nxt_addr);
+ /* remove if in use */
+ if (laddr->ifa == ifa) {
+ sctp_remove_laddr(laddr);
+ }
+ laddr = laddr_next;
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ SCTP_INP_INFO_RUNLOCK();
+}
+
+/*
+ * sa is the sockaddr to ask the peer to set primary to.
+ * returns: 0 = completed, -1 = error
+ */
+int32_t
+sctp_set_primary_ip_address_sa(struct sctp_tcb *stcb, struct sockaddr *sa)
+{
+ /* NOTE: we currently don't check the validity of the address! */
+
+ /* queue an ASCONF:SET_PRIM_ADDR to be sent */
+ if (!sctp_asconf_queue_add_sa(stcb, sa, SCTP_SET_PRIM_ADDR)) {
+ /* set primary queuing succeeded */
+ if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
+ stcb->sctp_ep, stcb,
+ stcb->asoc.primary_destination);
+ }
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("set_primary_ip_address_sa: queued on tcb=%p, ",
+ stcb);
+ sctp_print_address(sa);
+ }
+#endif /* SCTP_DEBUG */
+ } else {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("set_primary_ip_address_sa: failed to add to queue on tcb=%p, ",
+ stcb);
+ sctp_print_address(sa);
+ }
+#endif /* SCTP_DEBUG */
+ return (-1);
+ }
+ return (0);
+}
+
+void
+sctp_set_primary_ip_address(struct ifaddr *ifa)
+{
+ struct sctp_inpcb *inp;
+
+ /* make sure we care about this interface... */
+ if (!sctp_is_desired_interface_type(ifa)) {
+ return;
+ }
+ /* go through all our PCB's */
+ LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
+ struct sctp_tcb *stcb;
+
+ /* process for all associations for this endpoint */
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ /* queue an ASCONF:SET_PRIM_ADDR to be sent */
+ if (!sctp_asconf_queue_add(stcb, ifa,
+ SCTP_SET_PRIM_ADDR)) {
+ /* set primary queuing succeeded */
+ if (SCTP_GET_STATE(&stcb->asoc) ==
+ SCTP_STATE_OPEN) {
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
+ stcb->sctp_ep, stcb,
+ stcb->asoc.primary_destination);
+ }
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("set_primary_ip_address: queued on stcb=%p, ",
+ stcb);
+ sctp_print_address(ifa->ifa_addr);
+ }
+#endif /* SCTP_DEBUG */
+ }
+ } /* for each stcb */
+ } /* for each inp */
+}
+
+static struct sockaddr *
+sctp_find_valid_localaddr(struct sctp_tcb *stcb)
+{
+ struct ifnet *ifn;
+ struct ifaddr *ifa;
+
+ TAILQ_FOREACH(ifn, &ifnet, if_list) {
+ if (stcb->asoc.loopback_scope == 0 && ifn->if_type == IFT_LOOP) {
+ /* Skip if loopback_scope not set */
+ continue;
+ }
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ if (ifa->ifa_addr->sa_family == AF_INET &&
+ stcb->asoc.ipv4_addr_legal) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)ifa->ifa_addr;
+ if (sin->sin_addr.s_addr == 0) {
+					/* skip unspecified addresses */
+ continue;
+ }
+ if (stcb->asoc.ipv4_local_scope == 0 &&
+ IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))
+ continue;
+
+ if (sctp_is_addr_restricted(stcb,
+ ifa->ifa_addr))
+ continue;
+ /* found a valid local v4 address to use */
+ return (ifa->ifa_addr);
+ } else if (ifa->ifa_addr->sa_family == AF_INET6 &&
+ stcb->asoc.ipv6_addr_legal) {
+ struct sockaddr_in6 *sin6;
+ struct in6_ifaddr *ifa6;
+
+ ifa6 = (struct in6_ifaddr *)ifa;
+ if (IFA6_IS_DEPRECATED(ifa6) ||
+ (ifa6->ia6_flags & (IN6_IFF_DETACHED |
+ IN6_IFF_ANYCAST | IN6_IFF_NOTREADY)))
+ continue;
+
+ sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+					/* we skip unspecified addresses */
+ continue;
+ }
+ if (stcb->asoc.local_scope == 0 &&
+ IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))
+ continue;
+ if (stcb->asoc.site_scope == 0 &&
+ IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))
+ continue;
+
+ /* found a valid local v6 address to use */
+ return (ifa->ifa_addr);
+ }
+ }
+ }
+ /* no valid addresses found */
+ return (NULL);
+}
+
+static struct sockaddr *
+sctp_find_valid_localaddr_ep(struct sctp_tcb *stcb)
+{
+ struct sctp_laddr *laddr;
+
+ LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ continue;
+ }
+ if (laddr->ifa->ifa_addr == NULL) {
+ continue;
+ }
+ /* is the address restricted ? */
+ if (sctp_is_addr_restricted(stcb, laddr->ifa->ifa_addr))
+ continue;
+
+ /* found a valid local address to use */
+ return (laddr->ifa->ifa_addr);
+ }
+ /* no valid addresses found */
+ return (NULL);
+}
+
+/*
+ * builds an ASCONF chunk from queued ASCONF params.
+ * returns NULL on error (no mbuf, no ASCONF params queued, etc)
+ */
+struct mbuf *
+sctp_compose_asconf(struct sctp_tcb *stcb)
+{
+ struct mbuf *m_asconf, *m_asconf_chk;
+ struct sctp_asconf_addr *aa;
+ struct sctp_asconf_chunk *acp;
+ struct sctp_asconf_paramhdr *aph;
+ struct sctp_asconf_addr_param *aap;
+ uint32_t p_length;
+ uint32_t correlation_id = 1; /* 0 is reserved... */
+ caddr_t ptr, lookup_ptr;
+ uint8_t lookup_used = 0;
+
+ /* are there any asconf params to send? */
+ if (TAILQ_EMPTY(&stcb->asoc.asconf_queue)) {
+ return (NULL);
+ }
+ /*
+	 * get a chunk header mbuf and a cluster for the asconf params, since
+	 * it's simpler to fill in the asconf chunk header's lookup address on
+ * the fly
+ */
+ m_asconf_chk = sctp_get_mbuf_for_msg(sizeof(struct sctp_asconf_chunk), 1, M_DONTWAIT, 1, MT_DATA);
+ if (m_asconf_chk == NULL) {
+ /* no mbuf's */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1)
+ printf("compose_asconf: couldn't get chunk mbuf!\n");
+#endif /* SCTP_DEBUG */
+ return (NULL);
+ }
+ m_asconf = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
+ if (m_asconf == NULL) {
+ /* no mbuf's */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1)
+ printf("compose_asconf: couldn't get mbuf!\n");
+#endif /* SCTP_DEBUG */
+ sctp_m_freem(m_asconf_chk);
+ return (NULL);
+ }
+ m_asconf_chk->m_len = sizeof(struct sctp_asconf_chunk);
+ m_asconf->m_len = 0;
+ acp = mtod(m_asconf_chk, struct sctp_asconf_chunk *);
+ bzero(acp, sizeof(struct sctp_asconf_chunk));
+ /* save pointers to lookup address and asconf params */
+ lookup_ptr = (caddr_t)(acp + 1); /* after the header */
+ ptr = mtod(m_asconf, caddr_t); /* beginning of cluster */
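+	/*
+	 * layout: m_asconf_chk carries the chunk header plus the lookup
+	 * address; m_asconf (the cluster) carries the asconf params. the
+	 * two mbufs are chained together at the end.
+	 */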
+
+ /* fill in chunk header info */
+ acp->ch.chunk_type = SCTP_ASCONF;
+ acp->ch.chunk_flags = 0;
+ acp->serial_number = htonl(stcb->asoc.asconf_seq_out);
+
+ /* add parameters... up to smallest MTU allowed */
+ TAILQ_FOREACH(aa, &stcb->asoc.asconf_queue, next) {
+ /* get the parameter length */
+ p_length = SCTP_SIZE32(aa->ap.aph.ph.param_length);
+ /* will it fit in current chunk? */
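+		/*
+		 * (the chunk header and lookup address are carried in
+		 * m_asconf_chk and are not counted by this check)
+		 */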
+ if (m_asconf->m_len + p_length > stcb->asoc.smallest_mtu) {
+ /* won't fit, so we're done with this chunk */
+ break;
+ }
+ /* assign (and store) a correlation id */
+ aa->ap.aph.correlation_id = correlation_id++;
+
+ /*
+ * fill in address if we're doing a delete this is a simple
+ * way for us to fill in the correlation address, which
+ * should only be used by the peer if we're deleting our
+ * source address and adding a new address (e.g. renumbering
+ * case)
+ */
+ if (lookup_used == 0 &&
+ aa->ap.aph.ph.param_type == SCTP_DEL_IP_ADDRESS) {
+ struct sctp_ipv6addr_param *lookup;
+ uint16_t p_size, addr_size;
+
+ lookup = (struct sctp_ipv6addr_param *)lookup_ptr;
+ lookup->ph.param_type =
+ htons(aa->ap.addrp.ph.param_type);
+ if (aa->ap.addrp.ph.param_type == SCTP_IPV6_ADDRESS) {
+ /* copy IPv6 address */
+ p_size = sizeof(struct sctp_ipv6addr_param);
+ addr_size = sizeof(struct in6_addr);
+ } else {
+ /* copy IPv4 address */
+ p_size = sizeof(struct sctp_ipv4addr_param);
+ addr_size = sizeof(struct in_addr);
+ }
+ lookup->ph.param_length = htons(SCTP_SIZE32(p_size));
+ memcpy(lookup->addr, &aa->ap.addrp.addr, addr_size);
+ m_asconf_chk->m_len += SCTP_SIZE32(p_size);
+ lookup_used = 1;
+ }
+ /* copy into current space */
+ memcpy(ptr, &aa->ap, p_length);
+
+ /* network elements and update lengths */
+ aph = (struct sctp_asconf_paramhdr *)ptr;
+ aap = (struct sctp_asconf_addr_param *)ptr;
+ /* correlation_id is transparent to peer, no htonl needed */
+ aph->ph.param_type = htons(aph->ph.param_type);
+ aph->ph.param_length = htons(aph->ph.param_length);
+ aap->addrp.ph.param_type = htons(aap->addrp.ph.param_type);
+ aap->addrp.ph.param_length = htons(aap->addrp.ph.param_length);
+
+ m_asconf->m_len += SCTP_SIZE32(p_length);
+ ptr += SCTP_SIZE32(p_length);
+
+ /*
+ * these params are removed off the pending list upon
+ * getting an ASCONF-ACK back from the peer, just set flag
+ */
+ aa->sent = 1;
+ }
+ /* check to see if the lookup addr has been populated yet */
+ if (lookup_used == 0) {
+ /* NOTE: if the address param is optional, can skip this... */
+ /* add any valid (existing) address... */
+ struct sctp_ipv6addr_param *lookup;
+ uint16_t p_size, addr_size;
+ struct sockaddr *found_addr;
+ caddr_t addr_ptr;
+
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)
+ found_addr = sctp_find_valid_localaddr(stcb);
+ else
+ found_addr = sctp_find_valid_localaddr_ep(stcb);
+
+ lookup = (struct sctp_ipv6addr_param *)lookup_ptr;
+ if (found_addr != NULL) {
+ if (found_addr->sa_family == AF_INET6) {
+ /* copy IPv6 address */
+ lookup->ph.param_type =
+ htons(SCTP_IPV6_ADDRESS);
+ p_size = sizeof(struct sctp_ipv6addr_param);
+ addr_size = sizeof(struct in6_addr);
+ addr_ptr = (caddr_t)&((struct sockaddr_in6 *)
+ found_addr)->sin6_addr;
+ } else {
+ /* copy IPv4 address */
+ lookup->ph.param_type =
+ htons(SCTP_IPV4_ADDRESS);
+ p_size = sizeof(struct sctp_ipv4addr_param);
+ addr_size = sizeof(struct in_addr);
+ addr_ptr = (caddr_t)&((struct sockaddr_in *)
+ found_addr)->sin_addr;
+ }
+ lookup->ph.param_length = htons(SCTP_SIZE32(p_size));
+ memcpy(lookup->addr, addr_ptr, addr_size);
+ m_asconf_chk->m_len += SCTP_SIZE32(p_size);
+ lookup_used = 1;
+ } else {
+ /* uh oh... don't have any address?? */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1)
+ printf("compose_asconf: no lookup addr!\n");
+#endif /* SCTP_DEBUG */
+ /* for now, we send a IPv4 address of 0.0.0.0 */
+ lookup->ph.param_type = htons(SCTP_IPV4_ADDRESS);
+ lookup->ph.param_length = htons(SCTP_SIZE32(sizeof(struct sctp_ipv4addr_param)));
+ bzero(lookup->addr, sizeof(struct in_addr));
+ m_asconf_chk->m_len += SCTP_SIZE32(sizeof(struct sctp_ipv4addr_param));
+ lookup_used = 1;
+ }
+ }
+ /* chain it all together */
+ m_asconf_chk->m_next = m_asconf;
+ m_asconf_chk->m_pkthdr.len = m_asconf_chk->m_len + m_asconf->m_len;
+	acp->ch.chunk_length = htons(m_asconf_chk->m_pkthdr.len);
+
+ /* update "sent" flag */
+ stcb->asoc.asconf_sent++;
+
+ return (m_asconf_chk);
+}
+
+/*
+ * section to handle address changes before an association is up eg. changes
+ * during INIT/INIT-ACK/COOKIE-ECHO handshake
+ */
+
+/*
+ * processes the (local) addresses in the INIT-ACK chunk
+ */
+static void
+sctp_process_initack_addresses(struct sctp_tcb *stcb, struct mbuf *m,
+ unsigned int offset, unsigned int length)
+{
+ struct sctp_paramhdr tmp_param, *ph;
+ uint16_t plen, ptype;
+ struct sctp_ipv6addr_param addr_store;
+ struct sockaddr_in6 sin6;
+ struct sockaddr_in sin;
+ struct sockaddr *sa;
+ struct ifaddr *ifa;
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF2) {
+ printf("processing init-ack addresses\n");
+ }
+#endif /* SCTP_DEBUG */
+
+ /* convert to upper bound */
+ length += offset;
+
+ if ((offset + sizeof(struct sctp_paramhdr)) > length) {
+ return;
+ }
+ /* init the addresses */
+ bzero(&sin6, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_len = sizeof(sin6);
+ sin6.sin6_port = stcb->rport;
+
+ bzero(&sin, sizeof(sin));
+ sin.sin_len = sizeof(sin);
+ sin.sin_family = AF_INET;
+ sin.sin_port = stcb->rport;
+
+ /* go through the addresses in the init-ack */
+ ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_paramhdr), (uint8_t *) & tmp_param);
+ while (ph != NULL) {
+ ptype = ntohs(ph->param_type);
+ plen = ntohs(ph->param_length);
+ if (ptype == SCTP_IPV6_ADDRESS) {
+ struct sctp_ipv6addr_param *a6p;
+
+ /* get the entire IPv6 address param */
+ a6p = (struct sctp_ipv6addr_param *)
+ sctp_m_getptr(m, offset,
+ sizeof(struct sctp_ipv6addr_param),
+ (uint8_t *) & addr_store);
+ if (plen != sizeof(struct sctp_ipv6addr_param) ||
+ a6p == NULL) {
+ return;
+ }
+ memcpy(&sin6.sin6_addr, a6p->addr,
+ sizeof(struct in6_addr));
+ sa = (struct sockaddr *)&sin6;
+ } else if (ptype == SCTP_IPV4_ADDRESS) {
+ struct sctp_ipv4addr_param *a4p;
+
+ /* get the entire IPv4 address param */
+ a4p = (struct sctp_ipv4addr_param *)sctp_m_getptr(m, offset, sizeof(struct sctp_ipv4addr_param), (uint8_t *) & addr_store);
+ if (plen != sizeof(struct sctp_ipv4addr_param) ||
+ a4p == NULL) {
+ return;
+ }
+ sin.sin_addr.s_addr = a4p->addr;
+ sa = (struct sockaddr *)&sin;
+ } else {
+ goto next_addr;
+ }
+
+ /* see if this address really (still) exists */
+ ifa = sctp_find_ifa_by_addr(sa);
+ if (ifa == NULL) {
+ /* address doesn't exist anymore */
+ int status;
+
+ /* are ASCONFs allowed ? */
+ if ((sctp_is_feature_on(stcb->sctp_ep,
+ SCTP_PCB_FLAGS_DO_ASCONF)) &&
+ stcb->asoc.peer_supports_asconf) {
+ /* queue an ASCONF DEL_IP_ADDRESS */
+ status = sctp_asconf_queue_add_sa(stcb, sa,
+ SCTP_DEL_IP_ADDRESS);
+ /*
+ * if queued ok, and in correct state, set
+ * the ASCONF timer
+ */
+ if (status == 0 &&
+ SCTP_GET_STATE(&stcb->asoc) ==
+ SCTP_STATE_OPEN) {
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
+ stcb->sctp_ep, stcb,
+ stcb->asoc.primary_destination);
+ }
+ }
+ } else {
+ /* address still exists */
+ /*
+ * if subset bound, ep addr's managed by default if
+ * not doing ASCONF, add the address to the assoc
+ */
+ if ((stcb->sctp_ep->sctp_flags &
+ SCTP_PCB_FLAGS_BOUNDALL) == 0 &&
+ (sctp_is_feature_off(stcb->sctp_ep,
+ SCTP_PCB_FLAGS_DO_ASCONF))) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF2) {
+ printf("process_initack_addrs: adding local addr to asoc\n");
+ }
+#endif /* SCTP_DEBUG */
+ sctp_add_local_addr_assoc(stcb, ifa);
+ }
+ }
+
+next_addr:
+ /*
+ * Sanity check: Make sure the length isn't 0, otherwise
+ * we'll be stuck in this loop for a long time...
+ */
+ if (SCTP_SIZE32(plen) == 0) {
+#ifdef SCTP_DEBUG
+ printf("process_initack_addrs: bad len (%d) type=%xh\n",
+ plen, ptype);
+#endif
+ return;
+ }
+ /* get next parameter */
+ offset += SCTP_SIZE32(plen);
+ if ((offset + sizeof(struct sctp_paramhdr)) > length)
+ return;
+ ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_paramhdr), (uint8_t *) & tmp_param);
+ } /* while */
+}
+
+/* FIX ME: need to verify return result for v6 address type if v6 disabled */
+/*
+ * checks to see if a specific address is in the initack address list;
+ * returns 1 if found, 0 if not
+ */
+static uint32_t
+sctp_addr_in_initack(struct sctp_tcb *stcb, struct mbuf *m, uint32_t offset,
+ uint32_t length, struct sockaddr *sa)
+{
+ struct sctp_paramhdr tmp_param, *ph;
+ uint16_t plen, ptype;
+ struct sctp_ipv6addr_param addr_store;
+ struct sockaddr_in *sin;
+ struct sctp_ipv4addr_param *a4p;
+
+#ifdef INET6
+ struct sockaddr_in6 *sin6, sin6_tmp;
+ struct sctp_ipv6addr_param *a6p;
+
+#endif /* INET6 */
+
+ if (
+#ifdef INET6
+ (sa->sa_family != AF_INET6) &&
+#endif /* INET6 */
+ (sa->sa_family != AF_INET))
+ return (0);
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF2) {
+ printf("find_initack_addr: starting search for ");
+ sctp_print_address(sa);
+ }
+#endif /* SCTP_DEBUG */
+ /* convert to upper bound */
+ length += offset;
+
+ if ((offset + sizeof(struct sctp_paramhdr)) > length) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("find_initack_addr: invalid offset?\n");
+ }
+#endif /* SCTP_DEBUG */
+ return (0);
+ }
+ /* go through the addresses in the init-ack */
+ ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_paramhdr), (uint8_t *) & tmp_param);
+ while (ph != NULL) {
+ ptype = ntohs(ph->param_type);
+ plen = ntohs(ph->param_length);
+#ifdef INET6
+ if (ptype == SCTP_IPV6_ADDRESS && sa->sa_family == AF_INET6) {
+ /* get the entire IPv6 address param */
+ a6p = (struct sctp_ipv6addr_param *)
+ sctp_m_getptr(m, offset,
+ sizeof(struct sctp_ipv6addr_param),
+ (uint8_t *) & addr_store);
+ if (plen != sizeof(struct sctp_ipv6addr_param) ||
+			    a6p == NULL) {
+ return (0);
+ }
+ sin6 = (struct sockaddr_in6 *)sa;
+ if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
+ /* create a copy and clear scope */
+ memcpy(&sin6_tmp, sin6,
+ sizeof(struct sockaddr_in6));
+ sin6 = &sin6_tmp;
+ in6_clearscope(&sin6->sin6_addr);
+ }
+ if (memcmp(&sin6->sin6_addr, a6p->addr,
+ sizeof(struct in6_addr)) == 0) {
+ /* found it */
+ return (1);
+ }
+ } else
+#endif /* INET6 */
+
+ if (ptype == SCTP_IPV4_ADDRESS &&
+ sa->sa_family == AF_INET) {
+ /* get the entire IPv4 address param */
+ a4p = (struct sctp_ipv4addr_param *)sctp_m_getptr(m,
+ offset, sizeof(struct sctp_ipv4addr_param),
+ (uint8_t *) & addr_store);
+ if (plen != sizeof(struct sctp_ipv4addr_param) ||
+			    a4p == NULL) {
+ return (0);
+ }
+ sin = (struct sockaddr_in *)sa;
+ if (sin->sin_addr.s_addr == a4p->addr) {
+ /* found it */
+ return (1);
+ }
+ }
+ /* get next parameter */
+ offset += SCTP_SIZE32(plen);
+ if (offset + sizeof(struct sctp_paramhdr) > length)
+ return (0);
+ ph = (struct sctp_paramhdr *)
+ sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr),
+ (uint8_t *) & tmp_param);
+ } /* while */
+ /* not found! */
+ return (0);
+}
+
+/*
+ * makes sure that the current endpoint local addr list is consistent with
+ * the new association (e.g. subset bound, asconf allowed); adds addresses
+ * as necessary
+ */
+static void
+sctp_check_address_list_ep(struct sctp_tcb *stcb, struct mbuf *m, int offset,
+ int length, struct sockaddr *init_addr)
+{
+ struct sctp_laddr *laddr;
+
+ /* go through the endpoint list */
+ LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) {
+ /* be paranoid and validate the laddr */
+ if (laddr->ifa == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("check_addr_list_ep: laddr->ifa is NULL");
+ }
+#endif /* SCTP_DEBUG */
+ continue;
+ }
+ if (laddr->ifa->ifa_addr == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_ASCONF1) {
+ printf("check_addr_list_ep: laddr->ifa->ifa_addr is NULL");
+ }
+#endif /* SCTP_DEBUG */
+ continue;
+ }
+ /* do i have it implicitly? */
+ if (sctp_cmpaddr(laddr->ifa->ifa_addr, init_addr)) {
+ continue;
+ }
+ /* check to see if in the init-ack */
+ if (!sctp_addr_in_initack(stcb, m, offset, length,
+ laddr->ifa->ifa_addr)) {
+ /* try to add it */
+ sctp_addr_mgmt_assoc(stcb->sctp_ep, stcb, laddr->ifa,
+ SCTP_ADD_IP_ADDRESS);
+ }
+ }
+}
+
+/*
+ * makes sure that the current kernel address list is consistent with the new
+ * association (with all addrs bound); adds addresses as necessary
+ */
+static void
+sctp_check_address_list_all(struct sctp_tcb *stcb, struct mbuf *m, int offset,
+ int length, struct sockaddr *init_addr,
+ uint16_t local_scope, uint16_t site_scope,
+ uint16_t ipv4_scope, uint16_t loopback_scope)
+{
+ struct ifnet *ifn;
+ struct ifaddr *ifa;
+
+ /* go through all our known interfaces */
+ TAILQ_FOREACH(ifn, &ifnet, if_list) {
+ if (loopback_scope == 0 && ifn->if_type == IFT_LOOP) {
+ /* skip loopback interface */
+ continue;
+ }
+ /* go through each interface address */
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ /* do i have it implicitly? */
+ if (sctp_cmpaddr(ifa->ifa_addr, init_addr)) {
+ continue;
+ }
+ /* check to see if in the init-ack */
+ if (!sctp_addr_in_initack(stcb, m, offset, length,
+ ifa->ifa_addr)) {
+ /* try to add it */
+ sctp_addr_mgmt_assoc(stcb->sctp_ep, stcb,
+ ifa, SCTP_ADD_IP_ADDRESS);
+ }
+ } /* end foreach ifa */
+ } /* end foreach ifn */
+}
+
+/*
+ * validates an init-ack chunk (from a cookie-echo) with current addresses;
+ * adds addresses from the init-ack into our local address list, if needed;
+ * queues asconf adds/deletes addresses as needed and makes appropriate
+ * list changes for source address selection.
+ * m, offset: points to the start of the address list in an init-ack chunk
+ * length: total length of the address params only
+ * init_addr: address where my INIT-ACK was sent from
+ */
+void
+sctp_check_address_list(struct sctp_tcb *stcb, struct mbuf *m, int offset,
+ int length, struct sockaddr *init_addr,
+ uint16_t local_scope, uint16_t site_scope,
+ uint16_t ipv4_scope, uint16_t loopback_scope)
+{
+ /* process the local addresses in the initack */
+ sctp_process_initack_addresses(stcb, m, offset, length);
+
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /* bound all case */
+ sctp_check_address_list_all(stcb, m, offset, length, init_addr,
+ local_scope, site_scope, ipv4_scope, loopback_scope);
+ } else {
+ /* subset bound case */
+ if (sctp_is_feature_on(stcb->sctp_ep,
+ SCTP_PCB_FLAGS_DO_ASCONF)) {
+ /* asconf's allowed */
+ sctp_check_address_list_ep(stcb, m, offset, length,
+ init_addr);
+ }
+ /* else, no asconfs allowed, so what we sent is what we get */
+ }
+}
+
+/*
+ * sctp_bindx() support
+ */
+uint32_t
+sctp_addr_mgmt_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa, uint16_t type)
+{
+ struct ifaddr *ifa;
+
+ if (sa->sa_len == 0)
+ return (EINVAL);
+
+ ifa = sctp_find_ifa_by_addr(sa);
+ if (ifa != NULL) {
+#ifdef INET6
+ if (ifa->ifa_addr->sa_family == AF_INET6) {
+ struct in6_ifaddr *ifa6;
+
+ ifa6 = (struct in6_ifaddr *)ifa;
+ if (IFA6_IS_DEPRECATED(ifa6) ||
+ (ifa6->ia6_flags & (IN6_IFF_DETACHED |
+ IN6_IFF_ANYCAST | IN6_IFF_NOTREADY))) {
+ /* Can't bind a non-existent addr. */
+ return (EINVAL);
+ }
+ }
+#endif /* INET6 */
+ /* add this address */
+ sctp_addr_mgmt_ep(inp, ifa, type);
+ } else {
+ /* invalid address! */
+ return (EADDRNOTAVAIL);
+ }
+ return (0);
+}
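+
+/*
+ * Hypothetical usage sketch (illustrative only): an sctp_bindx()-style
+ * add request for an already-configured interface address might look
+ * like:
+ *
+ *	error = sctp_addr_mgmt_ep_sa(inp, (struct sockaddr *)&sin,
+ *	    SCTP_ADD_IP_ADDRESS);
+ *
+ * deprecated/detached/anycast/not-ready IPv6 addresses fail with EINVAL,
+ * and addresses not found on any interface fail with EADDRNOTAVAIL.
+ */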
+
+void
+sctp_addr_change(struct ifaddr *ifa, int cmd)
+{
+ struct sctp_laddr *wi;
+
+ wi = (struct sctp_laddr *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr);
+ if (wi == NULL) {
+ /*
+		 * Gak, what can we do? We have lost an address change; can
+ * you say HOSED?
+ */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_PCB1) {
+			printf("Lost an address change ???\n");
+ }
+#endif /* SCTP_DEBUG */
+ return;
+ }
+ SCTP_INCR_LADDR_COUNT();
+ bzero(wi, sizeof(*wi));
+ wi->ifa = ifa;
+ IFAREF(ifa);
+
+ wi->action = cmd;
+ SCTP_IPI_ADDR_LOCK();
+ /*
+ * Should this really be a tailq? As it is we will process the
+ * newest first :-0
+ */
+ LIST_INSERT_HEAD(&sctppcbinfo.addr_wq, wi, sctp_nxt_addr);
+ sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
+ (struct sctp_inpcb *)NULL,
+ (struct sctp_tcb *)NULL,
+ (struct sctp_nets *)NULL);
+ SCTP_IPI_ADDR_UNLOCK();
+}
diff --git a/sys/netinet/sctp_asconf.h b/sys/netinet/sctp_asconf.h
new file mode 100644
index 0000000..e3e7df4
--- /dev/null
+++ b/sys/netinet/sctp_asconf.h
@@ -0,0 +1,80 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_asconf.h,v 1.8 2005/03/06 16:04:16 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef _NETINET_SCTP_ASCONF_H_
+#define _NETINET_SCTP_ASCONF_H_
+
+
+#include <sys/malloc.h>
+
+
+
+#if defined(_KERNEL)
+
+extern void sctp_asconf_cleanup(struct sctp_tcb *, struct sctp_nets *);
+
+extern struct mbuf *sctp_compose_asconf(struct sctp_tcb *);
+
+extern void
+sctp_handle_asconf(struct mbuf *, unsigned int, struct sctp_asconf_chunk *,
+ struct sctp_tcb *);
+
+extern void
+sctp_handle_asconf_ack(struct mbuf *, int,
+ struct sctp_asconf_ack_chunk *, struct sctp_tcb *, struct sctp_nets *);
+
+extern uint32_t
+sctp_addr_mgmt_ep_sa(struct sctp_inpcb *, struct sockaddr *,
+ uint16_t);
+
+extern void sctp_add_ip_address(struct ifaddr *);
+
+extern void sctp_delete_ip_address(struct ifaddr *);
+
+extern void sctp_addr_change(struct ifaddr *ifa, int cmd);
+
+extern int32_t
+sctp_set_primary_ip_address_sa(struct sctp_tcb *,
+ struct sockaddr *);
+
+extern void sctp_set_primary_ip_address(struct ifaddr *);
+
+extern void
+sctp_check_address_list(struct sctp_tcb *, struct mbuf *, int, int,
+ struct sockaddr *, uint16_t, uint16_t, uint16_t, uint16_t);
+
+#endif /* _KERNEL */
+
+#endif /* !_NETINET_SCTP_ASCONF_H_ */
diff --git a/sys/netinet/sctp_auth.c b/sys/netinet/sctp_auth.c
new file mode 100644
index 0000000..a87f47f
--- /dev/null
+++ b/sys/netinet/sctp_auth.c
@@ -0,0 +1,2389 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_sctp.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/domain.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#include <netinet/in_pcb.h>
+#include <netinet/in_var.h>
+#include <netinet/ip_var.h>
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_auth.h>
+
+#ifdef SCTP_DEBUG
+extern uint32_t sctp_debug_on;
+
+#define SCTP_AUTH_DEBUG (sctp_debug_on & SCTP_DEBUG_AUTH1)
+#define SCTP_AUTH_DEBUG2 (sctp_debug_on & SCTP_DEBUG_AUTH2)
+#endif /* SCTP_DEBUG */
+
+
+inline void
+sctp_clear_chunklist(sctp_auth_chklist_t * chklist)
+{
+ bzero(chklist, sizeof(*chklist));
+ /* chklist->num_chunks = 0; */
+}
+
+sctp_auth_chklist_t *
+sctp_alloc_chunklist(void)
+{
+ sctp_auth_chklist_t *chklist;
+
+ SCTP_MALLOC(chklist, sctp_auth_chklist_t *, sizeof(*chklist),
+ "AUTH chklist");
+ if (chklist == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_AUTH_DEBUG) {
+ printf("sctp_alloc_chunklist: failed to get memory!\n");
+ }
+#endif /* SCTP_DEBUG */
+ } else {
+ sctp_clear_chunklist(chklist);
+ }
+ return (chklist);
+}
+
+void
+sctp_free_chunklist(sctp_auth_chklist_t * list)
+{
+ if (list != NULL)
+ SCTP_FREE(list);
+}
+
+sctp_auth_chklist_t *
+sctp_copy_chunklist(sctp_auth_chklist_t * list)
+{
+ sctp_auth_chklist_t *new_list;
+
+ if (list == NULL)
+ return (NULL);
+
+ /* get a new list */
+ new_list = sctp_alloc_chunklist();
+ if (new_list == NULL)
+ return (NULL);
+ /* copy it */
+ bcopy(list, new_list, sizeof(*new_list));
+
+ return (new_list);
+}
+
+
+/*
+ * add a chunk to the required chunks list
+ */
+int
+sctp_auth_add_chunk(uint8_t chunk, sctp_auth_chklist_t * list)
+{
+ if (list == NULL)
+ return (-1);
+
+ /* is chunk restricted? */
+ if ((chunk == SCTP_INITIATION) ||
+ (chunk == SCTP_INITIATION_ACK) ||
+ (chunk == SCTP_SHUTDOWN_COMPLETE) ||
+ (chunk == SCTP_AUTHENTICATION)) {
+ return (-1);
+ }
+ if (list->chunks[chunk] == 0) {
+ list->chunks[chunk] = 1;
+ list->num_chunks++;
+#ifdef SCTP_DEBUG
+ if (SCTP_AUTH_DEBUG)
+ printf("SCTP: added chunk %u (0x%02x) to Auth list\n",
+ chunk, chunk);
+#endif
+ }
+ return (0);
+}
+
+/*
+ * delete a chunk from the required chunks list
+ */
+int
+sctp_auth_delete_chunk(uint8_t chunk, sctp_auth_chklist_t * list)
+{
+ if (list == NULL)
+ return (-1);
+
+ /* is chunk restricted? */
+ if ((chunk == SCTP_ASCONF) ||
+ (chunk == SCTP_ASCONF_ACK)) {
+ return (-1);
+ }
+ if (list->chunks[chunk] == 1) {
+ list->chunks[chunk] = 0;
+ list->num_chunks--;
+#ifdef SCTP_DEBUG
+ if (SCTP_AUTH_DEBUG)
+ printf("SCTP: deleted chunk %u (0x%02x) from Auth list\n",
+ chunk, chunk);
+#endif
+ }
+ return (0);
+}
+
+inline int
+sctp_auth_get_chklist_size(const sctp_auth_chklist_t * list)
+{
+ if (list == NULL)
+ return (0);
+ else
+ return (list->num_chunks);
+}
+
+/*
+ * set the default list of chunks requiring AUTH
+ */
+void
+sctp_auth_set_default_chunks(sctp_auth_chklist_t * list)
+{
+ sctp_auth_add_chunk(SCTP_ASCONF, list);
+ sctp_auth_add_chunk(SCTP_ASCONF_ACK, list);
+}
+
+/*
+ * return the current number and list of required chunks; caller must
+ * guarantee ptr has space for up to 256 bytes
+ */
+int
+sctp_serialize_auth_chunks(const sctp_auth_chklist_t * list, uint8_t * ptr)
+{
+ int i, count = 0;
+
+ if (list == NULL)
+ return (0);
+
+ for (i = 0; i < 256; i++) {
+ if (list->chunks[i] != 0) {
+ *ptr++ = i;
+ count++;
+ }
+ }
+ return (count);
+}
+
+int
+sctp_pack_auth_chunks(const sctp_auth_chklist_t * list, uint8_t * ptr)
+{
+ int i, size = 0;
+
+ if (list == NULL)
+ return (0);
+
+ if (list->num_chunks <= 32) {
+ /* just list them, one byte each */
+ for (i = 0; i < 256; i++) {
+ if (list->chunks[i] != 0) {
+ *ptr++ = i;
+ size++;
+ }
+ }
+ } else {
+ int index, offset;
+
+ /* pack into a 32 byte bitfield */
+ for (i = 0; i < 256; i++) {
+ if (list->chunks[i] != 0) {
+ index = i / 8;
+ offset = i % 8;
+ ptr[index] |= (1 << offset);
+ }
+ }
+ size = 32;
+ }
+ return (size);
+}
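+
+/*
+ * Worked example (illustrative only): with more than 32 chunk types in
+ * the list, the 256 possible types are packed into a 32-byte bitfield;
+ * chunk type 73 (0x49) maps to
+ *
+ *	index  = 73 / 8 = 9
+ *	offset = 73 % 8 = 1
+ *	ptr[9] |= (1 << 1);
+ *
+ * with 32 or fewer types, each type is simply emitted as a single byte.
+ */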
+
+int
+sctp_unpack_auth_chunks(const uint8_t * ptr, uint8_t num_chunks,
+ sctp_auth_chklist_t * list)
+{
+ int i;
+ int size;
+
+ if (list == NULL)
+ return (0);
+
+ if (num_chunks <= 32) {
+ /* just pull them, one byte each */
+ for (i = 0; i < num_chunks; i++) {
+ sctp_auth_add_chunk(*ptr++, list);
+ }
+ size = num_chunks;
+ } else {
+ int index, offset;
+
+ /* unpack from a 32 byte bitfield */
+ for (index = 0; index < 32; index++) {
+ for (offset = 0; offset < 8; offset++) {
+ if (ptr[index] & (1 << offset)) {
+ sctp_auth_add_chunk((index * 8) + offset, list);
+ }
+ }
+ }
+ size = 32;
+ }
+ return (size);
+}
+
+
+/*
+ * allocate structure space for a key of length keylen
+ */
+sctp_key_t *
+sctp_alloc_key(uint32_t keylen)
+{
+ sctp_key_t *new_key;
+
+ SCTP_MALLOC(new_key, sctp_key_t *, sizeof(*new_key) + keylen,
+ "AUTH key");
+ if (new_key == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+ new_key->keylen = keylen;
+ return (new_key);
+}
+
+void
+sctp_free_key(sctp_key_t * key)
+{
+ if (key != NULL)
+ SCTP_FREE(key);
+}
+
+void
+sctp_print_key(sctp_key_t * key, const char *str)
+{
+ uint32_t i;
+
+ if (key == NULL) {
+ printf("%s: [Null key]\n", str);
+ return;
+ }
+ printf("%s: len %u, ", str, key->keylen);
+ if (key->keylen) {
+ for (i = 0; i < key->keylen; i++)
+ printf("%02x", key->key[i]);
+ printf("\n");
+ } else {
+ printf("[Null key]\n");
+ }
+}
+
+void
+sctp_show_key(sctp_key_t * key, const char *str)
+{
+ uint32_t i;
+
+ if (key == NULL) {
+ printf("%s: [Null key]\n", str);
+ return;
+ }
+ printf("%s: len %u, ", str, key->keylen);
+ if (key->keylen) {
+ for (i = 0; i < key->keylen; i++)
+ printf("%02x", key->key[i]);
+ printf("\n");
+ } else {
+ printf("[Null key]\n");
+ }
+}
+
+static inline uint32_t
+sctp_get_keylen(sctp_key_t * key)
+{
+ if (key != NULL)
+ return (key->keylen);
+ else
+ return (0);
+}
+
+/*
+ * generate a new random key of length 'keylen'
+ */
+sctp_key_t *
+sctp_generate_random_key(uint32_t keylen)
+{
+ sctp_key_t *new_key;
+
+ /* validate keylen */
+ if (keylen > SCTP_AUTH_RANDOM_SIZE_MAX)
+ keylen = SCTP_AUTH_RANDOM_SIZE_MAX;
+
+ new_key = sctp_alloc_key(keylen);
+ if (new_key == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+ sctp_read_random(new_key->key, keylen);
+ new_key->keylen = keylen;
+ return (new_key);
+}
+
+sctp_key_t *
+sctp_set_key(uint8_t * key, uint32_t keylen)
+{
+ sctp_key_t *new_key;
+
+ new_key = sctp_alloc_key(keylen);
+ if (new_key == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+ bcopy(key, new_key->key, keylen);
+ return (new_key);
+}
+
+/*
+ * given two keys of variable size, compute which key is "larger/smaller"
+ * returns:  1 if key1 > key2,
+ *          -1 if key1 < key2,
+ *           0 if key1 = key2
+ */
+static int
+sctp_compare_key(sctp_key_t * key1, sctp_key_t * key2)
+{
+ uint32_t maxlen;
+ uint32_t i;
+ uint32_t key1len, key2len;
+ uint8_t *key_1, *key_2;
+ uint8_t temp[SCTP_AUTH_RANDOM_SIZE_MAX];
+
+ /* sanity/length check */
+ key1len = sctp_get_keylen(key1);
+ key2len = sctp_get_keylen(key2);
+ if ((key1len == 0) && (key2len == 0))
+ return (0);
+ else if (key1len == 0)
+ return (-1);
+ else if (key2len == 0)
+ return (1);
+
+ if (key1len != key2len) {
+ if (key1len >= key2len)
+ maxlen = key1len;
+ else
+ maxlen = key2len;
+ bzero(temp, maxlen);
+ if (key1len < maxlen) {
+ /* prepend zeroes to key1 */
+ bcopy(key1->key, temp + (maxlen - key1len), key1len);
+ key_1 = temp;
+ key_2 = key2->key;
+ } else {
+ /* prepend zeroes to key2 */
+ bcopy(key2->key, temp + (maxlen - key2len), key2len);
+ key_1 = key1->key;
+ key_2 = temp;
+ }
+ } else {
+ maxlen = key1len;
+ key_1 = key1->key;
+ key_2 = key2->key;
+ }
+
+ for (i = 0; i < maxlen; i++) {
+ if (*key_1 > *key_2)
+ return (1);
+ else if (*key_1 < *key_2)
+ return (-1);
+ key_1++;
+ key_2++;
+ }
+
+ /* keys are equal value, so check lengths */
+ if (key1len == key2len)
+ return (0);
+ else if (key1len < key2len)
+ return (-1);
+ else
+ return (1);
+}
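+
+/*
+ * Worked example (illustrative only): comparing key1 = {0x01} against
+ * key2 = {0x00, 0x01}, key1 is zero-prepended to {0x00, 0x01}; the byte
+ * values then compare equal, so the shorter original key1 is considered
+ * "smaller" and the function returns -1.
+ */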
+
+/*
+ * generate the concatenated keying material based on the two keys and the
+ * shared key (if available). draft-ietf-tsvwg-auth specifies the exact
+ * order for concatenation
+ */
+sctp_key_t *
+sctp_compute_hashkey(sctp_key_t * key1, sctp_key_t * key2, sctp_key_t * shared)
+{
+ uint32_t keylen;
+ sctp_key_t *new_key;
+ uint8_t *key_ptr;
+
+ keylen = sctp_get_keylen(key1) + sctp_get_keylen(key2) +
+ sctp_get_keylen(shared);
+
+ if (keylen > 0) {
+ /* get space for the new key */
+ new_key = sctp_alloc_key(keylen);
+ if (new_key == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+ new_key->keylen = keylen;
+ key_ptr = new_key->key;
+ } else {
+ /* all keys empty/null?! */
+ return (NULL);
+ }
+
+ /* concatenate the keys */
+ if (sctp_compare_key(key1, key2) <= 0) {
+ /* key is key1 + shared + key2 */
+ if (sctp_get_keylen(key1)) {
+ bcopy(key1->key, key_ptr, key1->keylen);
+ key_ptr += key1->keylen;
+ }
+ if (sctp_get_keylen(shared)) {
+ bcopy(shared->key, key_ptr, shared->keylen);
+ key_ptr += shared->keylen;
+ }
+ if (sctp_get_keylen(key2)) {
+ bcopy(key2->key, key_ptr, key2->keylen);
+ key_ptr += key2->keylen;
+ }
+ } else {
+ /* key is key2 + shared + key1 */
+ if (sctp_get_keylen(key2)) {
+ bcopy(key2->key, key_ptr, key2->keylen);
+ key_ptr += key2->keylen;
+ }
+ if (sctp_get_keylen(shared)) {
+ bcopy(shared->key, key_ptr, shared->keylen);
+ key_ptr += shared->keylen;
+ }
+ if (sctp_get_keylen(key1)) {
+ bcopy(key1->key, key_ptr, key1->keylen);
+ key_ptr += key1->keylen;
+ }
+ }
+ return (new_key);
+}
+
+
+sctp_sharedkey_t *
+sctp_alloc_sharedkey(void)
+{
+ sctp_sharedkey_t *new_key;
+
+ SCTP_MALLOC(new_key, sctp_sharedkey_t *, sizeof(*new_key),
+ "AUTH skey");
+ if (new_key == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+ new_key->keyid = 0;
+ new_key->key = NULL;
+ return (new_key);
+}
+
+void
+sctp_free_sharedkey(sctp_sharedkey_t * skey)
+{
+ if (skey != NULL) {
+ if (skey->key != NULL)
+ sctp_free_key(skey->key);
+ SCTP_FREE(skey);
+ }
+}
+
+sctp_sharedkey_t *
+sctp_find_sharedkey(struct sctp_keyhead *shared_keys, uint16_t key_id)
+{
+ sctp_sharedkey_t *skey;
+
+ LIST_FOREACH(skey, shared_keys, next) {
+ if (skey->keyid == key_id)
+ return (skey);
+ }
+ return (NULL);
+}
+
+void
+sctp_insert_sharedkey(struct sctp_keyhead *shared_keys,
+ sctp_sharedkey_t * new_skey)
+{
+ sctp_sharedkey_t *skey;
+
+ if ((shared_keys == NULL) || (new_skey == NULL))
+ return;
+
+ /* insert into an empty list? */
+ if (LIST_EMPTY(shared_keys)) {
+ LIST_INSERT_HEAD(shared_keys, new_skey, next);
+ return;
+ }
+ /* insert into the existing list, ordered by key id */
+ LIST_FOREACH(skey, shared_keys, next) {
+ if (new_skey->keyid < skey->keyid) {
+ /* insert it before here */
+ LIST_INSERT_BEFORE(skey, new_skey, next);
+ return;
+ } else if (new_skey->keyid == skey->keyid) {
+ /* replace the existing key */
+#ifdef SCTP_DEBUG
+ if (SCTP_AUTH_DEBUG)
+ printf("replacing shared key id %u\n", new_skey->keyid);
+#endif
+ LIST_INSERT_BEFORE(skey, new_skey, next);
+ LIST_REMOVE(skey, next);
+ sctp_free_sharedkey(skey);
+ return;
+ }
+ if (LIST_NEXT(skey, next) == NULL) {
+ /* belongs at the end of the list */
+ LIST_INSERT_AFTER(skey, new_skey, next);
+ return;
+ }
+ }
+}
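+
+/*
+ * Example (illustrative only): inserting key id 4 into the list
+ * {1, 3, 5} yields {1, 3, 4, 5}; inserting key id 3 again replaces the
+ * existing entry, and the old shared key is freed.
+ */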
+
+static sctp_sharedkey_t *
+sctp_copy_sharedkey(const sctp_sharedkey_t * skey)
+{
+ sctp_sharedkey_t *new_skey;
+
+ if (skey == NULL)
+ return (NULL);
+ new_skey = sctp_alloc_sharedkey();
+ if (new_skey == NULL)
+ return (NULL);
+ if (skey->key != NULL)
+ new_skey->key = sctp_set_key(skey->key->key, skey->key->keylen);
+ else
+ new_skey->key = NULL;
+ new_skey->keyid = skey->keyid;
+ return (new_skey);
+}
+
+int
+sctp_copy_skeylist(const struct sctp_keyhead *src, struct sctp_keyhead *dest)
+{
+ sctp_sharedkey_t *skey, *new_skey;
+ int count = 0;
+
+ if ((src == NULL) || (dest == NULL))
+ return (0);
+ LIST_FOREACH(skey, src, next) {
+ new_skey = sctp_copy_sharedkey(skey);
+ if (new_skey != NULL) {
+ sctp_insert_sharedkey(dest, new_skey);
+ count++;
+ }
+ }
+ return (count);
+}
+
+
+sctp_hmaclist_t *
+sctp_alloc_hmaclist(uint8_t num_hmacs)
+{
+ sctp_hmaclist_t *new_list;
+ int alloc_size;
+
+ alloc_size = sizeof(*new_list) + num_hmacs * sizeof(new_list->hmac[0]);
+ SCTP_MALLOC(new_list, sctp_hmaclist_t *, alloc_size,
+ "AUTH HMAC list");
+ if (new_list == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+ new_list->max_algo = num_hmacs;
+ new_list->num_algo = 0;
+ return (new_list);
+}
+
+void
+sctp_free_hmaclist(sctp_hmaclist_t * list)
+{
+ if (list != NULL) {
+ SCTP_FREE(list);
+ list = NULL;
+ }
+}
+
+int
+sctp_auth_add_hmacid(sctp_hmaclist_t * list, uint16_t hmac_id)
+{
+ if (list == NULL)
+ return (-1);
+ if (list->num_algo == list->max_algo) {
+#ifdef SCTP_DEBUG
+ if (SCTP_AUTH_DEBUG)
+ printf("SCTP: HMAC id list full, ignoring add %u\n", hmac_id);
+#endif
+ return (-1);
+ }
+ if ((hmac_id != SCTP_AUTH_HMAC_ID_SHA1) &&
+#ifdef HAVE_SHA224
+ (hmac_id != SCTP_AUTH_HMAC_ID_SHA224) &&
+#endif
+#ifdef HAVE_SHA2
+ (hmac_id != SCTP_AUTH_HMAC_ID_SHA256) &&
+ (hmac_id != SCTP_AUTH_HMAC_ID_SHA384) &&
+ (hmac_id != SCTP_AUTH_HMAC_ID_SHA512) &&
+#endif
+ (hmac_id != SCTP_AUTH_HMAC_ID_MD5)) {
+ return (-1);
+ }
+#ifdef SCTP_DEBUG
+ if (SCTP_AUTH_DEBUG)
+ printf("SCTP: add HMAC id %u to list\n", hmac_id);
+#endif
+ list->hmac[list->num_algo++] = hmac_id;
+ return (0);
+}
+
+sctp_hmaclist_t *
+sctp_copy_hmaclist(sctp_hmaclist_t * list)
+{
+ sctp_hmaclist_t *new_list;
+ int i;
+
+ if (list == NULL)
+ return (NULL);
+ /* get a new list */
+ new_list = sctp_alloc_hmaclist(list->max_algo);
+ if (new_list == NULL)
+ return (NULL);
+ /* copy it */
+ new_list->max_algo = list->max_algo;
+ new_list->num_algo = list->num_algo;
+ for (i = 0; i < list->num_algo; i++)
+ new_list->hmac[i] = list->hmac[i];
+ return (new_list);
+}
+
+sctp_hmaclist_t *
+sctp_default_supported_hmaclist(void)
+{
+ sctp_hmaclist_t *new_list;
+
+ new_list = sctp_alloc_hmaclist(2);
+ if (new_list == NULL)
+ return (NULL);
+ sctp_auth_add_hmacid(new_list, SCTP_AUTH_HMAC_ID_SHA1);
+ sctp_auth_add_hmacid(new_list, SCTP_AUTH_HMAC_ID_SHA256);
+ return (new_list);
+}
+
+/*
+ * HMAC algos are listed in priority/preference order; find the best HMAC
+ * id to use for the peer based on local support
+ */
+uint16_t
+sctp_negotiate_hmacid(sctp_hmaclist_t * peer, sctp_hmaclist_t * local)
+{
+ int i, j;
+
+ if ((local == NULL) || (peer == NULL))
+ return (SCTP_AUTH_HMAC_ID_RSVD);
+
+ for (i = 0; i < peer->num_algo; i++) {
+ for (j = 0; j < local->num_algo; j++) {
+ if (peer->hmac[i] == local->hmac[j]) {
+#ifndef SCTP_AUTH_DRAFT_04
+ /* "skip" MD5 as it's been deprecated */
+ if (peer->hmac[i] == SCTP_AUTH_HMAC_ID_MD5)
+ continue;
+#endif
+
+ /* found the "best" one */
+#ifdef SCTP_DEBUG
+ if (SCTP_AUTH_DEBUG)
+ printf("SCTP: negotiated peer HMAC id %u\n", peer->hmac[i]);
+#endif
+ return (peer->hmac[i]);
+ }
+ }
+ }
+ /* didn't find one! */
+ return (SCTP_AUTH_HMAC_ID_RSVD);
+}
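+
+/*
+ * Example (illustrative only): if the peer lists {SHA256, SHA1} and the
+ * local list is {SHA1, SHA256}, SHA256 is negotiated, since the peer's
+ * list is walked in the peer's order of preference (with MD5 skipped
+ * unless SCTP_AUTH_DRAFT_04 compatibility is compiled in).
+ */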
+
+/*
+ * serialize the HMAC algo list and return space used; caller must
+ * guarantee ptr has appropriate space
+ */
+int
+sctp_serialize_hmaclist(sctp_hmaclist_t * list, uint8_t * ptr)
+{
+ int i;
+ uint16_t hmac_id;
+
+ if (list == NULL)
+ return (0);
+
+ for (i = 0; i < list->num_algo; i++) {
+ hmac_id = htons(list->hmac[i]);
+ bcopy(&hmac_id, ptr, sizeof(hmac_id));
+ ptr += sizeof(hmac_id);
+ }
+ return (list->num_algo * sizeof(hmac_id));
+}
+
+int
+sctp_verify_hmac_param(struct sctp_auth_hmac_algo *hmacs, uint32_t num_hmacs)
+{
+ uint32_t i;
+ uint16_t hmac_id;
+ uint32_t sha1_supported = 0;
+
+ for (i = 0; i < num_hmacs; i++) {
+ hmac_id = ntohs(hmacs->hmac_ids[i]);
+ if (hmac_id == SCTP_AUTH_HMAC_ID_SHA1)
+ sha1_supported = 1;
+ }
+	/* SHA1 is required to be supported */
+ if (sha1_supported == 0)
+ return (-1);
+ else
+ return (0);
+}
+
+sctp_authinfo_t *
+sctp_alloc_authinfo(void)
+{
+ sctp_authinfo_t *new_authinfo;
+
+ SCTP_MALLOC(new_authinfo, sctp_authinfo_t *, sizeof(*new_authinfo),
+ "AUTH info");
+ if (new_authinfo == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+	bzero(new_authinfo, sizeof(*new_authinfo));
+ return (new_authinfo);
+}
+
+void
+sctp_free_authinfo(sctp_authinfo_t * authinfo)
+{
+ if (authinfo == NULL)
+ return;
+
+ if (authinfo->random != NULL)
+ sctp_free_key(authinfo->random);
+ if (authinfo->peer_random != NULL)
+ sctp_free_key(authinfo->peer_random);
+ if (authinfo->assoc_key != NULL)
+ sctp_free_key(authinfo->assoc_key);
+ if (authinfo->recv_key != NULL)
+ sctp_free_key(authinfo->recv_key);
+
+ /* We are NOT dynamically allocating authinfo's right now... */
+ /* SCTP_FREE(authinfo); */
+}
+
+
+inline uint32_t
+sctp_get_auth_chunk_len(uint16_t hmac_algo)
+{
+ int size;
+
+ size = sizeof(struct sctp_auth_chunk) + sctp_get_hmac_digest_len(hmac_algo);
+ return (SCTP_SIZE32(size));
+}
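+
+/*
+ * Worked example (illustrative only): for SCTP_AUTH_HMAC_ID_SHA1 the
+ * digest is 20 bytes, so the AUTH chunk length is
+ * SCTP_SIZE32(sizeof(struct sctp_auth_chunk) + 20), i.e. rounded up to
+ * a multiple of 4.
+ */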
+
+uint32_t
+sctp_get_hmac_digest_len(uint16_t hmac_algo)
+{
+ switch (hmac_algo) {
+ case SCTP_AUTH_HMAC_ID_SHA1:
+ return (SCTP_AUTH_DIGEST_LEN_SHA1);
+ case SCTP_AUTH_HMAC_ID_MD5:
+ return (SCTP_AUTH_DIGEST_LEN_MD5);
+#ifdef HAVE_SHA224
+ case SCTP_AUTH_HMAC_ID_SHA224:
+ return (SCTP_AUTH_DIGEST_LEN_SHA224);
+#endif
+#ifdef HAVE_SHA2
+ case SCTP_AUTH_HMAC_ID_SHA256:
+ return (SCTP_AUTH_DIGEST_LEN_SHA256);
+ case SCTP_AUTH_HMAC_ID_SHA384:
+ return (SCTP_AUTH_DIGEST_LEN_SHA384);
+ case SCTP_AUTH_HMAC_ID_SHA512:
+ return (SCTP_AUTH_DIGEST_LEN_SHA512);
+#endif
+ default:
+ /* unknown HMAC algorithm: can't do anything */
+ return (0);
+ } /* end switch */
+}
+
+static inline int
+sctp_get_hmac_block_len(uint16_t hmac_algo)
+{
+ switch (hmac_algo) {
+ case SCTP_AUTH_HMAC_ID_SHA1:
+ case SCTP_AUTH_HMAC_ID_MD5:
+#ifdef HAVE_SHA224
+ case SCTP_AUTH_HMAC_ID_SHA224:
+ return (64);
+#endif
+#ifdef HAVE_SHA2
+ case SCTP_AUTH_HMAC_ID_SHA256:
+ return (64);
+ case SCTP_AUTH_HMAC_ID_SHA384:
+ case SCTP_AUTH_HMAC_ID_SHA512:
+ return (128);
+#endif
+ case SCTP_AUTH_HMAC_ID_RSVD:
+ default:
+ /* unknown HMAC algorithm: can't do anything */
+ return (0);
+ } /* end switch */
+}
+
+static void
+sctp_hmac_init(uint16_t hmac_algo, sctp_hash_context_t * ctx)
+{
+ switch (hmac_algo) {
+ case SCTP_AUTH_HMAC_ID_SHA1:
+ SHA1_Init(&ctx->sha1);
+ break;
+ case SCTP_AUTH_HMAC_ID_MD5:
+ MD5_Init(&ctx->md5);
+ break;
+#ifdef HAVE_SHA224
+ case SCTP_AUTH_HMAC_ID_SHA224:
+ break;
+#endif
+#ifdef HAVE_SHA2
+ case SCTP_AUTH_HMAC_ID_SHA256:
+ SHA256_Init(&ctx->sha256);
+ break;
+ case SCTP_AUTH_HMAC_ID_SHA384:
+ SHA384_Init(&ctx->sha384);
+ break;
+ case SCTP_AUTH_HMAC_ID_SHA512:
+ SHA512_Init(&ctx->sha512);
+ break;
+#endif
+ case SCTP_AUTH_HMAC_ID_RSVD:
+ default:
+ /* unknown HMAC algorithm: can't do anything */
+ return;
+ } /* end switch */
+}
+
+static void
+sctp_hmac_update(uint16_t hmac_algo, sctp_hash_context_t * ctx,
+ const uint8_t * text, uint32_t textlen)
+{
+ switch (hmac_algo) {
+ case SCTP_AUTH_HMAC_ID_SHA1:
+ SHA1_Update(&ctx->sha1, text, textlen);
+ break;
+ case SCTP_AUTH_HMAC_ID_MD5:
+ MD5_Update(&ctx->md5, text, textlen);
+ break;
+#ifdef HAVE_SHA224
+ case SCTP_AUTH_HMAC_ID_SHA224:
+ break;
+#endif
+#ifdef HAVE_SHA2
+ case SCTP_AUTH_HMAC_ID_SHA256:
+ SHA256_Update(&ctx->sha256, text, textlen);
+ break;
+ case SCTP_AUTH_HMAC_ID_SHA384:
+ SHA384_Update(&ctx->sha384, text, textlen);
+ break;
+ case SCTP_AUTH_HMAC_ID_SHA512:
+ SHA512_Update(&ctx->sha512, text, textlen);
+ break;
+#endif
+ case SCTP_AUTH_HMAC_ID_RSVD:
+ default:
+ /* unknown HMAC algorithm: can't do anything */
+ return;
+ } /* end switch */
+}
+
+static void
+sctp_hmac_final(uint16_t hmac_algo, sctp_hash_context_t * ctx,
+ uint8_t * digest)
+{
+ switch (hmac_algo) {
+ case SCTP_AUTH_HMAC_ID_SHA1:
+ SHA1_Final(digest, &ctx->sha1);
+ break;
+ case SCTP_AUTH_HMAC_ID_MD5:
+ MD5_Final(digest, &ctx->md5);
+ break;
+#ifdef HAVE_SHA224
+ case SCTP_AUTH_HMAC_ID_SHA224:
+ break;
+#endif
+#ifdef HAVE_SHA2
+ case SCTP_AUTH_HMAC_ID_SHA256:
+ SHA256_Final(digest, &ctx->sha256);
+ break;
+ case SCTP_AUTH_HMAC_ID_SHA384:
+ /* SHA384 is truncated SHA512 */
+ SHA384_Final(digest, &ctx->sha384);
+ break;
+ case SCTP_AUTH_HMAC_ID_SHA512:
+ SHA512_Final(digest, &ctx->sha512);
+ break;
+#endif
+ case SCTP_AUTH_HMAC_ID_RSVD:
+ default:
+ /* unknown HMAC algorithm: can't do anything */
+ return;
+ } /* end switch */
+}
+
+/*
+ * Keyed-Hashing for Message Authentication: FIPS 198 (RFC 2104)
+ *
+ * Compute the HMAC digest using the desired hash key, text, and HMAC
+ * algorithm. Resulting digest is placed in 'digest' and digest length
+ * is returned, if the HMAC was performed.
+ *
+ * WARNING: it is up to the caller to supply sufficient space to hold the
+ * resultant digest.
+ */
+uint32_t
+sctp_hmac(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
+ const uint8_t * text, uint32_t textlen, uint8_t * digest)
+{
+ uint32_t digestlen;
+ uint32_t blocklen;
+ sctp_hash_context_t ctx;
+ uint8_t ipad[128], opad[128]; /* keyed hash inner/outer pads */
+ uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+ uint32_t i;
+
+ /* sanity check the material and length */
+ if ((key == NULL) || (keylen == 0) || (text == NULL) ||
+ (textlen == 0) || (digest == NULL)) {
+ /* can't do HMAC with empty key or text or digest store */
+ return (0);
+ }
+ /* validate the hmac algo and get the digest length */
+ digestlen = sctp_get_hmac_digest_len(hmac_algo);
+ if (digestlen == 0)
+ return (0);
+
+ /* hash the key if it is longer than the hash block size */
+ blocklen = sctp_get_hmac_block_len(hmac_algo);
+ if (keylen > blocklen) {
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, key, keylen);
+ sctp_hmac_final(hmac_algo, &ctx, temp);
+ /* set the hashed key as the key */
+ keylen = digestlen;
+ key = temp;
+ }
+ /* initialize the inner/outer pads with the key and "append" zeroes */
+ bzero(ipad, blocklen);
+ bzero(opad, blocklen);
+ bcopy(key, ipad, keylen);
+ bcopy(key, opad, keylen);
+
+ /* XOR the key with ipad and opad values */
+ for (i = 0; i < blocklen; i++) {
+ ipad[i] ^= 0x36;
+ opad[i] ^= 0x5c;
+ }
+
+ /* perform inner hash */
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, ipad, blocklen);
+ sctp_hmac_update(hmac_algo, &ctx, text, textlen);
+ sctp_hmac_final(hmac_algo, &ctx, temp);
+
+ /* perform outer hash */
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, opad, blocklen);
+ sctp_hmac_update(hmac_algo, &ctx, temp, digestlen);
+ sctp_hmac_final(hmac_algo, &ctx, digest);
+
+ return (digestlen);
+}
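+
+/*
+ * Illustrative summary (not part of this commit): the computation above
+ * is standard RFC 2104 HMAC, where H is the selected hash and K the
+ * (possibly pre-hashed) key zero-padded to the block length:
+ *
+ *	digest = H((K ^ opad) || H((K ^ ipad) || text))
+ *
+ * with ipad bytes of 0x36 and opad bytes of 0x5c.
+ */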
+
+/* mbuf version */
+uint32_t
+sctp_hmac_m(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
+ struct mbuf *m, uint32_t m_offset, uint8_t * digest)
+{
+ uint32_t digestlen;
+ uint32_t blocklen;
+ sctp_hash_context_t ctx;
+ uint8_t ipad[128], opad[128]; /* keyed hash inner/outer pads */
+ uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+ uint32_t i;
+ struct mbuf *m_tmp;
+
+ /* sanity check the material and length */
+ if ((key == NULL) || (keylen == 0) || (m == NULL) || (digest == NULL)) {
+ /* can't do HMAC with empty key or text or digest store */
+ return (0);
+ }
+ /* validate the hmac algo and get the digest length */
+ digestlen = sctp_get_hmac_digest_len(hmac_algo);
+ if (digestlen == 0)
+ return (0);
+
+ /* hash the key if it is longer than the hash block size */
+ blocklen = sctp_get_hmac_block_len(hmac_algo);
+ if (keylen > blocklen) {
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, key, keylen);
+ sctp_hmac_final(hmac_algo, &ctx, temp);
+ /* set the hashed key as the key */
+ keylen = digestlen;
+ key = temp;
+ }
+ /* initialize the inner/outer pads with the key and "append" zeroes */
+ bzero(ipad, blocklen);
+ bzero(opad, blocklen);
+ bcopy(key, ipad, keylen);
+ bcopy(key, opad, keylen);
+
+ /* XOR the key with ipad and opad values */
+ for (i = 0; i < blocklen; i++) {
+ ipad[i] ^= 0x36;
+ opad[i] ^= 0x5c;
+ }
+
+ /* perform inner hash */
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, ipad, blocklen);
+ /* find the correct starting mbuf and offset (get start of text) */
+ m_tmp = m;
+ while ((m_tmp != NULL) && (m_offset >= (uint32_t) m_tmp->m_len)) {
+ m_offset -= m_tmp->m_len;
+ m_tmp = m_tmp->m_next;
+ }
+ /* now use the rest of the mbuf chain for the text */
+ while (m_tmp != NULL) {
+ sctp_hmac_update(hmac_algo, &ctx, mtod(m_tmp, uint8_t *) + m_offset,
+ m_tmp->m_len - m_offset);
+ /* clear the offset since it's only for the first mbuf */
+ m_offset = 0;
+ m_tmp = m_tmp->m_next;
+ }
+ sctp_hmac_final(hmac_algo, &ctx, temp);
+
+ /* perform outer hash */
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, opad, blocklen);
+ sctp_hmac_update(hmac_algo, &ctx, temp, digestlen);
+ sctp_hmac_final(hmac_algo, &ctx, digest);
+
+ return (digestlen);
+}
+
+/*
+ * verify the HMAC digest using the desired hash key, text, and HMAC
+ * algorithm. Returns -1 on error, 0 on success.
+ */
+int
+sctp_verify_hmac(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
+ const uint8_t * text, uint32_t textlen,
+ uint8_t * digest, uint32_t digestlen)
+{
+ uint32_t len;
+ uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+
+ /* sanity check the material and length */
+ if ((key == NULL) || (keylen == 0) ||
+ (text == NULL) || (textlen == 0) || (digest == NULL)) {
+ /* can't do HMAC with empty key or text or digest */
+ return (-1);
+ }
+ len = sctp_get_hmac_digest_len(hmac_algo);
+ if ((len == 0) || (digestlen != len))
+ return (-1);
+
+ /* compute the expected hash */
+ if (sctp_hmac(hmac_algo, key, keylen, text, textlen, temp) != len)
+ return (-1);
+
+ if (memcmp(digest, temp, digestlen) != 0)
+ return (-1);
+ else
+ return (0);
+}
+
+
+/*
+ * computes the requested HMAC using a key struct (which may be modified if
+ * the keylen exceeds the HMAC block len).
+ */
+uint32_t
+sctp_compute_hmac(uint16_t hmac_algo, sctp_key_t * key, const uint8_t * text,
+ uint32_t textlen, uint8_t * digest)
+{
+ uint32_t digestlen;
+ uint32_t blocklen;
+ sctp_hash_context_t ctx;
+ uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+
+ /* sanity check */
+ if ((key == NULL) || (text == NULL) || (textlen == 0) ||
+ (digest == NULL)) {
+ /* can't do HMAC with empty key or text or digest store */
+ return (0);
+ }
+ /* validate the hmac algo and get the digest length */
+ digestlen = sctp_get_hmac_digest_len(hmac_algo);
+ if (digestlen == 0)
+ return (0);
+
+ /* hash the key if it is longer than the hash block size */
+ blocklen = sctp_get_hmac_block_len(hmac_algo);
+ if (key->keylen > blocklen) {
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, key->key, key->keylen);
+ sctp_hmac_final(hmac_algo, &ctx, temp);
+ /* save the hashed key as the new key */
+ key->keylen = digestlen;
+ bcopy(temp, key->key, key->keylen);
+ }
+ return (sctp_hmac(hmac_algo, key->key, key->keylen, text, textlen,
+ digest));
+}
+
+/* mbuf version */
+uint32_t
+sctp_compute_hmac_m(uint16_t hmac_algo, sctp_key_t * key, struct mbuf *m,
+ uint32_t m_offset, uint8_t * digest)
+{
+ uint32_t digestlen;
+ uint32_t blocklen;
+ sctp_hash_context_t ctx;
+ uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+
+ /* sanity check */
+ if ((key == NULL) || (m == NULL) || (digest == NULL)) {
+ /* can't do HMAC with empty key or text or digest store */
+ return (0);
+ }
+ /* validate the hmac algo and get the digest length */
+ digestlen = sctp_get_hmac_digest_len(hmac_algo);
+ if (digestlen == 0)
+ return (0);
+
+ /* hash the key if it is longer than the hash block size */
+ blocklen = sctp_get_hmac_block_len(hmac_algo);
+ if (key->keylen > blocklen) {
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, key->key, key->keylen);
+ sctp_hmac_final(hmac_algo, &ctx, temp);
+ /* save the hashed key as the new key */
+ key->keylen = digestlen;
+ bcopy(temp, key->key, key->keylen);
+ }
+ return (sctp_hmac_m(hmac_algo, key->key, key->keylen, m, m_offset, digest));
+}
+
+int
+sctp_auth_is_supported_hmac(sctp_hmaclist_t * list, uint16_t id)
+{
+ int i;
+
+ if ((list == NULL) || (id == SCTP_AUTH_HMAC_ID_RSVD))
+ return (0);
+
+ for (i = 0; i < list->num_algo; i++)
+ if (list->hmac[i] == id)
+ return (1);
+
+ /* not in the list */
+ return (0);
+}
+
+
+/*
+ * clear any cached key(s) if they match the given key id on an association;
+ * the cached key(s) will be recomputed and re-cached at next use.
+ * ASSUMES TCB_LOCK is already held
+ */
+void
+sctp_clear_cachedkeys(struct sctp_tcb *stcb, uint16_t keyid)
+{
+ if (stcb == NULL)
+ return;
+
+ if (keyid == stcb->asoc.authinfo.assoc_keyid) {
+ sctp_free_key(stcb->asoc.authinfo.assoc_key);
+ stcb->asoc.authinfo.assoc_key = NULL;
+ }
+ if (keyid == stcb->asoc.authinfo.recv_keyid) {
+ sctp_free_key(stcb->asoc.authinfo.recv_key);
+ stcb->asoc.authinfo.recv_key = NULL;
+ }
+}
+
+/*
+ * clear any cached key(s) if they match the given key id, for all assocs
+ * on an endpoint. ASSUMES INP_WLOCK is already held
+ */
+void
+sctp_clear_cachedkeys_ep(struct sctp_inpcb *inp, uint16_t keyid)
+{
+ struct sctp_tcb *stcb;
+
+ if (inp == NULL)
+ return;
+
+ /* clear the cached keys on all assocs on this instance */
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ sctp_clear_cachedkeys(stcb, keyid);
+ SCTP_TCB_UNLOCK(stcb);
+ }
+}
+
+/*
+ * delete a shared key from an association. ASSUMES TCB_LOCK is already held
+ */
+int
+sctp_delete_sharedkey(struct sctp_tcb *stcb, uint16_t keyid)
+{
+ sctp_sharedkey_t *skey;
+
+ if (stcb == NULL)
+ return (-1);
+
+ /* is the keyid the assoc active sending key */
+ if (keyid == stcb->asoc.authinfo.assoc_keyid)
+ return (-1);
+
+ /* does the key exist? */
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
+ if (skey == NULL)
+ return (-1);
+
+ /* remove it */
+ LIST_REMOVE(skey, next);
+ sctp_free_sharedkey(skey); /* frees skey->key as well */
+
+ /* clear any cached keys */
+ sctp_clear_cachedkeys(stcb, keyid);
+ return (0);
+}
+
+/*
+ * deletes a shared key from the endpoint. ASSUMES INP_WLOCK is already held
+ */
+int
+sctp_delete_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid)
+{
+ sctp_sharedkey_t *skey;
+ struct sctp_tcb *stcb;
+
+ if (inp == NULL)
+ return (-1);
+
+ /* is the keyid the active sending key on the endpoint or any assoc */
+ if (keyid == inp->sctp_ep.default_keyid)
+ return (-1);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ if (keyid == stcb->asoc.authinfo.assoc_keyid) {
+ SCTP_TCB_UNLOCK(stcb);
+ return (-1);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+
+ /* does the key exist? */
+ skey = sctp_find_sharedkey(&inp->sctp_ep.shared_keys, keyid);
+ if (skey == NULL)
+ return (-1);
+
+ /* remove it */
+ LIST_REMOVE(skey, next);
+ sctp_free_sharedkey(skey); /* frees skey->key as well */
+
+ /* clear any cached keys */
+ sctp_clear_cachedkeys_ep(inp, keyid);
+ return (0);
+}
+
+/*
+ * set the active key on an association. ASSUMES TCB_LOCK is already held
+ */
+int
+sctp_auth_setactivekey(struct sctp_tcb *stcb, uint16_t keyid)
+{
+ sctp_sharedkey_t *skey = NULL;
+ sctp_key_t *key = NULL;
+ int using_ep_key = 0;
+
+ /* find the key on the assoc */
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
+ if (skey == NULL) {
+ /* if not on the assoc, find the key on the endpoint */
+ SCTP_INP_RLOCK(stcb->sctp_ep);
+ skey = sctp_find_sharedkey(&stcb->sctp_ep->sctp_ep.shared_keys,
+ keyid);
+ using_ep_key = 1;
+ }
+ if (skey == NULL) {
+ /* that key doesn't exist */
+ if (using_ep_key)
+ SCTP_INP_RUNLOCK(stcb->sctp_ep);
+ return (-1);
+ }
+ /* get the shared key text */
+ key = skey->key;
+
+ /* free any existing cached key */
+ if (stcb->asoc.authinfo.assoc_key != NULL)
+ sctp_free_key(stcb->asoc.authinfo.assoc_key);
+ /* compute a new assoc key and cache it */
+ stcb->asoc.authinfo.assoc_key =
+ sctp_compute_hashkey(stcb->asoc.authinfo.random,
+ stcb->asoc.authinfo.peer_random, key);
+ stcb->asoc.authinfo.assoc_keyid = keyid;
+#ifdef SCTP_DEBUG
+ if (SCTP_AUTH_DEBUG)
+ sctp_print_key(stcb->asoc.authinfo.assoc_key, "Assoc Key");
+#endif
+
+ if (using_ep_key)
+ SCTP_INP_RUNLOCK(stcb->sctp_ep);
+ return (0);
+}
+
+/*
+ * set the active key on an endpoint. ASSUMES INP_WLOCK is already held
+ */
+int
+sctp_auth_setactivekey_ep(struct sctp_inpcb *inp, uint16_t keyid)
+{
+ sctp_sharedkey_t *skey;
+
+ /* find the key */
+ skey = sctp_find_sharedkey(&inp->sctp_ep.shared_keys, keyid);
+ if (skey == NULL) {
+ /* that key doesn't exist */
+ return (-1);
+ }
+ inp->sctp_ep.default_keyid = keyid;
+ return (0);
+}
+
+/*
+ * get local authentication parameters from cookie (from INIT-ACK)
+ */
+void
+sctp_auth_get_cookie_params(struct sctp_tcb *stcb, struct mbuf *m,
+ uint32_t offset, uint32_t length)
+{
+ struct sctp_paramhdr *phdr, tmp_param;
+ uint16_t plen, ptype;
+ uint8_t store[384];
+ struct sctp_auth_random *random = NULL;
+ uint16_t random_len = 0;
+ struct sctp_auth_hmac_algo *hmacs = NULL;
+ uint16_t hmacs_len = 0;
+ struct sctp_auth_chunk_list *chunks = NULL;
+ uint16_t num_chunks = 0;
+ sctp_key_t *new_key;
+ uint32_t keylen;
+
+ /* convert to upper bound */
+ length += offset;
+
+ phdr = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_paramhdr), (uint8_t *) & tmp_param);
+ while (phdr != NULL) {
+ ptype = ntohs(phdr->param_type);
+ plen = ntohs(phdr->param_length);
+
+ if ((plen == 0) || (offset + plen > length))
+ break;
+
+ if (ptype == SCTP_RANDOM) {
+ if (plen > sizeof(store))
+ break;
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)store, plen);
+ if (phdr == NULL)
+ return;
+ /* save the random and length for the key */
+ random = (struct sctp_auth_random *)phdr;
+ random_len = plen - sizeof(*random);
+ } else if (ptype == SCTP_HMAC_LIST) {
+ int num_hmacs;
+ int i;
+
+ if (plen > sizeof(store))
+ break;
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)store, plen);
+ if (phdr == NULL)
+ return;
+ /* save the hmacs list and num for the key */
+ hmacs = (struct sctp_auth_hmac_algo *)phdr;
+ hmacs_len = plen - sizeof(*hmacs);
+ num_hmacs = hmacs_len / sizeof(hmacs->hmac_ids[0]);
+ if (stcb->asoc.local_hmacs != NULL)
+ sctp_free_hmaclist(stcb->asoc.local_hmacs);
+ stcb->asoc.local_hmacs = sctp_alloc_hmaclist(num_hmacs);
+ if (stcb->asoc.local_hmacs != NULL) {
+ for (i = 0; i < num_hmacs; i++) {
+ sctp_auth_add_hmacid(stcb->asoc.local_hmacs,
+ ntohs(hmacs->hmac_ids[i]));
+ }
+ }
+ } else if (ptype == SCTP_CHUNK_LIST) {
+ int i;
+
+ if (plen > sizeof(store))
+ break;
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)store, plen);
+ if (phdr == NULL)
+ return;
+ chunks = (struct sctp_auth_chunk_list *)phdr;
+ num_chunks = plen - sizeof(*chunks);
+ /* save chunks list and num for the key */
+ if (stcb->asoc.local_auth_chunks != NULL)
+ sctp_clear_chunklist(stcb->asoc.local_auth_chunks);
+ else
+ stcb->asoc.local_auth_chunks = sctp_alloc_chunklist();
+ for (i = 0; i < num_chunks; i++) {
+ sctp_auth_add_chunk(chunks->chunk_types[i],
+ stcb->asoc.local_auth_chunks);
+ }
+ }
+ /* get next parameter */
+ offset += SCTP_SIZE32(plen);
+ if (offset + sizeof(struct sctp_paramhdr) > length)
+ break;
+ phdr = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr),
+ (uint8_t *) & tmp_param);
+ }
+ /* concatenate the full random key */
+ keylen = random_len + num_chunks + hmacs_len;
+ new_key = sctp_alloc_key(keylen);
+ if (new_key != NULL) {
+ /* copy in the RANDOM */
+ if (random != NULL)
+ bcopy(random->random_data, new_key->key, random_len);
+ /* append in the AUTH chunks */
+ if (chunks != NULL)
+ bcopy(chunks->chunk_types, new_key->key + random_len,
+ num_chunks);
+ /* append in the HMACs */
+ if (hmacs != NULL)
+ bcopy(hmacs->hmac_ids, new_key->key + random_len + num_chunks,
+ hmacs_len);
+ }
+ if (stcb->asoc.authinfo.random != NULL)
+ sctp_free_key(stcb->asoc.authinfo.random);
+ stcb->asoc.authinfo.random = new_key;
+ stcb->asoc.authinfo.random_len = random_len;
+#ifdef SCTP_AUTH_DRAFT_04
+ /* don't include the chunks and hmacs for draft -04 */
+ stcb->asoc.authinfo.random->keylen = random_len;
+#endif
+ sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.assoc_keyid);
+ sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.recv_keyid);
+
+ /* negotiate what HMAC to use for the peer */
+ stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
+ stcb->asoc.local_hmacs);
+ /* copy defaults from the endpoint */
+ /* FIX ME: put in cookie? */
+ stcb->asoc.authinfo.assoc_keyid = stcb->sctp_ep->sctp_ep.default_keyid;
+}
+
+/*
+ * compute and fill in the HMAC digest for a packet
+ */
+void
+sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset,
+ struct sctp_auth_chunk *auth, struct sctp_tcb *stcb)
+{
+ uint32_t digestlen;
+ sctp_sharedkey_t *skey;
+ sctp_key_t *key;
+
+ if ((stcb == NULL) || (auth == NULL))
+ return;
+
+ /* zero the digest + chunk padding */
+ digestlen = sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
+ bzero(auth->hmac, SCTP_SIZE32(digestlen));
+ /* is an assoc key cached? */
+ if (stcb->asoc.authinfo.assoc_key == NULL) {
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys,
+ stcb->asoc.authinfo.assoc_keyid);
+ if (skey == NULL) {
+ /* not in the assoc list, so check the endpoint list */
+ skey = sctp_find_sharedkey(&stcb->sctp_ep->sctp_ep.shared_keys,
+ stcb->asoc.authinfo.assoc_keyid);
+ }
+ /* the only way skey is NULL is if null key id 0 is used */
+ if (skey != NULL)
+ key = skey->key;
+ else
+ key = NULL;
+ /* compute a new assoc key and cache it */
+ stcb->asoc.authinfo.assoc_key =
+ sctp_compute_hashkey(stcb->asoc.authinfo.random,
+ stcb->asoc.authinfo.peer_random, key);
+#ifdef SCTP_DEBUG
+ if (SCTP_AUTH_DEBUG) {
+ printf("caching key id %u\n",
+ stcb->asoc.authinfo.assoc_keyid);
+ sctp_print_key(stcb->asoc.authinfo.assoc_key, "Assoc Key");
+ }
+#endif
+ }
+ /* set in the active key id */
+ auth->shared_key_id = htons(stcb->asoc.authinfo.assoc_keyid);
+
+ /* compute and fill in the digest */
+ (void)sctp_compute_hmac_m(stcb->asoc.peer_hmac_id,
+ stcb->asoc.authinfo.assoc_key,
+ m, auth_offset, auth->hmac);
+}
+
+
+static void
+sctp_bzero_m(struct mbuf *m, uint32_t m_offset, uint32_t size)
+{
+ struct mbuf *m_tmp;
+ uint8_t *data;
+
+ /* sanity check */
+ if (m == NULL)
+ return;
+
+ /* find the correct starting mbuf and offset (get start position) */
+ m_tmp = m;
+ while ((m_tmp != NULL) && (m_offset >= (uint32_t) m_tmp->m_len)) {
+ m_offset -= m_tmp->m_len;
+ m_tmp = m_tmp->m_next;
+ }
+ /* now use the rest of the mbuf chain */
+ while ((m_tmp != NULL) && (size > 0)) {
+ data = mtod(m_tmp, uint8_t *) + m_offset;
+ if (size > (uint32_t) m_tmp->m_len) {
+ bzero(data, m_tmp->m_len);
+ size -= m_tmp->m_len;
+ } else {
+ bzero(data, size);
+ size = 0;
+ }
+ /* clear the offset since it's only for the first mbuf */
+ m_offset = 0;
+ m_tmp = m_tmp->m_next;
+ }
+}
+
+/*
+ * process the incoming Authentication chunk.
+ * return codes: -1 on any authentication error,
+ *                0 on successful authentication verification
+ */
+int
+sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *auth,
+ struct mbuf *m, uint32_t offset)
+{
+ uint16_t chunklen;
+ uint16_t shared_key_id;
+ uint16_t hmac_id;
+ sctp_sharedkey_t *skey;
+ uint32_t digestlen;
+ uint8_t digest[SCTP_AUTH_DIGEST_LEN_MAX];
+ uint8_t computed_digest[SCTP_AUTH_DIGEST_LEN_MAX];
+
+ /* auth is checked for NULL by caller */
+ chunklen = ntohs(auth->ch.chunk_length);
+ if (chunklen < sizeof(*auth)) {
+ SCTP_STAT_INCR(sctps_recvauthfailed);
+ return (-1);
+ }
+ SCTP_STAT_INCR(sctps_recvauth);
+
+ /* get the auth params */
+ shared_key_id = ntohs(auth->shared_key_id);
+ hmac_id = ntohs(auth->hmac_id);
+#ifdef SCTP_DEBUG
+ if (SCTP_AUTH_DEBUG)
+ printf("SCTP AUTH Chunk: shared key %u, HMAC id %u\n",
+ shared_key_id, hmac_id);
+#endif
+
+ /* is the indicated HMAC supported? */
+ if (!sctp_auth_is_supported_hmac(stcb->asoc.local_hmacs, hmac_id)) {
+ struct mbuf *m_err;
+ struct sctp_auth_invalid_hmac *err;
+
+ SCTP_STAT_INCR(sctps_recvivalhmacid);
+#ifdef SCTP_DEBUG
+ if (SCTP_AUTH_DEBUG)
+ printf("SCTP Auth: unsupported HMAC id %u\n", hmac_id);
+#endif
+ /*
+ * report this in an Error Chunk: Unsupported HMAC
+ * Identifier
+ */
+ m_err = sctp_get_mbuf_for_msg(sizeof(*err), 1, M_DONTWAIT, 1, MT_HEADER);
+ if (m_err != NULL) {
+ /* pre-reserve some space */
+ m_err->m_data += sizeof(struct sctp_chunkhdr);
+ /* fill in the error */
+ err = mtod(m_err, struct sctp_auth_invalid_hmac *);
+ bzero(err, sizeof(*err));
+ err->ph.param_type = htons(SCTP_CAUSE_UNSUPPORTED_HMACID);
+ err->ph.param_length = htons(sizeof(*err));
+			err->hmac_id = htons(hmac_id);
+ m_err->m_pkthdr.len = m_err->m_len = sizeof(*err);
+ /* queue it */
+ sctp_queue_op_err(stcb, m_err);
+ }
+ return (-1);
+ }
+ /* get the indicated shared key, if available */
+ if ((stcb->asoc.authinfo.recv_key == NULL) ||
+ (stcb->asoc.authinfo.recv_keyid != shared_key_id)) {
+ /* find the shared key on the assoc first */
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, shared_key_id);
+ if (skey == NULL) {
+ /* if not on the assoc, find it on the endpoint */
+ skey = sctp_find_sharedkey(&stcb->sctp_ep->sctp_ep.shared_keys,
+ shared_key_id);
+ }
+ /* if the shared key isn't found, discard the chunk */
+ if (skey == NULL) {
+ SCTP_STAT_INCR(sctps_recvivalkeyid);
+#ifdef SCTP_DEBUG
+ if (SCTP_AUTH_DEBUG)
+ printf("SCTP Auth: unknown key id %u\n",
+ shared_key_id);
+#endif
+ return (-1);
+ }
+ /* generate a notification if this is a new key id */
+ if (stcb->asoc.authinfo.recv_keyid != shared_key_id)
+ /*
+ * sctp_ulp_notify(SCTP_NOTIFY_AUTH_NEW_KEY, stcb,
+ * shared_key_id, (void
+ * *)stcb->asoc.authinfo.recv_keyid);
+ */
+ sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY,
+ shared_key_id, stcb->asoc.authinfo.recv_keyid);
+ /* compute a new recv assoc key and cache it */
+ if (stcb->asoc.authinfo.recv_key != NULL)
+ sctp_free_key(stcb->asoc.authinfo.recv_key);
+ stcb->asoc.authinfo.recv_key =
+ sctp_compute_hashkey(stcb->asoc.authinfo.random,
+ stcb->asoc.authinfo.peer_random, skey->key);
+ stcb->asoc.authinfo.recv_keyid = shared_key_id;
+#ifdef SCTP_DEBUG
+ if (SCTP_AUTH_DEBUG)
+ sctp_print_key(stcb->asoc.authinfo.recv_key, "Recv Key");
+#endif
+ }
+ /* validate the digest length */
+ digestlen = sctp_get_hmac_digest_len(hmac_id);
+ if (chunklen < (sizeof(*auth) + digestlen)) {
+ /* invalid digest length */
+ SCTP_STAT_INCR(sctps_recvauthfailed);
+#ifdef SCTP_DEBUG
+ if (SCTP_AUTH_DEBUG)
+ printf("SCTP Auth: chunk too short for HMAC\n");
+#endif
+ return (-1);
+ }
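+	/*
+	 * Per the SCTP AUTH specification the sender computes the HMAC
+	 * with the digest field set to zero, so the receiver must zero
+	 * that field before recomputing the digest for comparison.
+	 */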
+	/* save a copy of the digest, zero it in the packet, and validate */
+ bcopy(auth->hmac, digest, digestlen);
+ sctp_bzero_m(m, offset + sizeof(*auth), SCTP_SIZE32(digestlen));
+ (void)sctp_compute_hmac_m(hmac_id, stcb->asoc.authinfo.recv_key,
+ m, offset, computed_digest);
+
+ /* compare the computed digest with the one in the AUTH chunk */
+ if (memcmp(digest, computed_digest, digestlen) != 0) {
+ SCTP_STAT_INCR(sctps_recvauthfailed);
+#ifdef SCTP_DEBUG
+ if (SCTP_AUTH_DEBUG)
+ printf("SCTP Auth: HMAC digest check failed\n");
+#endif
+ return (-1);
+ }
+ return (0);
+}
+
+/*
+ * Generate NOTIFICATION
+ */
+void
+sctp_notify_authentication(struct sctp_tcb *stcb, uint32_t indication,
+ uint16_t keyid, uint16_t alt_keyid)
+{
+ struct mbuf *m_notify;
+ struct sctp_authkey_event *auth;
+ struct sctp_queued_to_read *control;
+
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTHEVNT))
+ /* event not enabled */
+ return;
+
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_authkey_event),
+ 1, M_DONTWAIT, 1, MT_HEADER);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ m_notify->m_len = 0;
+ auth = mtod(m_notify, struct sctp_authkey_event *);
+ auth->auth_type = SCTP_AUTHENTICATION_EVENT;
+ auth->auth_flags = 0;
+ auth->auth_length = sizeof(*auth);
+ auth->auth_keynumber = keyid;
+ auth->auth_altkeynumber = alt_keyid;
+ auth->auth_indication = indication;
+ auth->auth_assoc_id = sctp_get_associd(stcb);
+
+ m_notify->m_flags |= M_EOR | M_NOTIFICATION;
+ m_notify->m_pkthdr.len = sizeof(*auth);
+ m_notify->m_pkthdr.rcvif = 0;
+ m_notify->m_len = sizeof(*auth);
+ m_notify->m_next = NULL;
+
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0, m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = m_notify->m_len;
+	/* not strictly needed, but keep the tail pointer consistent */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb, control,
+ &stcb->sctp_socket->so_rcv, 1);
+}
+
+
+/*
+ * Validates the authentication-related parameters in an INIT/INIT-ACK.
+ * Note: currently only used for INIT, as INIT-ACK is handled inline
+ * by sctp_load_addresses_from_init().
+ */
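+/*
+ * The key rule enforced below: a peer that advertises ASCONF/ASCONF-ACK
+ * support must also advertise AUTH support (a RANDOM parameter plus an
+ * HMAC list), unless the sctp_asconf_auth_nochk sysctl disables the check.
+ */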
+int
+sctp_validate_init_auth_params(struct mbuf *m, int offset, int limit)
+{
+ struct sctp_paramhdr *phdr, parm_buf;
+ uint16_t ptype, plen;
+ int peer_supports_asconf = 0;
+ int peer_supports_auth = 0;
+ int got_random = 0, got_hmacs = 0;
+
+ /* go through each of the params. */
+ phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
+ while (phdr) {
+ ptype = ntohs(phdr->param_type);
+ plen = ntohs(phdr->param_length);
+
+ if (offset + plen > limit) {
+ break;
+ }
+ if (plen == 0) {
+ break;
+ }
+ if (ptype == SCTP_SUPPORTED_CHUNK_EXT) {
+ /* A supported extension chunk */
+ struct sctp_supported_chunk_types_param *pr_supported;
+ uint8_t local_store[128];
+ int num_ent, i;
+
+			/* bound the copy to the local buffer, as the HMAC list case does */
+			if (plen > sizeof(local_store))
+				break;
+			phdr = sctp_get_next_param(m, offset,
+			    (struct sctp_paramhdr *)&local_store, plen);
+			if (phdr == NULL) {
+				return (-1);
+			}
+ pr_supported = (struct sctp_supported_chunk_types_param *)phdr;
+ num_ent = plen - sizeof(struct sctp_paramhdr);
+ for (i = 0; i < num_ent; i++) {
+ switch (pr_supported->chunk_types[i]) {
+ case SCTP_ASCONF:
+ case SCTP_ASCONF_ACK:
+ peer_supports_asconf = 1;
+ break;
+ case SCTP_AUTHENTICATION:
+ peer_supports_auth = 1;
+ break;
+ default:
+ /* one we don't care about */
+ break;
+ }
+ }
+ } else if (ptype == SCTP_RANDOM) {
+ got_random = 1;
+ /* enforce the random length */
+ if (plen != (sizeof(struct sctp_auth_random) +
+ SCTP_AUTH_RANDOM_SIZE_REQUIRED)) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_AUTH1)
+ printf("SCTP: invalid RANDOM len\n");
+#endif
+ return (-1);
+ }
+ } else if (ptype == SCTP_HMAC_LIST) {
+ uint8_t store[256];
+ struct sctp_auth_hmac_algo *hmacs;
+ int num_hmacs;
+
+ if (plen > sizeof(store))
+ break;
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)store, plen);
+ if (phdr == NULL)
+ return (-1);
+ hmacs = (struct sctp_auth_hmac_algo *)phdr;
+ num_hmacs = (plen - sizeof(*hmacs)) /
+ sizeof(hmacs->hmac_ids[0]);
+ /* validate the hmac list */
+ if (sctp_verify_hmac_param(hmacs, num_hmacs)) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_AUTH1)
+ printf("SCTP: invalid HMAC param\n");
+#endif
+ return (-1);
+ }
+ got_hmacs = 1;
+ }
+ offset += SCTP_SIZE32(plen);
+ if (offset >= limit) {
+ break;
+ }
+ phdr = sctp_get_next_param(m, offset, &parm_buf,
+ sizeof(parm_buf));
+ }
+ /* validate authentication required parameters */
+ if (got_random && got_hmacs) {
+ peer_supports_auth = 1;
+ } else {
+ peer_supports_auth = 0;
+ }
+ if (!sctp_asconf_auth_nochk && peer_supports_asconf &&
+ !peer_supports_auth) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_AUTH1)
+ printf("SCTP: peer supports ASCONF but not AUTH\n");
+#endif
+ return (-1);
+ }
+ return (0);
+}
+
+void
+sctp_initialize_auth_params(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
+{
+ uint16_t chunks_len = 0;
+ uint16_t hmacs_len = 0;
+ uint16_t random_len = sctp_auth_random_len;
+ sctp_key_t *new_key;
+ uint16_t keylen;
+
+ /* initialize hmac list from endpoint */
+ stcb->asoc.local_hmacs = sctp_copy_hmaclist(inp->sctp_ep.local_hmacs);
+ if (stcb->asoc.local_hmacs != NULL) {
+ hmacs_len = stcb->asoc.local_hmacs->num_algo *
+ sizeof(stcb->asoc.local_hmacs->hmac[0]);
+ }
+ /* initialize auth chunks list from endpoint */
+ stcb->asoc.local_auth_chunks =
+ sctp_copy_chunklist(inp->sctp_ep.local_auth_chunks);
+ if (stcb->asoc.local_auth_chunks != NULL) {
+ int i;
+
+ for (i = 0; i < 256; i++) {
+ if (stcb->asoc.local_auth_chunks->chunks[i])
+ chunks_len++;
+ }
+ }
+ /* copy defaults from the endpoint */
+ stcb->asoc.authinfo.assoc_keyid = inp->sctp_ep.default_keyid;
+
+ /* now set the concatenated key (random + chunks + hmacs) */
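+	/*
+	 * Layout of the concatenated key built below:
+	 *   [random_len bytes of RANDOM data]
+	 *   [one byte per locally-required AUTH chunk type]
+	 *   [serialized list of locally-supported HMAC ids]
+	 */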
+ keylen = random_len + chunks_len + hmacs_len;
+ new_key = sctp_alloc_key(keylen);
+ if (new_key != NULL) {
+ /* generate and copy in the RANDOM */
+ sctp_read_random(new_key->key, random_len);
+ keylen = random_len;
+ /* append in the AUTH chunks */
+ if (stcb->asoc.local_auth_chunks) {
+ int i;
+
+ for (i = 0; i < 256; i++) {
+ if (stcb->asoc.local_auth_chunks->chunks[i])
+ new_key->key[keylen++] = i;
+ }
+ }
+ /* append in the HMACs */
+ sctp_serialize_hmaclist(stcb->asoc.local_hmacs,
+ new_key->key + keylen);
+ }
+ if (stcb->asoc.authinfo.random != NULL)
+ sctp_free_key(stcb->asoc.authinfo.random);
+ stcb->asoc.authinfo.random = new_key;
+ stcb->asoc.authinfo.random_len = random_len;
+#ifdef SCTP_AUTH_DRAFT_04
+ /* don't include the chunks and hmacs for draft -04 */
+ stcb->asoc.authinfo.random->keylen = random_len;
+#endif
+}
+
+
+#ifdef SCTP_HMAC_TEST
+/*
+ * HMAC and key concatenation tests
+ */
+static void
+sctp_print_digest(uint8_t * digest, uint32_t digestlen, const char *str)
+{
+ uint32_t i;
+
+ printf("\n%s: 0x", str);
+ if (digest == NULL)
+ return;
+
+ for (i = 0; i < digestlen; i++)
+ printf("%02x", digest[i]);
+}
+
+static int
+sctp_test_hmac(const char *str, uint16_t hmac_id, uint8_t * key,
+ uint32_t keylen, uint8_t * text, uint32_t textlen,
+ uint8_t * digest, uint32_t digestlen)
+{
+ uint8_t computed_digest[SCTP_AUTH_DIGEST_LEN_MAX];
+
+ printf("\n%s:", str);
+ sctp_hmac(hmac_id, key, keylen, text, textlen, computed_digest);
+ sctp_print_digest(digest, digestlen, "Expected digest");
+ sctp_print_digest(computed_digest, digestlen, "Computed digest");
+ if (memcmp(digest, computed_digest, digestlen) != 0) {
+ printf("\nFAILED");
+ return (-1);
+ } else {
+ printf("\nPASSED");
+ return (0);
+ }
+}
+
+
+/*
+ * RFC 2202: HMAC-SHA1 test cases
+ */
+void
+sctp_test_hmac_sha1(void)
+{
+ uint8_t *digest;
+ uint8_t key[128];
+ uint32_t keylen;
+ uint8_t text[128];
+ uint32_t textlen;
+ uint32_t digestlen = 20;
+ int failed = 0;
+
+	/*
+	 * test_case = 1
+	 * key       = 0x0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b
+	 * key_len   = 20
+	 * data      = "Hi There"
+	 * data_len  = 8
+	 * digest    = 0xb617318655057264e28bc0b6fb378c8ef146be00
+	 */
+ keylen = 20;
+ memset(key, 0x0b, keylen);
+ textlen = 8;
+ strcpy(text, "Hi There");
+ digest = "\xb6\x17\x31\x86\x55\x05\x72\x64\xe2\x8b\xc0\xb6\xfb\x37\x8c\x8e\xf1\x46\xbe\x00";
+ if (sctp_test_hmac("SHA1 test case 1", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+	/*
+	 * test_case = 2
+	 * key       = "Jefe"
+	 * key_len   = 4
+	 * data      = "what do ya want for nothing?"
+	 * data_len  = 28
+	 * digest    = 0xeffcdf6ae5eb2fa2d27416d5f184df9c259a7c79
+	 */
+ keylen = 4;
+ strcpy(key, "Jefe");
+ textlen = 28;
+ strcpy(text, "what do ya want for nothing?");
+ digest = "\xef\xfc\xdf\x6a\xe5\xeb\x2f\xa2\xd2\x74\x16\xd5\xf1\x84\xdf\x9c\x25\x9a\x7c\x79";
+ if (sctp_test_hmac("SHA1 test case 2", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+	/*
+	 * test_case = 3
+	 * key       = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+	 * key_len   = 20
+	 * data      = 0xdd repeated 50 times
+	 * data_len  = 50
+	 * digest    = 0x125d7342b9ac11cd91a39af48aa17b4f63f175d3
+	 */
+ keylen = 20;
+ memset(key, 0xaa, keylen);
+ textlen = 50;
+ memset(text, 0xdd, textlen);
+ digest = "\x12\x5d\x73\x42\xb9\xac\x11\xcd\x91\xa3\x9a\xf4\x8a\xa1\x7b\x4f\x63\xf1\x75\xd3";
+ if (sctp_test_hmac("SHA1 test case 3", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+	/*
+	 * test_case = 4
+	 * key       = 0x0102030405060708090a0b0c0d0e0f10111213141516171819
+	 * key_len   = 25
+	 * data      = 0xcd repeated 50 times
+	 * data_len  = 50
+	 * digest    = 0x4c9007f4026250c6bc8414f9bf50c86c2d7235da
+	 */
+ keylen = 25;
+ memcpy(key, "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19", keylen);
+ textlen = 50;
+ memset(text, 0xcd, textlen);
+ digest = "\x4c\x90\x07\xf4\x02\x62\x50\xc6\xbc\x84\x14\xf9\xbf\x50\xc8\x6c\x2d\x72\x35\xda";
+ if (sctp_test_hmac("SHA1 test case 4", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+	/*
+	 * test_case = 5
+	 * key       = 0x0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c
+	 * key_len   = 20
+	 * data      = "Test With Truncation"
+	 * data_len  = 20
+	 * digest    = 0x4c1a03424b55e07fe7f27be1d58bb9324a9a5a04
+	 * digest-96 = 0x4c1a03424b55e07fe7f27be1
+	 */
+ keylen = 20;
+ memset(key, 0x0c, keylen);
+ textlen = 20;
+ strcpy(text, "Test With Truncation");
+ digest = "\x4c\x1a\x03\x42\x4b\x55\xe0\x7f\xe7\xf2\x7b\xe1\xd5\x8b\xb9\x32\x4a\x9a\x5a\x04";
+ if (sctp_test_hmac("SHA1 test case 5", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+	/*
+	 * test_case = 6
+	 * key       = 0xaa repeated 80 times
+	 * key_len   = 80
+	 * data      = "Test Using Larger Than Block-Size Key - Hash Key First"
+	 * data_len  = 54
+	 * digest    = 0xaa4ae5e15272d00e95705637ce8a3b55ed402112
+	 */
+ keylen = 80;
+ memset(key, 0xaa, keylen);
+ textlen = 54;
+ strcpy(text, "Test Using Larger Than Block-Size Key - Hash Key First");
+ digest = "\xaa\x4a\xe5\xe1\x52\x72\xd0\x0e\x95\x70\x56\x37\xce\x8a\x3b\x55\xed\x40\x21\x12";
+ if (sctp_test_hmac("SHA1 test case 6", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+	/*
+	 * test_case = 7
+	 * key       = 0xaa repeated 80 times
+	 * key_len   = 80
+	 * data      = "Test Using Larger Than Block-Size Key and Larger
+	 *             Than One Block-Size Data"
+	 * data_len  = 73
+	 * digest    = 0xe8e99d0f45237d786d6bbaa7965c7808bbff1a91
+	 */
+ keylen = 80;
+ memset(key, 0xaa, keylen);
+ textlen = 73;
+ strcpy(text, "Test Using Larger Than Block-Size Key and Larger Than One Block-Size Data");
+ digest = "\xe8\xe9\x9d\x0f\x45\x23\x7d\x78\x6d\x6b\xba\xa7\x96\x5c\x78\x08\xbb\xff\x1a\x91";
+ if (sctp_test_hmac("SHA1 test case 7", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+ /* done with all tests */
+ if (failed)
+ printf("\nSHA1 test results: %d cases failed", failed);
+ else
+ printf("\nSHA1 test results: all test cases passed");
+}
+
+/*
+ * RFC 2202: HMAC-MD5 test cases
+ */
+void
+sctp_test_hmac_md5(void)
+{
+ uint8_t *digest;
+ uint8_t key[128];
+ uint32_t keylen;
+ uint8_t text[128];
+ uint32_t textlen;
+ uint32_t digestlen = 16;
+ int failed = 0;
+
+	/*
+	 * test_case = 1
+	 * key       = 0x0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b
+	 * key_len   = 16
+	 * data      = "Hi There"
+	 * data_len  = 8
+	 * digest    = 0x9294727a3638bb1c13f48ef8158bfc9d
+	 */
+ keylen = 16;
+ memset(key, 0x0b, keylen);
+ textlen = 8;
+ strcpy(text, "Hi There");
+ digest = "\x92\x94\x72\x7a\x36\x38\xbb\x1c\x13\xf4\x8e\xf8\x15\x8b\xfc\x9d";
+ if (sctp_test_hmac("MD5 test case 1", SCTP_AUTH_HMAC_ID_MD5, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+	/*
+	 * test_case = 2
+	 * key       = "Jefe"
+	 * key_len   = 4
+	 * data      = "what do ya want for nothing?"
+	 * data_len  = 28
+	 * digest    = 0x750c783e6ab0b503eaa86e310a5db738
+	 */
+ keylen = 4;
+ strcpy(key, "Jefe");
+ textlen = 28;
+ strcpy(text, "what do ya want for nothing?");
+ digest = "\x75\x0c\x78\x3e\x6a\xb0\xb5\x03\xea\xa8\x6e\x31\x0a\x5d\xb7\x38";
+ if (sctp_test_hmac("MD5 test case 2", SCTP_AUTH_HMAC_ID_MD5, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+	/*
+	 * test_case = 3
+	 * key       = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+	 * key_len   = 16
+	 * data      = 0xdd repeated 50 times
+	 * data_len  = 50
+	 * digest    = 0x56be34521d144c88dbb8c733f0e8b3f6
+	 */
+ keylen = 16;
+ memset(key, 0xaa, keylen);
+ textlen = 50;
+ memset(text, 0xdd, textlen);
+ digest = "\x56\xbe\x34\x52\x1d\x14\x4c\x88\xdb\xb8\xc7\x33\xf0\xe8\xb3\xf6";
+ if (sctp_test_hmac("MD5 test case 3", SCTP_AUTH_HMAC_ID_MD5, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+	/*
+	 * test_case = 4
+	 * key       = 0x0102030405060708090a0b0c0d0e0f10111213141516171819
+	 * key_len   = 25
+	 * data      = 0xcd repeated 50 times
+	 * data_len  = 50
+	 * digest    = 0x697eaf0aca3a3aea3a75164746ffaa79
+	 */
+ keylen = 25;
+ memcpy(key, "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19", keylen);
+ textlen = 50;
+ memset(text, 0xcd, textlen);
+ digest = "\x69\x7e\xaf\x0a\xca\x3a\x3a\xea\x3a\x75\x16\x47\x46\xff\xaa\x79";
+ if (sctp_test_hmac("MD5 test case 4", SCTP_AUTH_HMAC_ID_MD5, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+	/*
+	 * test_case = 5
+	 * key       = 0x0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c
+	 * key_len   = 16
+	 * data      = "Test With Truncation"
+	 * data_len  = 20
+	 * digest    = 0x56461ef2342edc00f9bab995690efd4c
+	 * digest-96 = 0x56461ef2342edc00f9bab995
+	 */
+ */
+ keylen = 16;
+ memset(key, 0x0c, keylen);
+ textlen = 20;
+ strcpy(text, "Test With Truncation");
+ digest = "\x56\x46\x1e\xf2\x34\x2e\xdc\x00\xf9\xba\xb9\x95\x69\x0e\xfd\x4c";
+ if (sctp_test_hmac("MD5 test case 5", SCTP_AUTH_HMAC_ID_MD5, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+	/*
+	 * test_case = 6
+	 * key       = 0xaa repeated 80 times
+	 * key_len   = 80
+	 * data      = "Test Using Larger Than Block-Size Key - Hash Key First"
+	 * data_len  = 54
+	 * digest    = 0x6b1ab7fe4bd7bf8f0b62e6ce61b9d0cd
+	 */
+ keylen = 80;
+ memset(key, 0xaa, keylen);
+ textlen = 54;
+ strcpy(text, "Test Using Larger Than Block-Size Key - Hash Key First");
+ digest = "\x6b\x1a\xb7\xfe\x4b\xd7\xbf\x8f\x0b\x62\xe6\xce\x61\xb9\xd0\xcd";
+ if (sctp_test_hmac("MD5 test case 6", SCTP_AUTH_HMAC_ID_MD5, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+	/*
+	 * test_case = 7
+	 * key       = 0xaa repeated 80 times
+	 * key_len   = 80
+	 * data      = "Test Using Larger Than Block-Size Key and Larger
+	 *             Than One Block-Size Data"
+	 * data_len  = 73
+	 * digest    = 0x6f630fad67cda0ee1fb1f562db3aa53e
+	 */
+ keylen = 80;
+ memset(key, 0xaa, keylen);
+ textlen = 73;
+ strcpy(text, "Test Using Larger Than Block-Size Key and Larger Than One Block-Size Data");
+ digest = "\x6f\x63\x0f\xad\x67\xcd\xa0\xee\x1f\xb1\xf5\x62\xdb\x3a\xa5\x3e";
+ if (sctp_test_hmac("MD5 test case 7", SCTP_AUTH_HMAC_ID_MD5, key, keylen,
+ text, textlen, digest, digestlen) < 0)
+ failed++;
+
+ /* done with all tests */
+ if (failed)
+ printf("\nMD5 test results: %d cases failed", failed);
+ else
+ printf("\nMD5 test results: all test cases passed");
+}
+
+/*
+ * test assoc key concatenation
+ */
+static int
+sctp_test_key_concatenation(sctp_key_t * key1, sctp_key_t * key2,
+ sctp_key_t * expected_key)
+{
+ sctp_key_t *key;
+ int ret_val;
+
+ sctp_show_key(key1, "\nkey1");
+ sctp_show_key(key2, "\nkey2");
+ key = sctp_compute_hashkey(key1, key2, NULL);
+ sctp_show_key(expected_key, "\nExpected");
+ sctp_show_key(key, "\nComputed");
+	/* compare key lengths and the key text, not the sctp_key_t header */
+	if ((key->keylen != expected_key->keylen) ||
+	    (memcmp(key->key, expected_key->key, expected_key->keylen) != 0)) {
+ printf("\nFAILED");
+ ret_val = -1;
+ } else {
+ printf("\nPASSED");
+ ret_val = 0;
+ }
+ sctp_free_key(key1);
+ sctp_free_key(key2);
+ sctp_free_key(expected_key);
+ sctp_free_key(key);
+ return (ret_val);
+}
+
+
+void
+sctp_test_authkey(void)
+{
+ sctp_key_t *key1, *key2, *expected_key;
+ int failed = 0;
+
+ /* test case 1 */
+ key1 = sctp_set_key("\x01\x01\x01\x01", 4);
+ key2 = sctp_set_key("\x01\x02\x03\x04", 4);
+ expected_key = sctp_set_key("\x01\x01\x01\x01\x01\x02\x03\x04", 8);
+ if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+ failed++;
+
+ /* test case 2 */
+ key1 = sctp_set_key("\x00\x00\x00\x01", 4);
+ key2 = sctp_set_key("\x02", 1);
+ expected_key = sctp_set_key("\x00\x00\x00\x01\x02", 5);
+ if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+ failed++;
+
+ /* test case 3 */
+ key1 = sctp_set_key("\x01", 1);
+ key2 = sctp_set_key("\x00\x00\x00\x02", 4);
+ expected_key = sctp_set_key("\x01\x00\x00\x00\x02", 5);
+ if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+ failed++;
+
+ /* test case 4 */
+ key1 = sctp_set_key("\x00\x00\x00\x01", 4);
+ key2 = sctp_set_key("\x01", 1);
+ expected_key = sctp_set_key("\x01\x00\x00\x00\x01", 5);
+ if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+ failed++;
+
+ /* test case 5 */
+ key1 = sctp_set_key("\x01", 1);
+ key2 = sctp_set_key("\x00\x00\x00\x01", 4);
+ expected_key = sctp_set_key("\x01\x00\x00\x00\x01", 5);
+ if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+ failed++;
+
+ /* test case 6 */
+ key1 = sctp_set_key("\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07", 11);
+ key2 = sctp_set_key("\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x08", 11);
+ expected_key = sctp_set_key("\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x08", 22);
+ if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+ failed++;
+
+ /* test case 7 */
+ key1 = sctp_set_key("\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x08", 11);
+ key2 = sctp_set_key("\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07", 11);
+ expected_key = sctp_set_key("\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x08", 22);
+ if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+ failed++;
+
+ /* done with all tests */
+ if (failed)
+ printf("\nKey concatenation test results: %d cases failed", failed);
+ else
+ printf("\nKey concatenation test results: all test cases passed");
+}
+
+
+#if defined(STANDALONE_HMAC_TEST)
+int
+main(void)
+{
+ sctp_test_hmac_sha1();
+ sctp_test_hmac_md5();
+ sctp_test_authkey();
+}
+
+#endif /* STANDALONE_HMAC_TEST */
+
+#endif /* SCTP_HMAC_TEST */
diff --git a/sys/netinet/sctp_auth.h b/sys/netinet/sctp_auth.h
new file mode 100644
index 0000000..c6eb88a
--- /dev/null
+++ b/sys/netinet/sctp_auth.h
@@ -0,0 +1,262 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#define HAVE_SHA2
+
+#ifndef __SCTP_AUTH_H__
+#define __SCTP_AUTH_H__
+
+#include <sys/queue.h>
+#include <sys/mbuf.h>
+
+#ifdef USE_SCTP_SHA1
+#include <netinet/sctp_sha1.h>
+#else
+#include <crypto/sha1.h>
+/* map standard crypto API names */
+#define SHA1_Init SHA1Init
+#define SHA1_Update SHA1Update
+#define SHA1_Final(x,y) SHA1Final((caddr_t)x, y)
+#endif
+
+#if defined(HAVE_SHA2)
+#include <crypto/sha2/sha2.h>
+#endif
+
+#include <sys/md5.h>
+/* map standard crypto API names */
+#define MD5_Init MD5Init
+#define MD5_Update MD5Update
+#define MD5_Final MD5Final
+
+/* digest lengths */
+#define SCTP_AUTH_DIGEST_LEN_SHA1 20
+#define SCTP_AUTH_DIGEST_LEN_MD5 16
+#define SCTP_AUTH_DIGEST_LEN_SHA224 28
+#define SCTP_AUTH_DIGEST_LEN_SHA256 32
+#define SCTP_AUTH_DIGEST_LEN_SHA384 48
+#define SCTP_AUTH_DIGEST_LEN_SHA512 64
+#define SCTP_AUTH_DIGEST_LEN_MAX 64
+
+/* random sizes */
+#define SCTP_AUTH_RANDOM_SIZE_DEFAULT 32
+#define SCTP_AUTH_RANDOM_SIZE_REQUIRED 32
+#define SCTP_AUTH_RANDOM_SIZE_MAX 256
+
+/* union of all supported HMAC algorithm contexts */
+typedef union sctp_hash_context {
+ SHA1_CTX sha1;
+ MD5_CTX md5;
+#ifdef HAVE_SHA2
+ SHA256_CTX sha256;
+ SHA384_CTX sha384;
+ SHA512_CTX sha512;
+#endif
+} sctp_hash_context_t;
+
+typedef struct sctp_key {
+ uint32_t keylen;
+ uint8_t key[0];
+} sctp_key_t;
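+/*
+ * A key is allocated as sizeof(sctp_key_t) + keylen bytes; the key text
+ * is stored inline in the trailing zero-length array.
+ */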
+
+typedef struct sctp_shared_key {
+ LIST_ENTRY(sctp_shared_key) next;
+ sctp_key_t *key; /* key text */
+ uint16_t keyid; /* shared key ID */
+} sctp_sharedkey_t;
+
+LIST_HEAD(sctp_keyhead, sctp_shared_key);
+
+/* authentication chunks list */
+typedef struct sctp_auth_chklist {
+ uint8_t chunks[256];
+ uint8_t num_chunks;
+} sctp_auth_chklist_t;
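+/*
+ * chunks[] is indexed by chunk type; a non-zero entry means that chunk
+ * type must be authenticated (see sctp_auth_is_required_chunk() below).
+ */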
+
+/* hmac algos supported list */
+typedef struct sctp_hmaclist {
+ uint16_t max_algo; /* max algorithms allocated */
+ uint16_t num_algo; /* num algorithms used */
+ uint16_t hmac[0];
+} sctp_hmaclist_t;
+
+/* authentication info */
+typedef struct sctp_authinfo {
+ sctp_key_t *random; /* local random key (concatenated) */
+ uint32_t random_len; /* local random number length for param */
+ sctp_key_t *peer_random;/* peer's random key (concatenated) */
+ uint16_t assoc_keyid; /* current send keyid (cached) */
+ uint16_t recv_keyid; /* last recv keyid (cached) */
+ sctp_key_t *assoc_key; /* cached send key */
+ sctp_key_t *recv_key; /* cached recv key */
+} sctp_authinfo_t;
+
+
+/*
+ * global variables
+ */
+extern uint32_t sctp_asconf_auth_nochk; /* sysctl to disable ASCONF auth chk */
+extern uint32_t sctp_auth_disable; /* sysctl for temp feature interop */
+extern uint32_t sctp_auth_random_len; /* sysctl */
+
+/*
+ * Macros
+ */
+
+#define sctp_auth_is_required_chunk(chunk, list) ((list == NULL) ? (0) : (list->chunks[chunk] != 0))
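+/*
+ * Illustrative use (the chklist argument shown is an assumption; any
+ * sctp_auth_chklist_t pointer works):
+ *
+ *	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
+ *		... the peer requires DATA chunks to be authenticated ...
+ */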
+
+/*
+ * function prototypes
+ */
+
+/* socket option api functions */
+extern sctp_auth_chklist_t *sctp_alloc_chunklist(void);
+extern void sctp_free_chunklist(sctp_auth_chklist_t * chklist);
+extern void sctp_clear_chunklist(sctp_auth_chklist_t * chklist);
+extern sctp_auth_chklist_t *sctp_copy_chunklist(sctp_auth_chklist_t * chklist);
+extern int sctp_auth_add_chunk(uint8_t chunk, sctp_auth_chklist_t * list);
+extern int sctp_auth_delete_chunk(uint8_t chunk, sctp_auth_chklist_t * list);
+extern int sctp_auth_get_chklist_size(const sctp_auth_chklist_t * list);
+extern void sctp_auth_set_default_chunks(sctp_auth_chklist_t * list);
+extern int
+sctp_serialize_auth_chunks(const sctp_auth_chklist_t * list,
+ uint8_t * ptr);
+extern int sctp_pack_auth_chunks(const sctp_auth_chklist_t * list, uint8_t * ptr);
+extern int
+sctp_unpack_auth_chunks(const uint8_t * ptr, uint8_t num_chunks,
+ sctp_auth_chklist_t * list);
+
+/* key handling */
+extern sctp_key_t *sctp_alloc_key(uint32_t keylen);
+extern void sctp_free_key(sctp_key_t * key);
+extern void sctp_print_key(sctp_key_t * key, const char *str);
+extern void sctp_show_key(sctp_key_t * key, const char *str);
+extern sctp_key_t *sctp_generate_random_key(uint32_t keylen);
+extern sctp_key_t *sctp_set_key(uint8_t * key, uint32_t keylen);
+extern sctp_key_t *
+sctp_compute_hashkey(sctp_key_t * key1, sctp_key_t * key2,
+ sctp_key_t * shared);
+
+/* shared key handling */
+extern sctp_sharedkey_t *sctp_alloc_sharedkey(void);
+extern void sctp_free_sharedkey(sctp_sharedkey_t * skey);
+extern sctp_sharedkey_t *
+sctp_find_sharedkey(struct sctp_keyhead *shared_keys,
+ uint16_t key_id);
+extern void
+sctp_insert_sharedkey(struct sctp_keyhead *shared_keys,
+ sctp_sharedkey_t * new_skey);
+extern int
+sctp_copy_skeylist(const struct sctp_keyhead *src,
+ struct sctp_keyhead *dest);
+
+/* hmac list handling */
+extern sctp_hmaclist_t *sctp_alloc_hmaclist(uint8_t num_hmacs);
+extern void sctp_free_hmaclist(sctp_hmaclist_t * list);
+extern int sctp_auth_add_hmacid(sctp_hmaclist_t * list, uint16_t hmac_id);
+extern sctp_hmaclist_t *sctp_copy_hmaclist(sctp_hmaclist_t * list);
+extern sctp_hmaclist_t *sctp_default_supported_hmaclist(void);
+extern uint16_t
+sctp_negotiate_hmacid(sctp_hmaclist_t * peer,
+ sctp_hmaclist_t * local);
+extern int sctp_serialize_hmaclist(sctp_hmaclist_t * list, uint8_t * ptr);
+extern int
+sctp_verify_hmac_param(struct sctp_auth_hmac_algo *hmacs,
+ uint32_t num_hmacs);
+
+extern sctp_authinfo_t *sctp_alloc_authinfo(void);
+extern void sctp_free_authinfo(sctp_authinfo_t * authinfo);
+
+/* keyed-HMAC functions */
+extern uint32_t sctp_get_auth_chunk_len(uint16_t hmac_algo);
+extern uint32_t sctp_get_hmac_digest_len(uint16_t hmac_algo);
+extern uint32_t
+sctp_hmac(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
+ const uint8_t * text, uint32_t textlen, uint8_t * digest);
+extern int
+sctp_verify_hmac(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
+ const uint8_t * text, uint32_t textlen, uint8_t * digest,
+ uint32_t digestlen);
+extern uint32_t
+sctp_compute_hmac(uint16_t hmac_algo, sctp_key_t * key,
+ const uint8_t * text, uint32_t textlen, uint8_t * digest);
+extern int sctp_auth_is_supported_hmac(sctp_hmaclist_t * list, uint16_t id);
+
+/* mbuf versions */
+extern uint32_t
+sctp_hmac_m(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
+ struct mbuf *m, uint32_t m_offset, uint8_t * digest);
+extern uint32_t
+sctp_compute_hmac_m(uint16_t hmac_algo, sctp_key_t * key, struct mbuf *m,
+ uint32_t m_offset, uint8_t * digest);
+
+/*
+ * authentication routines
+ */
+extern void sctp_clear_cachedkeys(struct sctp_tcb *stcb, uint16_t keyid);
+extern void sctp_clear_cachedkeys_ep(struct sctp_inpcb *inp, uint16_t keyid);
+extern int sctp_delete_sharedkey(struct sctp_tcb *stcb, uint16_t keyid);
+extern int sctp_delete_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid);
+extern int sctp_auth_setactivekey(struct sctp_tcb *stcb, uint16_t keyid);
+extern int sctp_auth_setactivekey_ep(struct sctp_inpcb *inp, uint16_t keyid);
+
+extern void
+sctp_auth_get_cookie_params(struct sctp_tcb *stcb, struct mbuf *m,
+ uint32_t offset, uint32_t length);
+extern void
+sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset,
+ struct sctp_auth_chunk *auth,
+ struct sctp_tcb *stcb);
+extern struct mbuf *
+sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
+ struct sctp_auth_chunk **auth_ret,
+ uint32_t * offset, struct sctp_tcb *stcb,
+ uint8_t chunk);
+extern int
+sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *ch,
+ struct mbuf *m, uint32_t offset);
+extern void
+sctp_notify_authentication(struct sctp_tcb *stcb,
+ uint32_t indication, uint16_t keyid,
+ uint16_t alt_keyid);
+extern int
+ sctp_validate_init_auth_params(struct mbuf *m, int offset, int limit);
+extern void
+ sctp_initialize_auth_params(struct sctp_inpcb *inp, struct sctp_tcb *stcb);
+
+
+/* test functions */
+extern void sctp_test_hmac_sha1(void);
+extern void sctp_test_hmac_md5(void);
+extern void sctp_test_authkey(void);
+
+#endif /* __SCTP_AUTH_H__ */
diff --git a/sys/netinet/sctp_bsd_addr.c b/sys/netinet/sctp_bsd_addr.c
new file mode 100644
index 0000000..8186606
--- /dev/null
+++ b/sys/netinet/sctp_bsd_addr.c
@@ -0,0 +1,2032 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_ipsec.h"
+#include "opt_compat.h"
+#include "opt_inet6.h"
+#include "opt_inet.h"
+#include "opt_sctp.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/domain.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/resourcevar.h>
+#include <sys/uio.h>
+#ifdef INET6
+#include <sys/domain.h>
+#endif
+
+#include <sys/limits.h>
+#include <machine/cpu.h>
+
+#include <net/if.h>
+#include <net/if_types.h>
+
+#include <net/if_var.h>
+
+#include <net/route.h>
+
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#include <netinet/in_pcb.h>
+#include <netinet/in_var.h>
+#include <netinet/ip_var.h>
+
+#ifdef INET6
+#include <netinet/ip6.h>
+#include <netinet6/ip6_var.h>
+#include <netinet6/scope6_var.h>
+#include <netinet6/nd6.h>
+
+#include <netinet6/in6_pcb.h>
+
+#include <netinet/icmp6.h>
+
+#endif /* INET6 */
+
+
+
+#ifndef in6pcb
+#define in6pcb inpcb
+#endif
+
+
+#ifdef IPSEC
+#include <netinet6/ipsec.h>
+#include <netkey/key.h>
+#endif /* IPSEC */
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_bsd_addr.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_indata.h>
+
+/* XXX
+ * This module needs to be rewritten with an eye towards getting
+ * rid of the use of ifa.. and using another list method that George
+ * has told me of.
+ */
+
+#ifdef SCTP_DEBUG
+extern uint32_t sctp_debug_on;
+
+#endif
+
+static struct sockaddr_in *
+sctp_is_v4_ifa_addr_prefered(struct ifaddr *ifa, uint8_t loopscope, uint8_t ipv4_scope, uint8_t * sin_loop, uint8_t * sin_local)
+{
+ struct sockaddr_in *sin;
+
+	/*
+	 * Here we determine if it's a preferred address. A preferred
+	 * address means it is the same scope or a higher scope than the
+	 * destination.
+	 *
+	 *  L = loopback, P = private, G = global
+	 *
+	 *  src | dest | result
+	 *  --------------------
+	 *   L  |  L   |  yes
+	 *   P  |  L   |  yes
+	 *   G  |  L   |  yes
+	 *   L  |  P   |  no
+	 *   P  |  P   |  yes
+	 *   G  |  P   |  no
+	 *   L  |  G   |  no
+	 *   P  |  G   |  no
+	 *   G  |  G   |  yes
+	 */
+
+ if (ifa->ifa_addr->sa_family != AF_INET) {
+ /* forget non-v4 */
+ return (NULL);
+ }
+ /* Ok the address may be ok */
+ sin = (struct sockaddr_in *)ifa->ifa_addr;
+ if (sin->sin_addr.s_addr == 0) {
+ return (NULL);
+ }
+ *sin_local = *sin_loop = 0;
+ if ((ifa->ifa_ifp->if_type == IFT_LOOP) ||
+ (IN4_ISLOOPBACK_ADDRESS(&sin->sin_addr))) {
+ *sin_loop = 1;
+ *sin_local = 1;
+ }
+ if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
+ *sin_local = 1;
+ }
+	if (!loopscope && *sin_loop) {
+		/* It's a loopback address and we don't have loop scope */
+		return (NULL);
+	}
+	if (!ipv4_scope && *sin_local) {
+		/*
+		 * It's a private address, and we don't have private address
+		 * scope
+		 */
+		return (NULL);
+	}
+	if (((ipv4_scope == 0) && (loopscope == 0)) && (*sin_local)) {
+		/* it's a private src and a global dest */
+		return (NULL);
+	}
+	/* it's a preferred address */
+	return (sin);
+}
+
+static struct sockaddr_in *
+sctp_is_v4_ifa_addr_acceptable(struct ifaddr *ifa, uint8_t loopscope, uint8_t ipv4_scope, uint8_t * sin_loop, uint8_t * sin_local)
+{
+ struct sockaddr_in *sin;
+
+	/*
+	 * Here we determine if it's an acceptable address. An acceptable
+	 * address means it is the same scope or a higher scope, but we can
+	 * allow for NAT, which means it's ok to have a global dest and a
+	 * private src.
+	 *
+	 *  L = loopback, P = private, G = global
+	 *
+	 *  src | dest | result
+	 *  --------------------
+	 *   L  |  L   |  yes
+	 *   P  |  L   |  yes
+	 *   G  |  L   |  yes
+	 *   L  |  P   |  no
+	 *   P  |  P   |  yes
+	 *   G  |  P   |  yes - probably this won't work.
+	 *   L  |  G   |  no
+	 *   P  |  G   |  yes
+	 *   G  |  G   |  yes
+	 */
+
+ if (ifa->ifa_addr->sa_family != AF_INET) {
+ /* forget non-v4 */
+ return (NULL);
+ }
+ /* Ok the address may be ok */
+ sin = (struct sockaddr_in *)ifa->ifa_addr;
+ if (sin->sin_addr.s_addr == 0) {
+ return (NULL);
+ }
+ *sin_local = *sin_loop = 0;
+ if ((ifa->ifa_ifp->if_type == IFT_LOOP) ||
+ (IN4_ISLOOPBACK_ADDRESS(&sin->sin_addr))) {
+ *sin_loop = 1;
+ *sin_local = 1;
+ }
+ if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
+ *sin_local = 1;
+ }
+	if (!loopscope && *sin_loop) {
+		/* It's a loopback address and we don't have loop scope */
+		return (NULL);
+	}
+	/* it's an acceptable address */
+	return (sin);
+}
+
+/*
+ * This treats the address list on the ep as a restricted list (negative
+ * list). If the passed address is listed, then the address is NOT allowed
+ * on the association.
+ */
+int
+sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sockaddr *addr)
+{
+ struct sctp_laddr *laddr;
+
+#ifdef SCTP_DEBUG
+ int cnt = 0;
+
+#endif
+ if (stcb == NULL) {
+ /* There are no restrictions, no TCB :-) */
+ return (0);
+ }
+#ifdef SCTP_DEBUG
+ LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
+ cnt++;
+ }
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
+ printf("There are %d addresses on the restricted list\n", cnt);
+ }
+ cnt = 0;
+#endif
+ LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("Help I have fallen and I can't get up!\n");
+ }
+#endif
+ continue;
+ }
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
+ cnt++;
+ printf("Restricted address[%d]:", cnt);
+ sctp_print_address(laddr->ifa->ifa_addr);
+ }
+#endif
+ if (sctp_cmpaddr(addr, laddr->ifa->ifa_addr) == 1) {
+ /* Yes it is on the list */
+ return (1);
+ }
+ }
+ return (0);
+}
+
+static int
+sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct ifaddr *ifa)
+{
+ struct sctp_laddr *laddr;
+
+ if (ifa == NULL)
+ return (0);
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("Help I have fallen and I can't get up!\n");
+ }
+#endif
+ continue;
+ }
+ if (laddr->ifa->ifa_addr == NULL)
+ continue;
+ if (laddr->ifa == ifa)
+ /* same pointer */
+ return (1);
+ if (laddr->ifa->ifa_addr->sa_family != ifa->ifa_addr->sa_family) {
+ /* skip non compatible address comparison */
+ continue;
+ }
+ if (sctp_cmpaddr(ifa->ifa_addr, laddr->ifa->ifa_addr) == 1) {
+			/* Yes, it is on the endpoint's list */
+ return (1);
+ }
+ }
+ return (0);
+}
+
+
+
+static struct in_addr
+sctp_choose_v4_boundspecific_inp(struct sctp_inpcb *inp,
+ struct route *ro,
+ uint8_t ipv4_scope,
+ uint8_t loopscope)
+{
+ struct in_addr ans;
+ struct sctp_laddr *laddr;
+ struct sockaddr_in *sin;
+ struct ifnet *ifn;
+ struct ifaddr *ifa;
+ uint8_t sin_loop, sin_local;
+ struct rtentry *rt;
+
+	/*
+	 * First question: is the ifn we will emit on in our list? If so,
+	 * we want that one.
+	 */
+ rt = ro->ro_rt;
+ ifn = rt->rt_ifp;
+ if (ifn) {
+		/* is a preferred one on the interface we route out? */
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ sin = sctp_is_v4_ifa_addr_prefered(ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
+ if (sin == NULL)
+ continue;
+ if (sctp_is_addr_in_ep(inp, ifa)) {
+ return (sin->sin_addr);
+ }
+ }
+ /* is an acceptable one on the interface we route out? */
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ sin = sctp_is_v4_ifa_addr_acceptable(ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
+ if (sin == NULL)
+ continue;
+ if (sctp_is_addr_in_ep(inp, ifa)) {
+ return (sin->sin_addr);
+ }
+ }
+ }
+	/* ok, what about a preferred address in the inp */
+ for (laddr = LIST_FIRST(&inp->sctp_addr_list);
+ laddr && (laddr != inp->next_addr_touse);
+ laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ sin = sctp_is_v4_ifa_addr_prefered(laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
+ if (sin == NULL)
+ continue;
+ return (sin->sin_addr);
+
+ }
+ /* ok, what about an acceptable address in the inp */
+ for (laddr = LIST_FIRST(&inp->sctp_addr_list);
+ laddr && (laddr != inp->next_addr_touse);
+ laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ sin = sctp_is_v4_ifa_addr_acceptable(laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
+ if (sin == NULL)
+ continue;
+ return (sin->sin_addr);
+
+ }
+
+	/*
+	 * No bound address can be a source for the destination; we are in
+	 * trouble.
+	 */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+		printf("Src address selection for EP: no acceptable src address found\n");
+ }
+#endif
+ RTFREE(ro->ro_rt);
+ ro->ro_rt = NULL;
+ memset(&ans, 0, sizeof(ans));
+ return (ans);
+}
+
+
+
+static struct in_addr
+sctp_choose_v4_boundspecific_stcb(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ struct route *ro,
+ uint8_t ipv4_scope,
+ uint8_t loopscope,
+ int non_asoc_addr_ok)
+{
+	/*
+	 * Here we have two cases: bound-specific with ASCONF allowed, and
+	 * bound-specific with ASCONF not allowed.
+	 */
+ struct sctp_laddr *laddr, *starting_point;
+ struct in_addr ans;
+ struct ifnet *ifn;
+ struct ifaddr *ifa;
+ uint8_t sin_loop, sin_local, start_at_beginning = 0;
+ struct sockaddr_in *sin;
+ struct rtentry *rt;
+
+	/*
+	 * First question: is the ifn we will emit on in our list? If so,
+	 * we want that one.
+	 */
+ rt = ro->ro_rt;
+ ifn = rt->rt_ifp;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF)) {
+		/*
+		 * Here we use the list of addresses on the endpoint. The
+		 * addresses listed on the "restricted" list are just that:
+		 * addresses that have not been added and can't be used
+		 * (unless non_asoc_addr_ok is set).
+		 */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+			printf("Have a STCB - asconf allowed, not bound all, have a negative list\n");
+ }
+#endif
+		/*
+		 * First question: is the ifn we will emit on in our list?
+		 * If so, we want that one.
+		 */
+ if (ifn) {
+			/* first try for a preferred address on the ep */
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ if (sctp_is_addr_in_ep(inp, ifa)) {
+ sin = sctp_is_v4_ifa_addr_prefered(ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
+ if (sin == NULL)
+ continue;
+ if ((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin))) {
+ /* on the no-no list */
+ continue;
+ }
+ return (sin->sin_addr);
+ }
+ }
+ /* next try for an acceptable address on the ep */
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ if (sctp_is_addr_in_ep(inp, ifa)) {
+ sin = sctp_is_v4_ifa_addr_acceptable(ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
+ if (sin == NULL)
+ continue;
+ if ((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin))) {
+ /* on the no-no list */
+ continue;
+ }
+ return (sin->sin_addr);
+ }
+ }
+
+ }
+		/*
+		 * If we can't find one like that then we must look at all
+		 * addresses bound, picking a preferable one first and an
+		 * acceptable one second.
+		 */
+ starting_point = stcb->asoc.last_used_address;
+sctpv4_from_the_top:
+ if (stcb->asoc.last_used_address == NULL) {
+ start_at_beginning = 1;
+ stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
+ }
+ /* search beginning with the last used address */
+ for (laddr = stcb->asoc.last_used_address; laddr;
+ laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ sin = sctp_is_v4_ifa_addr_prefered(laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
+ if (sin == NULL)
+ continue;
+ if ((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin))) {
+ /* on the no-no list */
+ continue;
+ }
+ return (sin->sin_addr);
+
+ }
+ if (start_at_beginning == 0) {
+ stcb->asoc.last_used_address = NULL;
+ goto sctpv4_from_the_top;
+ }
+ /* now try for any higher scope than the destination */
+ stcb->asoc.last_used_address = starting_point;
+ start_at_beginning = 0;
+sctpv4_from_the_top2:
+ if (stcb->asoc.last_used_address == NULL) {
+ start_at_beginning = 1;
+ stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
+ }
+ /* search beginning with the last used address */
+ for (laddr = stcb->asoc.last_used_address; laddr;
+ laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ sin = sctp_is_v4_ifa_addr_acceptable(laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
+ if (sin == NULL)
+ continue;
+ if ((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin))) {
+ /* on the no-no list */
+ continue;
+ }
+ return (sin->sin_addr);
+ }
+ if (start_at_beginning == 0) {
+ stcb->asoc.last_used_address = NULL;
+ goto sctpv4_from_the_top2;
+ }
+ } else {
+		/*
+		 * Here we have an address list on the association; those
+		 * are the only valid source addresses that we can use.
+		 */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+			printf("Have a STCB - no asconf allowed, not bound all, have a positive list\n");
+ }
+#endif
+ /*
+ * First look at all addresses for one that is on the
+ * interface we route out
+ */
+ LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
+ sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ sin = sctp_is_v4_ifa_addr_prefered(laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
+ if (sin == NULL)
+ continue;
+			/*
+			 * First question: is laddr->ifa an address
+			 * associated with the emit interface?
+			 */
+ if (ifn) {
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ if (laddr->ifa == ifa) {
+ sin = (struct sockaddr_in *)laddr->ifa->ifa_addr;
+ return (sin->sin_addr);
+ }
+ if (sctp_cmpaddr(ifa->ifa_addr, laddr->ifa->ifa_addr) == 1) {
+ sin = (struct sockaddr_in *)laddr->ifa->ifa_addr;
+ return (sin->sin_addr);
+ }
+ }
+ }
+ }
+ /* what about an acceptable one on the interface? */
+ LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
+ sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ sin = sctp_is_v4_ifa_addr_acceptable(laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
+ if (sin == NULL)
+ continue;
+			/*
+			 * First question: is laddr->ifa an address
+			 * associated with the emit interface?
+			 */
+ if (ifn) {
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ if (laddr->ifa == ifa) {
+ sin = (struct sockaddr_in *)laddr->ifa->ifa_addr;
+ return (sin->sin_addr);
+ }
+ if (sctp_cmpaddr(ifa->ifa_addr, laddr->ifa->ifa_addr) == 1) {
+ sin = (struct sockaddr_in *)laddr->ifa->ifa_addr;
+ return (sin->sin_addr);
+ }
+ }
+ }
+ }
+ /* ok, next one that is preferable in general */
+ LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
+ sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ sin = sctp_is_v4_ifa_addr_prefered(laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
+ if (sin == NULL)
+ continue;
+ return (sin->sin_addr);
+ }
+
+ /* last, what about one that is acceptable */
+ LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
+ sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ sin = sctp_is_v4_ifa_addr_acceptable(laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
+ if (sin == NULL)
+ continue;
+ return (sin->sin_addr);
+ }
+ }
+ RTFREE(ro->ro_rt);
+ ro->ro_rt = NULL;
+ memset(&ans, 0, sizeof(ans));
+ return (ans);
+}
+
+static struct sockaddr_in *
+sctp_select_v4_nth_prefered_addr_from_ifn_boundall(struct ifnet *ifn, struct sctp_tcb *stcb, int non_asoc_addr_ok,
+ uint8_t loopscope, uint8_t ipv4_scope, int cur_addr_num)
+{
+ struct ifaddr *ifa;
+ struct sockaddr_in *sin;
+ uint8_t sin_loop, sin_local;
+ int num_eligible_addr = 0;
+
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ sin = sctp_is_v4_ifa_addr_prefered(ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
+ if (sin == NULL)
+ continue;
+ if (stcb) {
+ if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, (struct sockaddr *)sin)) {
+ /*
+ * It is restricted for some reason..
+ * probably not yet added.
+ */
+ continue;
+ }
+ }
+		if (cur_addr_num == num_eligible_addr) {
+			return (sin);
+		}
+		/* count this eligible address so the nth selection advances */
+		num_eligible_addr++;
+ }
+ return (NULL);
+}
+
+
+static int
+sctp_count_v4_num_prefered_boundall(struct ifnet *ifn, struct sctp_tcb *stcb, int non_asoc_addr_ok,
+ uint8_t loopscope, uint8_t ipv4_scope, uint8_t * sin_loop, uint8_t * sin_local)
+{
+ struct ifaddr *ifa;
+ struct sockaddr_in *sin;
+ int num_eligible_addr = 0;
+
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ sin = sctp_is_v4_ifa_addr_prefered(ifa, loopscope, ipv4_scope, sin_loop, sin_local);
+ if (sin == NULL)
+ continue;
+ if (stcb) {
+ if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, (struct sockaddr *)sin)) {
+ /*
+ * It is restricted for some reason..
+ * probably not yet added.
+ */
+ continue;
+ }
+ }
+ num_eligible_addr++;
+ }
+ return (num_eligible_addr);
+
+}
+
+static struct in_addr
+sctp_choose_v4_boundall(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ struct route *ro,
+ uint8_t ipv4_scope,
+ uint8_t loopscope,
+ int non_asoc_addr_ok)
+{
+ int cur_addr_num = 0, num_prefered = 0;
+ uint8_t sin_loop, sin_local;
+ struct ifnet *ifn;
+ struct sockaddr_in *sin;
+ struct in_addr ans;
+ struct ifaddr *ifa;
+ struct rtentry *rt;
+
+ /*
+ * For v4 we can use (in boundall) any address in the association.
+ * If non_asoc_addr_ok is set we can use any address (at least in
+	 * theory). So we look for preferred addresses first. If we find one,
+ * we use it. Otherwise we next try to get an address on the
+ * interface, which we should be able to do (unless non_asoc_addr_ok
+ * is false and we are routed out that way). In these cases where we
+ * can't use the address of the interface we go through all the
+ * ifn's looking for an address we can use and fill that in. Punting
+ * means we send back address 0, which will probably cause problems
+ * actually since then IP will fill in the address of the route ifn,
+ * which means we probably already rejected it.. i.e. here comes an
+ * abort :-<.
+ */
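+	/*
+	 * Fallback order used below:
+	 *   plan a: a preferred address on the interface we route out.
+	 *   plan b: an acceptable address on that same interface.
+	 *   plan c: a preferred address on any other interface.
+	 *   plan d: any acceptable address on any other interface.
+	 */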
+ rt = ro->ro_rt;
+ ifn = rt->rt_ifp;
+ if (net) {
+ cur_addr_num = net->indx_of_eligible_next_to_use;
+ }
+ if (ifn == NULL) {
+ goto bound_all_v4_plan_c;
+ }
+ num_prefered = sctp_count_v4_num_prefered_boundall(ifn, stcb, non_asoc_addr_ok, loopscope, ipv4_scope, &sin_loop, &sin_local);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+		printf("Found %d preferred source addresses\n", num_prefered);
+ }
+#endif
+ if (num_prefered == 0) {
+ /*
+ * no eligible addresses, we must use some other interface
+ * address if we can find one.
+ */
+ goto bound_all_v4_plan_b;
+ }
+	/*
+	 * Ok, we have num_prefered set to how many we can use; this may
+	 * vary from call to call due to addresses being deprecated etc..
+	 */
+ if (cur_addr_num >= num_prefered) {
+ cur_addr_num = 0;
+ }
+ /*
+ * select the nth address from the list (where cur_addr_num is the
+ * nth) and 0 is the first one, 1 is the second one etc...
+ */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("cur_addr_num:%d\n", cur_addr_num);
+ }
+#endif
+ sin = sctp_select_v4_nth_prefered_addr_from_ifn_boundall(ifn, stcb, non_asoc_addr_ok, loopscope,
+ ipv4_scope, cur_addr_num);
+
+	/* if sin is NULL something changed?? fall through to plan_b */
+ if (sin) {
+ return (sin->sin_addr);
+ }
+ /*
+ * plan_b: Look at the interface that we emit on and see if we can
+ * find an acceptable address.
+ */
+bound_all_v4_plan_b:
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ sin = sctp_is_v4_ifa_addr_acceptable(ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
+ if (sin == NULL)
+ continue;
+ if (stcb) {
+ if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, (struct sockaddr *)sin)) {
+ /*
+ * It is restricted for some reason..
+ * probably not yet added.
+ */
+ continue;
+ }
+ }
+ return (sin->sin_addr);
+ }
+	/*
+	 * plan_c: Look at all interfaces and find a preferred address. If
+	 * we reach here we are in trouble, I think.
+	 */
+bound_all_v4_plan_c:
+ for (ifn = TAILQ_FIRST(&ifnet);
+ ifn && (ifn != inp->next_ifn_touse);
+ ifn = TAILQ_NEXT(ifn, if_list)) {
+ if (loopscope == 0 && ifn->if_type == IFT_LOOP) {
+ /* wrong base scope */
+ continue;
+ }
+ if (ifn == rt->rt_ifp)
+ /* already looked at this guy */
+ continue;
+ num_prefered = sctp_count_v4_num_prefered_boundall(ifn, stcb, non_asoc_addr_ok,
+ loopscope, ipv4_scope, &sin_loop, &sin_local);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+			printf("Found ifn:%p %d preferred source addresses\n", ifn, num_prefered);
+ }
+#endif
+ if (num_prefered == 0) {
+ /*
+ * None on this interface.
+ */
+ continue;
+ }
+		/*
+		 * Ok, we have num_prefered set to how many we can use;
+		 * this may vary from call to call due to addresses being
+		 * deprecated etc..
+		 */
+ if (cur_addr_num >= num_prefered) {
+ cur_addr_num = 0;
+ }
+ sin = sctp_select_v4_nth_prefered_addr_from_ifn_boundall(ifn, stcb, non_asoc_addr_ok, loopscope,
+ ipv4_scope, cur_addr_num);
+ if (sin == NULL)
+ continue;
+ return (sin->sin_addr);
+
+ }
+
+	/*
+	 * plan_d: We are in deep trouble. No preferred address on any
+	 * interface. And the emit interface does not even have an
+	 * acceptable address. Take anything we can get! If this does not
+	 * work we are probably going to emit a packet that will elicit an
+	 * ABORT, falling through.
+	 */
+
+ for (ifn = TAILQ_FIRST(&ifnet);
+ ifn && (ifn != inp->next_ifn_touse);
+ ifn = TAILQ_NEXT(ifn, if_list)) {
+ if (loopscope == 0 && ifn->if_type == IFT_LOOP) {
+ /* wrong base scope */
+ continue;
+ }
+ if (ifn == rt->rt_ifp)
+ /* already looked at this guy */
+ continue;
+
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ sin = sctp_is_v4_ifa_addr_acceptable(ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
+ if (sin == NULL)
+ continue;
+ if (stcb) {
+ if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, (struct sockaddr *)sin)) {
+ /*
+ * It is restricted for some
+ * reason.. probably not yet added.
+ */
+ continue;
+ }
+ }
+ return (sin->sin_addr);
+ }
+ }
+	/*
+	 * Ok, we can find NO address to source from that is not on our
+	 * negative list. It is either the special ASCONF case, where we are
+	 * sourcing from an intf that has been ifconfig'd to a different
+	 * address (i.e. it holds an ADD/DEL/SET-PRIM and the proper lookup
+	 * address), OR we are hosed, and this baby is going to abort the
+	 * association.
+	 */
+ if (non_asoc_addr_ok) {
+ return (((struct sockaddr_in *)(rt->rt_ifa->ifa_addr))->sin_addr);
+ } else {
+ RTFREE(ro->ro_rt);
+ ro->ro_rt = NULL;
+ memset(&ans, 0, sizeof(ans));
+ return (ans);
+ }
+}
+
+
+
+/* tcb may be NULL */
+struct in_addr
+sctp_ipv4_source_address_selection(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb, struct route *ro, struct sctp_nets *net,
+ int non_asoc_addr_ok)
+{
+ struct in_addr ans;
+ struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
+ uint8_t ipv4_scope, loopscope;
+
+	/*
+	 * Rules:
+	 * - Find the route if needed, cache if I can.
+	 * - Look at the interface address in the route; is it in the bound
+	 *   list? If so we have the best source.
+	 * - If not we must rotate amongst the addresses.
+	 *
+	 * Caveats and issues
+	 *
+	 * Do we need to pay attention to scope? We can have a private
+	 * address or a global address we are sourcing or sending to. So,
+	 * if we draw it out:
+	 *
+	 *      source  | dest    | result
+	 *   a) Private | Global  | NAT?
+	 *   b) Private | Private | No problem
+	 *   c) Global  | Private | Huh, how will this work?
+	 *   d) Global  | Global  | No problem
+	 *
+	 * And then we add to that what happens if there are multiple
+	 * addresses assigned to an interface. Remember the ifa's on an ifn
+	 * are a linked list of addresses, so one interface can have more
+	 * than one IPv4 address. What happens if we have both a private and
+	 * a global address? Do we then use the context of the destination
+	 * to sort out which one is best? And what about NATs: sending P->G
+	 * may get you a NAT translation, or should you select the G that's
+	 * on the interface in preference?
+	 *
+	 * Decisions:
+	 *
+	 * - Count the number of addresses on the interface.
+	 * - If it's one, no problem except case <c>; for <a> we will assume
+	 *   a NAT out there.
+	 * - If there is more than one, then we need to worry about scope P
+	 *   or G. We should prefer G -> G and P -> P if possible. Then as a
+	 *   secondary, fall back to mixed types, G->P being a last-ditch
+	 *   one.
+	 * - The above all works for bound all, but for bound specific we
+	 *   need to use the same concept but instead only consider the
+	 *   bound addresses. If the bound set is NOT assigned to the
+	 *   interface then we must use rotation amongst them.
+	 *
+	 * Notes: For v4, we can always punt and let ip_output decide by
+	 * sending back a source of 0.0.0.0
+	 */
+
+ if (ro->ro_rt == NULL) {
+		/* Need a route to cache. */
+ rtalloc_ign(ro, 0UL);
+ }
+ if (ro->ro_rt == NULL) {
+ /* No route to host .. punt */
+ memset(&ans, 0, sizeof(ans));
+ return (ans);
+ }
+ /* Setup our scopes */
+ if (stcb) {
+ ipv4_scope = stcb->asoc.ipv4_local_scope;
+ loopscope = stcb->asoc.loopback_scope;
+ } else {
+ /* Scope based on outbound address */
+ if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
+ ipv4_scope = 1;
+ loopscope = 0;
+ } else if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
+ ipv4_scope = 1;
+ loopscope = 1;
+ } else {
+ ipv4_scope = 0;
+ loopscope = 0;
+ }
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /*
+ * When bound to all if the address list is set it is a
+ * negative list. Addresses being added by asconf.
+ */
+ return (sctp_choose_v4_boundall(inp, stcb, net, ro,
+ ipv4_scope, loopscope, non_asoc_addr_ok));
+ }
+	/*
+	 * Three possibilities here:
+	 *
+	 * a) stcb is NULL, which means we operate only from the list of
+	 *    addresses (ifa's) bound to the assoc and we care not about
+	 *    the list.
+	 * b) stcb is NOT-NULL, which means we have an assoc structure and
+	 *    auto-asconf is on. This means that the list of addresses is a
+	 *    NOT list. We use the list from the inp, but any listed address
+	 *    in our list is NOT yet added. However if non_asoc_addr_ok is
+	 *    set we CAN use an address NOT available (i.e. being added).
+	 *    It's a negative list.
+	 * c) stcb is NOT-NULL, which means we have an assoc structure and
+	 *    auto-asconf is off. This means that the list of addresses is
+	 *    the ONLY addresses I can use.. it's positive.
+	 *
+	 * Note we collapse b & c into the same function just like in the v6
+	 * address selection.
+	 */
+ if (stcb) {
+ return (sctp_choose_v4_boundspecific_stcb(inp, stcb, net,
+ ro, ipv4_scope, loopscope, non_asoc_addr_ok));
+ } else {
+ return (sctp_choose_v4_boundspecific_inp(inp, ro,
+ ipv4_scope, loopscope));
+ }
+ /* this should not be reached */
+ memset(&ans, 0, sizeof(ans));
+ return (ans);
+}
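+
+/*
+ * Usage sketch (hypothetical caller, for illustration only): per the notes
+ * above, the v4 selector punts by handing back INADDR_ANY when it cannot
+ * find a source, letting ip_output choose, e.g.:
+ *
+ *	struct route ro;
+ *	struct in_addr src;
+ *
+ *	memset(&ro, 0, sizeof(ro));
+ *	... fill ro.ro_dst with the peer's sockaddr_in ...
+ *	src = sctp_ipv4_source_address_selection(inp, stcb, &ro, net, 0);
+ *	if (src.s_addr == INADDR_ANY) {
+ *		... either no route existed (ro.ro_rt == NULL) or no
+ *		... acceptable bound address was found
+ *	}
+ */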
+
+
+
+static struct sockaddr_in6 *
+sctp_is_v6_ifa_addr_acceptable(struct ifaddr *ifa, int loopscope, int loc_scope, int *sin_loop, int *sin_local)
+{
+ struct in6_ifaddr *ifa6;
+ struct sockaddr_in6 *sin6;
+
+
+ if (ifa->ifa_addr->sa_family != AF_INET6) {
+ /* forget non-v6 */
+ return (NULL);
+ }
+ ifa6 = (struct in6_ifaddr *)ifa;
+ /* ok to use deprecated addresses? */
+ if (!ip6_use_deprecated) {
+ if (IFA6_IS_DEPRECATED(ifa6)) {
+ /* can't use this type */
+ return (NULL);
+ }
+ }
+ /* are we ok, with the current state of this address? */
+ if (ifa6->ia6_flags &
+ (IN6_IFF_DETACHED | IN6_IFF_NOTREADY | IN6_IFF_ANYCAST)) {
+ /* Can't use these types */
+ return (NULL);
+ }
+ /* Ok the address may be ok */
+ sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
+ *sin_local = *sin_loop = 0;
+ if ((ifa->ifa_ifp->if_type == IFT_LOOP) ||
+ (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr))) {
+ *sin_loop = 1;
+ }
+ if (!loopscope && *sin_loop) {
+		/* It's a loopback address and we don't have loop scope */
+ return (NULL);
+ }
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+		/* we skip unspecified addresses */
+ return (NULL);
+ }
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ *sin_local = 1;
+ }
+ if (!loc_scope && *sin_local) {
+ /*
+		 * It's a link-local address, and we don't have link-local
+ * scope
+ */
+ return (NULL);
+ }
+ return (sin6);
+}
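+
+/*
+ * To make the acceptability filter above concrete (hypothetical
+ * addresses): with loopscope == 0 and loc_scope == 0, a global address
+ * such as 2001:db8::1 comes back with *sin_loop == 0 and *sin_local == 0;
+ * fe80::1 is rejected (link local without local scope); ::1 is rejected
+ * (loopback without loop scope); and any ifa flagged IN6_IFF_DETACHED,
+ * IN6_IFF_NOTREADY or IN6_IFF_ANYCAST is rejected regardless of scope.
+ */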
+
+
+static struct sockaddr_in6 *
+sctp_choose_v6_boundspecific_stcb(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ struct route *ro,
+ uint8_t loc_scope,
+ uint8_t loopscope,
+ int non_asoc_addr_ok)
+{
+ /*
+ * Each endpoint has a list of local addresses associated with it.
+ * The address list is either a "negative list" i.e. those addresses
+	 * that are NOT allowed to be used as a source OR a "positive list"
+ * i.e. those addresses that CAN be used.
+ *
+	 * It's a negative list if asconf is allowed. What we do in this case is
+ * use the ep address list BUT we have to cross check it against the
+ * negative list.
+ *
+ * In the case where NO asconf is allowed, we have just a straight
+ * association level list that we must use to find a source address.
+ */
+ struct sctp_laddr *laddr, *starting_point;
+ struct sockaddr_in6 *sin6;
+ int sin_loop, sin_local;
+ int start_at_beginning = 0;
+ struct ifnet *ifn;
+ struct ifaddr *ifa;
+ struct rtentry *rt;
+
+ rt = ro->ro_rt;
+ ifn = rt->rt_ifp;
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF)) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+			printf("Have a STCB - asconf allowed, not bound all have a negative list\n");
+ }
+#endif
+ /*
+ * first question, is the ifn we will emit on in our list,
+ * if so, we want that one.
+ */
+ if (ifn) {
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ if (sctp_is_addr_in_ep(inp, ifa)) {
+ sin6 = sctp_is_v6_ifa_addr_acceptable(ifa, loopscope, loc_scope, &sin_loop, &sin_local);
+ if (sin6 == NULL)
+ continue;
+ if ((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin6))) {
+ /* on the no-no list */
+ continue;
+ }
+ return (sin6);
+ }
+ }
+ }
+ starting_point = stcb->asoc.last_used_address;
+ /* First try for matching scope */
+sctp_from_the_top:
+ if (stcb->asoc.last_used_address == NULL) {
+ start_at_beginning = 1;
+ stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
+ }
+ /* search beginning with the last used address */
+ for (laddr = stcb->asoc.last_used_address; laddr;
+ laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ sin6 = sctp_is_v6_ifa_addr_acceptable(laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
+ if (sin6 == NULL)
+ continue;
+ if ((non_asoc_addr_ok == 0) && (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin6))) {
+ /* on the no-no list */
+ continue;
+ }
+ /* is it of matching scope ? */
+ if ((loopscope == 0) &&
+ (loc_scope == 0) &&
+ (sin_loop == 0) &&
+ (sin_local == 0)) {
+ /* all of global scope we are ok with it */
+ return (sin6);
+ }
+ if (loopscope && sin_loop)
+				/* both on the loopback, that's ok */
+ return (sin6);
+ if (loc_scope && sin_local)
+ /* both local scope */
+ return (sin6);
+
+ }
+ if (start_at_beginning == 0) {
+ stcb->asoc.last_used_address = NULL;
+ goto sctp_from_the_top;
+ }
+ /* now try for any higher scope than the destination */
+ stcb->asoc.last_used_address = starting_point;
+ start_at_beginning = 0;
+sctp_from_the_top2:
+ if (stcb->asoc.last_used_address == NULL) {
+ start_at_beginning = 1;
+ stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
+ }
+ /* search beginning with the last used address */
+ for (laddr = stcb->asoc.last_used_address; laddr;
+ laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ sin6 = sctp_is_v6_ifa_addr_acceptable(laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
+ if (sin6 == NULL)
+ continue;
+ if ((non_asoc_addr_ok == 0) && (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin6))) {
+ /* on the no-no list */
+ continue;
+ }
+ return (sin6);
+ }
+ if (start_at_beginning == 0) {
+ stcb->asoc.last_used_address = NULL;
+ goto sctp_from_the_top2;
+ }
+ } else {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+			printf("Have a STCB - no asconf allowed, not bound all have a positive list\n");
+ }
+#endif
+ /* First try for interface output match */
+ LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
+ sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ sin6 = sctp_is_v6_ifa_addr_acceptable(laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
+ if (sin6 == NULL)
+ continue;
+ /*
+ * first question, is laddr->ifa an address
+ * associated with the emit interface
+ */
+ if (ifn) {
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ if (laddr->ifa == ifa) {
+ sin6 = (struct sockaddr_in6 *)laddr->ifa->ifa_addr;
+ return (sin6);
+ }
+ if (sctp_cmpaddr(ifa->ifa_addr, laddr->ifa->ifa_addr) == 1) {
+ sin6 = (struct sockaddr_in6 *)laddr->ifa->ifa_addr;
+ return (sin6);
+ }
+ }
+ }
+ }
+ /* Next try for matching scope */
+ LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
+ sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ sin6 = sctp_is_v6_ifa_addr_acceptable(laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
+ if (sin6 == NULL)
+ continue;
+
+ if ((loopscope == 0) &&
+ (loc_scope == 0) &&
+ (sin_loop == 0) &&
+ (sin_local == 0)) {
+ /* all of global scope we are ok with it */
+ return (sin6);
+ }
+ if (loopscope && sin_loop)
+				/* both on the loopback, that's ok */
+ return (sin6);
+ if (loc_scope && sin_local)
+ /* both local scope */
+ return (sin6);
+ }
+		/* ok, now try for any acceptable address of higher scope */
+ LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
+ sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ sin6 = sctp_is_v6_ifa_addr_acceptable(laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
+ if (sin6 == NULL)
+ continue;
+ return (sin6);
+ }
+ }
+ RTFREE(ro->ro_rt);
+ ro->ro_rt = NULL;
+ return (NULL);
+}
+
+static struct sockaddr_in6 *
+sctp_choose_v6_boundspecific_inp(struct sctp_inpcb *inp,
+ struct route *ro,
+ uint8_t loc_scope,
+ uint8_t loopscope)
+{
+ /*
+ * Here we are bound specific and have only an inp. We must find an
+ * address that is bound that we can give out as a src address. We
+ * prefer two addresses of same scope if we can find them that way.
+ */
+ struct sctp_laddr *laddr;
+ struct sockaddr_in6 *sin6;
+ struct ifnet *ifn;
+ struct ifaddr *ifa;
+ int sin_loop, sin_local;
+ struct rtentry *rt;
+
+ /*
+ * first question, is the ifn we will emit on in our list, if so, we
+ * want that one.
+ */
+
+ rt = ro->ro_rt;
+ ifn = rt->rt_ifp;
+ if (ifn) {
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ sin6 = sctp_is_v6_ifa_addr_acceptable(ifa, loopscope, loc_scope, &sin_loop, &sin_local);
+ if (sin6 == NULL)
+ continue;
+ if (sctp_is_addr_in_ep(inp, ifa)) {
+ return (sin6);
+ }
+ }
+ }
+ for (laddr = LIST_FIRST(&inp->sctp_addr_list);
+ laddr && (laddr != inp->next_addr_touse);
+ laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ sin6 = sctp_is_v6_ifa_addr_acceptable(laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
+ if (sin6 == NULL)
+ continue;
+
+ if ((loopscope == 0) &&
+ (loc_scope == 0) &&
+ (sin_loop == 0) &&
+ (sin_local == 0)) {
+ /* all of global scope we are ok with it */
+ return (sin6);
+ }
+ if (loopscope && sin_loop)
+			/* both on the loopback, that's ok */
+ return (sin6);
+ if (loc_scope && sin_local)
+ /* both local scope */
+ return (sin6);
+
+ }
+ /*
+ * if we reach here, we could not find two addresses of the same
+	 * scope to give out. Let's look for any higher-level scope for a
+ * source address.
+ */
+ for (laddr = LIST_FIRST(&inp->sctp_addr_list);
+ laddr && (laddr != inp->next_addr_touse);
+ laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ sin6 = sctp_is_v6_ifa_addr_acceptable(laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
+ if (sin6 == NULL)
+ continue;
+ return (sin6);
+ }
+ /* no address bound can be a source for the destination */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+		printf("Src address selection for EP, no acceptable src address found for the destination\n");
+ }
+#endif
+ RTFREE(ro->ro_rt);
+ ro->ro_rt = NULL;
+ return (NULL);
+}
+
+
+static struct sockaddr_in6 *
+sctp_select_v6_nth_addr_from_ifn_boundall(struct ifnet *ifn, struct sctp_tcb *stcb, int non_asoc_addr_ok, uint8_t loopscope,
+ uint8_t loc_scope, int cur_addr_num, int match_scope)
+{
+ struct ifaddr *ifa;
+ struct sockaddr_in6 *sin6;
+ int sin_loop, sin_local;
+ int num_eligible_addr = 0;
+
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ sin6 = sctp_is_v6_ifa_addr_acceptable(ifa, loopscope, loc_scope, &sin_loop, &sin_local);
+ if (sin6 == NULL)
+ continue;
+ if (stcb) {
+ if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, (struct sockaddr *)sin6)) {
+ /*
+ * It is restricted for some reason..
+ * probably not yet added.
+ */
+ continue;
+ }
+ }
+ if (match_scope) {
+ /* Here we are asked to match scope if possible */
+ if (loopscope && sin_loop)
+ /* src and destination are loopback scope */
+ return (sin6);
+ if (loc_scope && sin_local)
+ /* src and destination are local scope */
+ return (sin6);
+ if ((loopscope == 0) &&
+ (loc_scope == 0) &&
+ (sin_loop == 0) &&
+ (sin_local == 0)) {
+ /* src and destination are global scope */
+ return (sin6);
+ }
+ continue;
+ }
+ if (num_eligible_addr == cur_addr_num) {
+ /* this is it */
+ return (sin6);
+ }
+ num_eligible_addr++;
+ }
+ return (NULL);
+}
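+
+/*
+ * Worked example of the nth-address selection above (hypothetical
+ * interface): suppose ifn carries [fe80::1, 2001:db8::1, 2001:db8::2] and
+ * the destination is global (loopscope == 0, loc_scope == 0). The eligible
+ * list collapses to [2001:db8::1, 2001:db8::2], so cur_addr_num == 0 picks
+ * the first and cur_addr_num == 1 the second. With match_scope set, the
+ * first address whose scope matches the destination wins instead.
+ */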
+
+
+static int
+sctp_count_v6_num_eligible_boundall(struct ifnet *ifn, struct sctp_tcb *stcb,
+ int non_asoc_addr_ok, uint8_t loopscope, uint8_t loc_scope)
+{
+ struct ifaddr *ifa;
+ struct sockaddr_in6 *sin6;
+ int num_eligible_addr = 0;
+ int sin_loop, sin_local;
+
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ sin6 = sctp_is_v6_ifa_addr_acceptable(ifa, loopscope, loc_scope, &sin_loop, &sin_local);
+ if (sin6 == NULL)
+ continue;
+ if (stcb) {
+ if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, (struct sockaddr *)sin6)) {
+ /*
+ * It is restricted for some reason..
+ * probably not yet added.
+ */
+ continue;
+ }
+ }
+ num_eligible_addr++;
+ }
+ return (num_eligible_addr);
+}
+
+
+static struct sockaddr_in6 *
+sctp_choose_v6_boundall(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ struct route *ro,
+ uint8_t loc_scope,
+ uint8_t loopscope,
+ int non_asoc_addr_ok)
+{
+ /*
+ * Ok, we are bound all SO any address is ok to use as long as it is
+ * NOT in the negative list.
+ */
+ int num_eligible_addr;
+ int cur_addr_num = 0;
+ int started_at_beginning = 0;
+ int match_scope_prefered;
+
+ /*
+ * first question is, how many eligible addresses are there for the
+ * destination ifn that we are using that are within the proper
+ * scope?
+ */
+ struct ifnet *ifn;
+ struct sockaddr_in6 *sin6;
+ struct rtentry *rt;
+
+ rt = ro->ro_rt;
+ ifn = rt->rt_ifp;
+ if (net) {
+ cur_addr_num = net->indx_of_eligible_next_to_use;
+ }
+ if (cur_addr_num == 0) {
+ match_scope_prefered = 1;
+ } else {
+ match_scope_prefered = 0;
+ }
+ num_eligible_addr = sctp_count_v6_num_eligible_boundall(ifn, stcb, non_asoc_addr_ok, loopscope, loc_scope);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("Found %d eligible source addresses\n", num_eligible_addr);
+ }
+#endif
+ if (num_eligible_addr == 0) {
+ /*
+ * no eligible addresses, we must use some other interface
+ * address if we can find one.
+ */
+ goto bound_all_v6_plan_b;
+ }
+ /*
+ * Ok we have num_eligible_addr set with how many we can use, this
+ * may vary from call to call due to addresses being deprecated
+ * etc..
+ */
+ if (cur_addr_num >= num_eligible_addr) {
+ cur_addr_num = 0;
+ }
+ /*
+ * select the nth address from the list (where cur_addr_num is the
+ * nth) and 0 is the first one, 1 is the second one etc...
+ */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("cur_addr_num:%d match_scope_prefered:%d select it\n",
+ cur_addr_num, match_scope_prefered);
+ }
+#endif
+ sin6 = sctp_select_v6_nth_addr_from_ifn_boundall(ifn, stcb, non_asoc_addr_ok, loopscope,
+ loc_scope, cur_addr_num, match_scope_prefered);
+ if (match_scope_prefered && (sin6 == NULL)) {
+ /* retry without the preference for matching scope */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("retry with no match_scope_prefered\n");
+ }
+#endif
+ sin6 = sctp_select_v6_nth_addr_from_ifn_boundall(ifn, stcb, non_asoc_addr_ok, loopscope,
+ loc_scope, cur_addr_num, 0);
+ }
+ if (sin6) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+			printf("Selected address %d ifn:%p for the route\n", cur_addr_num, (void *)ifn);
+ }
+#endif
+ if (net) {
+ /* store so we get the next one */
+ if (cur_addr_num < 255)
+ net->indx_of_eligible_next_to_use = cur_addr_num + 1;
+ else
+ net->indx_of_eligible_next_to_use = 0;
+ }
+ return (sin6);
+ }
+ num_eligible_addr = 0;
+bound_all_v6_plan_b:
+ /*
+ * ok, if we reach here we either fell through due to something
+	 * changing during an interrupt (unlikely) or we have NO eligible
+ * source addresses for the ifn of the route (most likely). We must
+ * look at all the other interfaces EXCEPT rt->rt_ifp and do the
+ * same game.
+ */
+ if (inp->next_ifn_touse == NULL) {
+ started_at_beginning = 1;
+ inp->next_ifn_touse = TAILQ_FIRST(&ifnet);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+			printf("Start at first IFN:%p\n", (void *)inp->next_ifn_touse);
+ }
+#endif
+ } else {
+ inp->next_ifn_touse = TAILQ_NEXT(inp->next_ifn_touse, if_list);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+			printf("Resume at IFN:%p\n", (void *)inp->next_ifn_touse);
+ }
+#endif
+ if (inp->next_ifn_touse == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("IFN Resets\n");
+ }
+#endif
+ started_at_beginning = 1;
+ inp->next_ifn_touse = TAILQ_FIRST(&ifnet);
+ }
+ }
+ for (ifn = inp->next_ifn_touse; ifn;
+ ifn = TAILQ_NEXT(ifn, if_list)) {
+ if (loopscope == 0 && ifn->if_type == IFT_LOOP) {
+ /* wrong base scope */
+ continue;
+ }
+ if (loc_scope && (ifn->if_index != loc_scope)) {
+ /*
+			 * by definition the scope (from to->sin6_scope_id)
+ * must match that of the interface. If not then we
+ * could pick a wrong scope for the address.
+			 * Usually we don't hit plan-b since the route
+ * handles this. However we can hit plan-b when we
+ * send to local-host so the route is the loopback
+ * interface, but the destination is a link local.
+ */
+ continue;
+ }
+ if (ifn == rt->rt_ifp) {
+ /* already looked at this guy */
+ continue;
+ }
+ /*
+ * Address rotation will only work when we are not rotating
+ * sourced interfaces and are using the interface of the
+ * route. We would need to have a per interface index in
+ * order to do proper rotation.
+ */
+ num_eligible_addr = sctp_count_v6_num_eligible_boundall(ifn, stcb, non_asoc_addr_ok, loopscope, loc_scope);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+			printf("IFN:%p has %d eligible\n", (void *)ifn, num_eligible_addr);
+ }
+#endif
+ if (num_eligible_addr == 0) {
+ /* none we can use */
+ continue;
+ }
+ /*
+ * Ok we have num_eligible_addr set with how many we can
+ * use, this may vary from call to call due to addresses
+ * being deprecated etc..
+ */
+ inp->next_ifn_touse = ifn;
+
+ /*
+		 * select the first one we can find with preference for
+ * matching scope.
+ */
+ sin6 = sctp_select_v6_nth_addr_from_ifn_boundall(ifn, stcb, non_asoc_addr_ok, loopscope, loc_scope, 0, 1);
+ if (sin6 == NULL) {
+ /*
+			 * can't find one with matching scope; how about a
+			 * source with higher scope?
+ */
+ sin6 = sctp_select_v6_nth_addr_from_ifn_boundall(ifn, stcb, non_asoc_addr_ok, loopscope, loc_scope, 0, 0);
+ if (sin6 == NULL)
+ /* Hmm, can't find one in the interface now */
+ continue;
+ }
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+			printf("Selected the %d'th address of ifn:%p\n",
+			    cur_addr_num,
+			    (void *)ifn);
+ }
+#endif
+ return (sin6);
+ }
+ if (started_at_beginning == 0) {
+ /*
+ * we have not been through all of them yet, force us to go
+ * through them all.
+ */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("Force a recycle\n");
+ }
+#endif
+ inp->next_ifn_touse = NULL;
+ goto bound_all_v6_plan_b;
+ }
+ RTFREE(ro->ro_rt);
+ ro->ro_rt = NULL;
+ return (NULL);
+
+}
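+
+/*
+ * Note on the rotation above: the chosen index is stored back in
+ * net->indx_of_eligible_next_to_use, so successive sends to the same net
+ * walk the eligible list round-robin. With three eligible addresses the
+ * cur_addr_num values seen across calls are 0, 1, 2, 0, 1, ... (the stored
+ * value is kept below 256).
+ */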
+
+/* stcb and net may be NULL */
+struct in6_addr
+sctp_ipv6_source_address_selection(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb, struct route *ro, struct sctp_nets *net,
+ int non_asoc_addr_ok)
+{
+ struct in6_addr ans;
+ struct sockaddr_in6 *rt_addr;
+ uint8_t loc_scope, loopscope;
+ struct sockaddr_in6 *to = (struct sockaddr_in6 *)&ro->ro_dst;
+
+	/*
+	 * This routine is tricky: standard v6 src address selection cannot
+	 * take into account what we have bound etc, so we can't use it.
+	 *
+	 * Instead here is what we must do:
+	 * 1) Make sure we have a route; if we don't have a route we can
+	 *    never reach the peer.
+	 * 2) Once we have a route, determine the scope of the route: link
+	 *    local, loopback or global.
+	 * 3) Next we divide into three types. Either we are bound all,
+	 *    which means we want to use one of the addresses of the
+	 *    interface we are going out. <or>
+	 * 4a) We have no stcb, which means we are using the specific
+	 *    addresses bound on an inp; in this case we are similar to the
+	 *    stcb case (4b below) except the list is always a positive
+	 *    list. <or>
+	 * 4b) We are bound specific with a stcb, which means we have a list
+	 *    of bound addresses and we must see if the ifn of the route is
+	 *    actually one of the bound addresses. If not, then we must
+	 *    rotate addresses amongst properly scoped bound addresses; if
+	 *    so we use the address of the interface.
+	 * 5) Always, no matter which path we take through the above, we
+	 *    must be sure the source address we use is allowed to be used.
+	 *    I.e. IN6_IFF_DETACHED, IN6_IFF_NOTREADY, and IN6_IFF_ANYCAST
+	 *    addresses cannot be used.
+	 * 6) Addresses that are deprecated MAY be used
+	 *    if (!ip6_use_deprecated) {
+	 *        if (IFA6_IS_DEPRECATED(ifa6)) {
+	 *            skip the address
+	 *        }
+	 *    }
+	 */
+
+ /*** 1> determine route, if not already done */
+ if (ro->ro_rt == NULL) {
+ /*
+ * Need a route to cache.
+ */
+ int scope_save;
+
+ scope_save = to->sin6_scope_id;
+ to->sin6_scope_id = 0;
+
+ rtalloc_ign(ro, 0UL);
+ to->sin6_scope_id = scope_save;
+ }
+ if (ro->ro_rt == NULL) {
+ /*
+		 * No route to host; this packet is going nowhere. We
+		 * probably should make sure we arrange to send back an
+		 * error.
+ */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("No route to host, this packet cannot be sent!\n");
+ }
+#endif
+ memset(&ans, 0, sizeof(ans));
+ return (ans);
+ }
+ /*** 2a> determine scope for outbound address/route */
+ loc_scope = loopscope = 0;
+ /*
+ * We base our scope on the outbound packet scope and route, NOT the
+ * TCB (if there is one). This way in local scope we will only use a
+ * local scope src address when we send to a local address.
+ */
+
+ if (IN6_IS_ADDR_LOOPBACK(&to->sin6_addr)) {
+ /*
+ * If the route goes to the loopback address OR the address
+ * is a loopback address, we are loopback scope.
+ */
+ loc_scope = 0;
+ loopscope = 1;
+ if (net != NULL) {
+ /* mark it as local */
+ net->addr_is_local = 1;
+ }
+ } else if (IN6_IS_ADDR_LINKLOCAL(&to->sin6_addr)) {
+ if (to->sin6_scope_id)
+ loc_scope = to->sin6_scope_id;
+ else {
+ loc_scope = 1;
+ }
+ loopscope = 0;
+ }
+ /*
+ * now, depending on which way we are bound we call the appropriate
+ * routine to do steps 3-6
+ */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("Destination address:");
+ sctp_print_address((struct sockaddr *)to);
+ }
+#endif
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ rt_addr = sctp_choose_v6_boundall(inp, stcb, net, ro, loc_scope, loopscope, non_asoc_addr_ok);
+ } else {
+ if (stcb)
+ rt_addr = sctp_choose_v6_boundspecific_stcb(inp, stcb, net, ro, loc_scope, loopscope, non_asoc_addr_ok);
+ else
+ /*
+ * we can't have a non-asoc address since we have no
+ * association
+ */
+ rt_addr = sctp_choose_v6_boundspecific_inp(inp, ro, loc_scope, loopscope);
+ }
+ if (rt_addr == NULL) {
+ /* no suitable address? */
+ struct in6_addr in6;
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("V6 packet will reach dead-end no suitable src address\n");
+ }
+#endif
+ memset(&in6, 0, sizeof(in6));
+ return (in6);
+ }
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("Source address selected is:");
+ sctp_print_address((struct sockaddr *)rt_addr);
+ }
+#endif
+ return (rt_addr->sin6_addr);
+}
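+
+/*
+ * Usage sketch (hypothetical caller, for illustration only): the selector
+ * above returns the unspecified address when no source can be found, so a
+ * caller must treat :: as a hard failure:
+ *
+ *	struct in6_addr src6;
+ *
+ *	src6 = sctp_ipv6_source_address_selection(inp, stcb, ro, net, 0);
+ *	if (IN6_IS_ADDR_UNSPECIFIED(&src6)) {
+ *		... no route, or no acceptable bound address; the packet
+ *		... will dead-end, so drop or abort here
+ *	}
+ */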
+
+
+static
+int
+sctp_is_address_in_scope(struct ifaddr *ifa,
+ int ipv4_addr_legal,
+ int ipv6_addr_legal,
+ int loopback_scope,
+ int ipv4_local_scope,
+ int local_scope,
+ int site_scope)
+{
+ if ((loopback_scope == 0) &&
+ (ifa->ifa_ifp) &&
+ (ifa->ifa_ifp->if_type == IFT_LOOP)) {
+		/* skip loopback if not in scope */
+ return (0);
+ }
+ if ((ifa->ifa_addr->sa_family == AF_INET) && ipv4_addr_legal) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)ifa->ifa_addr;
+ if (sin->sin_addr.s_addr == 0) {
+			/* not in scope, unspecified */
+ return (0);
+ }
+ if ((ipv4_local_scope == 0) &&
+ (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
+ /* private address not in scope */
+ return (0);
+ }
+ } else if ((ifa->ifa_addr->sa_family == AF_INET6) && ipv6_addr_legal) {
+ struct sockaddr_in6 *sin6;
+ struct in6_ifaddr *ifa6;
+
+ ifa6 = (struct in6_ifaddr *)ifa;
+ /* ok to use deprecated addresses? */
+ if (!ip6_use_deprecated) {
+ if (ifa6->ia6_flags &
+ IN6_IFF_DEPRECATED) {
+ return (0);
+ }
+ }
+ if (ifa6->ia6_flags &
+ (IN6_IFF_DETACHED |
+ IN6_IFF_ANYCAST |
+ IN6_IFF_NOTREADY)) {
+ return (0);
+ }
+ sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+			/* skip unspecified addresses */
+ return (0);
+ }
+ if ( /* (local_scope == 0) && */
+ (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
+ return (0);
+ }
+ if ((site_scope == 0) &&
+ (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
+ return (0);
+ }
+ } else {
+ return (0);
+ }
+ return (1);
+}
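+
+/*
+ * Examples of the scope filter above (hypothetical inputs): with
+ * loopback_scope == 0 any ifa on an IFT_LOOP interface is out; 10.1.1.1 is
+ * out unless ipv4_local_scope is set; a link-local fe80:: address is
+ * always out (the local_scope test above is commented out); and a
+ * site-local fec0:: address is out unless site_scope is set.
+ */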
+
+static struct mbuf *
+sctp_add_addr_to_mbuf(struct mbuf *m, struct ifaddr *ifa)
+{
+ struct sctp_paramhdr *parmh;
+ struct mbuf *mret;
+ int len;
+
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ len = sizeof(struct sctp_ipv4addr_param);
+ } else if (ifa->ifa_addr->sa_family == AF_INET6) {
+ len = sizeof(struct sctp_ipv6addr_param);
+ } else {
+ /* unknown type */
+ return (m);
+ }
+
+ if (M_TRAILINGSPACE(m) >= len) {
+ /* easy side we just drop it on the end */
+ parmh = (struct sctp_paramhdr *)(m->m_data + m->m_len);
+ mret = m;
+ } else {
+ /* Need more space */
+ mret = m;
+ while (mret->m_next != NULL) {
+ mret = mret->m_next;
+ }
+ mret->m_next = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
+ if (mret->m_next == NULL) {
+ /* We are hosed, can't add more addresses */
+ return (m);
+ }
+ mret = mret->m_next;
+ parmh = mtod(mret, struct sctp_paramhdr *);
+ }
+ /* now add the parameter */
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ struct sctp_ipv4addr_param *ipv4p;
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)ifa->ifa_addr;
+ ipv4p = (struct sctp_ipv4addr_param *)parmh;
+ parmh->param_type = htons(SCTP_IPV4_ADDRESS);
+ parmh->param_length = htons(len);
+ ipv4p->addr = sin->sin_addr.s_addr;
+ mret->m_len += len;
+ } else if (ifa->ifa_addr->sa_family == AF_INET6) {
+ struct sctp_ipv6addr_param *ipv6p;
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
+ ipv6p = (struct sctp_ipv6addr_param *)parmh;
+ parmh->param_type = htons(SCTP_IPV6_ADDRESS);
+ parmh->param_length = htons(len);
+ memcpy(ipv6p->addr, &sin6->sin6_addr,
+ sizeof(ipv6p->addr));
+ /* clear embedded scope in the address */
+ in6_clearscope((struct in6_addr *)ipv6p->addr);
+ mret->m_len += len;
+ } else {
+ return (m);
+ }
+ return (mret);
+}
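+
+/*
+ * Wire layout produced by the function above, for a hypothetical address
+ * 10.0.0.1: the v4 case appends an 8-byte TLV, type SCTP_IPV4_ADDRESS
+ * (0x0005) and length in network byte order:
+ *
+ *	00 05 00 08 0a 00 00 01
+ *
+ * The v6 case appends a 20-byte TLV (type 0x0006) and clears any embedded
+ * scope with in6_clearscope() before the address goes on the wire.
+ */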
+
+
+struct mbuf *
+sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_scoping *scope, struct mbuf *m_at, int cnt_inits_to)
+{
+ int cnt;
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ struct ifnet *ifn;
+ struct ifaddr *ifa;
+
+ cnt = cnt_inits_to;
+ TAILQ_FOREACH(ifn, &ifnet, if_list) {
+ if ((scope->loopback_scope == 0) &&
+ (ifn->if_type == IFT_LOOP)) {
+ /*
+ * Skip loopback devices if loopback_scope
+ * not set
+ */
+ continue;
+ }
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ if (sctp_is_address_in_scope(ifa,
+ scope->ipv4_addr_legal,
+ scope->ipv6_addr_legal,
+ scope->loopback_scope,
+ scope->ipv4_local_scope,
+ scope->local_scope,
+ scope->site_scope) == 0) {
+ continue;
+ }
+ cnt++;
+ }
+ }
+ if (cnt > 1) {
+ TAILQ_FOREACH(ifn, &ifnet, if_list) {
+ if ((scope->loopback_scope == 0) &&
+ (ifn->if_type == IFT_LOOP)) {
+ /*
+ * Skip loopback devices if
+ * loopback_scope not set
+ */
+ continue;
+ }
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ if (sctp_is_address_in_scope(ifa,
+ scope->ipv4_addr_legal,
+ scope->ipv6_addr_legal,
+ scope->loopback_scope,
+ scope->ipv4_local_scope,
+ scope->local_scope,
+ scope->site_scope) == 0) {
+ continue;
+ }
+ m_at = sctp_add_addr_to_mbuf(m_at, ifa);
+ }
+ }
+ }
+ } else {
+ struct sctp_laddr *laddr;
+ int cnt;
+
+ cnt = cnt_inits_to;
+ /* First, how many ? */
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ continue;
+ }
+ if (laddr->ifa->ifa_addr == NULL)
+ continue;
+ if (sctp_is_address_in_scope(laddr->ifa,
+ scope->ipv4_addr_legal,
+ scope->ipv6_addr_legal,
+ scope->loopback_scope,
+ scope->ipv4_local_scope,
+ scope->local_scope,
+ scope->site_scope) == 0) {
+ continue;
+ }
+ cnt++;
+ }
+ /*
+ * To get through a NAT we only list addresses if we have
+ * more than one. That way if you just bind a single address
+ * we let the source of the init dictate our address.
+ */
+ if (cnt > 1) {
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ continue;
+ }
+ if (laddr->ifa->ifa_addr == NULL) {
+ continue;
+ }
+ if (sctp_is_address_in_scope(laddr->ifa,
+ scope->ipv4_addr_legal,
+ scope->ipv6_addr_legal,
+ scope->loopback_scope,
+ scope->ipv4_local_scope,
+ scope->local_scope,
+ scope->site_scope) == 0) {
+ continue;
+ }
+ m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa);
+ }
+ }
+ }
+ return (m_at);
+}
diff --git a/sys/netinet/sctp_bsd_addr.h b/sys/netinet/sctp_bsd_addr.h
new file mode 100644
index 0000000..f997684
--- /dev/null
+++ b/sys/netinet/sctp_bsd_addr.h
@@ -0,0 +1,70 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_bsd_addr_h__
+#define __sctp_bsd_addr_h__
+
+
+
+
+#include <netinet/sctp_header.h>
+
+
+
+#if defined(_KERNEL)
+
+
+int sctp_is_addr_restricted(struct sctp_tcb *, struct sockaddr *);
+
+
+struct in_addr
+sctp_ipv4_source_address_selection(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct route *ro, struct sctp_nets *net,
+ int non_asoc_addr_ok);
+
+struct in6_addr
+sctp_ipv6_source_address_selection(struct sctp_inpcb *,
+ struct sctp_tcb *, struct route *,
+ struct sctp_nets *, int);
+
+
+struct mbuf *
+sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp,
+ struct sctp_scoping *scope,
+ struct mbuf *m_at,
+ int cnt_inits_to);
+
+
+#endif
+#endif
diff --git a/sys/netinet/sctp_constants.h b/sys/netinet/sctp_constants.h
new file mode 100644
index 0000000..605e0ad
--- /dev/null
+++ b/sys/netinet/sctp_constants.h
@@ -0,0 +1,903 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_constants.h,v 1.17 2005/03/06 16:04:17 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_constants_h__
+#define __sctp_constants_h__
+
+#if defined(_KERNEL)
+#include <sys/kernel.h>
+#endif
+
+
+#define SCTP_VERSION_STRING "KAME-BSD 1.1"
+/* #define SCTP_AUDITING_ENABLED 1 used for debug/auditing */
+#define SCTP_AUDIT_SIZE 256
+#define SCTP_STAT_LOG_SIZE 80000
+
+/* Places that CWND log can happen from */
+#define SCTP_CWND_LOG_FROM_FR 1
+#define SCTP_CWND_LOG_FROM_RTX 2
+#define SCTP_CWND_LOG_FROM_BRST 3
+#define SCTP_CWND_LOG_FROM_SS 4
+#define SCTP_CWND_LOG_FROM_CA 5
+#define SCTP_CWND_LOG_FROM_SAT 6
+#define SCTP_BLOCK_LOG_INTO_BLK 7
+#define SCTP_BLOCK_LOG_OUTOF_BLK 8
+#define SCTP_BLOCK_LOG_CHECK 9
+#define SCTP_STR_LOG_FROM_INTO_STRD 10
+#define SCTP_STR_LOG_FROM_IMMED_DEL 11
+#define SCTP_STR_LOG_FROM_INSERT_HD 12
+#define SCTP_STR_LOG_FROM_INSERT_MD 13
+#define SCTP_STR_LOG_FROM_INSERT_TL 14
+#define SCTP_STR_LOG_FROM_MARK_TSN 15
+#define SCTP_STR_LOG_FROM_EXPRS_DEL 16
+#define SCTP_FR_LOG_BIGGEST_TSNS 17
+#define SCTP_FR_LOG_STRIKE_TEST 18
+#define SCTP_FR_LOG_STRIKE_CHUNK 19
+#define SCTP_FR_T3_TIMEOUT 20
+#define SCTP_MAP_PREPARE_SLIDE 21
+#define SCTP_MAP_SLIDE_FROM 22
+#define SCTP_MAP_SLIDE_RESULT 23
+#define SCTP_MAP_SLIDE_CLEARED 24
+#define SCTP_MAP_SLIDE_NONE 25
+#define SCTP_FR_T3_MARK_TIME 26
+#define SCTP_FR_T3_MARKED 27
+#define SCTP_FR_T3_STOPPED 28
+#define SCTP_FR_MARKED 30
+#define SCTP_CWND_LOG_NOADV_SS 31
+#define SCTP_CWND_LOG_NOADV_CA 32
+#define SCTP_MAX_BURST_APPLIED 33
+#define SCTP_MAX_IFP_APPLIED 34
+#define SCTP_MAX_BURST_ERROR_STOP 35
+#define SCTP_INCREASE_PEER_RWND 36
+#define SCTP_DECREASE_PEER_RWND 37
+#define SCTP_SET_PEER_RWND_VIA_SACK 38
+#define SCTP_LOG_MBCNT_INCREASE 39
+#define SCTP_LOG_MBCNT_DECREASE 40
+#define SCTP_LOG_MBCNT_CHKSET 41
+#define SCTP_LOG_NEW_SACK 42
+#define SCTP_LOG_TSN_ACKED 43
+#define SCTP_LOG_TSN_REVOKED 44
+#define SCTP_LOG_LOCK_TCB 45
+#define SCTP_LOG_LOCK_INP 46
+#define SCTP_LOG_LOCK_SOCK 47
+#define SCTP_LOG_LOCK_SOCKBUF_R 48
+#define SCTP_LOG_LOCK_SOCKBUF_S 49
+#define SCTP_LOG_LOCK_CREATE 50
+#define SCTP_LOG_INITIAL_RTT 51
+#define SCTP_LOG_RTTVAR 52
+#define SCTP_LOG_SBALLOC 53
+#define SCTP_LOG_SBFREE 54
+#define SCTP_LOG_SBRESULT 55
+#define SCTP_FR_DUPED 56
+#define SCTP_FR_MARKED_EARLY 57
+#define SCTP_FR_CWND_REPORT 58
+#define SCTP_FR_CWND_REPORT_START 59
+#define SCTP_FR_CWND_REPORT_STOP 60
+#define SCTP_CWND_LOG_FROM_SEND 61
+#define SCTP_CWND_INITIALIZATION 62
+#define SCTP_CWND_LOG_FROM_T3 63
+#define SCTP_CWND_LOG_FROM_SACK 64
+#define SCTP_CWND_LOG_NO_CUMACK 65
+#define SCTP_CWND_LOG_FROM_RESEND 66
+#define SCTP_FR_LOG_CHECK_STRIKE 67
+#define SCTP_SEND_NOW_COMPLETES 68
+#define SCTP_CWND_LOG_FILL_OUTQ_CALLED 69
+#define SCTP_CWND_LOG_FILL_OUTQ_FILLS 70
+#define SCTP_LOG_FREE_SENT 71
+#define SCTP_NAGLE_APPLIED 72
+#define SCTP_NAGLE_SKIPPED 73
+#define SCTP_WAKESND_FROM_SACK 74
+#define SCTP_WAKESND_FROM_FWDTSN 75
+#define SCTP_NOWAKE_FROM_SACK 76
+#define SCTP_CWNDLOG_PRESEND 77
+#define SCTP_CWNDLOG_ENDSEND 78
+#define SCTP_AT_END_OF_SACK 79
+#define SCTP_REASON_FOR_SC 80
+#define SCTP_BLOCK_LOG_INTO_BLKA 81
+#define SCTP_ENTER_USER_RECV 82
+#define SCTP_USER_RECV_SACKS 83
+#define SCTP_SORECV_BLOCKSA 84
+#define SCTP_SORECV_BLOCKSB 85
+#define SCTP_SORECV_DONE 86
+#define SCTP_SACK_RWND_UPDATE 87
+#define SCTP_SORECV_ENTER 88
+#define SCTP_SORECV_ENTERPL 89
+#define SCTP_MBUF_INPUT 90
+#define SCTP_MBUF_IALLOC 91
+#define SCTP_MBUF_IFREE 92
+#define SCTP_MBUF_ICOPY 93
+#define SCTP_SORCV_FREECTL 94
+#define SCTP_SORCV_DOESCPY 95
+#define SCTP_SORCV_DOESLCK 96
+#define SCTP_SORCV_DOESADJ 97
+#define SCTP_SORCV_BOTWHILE 98
+#define SCTP_SORCV_PASSBF 99
+#define SCTP_SORCV_ADJD 100
+#define SCTP_UNKNOWN_MAX 101
+#define SCTP_RANDY_STUFF 102
+#define SCTP_RANDY_STUFF1 103
+
+#define SCTP_LOG_MAX_TYPES 104
+/*
+ * To turn on various logging, you must first define SCTP_STAT_LOGGING. Then
+ * to get something to log you define one of the logging defines i.e.
+ *
+ * SCTP_CWND_LOGGING SCTP_BLK_LOGGING SCTP_STR_LOGGING SCTP_FR_LOGGING
+ *
+ * Any one or a combination of the logging can be turned on.
+ */
+#define SCTP_LOG_EVENT_UNKNOWN 0
+#define SCTP_LOG_EVENT_CWND 1
+#define SCTP_LOG_EVENT_BLOCK 2
+#define SCTP_LOG_EVENT_STRM 3
+#define SCTP_LOG_EVENT_FR 4
+#define SCTP_LOG_EVENT_MAP 5
+#define SCTP_LOG_EVENT_MAXBURST 6
+#define SCTP_LOG_EVENT_RWND 7
+#define SCTP_LOG_EVENT_MBCNT 8
+#define SCTP_LOG_EVENT_SACK 9
+#define SCTP_LOG_LOCK_EVENT 10
+#define SCTP_LOG_EVENT_RTT 11
+#define SCTP_LOG_EVENT_SB 12
+#define SCTP_LOG_EVENT_NAGLE 13
+#define SCTP_LOG_EVENT_WAKE 14
+#define SCTP_LOG_MISC_EVENT 15
+#define SCTP_LOG_EVENT_CLOSE 16
+#define SCTP_LOG_EVENT_MBUF 17
+
+#define SCTP_LOG_MAX_EVENT 18
+
+#define SCTP_LOCK_UNKNOWN 2
+
+
+/* number of associations by default for zone allocation */
+#define SCTP_MAX_NUM_OF_ASOC 40000
+/* how many addresses per assoc remote and local */
+#define SCTP_SCALE_FOR_ADDR 2
+
+/* default AUTO_ASCONF mode enable(1)/disable(0) value (sysctl) */
+#define SCTP_DEFAULT_AUTO_ASCONF 0
+
+/*
+ * Threshold for rwnd updates: we have to read (sb_hiwat >>
+ * SCTP_RWND_HIWAT_SHIFT) before we will look to see if we need to send a
+ * window update sack. When we look, we compare the last rwnd we sent vs the
+ * current rwnd. It too must be greater than this value. Using 3 divides the
+ * hiwat by 8, so for a 200k rwnd we need to read 24k. For a 64k rwnd we need
+ * to read 8k. This seems about right.. I hope :-D.. we do set a
+ * min of an MTU on it so if the rwnd is real small we will insist
+ * on a full MTU of 1500 bytes.
+ */
+#define SCTP_RWND_HIWAT_SHIFT 3
+
+/* How much of the rwnd must the
+ * message be taking up to start partial delivery.
+ * We calculate this by shifting the hi_water (recv_win)
+ * by the following: set to 1, partial delivery starts
+ * when a message holds 1/2 the rwnd; set to 2, when a
+ * message holds 1/4 the rwnd... etc.
+ */
+
+#define SCTP_PARTIAL_DELIVERY_SHIFT 1
+
+/* Minimum number of bytes read by user before we
+ * consider doing a rwnd update
+ */
+#define SCTP_MIN_READ_BEFORE_CONSIDERING 3000
+
+/*
+ * default HMAC for cookies, etc... use one of the AUTH HMAC id's
+ * SCTP_HMAC is the HMAC_ID to use
+ * SCTP_SIGNATURE_SIZE is the digest length
+ */
+#define SCTP_HMAC SCTP_AUTH_HMAC_ID_SHA1
+#define SCTP_SIGNATURE_SIZE SCTP_AUTH_DIGEST_LEN_SHA1
+#define SCTP_SIGNATURE_ALOC_SIZE SCTP_SIGNATURE_SIZE
+
+/* DEFINE HERE WHAT CRC YOU WANT TO USE */
+#define SCTP_USECRC_RFC2960 1
+/* #define SCTP_USECRC_FLETCHER 1 */
+/* #define SCTP_USECRC_SSHCRC32 1 */
+/* #define SCTP_USECRC_FASTCRC32 1 */
+/* #define SCTP_USECRC_CRC32 1 */
+/* #define SCTP_USECRC_TCP32 1 */
+/* #define SCTP_USECRC_CRC16SMAL 1 */
+/* #define SCTP_USECRC_CRC16 1 */
+/* #define SCTP_USECRC_MODADLER 1 */
+
+#ifndef SCTP_ADLER32_BASE
+#define SCTP_ADLER32_BASE 65521
+#endif
+
+/*
+ * the SCTP protocol signature this includes the version number encoded in
+ * the last 4 bits of the signature.
+ */
+#define PROTO_SIGNATURE_A 0x30000000
+#define SCTP_VERSION_NUMBER 0x3
+
+#define MAX_TSN 0xffffffff
+#define MAX_SEQ 0xffff
+
+/* how many executions every N ticks */
+#define SCTP_ITERATOR_MAX_AT_ONCE 20
+
+/* number of clock ticks between iterator executions */
+#define SCTP_ITERATOR_TICKS 1
+
+/*
+ * option: If you comment out the following you will receive the old behavior
+ * of obeying cwnd for the fast retransmit algorithm. With this defined a FR
+ * happens right away without waiting for the flightsize to drop below the
+ * cwnd value (which is reduced by the FR to 1/2 the inflight packets).
+ */
+#define SCTP_IGNORE_CWND_ON_FR 1
+
+/*
+ * Adds implementors guide behavior to only use newest highest update in SACK
+ * gap ack's to figure out if you need to stroke a chunk for FR.
+ */
+#define SCTP_NO_FR_UNLESS_SEGMENT_SMALLER 1
+
+/* default max I can burst out after a fast retransmit */
+#define SCTP_DEF_MAX_BURST 4
+/* IP hdr (20/40) + 12+2+2 (enet) + sctp common 12 */
+#define SCTP_FIRST_MBUF_RESV 68
+/* Packet transmit states in the sent field */
+#define SCTP_DATAGRAM_UNSENT 0
+#define SCTP_DATAGRAM_SENT 1
+#define SCTP_DATAGRAM_RESEND1 2 /* not used (in code, but may
+ * hit this value) */
+#define SCTP_DATAGRAM_RESEND2 3 /* not used (in code, but may
+ * hit this value) */
+#define SCTP_DATAGRAM_RESEND 4
+#define SCTP_DATAGRAM_ACKED 10010
+#define SCTP_DATAGRAM_INBOUND 10011
+#define SCTP_READY_TO_TRANSMIT 10012
+#define SCTP_DATAGRAM_MARKED 20010
+#define SCTP_FORWARD_TSN_SKIP 30010
+
+/* chunk output send from locations */
+#define SCTP_OUTPUT_FROM_USR_SEND 0
+#define SCTP_OUTPUT_FROM_T3 1
+#define SCTP_OUTPUT_FROM_INPUT_ERROR 2
+#define SCTP_OUTPUT_FROM_CONTROL_PROC 3
+#define SCTP_OUTPUT_FROM_SACK_TMR 4
+#define SCTP_OUTPUT_FROM_SHUT_TMR 5
+#define SCTP_OUTPUT_FROM_HB_TMR 6
+#define SCTP_OUTPUT_FROM_SHUT_ACK_TMR 7
+#define SCTP_OUTPUT_FROM_ASCONF_TMR 8
+#define SCTP_OUTPUT_FROM_STRRST_TMR 9
+#define SCTP_OUTPUT_FROM_AUTOCLOSE_TMR 10
+#define SCTP_OUTPUT_FROM_EARLY_FR_TMR 11
+#define SCTP_OUTPUT_FROM_STRRST_REQ 12
+#define SCTP_OUTPUT_FROM_USR_RCVD 13
+/* SCTP chunk types are moved to sctp.h for application (NAT, FW) use */
+
+/* align to 32-bit sizes */
+#define SCTP_SIZE32(x) ((((x)+3) >> 2) << 2)
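+/*
+ * e.g. SCTP_SIZE32(5) == 8 and SCTP_SIZE32(8) == 8: lengths are padded up
+ * to the next 4-byte boundary, as SCTP chunks/parameters require.
+ */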
+
+#define IS_SCTP_CONTROL(a) ((a)->chunk_type != SCTP_DATA)
+#define IS_SCTP_DATA(a) ((a)->chunk_type == SCTP_DATA)
+
+
+/* SCTP parameter types */
+/*************0x0000 series*************/
+#define SCTP_HEARTBEAT_INFO 0x0001
+#define SCTP_IPV4_ADDRESS 0x0005
+#define SCTP_IPV6_ADDRESS 0x0006
+#define SCTP_STATE_COOKIE 0x0007
+#define SCTP_UNRECOG_PARAM 0x0008
+#define SCTP_COOKIE_PRESERVE 0x0009
+#define SCTP_HOSTNAME_ADDRESS 0x000b
+#define SCTP_SUPPORTED_ADDRTYPE 0x000c
+
+/* draft-ietf-stewart-strreset-xxx */
+#define SCTP_STR_RESET_OUT_REQUEST 0x000d
+#define SCTP_STR_RESET_IN_REQUEST 0x000e
+#define SCTP_STR_RESET_TSN_REQUEST 0x000f
+#define SCTP_STR_RESET_RESPONSE 0x0010
+
+#define SCTP_MAX_RESET_PARAMS 2
+#define SCTP_STREAM_RESET_TSN_DELTA 0x1000
+
+/*************0x4000 series*************/
+
+/*************0x8000 series*************/
+#define SCTP_ECN_CAPABLE 0x8000
+/* ECN Nonce: draft-ladha-sctp-ecn-nonce */
+#define SCTP_ECN_NONCE_SUPPORTED 0x8001
+/* draft-ietf-tsvwg-auth-xxx */
+#define SCTP_RANDOM 0x8002
+#define SCTP_CHUNK_LIST 0x8003
+#define SCTP_HMAC_LIST 0x8004
+/*
+ * draft-ietf-tsvwg-addip-sctp-xx: param=0x8008 len=0xNNNN, followed by one
+ * byte per supported chunk type extension:
+ *
+ *	Byte | Byte | Byte | Byte
+ *	Byte | Byte ...
+ *
+ * Where each byte is a chunk type extension supported. For example, to
+ * support all chunks one would have (in hex):
+ *
+ *	80 01 00 09 C0 C1 80 81 82 00 00 00
+ *
+ * which carries the parameter with C0 = PR-SCTP (RFC3758), C1, 80 = ASCONF
+ * (addip draft), 81 = Packet Drop, 82 = Stream Reset, 83 = Authentication.
+ */
+#define SCTP_SUPPORTED_CHUNK_EXT 0x8008
+
+/*************0xC000 series*************/
+#define SCTP_PRSCTP_SUPPORTED 0xc000
+/* draft-ietf-tsvwg-addip-sctp */
+#define SCTP_ADD_IP_ADDRESS 0xc001
+#define SCTP_DEL_IP_ADDRESS 0xc002
+#define SCTP_ERROR_CAUSE_IND 0xc003
+#define SCTP_SET_PRIM_ADDR 0xc004
+#define SCTP_SUCCESS_REPORT 0xc005
+#define SCTP_ULP_ADAPTATION 0xc006
+
+/* Notification error codes */
+#define SCTP_NOTIFY_DATAGRAM_UNSENT 0x0001
+#define SCTP_NOTIFY_DATAGRAM_SENT 0x0002
+#define SCTP_FAILED_THRESHOLD 0x0004
+#define SCTP_HEARTBEAT_SUCCESS 0x0008
+#define SCTP_RESPONSE_TO_USER_REQ 0x000f
+#define SCTP_INTERNAL_ERROR 0x0010
+#define SCTP_SHUTDOWN_GUARD_EXPIRES 0x0020
+#define SCTP_RECEIVED_SACK 0x0040
+#define SCTP_PEER_FAULTY 0x0080
+
+/* bits for TOS field */
+#define SCTP_ECT0_BIT 0x02
+#define SCTP_ECT1_BIT 0x01
+#define SCTP_CE_BITS 0x03
+
+/* below turns off above */
+#define SCTP_FLEXIBLE_ADDRESS 0x20
+#define SCTP_NO_HEARTBEAT 0x40
+
+/* mask to get sticky */
+#define SCTP_STICKY_OPTIONS_MASK 0x0c
+
+
+/* Chunk flags */
+#define SCTP_WINDOW_PROBE 0x01
+
+/*
+ * SCTP states for internal state machine XXX (should match "user" values)
+ */
+#define SCTP_STATE_EMPTY 0x0000
+#define SCTP_STATE_INUSE 0x0001
+#define SCTP_STATE_COOKIE_WAIT 0x0002
+#define SCTP_STATE_COOKIE_ECHOED 0x0004
+#define SCTP_STATE_OPEN 0x0008
+#define SCTP_STATE_SHUTDOWN_SENT 0x0010
+#define SCTP_STATE_SHUTDOWN_RECEIVED 0x0020
+#define SCTP_STATE_SHUTDOWN_ACK_SENT 0x0040
+#define SCTP_STATE_SHUTDOWN_PENDING 0x0080
+#define SCTP_STATE_CLOSED_SOCKET 0x0100
+#define SCTP_STATE_ABOUT_TO_BE_FREED 0x0200
+#define SCTP_STATE_PARTIAL_MSG_LEFT 0x0400
+#define SCTP_STATE_MASK 0x007f
+
+#define SCTP_GET_STATE(asoc) ((asoc)->state & SCTP_STATE_MASK)
+
+/* SCTP reachability state for each address */
+#define SCTP_ADDR_REACHABLE 0x001
+#define SCTP_ADDR_NOT_REACHABLE 0x002
+#define SCTP_ADDR_NOHB 0x004
+#define SCTP_ADDR_BEING_DELETED 0x008
+#define SCTP_ADDR_NOT_IN_ASSOC 0x010
+#define SCTP_ADDR_WAS_PRIMARY 0x020
+#define SCTP_ADDR_SWITCH_PRIMARY 0x040
+#define SCTP_ADDR_OUT_OF_SCOPE 0x080
+#define SCTP_ADDR_DOUBLE_SWITCH 0x100
+#define SCTP_ADDR_UNCONFIRMED 0x200
+
+#define SCTP_REACHABLE_MASK 0x203
+
+/* bound address types (e.g. valid address types to allow) */
+#define SCTP_BOUND_V6 0x01
+#define SCTP_BOUND_V4 0x02
+
+/*
+ * what is the default number of mbufs in a chain I allow before switching to
+ * a cluster
+ */
+#define SCTP_DEFAULT_MBUFS_IN_CHAIN 5
+
+/* How long a cookie lives in seconds */
+#define SCTP_DEFAULT_COOKIE_LIFE 60
+
+/* resource limit of streams */
+#define MAX_SCTP_STREAMS 2048
+
+/* Maximum the mapping array will grow to (TSN mapping array) */
+#define SCTP_MAPPING_ARRAY 512
+
+/* size of the initial malloc on the mapping array */
+#define SCTP_INITIAL_MAPPING_ARRAY 16
+/* how much we grow the mapping array each call */
+#define SCTP_MAPPING_ARRAY_INCR 32
+
+/*
+ * Here we define the timer types used by the implementation as arguments in
+ * the set/get timer type calls.
+ */
+#define SCTP_TIMER_INIT 0
+#define SCTP_TIMER_RECV 1
+#define SCTP_TIMER_SEND 2
+#define SCTP_TIMER_HEARTBEAT 3
+#define SCTP_TIMER_PMTU 4
+#define SCTP_TIMER_MAXSHUTDOWN 5
+#define SCTP_TIMER_SIGNATURE 6
+/*
+ * number of timer types in the base SCTP structure used in the set/get and
+ * has the base default.
+ */
+#define SCTP_NUM_TMRS 7
+
+/* timer types */
+#define SCTP_TIMER_TYPE_NONE 0
+#define SCTP_TIMER_TYPE_SEND 1
+#define SCTP_TIMER_TYPE_INIT 2
+#define SCTP_TIMER_TYPE_RECV 3
+#define SCTP_TIMER_TYPE_SHUTDOWN 4
+#define SCTP_TIMER_TYPE_HEARTBEAT 5
+#define SCTP_TIMER_TYPE_COOKIE 6
+#define SCTP_TIMER_TYPE_NEWCOOKIE 7
+#define SCTP_TIMER_TYPE_PATHMTURAISE 8
+#define SCTP_TIMER_TYPE_SHUTDOWNACK 9
+#define SCTP_TIMER_TYPE_ASCONF 10
+#define SCTP_TIMER_TYPE_SHUTDOWNGUARD 11
+#define SCTP_TIMER_TYPE_AUTOCLOSE 12
+#define SCTP_TIMER_TYPE_EVENTWAKE 13
+#define SCTP_TIMER_TYPE_STRRESET 14
+#define SCTP_TIMER_TYPE_INPKILL 15
+#define SCTP_TIMER_TYPE_ITERATOR 16
+#define SCTP_TIMER_TYPE_EARLYFR 17
+#define SCTP_TIMER_TYPE_ASOCKILL 18
+#define SCTP_TIMER_TYPE_ADDR_WQ 19
+/* add new timers here - and increment LAST */
+#define SCTP_TIMER_TYPE_LAST 20
+
+#define SCTP_IS_TIMER_TYPE_VALID(t) (((t) > SCTP_TIMER_TYPE_NONE) && \
+ ((t) < SCTP_TIMER_TYPE_LAST))
+
+
+
+/*
+ * Number of ticks before the soxwakeup() event that is delayed is sent AFTER
+ * the accept() call
+ */
+#define SCTP_EVENTWAKEUP_WAIT_TICKS 3000
+
+/*
+ * Of course we really don't collect stale cookies, being folks of discerning
+ * taste. However we do count them, and if we get too many before the
+ * association comes up.. we give up. Below is the constant that dictates
+ * when we give it up... this is an implementation-dependent treatment. In
+ * ours we do not ask for an extension of time, but just retry this many
+ * times...
+ */
+#define SCTP_MAX_STALE_COOKIES_I_COLLECT 10
+
+/* max number of TSN's dup'd that I will hold */
+#define SCTP_MAX_DUP_TSNS 20
+
+/*
+ * Here we define the types used when setting the retry amounts.
+ */
+/* constants for type of set */
+#define SCTP_MAXATTEMPT_INIT 2
+#define SCTP_MAXATTEMPT_SEND 3
+
+/* Maximum TSN's we will summarize in a drop report */
+#define SCTP_MAX_DROP_REPORT 16
+
+/* How many drop re-attempts we make on INIT/COOKIE-ECHO */
+#define SCTP_RETRY_DROPPED_THRESH 4
+
+/*
+ * And the max we will keep a history of in the tcb which MUST be lower than
+ * 256.
+ */
+#define SCTP_MAX_DROP_SAVE_REPORT 16
+
+/*
+ * Here we define the default timers and the default number of attempts we
+ * make for each respective side (send/init).
+ */
+
+/*
+ * Maximum number of chunks a single association can have on it. Note that
+ * this is a squishy number since the count can run over this if the user
+ * sends a large message down .. the fragmented chunks don't count until
+ * AFTER the message is on queue.. it would be the next send that blocks
+ * things. This number will get tuned up at boot in the sctp_init and use the
+ * number of clusters as a base. This way high bandwidth environments will
+ * not get impacted by the lower bandwidth sending a bunch of 1 byte chunks
+ */
+#define SCTP_ASOC_MAX_CHUNKS_ON_QUEUE 512
+
+#define MSEC_TO_TICKS(x) ((hz == 1000) ? (x) : (((x) * hz) / 1000))
+#define TICKS_TO_MSEC(x) ((hz == 1000) ? (x) : (((x) * 1000) / hz))
+
+#define SEC_TO_TICKS(x) ((x) * hz)
+#define TICKS_TO_SEC(x) ((x) / hz)
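+
+/*
+ * Worked example of the conversions above: with hz == 100,
+ * MSEC_TO_TICKS(250) == 25 and SEC_TO_TICKS(3) == 300; with hz == 1000 the
+ * millisecond conversions are the identity.
+ */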
+
+/*
+ * Basically the minimum amount of time before I do an early FR. Making this
+ * value too low will cause duplicate retransmissions.
+ */
+#define SCTP_MINFR_MSEC_TIMER 250
+/* The floor this value is allowed to fall to when starting a timer. */
+#define SCTP_MINFR_MSEC_FLOOR 20
+
+/* init timer def = 1 sec */
+#define SCTP_INIT_SEC 1
+
+/* send timer def = 1 seconds */
+#define SCTP_SEND_SEC 1
+
+/* recv timer def = 200ms */
+#define SCTP_RECV_MSEC 200
+
+/* 30 seconds + RTO (in ms) */
+#define SCTP_HB_DEFAULT_MSEC 30000
+
+/* Max time I will wait for Shutdown to complete */
+#define SCTP_DEF_MAX_SHUTDOWN_SEC 180
+
+
+/*
+ * This is how long a secret lives, NOT how long a cookie lives: how many
+ * ticks the current secret will live.
+ */
+#define SCTP_DEFAULT_SECRET_LIFE_SEC 3600
+
+#define SCTP_RTO_UPPER_BOUND (60000) /* 60 sec in ms */
+#define SCTP_RTO_UPPER_BOUND_SEC 60 /* for the init timer */
+#define SCTP_RTO_LOWER_BOUND (1000) /* 1 sec in ms */
+#define SCTP_RTO_INITIAL (3000) /* 3 sec in ms */
+
+
+#define SCTP_INP_KILL_TIMEOUT 20	/* number of ms to retry kill of inpcb */
+#define SCTP_ASOC_KILL_TIMEOUT 10	/* number of ms to retry kill of asoc */
+
+#define SCTP_DEF_MAX_INIT 8
+#define SCTP_DEF_MAX_SEND 10
+#define SCTP_DEF_MAX_PATH_RTX 4
+
+#define SCTP_DEF_PMTU_RAISE_SEC 600 /* 10 min between raise attempts */
+#define SCTP_DEF_PMTU_MIN 600
+
+#define SCTP_MSEC_IN_A_SEC 1000
+#define SCTP_USEC_IN_A_SEC 1000000
+#define SCTP_NSEC_IN_A_SEC 1000000000
+
+#define SCTP_MAX_OUTSTANDING_DG 10000
+
+/* How many streams I request initially by default */
+#define SCTP_OSTREAM_INITIAL 10
+
+/*
+ * How many smallest_mtu's need to increase before a window update sack is
+ * sent (should be a power of 2).
+ */
+#define SCTP_SEG_TO_RWND_UPD 32
+/* Send window update (incr * this > hiwat). Should be a power of 2 */
+#define SCTP_SCALE_OF_RWND_TO_UPD 4
+#define SCTP_MINIMAL_RWND (4096) /* minimal rwnd */
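+
+/*
+ * Illustrative reading of the scale knob above: with a 64k sb_hiwat and
+ * SCTP_SCALE_OF_RWND_TO_UPD == 4, a window update is considered once the
+ * window has opened by more than 64k / 4 == 16k.
+ */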
+
+#define SCTP_ADDRMAX 20
+
+/* SCTP DEBUG Switch parameters */
+#define SCTP_DEBUG_TIMER1 0x00000001
+#define SCTP_DEBUG_TIMER2 0x00000002
+#define SCTP_DEBUG_TIMER3 0x00000004
+#define SCTP_DEBUG_TIMER4 0x00000008
+#define SCTP_DEBUG_OUTPUT1 0x00000010
+#define SCTP_DEBUG_OUTPUT2 0x00000020
+#define SCTP_DEBUG_OUTPUT3 0x00000040
+#define SCTP_DEBUG_OUTPUT4 0x00000080
+#define SCTP_DEBUG_UTIL1 0x00000100
+#define SCTP_DEBUG_UTIL2 0x00000200
+#define SCTP_DEBUG_AUTH1 0x00000400
+#define SCTP_DEBUG_AUTH2 0x00000800
+#define SCTP_DEBUG_INPUT1 0x00001000
+#define SCTP_DEBUG_INPUT2 0x00002000
+#define SCTP_DEBUG_INPUT3 0x00004000
+#define SCTP_DEBUG_INPUT4 0x00008000
+#define SCTP_DEBUG_ASCONF1 0x00010000
+#define SCTP_DEBUG_ASCONF2 0x00020000
+#define SCTP_DEBUG_OUTPUT5 0x00040000
+#define SCTP_DEBUG_XXX 0x00080000
+#define SCTP_DEBUG_PCB1 0x00100000
+#define SCTP_DEBUG_PCB2 0x00200000
+#define SCTP_DEBUG_PCB3 0x00400000
+#define SCTP_DEBUG_PCB4 0x00800000
+#define SCTP_DEBUG_INDATA1 0x01000000
+#define SCTP_DEBUG_INDATA2 0x02000000
+#define SCTP_DEBUG_INDATA3 0x04000000
+#define SCTP_DEBUG_INDATA4 0x08000000
+#define SCTP_DEBUG_USRREQ1 0x10000000
+#define SCTP_DEBUG_USRREQ2 0x20000000
+#define SCTP_DEBUG_PEEL1 0x40000000
+#define SCTP_DEBUG_XXXXX 0x80000000
+#define SCTP_DEBUG_ALL 0x7ff3ffff
+#define SCTP_DEBUG_NOISY 0x00040000
+
+/* What the sender needs to see to avoid SWS or we consider the peer's rwnd 0 */
+#define SCTP_SWS_SENDER_DEF 1420
+
+/*
+ * SWS is scaled to the sb_hiwat of the socket. A value of 2 is hiwat/4, 1
+ * would be hiwat/2 etc.
+ */
+/* What the receiver needs to see in sockbuf or we tell the peer it's 1 */
+#define SCTP_SWS_RECEIVER_DEF 3000
+
+#define SCTP_INITIAL_CWND 4380
+
+/* amount peer is obligated to have in rwnd or I will abort */
+#define SCTP_MIN_RWND 1500
+
+#define SCTP_WINDOW_MIN 1500 /* smallest rwnd can be */
+#define SCTP_WINDOW_MAX 1048576	/* biggest I can grow rwnd to. My playing
+					 * around suggests a value greater than 64k
+					 * does not do much; I guess via the kernel
+					 * limitations on the stream/socket. */
+
+/* I can handle a 1meg re-assembly */
+#define SCTP_DEFAULT_MAXMSGREASM 1048576
+
+#define SCTP_DEFAULT_MAXSEGMENT 65535
+
+#define DEFAULT_CHUNK_BUFFER 2048
+#define DEFAULT_PARAM_BUFFER 512
+
+#define SCTP_DEFAULT_MINSEGMENT 512 /* MTU size ... if no mtu disc */
+#define SCTP_HOW_MANY_SECRETS 2 /* how many secrets I keep */
+
+#define SCTP_NUMBER_OF_SECRETS 8 /* or 8 * 4 = 32 octets */
+#define SCTP_SECRET_SIZE 32	/* number of octets in 256 bits */
+
+
+/*
+ * SCTP upper layer notifications
+ */
+#define SCTP_NOTIFY_ASSOC_UP 1
+#define SCTP_NOTIFY_ASSOC_DOWN 2
+#define SCTP_NOTIFY_INTERFACE_DOWN 3
+#define SCTP_NOTIFY_INTERFACE_UP 4
+#define SCTP_NOTIFY_DG_FAIL 5
+#define SCTP_NOTIFY_STRDATA_ERR 6
+#define SCTP_NOTIFY_ASSOC_ABORTED 7
+#define SCTP_NOTIFY_PEER_OPENED_STREAM 8
+#define SCTP_NOTIFY_STREAM_OPENED_OK 9
+#define SCTP_NOTIFY_ASSOC_RESTART 10
+#define SCTP_NOTIFY_HB_RESP 11
+#define SCTP_NOTIFY_ASCONF_SUCCESS 12
+#define SCTP_NOTIFY_ASCONF_FAILED 13
+#define SCTP_NOTIFY_PEER_SHUTDOWN 14
+#define SCTP_NOTIFY_ASCONF_ADD_IP 15
+#define SCTP_NOTIFY_ASCONF_DELETE_IP 16
+#define SCTP_NOTIFY_ASCONF_SET_PRIMARY 17
+#define SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION 18
+#define SCTP_NOTIFY_ADAPTATION_INDICATION 19
+/* same as above */
+#define SCTP_NOTIFY_ADAPTION_INDICATION 19
+#define SCTP_NOTIFY_INTERFACE_CONFIRMED 20
+#define SCTP_NOTIFY_STR_RESET_RECV 21
+#define SCTP_NOTIFY_STR_RESET_SEND 22
+#define SCTP_NOTIFY_STR_RESET_FAILED_OUT 23
+#define SCTP_NOTIFY_STR_RESET_FAILED_IN 24
+#define SCTP_NOTIFY_AUTH_NEW_KEY 25
+#define SCTP_NOTIFY_AUTH_KEY_CONFLICT 26
+#define SCTP_NOTIFY_SPECIAL_SP_FAIL 27
+#define SCTP_NOTIFY_MAX 27
+
+/* This is the value for messages that are NOT completely
+ * copied down, where we will start to split the message.
+ * So, with our default, we split only if the piece we
+ * want to take will fill up a full MTU (assuming
+ * a 1500 byte MTU).
+ */
+#define SCTP_DEFAULT_SPLIT_POINT_MIN 1452
+
+/* This value determines the default for when
+ * we try to add more to the send queue, if
+ * there is room. This prevents us from cycling
+ * into the copy_resume routine too often if
+ * we have not got enough space to add a decent
+ * sized message. Note that if we have enough
+ * space to complete the message copy we will always
+ * add to the message, no matter what the size. It is
+ * only when we reach the point that we have some left
+ * to add, and there is only room for part of it, that we
+ * will use this threshold. It is also a sysctl.
+ */
+#define SCTP_DEFAULT_ADD_MORE 1452
+
+#ifndef SCTP_PCBHASHSIZE
+/* default number of association hash buckets in each endpoint */
+#define SCTP_PCBHASHSIZE 256
+#endif
+#ifndef SCTP_TCBHASHSIZE
+#define SCTP_TCBHASHSIZE 1024
+#endif
+
+#ifndef SCTP_CHUNKQUEUE_SCALE
+#define SCTP_CHUNKQUEUE_SCALE 10
+#endif
+
+/* clock variance is 1 ms */
+#define SCTP_CLOCK_GRANULARITY 1
+#define IP_HDR_SIZE 40		/* we use the size of an IPv6 header here;
+				 * this over-reserves a little for IPv4 but
+				 * it simplifies the IPv6 addition */
+
+#ifndef IPPROTO_SCTP
+#define IPPROTO_SCTP 132 /* the Official IANA number :-) */
+#endif /* !IPPROTO_SCTP */
+
+#define SCTP_MAX_DATA_BUNDLING 256
+#define SCTP_MAX_CONTROL_BUNDLING 20
+
+/* modular comparison */
+/* True if a > b (mod = M) */
+#define compare_with_wrap(a, b, M) (((a > b) && ((a - b) < ((M >> 1) + 1))) || \
+ ((b > a) && ((b - a) > ((M >> 1) + 1))))
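+/*
+ * For example, with M = 0xffffffff: compare_with_wrap(2, 0xfffffffe, M)
+ * is true (TSN 2 is "ahead" of 0xfffffffe across the wrap), while
+ * compare_with_wrap(5, 10, M) is false.
+ */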
+
+
+/* Mapping array manipulation routines */
+#define SCTP_IS_TSN_PRESENT(arry, gap) ((arry[(gap >> 3)] >> (gap & 0x07)) & 0x01)
+#define SCTP_SET_TSN_PRESENT(arry, gap) (arry[(gap >> 3)] |= (0x01 << ((gap & 0x07))))
+#define SCTP_UNSET_TSN_PRESENT(arry, gap) (arry[(gap >> 3)] &= ((~(0x01 << ((gap & 0x07)))) & 0xff))
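+/*
+ * For example, gap = 11 addresses bit 3 of arry[1], so
+ * SCTP_SET_TSN_PRESENT(arry, 11) performs arry[1] |= 0x08.
+ */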
+
+
+/*
+ * This value defines the number of vtag block time wait entries per list
+ * element. Each entry will take two 4-byte ints (and of course the overhead
+ * of the next pointer as well). Using 15 as an example will yield ((8 *
+ * 15) + 8) or 128 bytes of overhead for each timewait block that gets
+ * initialized. Increasing it to 31 would yield 256 bytes per block.
+ */
+#define SCTP_NUMBER_IN_VTAG_BLOCK 15
+/*
+ * If we use the STACK option, we have an array of this many head pointers.
+ * This array is mod'd with the size to find which bucket, and then all
+ * entries must be searched to see if the tag is in timed wait. If so we
+ * reject it.
+ */
+#define SCTP_STACK_VTAG_HASH_SIZE 31
+
+/*
+ * If we use the per-endpoint model then we do not have a hash table of
+ * entries but instead have a single head pointer and we must crawl through
+ * the entire list.
+ */
+
+/*
+ * Number of seconds of time wait, tied to the MSL value (2 minutes). The
+ * 480 seconds used here is 8 minutes, i.e. 4 * MSL.
+ */
+#define SCTP_TIME_WAIT 480
+
+/* The system retains a cache of free chunks in order to
+ * cut down on calls to the memory allocation system. There
+ * is a per-association limit of free items and an overall
+ * system limit. If either one gets hit then the resource
+ * stops being cached.
+ */
+
+#define SCTP_DEF_ASOC_RESC_LIMIT 10
+#define SCTP_DEF_SYSTEM_RESC_LIMIT 1000
+
+
+
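+/* Matches the RFC 1918 private ranges: 10/8, 172.16/12, and 192.168/16. */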
+#define IN4_ISPRIVATE_ADDRESS(a) \
+ ((((u_char *)&(a)->s_addr)[0] == 10) || \
+ ((((u_char *)&(a)->s_addr)[0] == 172) && \
+ (((u_char *)&(a)->s_addr)[1] >= 16) && \
+	 (((u_char *)&(a)->s_addr)[1] <= 31)) || \
+ ((((u_char *)&(a)->s_addr)[0] == 192) && \
+ (((u_char *)&(a)->s_addr)[1] == 168)))
+
+#define IN4_ISLOOPBACK_ADDRESS(a) \
+ ((((u_char *)&(a)->s_addr)[0] == 127) && \
+ (((u_char *)&(a)->s_addr)[1] == 0) && \
+ (((u_char *)&(a)->s_addr)[2] == 0) && \
+ (((u_char *)&(a)->s_addr)[3] == 1))
+
+
+
+
+#if defined(_KERNEL)
+
+
+#define SCTP_GETTIME_TIMEVAL(x) (getmicrouptime(x))
+#define SCTP_GETPTIME_TIMEVAL(x) (microuptime(x))
+/*#if defined(__FreeBSD__) || defined(__APPLE__)*/
+/*#define SCTP_GETTIME_TIMEVAL(x) { \*/
+/* (x)->tv_sec = ticks / 1000; \*/
+/* (x)->tv_usec = (ticks % 1000) * 1000; \*/
+/*}*/
+
+/*#else*/
+/*#define SCTP_GETTIME_TIMEVAL(x) (microtime(x))*/
+/*#endif __FreeBSD__ */
+
+#define sctp_sowwakeup(inp, so) \
+do { \
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \
+ inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEOUTPUT; \
+ } else { \
+ sowwakeup(so); \
+ } \
+} while (0)
+
+#define sctp_sowwakeup_locked(inp, so) \
+do { \
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \
+ SOCKBUF_UNLOCK(&((so)->so_snd)); \
+ inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEOUTPUT; \
+ } else { \
+ sowwakeup_locked(so); \
+ } \
+} while (0)
+
+#define sctp_sorwakeup(inp, so) \
+do { \
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \
+ inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEINPUT; \
+ } else { \
+ sorwakeup(so); \
+ } \
+} while (0)
+
+/* FIXME */
+#define sctp_sorwakeup_locked(inp, so) \
+do { \
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \
+ inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEINPUT; \
+ SOCKBUF_UNLOCK(&((so)->so_rcv)); \
+ } else { \
+ sorwakeup_locked(so); \
+ } \
+} while (0)
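+
+/*
+ * While SCTP_PCB_FLAGS_DONT_WAKE is set, the macros above suppress the
+ * actual socket wakeup and instead record WAKEOUTPUT/WAKEINPUT in
+ * sctp_flags, presumably so the deferred wakeup can be issued once
+ * wakeups are re-enabled.
+ */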
+
+#endif /* _KERNEL */
+#endif
diff --git a/sys/netinet/sctp_crc32.c b/sys/netinet/sctp_crc32.c
new file mode 100644
index 0000000..e84d7fd
--- /dev/null
+++ b/sys/netinet/sctp_crc32.c
@@ -0,0 +1,713 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_crc32.c,v 1.12 2005/03/06 16:04:17 itojun Exp $ */
+
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_sctp.h"
+#include <sys/param.h>
+#include <netinet/sctp_crc32.h>
+
+#ifndef SCTP_USE_ADLER32
+
+
+/**
+ *
+ * Routine Description:
+ *
+ * Computes the CRC32c checksum for the specified buffer using the slicing by 8
+ * algorithm over 64 bit quantities.
+ *
+ * Arguments:
+ *
+ * crc - the initial or running CRC remainder value
+ * p_buf - the packet buffer where crc computations are being performed
+ * length - the length of p_buf in bytes
+ * init_bytes - the number of initial bytes that need to be processed before
+ * aligning p_buf to multiples of 4 bytes
+ *
+ * Return value:
+ *
+ * The computed CRC32c value
+ */
+
+
+/*
+ * Copyright (c) 2004-2006 Intel Corporation - All Rights Reserved
+ *
+ *
+ * This software program is licensed subject to the BSD License, available at
+ * http://www.opensource.org/licenses/bsd-license.html.
+ *
+ * Abstract:
+ *
+ * Tables for software CRC generation
+ */
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = 0x1EDC6F41
+ * Generator Polynomial Length = 32 bits
+ * Reflected Bits = TRUE
+ * Table Generation Offset = 32 bits
+ * Number of Slices = 8 slices
+ * Slice Lengths = 8 8 8 8 8 8 8 8
+ * Directory Name = .\
+ * File Name = 8x256_tables.c
+ */
+
+uint32_t sctp_crc_tableil8_o32[256] =
+{
+ 0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
+ 0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B, 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
+ 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B, 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
+ 0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
+ 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A, 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
+ 0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
+ 0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45, 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
+ 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A, 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
+ 0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48, 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
+ 0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687, 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
+ 0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
+ 0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8, 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
+ 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096, 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
+ 0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859, 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
+ 0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9, 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
+ 0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
+ 0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C, 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
+ 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043, 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
+ 0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3, 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
+ 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C, 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
+ 0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
+ 0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D, 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
+ 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D, 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
+ 0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
+ 0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530, 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
+ 0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
+ 0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F, 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
+ 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90, 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
+ 0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE, 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
+ 0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321, 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
+ 0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
+ 0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E, 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o32
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = 0x1EDC6F41
+ * Generator Polynomial Length = 32 bits
+ * Reflected Bits = TRUE
+ * Table Generation Offset = 32 bits
+ * Number of Slices = 8 slices
+ * Slice Lengths = 8 8 8 8 8 8 8 8
+ * Directory Name = .\
+ * File Name = 8x256_tables.c
+ */
+
+uint32_t sctp_crc_tableil8_o40[256] =
+{
+ 0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899, 0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945,
+ 0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21, 0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD,
+ 0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918, 0x714F905D, 0x62ED082A, 0x560AA0B3, 0x45A838C4,
+ 0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0, 0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C,
+ 0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B, 0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47,
+ 0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823, 0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF,
+ 0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A, 0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6,
+ 0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2, 0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E,
+ 0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D, 0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41,
+ 0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25, 0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9,
+ 0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C, 0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0,
+ 0x5DC6F43D, 0x4E646C4A, 0x7A83C4D3, 0x69215CA4, 0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78,
+ 0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F, 0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43,
+ 0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27, 0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB,
+ 0xBF59D487, 0xACFB4CF0, 0x981CE469, 0x8BBE7C1E, 0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2,
+ 0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6, 0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A,
+ 0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260, 0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC,
+ 0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8, 0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004,
+ 0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1, 0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D,
+ 0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059, 0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185,
+ 0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162, 0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE,
+ 0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA, 0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306,
+ 0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3, 0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F,
+ 0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B, 0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287,
+ 0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464, 0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8,
+ 0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC, 0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600,
+ 0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5, 0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439,
+ 0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D, 0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781,
+ 0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766, 0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA,
+ 0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE, 0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502,
+ 0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7, 0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B,
+ 0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F, 0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o40
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = 0x1EDC6F41
+ * Generator Polynomial Length = 32 bits
+ * Reflected Bits = TRUE
+ * Table Generation Offset = 32 bits
+ * Number of Slices = 8 slices
+ * Slice Lengths = 8 8 8 8 8 8 8 8
+ * Directory Name = .\
+ * File Name = 8x256_tables.c
+ */
+
+uint32_t sctp_crc_tableil8_o48[256] =
+{
+ 0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073, 0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469,
+ 0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6, 0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC,
+ 0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9, 0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3,
+ 0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C, 0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726,
+ 0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67, 0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D,
+ 0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2, 0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8,
+ 0x91E6869E, 0x34A714E0, 0xDE89D493, 0x7BC846ED, 0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7,
+ 0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828, 0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32,
+ 0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA, 0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0,
+ 0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F, 0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75,
+ 0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20, 0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A,
+ 0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5, 0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF,
+ 0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE, 0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4,
+ 0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B, 0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161,
+ 0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634, 0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E,
+ 0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1, 0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB,
+ 0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730, 0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A,
+ 0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5, 0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF,
+ 0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA, 0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0,
+ 0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F, 0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065,
+ 0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24, 0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E,
+ 0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1, 0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB,
+ 0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE, 0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4,
+ 0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B, 0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71,
+ 0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9, 0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3,
+ 0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C, 0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36,
+ 0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63, 0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79,
+ 0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6, 0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC,
+ 0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD, 0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7,
+ 0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238, 0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622,
+ 0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177, 0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D,
+ 0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2, 0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o48
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = 0x1EDC6F41
+ * Generator Polynomial Length = 32 bits
+ * Reflected Bits = TRUE
+ * Table Generation Offset = 32 bits
+ * Number of Slices = 8 slices
+ * Slice Lengths = 8 8 8 8 8 8 8 8
+ * Directory Name = .\
+ * File Name = 8x256_tables.c
+ */
+
+uint32_t sctp_crc_tableil8_o56[256] =
+{
+ 0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939, 0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA,
+ 0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF, 0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C,
+ 0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804, 0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7,
+ 0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2, 0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11,
+ 0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2, 0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41,
+ 0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54, 0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7,
+ 0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F, 0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C,
+ 0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69, 0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A,
+ 0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE, 0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D,
+ 0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538, 0x26C00DF2, 0xFB85A74A, 0x99A72E73, 0x44E284CB,
+ 0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3, 0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610,
+ 0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405, 0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6,
+ 0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255, 0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6,
+ 0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3, 0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040,
+ 0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368, 0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B,
+ 0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E, 0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D,
+ 0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006, 0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5,
+ 0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0, 0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213,
+ 0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B, 0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8,
+ 0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD, 0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E,
+ 0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D, 0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E,
+ 0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B, 0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698,
+ 0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0, 0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443,
+ 0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656, 0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5,
+ 0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1, 0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12,
+ 0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07, 0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4,
+ 0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC, 0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F,
+ 0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A, 0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9,
+ 0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A, 0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99,
+ 0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C, 0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F,
+ 0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57, 0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4,
+ 0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1, 0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o56
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = 0x1EDC6F41
+ * Generator Polynomial Length = 32 bits
+ * Reflected Bits = TRUE
+ * Table Generation Offset = 32 bits
+ * Number of Slices = 8 slices
+ * Slice Lengths = 8 8 8 8 8 8 8 8
+ * Directory Name = .\
+ * File Name = 8x256_tables.c
+ */
+
+uint32_t sctp_crc_tableil8_o64[256] =
+{
+ 0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4, 0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44,
+ 0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65, 0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5,
+ 0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127, 0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97,
+ 0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6, 0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406,
+ 0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3, 0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13,
+ 0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32, 0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082,
+ 0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470, 0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0,
+ 0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1, 0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151,
+ 0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A, 0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA,
+ 0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB, 0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B,
+ 0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89, 0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539,
+ 0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018, 0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8,
+ 0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D, 0xCCBC6149, 0xF4AD0EE5, 0xBC9EBE11, 0x848FD1BD,
+ 0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C, 0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C,
+ 0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE, 0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E,
+ 0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F, 0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF,
+ 0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8, 0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18,
+ 0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39, 0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089,
+ 0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B, 0x01C50A3F, 0x39D46593, 0x71E7D567, 0x49F6BACB,
+ 0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA, 0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A,
+ 0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF, 0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F,
+ 0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E, 0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE,
+ 0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C, 0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C,
+ 0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD, 0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D,
+ 0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06, 0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6,
+ 0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497, 0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27,
+ 0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5, 0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065,
+ 0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544, 0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4,
+ 0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51, 0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1,
+ 0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0, 0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70,
+ 0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82, 0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532,
+ 0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013, 0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o64
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = 0x1EDC6F41
+ * Generator Polynomial Length = 32 bits
+ * Reflected Bits = TRUE
+ * Table Generation Offset = 32 bits
+ * Number of Slices = 8 slices
+ * Slice Lengths = 8 8 8 8 8 8 8 8
+ * Directory Name = .\
+ * File Name = 8x256_tables.c
+ */
+
+uint32_t sctp_crc_tableil8_o72[256] =
+{
+ 0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA, 0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD,
+ 0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5, 0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2,
+ 0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4, 0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93,
+ 0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB, 0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C,
+ 0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57, 0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20,
+ 0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548, 0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F,
+ 0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69, 0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E,
+ 0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576, 0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201,
+ 0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031, 0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746,
+ 0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E, 0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59,
+ 0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F, 0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778,
+ 0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810, 0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67,
+ 0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC, 0x39057A11, 0xD6351108, 0xE289DAD2, 0x0DB9B1CB,
+ 0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3, 0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4,
+ 0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682, 0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5,
+ 0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D, 0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA,
+ 0x1C08B7D6, 0xF338DCCF, 0xC7841715, 0x28B47C0C, 0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B,
+ 0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413, 0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364,
+ 0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32, 0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45,
+ 0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D, 0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A,
+ 0x99FCA15B, 0x76CCCA42, 0x42700198, 0xAD406A81, 0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6,
+ 0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E, 0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9,
+ 0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF, 0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8,
+ 0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0, 0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7,
+ 0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7, 0xA0F9DB4A, 0x4FC9B053, 0x7B757B89, 0x94451090,
+ 0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8, 0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F,
+ 0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9, 0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE,
+ 0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6, 0x00F3F36B, 0xEFC39872, 0xDB7F53A8, 0x344F38B1,
+ 0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A, 0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D,
+ 0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975, 0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02,
+ 0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154, 0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623,
+ 0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B, 0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o72
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = 0x1EDC6F41
+ * Generator Polynomial Length = 32 bits
+ * Reflected Bits = TRUE
+ * Table Generation Offset = 32 bits
+ * Number of Slices = 8 slices
+ * Slice Lengths = 8 8 8 8 8 8 8 8
+ * Directory Name = .\
+ * File Name = 8x256_tables.c
+ */
+
+uint32_t sctp_crc_tableil8_o80[256] =
+{
+ 0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558, 0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089,
+ 0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B, 0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA,
+ 0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE, 0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F,
+ 0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD, 0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C,
+ 0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5, 0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334,
+ 0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6, 0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67,
+ 0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43, 0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992,
+ 0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110, 0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1,
+ 0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222, 0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3,
+ 0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71, 0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0,
+ 0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884, 0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55,
+ 0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7, 0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006,
+ 0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F, 0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E,
+ 0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC, 0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D,
+ 0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39, 0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8,
+ 0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A, 0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB,
+ 0xF56E0EF4, 0x9D6D223C, 0x25685764, 0x4D6B7BAC, 0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D,
+ 0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF, 0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E,
+ 0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A, 0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB,
+ 0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59, 0x82F8CCD0, 0xEAFBE018, 0x52FE9540, 0x3AFDB988,
+ 0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811, 0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0,
+ 0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542, 0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093,
+ 0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7, 0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766,
+ 0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4, 0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35,
+ 0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6, 0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907,
+ 0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185, 0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454,
+ 0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670, 0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1,
+ 0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23, 0xF84FCBAA, 0x904CE762, 0x2849923A, 0x404ABEF2,
+ 0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B, 0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA,
+ 0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238, 0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9,
+ 0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD, 0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C,
+ 0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E, 0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o80
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = 0x1EDC6F41
+ * Generator Polynomial Length = 32 bits
+ * Reflected Bits = TRUE
+ * Table Generation Offset = 32 bits
+ * Number of Slices = 8 slices
+ * Slice Lengths = 8 8 8 8 8 8 8 8
+ * Directory Name = .\
+ * File Name = 8x256_tables.c
+ */
+
+uint32_t sctp_crc_tableil8_o88[256] =
+{
+ 0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769, 0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504,
+ 0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3, 0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE,
+ 0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD, 0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0,
+ 0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07, 0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A,
+ 0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0, 0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D,
+ 0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A, 0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447,
+ 0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44, 0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929,
+ 0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E, 0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3,
+ 0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B, 0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36,
+ 0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881, 0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC,
+ 0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF, 0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782,
+ 0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135, 0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358,
+ 0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2, 0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF,
+ 0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18, 0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75,
+ 0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076, 0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B,
+ 0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC, 0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1,
+ 0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D, 0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360,
+ 0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7, 0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA,
+ 0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9, 0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4,
+ 0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63, 0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E,
+ 0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494, 0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9,
+ 0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E, 0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223,
+ 0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20, 0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D,
+ 0xFF4CFE93, 0xB67083B4, 0x6D3404DD, 0x240879FA, 0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97,
+ 0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F, 0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852,
+ 0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5, 0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88,
+ 0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B, 0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6,
+ 0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751, 0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C,
+ 0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6, 0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB,
+ 0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C, 0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911,
+ 0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612, 0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F,
+ 0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8, 0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o88
+ */
+
+static uint32_t
+sctp_crc32c_sb8_64_bit(uint32_t crc,
+ unsigned char *p_buf,
+ uint32_t length,
+ uint32_t init_bytes)
+{
+ uint32_t li;
+ uint32_t term1, term2;
+ uint32_t running_length;
+ uint32_t end_bytes;
+
+ running_length = ((length - init_bytes) / 8) * 8;
+ end_bytes = length - init_bytes - running_length;
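+	/*
+	 * The buffer is consumed in three phases: init_bytes single bytes
+	 * until p_buf is 4-byte aligned, then running_length bytes in
+	 * 8-byte blocks via the eight lookup tables, and finally the
+	 * end_bytes tail one byte at a time.
+	 */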
+
+ for (li = 0; li < init_bytes; li++)
+ crc = sctp_crc_tableil8_o32[(crc ^ *p_buf++) & 0x000000FF] ^
+ (crc >> 8);
+ for (li = 0; li < running_length / 8; li++) {
+#if BYTE_ORDER == BIG_ENDIAN
+ crc ^= *p_buf++;
+ crc ^= (*p_buf++) << 8;
+ crc ^= (*p_buf++) << 16;
+ crc ^= (*p_buf++) << 24;
+#else
+ crc ^= *(uint32_t *) p_buf;
+ p_buf += 4;
+#endif
+ term1 = sctp_crc_tableil8_o88[crc & 0x000000FF] ^
+ sctp_crc_tableil8_o80[(crc >> 8) & 0x000000FF];
+ term2 = crc >> 16;
+ crc = term1 ^
+ sctp_crc_tableil8_o72[term2 & 0x000000FF] ^
+ sctp_crc_tableil8_o64[(term2 >> 8) & 0x000000FF];
+
+#if BYTE_ORDER == BIG_ENDIAN
+ crc ^= sctp_crc_tableil8_o56[*p_buf++];
+ crc ^= sctp_crc_tableil8_o48[*p_buf++];
+ crc ^= sctp_crc_tableil8_o40[*p_buf++];
+ crc ^= sctp_crc_tableil8_o32[*p_buf++];
+#else
+ term1 = sctp_crc_tableil8_o56[(*(uint32_t *) p_buf) & 0x000000FF] ^
+ sctp_crc_tableil8_o48[((*(uint32_t *) p_buf) >> 8) & 0x000000FF];
+
+ term2 = (*(uint32_t *) p_buf) >> 16;
+ crc = crc ^
+ term1 ^
+ sctp_crc_tableil8_o40[term2 & 0x000000FF] ^
+ sctp_crc_tableil8_o32[(term2 >> 8) & 0x000000FF];
+ p_buf += 4;
+#endif
+ }
+ for (li = 0; li < end_bytes; li++)
+ crc = sctp_crc_tableil8_o32[(crc ^ *p_buf++) & 0x000000FF] ^
+ (crc >> 8);
+ return crc;
+}
+
+
+/**
+ *
+ * Routine Description:
+ *
+ * Computes the running CRC32c over a buffer, handling any initial
+ * buffer misalignment before handing off to the slicing-by-8 routine.
+ *
+ * Arguments:
+ *
+ * crc32 - the running CRC value
+ * buffer - the data to be checksummed
+ * length - the length of buffer in bytes
+ *
+ * Return value:
+ *
+ * The updated CRC32c value
+ */
+uint32_t
+update_crc32(uint32_t crc32,
+ unsigned char *buffer,
+ unsigned int length)
+{
+ uint32_t offset;
+
+ if (length == 0) {
+ return (crc32);
+ }
+	/* bytes by which buffer is offset from a 4-byte boundary */
+	offset = (uint32_t)((uintptr_t)buffer & 0x3);
+ return (sctp_crc32c_sb8_64_bit(crc32, buffer, length, offset));
+}
+
+unsigned long sctp_crc_c[256] = {
+ 0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L,
+ 0xC79A971FL, 0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL,
+ 0x8AD958CFL, 0x78B2DBCCL, 0x6BE22838L, 0x9989AB3BL,
+ 0x4D43CFD0L, 0xBF284CD3L, 0xAC78BF27L, 0x5E133C24L,
+ 0x105EC76FL, 0xE235446CL, 0xF165B798L, 0x030E349BL,
+ 0xD7C45070L, 0x25AFD373L, 0x36FF2087L, 0xC494A384L,
+ 0x9A879FA0L, 0x68EC1CA3L, 0x7BBCEF57L, 0x89D76C54L,
+ 0x5D1D08BFL, 0xAF768BBCL, 0xBC267848L, 0x4E4DFB4BL,
+ 0x20BD8EDEL, 0xD2D60DDDL, 0xC186FE29L, 0x33ED7D2AL,
+ 0xE72719C1L, 0x154C9AC2L, 0x061C6936L, 0xF477EA35L,
+ 0xAA64D611L, 0x580F5512L, 0x4B5FA6E6L, 0xB93425E5L,
+ 0x6DFE410EL, 0x9F95C20DL, 0x8CC531F9L, 0x7EAEB2FAL,
+ 0x30E349B1L, 0xC288CAB2L, 0xD1D83946L, 0x23B3BA45L,
+ 0xF779DEAEL, 0x05125DADL, 0x1642AE59L, 0xE4292D5AL,
+ 0xBA3A117EL, 0x4851927DL, 0x5B016189L, 0xA96AE28AL,
+ 0x7DA08661L, 0x8FCB0562L, 0x9C9BF696L, 0x6EF07595L,
+ 0x417B1DBCL, 0xB3109EBFL, 0xA0406D4BL, 0x522BEE48L,
+ 0x86E18AA3L, 0x748A09A0L, 0x67DAFA54L, 0x95B17957L,
+ 0xCBA24573L, 0x39C9C670L, 0x2A993584L, 0xD8F2B687L,
+ 0x0C38D26CL, 0xFE53516FL, 0xED03A29BL, 0x1F682198L,
+ 0x5125DAD3L, 0xA34E59D0L, 0xB01EAA24L, 0x42752927L,
+ 0x96BF4DCCL, 0x64D4CECFL, 0x77843D3BL, 0x85EFBE38L,
+ 0xDBFC821CL, 0x2997011FL, 0x3AC7F2EBL, 0xC8AC71E8L,
+ 0x1C661503L, 0xEE0D9600L, 0xFD5D65F4L, 0x0F36E6F7L,
+ 0x61C69362L, 0x93AD1061L, 0x80FDE395L, 0x72966096L,
+ 0xA65C047DL, 0x5437877EL, 0x4767748AL, 0xB50CF789L,
+ 0xEB1FCBADL, 0x197448AEL, 0x0A24BB5AL, 0xF84F3859L,
+ 0x2C855CB2L, 0xDEEEDFB1L, 0xCDBE2C45L, 0x3FD5AF46L,
+ 0x7198540DL, 0x83F3D70EL, 0x90A324FAL, 0x62C8A7F9L,
+ 0xB602C312L, 0x44694011L, 0x5739B3E5L, 0xA55230E6L,
+ 0xFB410CC2L, 0x092A8FC1L, 0x1A7A7C35L, 0xE811FF36L,
+ 0x3CDB9BDDL, 0xCEB018DEL, 0xDDE0EB2AL, 0x2F8B6829L,
+ 0x82F63B78L, 0x709DB87BL, 0x63CD4B8FL, 0x91A6C88CL,
+ 0x456CAC67L, 0xB7072F64L, 0xA457DC90L, 0x563C5F93L,
+ 0x082F63B7L, 0xFA44E0B4L, 0xE9141340L, 0x1B7F9043L,
+ 0xCFB5F4A8L, 0x3DDE77ABL, 0x2E8E845FL, 0xDCE5075CL,
+ 0x92A8FC17L, 0x60C37F14L, 0x73938CE0L, 0x81F80FE3L,
+ 0x55326B08L, 0xA759E80BL, 0xB4091BFFL, 0x466298FCL,
+ 0x1871A4D8L, 0xEA1A27DBL, 0xF94AD42FL, 0x0B21572CL,
+ 0xDFEB33C7L, 0x2D80B0C4L, 0x3ED04330L, 0xCCBBC033L,
+ 0xA24BB5A6L, 0x502036A5L, 0x4370C551L, 0xB11B4652L,
+ 0x65D122B9L, 0x97BAA1BAL, 0x84EA524EL, 0x7681D14DL,
+ 0x2892ED69L, 0xDAF96E6AL, 0xC9A99D9EL, 0x3BC21E9DL,
+ 0xEF087A76L, 0x1D63F975L, 0x0E330A81L, 0xFC588982L,
+ 0xB21572C9L, 0x407EF1CAL, 0x532E023EL, 0xA145813DL,
+ 0x758FE5D6L, 0x87E466D5L, 0x94B49521L, 0x66DF1622L,
+ 0x38CC2A06L, 0xCAA7A905L, 0xD9F75AF1L, 0x2B9CD9F2L,
+ 0xFF56BD19L, 0x0D3D3E1AL, 0x1E6DCDEEL, 0xEC064EEDL,
+ 0xC38D26C4L, 0x31E6A5C7L, 0x22B65633L, 0xD0DDD530L,
+ 0x0417B1DBL, 0xF67C32D8L, 0xE52CC12CL, 0x1747422FL,
+ 0x49547E0BL, 0xBB3FFD08L, 0xA86F0EFCL, 0x5A048DFFL,
+ 0x8ECEE914L, 0x7CA56A17L, 0x6FF599E3L, 0x9D9E1AE0L,
+ 0xD3D3E1ABL, 0x21B862A8L, 0x32E8915CL, 0xC083125FL,
+ 0x144976B4L, 0xE622F5B7L, 0xF5720643L, 0x07198540L,
+ 0x590AB964L, 0xAB613A67L, 0xB831C993L, 0x4A5A4A90L,
+ 0x9E902E7BL, 0x6CFBAD78L, 0x7FAB5E8CL, 0x8DC0DD8FL,
+ 0xE330A81AL, 0x115B2B19L, 0x020BD8EDL, 0xF0605BEEL,
+ 0x24AA3F05L, 0xD6C1BC06L, 0xC5914FF2L, 0x37FACCF1L,
+ 0x69E9F0D5L, 0x9B8273D6L, 0x88D28022L, 0x7AB90321L,
+ 0xAE7367CAL, 0x5C18E4C9L, 0x4F48173DL, 0xBD23943EL,
+ 0xF36E6F75L, 0x0105EC76L, 0x12551F82L, 0xE03E9C81L,
+ 0x34F4F86AL, 0xC69F7B69L, 0xD5CF889DL, 0x27A40B9EL,
+ 0x79B737BAL, 0x8BDCB4B9L, 0x988C474DL, 0x6AE7C44EL,
+ 0xBE2DA0A5L, 0x4C4623A6L, 0x5F16D052L, 0xAD7D5351L,
+};
+
+
+#define SCTP_CRC32C(c,d) (c=(c>>8)^sctp_crc_c[(c^(d))&0xFF])
+
+u_int32_t
+old_update_crc32(u_int32_t crc32,
+ unsigned char *buffer,
+ unsigned int length)
+{
+ unsigned int i;
+
+ for (i = 0; i < length; i++) {
+ SCTP_CRC32C(crc32, buffer[i]);
+ }
+ return (crc32);
+}
+
+
+uint32_t
+sctp_csum_finalize(uint32_t crc32)
+{
+ uint32_t result;
+
+#if BYTE_ORDER == BIG_ENDIAN
+ uint8_t byte0, byte1, byte2, byte3;
+
+#endif
+ /* Complement the result */
+ result = ~crc32;
+#if BYTE_ORDER == BIG_ENDIAN
+ /*
+	 * For BIG-ENDIAN (aka Motorola byte order) platforms the result is
+	 * in little-endian form, so we must manually swap the bytes;
+	 * htonl() is a no-op on such platforms and cannot do it for us.
+ */
+ byte0 = result & 0x000000ff;
+ byte1 = (result >> 8) & 0x000000ff;
+ byte2 = (result >> 16) & 0x000000ff;
+ byte3 = (result >> 24) & 0x000000ff;
+ crc32 = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3);
+#else
+ /*
+	 * For little-endian (e.g. Intel) platforms the result already comes
+	 * out in network order, so neither htonl() nor the manual swap
+	 * above is required.
+ */
+ crc32 = result;
+#endif
+ return (crc32);
+}
+
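+/*
+ * Typical use of these routines (per RFC 3309): seed the CRC with all
+ * ones, feed the packet through either update routine, then complement
+ * and byte-swap the result, e.g.
+ *
+ *	uint32_t crc = 0xffffffff;
+ *	crc = update_crc32(crc, buf, len);
+ *	crc = sctp_csum_finalize(crc);
+ *
+ * where buf/len describe the SCTP packet with its checksum field zeroed.
+ */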
+#endif
diff --git a/sys/netinet/sctp_crc32.h b/sys/netinet/sctp_crc32.h
new file mode 100644
index 0000000..c9d41c9
--- /dev/null
+++ b/sys/netinet/sctp_crc32.h
@@ -0,0 +1,56 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_crc32.h,v 1.5 2004/08/17 04:06:16 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __crc32c_h__
+#define __crc32c_h__
+
+
+#include <sys/types.h>
+
+#ifndef SCTP_USE_ADLER32
+
+#if defined(_KERNEL)
+uint32_t update_crc32(uint32_t, unsigned char *, unsigned int);
+
+uint32_t old_update_crc32(uint32_t, unsigned char *, unsigned int);
+
+uint32_t sctp_csum_finalize(uint32_t);
+
+
+
+
+#endif /* _KERNEL */
+#endif /* !SCTP_USE_ADLER32 */
+#endif /* __crc32c_h__ */
diff --git a/sys/netinet/sctp_header.h b/sys/netinet/sctp_header.h
new file mode 100644
index 0000000..e5d04af
--- /dev/null
+++ b/sys/netinet/sctp_header.h
@@ -0,0 +1,562 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_header.h,v 1.14 2005/03/06 16:04:17 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_header_h__
+#define __sctp_header_h__
+
+
+#include <sys/time.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_constants.h>
+
+/*
+ * Parameter structures
+ */
+struct sctp_ipv4addr_param {
+ struct sctp_paramhdr ph;/* type=SCTP_IPV4_PARAM_TYPE, len=8 */
+ uint32_t addr; /* IPV4 address */
+};
+
+struct sctp_ipv6addr_param {
+ struct sctp_paramhdr ph;/* type=SCTP_IPV6_PARAM_TYPE, len=20 */
+ uint8_t addr[16]; /* IPV6 address */
+};
+
+/* Cookie Preservative */
+struct sctp_cookie_perserve_param {
+ struct sctp_paramhdr ph;/* type=SCTP_COOKIE_PRESERVE, len=8 */
+ uint32_t time; /* time in ms to extend cookie */
+};
+
+/* Host Name Address */
+struct sctp_host_name_param {
+ struct sctp_paramhdr ph;/* type=SCTP_HOSTNAME_ADDRESS */
+ char name[1]; /* host name */
+};
+
+/* supported address type */
+struct sctp_supported_addr_param {
+ struct sctp_paramhdr ph;/* type=SCTP_SUPPORTED_ADDRTYPE */
+ uint16_t addr_type[1]; /* array of supported address types */
+};
+
+/* ECN parameter */
+struct sctp_ecn_supported_param {
+ struct sctp_paramhdr ph;/* type=SCTP_ECN_CAPABLE */
+};
+
+
+/* heartbeat info parameter */
+struct sctp_heartbeat_info_param {
+ struct sctp_paramhdr ph;
+ uint32_t time_value_1;
+ uint32_t time_value_2;
+ uint32_t random_value1;
+ uint32_t random_value2;
+ uint16_t user_req;
+ uint8_t addr_family;
+ uint8_t addr_len;
+ char address[SCTP_ADDRMAX];
+};
+
+
+/* draft-ietf-tsvwg-prsctp */
+/* PR-SCTP supported parameter */
+struct sctp_prsctp_supported_param {
+ struct sctp_paramhdr ph;
+};
+
+
+/* draft-ietf-tsvwg-addip-sctp */
+struct sctp_asconf_paramhdr { /* an ASCONF "parameter" */
+ struct sctp_paramhdr ph;/* a SCTP parameter header */
+ uint32_t correlation_id;/* correlation id for this param */
+};
+
+struct sctp_asconf_addr_param { /* an ASCONF address parameter */
+ struct sctp_asconf_paramhdr aph; /* asconf "parameter" */
+ struct sctp_ipv6addr_param addrp; /* max storage size */
+};
+
+struct sctp_asconf_addrv4_param { /* an ASCONF address (v4) parameter */
+ struct sctp_asconf_paramhdr aph; /* asconf "parameter" */
+ struct sctp_ipv4addr_param addrp; /* max storage size */
+};
+
+struct sctp_supported_chunk_types_param {
+ struct sctp_paramhdr ph;/* type = 0x8008 len = x */
+ uint8_t chunk_types[0];
+};
+
+
+/* ECN Nonce: draft-ladha-sctp-ecn-nonce */
+struct sctp_ecn_nonce_supported_param {
+ struct sctp_paramhdr ph;/* type = 0x8001 len = 4 */
+};
+
+
+/*
+ * Structures for DATA chunks
+ */
+struct sctp_data {
+ uint32_t tsn;
+ uint16_t stream_id;
+ uint16_t stream_sequence;
+ uint32_t protocol_id;
+ /* user data follows */
+};
+
+struct sctp_data_chunk {
+ struct sctp_chunkhdr ch;
+ struct sctp_data dp;
+};
+
+/*
+ * Structures for the control chunks
+ */
+
+/* Initiate (INIT)/Initiate Ack (INIT ACK) */
+struct sctp_init {
+ uint32_t initiate_tag; /* initiate tag */
+ uint32_t a_rwnd; /* a_rwnd */
+ uint16_t num_outbound_streams; /* OS */
+ uint16_t num_inbound_streams; /* MIS */
+ uint32_t initial_tsn; /* I-TSN */
+ /* optional param's follow */
+};
+
+/* state cookie header */
+struct sctp_state_cookie { /* this is our definition... */
+ uint8_t identification[16]; /* id of who we are */
+ uint32_t cookie_life; /* life I will award this cookie */
+ uint32_t tie_tag_my_vtag; /* my tag in old association */
+ uint32_t tie_tag_peer_vtag; /* peers tag in old association */
+ uint32_t peers_vtag; /* peers tag in INIT (for quick ref) */
+ uint32_t my_vtag; /* my tag in INIT-ACK (for quick ref) */
+ struct timeval time_entered; /* the time I built cookie */
+ uint32_t address[4]; /* 4 ints/128 bits */
+ uint32_t addr_type; /* address type */
+ uint32_t laddress[4]; /* my local from address */
+ uint32_t laddr_type; /* my local from address type */
+ uint32_t scope_id; /* v6 scope id for link-locals */
+ uint16_t peerport; /* port address of the peer in the INIT */
+ uint16_t myport; /* my port address used in the INIT */
+ uint8_t ipv4_addr_legal;/* Are V4 addr legal? */
+ uint8_t ipv6_addr_legal;/* Are V6 addr legal? */
+ uint8_t local_scope; /* IPv6 local scope flag */
+ uint8_t site_scope; /* IPv6 site scope flag */
+ uint8_t ipv4_scope; /* IPv4 private addr scope */
+ uint8_t loopback_scope; /* loopback scope information */
+ uint16_t reserved;
+ /*
+ * at the end is tacked on the INIT chunk and the INIT-ACK chunk
+ * (minus the cookie).
+ */
+};
+
+struct sctp_inv_mandatory_param {
+ uint16_t cause;
+ uint16_t length;
+ uint32_t num_param;
+ uint16_t param;
+ /*
+	 * We include this field so that it can be zeroed, since only a
+	 * missing cookie will cause this error.
+ */
+ uint16_t resv;
+};
+
+struct sctp_unresolv_addr {
+ uint16_t cause;
+ uint16_t length;
+ uint16_t addr_type;
+ uint16_t reserved; /* Only one invalid addr type */
+};
+
+/* state cookie parameter */
+struct sctp_state_cookie_param {
+ struct sctp_paramhdr ph;
+ struct sctp_state_cookie cookie;
+};
+
+struct sctp_init_chunk {
+ struct sctp_chunkhdr ch;
+ struct sctp_init init;
+};
+
+struct sctp_init_msg {
+ struct sctphdr sh;
+ struct sctp_init_chunk msg;
+};
+
+/* ... used for both INIT and INIT ACK */
+#define sctp_init_ack sctp_init
+#define sctp_init_ack_chunk sctp_init_chunk
+#define sctp_init_ack_msg sctp_init_msg
+
+
+/* Selective Ack (SACK) */
+struct sctp_gap_ack_block {
+ uint16_t start; /* Gap Ack block start */
+ uint16_t end; /* Gap Ack block end */
+};
+
+struct sctp_sack {
+ uint32_t cum_tsn_ack; /* cumulative TSN Ack */
+ uint32_t a_rwnd; /* updated a_rwnd of sender */
+ uint16_t num_gap_ack_blks; /* number of Gap Ack blocks */
+ uint16_t num_dup_tsns; /* number of duplicate TSNs */
+ /* struct sctp_gap_ack_block's follow */
+ /* uint32_t duplicate_tsn's follow */
+};
+
+struct sctp_sack_chunk {
+ struct sctp_chunkhdr ch;
+ struct sctp_sack sack;
+};
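+
+/*
+ * Gap Ack block offsets are relative to cum_tsn_ack; for example, with
+ * cum_tsn_ack = 1000 and one block {start = 2, end = 3}, TSNs 1002-1003
+ * were received while TSN 1001 is still missing.
+ */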
+
+
+/* Heartbeat Request (HEARTBEAT) */
+struct sctp_heartbeat {
+ struct sctp_heartbeat_info_param hb_info;
+};
+
+struct sctp_heartbeat_chunk {
+ struct sctp_chunkhdr ch;
+ struct sctp_heartbeat heartbeat;
+};
+
+/* ... used for Heartbeat Ack (HEARTBEAT ACK) */
+#define sctp_heartbeat_ack sctp_heartbeat
+#define sctp_heartbeat_ack_chunk sctp_heartbeat_chunk
+
+
+/* Abort Association (ABORT) */
+struct sctp_abort_chunk {
+ struct sctp_chunkhdr ch;
+ /* optional error cause may follow */
+};
+
+struct sctp_abort_msg {
+ struct sctphdr sh;
+ struct sctp_abort_chunk msg;
+};
+
+
+/* Shutdown Association (SHUTDOWN) */
+struct sctp_shutdown_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t cumulative_tsn_ack;
+};
+
+
+/* Shutdown Acknowledgment (SHUTDOWN ACK) */
+struct sctp_shutdown_ack_chunk {
+ struct sctp_chunkhdr ch;
+};
+
+
+/* Operation Error (ERROR) */
+struct sctp_error_chunk {
+ struct sctp_chunkhdr ch;
+ /* optional error causes follow */
+};
+
+
+/* Cookie Echo (COOKIE ECHO) */
+struct sctp_cookie_echo_chunk {
+ struct sctp_chunkhdr ch;
+ struct sctp_state_cookie cookie;
+};
+
+/* Cookie Acknowledgment (COOKIE ACK) */
+struct sctp_cookie_ack_chunk {
+ struct sctp_chunkhdr ch;
+};
+
+/* Explicit Congestion Notification Echo (ECNE) */
+struct sctp_ecne_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t tsn;
+};
+
+/* Congestion Window Reduced (CWR) */
+struct sctp_cwr_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t tsn;
+};
+
+/* Shutdown Complete (SHUTDOWN COMPLETE) */
+struct sctp_shutdown_complete_chunk {
+ struct sctp_chunkhdr ch;
+};
+
+/* Oper error holding a stale cookie */
+struct sctp_stale_cookie_msg {
+ struct sctp_paramhdr ph;/* really an error cause */
+ uint32_t time_usec;
+};
+
+struct sctp_adaptation_layer_indication {
+ struct sctp_paramhdr ph;
+ uint32_t indication;
+};
+
+struct sctp_cookie_while_shutting_down {
+ struct sctphdr sh;
+ struct sctp_chunkhdr ch;
+ struct sctp_paramhdr ph;/* really an error cause */
+};
+
+struct sctp_shutdown_complete_msg {
+ struct sctphdr sh;
+ struct sctp_shutdown_complete_chunk shut_cmp;
+};
+
+/*
+ * draft-ietf-tsvwg-addip-sctp
+ */
+/* Address/Stream Configuration Change (ASCONF) */
+struct sctp_asconf_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t serial_number;
+ /* lookup address parameter (mandatory) */
+ /* asconf parameters follow */
+};
+
+/* Address/Stream Configuration Acknowledge (ASCONF ACK) */
+struct sctp_asconf_ack_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t serial_number;
+ /* asconf parameters follow */
+};
+
+/* draft-ietf-tsvwg-prsctp */
+/* Forward Cumulative TSN (FORWARD TSN) */
+struct sctp_forward_tsn_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t new_cumulative_tsn;
+ /* stream/sequence pairs (sctp_strseq) follow */
+};
+
+struct sctp_strseq {
+ uint16_t stream;
+ uint16_t sequence;
+};
+
+struct sctp_forward_tsn_msg {
+ struct sctphdr sh;
+ struct sctp_forward_tsn_chunk msg;
+};
+
+/*
+ * must be a multiple of 4, minus 1 (i.e. 3/7/11/...), so that chunk_type
+ * plus data_bytes[] fills a whole number of 32-bit words
+ */
+
+#define SCTP_NUM_DB_TO_VERIFY 31
+
+struct sctp_chunk_desc {
+ uint8_t chunk_type;
+ uint8_t data_bytes[SCTP_NUM_DB_TO_VERIFY];
+ uint32_t tsn_ifany;
+};
+
+
+struct sctp_pktdrop_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t bottle_bw;
+ uint32_t current_onq;
+ uint16_t trunc_len;
+ uint16_t reserved;
+ uint8_t data[0];
+};
+
+/**********STREAM RESET STUFF ******************/
+
+struct sctp_stream_reset_out_request {
+ struct sctp_paramhdr ph;
+ uint32_t request_seq; /* monotonically increasing seq no */
+ uint32_t response_seq; /* if a response, the resp seq no */
+ uint32_t send_reset_at_tsn; /* last TSN I assigned outbound */
+	uint16_t list_of_streams[0];	/* list of streams to reset, if not all */
+};
+
+struct sctp_stream_reset_in_request {
+ struct sctp_paramhdr ph;
+ uint32_t request_seq;
+	uint16_t list_of_streams[0];	/* list of streams to reset, if not all */
+};
+
+
+struct sctp_stream_reset_tsn_request {
+ struct sctp_paramhdr ph;
+ uint32_t request_seq;
+};
+
+struct sctp_stream_reset_response {
+ struct sctp_paramhdr ph;
+ uint32_t response_seq; /* if a response, the resp seq no */
+ uint32_t result;
+};
+
+struct sctp_stream_reset_response_tsn {
+ struct sctp_paramhdr ph;
+ uint32_t response_seq; /* if a response, the resp seq no */
+ uint32_t result;
+ uint32_t senders_next_tsn;
+ uint32_t receivers_next_tsn;
+};
+
+
+
+#define SCTP_STREAM_RESET_NOTHING 0x00000000 /* Nothing for me to do */
+#define SCTP_STREAM_RESET_PERFORMED 0x00000001 /* Did it */
+#define SCTP_STREAM_RESET_DENIED 0x00000002 /* refused to do it */
+#define SCTP_STREAM_RESET_ERROR_STR 0x00000003 /* bad Stream no */
+#define SCTP_STREAM_RESET_TRY_LATER 0x00000004 /* collision, try again */
+#define SCTP_STREAM_RESET_BAD_SEQNO 0x00000005 /* bad str-reset seq no */
+
+/*
+ * convenience structures; note that if you are making a request for
+ * specific streams, then the request will need to be an overlay structure.
+ */
+
+struct sctp_stream_reset_out_req {
+ struct sctp_chunkhdr ch;
+ struct sctp_stream_reset_out_request sr_req;
+};
+
+struct sctp_stream_reset_in_req {
+ struct sctp_chunkhdr ch;
+ struct sctp_stream_reset_in_request sr_req;
+};
+
+struct sctp_stream_reset_tsn_req {
+ struct sctp_chunkhdr ch;
+ struct sctp_stream_reset_tsn_request sr_req;
+};
+
+struct sctp_stream_reset_resp {
+ struct sctp_chunkhdr ch;
+ struct sctp_stream_reset_response sr_resp;
+};
+
+/* response only valid with a TSN request */
+struct sctp_stream_reset_resp_tsn {
+ struct sctp_chunkhdr ch;
+ struct sctp_stream_reset_response_tsn sr_resp;
+};
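+
+/*
+ * Illustrative sketch (hypothetical, compiled out): per the note above, a
+ * request naming specific streams is built as an overlay, with the stream
+ * numbers landing in the flexible list_of_streams[] trailer. The helper
+ * name is an assumption; buf must be sized for the struct plus the list.
+ */
+#if 0
+static void
+example_build_out_req(uint8_t *buf, uint16_t stream_a, uint16_t stream_b)
+{
+	struct sctp_stream_reset_out_req *req;
+
+	req = (struct sctp_stream_reset_out_req *)buf;
+	/* the two specific streams overlay sr_req.list_of_streams[] */
+	req->sr_req.list_of_streams[0] = htons(stream_a);
+	req->sr_req.list_of_streams[1] = htons(stream_b);
+}
+#endif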
+
+/****************************************************/
+
+/*
+ * Authenticated chunks support draft-ietf-tsvwg-sctp-auth
+ */
+struct sctp_auth_random {
+ struct sctp_paramhdr ph;/* type = 0x8002 */
+ uint8_t random_data[0];
+};
+
+struct sctp_auth_chunk_list {
+ struct sctp_paramhdr ph;/* type = 0x8003 */
+ uint8_t chunk_types[0];
+};
+
+struct sctp_auth_hmac_algo {
+ struct sctp_paramhdr ph;/* type = 0x8004 */
+ uint16_t hmac_ids[0];
+};
+
+struct sctp_auth_chunk {
+ struct sctp_chunkhdr ch;
+ uint16_t shared_key_id;
+ uint16_t hmac_id;
+ uint8_t hmac[0];
+};
+
+struct sctp_auth_invalid_hmac {
+ struct sctp_paramhdr ph;
+ uint16_t hmac_id;
+ uint16_t padding;
+};
+
+/*
+ * we pre-reserve enough room for an ECNE or CWR AND a SACK with no missing
+ * pieces. If the ECNE is missing we could have a couple of blocks. This way
+ * we optimize so we MOST likely can bundle a SACK/ECN with the smallest size
+ * data chunk I will split into. We could increase throughput slightly by
+ * taking out these two, but the 24-byte SACK / 8-byte CWR, i.e. the 32 bytes
+ * I pre-reserve, I feel is worth it for now.
+ */
+#ifndef SCTP_MAX_OVERHEAD
+#ifdef AF_INET6
+#define SCTP_MAX_OVERHEAD (sizeof(struct sctp_data_chunk) + \
+ sizeof(struct sctphdr) + \
+ sizeof(struct sctp_ecne_chunk) + \
+ sizeof(struct sctp_sack_chunk) + \
+ sizeof(struct ip6_hdr))
+
+#define SCTP_MED_OVERHEAD (sizeof(struct sctp_data_chunk) + \
+ sizeof(struct sctphdr) + \
+ sizeof(struct ip6_hdr))
+
+
+#define SCTP_MIN_OVERHEAD (sizeof(struct ip6_hdr) + \
+ sizeof(struct sctphdr))
+
+#else
+#define SCTP_MAX_OVERHEAD (sizeof(struct sctp_data_chunk) + \
+ sizeof(struct sctphdr) + \
+ sizeof(struct sctp_ecne_chunk) + \
+ sizeof(struct sctp_sack_chunk) + \
+ sizeof(struct ip))
+
+#define SCTP_MED_OVERHEAD (sizeof(struct sctp_data_chunk) + \
+ sizeof(struct sctphdr) + \
+ sizeof(struct ip))
+
+
+#define SCTP_MIN_OVERHEAD (sizeof(struct ip) + \
+ sizeof(struct sctphdr))
+
+#endif /* AF_INET6 */
+#endif /* !SCTP_MAX_OVERHEAD */
+
+#define SCTP_MED_V4_OVERHEAD (sizeof(struct sctp_data_chunk) + \
+ sizeof(struct sctphdr) + \
+ sizeof(struct ip))
+
+#define SCTP_MIN_V4_OVERHEAD (sizeof(struct ip) + \
+ sizeof(struct sctphdr))
+
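+/*
+ * Illustrative sketch (hypothetical, compiled out): the overhead figures
+ * above are meant to be subtracted from a path MTU to size the usable
+ * payload. The helper name is an assumption for illustration only.
+ */
+#if 0
+static __inline uint32_t
+example_payload_space(uint32_t path_mtu)
+{
+	/* room left for user data once worst-case headers are counted */
+	if (path_mtu <= SCTP_MAX_OVERHEAD)
+		return (0);
+	return (path_mtu - SCTP_MAX_OVERHEAD);
+}
+#endif
+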
+#endif /* !__sctp_header_h__ */
diff --git a/sys/netinet/sctp_indata.c b/sys/netinet/sctp_indata.c
new file mode 100644
index 0000000..5b22770
--- /dev/null
+++ b/sys/netinet/sctp_indata.c
@@ -0,0 +1,5588 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	 */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+
+#include "opt_ipsec.h"
+#include "opt_inet6.h"
+#include "opt_inet.h"
+
+#include "opt_sctp.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/route.h>
+
+
+#include <sys/limits.h>
+#include <machine/cpu.h>
+
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#ifdef INET6
+#include <netinet/ip6.h>
+#endif /* INET6 */
+#include <netinet/in_pcb.h>
+#include <netinet/in_var.h>
+#include <netinet/ip_var.h>
+#ifdef INET6
+#include <netinet6/ip6_var.h>
+#endif /* INET6 */
+#include <netinet/ip_icmp.h>
+#include <netinet/icmp_var.h>
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_input.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctp_timer.h>
+#ifdef IPSEC
+#include <netinet6/ipsec.h>
+#include <netkey/key.h>
+#endif /* IPSEC */
+
+
+#ifdef SCTP_DEBUG
+extern uint32_t sctp_debug_on;
+
+#endif
+
+/*
+ * NOTES: On the outbound side of things I need to check the sack timer to
+ * see if I should generate a sack into the chunk queue (if I have data to
+ * send, that is, and will be sending it) for bundling.
+ *
+ * The callback in sctp_usrreq.c will get called when the socket is read from.
+ * This will cause sctp_service_queues() to get called on the top entry in
+ * the list.
+ */
+
+extern int sctp_strict_sacks;
+
+__inline void
+sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+ uint32_t calc, calc_w_oh;
+
+ /*
+	 * This is really set wrong with respect to a 1-to-many socket,
+	 * since the sb_cc is the count that everyone has put up. When we
+	 * re-write sctp_soreceive then we will fix this so that ONLY this
+	 * association's data is taken into account.
+ */
+ if (stcb->sctp_socket == NULL)
+ return;
+
+ if (stcb->asoc.sb_cc == 0 &&
+ asoc->size_on_reasm_queue == 0 &&
+ asoc->size_on_all_streams == 0) {
+ /* Full rwnd granted */
+ asoc->my_rwnd = max(stcb->sctp_socket->so_rcv.sb_hiwat,
+ SCTP_MINIMAL_RWND);
+ return;
+ }
+ /* get actual space */
+ calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
+
+ /*
+	 * take out what has NOT been put on the socket queue and what we
+	 * still hold for putting up.
+ */
+ calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
+ calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);
+
+ if (calc == 0) {
+ /* out of space */
+ asoc->my_rwnd = 0;
+ return;
+ }
+ /* what is the overhead of all these rwnd's */
+ calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
+ asoc->my_rwnd = calc;
+ if (calc_w_oh == 0) {
+ /*
+ * If our overhead is greater than the advertised rwnd, we
+ * clamp the rwnd to 1. This lets us still accept inbound
+ * segments, but hopefully will shut the sender down when he
+ * finally gets the message.
+ */
+ asoc->my_rwnd = 1;
+ } else {
+ /* SWS threshold */
+ if (asoc->my_rwnd &&
+ (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
+ /* SWS engaged, tell peer none left */
+ asoc->my_rwnd = 1;
+ }
+ }
+}
+
+/* Calculate what the rwnd would be */
+
+__inline uint32_t
+sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+ uint32_t calc = 0, calc_w_oh;
+
+ /*
+	 * This is really set wrong with respect to a 1-to-many socket,
+	 * since the sb_cc is the count that everyone has put up. When we
+	 * re-write sctp_soreceive then we will fix this so that ONLY this
+	 * association's data is taken into account.
+ */
+ if (stcb->sctp_socket == NULL)
+ return (calc);
+
+ if (stcb->asoc.sb_cc == 0 &&
+ asoc->size_on_reasm_queue == 0 &&
+ asoc->size_on_all_streams == 0) {
+ /* Full rwnd granted */
+ calc = max(stcb->sctp_socket->so_rcv.sb_hiwat,
+ SCTP_MINIMAL_RWND);
+ return (calc);
+ }
+ /* get actual space */
+ calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
+
+ /*
+	 * take out what has NOT been put on the socket queue and what we
+	 * still hold for putting up.
+ */
+ calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
+ calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);
+
+ if (calc == 0) {
+ /* out of space */
+ return (calc);
+ }
+ /* what is the overhead of all these rwnd's */
+ calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
+ if (calc_w_oh == 0) {
+ /*
+ * If our overhead is greater than the advertised rwnd, we
+ * clamp the rwnd to 1. This lets us still accept inbound
+ * segments, but hopefully will shut the sender down when he
+ * finally gets the message.
+ */
+ calc = 1;
+ } else {
+ /* SWS threshold */
+ if (calc &&
+ (calc < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
+ /* SWS engaged, tell peer none left */
+ calc = 1;
+ }
+ }
+ return (calc);
+}
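+
+/*
+ * Illustrative sketch (hypothetical, compiled out): sctp_calc_rwnd()
+ * computes the same value sctp_set_rwnd() would store, without committing
+ * it, so a caller can peek at the prospective window, e.g. when deciding
+ * what to advertise in a SACK. The helper name is an assumption.
+ */
+#if 0
+static void
+example_peek_rwnd(struct sctp_tcb *stcb)
+{
+	uint32_t would_be;
+
+	would_be = sctp_calc_rwnd(stcb, &stcb->asoc);
+	if (would_be <= 1) {
+		/* SWS avoidance engaged; the peer would be told none left */
+	}
+}
+#endif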
+
+
+
+/*
+ * Build out our readq entry based on the incoming packet.
+ */
+struct sctp_queued_to_read *
+sctp_build_readq_entry(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ uint32_t tsn, uint32_t ppid,
+ uint32_t context, uint16_t stream_no,
+ uint16_t stream_seq, uint8_t flags,
+ struct mbuf *dm)
+{
+ struct sctp_queued_to_read *read_queue_e = NULL;
+
+ sctp_alloc_a_readq(stcb, read_queue_e);
+ if (read_queue_e == NULL) {
+ goto failed_build;
+ }
+ read_queue_e->sinfo_stream = stream_no;
+ read_queue_e->sinfo_ssn = stream_seq;
+ read_queue_e->sinfo_flags = (flags << 8);
+ read_queue_e->sinfo_ppid = ppid;
+ read_queue_e->sinfo_context = stcb->asoc.context;
+ read_queue_e->sinfo_timetolive = 0;
+ read_queue_e->sinfo_tsn = tsn;
+ read_queue_e->sinfo_cumtsn = tsn;
+ read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
+ read_queue_e->whoFrom = net;
+ read_queue_e->length = 0;
+ atomic_add_int(&net->ref_count, 1);
+ read_queue_e->data = dm;
+ read_queue_e->tail_mbuf = NULL;
+ read_queue_e->stcb = stcb;
+ read_queue_e->port_from = stcb->rport;
+ read_queue_e->do_not_ref_stcb = 0;
+ read_queue_e->end_added = 0;
+failed_build:
+ return (read_queue_e);
+}
+
+
+/*
+ * Build out our readq entry based on the incoming packet.
+ */
+static struct sctp_queued_to_read *
+sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
+ struct sctp_tmit_chunk *chk)
+{
+ struct sctp_queued_to_read *read_queue_e = NULL;
+
+ sctp_alloc_a_readq(stcb, read_queue_e);
+ if (read_queue_e == NULL) {
+ goto failed_build;
+ }
+ read_queue_e->sinfo_stream = chk->rec.data.stream_number;
+ read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
+ read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
+ read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
+ read_queue_e->sinfo_context = stcb->asoc.context;
+ read_queue_e->sinfo_timetolive = 0;
+ read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
+ read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
+ read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
+ read_queue_e->whoFrom = chk->whoTo;
+ read_queue_e->length = 0;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ read_queue_e->data = chk->data;
+ read_queue_e->tail_mbuf = NULL;
+ read_queue_e->stcb = stcb;
+ read_queue_e->port_from = stcb->rport;
+ read_queue_e->do_not_ref_stcb = 0;
+ read_queue_e->end_added = 0;
+failed_build:
+ return (read_queue_e);
+}
+
+
+struct mbuf *
+sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
+ struct sctp_sndrcvinfo *sinfo)
+{
+ struct sctp_sndrcvinfo *outinfo;
+ struct cmsghdr *cmh;
+ struct mbuf *ret;
+ int len;
+ int use_extended = 0;
+
+ if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
+ /* user does not want the sndrcv ctl */
+ return (NULL);
+ }
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
+ use_extended = 1;
+ len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
+ } else {
+ len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
+ }
+
+
+ ret = sctp_get_mbuf_for_msg(len,
+ 1, M_DONTWAIT, 1, MT_DATA);
+
+ if (ret == NULL) {
+ /* No space */
+ return (ret);
+ }
+ /* We need a CMSG header followed by the struct */
+ cmh = mtod(ret, struct cmsghdr *);
+ outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
+ cmh->cmsg_level = IPPROTO_SCTP;
+ if (use_extended) {
+ cmh->cmsg_type = SCTP_EXTRCV;
+ cmh->cmsg_len = len;
+ memcpy(outinfo, sinfo, len);
+ } else {
+ cmh->cmsg_type = SCTP_SNDRCV;
+ cmh->cmsg_len = len;
+ *outinfo = *sinfo;
+ }
+ ret->m_len = cmh->cmsg_len;
+ ret->m_pkthdr.len = ret->m_len;
+ return (ret);
+}
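+
+/*
+ * Illustrative sketch (hypothetical, compiled out, userland side): the
+ * control mbuf built above reaches the application as ancillary data on
+ * recvmsg(), assuming the usual <sys/socket.h> cmsg macros. The helper
+ * name is an assumption for illustration only.
+ */
+#if 0
+static void
+example_read_sndrcvinfo(struct msghdr *msg, struct sctp_sndrcvinfo *out)
+{
+	struct cmsghdr *cmh;
+
+	for (cmh = CMSG_FIRSTHDR(msg); cmh != NULL;
+	    cmh = CMSG_NXTHDR(msg, cmh)) {
+		if (cmh->cmsg_level == IPPROTO_SCTP &&
+		    cmh->cmsg_type == SCTP_SNDRCV) {
+			memcpy(out, CMSG_DATA(cmh), sizeof(*out));
+			return;
+		}
+	}
+}
+#endif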
+
+/*
+ * We are delivering currently from the reassembly queue. We must continue to
+ * deliver until we either: 1) run out of space. 2) run out of sequential
+ * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
+ */
+static void
+sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+	struct sctp_tmit_chunk *chk;
+	struct mbuf *m;
+	struct sctp_queued_to_read *control, *ctl, *ctlat;
+	uint16_t nxt_todel;
+	uint16_t stream_no;
+	int end = 0;
+	int cntDel;
+
+	cntDel = stream_no = 0;
+
+ if (stcb && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
+ ) {
+ /* socket above is long gone */
+ asoc->fragmented_delivery_inprogress = 0;
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ while (chk) {
+ TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
+ asoc->size_on_reasm_queue -= chk->send_size;
+ sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+ /*
+			 * Lose the data pointer, since it's in the socket
+ * buffer
+ */
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ /* Now free the address and data */
+ sctp_free_remote_addr(chk->whoTo);
+ sctp_free_a_chunk(stcb, chk);
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ }
+ return;
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ do {
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ if (chk == NULL) {
+ return;
+ }
+ if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
+ /* Can't deliver more :< */
+ return;
+ }
+ stream_no = chk->rec.data.stream_number;
+ nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
+ if (nxt_todel != chk->rec.data.stream_seq &&
+ (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
+ /*
+ * Not the next sequence to deliver in its stream OR
+ * unordered
+ */
+ return;
+ }
+ if ((chk->data->m_flags & M_PKTHDR) == 0) {
+ m = sctp_get_mbuf_for_msg(1,
+ 1, M_DONTWAIT, 1, MT_DATA);
+ if (m == NULL) {
+ /* no room! */
+ return;
+ }
+ m->m_pkthdr.len = chk->send_size;
+ m->m_len = 0;
+ m->m_next = chk->data;
+ chk->data = m;
+ }
+ if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+ if (chk->data->m_next == NULL) {
+ /* hopefully we hit here most of the time */
+ chk->data->m_flags |= M_EOR;
+ } else {
+ /*
+ * Add the flag to the LAST mbuf in the
+ * chain
+ */
+ m = chk->data;
+ while (m->m_next != NULL) {
+ m = m->m_next;
+ }
+ m->m_flags |= M_EOR;
+ }
+ }
+ if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+
+ control = sctp_build_readq_entry_chk(stcb, chk);
+ if (control == NULL) {
+ /* out of memory? */
+ return;
+ }
+ /* save it off for our future deliveries */
+ stcb->asoc.control_pdapi = control;
+ if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
+ end = 1;
+ else
+ end = 0;
+ sctp_add_to_readq(stcb->sctp_ep,
+ stcb, control, &stcb->sctp_socket->so_rcv, end);
+ cntDel++;
+ } else {
+ if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
+ end = 1;
+ else
+ end = 0;
+ if (sctp_append_to_readq(stcb->sctp_ep, stcb,
+ stcb->asoc.control_pdapi,
+ chk->data, end, chk->rec.data.TSN_seq,
+ &stcb->sctp_socket->so_rcv)) {
+ /*
+ * something is very wrong, either
+ * control_pdapi is NULL, or the tail_mbuf
+				 * is corrupt, or there is an EOM already on
+ * the mbuf chain.
+ */
+ if (stcb->asoc.control_pdapi == NULL) {
+ panic("This should not happen control_pdapi NULL?");
+ }
+ if (stcb->asoc.control_pdapi->tail_mbuf == NULL) {
+ panic("This should not happen, tail_mbuf not being maintained?");
+ }
+				/* if we did not panic, it was an EOM */
+ panic("Bad chunking ??");
+ }
+ cntDel++;
+ }
+		/* pull it off, we delivered it */
+ TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
+ if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+ asoc->fragmented_delivery_inprogress = 0;
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
+ asoc->strmin[stream_no].last_sequence_delivered++;
+ }
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
+ SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
+ }
+ } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+ /*
+ * turn the flag back on since we just delivered
+ * yet another one.
+ */
+ asoc->fragmented_delivery_inprogress = 1;
+ }
+ asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
+ asoc->last_flags_delivered = chk->rec.data.rcv_flags;
+ asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
+ asoc->last_strm_no_delivered = chk->rec.data.stream_number;
+
+ asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
+ asoc->size_on_reasm_queue -= chk->send_size;
+ sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+ /* free up the chk */
+ chk->data = NULL;
+ sctp_free_remote_addr(chk->whoTo);
+ sctp_free_a_chunk(stcb, chk);
+
+ if (asoc->fragmented_delivery_inprogress == 0) {
+ /*
+ * Now lets see if we can deliver the next one on
+ * the stream
+ */
+ uint16_t nxt_todel;
+ struct sctp_stream_in *strm;
+
+ strm = &asoc->strmin[stream_no];
+ nxt_todel = strm->last_sequence_delivered + 1;
+ ctl = TAILQ_FIRST(&strm->inqueue);
+ if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
+ while (ctl != NULL) {
+ /* Deliver more if we can. */
+ if (nxt_todel == ctl->sinfo_ssn) {
+ ctlat = TAILQ_NEXT(ctl, next);
+ TAILQ_REMOVE(&strm->inqueue, ctl, next);
+ asoc->size_on_all_streams -= ctl->length;
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ strm->last_sequence_delivered++;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ ctl,
+ &stcb->sctp_socket->so_rcv, 1);
+ ctl = ctlat;
+ } else {
+ break;
+ }
+ nxt_todel = strm->last_sequence_delivered + 1;
+ }
+ }
+ return;
+ }
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ } while (chk);
+}
+
+/*
+ * Queue the chunk either right into the socket buffer if it is the next one
+ * to go OR put it in the correct place in the delivery queue. If we do
+ * append to the so_buf, keep doing so until we are out of order. One big
+ * question still remains, what to do when the socket buffer is FULL??
+ */
+static void
+sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_queued_to_read *control, int *abort_flag)
+{
+ /*
+	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
+	 * all the data in one stream this could happen quite rapidly. One
+	 * could use the TSN to keep track of things, but this scheme breaks
+	 * down in the other type of stream usage that could occur. Send a
+	 * single msg to stream 0, send 4 billion messages to stream 1, now
+	 * send a message to stream 0. You have a situation where the TSN
+	 * has wrapped but not in the stream. Is this worth worrying about,
+	 * or should we just change our queue sort at the bottom to be by
+	 * TSN?
+	 *
+	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
+	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
+	 * assignment this could happen... and I don't see how this would be
+	 * a violation. So for now I am undecided and will leave the sort by
+	 * SSN alone. Maybe a hybrid approach is the answer.
+	 */
+ struct sctp_stream_in *strm;
+ struct sctp_queued_to_read *at;
+ int queue_needed;
+ uint16_t nxt_todel;
+ struct mbuf *oper;
+
+ queue_needed = 1;
+ asoc->size_on_all_streams += control->length;
+ sctp_ucount_incr(asoc->cnt_on_all_streams);
+ strm = &asoc->strmin[control->sinfo_stream];
+ nxt_todel = strm->last_sequence_delivered + 1;
+#ifdef SCTP_STR_LOGGING
+ sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
+#endif
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
+ printf("queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
+ (uint32_t) control->sinfo_stream,
+ (uint32_t) strm->last_sequence_delivered, (uint32_t) nxt_todel);
+ }
+#endif
+ if (compare_with_wrap(strm->last_sequence_delivered,
+ control->sinfo_ssn, MAX_SEQ) ||
+ (strm->last_sequence_delivered == control->sinfo_ssn)) {
+ /* The incoming sseq is behind where we last delivered? */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
+ printf("Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
+ control->sinfo_ssn,
+ strm->last_sequence_delivered);
+ }
+#endif
+ /*
+ * throw it in the stream so it gets cleaned up in
+ * association destruction
+ */
+ TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len = sizeof(struct sctp_paramhdr) +
+ (sizeof(uint32_t) * 3);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x00000001);
+ ippp++;
+ *ippp = control->sinfo_tsn;
+ ippp++;
+ *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
+ }
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_PEER_FAULTY, oper);
+
+ *abort_flag = 1;
+ return;
+
+ }
+ if (nxt_todel == control->sinfo_ssn) {
+ /* can be delivered right away? */
+#ifdef SCTP_STR_LOGGING
+ sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
+#endif
+ queue_needed = 0;
+ asoc->size_on_all_streams -= control->length;
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ strm->last_sequence_delivered++;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1);
+ control = TAILQ_FIRST(&strm->inqueue);
+ while (control != NULL) {
+ /* all delivered */
+ nxt_todel = strm->last_sequence_delivered + 1;
+ if (nxt_todel == control->sinfo_ssn) {
+ at = TAILQ_NEXT(control, next);
+ TAILQ_REMOVE(&strm->inqueue, control, next);
+ asoc->size_on_all_streams -= control->length;
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ strm->last_sequence_delivered++;
+ /*
+ * We ignore the return of deliver_data here
+ * since we always can hold the chunk on the
+ * d-queue. And we have a finite number that
+ * can be delivered from the strq.
+ */
+#ifdef SCTP_STR_LOGGING
+ sctp_log_strm_del(control, NULL,
+ SCTP_STR_LOG_FROM_IMMED_DEL);
+#endif
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1);
+ control = at;
+ continue;
+ }
+ break;
+ }
+ }
+ if (queue_needed) {
+ /*
+ * Ok, we did not deliver this guy, find the correct place
+ * to put it on the queue.
+ */
+ if (TAILQ_EMPTY(&strm->inqueue)) {
+ /* Empty queue */
+#ifdef SCTP_STR_LOGGING
+ sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
+#endif
+ TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
+ } else {
+ TAILQ_FOREACH(at, &strm->inqueue, next) {
+ if (compare_with_wrap(at->sinfo_ssn,
+ control->sinfo_ssn, MAX_SEQ)) {
+ /*
+ * one in queue is bigger than the
+ * new one, insert before this one
+ */
+#ifdef SCTP_STR_LOGGING
+ sctp_log_strm_del(control, at,
+ SCTP_STR_LOG_FROM_INSERT_MD);
+#endif
+ TAILQ_INSERT_BEFORE(at, control, next);
+ break;
+ } else if (at->sinfo_ssn == control->sinfo_ssn) {
+ /*
+ * Gak, He sent me a duplicate str
+ * seq number
+ */
+ /*
+ * foo bar, I guess I will just free
+ * this new guy, should we abort
+ * too? FIX ME MAYBE? Or it COULD be
+ * that the SSN's have wrapped.
+ * Maybe I should compare to TSN
+ * somehow... sigh for now just blow
+ * away the chunk!
+ */
+
+ if (control->data)
+ sctp_m_freem(control->data);
+ control->data = NULL;
+ asoc->size_on_all_streams -= control->length;
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ sctp_free_remote_addr(control->whoFrom);
+ sctp_free_a_readq(stcb, control);
+ return;
+ } else {
+ if (TAILQ_NEXT(at, next) == NULL) {
+ /*
+ * We are at the end, insert
+ * it after this one
+ */
+#ifdef SCTP_STR_LOGGING
+ sctp_log_strm_del(control, at,
+ SCTP_STR_LOG_FROM_INSERT_TL);
+#endif
+ TAILQ_INSERT_AFTER(&strm->inqueue,
+ at, control, next);
+ break;
+ }
+ }
+ }
+ }
+ }
+}
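+
+/*
+ * Illustrative sketch (hypothetical, compiled out) of the wrap concern in
+ * the FIX-ME above: compare_with_wrap() treats the sequence space as
+ * circular, so a "newer" SSN can be numerically smaller. The values are
+ * examples only.
+ */
+#if 0
+static void
+example_ssn_wrap(void)
+{
+	/* 2 is "newer" than 65535 once the 16-bit SSN space wraps */
+	if (compare_with_wrap(2, 65535, MAX_SEQ)) {
+		/* taken: 2 follows 65535 in circular sequence arithmetic */
+	}
+}
+#endif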
+
+/*
+ * Returns two things: the total size of the deliverable parts of the first
+ * fragmented message on the reassembly queue, and a 1 if all of the message
+ * is ready or a 0 if the message is still incomplete.
+ */
+static int
+sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
+{
+ struct sctp_tmit_chunk *chk;
+ uint32_t tsn;
+
+ *t_size = 0;
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ if (chk == NULL) {
+ /* nothing on the queue */
+ return (0);
+ }
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
+ /* Not a first on the queue */
+ return (0);
+ }
+ tsn = chk->rec.data.TSN_seq;
+ while (chk) {
+ if (tsn != chk->rec.data.TSN_seq) {
+ return (0);
+ }
+ *t_size += chk->send_size;
+ if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+ return (1);
+ }
+ tsn++;
+ chk = TAILQ_NEXT(chk, sctp_next);
+ }
+ return (0);
+}
+
+static void
+sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk;
+ uint16_t nxt_todel;
+ uint32_t tsize;
+
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ if (chk == NULL) {
+ /* Huh? */
+ asoc->size_on_reasm_queue = 0;
+ asoc->cnt_on_reasm_queue = 0;
+ return;
+ }
+ if (asoc->fragmented_delivery_inprogress == 0) {
+ nxt_todel =
+ asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
+ (nxt_todel == chk->rec.data.stream_seq ||
+ (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
+ /*
+			 * Yep the first one is here and it's ok to deliver
+ * but should we?
+ */
+ if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
+ (tsize > stcb->sctp_ep->partial_delivery_point))) {
+
+ /*
+				 * Yes, we set up to start reception by
+				 * backing down the TSN just in case we
+				 * can't deliver.
+ */
+ asoc->fragmented_delivery_inprogress = 1;
+ asoc->tsn_last_delivered =
+ chk->rec.data.TSN_seq - 1;
+ asoc->str_of_pdapi =
+ chk->rec.data.stream_number;
+ asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
+ asoc->pdapi_ppid = chk->rec.data.payloadtype;
+ asoc->fragment_flags = chk->rec.data.rcv_flags;
+ sctp_service_reassembly(stcb, asoc);
+ }
+ }
+ } else {
+ sctp_service_reassembly(stcb, asoc);
+ }
+}
+
+/*
+ * Dump onto the re-assembly queue, in its proper place. After dumping on the
+ * queue, see if anything can be delivered. If so, pull it off (or as much as
+ * we can). If we run out of space then we must dump what we can and set the
+ * appropriate flag to say we queued what we could.
+ */
+static void
+sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_tmit_chunk *chk, int *abort_flag)
+{
+ struct mbuf *oper;
+ uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
+ u_char last_flags;
+ struct sctp_tmit_chunk *at, *prev, *next;
+
+ prev = next = NULL;
+ cum_ackp1 = asoc->tsn_last_delivered + 1;
+ if (TAILQ_EMPTY(&asoc->reasmqueue)) {
+ /* This is the first one on the queue */
+ TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
+ /*
+ * we do not check for delivery of anything when only one
+ * fragment is here
+ */
+ asoc->size_on_reasm_queue = chk->send_size;
+ sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+ if (chk->rec.data.TSN_seq == cum_ackp1) {
+ if (asoc->fragmented_delivery_inprogress == 0 &&
+ (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
+ SCTP_DATA_FIRST_FRAG) {
+ /*
+				 * An empty queue, no delivery in progress,
+ * we hit the next one and it does NOT have
+ * a FIRST fragment mark.
+ */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
+ printf("Gak, Evil plot, its not first, no fragmented delivery in progress\n");
+ }
+#endif
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len =
+ sizeof(struct sctp_paramhdr) +
+ (sizeof(uint32_t) * 3);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x10000001);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+
+ }
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_PEER_FAULTY, oper);
+ *abort_flag = 1;
+ } else if (asoc->fragmented_delivery_inprogress &&
+ (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
+ /*
+ * We are doing a partial delivery and the
+ * NEXT chunk MUST be either the LAST or
+ * MIDDLE fragment NOT a FIRST
+ */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
+ printf("Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
+ }
+#endif
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x10000002);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+ }
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_PEER_FAULTY, oper);
+ *abort_flag = 1;
+ } else if (asoc->fragmented_delivery_inprogress) {
+ /*
+ * Here we are ok with a MIDDLE or LAST
+ * piece
+ */
+ if (chk->rec.data.stream_number !=
+ asoc->str_of_pdapi) {
+ /* Got to be the right STR No */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
+ printf("Gak, Evil plot, it IS not same stream number %d vs %d\n",
+ chk->rec.data.stream_number,
+ asoc->str_of_pdapi);
+ }
+#endif
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len =
+ sizeof(struct sctp_paramhdr) +
+ (sizeof(uint32_t) * 3);
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x10000003);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+ }
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper);
+ *abort_flag = 1;
+ } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
+ SCTP_DATA_UNORDERED &&
+ chk->rec.data.stream_seq !=
+ asoc->ssn_of_pdapi) {
+ /* Got to be the right STR Seq */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
+ printf("Gak, Evil plot, it IS not same stream seq %d vs %d\n",
+ chk->rec.data.stream_seq,
+ asoc->ssn_of_pdapi);
+ }
+#endif
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x10000004);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+
+ }
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper);
+ *abort_flag = 1;
+ }
+ }
+ }
+ return;
+ }
+ /* Find its place */
+ TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
+ if (compare_with_wrap(at->rec.data.TSN_seq,
+ chk->rec.data.TSN_seq, MAX_TSN)) {
+ /*
+ * one in queue is bigger than the new one, insert
+ * before this one
+ */
+ /* A check */
+ asoc->size_on_reasm_queue += chk->send_size;
+ sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+ next = at;
+ TAILQ_INSERT_BEFORE(at, chk, sctp_next);
+ break;
+ } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
+ /* Gak, He sent me a duplicate str seq number */
+ /*
+ * foo bar, I guess I will just free this new guy,
+ * should we abort too? FIX ME MAYBE? Or it COULD be
+ * that the SSN's have wrapped. Maybe I should
+ * compare to TSN somehow... sigh for now just blow
+ * away the chunk!
+ */
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_remote_addr(chk->whoTo);
+ sctp_free_a_chunk(stcb, chk);
+ return;
+ } else {
+ last_flags = at->rec.data.rcv_flags;
+ last_tsn = at->rec.data.TSN_seq;
+ prev = at;
+ if (TAILQ_NEXT(at, sctp_next) == NULL) {
+ /*
+ * We are at the end, insert it after this
+ * one
+ */
+ /* check it first */
+ asoc->size_on_reasm_queue += chk->send_size;
+ sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+ TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
+ break;
+ }
+ }
+ }
+ /* Now the audits */
+ if (prev) {
+ prev_tsn = chk->rec.data.TSN_seq - 1;
+ if (prev_tsn == prev->rec.data.TSN_seq) {
+ /*
+ * Ok the one I am dropping onto the end is the
+			 * NEXT. A bit of validation here.
+ */
+ if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+ SCTP_DATA_FIRST_FRAG ||
+ (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+ SCTP_DATA_MIDDLE_FRAG) {
+ /*
+ * Insert chk MUST be a MIDDLE or LAST
+ * fragment
+ */
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+ SCTP_DATA_FIRST_FRAG) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
+					printf("Prev check - It can be a middle or last but not a first\n");
+ printf("Gak, Evil plot, it's a FIRST!\n");
+ }
+#endif
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x10000005);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+
+ }
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper);
+ *abort_flag = 1;
+ return;
+ }
+ if (chk->rec.data.stream_number !=
+ prev->rec.data.stream_number) {
+ /*
+ * Huh, need the correct STR here,
+ * they must be the same.
+ */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
+ printf("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
+ chk->rec.data.stream_number,
+ prev->rec.data.stream_number);
+ }
+#endif
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x10000006);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+ }
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper);
+
+ *abort_flag = 1;
+ return;
+ }
+ if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
+ chk->rec.data.stream_seq !=
+ prev->rec.data.stream_seq) {
+ /*
+ * Huh, need the correct STR here,
+ * they must be the same.
+ */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
+ printf("Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
+ chk->rec.data.stream_seq,
+ prev->rec.data.stream_seq);
+ }
+#endif
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x10000007);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+ }
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper);
+
+ *abort_flag = 1;
+ return;
+ }
+ } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+ SCTP_DATA_LAST_FRAG) {
+ /* Insert chk MUST be a FIRST */
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
+ SCTP_DATA_FIRST_FRAG) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
+ printf("Prev check - Gak, evil plot, its not FIRST and it must be!\n");
+ }
+#endif
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x10000008);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+
+ }
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper);
+
+ *abort_flag = 1;
+ return;
+ }
+ }
+ }
+ }
+ if (next) {
+ post_tsn = chk->rec.data.TSN_seq + 1;
+ if (post_tsn == next->rec.data.TSN_seq) {
+ /*
+ * Ok the one I am inserting ahead of is my NEXT
+			 * one. A bit of validation here.
+ */
+ if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+ /* Insert chk MUST be a last fragment */
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
+ != SCTP_DATA_LAST_FRAG) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
+ printf("Next chk - Next is FIRST, we must be LAST\n");
+ printf("Gak, Evil plot, its not a last!\n");
+ }
+#endif
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x10000009);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+ }
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper);
+
+ *abort_flag = 1;
+ return;
+ }
+ } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+ SCTP_DATA_MIDDLE_FRAG ||
+ (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+ SCTP_DATA_LAST_FRAG) {
+ /*
+ * Insert chk CAN be MIDDLE or FIRST NOT
+ * LAST
+ */
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+ SCTP_DATA_LAST_FRAG) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
+ printf("Next chk - Next is a MIDDLE/LAST\n");
+ printf("Gak, Evil plot, new prev chunk is a LAST\n");
+ }
+#endif
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x1000000a);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+
+ }
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper);
+
+ *abort_flag = 1;
+ return;
+ }
+ if (chk->rec.data.stream_number !=
+ next->rec.data.stream_number) {
+ /*
+ * Huh, need the correct STR here,
+ * they must be the same.
+ */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
+ printf("Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
+ chk->rec.data.stream_number,
+ next->rec.data.stream_number);
+ }
+#endif
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x1000000b);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+
+ }
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper);
+
+ *abort_flag = 1;
+ return;
+ }
+ if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
+ chk->rec.data.stream_seq !=
+ next->rec.data.stream_seq) {
+ /*
+ * Huh, need the correct STR here,
+ * they must be the same.
+ */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
+ printf("Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
+ chk->rec.data.stream_seq,
+ next->rec.data.stream_seq);
+ }
+#endif
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x1000000c);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+ }
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper);
+
+ *abort_flag = 1;
+ return;
+
+ }
+ }
+ }
+ }
+ /* Do we need to do some delivery? check */
+ sctp_deliver_reasm_check(stcb, asoc);
+}
+
+/*
+ * This is an unfortunate routine. It checks to make sure an evil guy is not
+ * stuffing us full of bad packet fragments. A broken peer could also do
+ * this, but this is doubtful. It is too bad I must worry about evil
+ * crackers; sigh :< more cycles.
+ */
+static int
+sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
+ uint32_t TSN_seq)
+{
+ struct sctp_tmit_chunk *at;
+ uint32_t tsn_est;
+
+ TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
+ if (compare_with_wrap(TSN_seq,
+ at->rec.data.TSN_seq, MAX_TSN)) {
+ /* is it one bigger? */
+ tsn_est = at->rec.data.TSN_seq + 1;
+ if (tsn_est == TSN_seq) {
+ /* yep. It better be a last then */
+ if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
+ SCTP_DATA_LAST_FRAG) {
+ /*
+ * Ok this guy belongs next to a guy
+ * that is NOT last, it should be a
+ * middle/last, not a complete
+ * chunk.
+ */
+ return (1);
+ } else {
+ /*
+					 * This guy is ok since it's a LAST
+					 * and the new chunk is a fully
+					 * self-contained one.
+ */
+ return (0);
+ }
+ }
+ } else if (TSN_seq == at->rec.data.TSN_seq) {
+ /* Software error since I have a dup? */
+ return (1);
+ } else {
+ /*
+			 * Ok, 'at' is larger than the new chunk, but does
+			 * it need to be right before it?
+ */
+ tsn_est = TSN_seq + 1;
+ if (tsn_est == at->rec.data.TSN_seq) {
+ /* Yep, It better be a first */
+ if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
+ SCTP_DATA_FIRST_FRAG) {
+ return (1);
+ } else {
+ return (0);
+ }
+ }
+ }
+ }
+ return (0);
+}
+
+
+extern unsigned int sctp_max_chunks_on_queue;
+static int
+sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
+ struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
+ int *break_flag, int last_chunk)
+{
+ /* Process a data chunk */
+ /* struct sctp_tmit_chunk *chk; */
+ struct sctp_tmit_chunk *chk;
+ uint32_t tsn, gap;
+ struct mbuf *dmbuf;
+ int indx, the_len;
+ uint16_t strmno, strmseq;
+ struct mbuf *oper;
+ struct sctp_queued_to_read *control;
+
+ chk = NULL;
+ tsn = ntohl(ch->dp.tsn);
+#ifdef SCTP_MAP_LOGGING
+ sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE);
+#endif
+ if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
+ asoc->cumulative_tsn == tsn) {
+ /* It is a duplicate */
+ SCTP_STAT_INCR(sctps_recvdupdata);
+ if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
+ /* Record a dup for the next outbound sack */
+ asoc->dup_tsns[asoc->numduptsns] = tsn;
+ asoc->numduptsns++;
+ }
+ return (0);
+ }
+ /* Calculate the number of TSN's between the base and this TSN */
+ if (tsn >= asoc->mapping_array_base_tsn) {
+ gap = tsn - asoc->mapping_array_base_tsn;
+ } else {
+ gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
+ }
+ if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
+ /* Can't hold the bit in the mapping at max array, toss it */
+ return (0);
+ }
+ if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
+ if (sctp_expand_mapping_array(asoc)) {
+ /* Can't expand, drop it */
+ return (0);
+ }
+ }
+ if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
+ *high_tsn = tsn;
+ }
+ /* See if we have received this one already */
+ if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
+ SCTP_STAT_INCR(sctps_recvdupdata);
+ if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
+ /* Record a dup for the next outbound sack */
+ asoc->dup_tsns[asoc->numduptsns] = tsn;
+ asoc->numduptsns++;
+ }
+ if (!callout_pending(&asoc->dack_timer.timer)) {
+ /*
+ * By starting the timer we assure that we WILL sack
+ * at the end of the packet when sctp_sack_check
+ * gets called.
+ */
+ sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep,
+ stcb, NULL);
+ }
+ return (0);
+ }
+ /*
+	 * Check to see about the GONE flag; duplicates would cause a sack
+	 * to be sent up above.
+ */
+ if (stcb && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
+ ) {
+ /*
+ * wait a minute, this guy is gone, there is no longer a
+ * receiver. Send peer an ABORT!
+ */
+ struct mbuf *op_err;
+
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
+ sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
+ *abort_flag = 1;
+ return (0);
+ }
+ /*
+ * Now before going further we see if there is room. If NOT then we
+ * MAY let one through only IF this TSN is the one we are waiting
+ * for on a partial delivery API.
+ */
+
+ /* now do the tests */
+ if (((asoc->cnt_on_all_streams +
+ asoc->cnt_on_reasm_queue +
+ asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) ||
+ (((int)asoc->my_rwnd) <= 0)) {
+ /*
+ * When we have NO room in the rwnd we check to make sure
+ * the reader is doing its job...
+ */
+ if (stcb->sctp_socket->so_rcv.sb_cc) {
+ /* some to read, wake-up */
+ sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
+ }
+ /* now is it in the mapping array of what we have accepted? */
+ if (compare_with_wrap(tsn,
+ asoc->highest_tsn_inside_map, MAX_TSN)) {
+
+			/* Nope, not in the valid range, dump it */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
+ printf("My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld\n",
+ (u_long)tsn, (u_long)asoc->my_rwnd,
+ sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv));
+
+ }
+#endif
+ sctp_set_rwnd(stcb, asoc);
+ if ((asoc->cnt_on_all_streams +
+ asoc->cnt_on_reasm_queue +
+ asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) {
+ SCTP_STAT_INCR(sctps_datadropchklmt);
+ } else {
+ SCTP_STAT_INCR(sctps_datadroprwnd);
+ }
+ indx = *break_flag;
+ *break_flag = 1;
+ return (0);
+ }
+ }
+ strmno = ntohs(ch->dp.stream_id);
+ if (strmno >= asoc->streamincnt) {
+ struct sctp_paramhdr *phdr;
+ struct mbuf *mb;
+
+ mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
+ 1, M_DONTWAIT, 1, MT_DATA);
+ if (mb != NULL) {
+ /* add some space up front so prepend will work well */
+ mb->m_data += sizeof(struct sctp_chunkhdr);
+ phdr = mtod(mb, struct sctp_paramhdr *);
+ /*
+			 * Error causes are just params, and this one has
+			 * two back-to-back phdrs: one with the error type
+			 * and size, the other with the stream id and a rsvd
+ */
+ mb->m_pkthdr.len = mb->m_len =
+ (sizeof(struct sctp_paramhdr) * 2);
+ phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
+ phdr->param_length =
+ htons(sizeof(struct sctp_paramhdr) * 2);
+ phdr++;
+ /* We insert the stream in the type field */
+ phdr->param_type = ch->dp.stream_id;
+ /* And set the length to 0 for the rsvd field */
+ phdr->param_length = 0;
+ sctp_queue_op_err(stcb, mb);
+ }
+ SCTP_STAT_INCR(sctps_badsid);
+ return (0);
+ }
+ /*
+	 * Before we continue let's validate that we are not being fooled by
+	 * an evil attacker. We can only have 4k chunks based on our TSN
+	 * spread allowed by the mapping array (512 * 8 bits), so there is no
+	 * way our stream sequence numbers could have wrapped. We of course
+	 * only validate the FIRST fragment, so the bit must be set.
+ */
+ strmseq = ntohs(ch->dp.stream_sequence);
+ if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) &&
+ (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
+ (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
+ strmseq, MAX_SEQ) ||
+ asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
+ /* The incoming sseq is behind where we last delivered? */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
+ printf("EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
+ strmseq,
+ asoc->strmin[strmno].last_sequence_delivered);
+ }
+#endif
+ /*
+ * throw it in the stream so it gets cleaned up in
+ * association destruction
+ */
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len = sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x20000001);
+ ippp++;
+ *ippp = tsn;
+ ippp++;
+ *ippp = ((strmno << 16) | strmseq);
+
+ }
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_PEER_FAULTY, oper);
+ *abort_flag = 1;
+ return (0);
+ }
+ the_len = (chk_length - sizeof(struct sctp_data_chunk));
+ if (last_chunk == 0) {
+ dmbuf = sctp_m_copym(*m,
+ (offset + sizeof(struct sctp_data_chunk)),
+ the_len, M_DONTWAIT);
+#ifdef SCTP_MBUF_LOGGING
+ {
+ struct mbuf *mat;
+
+ mat = dmbuf;
+ while (mat) {
+ if (mat->m_flags & M_EXT) {
+ sctp_log_mb(mat, SCTP_MBUF_ICOPY);
+ }
+ mat = mat->m_next;
+ }
+ }
+#endif
+ } else {
+ /* We can steal the last chunk */
+ dmbuf = *m;
+ /* lop off the top part */
+ m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
+ if (dmbuf->m_pkthdr.len > the_len) {
+ /* Trim the end round bytes off too */
+ m_adj(dmbuf, -(dmbuf->m_pkthdr.len - the_len));
+ }
+ }
+ if (dmbuf == NULL) {
+ SCTP_STAT_INCR(sctps_nomem);
+ return (0);
+ }
+ if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
+ asoc->fragmented_delivery_inprogress == 0 &&
+ TAILQ_EMPTY(&asoc->resetHead) &&
+ ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) ||
+ ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
+ TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
+ /* Candidate for express delivery */
+ /*
+		 * It's not fragmented, no PD-API is up, nothing is in the
+		 * delivery queue, it's un-ordered OR ordered and the next to
+		 * deliver, AND nothing else is stuck on the stream queue,
+		 * and there is room for it in the socket buffer. Let's just
+		 * stuff it up the buffer....
+ */
+
+ /* It would be nice to avoid this copy if we could :< */
+ sctp_alloc_a_readq(stcb, control);
+ sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
+ ch->dp.protocol_id,
+ stcb->asoc.context,
+ strmno, strmseq,
+ ch->ch.chunk_flags,
+ dmbuf);
+ if (control == NULL) {
+ goto failed_express_del;
+ }
+ sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1);
+ if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) {
+ /* for ordered, bump what we delivered */
+ asoc->strmin[strmno].last_sequence_delivered++;
+ }
+ SCTP_STAT_INCR(sctps_recvexpress);
+#ifdef SCTP_STR_LOGGING
+ sctp_log_strm_del_alt(tsn, strmseq,
+ SCTP_STR_LOG_FROM_EXPRS_DEL);
+#endif
+ control = NULL;
+ goto finish_express_del;
+ }
+failed_express_del:
+ /* If we reach here this is a new chunk */
+ chk = NULL;
+ control = NULL;
+ /* Express for fragmented delivery? */
+ if ((asoc->fragmented_delivery_inprogress) &&
+ (stcb->asoc.control_pdapi) &&
+ (asoc->str_of_pdapi == strmno) &&
+ (asoc->ssn_of_pdapi == strmseq)
+ ) {
+ control = stcb->asoc.control_pdapi;
+ if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
+ /* Can't be another first? */
+ goto failed_pdapi_express_del;
+ }
+ if (tsn == (control->sinfo_tsn + 1)) {
+ /* Yep, we can add it on */
+ int end = 0;
+ uint32_t cumack;
+
+ if (ch->ch.chunk_flags & SCTP_DATA_LAST_FRAG) {
+ end = 1;
+ }
+ cumack = asoc->cumulative_tsn;
+ if ((cumack + 1) == tsn)
+ cumack = tsn;
+
+ if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
+ tsn,
+ &stcb->sctp_socket->so_rcv)) {
+ printf("Append fails end:%d\n", end);
+ goto failed_pdapi_express_del;
+ }
+ SCTP_STAT_INCR(sctps_recvexpressm);
+ control->sinfo_tsn = tsn;
+ asoc->tsn_last_delivered = tsn;
+ asoc->fragment_flags = ch->ch.chunk_flags;
+ asoc->tsn_of_pdapi_last_delivered = tsn;
+ asoc->last_flags_delivered = ch->ch.chunk_flags;
+ asoc->last_strm_seq_delivered = strmseq;
+ asoc->last_strm_no_delivered = strmno;
+
+ if (end) {
+ /* clean up the flags and such */
+ asoc->fragmented_delivery_inprogress = 0;
+ asoc->strmin[strmno].last_sequence_delivered++;
+ stcb->asoc.control_pdapi = NULL;
+ }
+ control = NULL;
+ goto finish_express_del;
+ }
+ }
+failed_pdapi_express_del:
+ control = NULL;
+ if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* No memory so we drop the chunk */
+ SCTP_STAT_INCR(sctps_nomem);
+ if (last_chunk == 0) {
+ /* we copied it, free the copy */
+ sctp_m_freem(dmbuf);
+ }
+ return (0);
+ }
+ chk->rec.data.TSN_seq = tsn;
+ chk->no_fr_allowed = 0;
+ chk->rec.data.stream_seq = strmseq;
+ chk->rec.data.stream_number = strmno;
+ chk->rec.data.payloadtype = ch->dp.protocol_id;
+ chk->rec.data.context = stcb->asoc.context;
+ chk->rec.data.doing_fast_retransmit = 0;
+ chk->rec.data.rcv_flags = ch->ch.chunk_flags;
+ chk->asoc = asoc;
+ chk->send_size = the_len;
+ chk->whoTo = net;
+ atomic_add_int(&net->ref_count, 1);
+ chk->data = dmbuf;
+ } else {
+ sctp_alloc_a_readq(stcb, control);
+ sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
+ ch->dp.protocol_id,
+ stcb->asoc.context,
+ strmno, strmseq,
+ ch->ch.chunk_flags,
+ dmbuf);
+ if (control == NULL) {
+ /* No memory so we drop the chunk */
+ SCTP_STAT_INCR(sctps_nomem);
+ if (last_chunk == 0) {
+ /* we copied it, free the copy */
+ sctp_m_freem(dmbuf);
+ }
+ return (0);
+ }
+ control->length = the_len;
+ }
+
+ /* Mark it as received */
+ /* Now queue it where it belongs */
+ if (control != NULL) {
+ /* First a sanity check */
+ if (asoc->fragmented_delivery_inprogress) {
+ /*
+		 * Ok, we have a fragmented delivery in progress; if
+		 * this chunk is next to deliver OR belongs, in our
+		 * view, in the reassembly, then the peer is evil or
+ * broken.
+ */
+ uint32_t estimate_tsn;
+
+ estimate_tsn = asoc->tsn_last_delivered + 1;
+ if (TAILQ_EMPTY(&asoc->reasmqueue) &&
+ (estimate_tsn == control->sinfo_tsn)) {
+				/* Evil/Broken peer */
+ sctp_m_freem(control->data);
+ control->data = NULL;
+ sctp_free_remote_addr(control->whoFrom);
+ sctp_free_a_readq(stcb, control);
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x20000002);
+ ippp++;
+ *ippp = tsn;
+ ippp++;
+ *ippp = ((strmno << 16) | strmseq);
+ }
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_PEER_FAULTY, oper);
+
+ *abort_flag = 1;
+ return (0);
+ } else {
+ if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
+ sctp_m_freem(control->data);
+ control->data = NULL;
+ sctp_free_remote_addr(control->whoFrom);
+ sctp_free_a_readq(stcb, control);
+
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x20000003);
+ ippp++;
+ *ippp = tsn;
+ ippp++;
+ *ippp = ((strmno << 16) | strmseq);
+ }
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper);
+
+ *abort_flag = 1;
+ return (0);
+ }
+ }
+ } else {
+ /* No PDAPI running */
+ if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
+ /*
+				 * Reassembly queue is NOT empty; validate
+				 * that this tsn does not need to be in the
+				 * reassembly queue. If it does then our peer
+				 * is broken or evil.
+ */
+ if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
+ sctp_m_freem(control->data);
+ control->data = NULL;
+ sctp_free_remote_addr(control->whoFrom);
+ sctp_free_a_readq(stcb, control);
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x20000004);
+ ippp++;
+ *ippp = tsn;
+ ippp++;
+ *ippp = ((strmno << 16) | strmseq);
+ }
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper);
+
+ *abort_flag = 1;
+ return (0);
+ }
+ }
+ }
+ /* ok, if we reach here we have passed the sanity checks */
+ if (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) {
+ /* queue directly into socket buffer */
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1);
+ } else {
+ /*
+			 * Special check for when streams are resetting. We
+			 * could be smarter about this and check the actual
+			 * stream to see if it is not being reset... that
+			 * way we would not create a HOLB between streams
+			 * being reset and those not being reset.
+			 *
+			 * We take complete messages that have a stream reset
+			 * intervening (aka the TSN is after where our
+			 * cum-ack needs to be) off and put them on a
+			 * pending_reply_queue. The reassembly ones we do
+			 * not have to worry about since they are all sorted
+			 * and processed by TSN order. It is only the
+			 * singletons I must worry about.
+ */
+ struct sctp_stream_reset_list *liste;
+
+ if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
+ ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)) ||
+			    (tsn == liste->tsn))
+ ) {
+ /*
+				 * yep, it's past where we need to reset... go
+ * ahead and queue it.
+ */
+ if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
+ /* first one on */
+ TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
+ } else {
+ struct sctp_queued_to_read *ctlOn;
+ unsigned char inserted = 0;
+
+ ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
+ while (ctlOn) {
+ if (compare_with_wrap(control->sinfo_tsn,
+ ctlOn->sinfo_tsn, MAX_TSN)) {
+ ctlOn = TAILQ_NEXT(ctlOn, next);
+ } else {
+ /* found it */
+ TAILQ_INSERT_BEFORE(ctlOn, control, next);
+ inserted = 1;
+ break;
+ }
+ }
+ if (inserted == 0) {
+ /*
+					 * nothing already queued
+					 * sorts after it, so it
+					 * must be put at the end.
+ */
+ TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
+ }
+ }
+ } else {
+ sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
+ if (*abort_flag) {
+ return (0);
+ }
+ }
+ }
+ } else {
+ /* Into the re-assembly queue */
+ sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
+ if (*abort_flag) {
+ return (0);
+ }
+ }
+finish_express_del:
+ if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
+ /* we have a new high score */
+ asoc->highest_tsn_inside_map = tsn;
+#ifdef SCTP_MAP_LOGGING
+ sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
+#endif
+ }
+ if (tsn == (asoc->cumulative_tsn + 1)) {
+ /* Update cum-ack */
+ asoc->cumulative_tsn = tsn;
+ }
+ if (last_chunk) {
+ *m = NULL;
+ }
+ if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) {
+ SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
+ } else {
+ SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
+ }
+ SCTP_STAT_INCR(sctps_recvdata);
+ /* Set it present please */
+#ifdef SCTP_STR_LOGGING
+ sctp_log_strm_del_alt(tsn, strmseq, SCTP_STR_LOG_FROM_MARK_TSN);
+#endif
+#ifdef SCTP_MAP_LOGGING
+ sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
+ asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
+#endif
+ SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
+ return (1);
+}
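+
+/*
+ * Illustrative sketch only, not part of the SCTP sources: one common way a
+ * wrap-aware comparison such as compare_with_wrap() can be modeled. It
+ * treats a and b as serial numbers modulo (wrap + 1) and returns non-zero
+ * when a is "newer" than b, which is the semantics the callers above rely
+ * on. The real compare_with_wrap() may be coded differently.
+ */
+static int
+example_tsn_newer_than(uint32_t a, uint32_t b, uint32_t wrap)
+{
+	/*
+	 * a is newer when the forward distance from b to a is less than
+	 * half of the sequence space.
+	 */
+	return ((a > b && (a - b) < ((wrap >> 1) + 1)) ||
+	    (a < b && (b - a) > ((wrap >> 1) + 1)));
+}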
+
+int8_t sctp_map_lookup_tab[256] = {
+ -1, 0, -1, 1, -1, 0, -1, 2,
+ -1, 0, -1, 1, -1, 0, -1, 3,
+ -1, 0, -1, 1, -1, 0, -1, 2,
+ -1, 0, -1, 1, -1, 0, -1, 4,
+ -1, 0, -1, 1, -1, 0, -1, 2,
+ -1, 0, -1, 1, -1, 0, -1, 3,
+ -1, 0, -1, 1, -1, 0, -1, 2,
+ -1, 0, -1, 1, -1, 0, -1, 5,
+ -1, 0, -1, 1, -1, 0, -1, 2,
+ -1, 0, -1, 1, -1, 0, -1, 3,
+ -1, 0, -1, 1, -1, 0, -1, 2,
+ -1, 0, -1, 1, -1, 0, -1, 4,
+ -1, 0, -1, 1, -1, 0, -1, 2,
+ -1, 0, -1, 1, -1, 0, -1, 3,
+ -1, 0, -1, 1, -1, 0, -1, 2,
+ -1, 0, -1, 1, -1, 0, -1, 6,
+ -1, 0, -1, 1, -1, 0, -1, 2,
+ -1, 0, -1, 1, -1, 0, -1, 3,
+ -1, 0, -1, 1, -1, 0, -1, 2,
+ -1, 0, -1, 1, -1, 0, -1, 4,
+ -1, 0, -1, 1, -1, 0, -1, 2,
+ -1, 0, -1, 1, -1, 0, -1, 3,
+ -1, 0, -1, 1, -1, 0, -1, 2,
+ -1, 0, -1, 1, -1, 0, -1, 5,
+ -1, 0, -1, 1, -1, 0, -1, 2,
+ -1, 0, -1, 1, -1, 0, -1, 3,
+ -1, 0, -1, 1, -1, 0, -1, 2,
+ -1, 0, -1, 1, -1, 0, -1, 4,
+ -1, 0, -1, 1, -1, 0, -1, 2,
+ -1, 0, -1, 1, -1, 0, -1, 3,
+ -1, 0, -1, 1, -1, 0, -1, 2,
+ -1, 0, -1, 1, -1, 0, -1, 7,
+};
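+
+/*
+ * Illustrative sketch only: what the table above encodes. For a mapping
+ * array byte b, sctp_map_lookup_tab[b] + 1 is the number of consecutive
+ * set bits counting up from bit 0, i.e. how many in-sequence TSNs that
+ * byte contributes to the cum-ack scan in sctp_sack_check(); the -1
+ * entries mean bit 0 itself is clear. A direct bit scan computing the
+ * same value:
+ */
+static int
+example_consecutive_low_bits(uint8_t b)
+{
+	int n = 0;
+
+	while (b & 0x01) {
+		n++;
+		b >>= 1;
+	}
+	return (n);	/* equals sctp_map_lookup_tab[b] + 1 for the original b */
+}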
+
+
+void
+sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
+{
+ /*
+	 * Now we also need to check the mapping array in a couple of ways:
+	 * did we move the cum-ack point, and can we slide the map down?
+ */
+ struct sctp_association *asoc;
+ int i, at;
+ int all_ones;
+ int slide_from, slide_end, lgap, distance;
+
+#ifdef SCTP_MAP_LOGGING
+ uint32_t old_cumack, old_base, old_highest;
+ unsigned char aux_array[64];
+
+#endif
+ struct sctp_stream_reset_list *liste;
+
+ asoc = &stcb->asoc;
+ at = 0;
+
+#ifdef SCTP_MAP_LOGGING
+ old_cumack = asoc->cumulative_tsn;
+ old_base = asoc->mapping_array_base_tsn;
+ old_highest = asoc->highest_tsn_inside_map;
+ if (asoc->mapping_array_size < 64)
+ memcpy(aux_array, asoc->mapping_array,
+ asoc->mapping_array_size);
+ else
+ memcpy(aux_array, asoc->mapping_array, 64);
+#endif
+
+ /*
+ * We could probably improve this a small bit by calculating the
+ * offset of the current cum-ack as the starting point.
+ */
+ all_ones = 1;
+ at = 0;
+ for (i = 0; i < stcb->asoc.mapping_array_size; i++) {
+ if (asoc->mapping_array[i] == 0xff) {
+ at += 8;
+ } else {
+ /* there is a 0 bit */
+ all_ones = 0;
+ at += sctp_map_lookup_tab[asoc->mapping_array[i]];
+ break;
+ }
+ }
+ asoc->cumulative_tsn = asoc->mapping_array_base_tsn + at;
+	/* at is one off, since in the table an embedded -1 is present */
+ at++;
+
+ if (compare_with_wrap(asoc->cumulative_tsn,
+ asoc->highest_tsn_inside_map,
+ MAX_TSN)) {
+#ifdef INVARIANTS
+ panic("huh, cumack greater than high-tsn in map");
+#else
+ printf("huh, cumack greater than high-tsn in map - should panic?\n");
+ asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
+#endif
+ }
+ if (all_ones ||
+ (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) {
+		/* The complete array was covered by a single FR */
+		/* highest becomes the cum-ack */
+ int clr;
+
+ asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
+ /* clear the array */
+ if (all_ones)
+ clr = asoc->mapping_array_size;
+ else {
+ clr = (at >> 3) + 1;
+ /*
+			 * this should be the all-ones case, but just in
+			 * case :>
+ */
+ if (clr > asoc->mapping_array_size)
+ clr = asoc->mapping_array_size;
+ }
+ memset(asoc->mapping_array, 0, clr);
+ /* base becomes one ahead of the cum-ack */
+ asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
+#ifdef SCTP_MAP_LOGGING
+ sctp_log_map(old_base, old_cumack, old_highest,
+ SCTP_MAP_PREPARE_SLIDE);
+ sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
+ asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
+#endif
+ } else if (at >= 8) {
+ /* we can slide the mapping array down */
+		/* Calculate the new byte position we can move down */
+ slide_from = at >> 3;
+ /*
+ * now calculate the ceiling of the move using our highest
+ * TSN value
+ */
+ if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
+ lgap = asoc->highest_tsn_inside_map -
+ asoc->mapping_array_base_tsn;
+ } else {
+ lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
+ asoc->highest_tsn_inside_map + 1;
+ }
+ slide_end = lgap >> 3;
+ if (slide_end < slide_from) {
+ panic("impossible slide");
+ }
+ distance = (slide_end - slide_from) + 1;
+#ifdef SCTP_MAP_LOGGING
+ sctp_log_map(old_base, old_cumack, old_highest,
+ SCTP_MAP_PREPARE_SLIDE);
+ sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
+ (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
+#endif
+ if (distance + slide_from > asoc->mapping_array_size ||
+ distance < 0) {
+ /*
+ * Here we do NOT slide forward the array so that
+ * hopefully when more data comes in to fill it up
+ * we will be able to slide it forward. Really I
+ * don't think this should happen :-0
+ */
+
+#ifdef SCTP_MAP_LOGGING
+ sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
+ (uint32_t) asoc->mapping_array_size,
+ SCTP_MAP_SLIDE_NONE);
+#endif
+ } else {
+ int ii;
+
+ for (ii = 0; ii < distance; ii++) {
+ asoc->mapping_array[ii] =
+ asoc->mapping_array[slide_from + ii];
+ }
+ for (ii = distance; ii <= slide_end; ii++) {
+ asoc->mapping_array[ii] = 0;
+ }
+ asoc->mapping_array_base_tsn += (slide_from << 3);
+#ifdef SCTP_MAP_LOGGING
+ sctp_log_map(asoc->mapping_array_base_tsn,
+ asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
+ SCTP_MAP_SLIDE_RESULT);
+#endif
+ }
+ }
+ /* check the special flag for stream resets */
+ if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
+ ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
+ (asoc->cumulative_tsn == liste->tsn))
+ ) {
+ /*
+		 * we have finished working through the backlogged TSNs; now
+		 * it is time to reset streams. 1: call reset function. 2:
+		 * free pending_reply space. 3: distribute any chunks in
+		 * pending_reply_queue.
+ */
+ struct sctp_queued_to_read *ctl;
+
+ sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
+ TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
+ SCTP_FREE(liste);
+ liste = TAILQ_FIRST(&asoc->resetHead);
+ ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
+ if (ctl && (liste == NULL)) {
+ /* All can be removed */
+ while (ctl) {
+ TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
+ sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
+ if (*abort_flag) {
+ return;
+ }
+ ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
+ }
+ } else if (ctl) {
+			/* more reset requests still pending */
+ while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
+ /*
+ * if ctl->sinfo_tsn is <= liste->tsn we can
+ * process it which is the NOT of
+ * ctl->sinfo_tsn > liste->tsn
+ */
+ TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
+ sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
+ if (*abort_flag) {
+ return;
+ }
+ ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
+ }
+ }
+ /*
+		 * Now service re-assembly to pick up anything that has been
+		 * held on the reassembly queue.
+ */
+ sctp_deliver_reasm_check(stcb, asoc);
+ }
+ /*
+ * Now we need to see if we need to queue a sack or just start the
+ * timer (if allowed).
+ */
+ if (ok_to_sack) {
+ if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
+ /*
+			 * Ok, special case for the SHUTDOWN-SENT state: here
+			 * we make sure the SACK timer is off and instead
+			 * send a SHUTDOWN and a SACK.
+ */
+ if (callout_pending(&stcb->asoc.dack_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL);
+ }
+ sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
+ sctp_send_sack(stcb);
+ } else {
+ int is_a_gap;
+
+ /* is there a gap now ? */
+ is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
+ stcb->asoc.cumulative_tsn, MAX_TSN);
+
+ /*
+ * CMT DAC algorithm: increase number of packets
+ * received since last ack
+ */
+ stcb->asoc.cmt_dac_pkts_rcvd++;
+
+			if ((stcb->asoc.first_ack_sent == 0) ||	/* first time we send a sack */
+			    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no longer is one */
+			    (stcb->asoc.numduptsns) ||	/* we have dup's */
+			    (is_a_gap) ||	/* is still a gap */
+			    (stcb->asoc.delayed_ack == 0) ||
+			    (callout_pending(&stcb->asoc.dack_timer.timer))	/* timer was up, second packet */
+			    ) {
+
+ if ((sctp_cmt_on_off) && (sctp_cmt_use_dac) &&
+ (stcb->asoc.first_ack_sent == 1) &&
+ (stcb->asoc.numduptsns == 0) &&
+ (stcb->asoc.delayed_ack) &&
+ (!callout_pending(&stcb->asoc.dack_timer.timer))) {
+
+ /*
+					 * CMT DAC algorithm: With CMT,
+					 * delay acks even in the face of
+					 * reordering. Therefore, acks that
+					 * do not have to be sent because of
+					 * the above reasons will be
+					 * delayed. That is, acks that would
+					 * have been sent due to gap reports
+					 * will be delayed with DAC. Start
+					 * the delayed ack timer.
+ */
+ sctp_timer_start(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL);
+ } else {
+ /*
+ * Ok we must build a SACK since the
+ * timer is pending, we got our
+ * first packet OR there are gaps or
+ * duplicates.
+ */
+ stcb->asoc.first_ack_sent = 1;
+
+ sctp_send_sack(stcb);
+ /* The sending will stop the timer */
+ }
+ } else {
+ sctp_timer_start(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL);
+ }
+ }
+ }
+}
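+
+/*
+ * Illustrative sketch only: a self-contained model of the mapping-array
+ * slide performed in sctp_sack_check() above. Once the first slide_from
+ * bytes are known to be all ones, the live bytes move down, the vacated
+ * tail is cleared, and the base TSN advances by 8 TSNs per byte slid.
+ * Parameter names are hypothetical.
+ */
+static void
+example_slide_map(uint8_t *map, uint32_t *base_tsn, int slide_from,
+    int slide_end)
+{
+	int distance = (slide_end - slide_from) + 1;
+	int ii;
+
+	for (ii = 0; ii < distance; ii++)
+		map[ii] = map[slide_from + ii];	/* move live bytes down */
+	for (ii = distance; ii <= slide_end; ii++)
+		map[ii] = 0;	/* clear the vacated tail */
+	*base_tsn += (uint32_t)(slide_from << 3);	/* 8 TSNs per byte */
+}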
+
+void
+sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk;
+ uint32_t tsize;
+ uint16_t nxt_todel;
+
+ if (asoc->fragmented_delivery_inprogress) {
+ sctp_service_reassembly(stcb, asoc);
+ }
+	/* Can we proceed further, i.e., is the PD-API complete? */
+ if (asoc->fragmented_delivery_inprogress) {
+ /* no */
+ return;
+ }
+ /*
+	 * Now, is there some other chunk I can deliver from the reassembly
+	 * queue?
+ */
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ if (chk == NULL) {
+ asoc->size_on_reasm_queue = 0;
+ asoc->cnt_on_reasm_queue = 0;
+ return;
+ }
+ nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
+ ((nxt_todel == chk->rec.data.stream_seq) ||
+ (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
+ /*
+		 * Yep, the first one is here. We set up to start reception
+		 * by backing down the TSN just in case we can't deliver.
+ */
+
+ /*
+		 * Before we start, though, either all of the message should
+		 * be here or enough of it (more than the endpoint's partial
+		 * delivery point) so that something can be delivered.
+ */
+ if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
+ (tsize > stcb->sctp_ep->partial_delivery_point))) {
+ asoc->fragmented_delivery_inprogress = 1;
+ asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
+ asoc->str_of_pdapi = chk->rec.data.stream_number;
+ asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
+ asoc->pdapi_ppid = chk->rec.data.payloadtype;
+ asoc->fragment_flags = chk->rec.data.rcv_flags;
+ sctp_service_reassembly(stcb, asoc);
+ }
+ }
+}
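+
+/*
+ * Illustrative sketch only: the start condition tested in
+ * sctp_service_queues() above. Partial delivery begins only when the
+ * whole message is queued for reassembly or the queued bytes exceed the
+ * endpoint's partial delivery point.
+ */
+static int
+example_should_start_pdapi(int all_of_msg_queued, uint32_t queued_bytes,
+    uint32_t partial_delivery_point)
+{
+	return (all_of_msg_queued ||
+	    queued_bytes > partial_delivery_point);
+}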
+
+extern int sctp_strict_data_order;
+
+int
+sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
+ struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net, uint32_t * high_tsn)
+{
+ struct sctp_data_chunk *ch, chunk_buf;
+ struct sctp_association *asoc;
+ int num_chunks = 0; /* number of control chunks processed */
+ int stop_proc = 0;
+ int chk_length, break_flag, last_chunk;
+ int abort_flag = 0, was_a_gap = 0;
+ struct mbuf *m;
+
+ /* set the rwnd */
+ sctp_set_rwnd(stcb, &stcb->asoc);
+
+ m = *mm;
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ asoc = &stcb->asoc;
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
+ /*
+ * wait a minute, this guy is gone, there is no longer a
+ * receiver. Send peer an ABORT!
+ */
+ struct mbuf *op_err;
+
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
+ sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
+ return (2);
+ }
+ if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
+ stcb->asoc.cumulative_tsn, MAX_TSN)) {
+ /* there was a gap before this data was processed */
+ was_a_gap = 1;
+ }
+ /*
+	 * set up where we got the last DATA packet from for any SACK that
+ * may need to go out. Don't bump the net. This is done ONLY when a
+ * chunk is assigned.
+ */
+ asoc->last_data_chunk_from = net;
+
+ /*
+ * Now before we proceed we must figure out if this is a wasted
+ * cluster... i.e. it is a small packet sent in and yet the driver
+ * underneath allocated a full cluster for it. If so we must copy it
+ * to a smaller mbuf and free up the cluster mbuf. This will help
+ * with cluster starvation.
+ */
+ if (m->m_len < (long)MHLEN && m->m_next == NULL) {
+ /* we only handle mbufs that are singletons.. not chains */
+ m = sctp_get_mbuf_for_msg(m->m_len, 1, M_DONTWAIT, 1, MT_DATA);
+ if (m) {
+			/* ok, let's see if we can copy the data up */
+ caddr_t *from, *to;
+
+ if ((*mm)->m_flags & M_PKTHDR) {
+ /* got to copy the header first */
+ M_MOVE_PKTHDR(m, (*mm));
+ }
+ /* get the pointers and copy */
+ to = mtod(m, caddr_t *);
+ from = mtod((*mm), caddr_t *);
+ memcpy(to, from, (*mm)->m_len);
+ /* copy the length and free up the old */
+ m->m_len = (*mm)->m_len;
+ sctp_m_freem(*mm);
+			/* success, back copy */
+ *mm = m;
+ } else {
+ /* We are in trouble in the mbuf world .. yikes */
+ m = *mm;
+ }
+ }
+ /* get pointer to the first chunk header */
+ ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
+ if (ch == NULL) {
+ return (1);
+ }
+ /*
+ * process all DATA chunks...
+ */
+ *high_tsn = asoc->cumulative_tsn;
+ break_flag = 0;
+ while (stop_proc == 0) {
+ /* validate chunk length */
+ chk_length = ntohs(ch->ch.chunk_length);
+ if (length - *offset < chk_length) {
+			/* all done, mutilated chunk */
+ stop_proc = 1;
+ break;
+ }
+ if (ch->ch.chunk_type == SCTP_DATA) {
+ if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
+ /*
+				 * Need to send an abort since we had an
+				 * invalid data chunk.
+ */
+ struct mbuf *op_err;
+
+ op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+
+ if (op_err) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ op_err->m_len = sizeof(struct sctp_paramhdr) +
+ (2 * sizeof(uint32_t));
+ ph = mtod(op_err, struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(op_err->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x30000001);
+ ippp++;
+ *ippp = asoc->cumulative_tsn;
+
+ }
+ sctp_abort_association(inp, stcb, m, iphlen, sh,
+ op_err);
+ return (2);
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xB1, 0);
+#endif
+ if (SCTP_SIZE32(chk_length) == (length - *offset)) {
+ last_chunk = 1;
+ } else {
+ last_chunk = 0;
+ }
+ if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
+ chk_length, net, high_tsn, &abort_flag, &break_flag,
+ last_chunk)) {
+ num_chunks++;
+ }
+ if (abort_flag)
+ return (2);
+
+ if (break_flag) {
+ /*
+ * Set because of out of rwnd space and no
+ * drop rep space left.
+ */
+ stop_proc = 1;
+ break;
+ }
+ } else {
+ /* not a data chunk in the data region */
+ switch (ch->ch.chunk_type) {
+ case SCTP_INITIATION:
+ case SCTP_INITIATION_ACK:
+ case SCTP_SELECTIVE_ACK:
+ case SCTP_HEARTBEAT_REQUEST:
+ case SCTP_HEARTBEAT_ACK:
+ case SCTP_ABORT_ASSOCIATION:
+ case SCTP_SHUTDOWN:
+ case SCTP_SHUTDOWN_ACK:
+ case SCTP_OPERATION_ERROR:
+ case SCTP_COOKIE_ECHO:
+ case SCTP_COOKIE_ACK:
+ case SCTP_ECN_ECHO:
+ case SCTP_ECN_CWR:
+ case SCTP_SHUTDOWN_COMPLETE:
+ case SCTP_AUTHENTICATION:
+ case SCTP_ASCONF_ACK:
+ case SCTP_PACKET_DROPPED:
+ case SCTP_STREAM_RESET:
+ case SCTP_FORWARD_CUM_TSN:
+ case SCTP_ASCONF:
+ /*
+ * Now, what do we do with KNOWN chunks that
+ * are NOT in the right place?
+ *
+ * For now, I do nothing but ignore them. We
+ * may later want to add sysctl stuff to
+ * switch out and do either an ABORT() or
+ * possibly process them.
+ */
+ if (sctp_strict_data_order) {
+ struct mbuf *op_err;
+
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ sctp_abort_association(inp, stcb, m, iphlen, sh, op_err);
+ return (2);
+ }
+ break;
+ default:
+ /* unknown chunk type, use bit rules */
+ if (ch->ch.chunk_type & 0x40) {
+ /* Add a error report to the queue */
+ struct mbuf *mm;
+ struct sctp_paramhdr *phd;
+
+ mm = sctp_get_mbuf_for_msg(sizeof(*phd), 1, M_DONTWAIT, 1, MT_DATA);
+ if (mm) {
+ phd = mtod(mm, struct sctp_paramhdr *);
+ /*
+ * We cheat and use param
+ * type since we did not
+					 * bother to define an error
+ * cause struct. They are
+ * the same basic format
+ * with different names.
+ */
+ phd->param_type =
+ htons(SCTP_CAUSE_UNRECOG_CHUNK);
+ phd->param_length =
+ htons(chk_length + sizeof(*phd));
+ mm->m_len = sizeof(*phd);
+ mm->m_next = sctp_m_copym(m, *offset,
+ SCTP_SIZE32(chk_length),
+ M_DONTWAIT);
+ if (mm->m_next) {
+ mm->m_pkthdr.len =
+ SCTP_SIZE32(chk_length) +
+ sizeof(*phd);
+ sctp_queue_op_err(stcb, mm);
+ } else {
+ sctp_m_freem(mm);
+ }
+ }
+ }
+ if ((ch->ch.chunk_type & 0x80) == 0) {
+ /* discard the rest of this packet */
+ stop_proc = 1;
+ } /* else skip this bad chunk and
+ * continue... */
+ break;
+			}	/* end switch on chunk type */
+ }
+ *offset += SCTP_SIZE32(chk_length);
+ if ((*offset >= length) || stop_proc) {
+ /* no more data left in the mbuf chain */
+ stop_proc = 1;
+ continue;
+ }
+ ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
+ if (ch == NULL) {
+ *offset = length;
+ stop_proc = 1;
+ break;
+
+ }
+ } /* while */
+ if (break_flag) {
+ /*
+ * we need to report rwnd overrun drops.
+ */
+ sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
+ }
+ if (num_chunks) {
+ /*
+ * Did we get data, if so update the time for auto-close and
+ * give peer credit for being alive.
+ */
+ SCTP_STAT_INCR(sctps_recvpktwithdata);
+ stcb->asoc.overall_error_count = 0;
+ SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
+ }
+ /* now service all of the reassm queue if needed */
+ if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
+ sctp_service_queues(stcb, asoc);
+
+ if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
+ /*
+		 * Ensure that we ack right away by making sure that a d-ack
+		 * timer is running, so the sack_check will send a sack.
+ */
+ sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb,
+ net);
+ }
+ /* Start a sack timer or QUEUE a SACK for sending */
+ if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) &&
+ (stcb->asoc.first_ack_sent)) {
+ /* Everything is in order */
+ if (stcb->asoc.mapping_array[0] == 0xff) {
+ /* need to do the slide */
+ sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
+ } else {
+ if (callout_pending(&stcb->asoc.dack_timer.timer)) {
+ stcb->asoc.first_ack_sent = 1;
+ callout_stop(&stcb->asoc.dack_timer.timer);
+ sctp_send_sack(stcb);
+ } else {
+ sctp_timer_start(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL);
+ }
+ }
+ } else {
+ sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
+ }
+ if (abort_flag)
+ return (2);
+
+ return (0);
+}
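+
+/*
+ * Illustrative sketch only: the RFC 2960 "action" bits applied to unknown
+ * chunk types in sctp_process_data() above. The two high-order bits of a
+ * chunk type tell the receiver what to do with a chunk it does not
+ * recognize: 0x40 means report it in an operational error, 0x80 means
+ * keep processing the rest of the packet.
+ */
+static void
+example_unknown_chunk_action(uint8_t chunk_type, int *report_it,
+    int *keep_processing)
+{
+	*report_it = (chunk_type & 0x40) != 0;
+	*keep_processing = (chunk_type & 0x80) != 0;
+}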
+
+static void
+sctp_handle_segments(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
+ uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack, int num_seg, int *ecn_seg_sums)
+{
+ /************************************************/
+ /* process fragments and update sendqueue */
+ /************************************************/
+ struct sctp_sack *sack;
+ struct sctp_gap_ack_block *frag;
+ struct sctp_tmit_chunk *tp1;
+ int i;
+ unsigned int j;
+
+#ifdef SCTP_FR_LOGGING
+ int num_frs = 0;
+
+#endif
+ uint16_t frag_strt, frag_end, primary_flag_set;
+ u_long last_frag_high;
+
+ /*
+ * @@@ JRI : TODO: This flag is not used anywhere .. remove?
+ */
+ if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
+ primary_flag_set = 1;
+ } else {
+ primary_flag_set = 0;
+ }
+
+ sack = &ch->sack;
+ frag = (struct sctp_gap_ack_block *)((caddr_t)sack +
+ sizeof(struct sctp_sack));
+ tp1 = NULL;
+ last_frag_high = 0;
+ for (i = 0; i < num_seg; i++) {
+ frag_strt = ntohs(frag->start);
+ frag_end = ntohs(frag->end);
+		/* some sanity checks on the fragment offsets */
+ if (frag_strt > frag_end) {
+ /* this one is malformed, skip */
+ frag++;
+ continue;
+ }
+ if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
+ MAX_TSN))
+ *biggest_tsn_acked = frag_end + last_tsn;
+
+		/* mark acked dgs and find out the highest TSN being acked */
+ if (tp1 == NULL) {
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+
+ /* save the locations of the last frags */
+ last_frag_high = frag_end + last_tsn;
+ } else {
+ /*
+			 * now let's see if we need to reset the queue due to
+			 * an out-of-order SACK fragment
+ */
+ if (compare_with_wrap(frag_strt + last_tsn,
+ last_frag_high, MAX_TSN)) {
+ /*
+ * if the new frag starts after the last TSN
+ * frag covered, we are ok and this one is
+ * beyond the last one
+ */
+ ;
+ } else {
+ /*
+				 * ok, they have reset us, so we need to
+				 * reset the queue; this will cause extra
+				 * hunting, but hey, they chose the
+				 * performance hit when they failed to order
+				 * their gaps..
+ */
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ }
+ last_frag_high = frag_end + last_tsn;
+ }
+ for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) {
+ while (tp1) {
+#ifdef SCTP_FR_LOGGING
+ if (tp1->rec.data.doing_fast_retransmit)
+ num_frs++;
+#endif
+
+ /*
+ * CMT: CUCv2 algorithm. For each TSN being
+ * processed from the sent queue, track the
+ * next expected pseudo-cumack, or
+ * rtx_pseudo_cumack, if required. Separate
+ * cumack trackers for first transmissions,
+ * and retransmissions.
+ */
+ if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
+ (tp1->snd_count == 1)) {
+ tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
+ tp1->whoTo->find_pseudo_cumack = 0;
+ }
+ if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
+ (tp1->snd_count > 1)) {
+ tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
+ tp1->whoTo->find_rtx_pseudo_cumack = 0;
+ }
+ if (tp1->rec.data.TSN_seq == j) {
+ if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
+ /*
+ * must be held until
+ * cum-ack passes
+ */
+ /*
+ * ECN Nonce: Add the nonce
+ * value to the sender's
+ * nonce sum
+ */
+ if (tp1->sent < SCTP_DATAGRAM_ACKED) {
+ /*
+ * If it is less
+ * than ACKED, it is
+						 * now no longer in
+ * flight. Higher
+ * values may
+ * already be set
+ * via previous Gap
+ * Ack Blocks...
+ * i.e. ACKED or
+ * MARKED.
+ */
+ if (compare_with_wrap(tp1->rec.data.TSN_seq,
+ *biggest_newly_acked_tsn, MAX_TSN)) {
+ *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
+ }
+ /*
+					 * CMT: SFR algo (and HTNA) -
+					 * set saw_newack to 1 for the
+					 * dest being newly acked.
+					 * Update
+					 * this_sack_highest_newack
+					 * if appropriate.
+ */
+ if (tp1->rec.data.chunk_was_revoked == 0)
+ tp1->whoTo->saw_newack = 1;
+
+ if (compare_with_wrap(tp1->rec.data.TSN_seq,
+ tp1->whoTo->this_sack_highest_newack,
+ MAX_TSN)) {
+ tp1->whoTo->this_sack_highest_newack =
+ tp1->rec.data.TSN_seq;
+ }
+ /*
+					 * CMT DAC algo: also update
+					 * this_sack_lowest_newack
+ */
+ if (*this_sack_lowest_newack == 0) {
+#ifdef SCTP_SACK_LOGGING
+ sctp_log_sack(*this_sack_lowest_newack,
+ last_tsn,
+ tp1->rec.data.TSN_seq,
+ 0,
+ 0,
+ SCTP_LOG_TSN_ACKED);
+#endif
+ *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
+ }
+ /*
+					 * CMT: CUCv2 algorithm. If
+					 * the (rtx-)pseudo-cumack
+					 * for the corresp dest is
+					 * being acked, then we have
+					 * a new (rtx-)pseudo-cumack.
+					 * Set
+					 * new_(rtx_)pseudo_cumack
+					 * to TRUE so that the cwnd
+					 * for this dest can be
+					 * updated. Also trigger a
+					 * search for the next
+					 * expected
+					 * (rtx-)pseudo-cumack.
+					 * Separate pseudo_cumack
+					 * trackers for first
+					 * transmissions and
+					 * retransmissions.
+ */
+ if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
+ if (tp1->rec.data.chunk_was_revoked == 0) {
+ tp1->whoTo->new_pseudo_cumack = 1;
+ }
+ tp1->whoTo->find_pseudo_cumack = 1;
+ }
+#ifdef SCTP_CWND_LOGGING
+ sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
+#endif
+ if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
+ if (tp1->rec.data.chunk_was_revoked == 0) {
+ tp1->whoTo->new_pseudo_cumack = 1;
+ }
+ tp1->whoTo->find_rtx_pseudo_cumack = 1;
+ }
+#ifdef SCTP_SACK_LOGGING
+ sctp_log_sack(*biggest_newly_acked_tsn,
+ last_tsn,
+ tp1->rec.data.TSN_seq,
+ frag_strt,
+ frag_end,
+ SCTP_LOG_TSN_ACKED);
+#endif
+
+ if (tp1->rec.data.chunk_was_revoked == 0) {
+ /*
+						 * Revoked chunks don't
+						 * count, since we
+						 * previously pulled
+						 * them from the
+						 * flight size.
+ */
+ if (tp1->whoTo->flight_size >= tp1->book_size)
+ tp1->whoTo->flight_size -= tp1->book_size;
+ else
+ tp1->whoTo->flight_size = 0;
+ if (asoc->total_flight >= tp1->book_size) {
+ asoc->total_flight -= tp1->book_size;
+ if (asoc->total_flight_count > 0)
+ asoc->total_flight_count--;
+ } else {
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ }
+
+ tp1->whoTo->net_ack += tp1->send_size;
+
+ if (tp1->snd_count < 2) {
+						/*
+						 * True non-retransmitted
+						 * chunk
+						 */
+ tp1->whoTo->net_ack2 += tp1->send_size;
+
+						/* update RTO too? */
+ if (tp1->do_rtt) {
+ tp1->whoTo->RTO =
+ sctp_calculate_rto(stcb,
+ asoc,
+ tp1->whoTo,
+ &tp1->sent_rcv_time);
+ tp1->whoTo->rto_pending = 0;
+ tp1->do_rtt = 0;
+ }
+ }
+ }
+ }
+ if (tp1->sent <= SCTP_DATAGRAM_RESEND &&
+ tp1->sent != SCTP_DATAGRAM_UNSENT &&
+ compare_with_wrap(tp1->rec.data.TSN_seq,
+ asoc->this_sack_highest_gap,
+ MAX_TSN)) {
+ asoc->this_sack_highest_gap =
+ tp1->rec.data.TSN_seq;
+ }
+ if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_decr(asoc->sent_queue_retran_cnt);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xB2,
+ (asoc->sent_queue_retran_cnt & 0x000000ff));
+#endif
+
+ }
+ (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
+ (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
+
+ tp1->sent = SCTP_DATAGRAM_MARKED;
+ }
+ break;
+ } /* if (tp1->TSN_seq == j) */
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
+ MAX_TSN))
+ break;
+
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ } /* end while (tp1) */
+		}		/* end for (j = frag_strt) */
+ frag++; /* next one */
+ }
+#ifdef SCTP_FR_LOGGING
+ /*
+ * if (num_frs) sctp_log_fr(*biggest_tsn_acked,
+ * *biggest_newly_acked_tsn, last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
+ */
+#endif
+}
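+
+/*
+ * Illustrative sketch only: how a SACK's gap-ack blocks translate into
+ * absolute TSN ranges, as walked in sctp_handle_segments() above. Each
+ * block carries 16-bit start/end offsets relative to the SACK's
+ * cumulative TSN; a block whose start exceeds its end is malformed and
+ * skipped. The struct is a stand-in for struct sctp_gap_ack_block.
+ */
+struct example_gap_block {
+	uint16_t start;		/* network byte order */
+	uint16_t end;		/* network byte order */
+};
+
+static void
+example_gap_ranges(uint32_t cum_tsn, struct example_gap_block *gb,
+    int num_seg)
+{
+	uint16_t s, e;
+	int i;
+
+	for (i = 0; i < num_seg; i++) {
+		s = ntohs(gb[i].start);
+		e = ntohs(gb[i].end);
+		if (s > e)
+			continue;	/* malformed, skip as the code above does */
+		printf("TSNs %u..%u acked\n", cum_tsn + s, cum_tsn + e);
+	}
+}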
+
+static void
+sctp_check_for_revoked(struct sctp_association *asoc, uint32_t cumack,
+ u_long biggest_tsn_acked)
+{
+ struct sctp_tmit_chunk *tp1;
+ int tot_revoked = 0;
+
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ while (tp1) {
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
+ MAX_TSN)) {
+ /*
+			 * ok, this guy is either ACKED or MARKED. If it is
+			 * ACKED it has been previously acked but not this
+			 * time, i.e. revoked. If it is MARKED it was ACK'ed
+ * again.
+ */
+ if (tp1->sent == SCTP_DATAGRAM_ACKED) {
+ /* it has been revoked */
+ /*
+ * We do NOT add back to flight size here
+				 * since it is really NOT in flight. A resend
+				 * (when/if it occurs) will add back to the
+				 * flight size.
+ */
+ tp1->sent = SCTP_DATAGRAM_SENT;
+ tp1->rec.data.chunk_was_revoked = 1;
+ tot_revoked++;
+#ifdef SCTP_SACK_LOGGING
+ sctp_log_sack(asoc->last_acked_seq,
+ cumack,
+ tp1->rec.data.TSN_seq,
+ 0,
+ 0,
+ SCTP_LOG_TSN_REVOKED);
+#endif
+ } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
+ /* it has been re-acked in this SACK */
+ tp1->sent = SCTP_DATAGRAM_ACKED;
+ }
+ }
+ if (tp1->sent == SCTP_DATAGRAM_UNSENT)
+ break;
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ }
+ if (tot_revoked > 0) {
+ /*
+ * Setup the ecn nonce re-sync point. We do this since once
+ * data is revoked we begin to retransmit things, which do
+ * NOT have the ECN bits set. This means we are now out of
+ * sync and must wait until we get back in sync with the
+ * peer to check ECN bits.
+ */
+ tp1 = TAILQ_FIRST(&asoc->send_queue);
+ if (tp1 == NULL) {
+ asoc->nonce_resync_tsn = asoc->sending_seq;
+ } else {
+ asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
+ }
+ asoc->nonce_wait_for_ecne = 0;
+ asoc->nonce_sum_check = 0;
+ }
+}
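+
+/*
+ * Illustrative sketch only: the state movement implemented in
+ * sctp_check_for_revoked() above, with stand-in states for the
+ * SCTP_DATAGRAM_* values. A chunk above the cum-ack that an earlier SACK
+ * ACKED but this one did not is revoked back to SENT; one the current
+ * gap blocks MARKED becomes ACKED again.
+ */
+enum example_dgram_state { EXAMPLE_SENT, EXAMPLE_MARKED, EXAMPLE_ACKED };
+
+static enum example_dgram_state
+example_revoke_transition(enum example_dgram_state s)
+{
+	if (s == EXAMPLE_ACKED)
+		return (EXAMPLE_SENT);	/* previously acked, missing now */
+	if (s == EXAMPLE_MARKED)
+		return (EXAMPLE_ACKED);	/* re-acked by this SACK */
+	return (s);
+}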
+
+extern int sctp_peer_chunk_oh;
+
+static void
+sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved)
+{
+ struct sctp_tmit_chunk *tp1;
+ int strike_flag = 0;
+ struct timeval now;
+ int tot_retrans = 0;
+ uint32_t sending_seq;
+ struct sctp_nets *net;
+ int num_dests_sacked = 0;
+
+ /*
+	 * select the sending_seq; this is either the next thing ready to be
+	 * sent but not transmitted, OR the next seq we will assign.
+ */
+ tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
+ if (tp1 == NULL) {
+ sending_seq = asoc->sending_seq;
+ } else {
+ sending_seq = tp1->rec.data.TSN_seq;
+ }
+
+ /* CMT DAC algo: finding out if SACK is a mixed SACK */
+ if (sctp_cmt_on_off && sctp_cmt_use_dac) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (net->saw_newack)
+ num_dests_sacked++;
+ }
+ }
+ if (stcb->asoc.peer_supports_prsctp) {
+ SCTP_GETTIME_TIMEVAL(&now);
+ }
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ while (tp1) {
+ strike_flag = 0;
+ if (tp1->no_fr_allowed) {
+ /* this one had a timeout or something */
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ continue;
+ }
+#ifdef SCTP_FR_LOGGING
+ if (tp1->sent < SCTP_DATAGRAM_RESEND)
+ sctp_log_fr(biggest_tsn_newly_acked,
+ tp1->rec.data.TSN_seq,
+ tp1->sent,
+ SCTP_FR_LOG_CHECK_STRIKE);
+#endif
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
+ MAX_TSN) ||
+ tp1->sent == SCTP_DATAGRAM_UNSENT) {
+ /* done */
+ break;
+ }
+ if (stcb->asoc.peer_supports_prsctp) {
+ if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
+ /* Is it expired? */
+ if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
+ /* Yes so drop it */
+ if (tp1->data != NULL) {
+ sctp_release_pr_sctp_chunk(stcb, tp1,
+ (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
+ &asoc->sent_queue);
+ }
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ continue;
+ }
+ }
+ if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
+ /* Has it been retransmitted tv_sec times? */
+ if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
+ /* Yes, so drop it */
+ if (tp1->data != NULL) {
+ sctp_release_pr_sctp_chunk(stcb, tp1,
+ (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
+ &asoc->sent_queue);
+ }
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ continue;
+ }
+ }
+ }
+ if (compare_with_wrap(tp1->rec.data.TSN_seq,
+ asoc->this_sack_highest_gap, MAX_TSN)) {
+ /* we are beyond the tsn in the sack */
+ break;
+ }
+ if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
+ /* either a RESEND, ACKED, or MARKED */
+ /* skip */
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ continue;
+ }
+ /*
+ * CMT : SFR algo (covers part of DAC and HTNA as well)
+ */
+ if (tp1->whoTo->saw_newack == 0) {
+ /*
+			 * No new acks were received for data sent to this
+ * dest. Therefore, according to the SFR algo for
+ * CMT, no data sent to this dest can be marked for
+ * FR using this SACK. (iyengar@cis.udel.edu,
+ * 2005/05/12)
+ */
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ continue;
+ } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
+ tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
+ /*
+			 * CMT: New acks were received for data sent to
+ * this dest. But no new acks were seen for data
+ * sent after tp1. Therefore, according to the SFR
+ * algo for CMT, tp1 cannot be marked for FR using
+ * this SACK. This step covers part of the DAC algo
+ * and the HTNA algo as well.
+ */
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ continue;
+ }
+ /*
+		 * Here we check to see if we have already done a FR
+ * and if so we see if the biggest TSN we saw in the sack is
+ * smaller than the recovery point. If so we don't strike
+ * the tsn... otherwise we CAN strike the TSN.
+ */
+ /*
+ * @@@ JRI: Check for CMT
+ */
+ if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
+ /*
+ * Strike the TSN if in fast-recovery and cum-ack
+ * moved.
+ */
+#ifdef SCTP_FR_LOGGING
+ sctp_log_fr(biggest_tsn_newly_acked,
+ tp1->rec.data.TSN_seq,
+ tp1->sent,
+ SCTP_FR_LOG_STRIKE_CHUNK);
+#endif
+ tp1->sent++;
+ if (sctp_cmt_on_off && sctp_cmt_use_dac) {
+ /*
+ * CMT DAC algorithm: If SACK flag is set to
+ * 0, then lowest_newack test will not pass
+ * because it would have been set to the
+				 * cumack earlier. If not already marked for
+				 * rtx, if not a mixed sack, and if tp1 is
+				 * not between two sacked TSNs, then mark by
+ * one more.
+ */
+ if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
+ compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
+#ifdef SCTP_FR_LOGGING
+ sctp_log_fr(16 + num_dests_sacked,
+ tp1->rec.data.TSN_seq,
+ tp1->sent,
+ SCTP_FR_LOG_STRIKE_CHUNK);
+#endif
+ tp1->sent++;
+ }
+ }
+ } else if (tp1->rec.data.doing_fast_retransmit) {
+ /*
+ * For those that have done a FR we must take
+			 * special consideration if we strike. I.e., the
+ * biggest_newly_acked must be higher than the
+ * sending_seq at the time we did the FR.
+ */
+#ifdef SCTP_FR_TO_ALTERNATE
+ /*
+ * If FR's go to new networks, then we must only do
+ * this for singly homed asoc's. However if the FR's
+			 * this for singly homed asoc's. However, if the FR's
+			 * go to the same network (Armando's work) then it's
+ */
+ if (asoc->numnets < 2)
+#else
+ if (1)
+#endif
+ {
+ if ((compare_with_wrap(biggest_tsn_newly_acked,
+ tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
+ (biggest_tsn_newly_acked ==
+ tp1->rec.data.fast_retran_tsn)) {
+ /*
+ * Strike the TSN, since this ack is
+ * beyond where things were when we
+ * did a FR.
+ */
+#ifdef SCTP_FR_LOGGING
+ sctp_log_fr(biggest_tsn_newly_acked,
+ tp1->rec.data.TSN_seq,
+ tp1->sent,
+ SCTP_FR_LOG_STRIKE_CHUNK);
+#endif
+ tp1->sent++;
+ strike_flag = 1;
+ if (sctp_cmt_on_off && sctp_cmt_use_dac) {
+ /*
+ * CMT DAC algorithm: If
+ * SACK flag is set to 0,
+ * then lowest_newack test
+ * will not pass because it
+ * would have been set to
+ * the cumack earlier. If
+						 * not already marked for rtx,
+						 * if not a mixed sack, and
+ * if tp1 is not between two
+ * sacked TSNs, then mark by
+ * one more.
+ */
+ if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
+ compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
+#ifdef SCTP_FR_LOGGING
+ sctp_log_fr(32 + num_dests_sacked,
+ tp1->rec.data.TSN_seq,
+ tp1->sent,
+ SCTP_FR_LOG_STRIKE_CHUNK);
+#endif
+ tp1->sent++;
+ }
+ }
+ }
+ }
+ /*
+ * @@@ JRI: TODO: remove code for HTNA algo. CMT's
+ * SFR algo covers HTNA.
+ */
+ } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
+ biggest_tsn_newly_acked, MAX_TSN)) {
+ /*
+ * We don't strike these: This is the HTNA
+			 * algorithm, i.e. we don't strike if our TSN is
+ * larger than the Highest TSN Newly Acked.
+ */
+ ;
+ } else {
+ /* Strike the TSN */
+#ifdef SCTP_FR_LOGGING
+ sctp_log_fr(biggest_tsn_newly_acked,
+ tp1->rec.data.TSN_seq,
+ tp1->sent,
+ SCTP_FR_LOG_STRIKE_CHUNK);
+#endif
+ tp1->sent++;
+ if (sctp_cmt_on_off && sctp_cmt_use_dac) {
+ /*
+ * CMT DAC algorithm: If SACK flag is set to
+ * 0, then lowest_newack test will not pass
+ * because it would have been set to the
+				 * cumack earlier. If not already marked for
+				 * rtx, if not a mixed sack, and if tp1 is
+				 * not between two sacked TSNs, then mark by
+ * one more.
+ */
+ if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
+ compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
+#ifdef SCTP_FR_LOGGING
+ sctp_log_fr(48 + num_dests_sacked,
+ tp1->rec.data.TSN_seq,
+ tp1->sent,
+ SCTP_FR_LOG_STRIKE_CHUNK);
+#endif
+ tp1->sent++;
+ }
+ }
+ }
+ if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ /* Increment the count to resend */
+ struct sctp_nets *alt;
+
+ /* printf("OK, we are now ready to FR this guy\n"); */
+#ifdef SCTP_FR_LOGGING
+ sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
+ 0, SCTP_FR_MARKED);
+#endif
+ if (strike_flag) {
+ /* This is a subsequent FR */
+ SCTP_STAT_INCR(sctps_sendmultfastretrans);
+ }
+ sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+
+ if (sctp_cmt_on_off) {
+ /*
+ * CMT: Using RTX_SSTHRESH policy for CMT.
+ * If CMT is being used, then pick dest with
+ * largest ssthresh for any retransmission.
+ * (iyengar@cis.udel.edu, 2005/08/12)
+ */
+ tp1->no_fr_allowed = 1;
+ alt = tp1->whoTo;
+ alt = sctp_find_alternate_net(stcb, alt, 1);
+ /*
+ * CUCv2: If a different dest is picked for
+ * the retransmission, then new
+ * (rtx-)pseudo_cumack needs to be tracked
+ * for orig dest. Let CUCv2 track new (rtx-)
+ * pseudo-cumack always.
+ */
+ tp1->whoTo->find_pseudo_cumack = 1;
+ tp1->whoTo->find_rtx_pseudo_cumack = 1;
+
+
+ } else {/* CMT is OFF */
+
+#ifdef SCTP_FR_TO_ALTERNATE
+ /* Can we find an alternate? */
+ alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
+#else
+ /*
+ * default behavior is to NOT retransmit
+ * FR's to an alternate. Armando Caro's
+ * paper details why.
+ */
+ alt = tp1->whoTo;
+#endif
+ }
+
+ tp1->rec.data.doing_fast_retransmit = 1;
+ tot_retrans++;
+ /* mark the sending seq for possible subsequent FR's */
+ /*
+ * printf("Marking TSN for FR new value %x\n",
+ * (uint32_t)tpi->rec.data.TSN_seq);
+ */
+ if (TAILQ_EMPTY(&asoc->send_queue)) {
+ /*
+				 * If the send queue is empty then it's
+ * the next sequence number that will be
+ * assigned so we subtract one from this to
+ * get the one we last sent.
+ */
+ tp1->rec.data.fast_retran_tsn = sending_seq;
+ } else {
+ /*
+ * If there are chunks on the send queue
+ * (unsent data that has made it from the
+				 * stream queues but not out the door), we
+ * take the first one (which will have the
+ * lowest TSN) and subtract one to get the
+ * one we last sent.
+ */
+ struct sctp_tmit_chunk *ttt;
+
+ ttt = TAILQ_FIRST(&asoc->send_queue);
+ tp1->rec.data.fast_retran_tsn =
+ ttt->rec.data.TSN_seq;
+ }
+
+ if (tp1->do_rtt) {
+ /*
+				 * this guy had an RTO calculation pending on
+ * it, cancel it
+ */
+ tp1->whoTo->rto_pending = 0;
+ tp1->do_rtt = 0;
+ }
+ /* fix counts and things */
+
+ tp1->whoTo->net_ack++;
+ if (tp1->whoTo->flight_size >= tp1->book_size)
+ tp1->whoTo->flight_size -= tp1->book_size;
+ else
+ tp1->whoTo->flight_size = 0;
+
+#ifdef SCTP_LOG_RWND
+ sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
+ asoc->peers_rwnd, tp1->send_size, sctp_peer_chunk_oh);
+#endif
+ /* add back to the rwnd */
+ asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh);
+
+ /* remove from the total flight */
+ if (asoc->total_flight >= tp1->book_size) {
+ asoc->total_flight -= tp1->book_size;
+ if (asoc->total_flight_count > 0)
+ asoc->total_flight_count--;
+ } else {
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ }
+
+
+ if (alt != tp1->whoTo) {
+ /* yes, there is an alternate. */
+ sctp_free_remote_addr(tp1->whoTo);
+ tp1->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ }
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ } /* while (tp1) */
+
+ if (tot_retrans > 0) {
+ /*
+		 * Set up the ecn nonce re-sync point. We do this since once
+		 * we FR something, we introduce a Karn's rule scenario and
+		 * won't know the totals for the ECN bits.
+ */
+ asoc->nonce_resync_tsn = sending_seq;
+ asoc->nonce_wait_for_ecne = 0;
+ asoc->nonce_sum_check = 0;
+ }
+}
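+
+/*
+ * Illustrative sketch only: the "strike" accounting used in
+ * sctp_strike_gap_ack_chunks() above. Each SACK that qualifies under the
+ * HTNA/SFR/DAC rules bumps tp1->sent by one; reaching the resend
+ * threshold marks the chunk for fast retransmit. The threshold constant
+ * here is a hypothetical stand-in for SCTP_DATAGRAM_RESEND.
+ */
+#define	EXAMPLE_DATAGRAM_RESEND	4	/* hypothetical value */
+
+static int
+example_strike(int *sent)
+{
+	if (*sent < EXAMPLE_DATAGRAM_RESEND)
+		(*sent)++;
+	return (*sent == EXAMPLE_DATAGRAM_RESEND);	/* ready for FR? */
+}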
+
+struct sctp_tmit_chunk *
+sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
+ struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
+ struct timeval now;
+ int now_filled = 0;
+
+ if (asoc->peer_supports_prsctp == 0) {
+ return (NULL);
+ }
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ while (tp1) {
+ if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
+ tp1->sent != SCTP_DATAGRAM_RESEND) {
+ /* no chance to advance, out of here */
+ break;
+ }
+ if (!PR_SCTP_ENABLED(tp1->flags)) {
+ /*
+ * We can't fwd-tsn past any that are reliable aka
+ * retransmitted until the asoc fails.
+ */
+ break;
+ }
+ if (!now_filled) {
+ SCTP_GETTIME_TIMEVAL(&now);
+ now_filled = 1;
+ }
+ tp2 = TAILQ_NEXT(tp1, sctp_next);
+ /*
+		 * now we have a chunk which is marked for another
+		 * retransmission to a PR-stream but has maybe run out of
+		 * its chances already OR has been marked to skip now. Can
+		 * we skip it if it's a resend?
+ */
+ if (tp1->sent == SCTP_DATAGRAM_RESEND &&
+ (PR_SCTP_TTL_ENABLED(tp1->flags))) {
+ /*
+ * Now is this one marked for resend and its time is
+ * now up?
+ */
+ if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
+ /* Yes so drop it */
+ if (tp1->data) {
+ sctp_release_pr_sctp_chunk(stcb, tp1,
+ (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
+ &asoc->sent_queue);
+ }
+ } else {
+ /*
+				 * No, we are done when we hit one marked for
+				 * resend whose time has not expired.
+ */
+ break;
+ }
+ }
+ /*
+ * Ok now if this chunk is marked to drop it we can clean up
+ * the chunk, advance our peer ack point and we can check
+ * the next chunk.
+ */
+ if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
+			/* the advanced peer-ack point goes forward */
+ asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
+ a_adv = tp1;
+ /*
+ * we don't want to de-queue it here. Just wait for
+ * the next peer SACK to come with a new cumTSN and
+			 * then the chunk will be dropped in the normal
+ * fashion.
+ */
+ if (tp1->data) {
+ sctp_free_bufspace(stcb, asoc, tp1, 1);
+ /*
+ * Maybe there should be another
+ * notification type
+ */
+ sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
+ (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
+ tp1);
+ sctp_m_freem(tp1->data);
+ tp1->data = NULL;
+ if (stcb->sctp_socket) {
+ sctp_sowwakeup(stcb->sctp_ep,
+ stcb->sctp_socket);
+#ifdef SCTP_WAKE_LOGGING
+ sctp_wakeup_log(stcb, tp1->rec.data.TSN_seq, 1, SCTP_WAKESND_FROM_FWDTSN);
+#endif
+ }
+ }
+ } else {
+ /*
+ * If it is still in RESEND we can advance no
+ * further
+ */
+ break;
+ }
+ /*
+ * If we hit here we just dumped tp1, move to next tsn on
+ * sent queue.
+ */
+ tp1 = tp2;
+ }
+ return (a_adv);
+}
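+
+/*
+ * Illustrative sketch only: the PR-SCTP rule applied in
+ * sctp_try_advance_peer_ack_point() above. The advanced peer-ack point
+ * may move forward only across chunks already marked to be skipped
+ * (abandoned); the first chunk that must still be delivered reliably
+ * stops the scan.
+ */
+static uint32_t
+example_advance_ack_point(uint32_t ack_point, uint32_t *tsns,
+    int *skippable, int n)
+{
+	int i;
+
+	for (i = 0; i < n; i++) {
+		if (!skippable[i])
+			break;	/* cannot FWD-TSN past a reliable chunk */
+		ack_point = tsns[i];
+	}
+	return (ack_point);
+}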
+
+#ifdef SCTP_HIGH_SPEED
+struct sctp_hs_raise_drop {
+ int32_t cwnd;
+ int32_t increase;
+ int32_t drop_percent;
+};
+
+#define SCTP_HS_TABLE_SIZE 73
+
+struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
+ {38, 1, 50}, /* 0 */
+ {118, 2, 44}, /* 1 */
+ {221, 3, 41}, /* 2 */
+ {347, 4, 38}, /* 3 */
+ {495, 5, 37}, /* 4 */
+ {663, 6, 35}, /* 5 */
+ {851, 7, 34}, /* 6 */
+ {1058, 8, 33}, /* 7 */
+ {1284, 9, 32}, /* 8 */
+ {1529, 10, 31}, /* 9 */
+ {1793, 11, 30}, /* 10 */
+ {2076, 12, 29}, /* 11 */
+ {2378, 13, 28}, /* 12 */
+ {2699, 14, 28}, /* 13 */
+ {3039, 15, 27}, /* 14 */
+ {3399, 16, 27}, /* 15 */
+ {3778, 17, 26}, /* 16 */
+ {4177, 18, 26}, /* 17 */
+ {4596, 19, 25}, /* 18 */
+ {5036, 20, 25}, /* 19 */
+ {5497, 21, 24}, /* 20 */
+ {5979, 22, 24}, /* 21 */
+ {6483, 23, 23}, /* 22 */
+ {7009, 24, 23}, /* 23 */
+ {7558, 25, 22}, /* 24 */
+ {8130, 26, 22}, /* 25 */
+ {8726, 27, 22}, /* 26 */
+ {9346, 28, 21}, /* 27 */
+ {9991, 29, 21}, /* 28 */
+ {10661, 30, 21}, /* 29 */
+ {11358, 31, 20}, /* 30 */
+ {12082, 32, 20}, /* 31 */
+ {12834, 33, 20}, /* 32 */
+ {13614, 34, 19}, /* 33 */
+ {14424, 35, 19}, /* 34 */
+ {15265, 36, 19}, /* 35 */
+ {16137, 37, 19}, /* 36 */
+ {17042, 38, 18}, /* 37 */
+ {17981, 39, 18}, /* 38 */
+ {18955, 40, 18}, /* 39 */
+ {19965, 41, 17}, /* 40 */
+ {21013, 42, 17}, /* 41 */
+ {22101, 43, 17}, /* 42 */
+ {23230, 44, 17}, /* 43 */
+ {24402, 45, 16}, /* 44 */
+ {25618, 46, 16}, /* 45 */
+ {26881, 47, 16}, /* 46 */
+ {28193, 48, 16}, /* 47 */
+ {29557, 49, 15}, /* 48 */
+ {30975, 50, 15}, /* 49 */
+ {32450, 51, 15}, /* 50 */
+ {33986, 52, 15}, /* 51 */
+ {35586, 53, 14}, /* 52 */
+ {37253, 54, 14}, /* 53 */
+ {38992, 55, 14}, /* 54 */
+ {40808, 56, 14}, /* 55 */
+ {42707, 57, 13}, /* 56 */
+ {44694, 58, 13}, /* 57 */
+ {46776, 59, 13}, /* 58 */
+ {48961, 60, 13}, /* 59 */
+ {51258, 61, 13}, /* 60 */
+ {53677, 62, 12}, /* 61 */
+ {56230, 63, 12}, /* 62 */
+ {58932, 64, 12}, /* 63 */
+ {61799, 65, 12}, /* 64 */
+ {64851, 66, 11}, /* 65 */
+ {68113, 67, 11}, /* 66 */
+ {71617, 68, 11}, /* 67 */
+ {75401, 69, 10}, /* 68 */
+ {79517, 70, 10}, /* 69 */
+ {84035, 71, 10}, /* 70 */
+ {89053, 72, 10}, /* 71 */
+ {94717, 73, 9} /* 72 */
+};
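+
+/*
+ * Illustrative sketch only: how the table above is consulted by the
+ * functions below. cwnd is scaled down to kilobytes and the first row
+ * whose cwnd bound exceeds that value selects the additive increase
+ * (shifted back up by 10) and the drop percentage applied on loss.
+ */
+static int
+example_hs_table_index(int cwnd_kb)
+{
+	int i;
+
+	for (i = 0; i < SCTP_HS_TABLE_SIZE; i++) {
+		if (cwnd_kb < sctp_cwnd_adjust[i].cwnd)
+			return (i);
+	}
+	return (SCTP_HS_TABLE_SIZE - 1);
+}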
+
+static void
+sctp_hs_cwnd_increase(struct sctp_nets *net)
+{
+ int cur_val, i, indx, incr;
+
+ cur_val = net->cwnd >> 10;
+ indx = SCTP_HS_TABLE_SIZE - 1;
+
+ if (cur_val < sctp_cwnd_adjust[0].cwnd) {
+ /* normal mode */
+ if (net->net_ack > net->mtu) {
+ net->cwnd += net->mtu;
+#ifdef SCTP_CWND_MONITOR
+ sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS);
+#endif
+ } else {
+ net->cwnd += net->net_ack;
+#ifdef SCTP_CWND_MONITOR
+ sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
+#endif
+ }
+ } else {
+ for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
+ if (cur_val < sctp_cwnd_adjust[i].cwnd) {
+ indx = i;
+ break;
+ }
+ }
+ net->last_hs_used = indx;
+ incr = ((sctp_cwnd_adjust[indx].increase) << 10);
+ net->cwnd += incr;
+#ifdef SCTP_CWND_MONITOR
+ sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS);
+#endif
+ }
+}
+
+static void
+sctp_hs_cwnd_decrease(struct sctp_nets *net)
+{
+ int cur_val, i, indx;
+
+#ifdef SCTP_CWND_MONITOR
+ int old_cwnd = net->cwnd;
+
+#endif
+
+ cur_val = net->cwnd >> 10;
+ indx = net->last_hs_used;
+ if (cur_val < sctp_cwnd_adjust[0].cwnd) {
+ /* normal mode */
+ net->ssthresh = net->cwnd / 2;
+ if (net->ssthresh < (net->mtu * 2)) {
+ net->ssthresh = 2 * net->mtu;
+ }
+ net->cwnd = net->ssthresh;
+ } else {
+ /* drop by the proper amount */
+ net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
+ sctp_cwnd_adjust[net->last_hs_used].drop_percent);
+ net->cwnd = net->ssthresh;
+ /* now where are we */
+ indx = net->last_hs_used;
+ cur_val = net->cwnd >> 10;
+ /* reset where we are in the table */
+ if (cur_val < sctp_cwnd_adjust[0].cwnd) {
+			/* fell out of hs */
+ net->last_hs_used = 0;
+ } else {
+ for (i = indx; i >= 1; i--) {
+ if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
+ break;
+ }
+ }
+ net->last_hs_used = indx;
+ }
+ }
+#ifdef SCTP_CWND_MONITOR
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
+#endif
+
+}
+
+#endif
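+
+/*
+ * Illustrative sketch only: the Karn's-rule RTO restore done at the end
+ * of sctp_cwnd_update() below, shown as a stand-alone helper. The
+ * unclamped value ((lastsa >> 2) + lastsv) >> 1 discards any exponential
+ * backoff and is then bounded by the association's minrto and maxrto.
+ */
+static uint32_t
+example_restore_rto(int lastsa, int lastsv, uint32_t minrto,
+    uint32_t maxrto)
+{
+	uint32_t rto = ((lastsa >> 2) + lastsv) >> 1;
+
+	if (rto < minrto)
+		rto = minrto;
+	if (rto > maxrto)
+		rto = maxrto;
+	return (rto);
+}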
+
+extern int sctp_early_fr;
+extern int sctp_L2_abc_variable;
+
+
+static __inline void
+sctp_cwnd_update(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int accum_moved, int reneged_all, int will_exit)
+{
+ struct sctp_nets *net;
+
+ /******************************/
+ /* update cwnd and Early FR */
+ /******************************/
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+#ifdef JANA_CODE_WHY_THIS
+ /*
+ * CMT fast recovery code. Need to debug.
+ */
+ if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
+ if (compare_with_wrap(asoc->last_acked_seq,
+ net->fast_recovery_tsn, MAX_TSN) ||
+ (asoc->last_acked_seq == net->fast_recovery_tsn) ||
+ compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
+ (net->pseudo_cumack == net->fast_recovery_tsn)) {
+ net->will_exit_fast_recovery = 1;
+ }
+ }
+#endif
+ if (sctp_early_fr) {
+ /*
+			 * So, first of all do we need to have an Early FR
+ * timer running?
+ */
+ if (((TAILQ_FIRST(&asoc->sent_queue)) &&
+ (net->ref_count > 1) &&
+ (net->flight_size < net->cwnd)) ||
+ (reneged_all)) {
+ /*
+				 * yes, so in this case stop it if it's
+				 * running, and then restart it. Reneging
+				 * all is a special case where we want to
+				 * run the Early FR timer and then force the
+				 * last few unacked to be sent, causing us
+				 * to elicit a sack with gaps to force out
+				 * the others.
+ */
+ if (callout_pending(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
+ }
+ SCTP_STAT_INCR(sctps_earlyfrstrid);
+ sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
+ } else {
+				/* No, stop it if it's running */
+ if (callout_pending(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
+ }
+ }
+ }
+ /* if nothing was acked on this destination skip it */
+ if (net->net_ack == 0) {
+#ifdef SCTP_CWND_LOGGING
+ sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
+#endif
+ continue;
+ }
+ if (net->net_ack2 > 0) {
+ /*
+ * Karn's rule applies to clearing error count, this
+ * is optional.
+ */
+ net->error_count = 0;
+ if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
+ SCTP_ADDR_NOT_REACHABLE) {
+ /* addr came good */
+ net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
+ net->dest_state |= SCTP_ADDR_REACHABLE;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
+ SCTP_RECEIVED_SACK, (void *)net);
+ /* now was it the primary? if so restore */
+ if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
+ sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
+ }
+ }
+ }
+#ifdef JANA_CODE_WHY_THIS
+ /*
+ * Cannot skip for CMT. Need to come back and check these
+ * variables for CMT. CMT fast recovery code. Need to debug.
+ */
+ if (sctp_cmt_on_off == 1 &&
+ net->fast_retran_loss_recovery &&
+ net->will_exit_fast_recovery == 0)
+#endif
+ if (sctp_cmt_on_off == 0 && asoc->fast_retran_loss_recovery && will_exit == 0) {
+ /*
+ * If we are in loss recovery we skip any
+ * cwnd update
+ */
+ goto skip_cwnd_update;
+ }
+ /*
+ * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
+ * moved.
+ */
+ if (accum_moved || (sctp_cmt_on_off && net->new_pseudo_cumack)) {
+ /* If the cumulative ack moved we can proceed */
+ if (net->cwnd <= net->ssthresh) {
+ /* We are in slow start */
+ if (net->flight_size + net->net_ack >=
+ net->cwnd) {
+#ifdef SCTP_HIGH_SPEED
+					sctp_hs_cwnd_increase(stcb, net);
+#else
+ if (net->net_ack > (net->mtu * sctp_L2_abc_variable)) {
+ net->cwnd += (net->mtu * sctp_L2_abc_variable);
+#ifdef SCTP_CWND_MONITOR
+ sctp_log_cwnd(stcb, net, net->mtu,
+ SCTP_CWND_LOG_FROM_SS);
+#endif
+
+ } else {
+ net->cwnd += net->net_ack;
+#ifdef SCTP_CWND_MONITOR
+ sctp_log_cwnd(stcb, net, net->net_ack,
+ SCTP_CWND_LOG_FROM_SS);
+#endif
+
+ }
+#endif
+ } else {
+ unsigned int dif;
+
+ dif = net->cwnd - (net->flight_size +
+ net->net_ack);
+#ifdef SCTP_CWND_LOGGING
+ sctp_log_cwnd(stcb, net, net->net_ack,
+ SCTP_CWND_LOG_NOADV_SS);
+#endif
+ }
+ } else {
+ /* We are in congestion avoidance */
+ if (net->flight_size + net->net_ack >=
+ net->cwnd) {
+ /*
+ * add to pba only if we had a
+ * cwnd's worth (or so) in flight OR
+ * the burst limit was applied.
+ */
+ net->partial_bytes_acked +=
+ net->net_ack;
+
+ /*
+ * Do we need to increase (if pba is
+ * > cwnd)?
+ */
+ if (net->partial_bytes_acked >=
+ net->cwnd) {
+ if (net->cwnd <
+ net->partial_bytes_acked) {
+ net->partial_bytes_acked -=
+ net->cwnd;
+ } else {
+ net->partial_bytes_acked =
+ 0;
+ }
+ net->cwnd += net->mtu;
+#ifdef SCTP_CWND_MONITOR
+ sctp_log_cwnd(stcb, net, net->mtu,
+ SCTP_CWND_LOG_FROM_CA);
+#endif
+ }
+#ifdef SCTP_CWND_LOGGING
+ else {
+ sctp_log_cwnd(stcb, net, net->net_ack,
+ SCTP_CWND_LOG_NOADV_CA);
+ }
+#endif
+ } else {
+ unsigned int dif;
+
+#ifdef SCTP_CWND_LOGGING
+ sctp_log_cwnd(stcb, net, net->net_ack,
+ SCTP_CWND_LOG_NOADV_CA);
+#endif
+ dif = net->cwnd - (net->flight_size +
+ net->net_ack);
+ }
+ }
+ } else {
+#ifdef SCTP_CWND_LOGGING
+ sctp_log_cwnd(stcb, net, net->mtu,
+ SCTP_CWND_LOG_NO_CUMACK);
+#endif
+ }
+skip_cwnd_update:
+ /*
+ * NOW, according to Karn's rule do we need to restore the
+ * RTO timer back? Check our net_ack2. If not set then we
+	 * have an ambiguity, i.e. all data ack'd was sent to more
+ * than one place.
+ */
+ if (net->net_ack2) {
+ /* restore any doubled timers */
+ net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
+ if (net->RTO < stcb->asoc.minrto) {
+ net->RTO = stcb->asoc.minrto;
+ }
+ if (net->RTO > stcb->asoc.maxrto) {
+ net->RTO = stcb->asoc.maxrto;
+ }
+ }
+ }
+}
+
+
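+/*
+ * Express (fast path) SACK processing: handles a pure cumulative ack
+ * with no gap-ack blocks, e.g. the cum-ack carried in a SHUTDOWN chunk
+ * (see sctp_update_acked()). It frees newly acked chunks, updates
+ * cwnd/rwnd, timers and the shutdown edge cases, but skips all of the
+ * gap/revoke/strike work done by sctp_handle_sack().
+ */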
+void
+sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
+ uint32_t rwnd, int nonce_sum_flag, int *abort_now)
+{
+ struct sctp_nets *net;
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *tp1, *tp2;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ asoc = &stcb->asoc;
+ /* First setup for CC stuff */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->prev_cwnd = net->cwnd;
+ net->net_ack = 0;
+ net->net_ack2 = 0;
+ }
+ asoc->this_sack_highest_gap = cumack;
+ stcb->asoc.overall_error_count = 0;
+ /* process the new consecutive TSN first */
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ while (tp1) {
+ tp2 = TAILQ_NEXT(tp1, sctp_next);
+ if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
+ MAX_TSN) ||
+ cumack == tp1->rec.data.TSN_seq) {
+ if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
+ /*
+ * ECN Nonce: Add the nonce to the sender's
+ * nonce sum
+ */
+ asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
+ if (tp1->sent < SCTP_DATAGRAM_ACKED) {
+ /*
+ * If it is less than ACKED, it is
+				 * now no longer in flight. Higher
+ * values may occur during marking
+ */
+ if (tp1->rec.data.chunk_was_revoked == 1) {
+ /*
+					 * If it's been revoked, and
+ * now ack'd we do NOT take
+ * away fs etc. since when
+ * it is retransmitted we
+ * clear this flag.
+ */
+ goto skip_fs_update;
+ }
+ if (tp1->whoTo->flight_size >= tp1->book_size) {
+ tp1->whoTo->flight_size -= tp1->book_size;
+ } else {
+ tp1->whoTo->flight_size = 0;
+ }
+ if (asoc->total_flight >= tp1->book_size) {
+ asoc->total_flight -= tp1->book_size;
+ if (asoc->total_flight_count > 0)
+ asoc->total_flight_count--;
+ } else {
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ }
+ tp1->whoTo->net_ack += tp1->send_size;
+ if (tp1->snd_count < 2) {
+ /*
+					 * True non-retransmitted
+ * chunk
+ */
+ tp1->whoTo->net_ack2 +=
+ tp1->send_size;
+
+ /* update RTO too? */
+ if ((tp1->do_rtt) && (tp1->whoTo->rto_pending)) {
+ tp1->whoTo->RTO =
+ sctp_calculate_rto(stcb,
+ asoc, tp1->whoTo,
+ &tp1->sent_rcv_time);
+ tp1->whoTo->rto_pending = 0;
+ tp1->do_rtt = 0;
+ }
+ }
+#ifdef SCTP_CWND_LOGGING
+ sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
+#endif
+ }
+ skip_fs_update:
+ if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_decr(asoc->sent_queue_retran_cnt);
+ }
+ tp1->sent = SCTP_DATAGRAM_ACKED;
+ }
+ } else {
+ break;
+ }
+ TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
+ if (tp1->data) {
+ sctp_free_bufspace(stcb, asoc, tp1, 1);
+ sctp_m_freem(tp1->data);
+ }
+#ifdef SCTP_SACK_LOGGING
+ sctp_log_sack(asoc->last_acked_seq,
+ cumack,
+ tp1->rec.data.TSN_seq,
+ 0,
+ 0,
+ SCTP_LOG_FREE_SENT);
+#endif
+ tp1->data = NULL;
+ asoc->sent_queue_cnt--;
+ sctp_free_remote_addr(tp1->whoTo);
+ sctp_free_a_chunk(stcb, tp1);
+ tp1 = tp2;
+ }
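+	/* Wake any writers blocked on the send socket buffer. */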
+ if (stcb->sctp_socket) {
+ SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
+#ifdef SCTP_WAKE_LOGGING
+ sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
+#endif
+ sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
+#ifdef SCTP_WAKE_LOGGING
+ } else {
+ sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
+#endif
+ }
+
+ if (asoc->last_acked_seq != cumack)
+ sctp_cwnd_update(stcb, asoc, 1, 0, 0);
+ asoc->last_acked_seq = cumack;
+ if (TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* nothing left in-flight */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->flight_size = 0;
+ net->partial_bytes_acked = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ }
+ /* Fix up the a-p-a-p for future PR-SCTP sends */
+ if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
+ asoc->advanced_peer_ack_point = cumack;
+ }
+ /* ECN Nonce updates */
+ if (asoc->ecn_nonce_allowed) {
+ if (asoc->nonce_sum_check) {
+ if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
+ if (asoc->nonce_wait_for_ecne == 0) {
+ struct sctp_tmit_chunk *lchk;
+
+ lchk = TAILQ_FIRST(&asoc->send_queue);
+ asoc->nonce_wait_for_ecne = 1;
+ if (lchk) {
+ asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
+ } else {
+ asoc->nonce_wait_tsn = asoc->sending_seq;
+ }
+ } else {
+ if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
+ (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
+ /*
+ * Misbehaving peer. We need
+ * to react to this guy
+ */
+ asoc->ecn_allowed = 0;
+ asoc->ecn_nonce_allowed = 0;
+ }
+ }
+ }
+ } else {
+ /* See if Resynchronization Possible */
+ if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
+ asoc->nonce_sum_check = 1;
+ /*
+			 * now we must calculate what the base is.
+			 * We do this based on two things: we know
+			 * the totals for all the segments
+			 * gap-acked in the SACK (none here), and we
+			 * also know the SACK's nonce sum, it's in
+			 * nonce_sum_flag. So we can build a truth
+			 * table to back-calculate the new value of
+			 * asoc->nonce_sum_expect_base
+			 * (i.e. Base = SACK-flag XOR Seg-Sums):
+			 *
+			 * SACK-flag-Value  Seg-Sums  Base
+			 *        0            0       0
+			 *        1            0       1
+			 *        0            1       1
+			 *        1            1       0
+			 */
+ asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
+ }
+ }
+ }
+ /* RWND update */
+ asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
+ (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ /* Now assure a timer where data is queued at */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (net->flight_size) {
+ int to_ticks;
+
+ if (net->RTO == 0) {
+ to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = MSEC_TO_TICKS(net->RTO);
+ }
+ callout_reset(&net->rxt_timer.timer, to_ticks,
+ sctp_timeout_handler, &net->rxt_timer);
+ } else {
+ if (callout_pending(&net->rxt_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net);
+ }
+ if (sctp_early_fr) {
+ if (callout_pending(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
+ }
+ }
+ }
+ }
+
+ /**********************************/
+ /* Now what about shutdown issues */
+ /**********************************/
+ if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* nothing left on sendqueue.. consider done */
+ /* clean up */
+ if ((asoc->stream_queue_cnt == 1) &&
+ ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
+ (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
+ (asoc->locked_on_sending)
+ ) {
+ struct sctp_stream_queue_pending *sp;
+
+ /*
+ * I may be in a state where we got all across.. but
+ * cannot write more due to a shutdown... we abort
+ * since the user did not indicate EOR in this case.
+ * The sp will be cleaned during free of the asoc.
+ */
+ sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
+ sctp_streamhead);
+ if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) {
+ asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+ asoc->locked_on_sending = NULL;
+ asoc->stream_queue_cnt--;
+ }
+ }
+ if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
+ /* Need to abort here */
+ struct mbuf *oper;
+
+ abort_out_now:
+ *abort_now = 1;
+ /* XXX */
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len = sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x30000003);
+ }
+ sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper);
+ } else {
+ asoc->state = SCTP_STATE_SHUTDOWN_SENT;
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ sctp_stop_timers_for_shutdown(stcb);
+ sctp_send_shutdown(stcb,
+ stcb->asoc.primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ }
+ } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
+ goto abort_out_now;
+ }
+ asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ sctp_send_shutdown_ack(stcb,
+ stcb->asoc.primary_destination);
+
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ }
+ }
+#ifdef SCTP_SACK_RWND_LOGGING
+ sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
+ rwnd,
+ stcb->asoc.peers_rwnd,
+ stcb->asoc.total_flight,
+ stcb->asoc.total_output_queue_size);
+
+#endif
+}
+
+
+
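+/*
+ * Full (slow path) SACK processing, counted via sctps_slowpath_sack:
+ * validates the chunk, walks the cumulative ack and any gap-ack blocks,
+ * frees acked chunks, checks for revoked chunks, updates cwnd/rwnd,
+ * runs the fast-retransmit strike logic and PR-SCTP advancement, and
+ * handles the shutdown states.
+ */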
+void
+sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
+ struct sctp_nets *net_from, int *abort_now)
+{
+ struct sctp_association *asoc;
+ struct sctp_sack *sack;
+ struct sctp_tmit_chunk *tp1, *tp2;
+ uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
+ this_sack_lowest_newack;
+ uint16_t num_seg, num_dup;
+ uint16_t wake_him = 0;
+ unsigned int sack_length;
+ uint32_t send_s;
+ long j;
+ int accum_moved = 0;
+ int will_exit_fast_recovery = 0;
+ uint32_t a_rwnd;
+ struct sctp_nets *net = NULL;
+ int nonce_sum_flag, ecn_seg_sums = 0;
+ uint8_t reneged_all = 0;
+ uint8_t cmt_dac_flag;
+
+ /*
+ * we take any chance we can to service our queues since we cannot
+ * get awoken when the socket is read from :<
+ */
+	/*
+	 * Now perform the actual SACK handling:
+	 *  1) Verify that it is not an old sack; if so discard.
+	 *  2) If there is nothing left in the send queue (cum-ack is equal
+	 *     to last acked) then you have a duplicate too; update any rwnd
+	 *     change and verify no timers are running, then return.
+	 *  3) Process any new consecutive data, i.e. cum-ack moved; process
+	 *     these first and note that it moved.
+	 *  4) Process any sack blocks.
+	 *  5) Drop any acked chunks from the queue.
+	 *  6) Check for any revoked blocks and mark them.
+	 *  7) Update the cwnd.
+	 *  8) Nothing left? Sync up flight sizes and things, stop all timers
+	 *     and also check for shutdown_pending state. If so then go ahead
+	 *     and send off the shutdown. If in shutdown recv, send off the
+	 *     shutdown-ack and start that timer; return.
+	 *  9) Strike any non-acked things and do FR procedure if needed,
+	 *     being sure to set the FR flag.
+	 * 10) Do pr-sctp procedures.
+	 * 11) Apply any FR penalties.
+	 * 12) Assure we will SACK if in shutdown_recv state.
+	 */
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ sack = &ch->sack;
+ /* CMT DAC algo */
+ this_sack_lowest_newack = 0;
+ j = 0;
+ sack_length = ntohs(ch->ch.chunk_length);
+ if (sack_length < sizeof(struct sctp_sack_chunk)) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
+			printf("Bad size on sack chunk .. too small\n");
+ }
+#endif
+ return;
+ }
+ /* ECN Nonce */
+ SCTP_STAT_INCR(sctps_slowpath_sack);
+ nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
+ cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
+ num_seg = ntohs(sack->num_gap_ack_blks);
+ a_rwnd = (uint32_t) ntohl(sack->a_rwnd);
+
+ /* CMT DAC algo */
+ cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
+ num_dup = ntohs(sack->num_dup_tsns);
+
+
+ stcb->asoc.overall_error_count = 0;
+ asoc = &stcb->asoc;
+#ifdef SCTP_SACK_LOGGING
+ sctp_log_sack(asoc->last_acked_seq,
+ cum_ack,
+ 0,
+ num_seg,
+ num_dup,
+ SCTP_LOG_NEW_SACK);
+#endif
+#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
+ if (num_dup) {
+ int off_to_dup, iii;
+ uint32_t *dupdata;
+
+ off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk);
+ if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) {
+ dupdata = (uint32_t *) ((caddr_t)ch + off_to_dup);
+ for (iii = 0; iii < num_dup; iii++) {
+ sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
+ dupdata++;
+
+ }
+ } else {
+ printf("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n",
+ off_to_dup, num_dup, sack_length, num_seg);
+ }
+ }
+#endif
+ /* reality check */
+ if (TAILQ_EMPTY(&asoc->send_queue)) {
+ send_s = asoc->sending_seq;
+ } else {
+ tp1 = TAILQ_FIRST(&asoc->send_queue);
+ send_s = tp1->rec.data.TSN_seq;
+ }
+
+ if (sctp_strict_sacks) {
+ if (cum_ack == send_s ||
+ compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
+ struct mbuf *oper;
+
+ /*
+ * no way, we have not even sent this TSN out yet.
+ * Peer is hopelessly messed up with us.
+ */
+ hopeless_peer:
+ *abort_now = 1;
+ /* XXX */
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len = sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x30000002);
+ }
+ sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper);
+ return;
+ }
+ }
+ /**********************/
+ /* 1) check the range */
+ /**********************/
+ if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
+ /* acking something behind */
+ return;
+ }
+ /* update the Rwnd of the peer */
+ if (TAILQ_EMPTY(&asoc->sent_queue) &&
+ TAILQ_EMPTY(&asoc->send_queue) &&
+ (asoc->stream_queue_cnt == 0)
+ ) {
+ /* nothing left on send/sent and strmq */
+#ifdef SCTP_LOG_RWND
+ sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
+ asoc->peers_rwnd, 0, 0, a_rwnd);
+#endif
+ asoc->peers_rwnd = a_rwnd;
+ if (asoc->sent_queue_retran_cnt) {
+ asoc->sent_queue_retran_cnt = 0;
+ }
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ /* stop any timers */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net);
+ if (sctp_early_fr) {
+ if (callout_pending(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
+ }
+ }
+ net->partial_bytes_acked = 0;
+ net->flight_size = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ return;
+ }
+ /*
+	 * We init net_ack and net_ack2 to 0. These are used to track two
+	 * things: the total byte count acked is tracked in net_ack, AND
+	 * net_ack2 is used to track the total bytes acked that are
+	 * unambiguous and were never retransmitted. We track these on a
+	 * per destination address basis.
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->prev_cwnd = net->cwnd;
+ net->net_ack = 0;
+ net->net_ack2 = 0;
+
+ /*
+ * CMT: Reset CUC algo variable before SACK processing
+ */
+ net->new_pseudo_cumack = 0;
+ net->will_exit_fast_recovery = 0;
+ }
+ /* process the new consecutive TSN first */
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ while (tp1) {
+ if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
+ MAX_TSN) ||
+ last_tsn == tp1->rec.data.TSN_seq) {
+ if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
+ /*
+ * ECN Nonce: Add the nonce to the sender's
+ * nonce sum
+ */
+ asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
+ accum_moved = 1;
+ if (tp1->sent < SCTP_DATAGRAM_ACKED) {
+ /*
+ * If it is less than ACKED, it is
+				 * now no longer in flight. Higher
+ * values may occur during marking
+ */
+ if ((tp1->whoTo->dest_state &
+ SCTP_ADDR_UNCONFIRMED) &&
+ (tp1->snd_count < 2)) {
+ /*
+ * If there was no retran
+ * and the address is
+ * un-confirmed and we sent
+ * there and are now
+ * sacked.. its confirmed,
+ * mark it so.
+ */
+ tp1->whoTo->dest_state &=
+ ~SCTP_ADDR_UNCONFIRMED;
+ }
+ if (tp1->rec.data.chunk_was_revoked == 1) {
+ /*
+					 * If it's been revoked, and
+ * now ack'd we do NOT take
+ * away fs etc. since when
+ * it is retransmitted we
+ * clear this flag.
+ */
+ goto skip_fs_update;
+ }
+ if (tp1->whoTo->flight_size >= tp1->book_size) {
+ tp1->whoTo->flight_size -= tp1->book_size;
+ } else {
+ tp1->whoTo->flight_size = 0;
+ }
+ if (asoc->total_flight >= tp1->book_size) {
+ asoc->total_flight -= tp1->book_size;
+ if (asoc->total_flight_count > 0)
+ asoc->total_flight_count--;
+ } else {
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ }
+ tp1->whoTo->net_ack += tp1->send_size;
+
+ /* CMT SFR and DAC algos */
+ this_sack_lowest_newack = tp1->rec.data.TSN_seq;
+ tp1->whoTo->saw_newack = 1;
+
+ if (tp1->snd_count < 2) {
+ /*
+					 * True non-retransmitted
+ * chunk
+ */
+ tp1->whoTo->net_ack2 +=
+ tp1->send_size;
+
+ /* update RTO too? */
+ if (tp1->do_rtt) {
+ tp1->whoTo->RTO =
+ sctp_calculate_rto(stcb,
+ asoc, tp1->whoTo,
+ &tp1->sent_rcv_time);
+ tp1->whoTo->rto_pending = 0;
+ tp1->do_rtt = 0;
+ }
+ }
+ skip_fs_update:
+ /*
+ * CMT: CUCv2 algorithm. From the
+ * cumack'd TSNs, for each TSN being
+ * acked for the first time, set the
+ * following variables for the
+ * corresp destination.
+ * new_pseudo_cumack will trigger a
+ * cwnd update.
+ * find_(rtx_)pseudo_cumack will
+ * trigger search for the next
+ * expected (rtx-)pseudo-cumack.
+ */
+ tp1->whoTo->new_pseudo_cumack = 1;
+ tp1->whoTo->find_pseudo_cumack = 1;
+ tp1->whoTo->find_rtx_pseudo_cumack = 1;
+
+
+#ifdef SCTP_SACK_LOGGING
+ sctp_log_sack(asoc->last_acked_seq,
+ cum_ack,
+ tp1->rec.data.TSN_seq,
+ 0,
+ 0,
+ SCTP_LOG_TSN_ACKED);
+#endif
+#ifdef SCTP_CWND_LOGGING
+ sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
+#endif
+ }
+ if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_decr(asoc->sent_queue_retran_cnt);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xB3,
+ (asoc->sent_queue_retran_cnt & 0x000000ff));
+#endif
+ }
+ tp1->sent = SCTP_DATAGRAM_ACKED;
+ }
+ } else {
+ break;
+ }
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ }
+ biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
+ /* always set this up to cum-ack */
+ asoc->this_sack_highest_gap = last_tsn;
+
+ if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) {
+
+ /* skip corrupt segments */
+ goto skip_segments;
+ }
+ if (num_seg > 0) {
+
+ /*
+ * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
+ * to be greater than the cumack. Also reset saw_newack to 0
+ * for all dests.
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->saw_newack = 0;
+ net->this_sack_highest_newack = last_tsn;
+ }
+
+ /*
+ * thisSackHighestGap will increase while handling NEW
+ * segments this_sack_highest_newack will increase while
+ * handling NEWLY ACKED chunks. this_sack_lowest_newack is
+ * used for CMT DAC algo. saw_newack will also change.
+ */
+ sctp_handle_segments(stcb, asoc, ch, last_tsn,
+ &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
+ num_seg, &ecn_seg_sums);
+
+ if (sctp_strict_sacks) {
+ /*
+ * validate the biggest_tsn_acked in the gap acks if
+ * strict adherence is wanted.
+ */
+ if ((biggest_tsn_acked == send_s) ||
+ (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
+ /*
+ * peer is either confused or we are under
+ * attack. We must abort.
+ */
+ goto hopeless_peer;
+ }
+ }
+ }
+skip_segments:
+ /*******************************************/
+ /* cancel ALL T3-send timer if accum moved */
+ /*******************************************/
+ if (sctp_cmt_on_off) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (net->new_pseudo_cumack)
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net);
+
+ }
+ } else {
+ if (accum_moved) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net);
+ }
+ }
+ }
+ /********************************************/
+ /* drop the acked chunks from the sendqueue */
+ /********************************************/
+ asoc->last_acked_seq = cum_ack;
+
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ if (tp1 == NULL)
+ goto done_with_it;
+ do {
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
+ MAX_TSN)) {
+ break;
+ }
+ if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
+ /* no more sent on list */
+ break;
+ }
+ tp2 = TAILQ_NEXT(tp1, sctp_next);
+ TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
+ /*
+		 * Friendlier printf in lieu of panic now that I think it's
+ * fixed
+ */
+
+ if (tp1->pr_sctp_on) {
+ if (asoc->pr_sctp_cnt != 0)
+ asoc->pr_sctp_cnt--;
+ }
+ if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
+ (asoc->total_flight > 0)) {
+			printf("Warning: flight size is %d but should be 0\n",
+ asoc->total_flight);
+ asoc->total_flight = 0;
+ }
+ if (tp1->data) {
+ sctp_free_bufspace(stcb, asoc, tp1, 1);
+ sctp_m_freem(tp1->data);
+ if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
+ asoc->sent_queue_cnt_removeable--;
+ }
+ }
+#ifdef SCTP_SACK_LOGGING
+ sctp_log_sack(asoc->last_acked_seq,
+ cum_ack,
+ tp1->rec.data.TSN_seq,
+ 0,
+ 0,
+ SCTP_LOG_FREE_SENT);
+#endif
+ tp1->data = NULL;
+ asoc->sent_queue_cnt--;
+ sctp_free_remote_addr(tp1->whoTo);
+
+ sctp_free_a_chunk(stcb, tp1);
+ wake_him++;
+ tp1 = tp2;
+ } while (tp1 != NULL);
+
+done_with_it:
+ if ((wake_him) && (stcb->sctp_socket)) {
+ SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
+#ifdef SCTP_WAKE_LOGGING
+ sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
+#endif
+ sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
+#ifdef SCTP_WAKE_LOGGING
+ } else {
+ sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
+#endif
+ }
+
+ if ((sctp_cmt_on_off == 0) && asoc->fast_retran_loss_recovery && accum_moved) {
+ if (compare_with_wrap(asoc->last_acked_seq,
+ asoc->fast_recovery_tsn, MAX_TSN) ||
+ asoc->last_acked_seq == asoc->fast_recovery_tsn) {
+ /* Setup so we will exit RFC2582 fast recovery */
+ will_exit_fast_recovery = 1;
+ }
+ }
+ /*
+ * Check for revoked fragments:
+ *
+	 * if previous sack had no frags, then we can't have any revoked.
+	 * if previous sack had frags, then:
+	 *   - if we now have frags (num_seg > 0), call
+	 *     sctp_check_for_revoked() to tell if the peer revoked some
+	 *     of them.
+	 *   - else the peer revoked all ACKED fragments, since we had
+	 *     some before and now we have NONE.
+ */
+
+ if (sctp_cmt_on_off) {
+ /*
+ * Don't check for revoked if CMT is ON. CMT causes
+ * reordering of data and acks (received on different
+ * interfaces) can be persistently reordered. Acking
+ * followed by apparent revoking and re-acking causes
+ * unexpected weird behavior. So, at this time, CMT does not
+ * respect renegs. Renegs will have to be recovered through
+ * a timeout. Not a big deal for such a rare event.
+ */
+ } else if (num_seg)
+ sctp_check_for_revoked(asoc, cum_ack, biggest_tsn_acked);
+ else if (asoc->saw_sack_with_frags) {
+ int cnt_revoked = 0;
+
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ if (tp1 != NULL) {
+ /* Peer revoked all dg's marked or acked */
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
+ (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
+ tp1->sent = SCTP_DATAGRAM_SENT;
+ cnt_revoked++;
+ }
+ }
+ if (cnt_revoked) {
+ reneged_all = 1;
+ }
+ }
+ asoc->saw_sack_with_frags = 0;
+ }
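+	/*
+	 * Remember whether this SACK carried gap reports; the revoked
+	 * check above consults this on the next SACK.
+	 */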
+ if (num_seg)
+ asoc->saw_sack_with_frags = 1;
+ else
+ asoc->saw_sack_with_frags = 0;
+
+
+ sctp_cwnd_update(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
+
+ if (TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* nothing left in-flight */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ /* stop all timers */
+ if (sctp_early_fr) {
+ if (callout_pending(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
+ }
+ }
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net);
+ net->flight_size = 0;
+ net->partial_bytes_acked = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ }
+ /**********************************/
+ /* Now what about shutdown issues */
+ /**********************************/
+ if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* nothing left on sendqueue.. consider done */
+#ifdef SCTP_LOG_RWND
+ sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
+ asoc->peers_rwnd, 0, 0, a_rwnd);
+#endif
+ asoc->peers_rwnd = a_rwnd;
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ /* clean up */
+ if ((asoc->stream_queue_cnt == 1) &&
+ ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
+ (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
+ (asoc->locked_on_sending)
+ ) {
+ struct sctp_stream_queue_pending *sp;
+
+ /*
+ * I may be in a state where we got all across.. but
+ * cannot write more due to a shutdown... we abort
+ * since the user did not indicate EOR in this case.
+ */
+ sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
+ sctp_streamhead);
+ if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) {
+ asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+ asoc->locked_on_sending = NULL;
+ asoc->stream_queue_cnt--;
+ }
+ }
+ if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
+ /* Need to abort here */
+ struct mbuf *oper;
+
+ abort_out_now:
+ *abort_now = 1;
+ /* XXX */
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len = sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x30000003);
+ }
+ sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper);
+ return;
+ } else {
+ asoc->state = SCTP_STATE_SHUTDOWN_SENT;
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ sctp_stop_timers_for_shutdown(stcb);
+ sctp_send_shutdown(stcb,
+ stcb->asoc.primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ }
+ return;
+ } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
+ goto abort_out_now;
+ }
+ asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ sctp_send_shutdown_ack(stcb,
+ stcb->asoc.primary_destination);
+
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ return;
+ }
+ }
+ /*
+ * Now here we are going to recycle net_ack for a different use...
+ * HEADS UP.
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->net_ack = 0;
+ }
+
+ /*
+ * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
+ * to be done. Setting this_sack_lowest_newack to the cum_ack will
+ * automatically ensure that.
+ */
+ if (sctp_cmt_on_off && sctp_cmt_use_dac && (cmt_dac_flag == 0)) {
+ this_sack_lowest_newack = cum_ack;
+ }
+ if (num_seg > 0) {
+ sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
+ biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
+ }
+ /*********************************************/
+ /* Here we perform PR-SCTP procedures */
+ /* (section 4.2) */
+ /*********************************************/
+ /* C1. update advancedPeerAckPoint */
+ if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
+ asoc->advanced_peer_ack_point = cum_ack;
+ }
+ /* C2. try to further move advancedPeerAckPoint ahead */
+
+ if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
+ struct sctp_tmit_chunk *lchk;
+
+ lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
+ /* C3. See if we need to send a Fwd-TSN */
+ if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
+ MAX_TSN)) {
+ /*
+ * ISSUE with ECN, see FWD-TSN processing for notes
+ * on issues that will occur when the ECN NONCE
+ * stuff is put into SCTP for cross checking.
+ */
+ send_forward_tsn(stcb, asoc);
+
+ /*
+ * ECN Nonce: Disable Nonce Sum check when FWD TSN
+ * is sent and store resync tsn
+ */
+ asoc->nonce_sum_check = 0;
+ asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
+ if (lchk) {
+ /* Assure a timer is up */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, lchk->whoTo);
+ }
+ }
+ }
+ /*
+ * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off == 1) &&
+ * (net->fast_retran_loss_recovery == 0)))
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if ((asoc->fast_retran_loss_recovery == 0) || (sctp_cmt_on_off == 1)) {
+ /* out of a RFC2582 Fast recovery window? */
+ if (net->net_ack > 0) {
+ /*
+ * per section 7.2.3, are there any
+ * destinations that had a fast retransmit
+ * to them. If so what we need to do is
+ * adjust ssthresh and cwnd.
+ */
+ struct sctp_tmit_chunk *lchk;
+
+#ifdef SCTP_HIGH_SPEED
+				sctp_hs_cwnd_decrease(stcb, net);
+#else
+#ifdef SCTP_CWND_MONITOR
+ int old_cwnd = net->cwnd;
+
+#endif
+ net->ssthresh = net->cwnd / 2;
+ if (net->ssthresh < (net->mtu * 2)) {
+ net->ssthresh = 2 * net->mtu;
+ }
+ net->cwnd = net->ssthresh;
+#ifdef SCTP_CWND_MONITOR
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
+ SCTP_CWND_LOG_FROM_FR);
+#endif
+#endif
+
+ lchk = TAILQ_FIRST(&asoc->send_queue);
+
+ net->partial_bytes_acked = 0;
+ /* Turn on fast recovery window */
+ asoc->fast_retran_loss_recovery = 1;
+ if (lchk == NULL) {
+ /* Mark end of the window */
+ asoc->fast_recovery_tsn = asoc->sending_seq - 1;
+ } else {
+ asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
+ }
+
+ /*
+ * CMT fast recovery -- per destination
+ * recovery variable.
+ */
+ net->fast_retran_loss_recovery = 1;
+
+ if (lchk == NULL) {
+ /* Mark end of the window */
+ net->fast_recovery_tsn = asoc->sending_seq - 1;
+ } else {
+ net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
+ }
+
+
+
+ /*
+ * Disable Nonce Sum Checking and store the
+ * resync tsn
+ */
+ asoc->nonce_sum_check = 0;
+ asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
+
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net);
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net);
+ }
+ } else if (net->net_ack > 0) {
+ /*
+ * Mark a peg that we WOULD have done a cwnd
+ * reduction but RFC2582 prevented this action.
+ */
+ SCTP_STAT_INCR(sctps_fastretransinrtt);
+ }
+ }
+
+
+ /******************************************************************
+ * Here we do the stuff with ECN Nonce checking.
+ * We basically check to see if the nonce sum flag was incorrect
+ * or if resynchronization needs to be done. Also if we catch a
+ * misbehaving receiver we give him the kick.
+ ******************************************************************/
+
+ if (asoc->ecn_nonce_allowed) {
+ if (asoc->nonce_sum_check) {
+ if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
+ if (asoc->nonce_wait_for_ecne == 0) {
+ struct sctp_tmit_chunk *lchk;
+
+ lchk = TAILQ_FIRST(&asoc->send_queue);
+ asoc->nonce_wait_for_ecne = 1;
+ if (lchk) {
+ asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
+ } else {
+ asoc->nonce_wait_tsn = asoc->sending_seq;
+ }
+ } else {
+ if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
+ (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
+ /*
+ * Misbehaving peer. We need
+ * to react to this guy
+ */
+ asoc->ecn_allowed = 0;
+ asoc->ecn_nonce_allowed = 0;
+ }
+ }
+ }
+ } else {
+ /* See if Resynchronization Possible */
+ if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
+ asoc->nonce_sum_check = 1;
+ /*
+				 * now we must calculate what the base is.
+				 * We do this based on two things: we know
+				 * the totals for all the segments
+				 * gap-acked in the SACK, stored in
+				 * ecn_seg_sums. We also know the SACK's
+				 * nonce sum, it's in nonce_sum_flag. So we
+				 * can build a truth table to back-calculate
+				 * the new value of
+				 * asoc->nonce_sum_expect_base
+				 * (i.e. Base = SACK-flag XOR Seg-Sums):
+				 *
+				 * SACK-flag-Value  Seg-Sums  Base
+				 *        0            0       0
+				 *        1            0       1
+				 *        0            1       1
+				 *        1            1       0
+ */
+ asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
+ }
+ }
+ }
+ /* Now are we exiting loss recovery ? */
+ if (will_exit_fast_recovery) {
+ /* Ok, we must exit fast recovery */
+ asoc->fast_retran_loss_recovery = 0;
+ }
+ if ((asoc->sat_t3_loss_recovery) &&
+ ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
+ MAX_TSN) ||
+ (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
+ /* end satellite t3 loss recovery */
+ asoc->sat_t3_loss_recovery = 0;
+ }
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (net->will_exit_fast_recovery) {
+ /* Ok, we must exit fast recovery */
+ net->fast_retran_loss_recovery = 0;
+ }
+ }
+
+ /* Adjust and set the new rwnd value */
+#ifdef SCTP_LOG_RWND
+ sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
+ asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd);
+#endif
+
+ asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
+ (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ /*
+ * Now we must setup so we have a timer up for anyone with
+ * outstanding data.
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (net->flight_size) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net);
+ }
+ }
+#ifdef SCTP_SACK_RWND_LOGGING
+ sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
+ a_rwnd,
+ stcb->asoc.peers_rwnd,
+ stcb->asoc.total_flight,
+ stcb->asoc.total_output_queue_size);
+
+#endif
+
+}
+
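+/*
+ * Called for an arriving SHUTDOWN chunk: treat its cumulative TSN ack
+ * as an express SACK. The advertised rwnd is synthesized as the stored
+ * peers_rwnd plus the bytes still in flight so that, as noted below,
+ * a_rwnd effectively does not change.
+ */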
+void
+sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
+ struct sctp_nets *netp, int *abort_flag)
+{
+ /* Copy cum-ack */
+ uint32_t cum_ack, a_rwnd;
+
+ cum_ack = ntohl(cp->cumulative_tsn_ack);
+ /* Arrange so a_rwnd does NOT change */
+ a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
+
+ /* Now call the express sack handling */
+ sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
+}
+
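+/*
+ * After a FWD-TSN has bumped a stream's last_sequence_delivered, flush
+ * that stream's re-ordering queue: first hand up everything at or
+ * before the new sequence number, then resume normal in-order delivery
+ * for whatever became deliverable behind it.
+ */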
+static void
+sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
+ struct sctp_stream_in *strmin)
+{
+ struct sctp_queued_to_read *ctl, *nctl;
+ struct sctp_association *asoc;
+ int tt;
+
+ asoc = &stcb->asoc;
+ tt = strmin->last_sequence_delivered;
+ /*
+	 * First deliver anything at or before the stream sequence number
+	 * that came in
+ */
+ ctl = TAILQ_FIRST(&strmin->inqueue);
+ while (ctl) {
+ nctl = TAILQ_NEXT(ctl, next);
+ if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
+ (tt == ctl->sinfo_ssn)) {
+ /* this is deliverable now */
+ TAILQ_REMOVE(&strmin->inqueue, ctl, next);
+ /* subtract pending on streams */
+ asoc->size_on_all_streams -= ctl->length;
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ /* deliver it to at least the delivery-q */
+ if (stcb->sctp_socket) {
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ ctl,
+ &stcb->sctp_socket->so_rcv, 1);
+ }
+ } else {
+ /* no more delivery now. */
+ break;
+ }
+ ctl = nctl;
+ }
+ /*
+ * now we must deliver things in queue the normal way if any are
+ * now ready.
+ */
+ tt = strmin->last_sequence_delivered + 1;
+ ctl = TAILQ_FIRST(&strmin->inqueue);
+ while (ctl) {
+ nctl = TAILQ_NEXT(ctl, next);
+ if (tt == ctl->sinfo_ssn) {
+ /* this is deliverable now */
+ TAILQ_REMOVE(&strmin->inqueue, ctl, next);
+ /* subtract pending on streams */
+ asoc->size_on_all_streams -= ctl->length;
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ /* deliver it to at least the delivery-q */
+ strmin->last_sequence_delivered = ctl->sinfo_ssn;
+ if (stcb->sctp_socket) {
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ ctl,
+ &stcb->sctp_socket->so_rcv, 1);
+ }
+ tt = strmin->last_sequence_delivered + 1;
+ } else {
+ break;
+ }
+ ctl = nctl;
+ }
+}
+
+void
+sctp_handle_forward_tsn(struct sctp_tcb *stcb,
+ struct sctp_forward_tsn_chunk *fwd, int *abort_flag)
+{
+ /*
+ * ISSUES that MUST be fixed for ECN! When we are the sender of the
+ * forward TSN, when the SACK comes back that acknowledges the
+ * FWD-TSN we must reset the NONCE sum to match correctly. This will
+	 * get quite tricky since we may have sent more intervening data
+ * and must carefully account for what the SACK says on the nonce
+ * and any gaps that are reported. This work will NOT be done here,
+ * but I note it here since it is really related to PR-SCTP and
+ * FWD-TSN's
+ */
+
+ /* The pr-sctp fwd tsn */
+ /*
+ * here we will perform all the data receiver side steps for
+	 * processing FwdTSN, as required by the pr-sctp draft:
+ *
+ * Assume we get FwdTSN(x):
+ *
+	 * 1) update local cumTSN to x
+	 * 2) try to further advance cumTSN to x + others we have
+	 * 3) examine and update re-ordering queue on pr-in-streams
+	 * 4) clean up re-assembly queue
+	 * 5) Send a sack to report where we are.
+ */
+ struct sctp_strseq *stseq;
+ struct sctp_association *asoc;
+ uint32_t new_cum_tsn, gap, back_out_htsn;
+ unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
+ struct sctp_stream_in *strm;
+ struct sctp_tmit_chunk *chk, *at;
+
+ cumack_set_flag = 0;
+ asoc = &stcb->asoc;
+ cnt_gone = 0;
+ if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
+			printf("Bad size on fwd-tsn chunk .. too small\n");
+ }
+#endif
+ return;
+ }
+ m_size = (stcb->asoc.mapping_array_size << 3);
+ /*************************************************************/
+ /* 1. Here we update local cumTSN and shift the bitmap array */
+ /*************************************************************/
+ new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
+
+ if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
+ asoc->cumulative_tsn == new_cum_tsn) {
+ /* Already got there ... */
+ return;
+ }
+ back_out_htsn = asoc->highest_tsn_inside_map;
+ if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
+ MAX_TSN)) {
+ asoc->highest_tsn_inside_map = new_cum_tsn;
+#ifdef SCTP_MAP_LOGGING
+ sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
+#endif
+ }
+ /*
+ * now we know the new TSN is more advanced, let's find the actual
+ * gap
+ */
+ if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
+ MAX_TSN)) ||
+ (new_cum_tsn == asoc->mapping_array_base_tsn)) {
+ gap = new_cum_tsn - asoc->mapping_array_base_tsn;
+ } else {
+ /* try to prevent underflow here */
+ gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
+ }
+
+	if (gap > m_size) {
+ asoc->highest_tsn_inside_map = back_out_htsn;
+ if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
+ /*
+			 * out of range (of single-byte chunks in the rwnd I
+			 * give out); too questionable, better to drop it
+			 * silently
+ */
+ return;
+ }
+ if (asoc->highest_tsn_inside_map >
+ asoc->mapping_array_base_tsn) {
+ gap = asoc->highest_tsn_inside_map -
+ asoc->mapping_array_base_tsn;
+ } else {
+ gap = asoc->highest_tsn_inside_map +
+ (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
+ }
+ cumack_set_flag = 1;
+ }
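+	/* Mark every TSN from the map base up to the new cum-ack as seen. */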
+ for (i = 0; i <= gap; i++) {
+ SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
+ }
+ /*
+ * Now after marking all, slide thing forward but no sack please.
+ */
+ sctp_sack_check(stcb, 0, 0, abort_flag);
+ if (*abort_flag)
+ return;
+
+ if (cumack_set_flag) {
+ /*
+ * fwd-tsn went outside my gap array - not a common
+		 * occurrence. Do the same thing we do when a cookie-echo
+ * arrives.
+ */
+ asoc->highest_tsn_inside_map = new_cum_tsn - 1;
+ asoc->mapping_array_base_tsn = new_cum_tsn;
+ asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
+#ifdef SCTP_MAP_LOGGING
+ sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
+#endif
+ asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
+ }
+ /*************************************************************/
+ /* 2. Clear up re-assembly queue */
+ /*************************************************************/
+
+ /*
+ * First service it if pd-api is up, just in case we can progress it
+ * forward
+ */
+ if (asoc->fragmented_delivery_inprogress) {
+ sctp_service_reassembly(stcb, asoc);
+ }
+ if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
+ /* For each one on here see if we need to toss it */
+ /*
+ * For now large messages held on the reasmqueue that are
+ * complete will be tossed too. We could in theory do more
+ * work to spin through and stop after dumping one msg aka
+ * seeing the start of a new msg at the head, and call the
+ * delivery function... to see if it can be delivered... But
+ * for now we just dump everything on the queue.
+ */
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ while (chk) {
+ at = TAILQ_NEXT(chk, sctp_next);
+ if (compare_with_wrap(asoc->cumulative_tsn,
+ chk->rec.data.TSN_seq, MAX_TSN) ||
+ asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
+ /* It needs to be tossed */
+ TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
+ if (compare_with_wrap(chk->rec.data.TSN_seq,
+ asoc->tsn_last_delivered, MAX_TSN)) {
+ asoc->tsn_last_delivered =
+ chk->rec.data.TSN_seq;
+ asoc->str_of_pdapi =
+ chk->rec.data.stream_number;
+ asoc->ssn_of_pdapi =
+ chk->rec.data.stream_seq;
+ asoc->fragment_flags =
+ chk->rec.data.rcv_flags;
+ }
+ asoc->size_on_reasm_queue -= chk->send_size;
+ sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+ cnt_gone++;
+
+ /* Clear up any stream problem */
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
+ SCTP_DATA_UNORDERED &&
+ (compare_with_wrap(chk->rec.data.stream_seq,
+ asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
+ MAX_SEQ))) {
+ /*
+					 * We must advance this stream's
+					 * sequence number if the chunk
+					 * being skipped is not unordered.
+					 * There is a chance that if the
+					 * peer does not include the last
+					 * fragment in its FWD-TSN we WILL
+					 * have a problem here, since you
+					 * would have a partial chunk in
+					 * queue that may not be
+					 * deliverable. Also if a partial
+					 * delivery API has started, the
+					 * user may get a partial chunk
+					 * followed by the next read
+					 * returning a new chunk... really
+					 * ugly, but I see no way around
+					 * it! Maybe a notify??
+ */
+ asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
+ chk->rec.data.stream_seq;
+ }
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_remote_addr(chk->whoTo);
+ sctp_free_a_chunk(stcb, chk);
+ } else {
+ /*
+ * Ok we have gone beyond the end of the
+ * fwd-tsn's mark. Some checks...
+ */
+ if ((asoc->fragmented_delivery_inprogress) &&
+ (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
+ /*
+ * Special case PD-API is up and
+ * what we fwd-tsn' over includes
+ * one that had the LAST_FRAG. We no
+ * longer need to do the PD-API.
+ */
+ asoc->fragmented_delivery_inprogress = 0;
+ sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
+ stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL);
+
+ }
+ break;
+ }
+ chk = at;
+ }
+ }
+ if (asoc->fragmented_delivery_inprogress) {
+ /*
+ * Ok we removed cnt_gone chunks in the PD-API queue that
+ * were being delivered. So now we must turn off the flag.
+ */
+ sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
+ stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL);
+ asoc->fragmented_delivery_inprogress = 0;
+ }
+ /*************************************************************/
+ /* 3. Update the PR-stream re-ordering queues */
+ /*************************************************************/
+ stseq = (struct sctp_strseq *)((caddr_t)fwd + sizeof(*fwd));
+ fwd_sz -= sizeof(*fwd);
+ {
+ /* New method. */
+ int num_str, i;
+
+ num_str = fwd_sz / sizeof(struct sctp_strseq);
+ for (i = 0; i < num_str; i++) {
+			uint16_t st;
+
+			/* Convert the fields to host byte order in place */
+ st = ntohs(stseq[i].stream);
+ stseq[i].stream = st;
+ st = ntohs(stseq[i].sequence);
+ stseq[i].sequence = st;
+ /* now process */
+			if (stseq[i].stream >= asoc->streamincnt) {
+ /*
+				 * It is arguable whether we should continue.
+ * Since the peer sent bogus stream info we
+ * may be in deep trouble.. a return may be
+ * a better choice?
+ */
+ continue;
+ }
+ strm = &asoc->strmin[stseq[i].stream];
+ if (compare_with_wrap(stseq[i].sequence,
+ strm->last_sequence_delivered, MAX_SEQ)) {
+ /* Update the sequence number */
+ strm->last_sequence_delivered =
+ stseq[i].sequence;
+ }
+ /* now kick the stream the new way */
+ sctp_kick_prsctp_reorder_queue(stcb, strm);
+ }
+ }
+}
diff --git a/sys/netinet/sctp_indata.h b/sys/netinet/sctp_indata.h
new file mode 100644
index 0000000..9de4694
--- /dev/null
+++ b/sys/netinet/sctp_indata.h
@@ -0,0 +1,118 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_indata.h,v 1.9 2005/03/06 16:04:17 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_indata_h__
+#define __sctp_indata_h__
+
+
+
+
+
+#if defined(_KERNEL)
+
+
+struct sctp_queued_to_read *
+sctp_build_readq_entry(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ uint32_t tsn, uint32_t ppid,
+ uint32_t context, uint16_t stream_no,
+ uint16_t stream_seq, uint8_t flags,
+ struct mbuf *dm);
+
+
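+/*
+ * Macro counterpart to sctp_build_readq_entry(): initializes a
+ * read-queue entry in place on the hot receive path and takes a
+ * reference on 'net'.
+ */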
+#define sctp_build_readq_entry_mac(_ctl, in_it, a, net, tsn, ppid, context, stream_no, stream_seq, flags, dm) do { \
+ if (_ctl) { \
+ (_ctl)->sinfo_context = a; \
+ (_ctl)->stcb = (in_it); \
+ (_ctl)->sinfo_assoc_id = sctp_get_associd((in_it)); \
+ (_ctl)->port_from = (in_it)->rport; \
+ (_ctl)->sinfo_stream = stream_no; \
+ (_ctl)->sinfo_ssn = stream_seq; \
+ (_ctl)->sinfo_flags = (flags << 8); \
+ (_ctl)->sinfo_ppid = ppid; \
+ (_ctl)->sinfo_timetolive = 0; \
+ (_ctl)->sinfo_tsn = tsn; \
+ (_ctl)->sinfo_cumtsn = tsn; \
+ (_ctl)->whoFrom = net; \
+ (_ctl)->length = 0; \
+ atomic_add_int(&((net)->ref_count), 1); \
+ (_ctl)->data = dm; \
+ (_ctl)->tail_mbuf = NULL; \
+ (_ctl)->do_not_ref_stcb = 0; \
+ (_ctl)->end_added = 0; \
+ } \
+} while (0)
+
+
+
+struct mbuf *
+sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
+ struct sctp_sndrcvinfo *sinfo);
+
+void sctp_set_rwnd(struct sctp_tcb *, struct sctp_association *);
+
+uint32_t
+sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc);
+
+void
+sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
+ uint32_t rwnd, int nonce_sum_flag, int *abort_now);
+
+void
+sctp_handle_sack(struct sctp_sack_chunk *, struct sctp_tcb *,
+ struct sctp_nets *, int *);
+
+/* draft-ietf-tsvwg-usctp */
+void
+sctp_handle_forward_tsn(struct sctp_tcb *,
+ struct sctp_forward_tsn_chunk *, int *);
+
+struct sctp_tmit_chunk *
+ sctp_try_advance_peer_ack_point(struct sctp_tcb *, struct sctp_association *);
+
+void sctp_service_queues(struct sctp_tcb *, struct sctp_association *);
+
+void
+sctp_update_acked(struct sctp_tcb *, struct sctp_shutdown_chunk *,
+ struct sctp_nets *, int *);
+
+int
+sctp_process_data(struct mbuf **, int, int *, int, struct sctphdr *,
+ struct sctp_inpcb *, struct sctp_tcb *, struct sctp_nets *, uint32_t *);
+
+void sctp_sack_check(struct sctp_tcb *, int, int, int *);
+
+#endif
+#endif
diff --git a/sys/netinet/sctp_input.c b/sys/netinet/sctp_input.c
new file mode 100644
index 0000000..e417f29
--- /dev/null
+++ b/sys/netinet/sctp_input.c
@@ -0,0 +1,4749 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_ipsec.h"
+#include "opt_compat.h"
+#include "opt_inet6.h"
+#include "opt_inet.h"
+#include "opt_sctp.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/sysctl.h>
+#include <sys/domain.h>
+#include <sys/protosw.h>
+#include <sys/kernel.h>
+#include <sys/errno.h>
+#include <sys/syslog.h>
+
+#include <sys/limits.h>
+#include <machine/cpu.h>
+
+#include <net/if.h>
+#include <net/route.h>
+#include <net/if_types.h>
+
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#include <netinet/in_pcb.h>
+#include <netinet/in_var.h>
+#include <netinet/ip_var.h>
+
+#ifdef INET6
+#include <netinet/ip6.h>
+#include <netinet6/ip6_var.h>
+#endif /* INET6 */
+
+#include <netinet/ip_icmp.h>
+#include <netinet/icmp_var.h>
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_input.h>
+#include <netinet/sctp_auth.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_asconf.h>
+
+#include <netinet/ip_options.h>
+
+
+#ifdef IPSEC
+#include <netinet6/ipsec.h>
+#include <netkey/key.h>
+#endif /* IPSEC */
+
+
+#ifdef SCTP_DEBUG
+extern uint32_t sctp_debug_on;
+
+#endif
+
+
+static void
+sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
+{
+ struct sctp_nets *net;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if ((callout_pending(&net->rxt_timer.timer)) &&
+ (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
+ stcb->sctp_ep,
+ stcb,
+ net);
+ }
+ }
+}
+
+/* INIT handler */
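+/*
+ * Sanity-check an arriving INIT and, when acceptable, answer with an
+ * INIT-ACK carrying a cookie. We abort instead when the socket is not
+ * accepting, the chunk is too short, the initiate tag is zero, a_rwnd
+ * is below SCTP_MIN_RWND, either stream count is zero, or the AUTH
+ * parameters fail validation.
+ */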
+static void
+sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
+ struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_init *init;
+ struct mbuf *op_err;
+ uint32_t init_limit;
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT2) {
+ printf("sctp_handle_init: handling INIT tcb:%p\n", stcb);
+ }
+#endif
+ op_err = NULL;
+ init = &cp->init;
+ /* First are we accepting? */
+ if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT2) {
+ printf("sctp_handle_init: Abort, so_qlimit:%d\n", inp->sctp_socket->so_qlimit);
+ }
+#endif
+ /*
+ * FIX ME ?? What about TCP model and we have a
+ * match/restart case?
+ */
+ sctp_abort_association(inp, stcb, m, iphlen, sh, op_err);
+ return;
+ }
+ if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
+ /* Invalid length */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ sctp_abort_association(inp, stcb, m, iphlen, sh, op_err);
+ return;
+ }
+ /* validate parameters */
+ if (init->initiate_tag == 0) {
+ /* protocol error... send abort */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ sctp_abort_association(inp, stcb, m, iphlen, sh, op_err);
+ return;
+ }
+ if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
+ /* invalid parameter... send abort */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ sctp_abort_association(inp, stcb, m, iphlen, sh, op_err);
+ return;
+ }
+ if (init->num_inbound_streams == 0) {
+ /* protocol error... send abort */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ sctp_abort_association(inp, stcb, m, iphlen, sh, op_err);
+ return;
+ }
+ if (init->num_outbound_streams == 0) {
+ /* protocol error... send abort */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ sctp_abort_association(inp, stcb, m, iphlen, sh, op_err);
+ return;
+ }
+ init_limit = offset + ntohs(cp->ch.chunk_length);
+ if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
+ init_limit)) {
+ /* auth parameter(s) error... send abort */
+ sctp_abort_association(inp, stcb, m, iphlen, sh, NULL);
+ return;
+ }
+ /* send an INIT-ACK w/cookie */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("sctp_handle_init: sending INIT-ACK\n");
+ }
+#endif
+ sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp);
+}
+
+/*
+ * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
+ */
+static int
+sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_init *init;
+ struct sctp_association *asoc;
+ struct sctp_nets *lnet;
+ unsigned int i;
+
+ init = &cp->init;
+ asoc = &stcb->asoc;
+ /* save off parameters */
+ asoc->peer_vtag = ntohl(init->initiate_tag);
+ asoc->peers_rwnd = ntohl(init->a_rwnd);
+ if (TAILQ_FIRST(&asoc->nets)) {
+ /* update any ssthresh's that may have a default */
+ TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
+ lnet->ssthresh = asoc->peers_rwnd;
+
+#if defined(SCTP_CWND_MONITOR) || defined(SCTP_CWND_LOGGING)
+ sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
+#endif
+
+ }
+ }
+ if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
+ unsigned int newcnt;
+ struct sctp_stream_out *outs;
+ struct sctp_stream_queue_pending *sp;
+
+ /* cut back on number of streams */
+ newcnt = ntohs(init->num_inbound_streams);
+ /* This if is probably not needed but I am cautious */
+ if (asoc->strmout) {
+ /* First make sure no data chunks are trapped */
+ for (i = newcnt; i < asoc->pre_open_streams; i++) {
+ outs = &asoc->strmout[i];
+ sp = TAILQ_FIRST(&outs->outqueue);
+ while (sp) {
+ TAILQ_REMOVE(&outs->outqueue, sp,
+ next);
+ asoc->stream_queue_cnt--;
+ sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
+ stcb, SCTP_NOTIFY_DATAGRAM_UNSENT,
+ sp);
+ if (sp->data) {
+ sctp_m_freem(sp->data);
+ sp->data = NULL;
+ }
+ sctp_free_remote_addr(sp->net);
+ sp->net = NULL;
+ /* Free the chunk */
+					printf("sp:%p tcb:%p weird free case\n",
+					    (void *)sp, (void *)stcb);
+
+ sctp_free_a_strmoq(stcb, sp);
+ sp = TAILQ_FIRST(&outs->outqueue);
+ }
+ }
+ }
+ /* cut back the count and abandon the upper streams */
+ asoc->pre_open_streams = newcnt;
+ }
+ asoc->streamincnt = ntohs(init->num_outbound_streams);
+ if (asoc->streamincnt > MAX_SCTP_STREAMS) {
+ asoc->streamincnt = MAX_SCTP_STREAMS;
+ }
+ asoc->streamoutcnt = asoc->pre_open_streams;
+ /* init tsn's */
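+	/*
+	 * The peer's initial TSN minus one becomes our cumulative ack
+	 * point, so the very first DATA chunk (initial_tsn itself) will
+	 * advance the mapping array.
+	 */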
+ asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
+#ifdef SCTP_MAP_LOGGING
+ sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
+#endif
+ /* This is the next one we expect */
+ asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;
+
+ asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
+ asoc->cumulative_tsn = asoc->asconf_seq_in;
+ asoc->last_echo_tsn = asoc->asconf_seq_in;
+ asoc->advanced_peer_ack_point = asoc->last_acked_seq;
+ /* open the requested streams */
+ if (asoc->strmin != NULL) {
+ /* Free the old ones */
+ SCTP_FREE(asoc->strmin);
+ }
+ SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
+ sizeof(struct sctp_stream_in), "StreamsIn");
+ if (asoc->strmin == NULL) {
+ /* we didn't get memory for the streams! */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT2) {
+ printf("process_init: couldn't get memory for the streams!\n");
+ }
+#endif
+ return (-1);
+ }
+ for (i = 0; i < asoc->streamincnt; i++) {
+ asoc->strmin[i].stream_no = i;
+ asoc->strmin[i].last_sequence_delivered = 0xffff;
+		/*
+		 * U-stream ranges will be set when the cookie is unpacked.
+		 * Or for the INIT sender they are unset (if PR-SCTP is not
+		 * supported) when the INIT-ACK arrives.
+		 */
+ TAILQ_INIT(&asoc->strmin[i].inqueue);
+		/*
+		 * We are not on any wheel; PR-SCTP streams will go on the
+		 * wheel when they have data waiting for reorder.
+		 */
+ asoc->strmin[i].next_spoke.tqe_next = 0;
+ asoc->strmin[i].next_spoke.tqe_prev = 0;
+ }
+
+	/*
+	 * sctp_load_addresses_from_init will put the addresses into the
+	 * association when the COOKIE is processed or the INIT-ACK is
+	 * processed.  Both types of COOKIEs, existing and new, call this
+	 * routine.  It will remove addresses that are no longer in the
+	 * association (for the restart case where addresses are removed).
+	 * Up front, when the INIT arrives, we will discard it if it is a
+	 * restart and new addresses have been added.
+	 */
+ return (0);
+}
+
+/*
+ * INIT-ACK message processing/consumption returns value < 0 on error
+ */
+static int
+sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
+ struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_association *asoc;
+ struct mbuf *op_err;
+ int retval, abort_flag;
+ uint32_t initack_limit;
+
+ /* First verify that we have no illegal param's */
+ abort_flag = 0;
+ op_err = NULL;
+
+ op_err = sctp_arethere_unrecognized_parameters(m,
+ (offset + sizeof(struct sctp_init_chunk)),
+ &abort_flag, (struct sctp_chunkhdr *)cp);
+ if (abort_flag) {
+ /* Send an abort and notify peer */
+ if (op_err != NULL) {
+ sctp_send_operr_to(m, iphlen, op_err, cp->init.initiate_tag);
+ } else {
+ /*
+ * Just notify (abort_assoc does this if we send an
+ * abort).
+ */
+ sctp_abort_notification(stcb, 0);
+			/*
+			 * No sense in further INITs since we will get the
+			 * same param back.
+			 */
+ sctp_free_assoc(stcb->sctp_ep, stcb, 0);
+ }
+ return (-1);
+ }
+ asoc = &stcb->asoc;
+ /* process the peer's parameters in the INIT-ACK */
+ retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
+ if (retval < 0) {
+ return (retval);
+ }
+ initack_limit = offset + ntohs(cp->ch.chunk_length);
+ /* load all addresses */
+ if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen,
+ (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
+ NULL))) {
+ /* Huh, we should abort */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
+ printf("Load addresses from INIT causes an abort %d\n", retval);
+ }
+#endif
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
+ NULL);
+ return (-1);
+ }
+ stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
+ stcb->asoc.local_hmacs);
+ if (op_err) {
+ sctp_queue_op_err(stcb, op_err);
+ /* queuing will steal away the mbuf chain to the out queue */
+ op_err = NULL;
+ }
+ /* extract the cookie and queue it to "echo" it back... */
+ stcb->asoc.overall_error_count = 0;
+ net->error_count = 0;
+
+	/*
+	 * Cancel the INIT timer.  We do this first, before queueing the
+	 * cookie.  We always cancel at the primary to assure that we are
+	 * canceling the timer started by the INIT, which always goes to
+	 * the primary.
+	 */
+ sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+
+ retval = sctp_send_cookie_echo(m, offset, stcb, net);
+ if (retval < 0) {
+		/*
+		 * No cookie, we probably should send an op error.  But in
+		 * any case, if there is no cookie in the INIT-ACK, we can
+		 * abandon the peer; it's broken.
+		 */
+ if (retval == -3) {
+ /* We abort with an error of missing mandatory param */
+ struct mbuf *op_err;
+
+ op_err =
+ sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
+ if (op_err) {
+ /*
+ * Expand beyond to include the mandatory
+ * param cookie
+ */
+ struct sctp_inv_mandatory_param *mp;
+
+ op_err->m_len =
+ sizeof(struct sctp_inv_mandatory_param);
+ mp = mtod(op_err,
+ struct sctp_inv_mandatory_param *);
+ /* Subtract the reserved param */
+ mp->length =
+ htons(sizeof(struct sctp_inv_mandatory_param) - 2);
+ mp->num_param = htonl(1);
+ mp->param = htons(SCTP_STATE_COOKIE);
+ mp->resv = 0;
+ }
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
+ sh, op_err);
+ }
+ return (retval);
+ }
+ /* calculate the RTO */
+ net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered);
+
+ return (0);
+}
+
+static void
+sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
+ struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ struct sockaddr_storage store;
+ struct sockaddr_in *sin;
+ struct sockaddr_in6 *sin6;
+ struct sctp_nets *r_net;
+ struct timeval tv;
+
+ if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
+ /* Invalid length */
+ return;
+ }
+ sin = (struct sockaddr_in *)&store;
+ sin6 = (struct sockaddr_in6 *)&store;
+
+ memset(&store, 0, sizeof(store));
+ if (cp->heartbeat.hb_info.addr_family == AF_INET &&
+ cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
+ sin->sin_family = cp->heartbeat.hb_info.addr_family;
+ sin->sin_len = cp->heartbeat.hb_info.addr_len;
+ sin->sin_port = stcb->rport;
+ memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
+ sizeof(sin->sin_addr));
+ } else if (cp->heartbeat.hb_info.addr_family == AF_INET6 &&
+ cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
+ sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
+ sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
+ sin6->sin6_port = stcb->rport;
+ memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
+ sizeof(sin6->sin6_addr));
+ } else {
+ return;
+ }
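+	/*
+	 * The peer echoes our HB info back to us, so the address above
+	 * identifies which of our destinations this heartbeat probed; the
+	 * random values and timestamp below let us confirm an unconfirmed
+	 * address and compute an RTT for it.
+	 */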
+ r_net = sctp_findnet(stcb, (struct sockaddr *)sin);
+ if (r_net == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
+ printf("Huh? I can't find the address I sent it to, discard\n");
+ }
+#endif
+ return;
+ }
+	if ((r_net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
+ (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
+ (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
+		/*
+		 * If it's a HB and its random value is correct, we can
+		 * confirm the destination.
+		 */
+ r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
+ stcb, 0, (void *)r_net);
+ }
+ r_net->error_count = 0;
+ r_net->hb_responded = 1;
+ tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
+ tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
+ if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
+ r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
+ r_net->dest_state |= SCTP_ADDR_REACHABLE;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
+ SCTP_HEARTBEAT_SUCCESS, (void *)r_net);
+ /* now was it the primary? if so restore */
+ if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
+ sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net);
+ }
+ }
+ /* Now lets do a RTO with this */
+ r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv);
+}
+
+static void
+sctp_handle_abort(struct sctp_abort_chunk *cp,
+ struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT2) {
+ printf("sctp_handle_abort: handling ABORT\n");
+ }
+#endif
+ if (stcb == NULL)
+ return;
+ /* verify that the destination addr is in the association */
+ /* ignore abort for addresses being deleted */
+
+ /* stop any receive timers */
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net);
+ /* notify user of the abort and clean up... */
+ sctp_abort_notification(stcb, 0);
+ /* free the tcb */
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ sctp_free_assoc(stcb->sctp_ep, stcb, 0);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT2) {
+ printf("sctp_handle_abort: finished\n");
+ }
+#endif
+}
+
+static void
+sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
+ struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
+{
+ struct sctp_association *asoc;
+ int some_on_streamwheel;
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT2) {
+ printf("sctp_handle_shutdown: handling SHUTDOWN\n");
+ }
+#endif
+ if (stcb == NULL)
+ return;
+
+ if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
+ return;
+ }
+ if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
+ /* Shutdown NOT the expected size */
+ return;
+ } else {
+ sctp_update_acked(stcb, cp, net, abort_flag);
+ }
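+	/*
+	 * A SHUTDOWN chunk carries a cumulative TSN ack, which was just
+	 * processed above much like a SACK before we react to the shutdown
+	 * itself.
+	 */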
+ asoc = &stcb->asoc;
+ if (stcb->asoc.control_pdapi) {
+		/*
+		 * With a normal shutdown we assume the end of the last
+		 * record.
+		 */
+ SCTP_INP_READ_LOCK(stcb->sctp_ep);
+ stcb->asoc.control_pdapi->end_added = 1;
+ if (stcb->asoc.control_pdapi->tail_mbuf) {
+ stcb->asoc.control_pdapi->tail_mbuf->m_flags |= M_EOR;
+ }
+ stcb->asoc.control_pdapi = NULL;
+ SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+ }
+ /* goto SHUTDOWN_RECEIVED state to block new requests */
+ if (stcb->sctp_socket) {
+ if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
+ asoc->state = SCTP_STATE_SHUTDOWN_RECEIVED;
+ /*
+ * notify upper layer that peer has initiated a
+ * shutdown
+ */
+ sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL);
+
+ /* reset time */
+ SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
+ }
+ }
+ if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
+ /*
+ * stop the shutdown timer, since we WILL move to
+ * SHUTDOWN-ACK-SENT.
+ */
+ sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net);
+ }
+ /* Now are we there yet? */
+ some_on_streamwheel = 0;
+ if (!TAILQ_EMPTY(&asoc->out_wheel)) {
+ /* Check to see if some data queued */
+ struct sctp_stream_out *outs;
+
+ TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
+ if (!TAILQ_EMPTY(&outs->outqueue)) {
+ some_on_streamwheel = 1;
+ break;
+ }
+ }
+ }
+ if (!TAILQ_EMPTY(&asoc->send_queue) ||
+ !TAILQ_EMPTY(&asoc->sent_queue) ||
+ some_on_streamwheel) {
+ /* By returning we will push more data out */
+ return;
+ } else {
+ /* no outstanding data to send, so move on... */
+ /* send SHUTDOWN-ACK */
+ sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
+ /* move to SHUTDOWN-ACK-SENT state */
+ if (asoc->state == SCTP_STATE_SHUTDOWN_RECEIVED) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
+
+ /* start SHUTDOWN timer */
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
+ stcb, net);
+ }
+}
+
+static void
+sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
+ struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ struct sctp_association *asoc;
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT2) {
+ printf("sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
+ }
+#endif
+ if (stcb == NULL)
+ return;
+
+ asoc = &stcb->asoc;
+ /* process according to association state */
+ if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ /* unexpected SHUTDOWN-ACK... so ignore... */
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ if (stcb->asoc.control_pdapi) {
+		/*
+		 * With a normal shutdown we assume the end of the last
+		 * record.
+		 */
+ SCTP_INP_READ_LOCK(stcb->sctp_ep);
+ stcb->asoc.control_pdapi->end_added = 1;
+ if (stcb->asoc.control_pdapi->tail_mbuf) {
+ stcb->asoc.control_pdapi->tail_mbuf->m_flags |= M_EOR;
+ }
+ stcb->asoc.control_pdapi = NULL;
+ SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+ }
+ /* are the queues empty? */
+ if (!TAILQ_EMPTY(&asoc->send_queue) ||
+ !TAILQ_EMPTY(&asoc->sent_queue) ||
+ !TAILQ_EMPTY(&asoc->out_wheel)) {
+ sctp_report_all_outbound(stcb);
+ }
+ /* stop the timer */
+ sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net);
+ /* send SHUTDOWN-COMPLETE */
+ sctp_send_shutdown_complete(stcb, net);
+ /* notify upper layer protocol */
+ if (stcb->sctp_socket) {
+ sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL);
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ /* Set the connected flag to disconnected */
+ stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
+ }
+ }
+ SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
+ /* free the TCB but first save off the ep */
+ sctp_free_assoc(stcb->sctp_ep, stcb, 0);
+}
+
+/*
+ * Skip past the param header and then we will find the chunk that caused
+ * the problem.  There are two possibilities: ASCONF or FWD-TSN; anything
+ * other than that and our peer must be broken.
+ */
+static void
+sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
+ struct sctp_nets *net)
+{
+ struct sctp_chunkhdr *chk;
+
+ chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
+ switch (chk->chunk_type) {
+ case SCTP_ASCONF_ACK:
+ case SCTP_ASCONF:
+ sctp_asconf_cleanup(stcb, net);
+ break;
+ case SCTP_FORWARD_CUM_TSN:
+ stcb->asoc.peer_supports_prsctp = 0;
+ break;
+ default:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT2) {
+ printf("Peer does not support chunk type %d(%x)??\n",
+ chk->chunk_type, (uint32_t) chk->chunk_type);
+ }
+#endif
+ break;
+ }
+}
+
+/*
+ * Skip past the param header and then we will find the param that caused
+ * the problem.  There are a number of params in an ASCONF or the PR-SCTP
+ * param; these will turn off specific features.
+ */
+static void
+sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
+{
+ struct sctp_paramhdr *pbad;
+
+ pbad = phdr + 1;
+ switch (ntohs(pbad->param_type)) {
+ /* pr-sctp draft */
+ case SCTP_PRSCTP_SUPPORTED:
+ stcb->asoc.peer_supports_prsctp = 0;
+ break;
+ case SCTP_SUPPORTED_CHUNK_EXT:
+ break;
+ /* draft-ietf-tsvwg-addip-sctp */
+ case SCTP_ECN_NONCE_SUPPORTED:
+ stcb->asoc.peer_supports_ecn_nonce = 0;
+ stcb->asoc.ecn_nonce_allowed = 0;
+ stcb->asoc.ecn_allowed = 0;
+ break;
+ case SCTP_ADD_IP_ADDRESS:
+ case SCTP_DEL_IP_ADDRESS:
+ case SCTP_SET_PRIM_ADDR:
+ stcb->asoc.peer_supports_asconf = 0;
+ break;
+ case SCTP_SUCCESS_REPORT:
+ case SCTP_ERROR_CAUSE_IND:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT2) {
+ printf("Huh, the peer does not support success? or error cause?\n");
+ printf("Turning off ASCONF to this strange peer\n");
+ }
+#endif
+ stcb->asoc.peer_supports_asconf = 0;
+ break;
+ default:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT2) {
+ printf("Peer does not support param type %d(%x)??\n",
+ pbad->param_type, (uint32_t) pbad->param_type);
+ }
+#endif
+ break;
+ }
+}
+
+static int
+sctp_handle_error(struct sctp_chunkhdr *ch,
+ struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ int chklen;
+ struct sctp_paramhdr *phdr;
+ uint16_t error_type;
+ uint16_t error_len;
+ struct sctp_association *asoc;
+
+ int adjust;
+
+ /* parse through all of the errors and process */
+ asoc = &stcb->asoc;
+ phdr = (struct sctp_paramhdr *)((caddr_t)ch +
+ sizeof(struct sctp_chunkhdr));
+ chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
+ while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
+ /* Process an Error Cause */
+ error_type = ntohs(phdr->param_type);
+ error_len = ntohs(phdr->param_length);
+ if ((error_len > chklen) || (error_len == 0)) {
+ /* invalid param length for this param */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
+ printf("Bogus length in error param- chunk left:%d errorlen:%d\n",
+ chklen, error_len);
+ }
+#endif /* SCTP_DEBUG */
+ return (0);
+ }
+ switch (error_type) {
+ case SCTP_CAUSE_INVALID_STREAM:
+ case SCTP_CAUSE_MISSING_PARAM:
+ case SCTP_CAUSE_INVALID_PARAM:
+ case SCTP_CAUSE_NO_USER_DATA:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
+ printf("Software error we got a %d back? We have a bug :/ (or do they?)\n",
+ error_type);
+ }
+#endif
+ break;
+ case SCTP_CAUSE_STALE_COOKIE:
+ /*
+ * We only act if we have echoed a cookie and are
+ * waiting.
+ */
+ if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
+ int *p;
+
+ p = (int *)((caddr_t)phdr + sizeof(*phdr));
+ /* Save the time doubled */
+ asoc->cookie_preserve_req = ntohl(*p) << 1;
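+				/*
+				 * The doubled staleness is saved so the
+				 * retried INIT can ask the peer for a longer
+				 * cookie lifetime (a Cookie Preservative).
+				 */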
+ asoc->stale_cookie_count++;
+ if (asoc->stale_cookie_count >
+ asoc->max_init_times) {
+ sctp_abort_notification(stcb, 0);
+ /* now free the asoc */
+ sctp_free_assoc(stcb->sctp_ep, stcb, 0);
+ return (-1);
+ }
+ /* blast back to INIT state */
+ asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
+ asoc->state |= SCTP_STATE_COOKIE_WAIT;
+
+ sctp_stop_all_cookie_timers(stcb);
+ sctp_send_initiate(stcb->sctp_ep, stcb);
+ }
+ break;
+ case SCTP_CAUSE_UNRESOLVABLE_ADDR:
+			/*
+			 * Nothing we can do here; we don't do hostname
+			 * addresses, so if the peer does not like my IPv6
+			 * (or IPv4 for that matter) it does not matter.  If
+			 * they don't support that type of address, they can
+			 * NOT possibly get that packet type... i.e. with no
+			 * IPv6 you can't receive an IPv6 packet, so we can
+			 * safely ignore this one.  If we ever added support
+			 * for HOSTNAME addresses, then we would need to do
+			 * something here.
+			 */
+ break;
+ case SCTP_CAUSE_UNRECOG_CHUNK:
+ sctp_process_unrecog_chunk(stcb, phdr, net);
+ break;
+ case SCTP_CAUSE_UNRECOG_PARAM:
+ sctp_process_unrecog_param(stcb, phdr);
+ break;
+ case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
+			/*
+			 * We ignore this since the timer will drive out a
+			 * new cookie anyway, and their timer will drive us
+			 * to send a SHUTDOWN_COMPLETE.  We can't send one
+			 * here since we don't have their tag.
+			 */
+ break;
+ case SCTP_CAUSE_DELETING_LAST_ADDR:
+ case SCTP_CAUSE_RESOURCE_SHORTAGE:
+ case SCTP_CAUSE_DELETING_SRC_ADDR:
+			/*
+			 * We should NOT get these here, but in an
+			 * ASCONF-ACK.
+			 */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT2) {
+				printf("Peer sends ASCONF errors in an Operational Error?<%d>?\n",
+ error_type);
+ }
+#endif
+ break;
+ case SCTP_CAUSE_OUT_OF_RESC:
+			/*
+			 * And what, pray tell, do we do with the fact that
+			 * the peer is out of resources?  Not really sure we
+			 * could do anything but abort.  I suspect this
+			 * should have come WITH an abort instead of in an
+			 * OP-ERROR.
+			 */
+ break;
+ default:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
+ /* don't know what this error cause is... */
+			printf("sctp_handle_error: unknown error type = 0x%x\n",
+ error_type);
+ }
+#endif /* SCTP_DEBUG */
+ break;
+ }
+ adjust = SCTP_SIZE32(error_len);
+ chklen -= adjust;
+ phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
+ }
+ return (0);
+}
+
+static int
+sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
+ struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_init_ack *init_ack;
+ int *state;
+ struct mbuf *op_err;
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT2) {
+ printf("sctp_handle_init_ack: handling INIT-ACK\n");
+ }
+#endif
+ if (stcb == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT2) {
+ printf("sctp_handle_init_ack: TCB is null\n");
+ }
+#endif
+ return (-1);
+ }
+ if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
+ /* Invalid length */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
+ op_err);
+ return (-1);
+ }
+ init_ack = &cp->init;
+ /* validate parameters */
+ if (init_ack->initiate_tag == 0) {
+ /* protocol error... send an abort */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
+ op_err);
+ return (-1);
+ }
+ if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
+ /* protocol error... send an abort */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
+ op_err);
+ return (-1);
+ }
+ if (init_ack->num_inbound_streams == 0) {
+ /* protocol error... send an abort */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
+ op_err);
+ return (-1);
+ }
+ if (init_ack->num_outbound_streams == 0) {
+ /* protocol error... send an abort */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
+ op_err);
+ return (-1);
+ }
+ /* process according to association state... */
+ state = &stcb->asoc.state;
+ switch (*state & SCTP_STATE_MASK) {
+ case SCTP_STATE_COOKIE_WAIT:
+ /* this is the expected state for this chunk */
+ /* process the INIT-ACK parameters */
+ if (stcb->asoc.primary_destination->dest_state &
+ SCTP_ADDR_UNCONFIRMED) {
+ /*
+ * The primary is where we sent the INIT, we can
+ * always consider it confirmed when the INIT-ACK is
+ * returned. Do this before we load addresses
+ * though.
+ */
+ stcb->asoc.primary_destination->dest_state &=
+ ~SCTP_ADDR_UNCONFIRMED;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
+ stcb, 0, (void *)stcb->asoc.primary_destination);
+ }
+		if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb, net) < 0) {
+ /* error in parsing parameters */
+ return (-1);
+ }
+ /* update our state */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT2) {
+ printf("moving to COOKIE-ECHOED state\n");
+ }
+#endif
+ if (*state & SCTP_STATE_SHUTDOWN_PENDING) {
+ *state = SCTP_STATE_COOKIE_ECHOED |
+ SCTP_STATE_SHUTDOWN_PENDING;
+ } else {
+ *state = SCTP_STATE_COOKIE_ECHOED;
+ }
+
+ /* reset the RTO calc */
+ stcb->asoc.overall_error_count = 0;
+ SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+		/*
+		 * collapse the init timer back in case of an exponential
+		 * backoff
+		 */
+ sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
+ stcb, net);
+ /*
+ * the send at the end of the inbound data processing will
+ * cause the cookie to be sent
+ */
+ break;
+ case SCTP_STATE_SHUTDOWN_SENT:
+ /* incorrect state... discard */
+ break;
+ case SCTP_STATE_COOKIE_ECHOED:
+ /* incorrect state... discard */
+ break;
+ case SCTP_STATE_OPEN:
+ /* incorrect state... discard */
+ break;
+ case SCTP_STATE_EMPTY:
+ case SCTP_STATE_INUSE:
+ default:
+ /* incorrect state... discard */
+ return (-1);
+ break;
+ } /* end switch asoc state */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
+ printf("Leaving handle-init-ack end\n");
+ }
+#endif
+ return (0);
+}
+
+
+/*
+ * handle a state cookie for an existing association
+ * m: input packet mbuf chain -- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk
+ * note: this is a "split" mbuf and the cookie signature does not exist
+ * offset: offset into mbuf to the cookie-echo chunk
+ */
+static struct sctp_tcb *
+sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
+ struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
+ struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
+ struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id)
+{
+ struct sctp_association *asoc;
+ struct sctp_init_chunk *init_cp, init_buf;
+ struct sctp_init_ack_chunk *initack_cp, initack_buf;
+ int chk_length;
+ int init_offset, initack_offset;
+ int retval;
+
+ /* I know that the TCB is non-NULL from the caller */
+ asoc = &stcb->asoc;
+
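+	/*
+	 * The vtag/tie-tag comparisons below walk Section 5.2.4 Table 2 of
+	 * the SCTP spec: case D (both tags match) is a duplicated cookie,
+	 * case C (only the peer's tag matches, no tie-tags) is silently
+	 * discarded, case B (only our tag matches) is a setup collision,
+	 * and case A (neither tag matches but the tie-tags do) is a peer
+	 * restart.
+	 */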
+ if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
+ /* SHUTDOWN came in after sending INIT-ACK */
+ struct mbuf *op_err;
+ struct sctp_paramhdr *ph;
+
+ sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
+ op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
+ 1, M_DONTWAIT, 1, MT_DATA);
+ if (op_err == NULL) {
+ /* FOOBAR */
+ return (NULL);
+ }
+ /* pre-reserve some space */
+ op_err->m_data += sizeof(struct ip6_hdr);
+ op_err->m_data += sizeof(struct sctphdr);
+ op_err->m_data += sizeof(struct sctp_chunkhdr);
+ /* Set the len */
+ op_err->m_len = op_err->m_pkthdr.len = sizeof(struct sctp_paramhdr);
+ ph = mtod(op_err, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
+ ph->param_length = htons(sizeof(struct sctp_paramhdr));
+ sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag);
+ return (NULL);
+ }
+	/*
+	 * find and validate the INIT chunk in the cookie (peer's info); the
+	 * INIT should start after the cookie-echo header struct (chunk
+	 * header, state cookie header struct)
+	 */
+ init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
+
+ init_cp = (struct sctp_init_chunk *)
+ sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
+ (uint8_t *) & init_buf);
+ if (init_cp == NULL) {
+ /* could not pull a INIT chunk in cookie */
+ return (NULL);
+ }
+ chk_length = ntohs(init_cp->ch.chunk_length);
+ if (init_cp->ch.chunk_type != SCTP_INITIATION) {
+ return (NULL);
+ }
+	/*
+	 * find and validate the INIT-ACK chunk in the cookie (my info); the
+	 * INIT-ACK follows the INIT chunk
+	 */
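+	/* chunk lengths are 32-bit padded when stacked inside the cookie */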
+ initack_offset = init_offset + SCTP_SIZE32(chk_length);
+ initack_cp = (struct sctp_init_ack_chunk *)
+ sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
+ (uint8_t *) & initack_buf);
+ if (initack_cp == NULL) {
+ /* could not pull INIT-ACK chunk in cookie */
+ return (NULL);
+ }
+ chk_length = ntohs(initack_cp->ch.chunk_length);
+ if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
+ return (NULL);
+ }
+ if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
+ (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
+		/*
+		 * case D in Section 5.2.4 Table 2: MMAA; process
+		 * accordingly to get into the OPEN state
+		 */
+		switch (SCTP_GET_STATE(asoc)) {
+		case SCTP_STATE_COOKIE_WAIT:
+			/*
+			 * INIT was sent, but we got a COOKIE_ECHO with the
+			 * correct tags... just accept it...
+			 */
+ /* First we must process the INIT !! */
+ retval = sctp_process_init(init_cp, stcb, net);
+ if (retval < 0) {
+ return (NULL);
+ }
+ /* intentional fall through to below... */
+
+ case SCTP_STATE_COOKIE_ECHOED:
+ /* Duplicate INIT case */
+ /* we have already processed the INIT so no problem */
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
+ net);
+ sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
+ sctp_stop_all_cookie_timers(stcb);
+ /* update current state */
+ if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
+ asoc->state = SCTP_STATE_OPEN |
+ SCTP_STATE_SHUTDOWN_PENDING;
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+
+ } else if ((asoc->state & SCTP_STATE_SHUTDOWN_SENT) == 0) {
+ /* if ok, move to OPEN state */
+ asoc->state = SCTP_STATE_OPEN;
+ }
+ if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
+			    (inp->sctp_socket->so_qlimit == 0)) {
+				/*
+				 * Here is where collision would go if we
+				 * did a connect() and instead got an
+				 * init/init-ack/cookie done before the
+				 * init-ack came back...
+				 */
+ stcb->sctp_ep->sctp_flags |=
+ SCTP_PCB_FLAGS_CONNECTED;
+ soisconnected(stcb->sctp_ep->sctp_socket);
+ }
+ /* notify upper layer */
+ *notification = SCTP_NOTIFY_ASSOC_UP;
+ /*
+ * since we did not send a HB make sure we don't
+ * double things
+ */
+ net->hb_responded = 1;
+
+ if (stcb->asoc.sctp_autoclose_ticks &&
+ (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
+ sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
+ inp, stcb, NULL);
+ }
+ break;
+ default:
+ /*
+ * we're in the OPEN state (or beyond), so peer must
+ * have simply lost the COOKIE-ACK
+ */
+ break;
+ } /* end switch */
+
+		/*
+		 * We ignore the return code here... not sure if we should
+		 * somehow abort... but we do have an existing asoc.  This
+		 * really should not fail.
+		 */
+ if (sctp_load_addresses_from_init(stcb, m, iphlen,
+ init_offset + sizeof(struct sctp_init_chunk),
+ initack_offset, sh, init_src)) {
+ return (NULL);
+ }
+ /* respond with a COOKIE-ACK */
+ sctp_send_cookie_ack(stcb);
+ return (stcb);
+ } /* end if */
+ if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
+ ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
+ cookie->tie_tag_my_vtag == 0 &&
+ cookie->tie_tag_peer_vtag == 0) {
+		/*
+		 * case C in Section 5.2.4 Table 2: XMOO; silently discard
+		 */
+ return (NULL);
+ }
+ if (ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag &&
+ (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag ||
+ init_cp->init.initiate_tag == 0)) {
+		/*
+		 * case B in Section 5.2.4 Table 2: MXAA or MOAA; my info
+		 * should be ok, re-accept peer info
+		 */
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
+ sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
+ sctp_stop_all_cookie_timers(stcb);
+ /*
+ * since we did not send a HB make sure we don't double
+ * things
+ */
+ net->hb_responded = 1;
+ if (stcb->asoc.sctp_autoclose_ticks &&
+ sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
+ NULL);
+ }
+ asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
+ asoc->pre_open_streams =
+ ntohs(initack_cp->init.num_outbound_streams);
+ asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
+ asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out =
+ asoc->init_seq_number;
+ asoc->last_cwr_tsn = asoc->init_seq_number - 1;
+ asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
+ asoc->str_reset_seq_in = asoc->init_seq_number;
+ asoc->advanced_peer_ack_point = asoc->last_acked_seq;
+
+ /* process the INIT info (peer's info) */
+ retval = sctp_process_init(init_cp, stcb, net);
+ if (retval < 0) {
+ return (NULL);
+ }
+ if (sctp_load_addresses_from_init(stcb, m, iphlen,
+ init_offset + sizeof(struct sctp_init_chunk),
+ initack_offset, sh, init_src)) {
+ return (NULL);
+ }
+ if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
+ (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
+ *notification = SCTP_NOTIFY_ASSOC_UP;
+
+ if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
+ (inp->sctp_socket->so_qlimit == 0)) {
+ stcb->sctp_ep->sctp_flags |=
+ SCTP_PCB_FLAGS_CONNECTED;
+ soisconnected(stcb->sctp_ep->sctp_socket);
+ }
+ }
+ if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
+ asoc->state = SCTP_STATE_OPEN |
+ SCTP_STATE_SHUTDOWN_PENDING;
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+
+ } else {
+ asoc->state = SCTP_STATE_OPEN;
+ }
+ sctp_stop_all_cookie_timers(stcb);
+ sctp_send_cookie_ack(stcb);
+ return (stcb);
+ }
+ if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
+ ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
+ cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
+ cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
+ cookie->tie_tag_peer_vtag != 0) {
+ struct sctpasochead *head;
+
+ /*
+ * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
+ */
+ sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
+ *sac_assoc_id = sctp_get_associd(stcb);
+ /* notify upper layer */
+ *notification = SCTP_NOTIFY_ASSOC_RESTART;
+
+
+ /* send up all the data */
+ sctp_report_all_outbound(stcb);
+
+ /* process the INIT-ACK info (my info) */
+ asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
+ asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
+
+ /* pull from vtag hash */
+ LIST_REMOVE(stcb, sctp_asocs);
+ /* re-insert to new vtag position */
+ head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
+ sctppcbinfo.hashasocmark)];
+ /*
+ * put it in the bucket in the vtag hash of assoc's for the
+ * system
+ */
+ LIST_INSERT_HEAD(head, stcb, sctp_asocs);
+
+ /* Is this the first restart? */
+ if (stcb->asoc.in_restart_hash == 0) {
+ /* Ok add it to assoc_id vtag hash */
+ head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
+ sctppcbinfo.hashrestartmark)];
+ LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash);
+ stcb->asoc.in_restart_hash = 1;
+ }
+ asoc->pre_open_streams =
+ ntohs(initack_cp->init.num_outbound_streams);
+ asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
+ asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
+
+ asoc->last_cwr_tsn = asoc->init_seq_number - 1;
+ asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
+
+ asoc->str_reset_seq_in = asoc->init_seq_number;
+
+ asoc->advanced_peer_ack_point = asoc->last_acked_seq;
+ if (asoc->mapping_array)
+ memset(asoc->mapping_array, 0,
+ asoc->mapping_array_size);
+ /* process the INIT info (peer's info) */
+ retval = sctp_process_init(init_cp, stcb, net);
+ if (retval < 0) {
+ return (NULL);
+ }
+ /*
+ * since we did not send a HB make sure we don't double
+ * things
+ */
+ net->hb_responded = 1;
+
+ if (sctp_load_addresses_from_init(stcb, m, iphlen,
+ init_offset + sizeof(struct sctp_init_chunk),
+ initack_offset, sh, init_src)) {
+ return (NULL);
+ }
+ if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
+ asoc->state = SCTP_STATE_OPEN |
+ SCTP_STATE_SHUTDOWN_PENDING;
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+
+ } else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
+ /* move to OPEN state, if not in SHUTDOWN_SENT */
+ asoc->state = SCTP_STATE_OPEN;
+ }
+ /* respond with a COOKIE-ACK */
+ sctp_stop_all_cookie_timers(stcb);
+ sctp_send_cookie_ack(stcb);
+
+ return (stcb);
+ }
+	/* if we are not a restart we need the assoc_id field populated */
+ asoc->assoc_id = ntohl(initack_cp->init.initiate_tag);
+
+ /* all other cases... */
+ return (NULL);
+}
+
+/*
+ * handle a state cookie for a new association
+ * m: input packet mbuf chain -- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk
+ * note: this is a "split" mbuf and the cookie signature does not exist
+ * offset: offset into mbuf to the cookie-echo chunk
+ * length: length of the cookie chunk
+ * to: where the init was from
+ * returns a new TCB
+ */
+static struct sctp_tcb *
+sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
+ struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
+ struct sctp_inpcb *inp, struct sctp_nets **netp,
+ struct sockaddr *init_src, int *notification,
+ int auth_skipped, uint32_t auth_offset, uint32_t auth_len)
+{
+ struct sctp_tcb *stcb;
+ struct sctp_init_chunk *init_cp, init_buf;
+ struct sctp_init_ack_chunk *initack_cp, initack_buf;
+ struct sockaddr_storage sa_store;
+ struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
+ struct sockaddr_in *sin;
+ struct sockaddr_in6 *sin6;
+ struct sctp_association *asoc;
+ int chk_length;
+ int init_offset, initack_offset, initack_limit;
+ int retval;
+ int error = 0;
+ uint32_t old_tag;
+ uint8_t chunk_buf[DEFAULT_CHUNK_BUFFER];
+
+	/*
+	 * find and validate the INIT chunk in the cookie (peer's info); the
+	 * INIT should start after the cookie-echo header struct (chunk
+	 * header, state cookie header struct)
+	 */
+ init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
+ init_cp = (struct sctp_init_chunk *)
+ sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
+ (uint8_t *) & init_buf);
+ if (init_cp == NULL) {
+ /* could not pull a INIT chunk in cookie */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
+ printf("process_cookie_new: could not pull INIT chunk hdr\n");
+ }
+#endif /* SCTP_DEBUG */
+ return (NULL);
+ }
+ chk_length = ntohs(init_cp->ch.chunk_length);
+ if (init_cp->ch.chunk_type != SCTP_INITIATION) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
+ printf("HUH? process_cookie_new: could not find INIT chunk!\n");
+ }
+#endif /* SCTP_DEBUG */
+ return (NULL);
+ }
+ initack_offset = init_offset + SCTP_SIZE32(chk_length);
+	/*
+	 * find and validate the INIT-ACK chunk in the cookie (my info); the
+	 * INIT-ACK follows the INIT chunk
+	 */
+ initack_cp = (struct sctp_init_ack_chunk *)
+ sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
+ (uint8_t *) & initack_buf);
+ if (initack_cp == NULL) {
+ /* could not pull INIT-ACK chunk in cookie */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
+ printf("process_cookie_new: could not pull INIT-ACK chunk hdr\n");
+ }
+#endif /* SCTP_DEBUG */
+ return (NULL);
+ }
+ chk_length = ntohs(initack_cp->ch.chunk_length);
+ if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
+ return (NULL);
+ }
+ /*
+ * NOTE: We can't use the INIT_ACK's chk_length to determine the
+ * "initack_limit" value. This is because the chk_length field
+ * includes the length of the cookie, but the cookie is omitted when
+ * the INIT and INIT_ACK are tacked onto the cookie...
+ */
+ initack_limit = offset + cookie_len;
+
+	/*
+	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
+	 * and populate
+	 */
+ stcb = sctp_aloc_assoc(inp, init_src, 0, &error,
+ ntohl(initack_cp->init.initiate_tag));
+ if (stcb == NULL) {
+ struct mbuf *op_err;
+
+ /* memory problem? */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
+ printf("process_cookie_new: no room for another TCB!\n");
+ }
+#endif /* SCTP_DEBUG */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
+ sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
+ sh, op_err);
+ return (NULL);
+ }
+ /* get the correct sctp_nets */
+ *netp = sctp_findnet(stcb, init_src);
+ asoc = &stcb->asoc;
+ /* get scope variables out of cookie */
+ asoc->ipv4_local_scope = cookie->ipv4_scope;
+ asoc->site_scope = cookie->site_scope;
+ asoc->local_scope = cookie->local_scope;
+ asoc->loopback_scope = cookie->loopback_scope;
+
+ if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
+ (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
+ struct mbuf *op_err;
+
+ /*
+ * Houston we have a problem. The EP changed while the
+ * cookie was in flight. Only recourse is to abort the
+ * association.
+ */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
+ sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
+ sh, op_err);
+ return (NULL);
+ }
+ /* process the INIT-ACK info (my info) */
+ old_tag = asoc->my_vtag;
+ asoc->assoc_id = asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
+ asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
+ asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
+ asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
+ asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
+ asoc->last_cwr_tsn = asoc->init_seq_number - 1;
+ asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
+ asoc->str_reset_seq_in = asoc->init_seq_number;
+
+ asoc->advanced_peer_ack_point = asoc->last_acked_seq;
+
+ /* process the INIT info (peer's info) */
+ retval = sctp_process_init(init_cp, stcb, *netp);
+ if (retval < 0) {
+ sctp_free_assoc(inp, stcb, 0);
+ return (NULL);
+ }
+ /* load all addresses */
+ if (sctp_load_addresses_from_init(stcb, m, iphlen,
+ init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
+ init_src)) {
+ sctp_free_assoc(inp, stcb, 0);
+ return (NULL);
+ }
+ /*
+ * verify any preceding AUTH chunk that was skipped
+ */
+ /* pull the local authentication parameters from the cookie/init-ack */
+ sctp_auth_get_cookie_params(stcb, m,
+ initack_offset + sizeof(struct sctp_init_ack_chunk),
+ initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
+ if (auth_skipped) {
+ struct sctp_auth_chunk *auth;
+
+ auth = (struct sctp_auth_chunk *)
+ sctp_m_getptr(m, auth_offset, auth_len, chunk_buf);
+ if (sctp_handle_auth(stcb, auth, m, auth_offset)) {
+ /* auth HMAC failed, dump the assoc and packet */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_AUTH1)
+ printf("COOKIE-ECHO: AUTH failed\n");
+#endif /* SCTP_DEBUG */
+ sctp_free_assoc(inp, stcb, 0);
+ return (NULL);
+ } else {
+ /* remaining chunks checked... good to go */
+ stcb->asoc.authenticated = 1;
+ }
+ }
+ /* update current state */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
+ printf("moving to OPEN state\n");
+ }
+#endif
+ if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
+ asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ } else {
+ asoc->state = SCTP_STATE_OPEN;
+ }
+ SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
+ SCTP_STAT_INCR_GAUGE32(sctps_currestab);
+ sctp_stop_all_cookie_timers(stcb);
+ /* calculate the RTT */
+ (*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
+ &cookie->time_entered);
+
+	/*
+	 * if we're doing ASCONFs, check to see if we have any new local
+	 * addresses that need to get added to the peer (e.g. addresses
+	 * changed while the cookie echo was in flight).  This needs to be
+	 * done after we go to the OPEN state to do the correct asconf
+	 * processing.  Otherwise, make sure we have the correct addresses
+	 * in our lists.
+	 */
+
+ /* warning, we re-use sin, sin6, sa_store here! */
+ /* pull in local_address (our "from" address) */
+ if (cookie->laddr_type == SCTP_IPV4_ADDRESS) {
+ /* source addr is IPv4 */
+ sin = (struct sockaddr_in *)initack_src;
+ memset(sin, 0, sizeof(*sin));
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(struct sockaddr_in);
+ sin->sin_addr.s_addr = cookie->laddress[0];
+ } else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) {
+ /* source addr is IPv6 */
+ sin6 = (struct sockaddr_in6 *)initack_src;
+ memset(sin6, 0, sizeof(*sin6));
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+ sin6->sin6_scope_id = cookie->scope_id;
+ memcpy(&sin6->sin6_addr, cookie->laddress,
+ sizeof(sin6->sin6_addr));
+ } else {
+ sctp_free_assoc(inp, stcb, 0);
+ return (NULL);
+ }
+
+ sctp_check_address_list(stcb, m,
+ initack_offset + sizeof(struct sctp_init_ack_chunk),
+ initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
+ initack_src, cookie->local_scope, cookie->site_scope,
+ cookie->ipv4_scope, cookie->loopback_scope);
+
+
+ /* set up to notify upper layer */
+ *notification = SCTP_NOTIFY_ASSOC_UP;
+ if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
+ (inp->sctp_socket->so_qlimit == 0)) {
+		/*
+		 * This is an endpoint that called connect(); how it got a
+		 * cookie that is NEW is a bit of a mystery.  It must be
+		 * that the INIT was sent, but before it got there... a
+		 * complete INIT/INIT-ACK/COOKIE arrived.  But of course
+		 * then it should have gone to the other code... not
+		 * here... oh well... a bit of protection is worth having...
+		 */
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+ soisconnected(stcb->sctp_ep->sctp_socket);
+ } else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (inp->sctp_socket->so_qlimit)) {
+		/*
+		 * We don't want to do anything with this one, since it is
+		 * the listening guy.  The timer will get started for
+		 * accepted connections in the caller.
+		 */
+ ;
+ }
+ /* since we did not send a HB make sure we don't double things */
+ (*netp)->hb_responded = 1;
+
+ if (stcb->asoc.sctp_autoclose_ticks &&
+ sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
+ }
+ /* respond with a COOKIE-ACK */
+ sctp_send_cookie_ack(stcb);
+ return (stcb);
+}
+
+
+/*
+ * handles a COOKIE-ECHO message
+ * stcb: modified to either a new TCB or left as the existing (non-NULL) TCB
+ */
+static struct mbuf *
+sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
+ struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
+ struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
+ int auth_skipped, uint32_t auth_offset, uint32_t auth_len)
+{
+ struct sctp_state_cookie *cookie;
+ struct sockaddr_in6 sin6;
+ struct sockaddr_in sin;
+ struct sctp_tcb *l_stcb = *stcb;
+ struct sctp_inpcb *l_inp;
+ struct sockaddr *to;
+ sctp_assoc_t sac_restart_id;
+ struct sctp_pcb *ep;
+ struct mbuf *m_sig;
+ uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
+ uint8_t *sig;
+ uint8_t cookie_ok = 0;
+ unsigned int size_of_pkt, sig_offset, cookie_offset;
+ unsigned int cookie_len;
+ struct timeval now;
+ struct timeval time_expires;
+ struct sockaddr_storage dest_store;
+ struct sockaddr *localep_sa = (struct sockaddr *)&dest_store;
+ struct ip *iph;
+ int notification = 0;
+ struct sctp_nets *netl;
+ int had_a_existing_tcb = 0;
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT2) {
+ printf("sctp_handle_cookie: handling COOKIE-ECHO\n");
+ }
+#endif
+
+ if (inp_p == NULL) {
+ return (NULL);
+ }
+ /* First get the destination address setup too. */
+ iph = mtod(m, struct ip *);
+ if (iph->ip_v == IPVERSION) {
+		/* it's IPv4 */
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)(localep_sa);
+ memset(sin, 0, sizeof(*sin));
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(*sin);
+ sin->sin_port = sh->dest_port;
+ sin->sin_addr.s_addr = iph->ip_dst.s_addr;
+ } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
+		/* it's IPv6 */
+ struct ip6_hdr *ip6;
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)(localep_sa);
+ memset(sin6, 0, sizeof(*sin6));
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+ ip6 = mtod(m, struct ip6_hdr *);
+ sin6->sin6_port = sh->dest_port;
+ sin6->sin6_addr = ip6->ip6_dst;
+ } else {
+ return (NULL);
+ }
+
+ cookie = &cp->cookie;
+ cookie_offset = offset + sizeof(struct sctp_chunkhdr);
+ cookie_len = ntohs(cp->ch.chunk_length);
+
+	if ((cookie->peerport != sh->src_port) ||
+	    (cookie->myport != sh->dest_port) ||
+	    (cookie->my_vtag != sh->v_tag)) {
+		/*
+		 * invalid ports or bad tag.  Note that we always leave the
+		 * v_tag in the header in network order and when we stored
+		 * it in the my_vtag slot we also left it in network order.
+		 * This maintains the match even though it may be in the
+		 * opposite byte order of the machine :->
+		 */
+ return (NULL);
+ }
+ /* compute size of packet */
+ if (m->m_flags & M_PKTHDR) {
+ size_of_pkt = m->m_pkthdr.len;
+ } else {
+ /* Should have a pkt hdr really */
+ struct mbuf *mat;
+
+ mat = m;
+ size_of_pkt = 0;
+ while (mat != NULL) {
+ size_of_pkt += mat->m_len;
+ mat = mat->m_next;
+ }
+ }
+ if (cookie_len > size_of_pkt ||
+ cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
+ sizeof(struct sctp_init_chunk) +
+ sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
+ /* cookie too long! or too small */
+ return (NULL);
+ }
+ /*
+ * split off the signature into its own mbuf (since it should not be
+ * calculated in the sctp_hmac_m() call).
+ */
+ sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
+ if (sig_offset > size_of_pkt) {
+ /* packet not correct size! */
+ /* XXX this may already be accounted for earlier... */
+ return (NULL);
+ }
+ m_sig = m_split(m, sig_offset, M_DONTWAIT);
+ if (m_sig == NULL) {
+ /* out of memory or ?? */
+ return (NULL);
+ }
+ /*
+ * compute the signature/digest for the cookie
+ */
+ ep = &(*inp_p)->sctp_ep;
+ l_inp = *inp_p;
+ if (l_stcb) {
+ SCTP_TCB_UNLOCK(l_stcb);
+ }
+ SCTP_INP_RLOCK(l_inp);
+ if (l_stcb) {
+ SCTP_TCB_LOCK(l_stcb);
+ }
+ /* which cookie is it? */
+ if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
+ (ep->current_secret_number != ep->last_secret_number)) {
+ /* it's the old cookie */
+ sctp_hmac_m(SCTP_HMAC,
+ (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
+ SCTP_SECRET_SIZE, m, cookie_offset, calc_sig);
+ } else {
+ /* it's the current cookie */
+ sctp_hmac_m(SCTP_HMAC,
+ (uint8_t *) ep->secret_key[(int)ep->current_secret_number],
+ SCTP_SECRET_SIZE, m, cookie_offset, calc_sig);
+ }
+ /* get the signature */
+ SCTP_INP_RUNLOCK(l_inp);
+ sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
+ if (sig == NULL) {
+ /* couldn't find signature */
+ return (NULL);
+ }
+ /* compare the received digest with the computed digest */
+ if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
+ /* try the old cookie? */
+ if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
+ (ep->current_secret_number != ep->last_secret_number)) {
+ /* compute digest with old */
+ sctp_hmac_m(SCTP_HMAC,
+ (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
+ SCTP_SECRET_SIZE, m, cookie_offset, calc_sig);
+ /* compare */
+ if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
+ cookie_ok = 1;
+ }
+ } else {
+ cookie_ok = 1;
+ }
+
+ /*
+ * Now before we continue we must reconstruct our mbuf so that
+ * normal processing of any other chunks will work.
+ */
+ {
+ struct mbuf *m_at;
+
+ m_at = m;
+ while (m_at->m_next != NULL) {
+ m_at = m_at->m_next;
+ }
+ m_at->m_next = m_sig;
+ if (m->m_flags & M_PKTHDR) {
+			/*
+			 * We should do this if and only if the front mbuf
+			 * has an m_pkthdr... it should in theory.
+			 */
+ if (m_sig->m_flags & M_PKTHDR) {
+ /* Add back to the pkt hdr of main m chain */
+ m->m_pkthdr.len += m_sig->m_pkthdr.len;
+ } else {
+ /*
+ * Got a problem, no pkthdr in split chain.
+ * TSNH but we will handle it just in case
+ */
+ int mmlen = 0;
+ struct mbuf *lat;
+
+ printf("Warning: Hitting m_split join TSNH code - fixed\n");
+ lat = m_sig;
+ while (lat) {
+ mmlen += lat->m_len;
+ lat = lat->m_next;
+ }
+ m->m_pkthdr.len += mmlen;
+ }
+ }
+ }
+
+ if (cookie_ok == 0) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT2) {
+ printf("handle_cookie_echo: cookie signature validation failed!\n");
+ printf("offset = %u, cookie_offset = %u, sig_offset = %u\n",
+ (uint32_t) offset, cookie_offset, sig_offset);
+ }
+#endif
+ return (NULL);
+ }
+ /*
+ * check the cookie timestamps to be sure it's not stale
+ */
+ SCTP_GETTIME_TIMEVAL(&now);
+	/* expire time = cookie entry time + cookie lifetime */
+ time_expires.tv_sec = cookie->time_entered.tv_sec + cookie->cookie_life;
+ time_expires.tv_usec = cookie->time_entered.tv_usec;
+ if (timevalcmp(&now, &time_expires, >)) {
+ /* cookie is stale! */
+ struct mbuf *op_err;
+ struct sctp_stale_cookie_msg *scm;
+ uint32_t tim;
+
+ op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg),
+ 1, M_DONTWAIT, 1, MT_DATA);
+ if (op_err == NULL) {
+ /* FOOBAR */
+ return (NULL);
+ }
+ /* pre-reserve some space */
+ op_err->m_data += sizeof(struct ip6_hdr);
+ op_err->m_data += sizeof(struct sctphdr);
+ op_err->m_data += sizeof(struct sctp_chunkhdr);
+
+ /* Set the len */
+ op_err->m_len = op_err->m_pkthdr.len = sizeof(struct sctp_stale_cookie_msg);
+ scm = mtod(op_err, struct sctp_stale_cookie_msg *);
+ scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE);
+ scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) +
+ (sizeof(uint32_t))));
+ /* seconds to usec */
+ tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
+ /* add in usec */
+ if (tim == 0)
+ tim = now.tv_usec - cookie->time_entered.tv_usec;
+ scm->time_usec = htonl(tim);
+ sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag);
+ return (NULL);
+ }
+	/*
+	 * Now we must see, with the lookup address, if we have an existing
+	 * asoc.  This will only happen if we were in the COOKIE-WAIT state
+	 * and an INIT collided with us and somewhere the peer sent the
+	 * cookie on another address besides the single address our assoc
+	 * had for him.  In this case we will have one of the tie-tags set
+	 * at least AND the address field in the cookie can be used to look
+	 * it up.
+	 */
+ to = NULL;
+ if (cookie->addr_type == SCTP_IPV6_ADDRESS) {
+ memset(&sin6, 0, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_len = sizeof(sin6);
+ sin6.sin6_port = sh->src_port;
+ sin6.sin6_scope_id = cookie->scope_id;
+ memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
+ sizeof(sin6.sin6_addr.s6_addr));
+ to = (struct sockaddr *)&sin6;
+ } else if (cookie->addr_type == SCTP_IPV4_ADDRESS) {
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_len = sizeof(sin);
+ sin.sin_port = sh->src_port;
+ sin.sin_addr.s_addr = cookie->address[0];
+ to = (struct sockaddr *)&sin;
+ }
+ if ((*stcb == NULL) && to) {
+ /* Yep, lets check */
+ *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL);
+ if (*stcb == NULL) {
+ /*
+ * We should have only got back the same inp. If we
+ * got back a different ep we have a problem. The
+ * original findep got back l_inp and now
+ */
+ if (l_inp != *inp_p) {
+ printf("Bad problem find_ep got a diff inp then special_locate?\n");
+ }
+ }
+ }
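+	/* the signature was split off above and is not part of the cookie */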
+ cookie_len -= SCTP_SIGNATURE_SIZE;
+ if (*stcb == NULL) {
+ /* this is the "normal" case... get a new TCB */
+ *stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie,
+ cookie_len, *inp_p, netp, to, &notification,
+ auth_skipped, auth_offset, auth_len);
+ } else {
+ /* this is abnormal... cookie-echo on existing TCB */
+ had_a_existing_tcb = 1;
+ *stcb = sctp_process_cookie_existing(m, iphlen, offset, sh,
+ cookie, cookie_len, *inp_p, *stcb, *netp, to, &notification,
+ &sac_restart_id);
+ }
+
+ if (*stcb == NULL) {
+ /* still no TCB... must be bad cookie-echo */
+ return (NULL);
+ }
+ /*
+ * Ok, we built an association so confirm the address we sent the
+ * INIT-ACK to.
+ */
+ netl = sctp_findnet(*stcb, to);
+	/*
+	 * This code should in theory NOT run, but we guard against it anyway.
+	 */
+ if (netl == NULL) {
+ /* TSNH! Huh, why do I need to add this address here? */
+ int ret;
+
+ ret = sctp_add_remote_addr(*stcb, to, 0, 100);
+ netl = sctp_findnet(*stcb, to);
+ }
+ if (netl) {
+ if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
+ netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
+ sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
+ netl);
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
+ (*stcb), 0, (void *)netl);
+ }
+ }
+ if (*stcb) {
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p,
+ *stcb, NULL);
+ }
+ if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+ if (!had_a_existing_tcb ||
+ (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
+ /*
+ * If we have a NEW cookie or the connect never
+ * reached the connected state during collision we
+ * must do the TCP accept thing.
+ */
+ struct socket *so, *oso;
+ struct sctp_inpcb *inp;
+
+ if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
+ /*
+ * For a restart we will keep the same
+ * socket, no need to do anything. I THINK!!
+ */
+ sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id);
+ return (m);
+ }
+ oso = (*inp_p)->sctp_socket;
+			/*
+			 * We do this to keep the sockets side happy during
+			 * the sonewconn call ONLY.
+			 */
+ NET_LOCK_GIANT();
+ SCTP_TCB_UNLOCK((*stcb));
+			so = sonewconn(oso, SS_ISCONNECTED);
+ NET_UNLOCK_GIANT();
+ SCTP_INP_WLOCK((*stcb)->sctp_ep);
+ SCTP_TCB_LOCK((*stcb));
+ SCTP_INP_WUNLOCK((*stcb)->sctp_ep);
+ if (so == NULL) {
+ struct mbuf *op_err;
+
+ /* Too many sockets */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
+ printf("process_cookie_new: no room for another socket!\n");
+ }
+#endif /* SCTP_DEBUG */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
+ sctp_abort_association(*inp_p, NULL, m, iphlen,
+ sh, op_err);
+ sctp_free_assoc(*inp_p, *stcb, 0);
+ return (NULL);
+ }
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
+ SCTP_PCB_FLAGS_CONNECTED |
+ SCTP_PCB_FLAGS_IN_TCPPOOL |
+ (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
+ SCTP_PCB_FLAGS_DONT_WAKE);
+ inp->sctp_features = (*inp_p)->sctp_features;
+ inp->sctp_socket = so;
+ inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
+ inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
+ inp->sctp_context = (*inp_p)->sctp_context;
+ inp->inp_starting_point_for_iterator = NULL;
+ /*
+ * copy in the authentication parameters from the
+ * original endpoint
+ */
+ if (inp->sctp_ep.local_hmacs)
+ sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
+ inp->sctp_ep.local_hmacs =
+ sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
+ if (inp->sctp_ep.local_auth_chunks)
+ sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
+ inp->sctp_ep.local_auth_chunks =
+ sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
+ (void)sctp_copy_skeylist(&(*inp_p)->sctp_ep.shared_keys,
+ &inp->sctp_ep.shared_keys);
+
+ /*
+ * Now we must move it from one hash table to
+ * another and get the tcb in the right place.
+ */
+ sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
+
+ sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb);
+
+ /* Switch over to the new guy */
+ *inp_p = inp;
+
+ sctp_ulp_notify(notification, *stcb, 0, NULL);
+ return (m);
+ }
+ }
+ if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
+ sctp_ulp_notify(notification, *stcb, 0, NULL);
+ }
+ return (m);
+}
+
+static void
+sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp,
+ struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ /* cp must not be used, others call this without a c-ack :-) */
+ struct sctp_association *asoc;
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT2) {
+ printf("sctp_handle_cookie_ack: handling COOKIE-ACK\n");
+ }
+#endif
+ if (stcb == NULL)
+ return;
+
+ asoc = &stcb->asoc;
+
+ sctp_stop_all_cookie_timers(stcb);
+ /* process according to association state */
+ if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
+		/* state change only needed when I am in the right state */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT2) {
+ printf("moving to OPEN state\n");
+ }
+#endif
+ if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
+ asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+
+ } else {
+ asoc->state = SCTP_STATE_OPEN;
+ }
+ /* update RTO */
+ SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
+ SCTP_STAT_INCR_GAUGE32(sctps_currestab);
+ if (asoc->overall_error_count == 0) {
+ net->RTO = sctp_calculate_rto(stcb, asoc, net,
+ &asoc->time_entered);
+ }
+ SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
+ sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL);
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+ soisconnected(stcb->sctp_ep->sctp_socket);
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
+ stcb, net);
+ /*
+		 * since we did not send an HB, make sure we don't double
+ * things
+ */
+ net->hb_responded = 1;
+
+ if (stcb->asoc.sctp_autoclose_ticks &&
+ sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
+ stcb->sctp_ep, stcb, NULL);
+ }
+ /*
+		 * set ASCONF timer if ASCONFs are pending and allowed (e.g.
+		 * addresses changed while INIT/COOKIE-ECHO were in flight)
+ */
+ if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
+ (stcb->asoc.peer_supports_asconf) &&
+ (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
+ stcb->sctp_ep, stcb,
+ stcb->asoc.primary_destination);
+ }
+ }
+ /* Toss the cookie if I can */
+ sctp_toss_old_cookies(stcb, asoc);
+ if (!TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* Restart the timer if we have pending data */
+ struct sctp_tmit_chunk *chk;
+
+ chk = TAILQ_FIRST(&asoc->sent_queue);
+ if (chk) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, chk->whoTo);
+ }
+ }
+}
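+/*
+ * A worked example of the RTO sampling above, assuming the standard
+ * first-measurement rules (RFC 2960, section 6.3.1): if the round trip
+ * from our INIT to this COOKIE-ACK measured R = 120 ms, then
+ * SRTT = R = 120 ms, RTTVAR = R / 2 = 60 ms, and
+ * RTO = SRTT + 4 * RTTVAR = 360 ms, clamped to [RTO.Min, RTO.Max].
+ * sctp_calculate_rto() performs this update from asoc->time_entered,
+ * and only when no errors have been seen on the association.
+ */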
+
+static void
+sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
+ struct sctp_tcb *stcb)
+{
+ struct sctp_nets *net;
+ struct sctp_tmit_chunk *lchk;
+ uint32_t tsn;
+
+ if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) {
+ return;
+ }
+ SCTP_STAT_INCR(sctps_recvecne);
+ tsn = ntohl(cp->tsn);
+ /* ECN Nonce stuff: need a resync and disable the nonce sum check */
+ /* Also we make sure we disable the nonce_wait */
+ lchk = TAILQ_FIRST(&stcb->asoc.send_queue);
+ if (lchk == NULL) {
+ stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
+ } else {
+ stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq;
+ }
+ stcb->asoc.nonce_wait_for_ecne = 0;
+ stcb->asoc.nonce_sum_check = 0;
+
+ /* Find where it was sent, if possible */
+ net = NULL;
+ lchk = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ while (lchk) {
+ if (lchk->rec.data.TSN_seq == tsn) {
+ net = lchk->whoTo;
+ break;
+ }
+ if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ))
+ break;
+ lchk = TAILQ_NEXT(lchk, sctp_next);
+ }
+ if (net == NULL)
+ /* default is we use the primary */
+ net = stcb->asoc.primary_destination;
+
+ if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) {
+#ifdef SCTP_CWND_MONITOR
+ int old_cwnd;
+
+ old_cwnd = net->cwnd;
+#endif
+ SCTP_STAT_INCR(sctps_ecnereducedcwnd);
+ net->ssthresh = net->cwnd / 2;
+ if (net->ssthresh < net->mtu) {
+ net->ssthresh = net->mtu;
+ /* here back off the timer as well, to slow us down */
+ net->RTO <<= 2;
+ }
+ net->cwnd = net->ssthresh;
+#ifdef SCTP_CWND_MONITOR
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
+#endif
+ /*
+ * we reduce once every RTT. So we will only lower cwnd at
+ * the next sending seq i.e. the resync_tsn.
+ */
+ stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn;
+ }
+	/*
+	 * We always send a CWR this way: if our previous one was lost, our
+	 * peer will get an update; and if it is not yet time to reduce
+	 * again, the peer still gets the CWR.
+	 */
+ sctp_send_cwr(stcb, net, tsn);
+}
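+/*
+ * Worked example of the ECN-Echo response above (illustrative numbers):
+ * with cwnd = 20000 bytes and mtu = 1500, an ECNE halves ssthresh to
+ * 10000 and sets cwnd = ssthresh.  Only if cwnd / 2 fell below one MTU
+ * would ssthresh be clamped to the MTU and the RTO backed off
+ * (RTO <<= 2).  The reduction is rate-limited to once per window of
+ * data via last_cwr_tsn.
+ */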
+
+static void
+sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb)
+{
+ /*
+ * Here we get a CWR from the peer. We must look in the outqueue and
+	 * make sure that we have a covered ECNE in the control chunk part.
+ * If so remove it.
+ */
+ struct sctp_tmit_chunk *chk;
+ struct sctp_ecne_chunk *ecne;
+
+ TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
+ if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
+ continue;
+ }
+ /*
+ * Look for and remove if it is the right TSN. Since there
+ * is only ONE ECNE on the control queue at any one time we
+ * don't need to worry about more than one!
+ */
+ ecne = mtod(chk->data, struct sctp_ecne_chunk *);
+ if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn),
+ MAX_TSN) || (cp->tsn == ecne->tsn)) {
+ /* this covers this ECNE, we can remove it */
+ stcb->asoc.ecn_echo_cnt_onq--;
+ TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
+ sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ stcb->asoc.ctrl_queue_cnt--;
+ sctp_free_remote_addr(chk->whoTo);
+ sctp_free_a_chunk(stcb, chk);
+ break;
+ }
+ }
+}
+
+static void
+sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
+ struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ struct sctp_association *asoc;
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT2) {
+ printf("sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
+ }
+#endif
+ if (stcb == NULL)
+ return;
+
+ asoc = &stcb->asoc;
+ /* process according to association state */
+ if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
+ /* unexpected SHUTDOWN-COMPLETE... so ignore... */
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ /* notify upper layer protocol */
+ if (stcb->sctp_socket) {
+ sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL);
+ /* are the queues empty? they should be */
+ if (!TAILQ_EMPTY(&asoc->send_queue) ||
+ !TAILQ_EMPTY(&asoc->sent_queue) ||
+ !TAILQ_EMPTY(&asoc->out_wheel)) {
+ sctp_report_all_outbound(stcb);
+ }
+ }
+ /* stop the timer */
+ sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net);
+ SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
+ /* free the TCB */
+ sctp_free_assoc(stcb->sctp_ep, stcb, 0);
+ return;
+}
+
+static int
+process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
+ struct sctp_nets *net, uint8_t flg)
+{
+ switch (desc->chunk_type) {
+ case SCTP_DATA:
+		/* find the TSN to resend (possibly) */
+ {
+ uint32_t tsn;
+ struct sctp_tmit_chunk *tp1;
+
+ tsn = ntohl(desc->tsn_ifany);
+ tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ while (tp1) {
+ if (tp1->rec.data.TSN_seq == tsn) {
+ /* found it */
+ break;
+ }
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn,
+ MAX_TSN)) {
+ /* not found */
+ tp1 = NULL;
+ break;
+ }
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ }
+ if (tp1 == NULL) {
+ /*
+					 * Do it the other way, i.e. without
+					 * paying attention to queue seq order.
+ */
+ SCTP_STAT_INCR(sctps_pdrpdnfnd);
+ tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ while (tp1) {
+ if (tp1->rec.data.TSN_seq == tsn) {
+ /* found it */
+ break;
+ }
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ }
+ }
+ if (tp1 == NULL) {
+ SCTP_STAT_INCR(sctps_pdrptsnnf);
+ }
+ if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
+ uint8_t *ddp;
+
+ if ((stcb->asoc.peers_rwnd == 0) &&
+ ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
+ SCTP_STAT_INCR(sctps_pdrpdiwnp);
+ return (0);
+ }
+ if (stcb->asoc.peers_rwnd == 0 &&
+ (flg & SCTP_FROM_MIDDLE_BOX)) {
+ SCTP_STAT_INCR(sctps_pdrpdizrw);
+ return (0);
+ }
+ ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
+ sizeof(struct sctp_data_chunk));
+ {
+ unsigned int iii;
+
+ for (iii = 0; iii < sizeof(desc->data_bytes);
+ iii++) {
+ if (ddp[iii] != desc->data_bytes[iii]) {
+ SCTP_STAT_INCR(sctps_pdrpbadd);
+ return (-1);
+ }
+ }
+ }
+ /*
+					 * We zero out the nonce so a resync
+					 * is not needed
+ */
+ tp1->rec.data.ect_nonce = 0;
+
+ if (tp1->do_rtt) {
+ /*
+					 * this guy had an RTO calculation
+ * pending on it, cancel it
+ */
+ tp1->whoTo->rto_pending = 0;
+ tp1->do_rtt = 0;
+ }
+ SCTP_STAT_INCR(sctps_pdrpmark);
+ if (tp1->sent != SCTP_DATAGRAM_RESEND)
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ tp1->sent = SCTP_DATAGRAM_RESEND;
+ /*
+ * mark it as if we were doing a FR, since
+ * we will be getting gap ack reports behind
+ * the info from the router.
+ */
+ tp1->rec.data.doing_fast_retransmit = 1;
+ /*
+ * mark the tsn with what sequences can
+ * cause a new FR.
+ */
+ if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
+ tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
+ } else {
+ tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
+ }
+
+ /* restart the timer */
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, tp1->whoTo);
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, tp1->whoTo);
+
+ /* fix counts and things */
+ if (tp1->whoTo->flight_size >= tp1->book_size)
+ tp1->whoTo->flight_size -= tp1->book_size;
+ else
+ tp1->whoTo->flight_size = 0;
+
+ if (stcb->asoc.total_flight >= tp1->book_size) {
+ stcb->asoc.total_flight -= tp1->book_size;
+ if (stcb->asoc.total_flight_count > 0)
+ stcb->asoc.total_flight_count--;
+ } else {
+ stcb->asoc.total_flight = 0;
+ stcb->asoc.total_flight_count = 0;
+ }
+ tp1->snd_count--;
+			}
+			{
+ /* audit code */
+ unsigned int audit;
+
+ audit = 0;
+ TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
+ if (tp1->sent == SCTP_DATAGRAM_RESEND)
+ audit++;
+ }
+ TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
+ sctp_next) {
+ if (tp1->sent == SCTP_DATAGRAM_RESEND)
+ audit++;
+ }
+ if (audit != stcb->asoc.sent_queue_retran_cnt) {
+ printf("**Local Audit finds cnt:%d asoc cnt:%d\n",
+ audit, stcb->asoc.sent_queue_retran_cnt);
+#ifndef SCTP_AUDITING_ENABLED
+ stcb->asoc.sent_queue_retran_cnt = audit;
+#endif
+ }
+ }
+ }
+ break;
+ case SCTP_ASCONF:
+ {
+ struct sctp_tmit_chunk *asconf;
+
+ TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
+ sctp_next) {
+ if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
+ break;
+ }
+ }
+ if (asconf) {
+ if (asconf->sent != SCTP_DATAGRAM_RESEND)
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ asconf->sent = SCTP_DATAGRAM_RESEND;
+ asconf->snd_count--;
+ }
+ }
+ break;
+ case SCTP_INITIATION:
+ /* resend the INIT */
+ stcb->asoc.dropped_special_cnt++;
+ if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
+ /*
+			 * If we can get it in within a few attempts we do
+			 * this; otherwise we let the timer fire.
+ */
+ sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
+ stcb, net);
+ sctp_send_initiate(stcb->sctp_ep, stcb);
+ }
+ break;
+ case SCTP_SELECTIVE_ACK:
+ /* resend the sack */
+ sctp_send_sack(stcb);
+ break;
+ case SCTP_HEARTBEAT_REQUEST:
+ /* resend a demand HB */
+ sctp_send_hb(stcb, 1, net);
+ break;
+ case SCTP_SHUTDOWN:
+ sctp_send_shutdown(stcb, net);
+ break;
+ case SCTP_SHUTDOWN_ACK:
+ sctp_send_shutdown_ack(stcb, net);
+ break;
+ case SCTP_COOKIE_ECHO:
+ {
+ struct sctp_tmit_chunk *cookie;
+
+ cookie = NULL;
+ TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
+ sctp_next) {
+ if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
+ break;
+ }
+ }
+ if (cookie) {
+ if (cookie->sent != SCTP_DATAGRAM_RESEND)
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ cookie->sent = SCTP_DATAGRAM_RESEND;
+ sctp_stop_all_cookie_timers(stcb);
+ }
+ }
+ break;
+ case SCTP_COOKIE_ACK:
+ sctp_send_cookie_ack(stcb);
+ break;
+ case SCTP_ASCONF_ACK:
+ /* resend last asconf ack */
+ sctp_send_asconf_ack(stcb, 1);
+ break;
+ case SCTP_FORWARD_CUM_TSN:
+ send_forward_tsn(stcb, &stcb->asoc);
+ break;
+ /* can't do anything with these */
+ case SCTP_PACKET_DROPPED:
+ case SCTP_INITIATION_ACK: /* this should not happen */
+ case SCTP_HEARTBEAT_ACK:
+ case SCTP_ABORT_ASSOCIATION:
+ case SCTP_OPERATION_ERROR:
+ case SCTP_SHUTDOWN_COMPLETE:
+ case SCTP_ECN_ECHO:
+ case SCTP_ECN_CWR:
+ default:
+ break;
+ }
+ return (0);
+}
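+/*
+ * compare_with_wrap(), used throughout the drop processing above,
+ * implements serial-number arithmetic (RFC 1982) over 32-bit TSNs.  A
+ * minimal standalone sketch of the idea, assuming the wrap constant is
+ * 0xffffffff (tsn_gt is a hypothetical name, not the kernel helper):
+ */
+#if 0
+#include <stdint.h>
+
+static int
+tsn_gt(uint32_t a, uint32_t b)
+{
+	/* "a > b" modulo 2^32; valid while a and b stay within half the space */
+	return (a != b && (uint32_t)(a - b) < 0x80000000U);
+}
+#endif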
+
+void
+sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
+{
+ int i;
+ uint16_t temp;
+
+ /*
+ * We set things to 0xffff since this is the last delivered sequence
+ * and we will be sending in 0 after the reset.
+ */
+
+ if (number_entries) {
+ for (i = 0; i < number_entries; i++) {
+ temp = ntohs(list[i]);
+ if (temp >= stcb->asoc.streamincnt) {
+ continue;
+ }
+ stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
+ }
+ } else {
+ list = NULL;
+ for (i = 0; i < stcb->asoc.streamincnt; i++) {
+ stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
+ }
+ }
+ sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list);
+}
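+/*
+ * Why 0xffff: stream sequence numbers are 16 bits, so parking
+ * last_sequence_delivered at 0xffff makes the next expected in-order
+ * SSN wrap to 0, which is what the peer restarts with after the reset:
+ */
+#if 0
+	uint16_t next_expected = (uint16_t)(0xffff + 1);	/* == 0 */
+#endif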
+
+static void
+sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
+{
+ int i;
+
+ if (number_entries == 0) {
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ stcb->asoc.strmout[i].next_sequence_sent = 0;
+ }
+ } else if (number_entries) {
+ for (i = 0; i < number_entries; i++) {
+ uint16_t temp;
+
+ temp = ntohs(list[i]);
+ if (temp >= stcb->asoc.streamoutcnt) {
+ /* no such stream */
+ continue;
+ }
+ stcb->asoc.strmout[temp].next_sequence_sent = 0;
+ }
+ }
+ sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list);
+}
+
+
+struct sctp_stream_reset_out_request *
+sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
+{
+ struct sctp_association *asoc;
+ struct sctp_stream_reset_out_req *req;
+ struct sctp_stream_reset_out_request *r;
+ struct sctp_tmit_chunk *chk;
+ int len, clen;
+
+ asoc = &stcb->asoc;
+ if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
+ return (NULL);
+ }
+ if (stcb->asoc.str_reset == NULL) {
+ return (NULL);
+ }
+ chk = stcb->asoc.str_reset;
+ if (chk->data == NULL) {
+ return (NULL);
+ }
+ if (bchk) {
+ /* he wants a copy of the chk pointer */
+ *bchk = chk;
+ }
+ clen = chk->send_size;
+ req = mtod(chk->data, struct sctp_stream_reset_out_req *);
+ r = &req->sr_req;
+ if (ntohl(r->request_seq) == seq) {
+ /* found it */
+ return (r);
+ }
+ len = SCTP_SIZE32(ntohs(r->ph.param_length));
+ if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
+ /* move to the next one, there can only be a max of two */
+ r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
+ if (ntohl(r->request_seq) == seq) {
+ return (r);
+ }
+ }
+ /* that seq is not here */
+ return (NULL);
+}
+
+static void
+sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
+{
+	struct sctp_association *asoc;
+	struct sctp_tmit_chunk *chk;
+
+	asoc = &stcb->asoc;
+	chk = stcb->asoc.str_reset;
+	if (chk == NULL) {
+		return;
+	}
+ sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
+ TAILQ_REMOVE(&asoc->control_send_queue,
+ chk,
+ sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ asoc->ctrl_queue_cnt--;
+ sctp_free_remote_addr(chk->whoTo);
+
+ sctp_free_a_chunk(stcb, chk);
+ stcb->asoc.str_reset = NULL;
+}
+
+
+static int
+sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
+ uint32_t seq, uint32_t action,
+ struct sctp_stream_reset_response *respin
+)
+{
+ uint16_t type;
+ int lparm_len;
+ struct sctp_association *asoc = &stcb->asoc;
+ struct sctp_tmit_chunk *chk;
+ struct sctp_stream_reset_out_request *srparam;
+ int number_entries;
+
+ if (asoc->stream_reset_outstanding == 0) {
+ /* duplicate */
+ return (0);
+ }
+ if (seq == stcb->asoc.str_reset_seq_out) {
+ srparam = sctp_find_stream_reset(stcb, seq, &chk);
+ if (srparam) {
+ stcb->asoc.str_reset_seq_out++;
+ type = ntohs(srparam->ph.param_type);
+ lparm_len = ntohs(srparam->ph.param_length);
+ number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
+ if (type == SCTP_STR_RESET_OUT_REQUEST) {
+ asoc->stream_reset_out_is_outstanding = 0;
+ if (asoc->stream_reset_outstanding)
+ asoc->stream_reset_outstanding--;
+ if (action == SCTP_STREAM_RESET_PERFORMED) {
+ /* do it */
+ sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
+ } else {
+ sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams);
+ }
+ } else if (type == SCTP_STR_RESET_IN_REQUEST) {
+ /* Answered my request */
+ if (asoc->stream_reset_outstanding)
+ asoc->stream_reset_outstanding--;
+ if (action != SCTP_STREAM_RESET_PERFORMED) {
+ sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams);
+ }
+ } else if (type == SCTP_STR_RESET_TSN_REQUEST) {
+ /**
+ * a) Adopt the new in tsn.
+ * b) reset the map
+ * c) Adopt the new out-tsn
+ */
+ struct sctp_stream_reset_response_tsn *resp;
+ struct sctp_forward_tsn_chunk fwdtsn;
+ int abort_flag = 0;
+
+ if (respin == NULL) {
+ /* huh ? */
+ return (0);
+ }
+ if (action == SCTP_STREAM_RESET_PERFORMED) {
+ resp = (struct sctp_stream_reset_response_tsn *)respin;
+ asoc->stream_reset_outstanding--;
+ fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
+ fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
+ fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
+ sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag);
+ if (abort_flag) {
+ return (1);
+ }
+ stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
+ stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
+ stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
+ memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
+ stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
+ stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
+
+ sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
+ sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
+
+ }
+ }
+ /* get rid of the request and get the request flags */
+ if (asoc->stream_reset_outstanding == 0) {
+ sctp_clean_up_stream_reset(stcb);
+ }
+ }
+ }
+ return (0);
+}
+
+static void
+sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
+ struct sctp_tmit_chunk *chk,
+ struct sctp_stream_reset_in_request *req)
+{
+ uint32_t seq;
+ int len, i;
+ int number_entries;
+ uint16_t temp;
+
+ /*
+ * peer wants me to send a str-reset to him for my outgoing seq's if
+ * seq_in is right.
+ */
+ struct sctp_association *asoc = &stcb->asoc;
+
+ seq = ntohl(req->request_seq);
+ if (asoc->str_reset_seq_in == seq) {
+ if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
+ len = ntohs(req->ph.param_length);
+ number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
+ for (i = 0; i < number_entries; i++) {
+ temp = ntohs(req->list_of_streams[i]);
+ req->list_of_streams[i] = temp;
+ }
+ /* move the reset action back one */
+ asoc->last_reset_action[1] = asoc->last_reset_action[0];
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
+ sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
+ asoc->str_reset_seq_out,
+ seq, (asoc->sending_seq - 1));
+ asoc->stream_reset_out_is_outstanding = 1;
+ asoc->str_reset = chk;
+ sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
+ stcb->asoc.stream_reset_outstanding++;
+ } else {
+ /* Can't do it, since we have sent one out */
+ asoc->last_reset_action[1] = asoc->last_reset_action[0];
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER;
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+ }
+ asoc->str_reset_seq_in++;
+ } else if (asoc->str_reset_seq_in - 1 == seq) {
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+ } else if (asoc->str_reset_seq_in - 2 == seq) {
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
+ } else {
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
+ }
+}
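+/*
+ * The seq / seq-1 / seq-2 ladder above acts as a two-deep replay cache:
+ * last_reset_action[0] answers a retransmission of the most recent
+ * request, last_reset_action[1] the one before that, and anything older
+ * is rejected.  A sketch of the same dispatch, with the fresh-request
+ * action elided:
+ */
+#if 0
+	if (seq == asoc->str_reset_seq_in) {
+		/* fresh request: act on it, then asoc->str_reset_seq_in++ */
+	} else if (seq == asoc->str_reset_seq_in - 1) {
+		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+	} else if (seq == asoc->str_reset_seq_in - 2) {
+		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
+	} else {
+		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
+	}
+#endif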
+
+static int
+sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
+ struct sctp_tmit_chunk *chk,
+ struct sctp_stream_reset_tsn_request *req)
+{
+ /* reset all in and out and update the tsn */
+ /*
+	 * A) reset my str-seq's on in and out. B) Select a receive next,
+	 * and set cum-ack to it; also process this selected number as a
+	 * fwd-tsn. C) set in the response my next sending seq.
+ */
+ struct sctp_forward_tsn_chunk fwdtsn;
+ struct sctp_association *asoc = &stcb->asoc;
+ int abort_flag = 0;
+ uint32_t seq;
+
+ seq = ntohl(req->request_seq);
+ if (asoc->str_reset_seq_in == seq) {
+ fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
+ fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
+ fwdtsn.ch.chunk_flags = 0;
+ fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
+ sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag);
+ if (abort_flag) {
+ return (1);
+ }
+ stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
+ stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
+ stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
+ memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
+ atomic_add_int(&stcb->asoc.sending_seq, 1);
+ /* save off historical data for retrans */
+ stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
+ stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq;
+ stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0];
+ stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn;
+
+ sctp_add_stream_reset_result_tsn(chk,
+ ntohl(req->request_seq),
+ SCTP_STREAM_RESET_PERFORMED,
+ stcb->asoc.sending_seq,
+ stcb->asoc.mapping_array_base_tsn);
+ sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
+ sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
+ stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
+ stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
+
+ asoc->str_reset_seq_in++;
+ } else if (asoc->str_reset_seq_in - 1 == seq) {
+ sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
+ stcb->asoc.last_sending_seq[0],
+ stcb->asoc.last_base_tsnsent[0]
+ );
+ } else if (asoc->str_reset_seq_in - 2 == seq) {
+ sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
+ stcb->asoc.last_sending_seq[1],
+ stcb->asoc.last_base_tsnsent[1]
+ );
+ } else {
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
+ }
+ return (0);
+}
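+/*
+ * A worked trace of the TSN-reset rebasing above (illustrative values):
+ * if highest_tsn_inside_map was 1000, the synthetic FWD-TSN moves the
+ * cum-ack to 1001, the map base jumps by SCTP_STREAM_RESET_TSN_DELTA,
+ * the mapping array is zeroed, and the response carries our new
+ * sending_seq and base TSN so the peer can rebase symmetrically.  The
+ * last_sending_seq[]/last_base_tsnsent[] pairs preserve the two most
+ * recent answers for the seq-1/seq-2 retransmission branches.
+ */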
+
+static void
+sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
+ struct sctp_tmit_chunk *chk,
+ struct sctp_stream_reset_out_request *req)
+{
+ uint32_t seq, tsn;
+ int number_entries, len;
+ struct sctp_association *asoc = &stcb->asoc;
+
+ seq = ntohl(req->request_seq);
+
+	/* now, if it's not a duplicate, we process it */
+ if (asoc->str_reset_seq_in == seq) {
+ len = ntohs(req->ph.param_length);
+ number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
+		/*
+		 * The sender is resetting; handle the list issue. We must:
+		 * a) verify that we can do the reset; if so, no problem;
+		 * b) if we can't do the reset, copy the request;
+		 * c) queue it, and set up the data-in processor to trigger
+		 *    it when needed, dequeuing all the queued data.
+		 */
+ tsn = ntohl(req->send_reset_at_tsn);
+
+ /* move the reset action back one */
+ asoc->last_reset_action[1] = asoc->last_reset_action[0];
+ if ((tsn == asoc->cumulative_tsn) ||
+ (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) {
+ /* we can do it now */
+ sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
+ } else {
+ /*
+ * we must queue it up and thus wait for the TSN's
+ * to arrive that are at or before tsn
+ */
+ struct sctp_stream_reset_list *liste;
+ int siz;
+
+ siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
+ SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
+ siz, "StrRstList");
+ if (liste == NULL) {
+ /* gak out of memory */
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
+ return;
+ }
+ liste->tsn = tsn;
+ liste->number_entries = number_entries;
+ memcpy(&liste->req, req,
+ (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
+ TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
+ }
+ asoc->str_reset_seq_in++;
+ } else if ((asoc->str_reset_seq_in - 1) == seq) {
+ /*
+ * one seq back, just echo back last action since my
+ * response was lost.
+ */
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+ } else if ((asoc->str_reset_seq_in - 2) == seq) {
+ /*
+ * two seq back, just echo back last action since my
+ * response was lost.
+ */
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
+ } else {
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
+ }
+}
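+/*
+ * The deferred reset queued above is one variable-length record: a
+ * header plus N 16-bit stream numbers.  A standalone sketch of the same
+ * layout math (the struct and names here are hypothetical, not the
+ * kernel's):
+ */
+#if 0
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+struct reset_list {
+	uint32_t tsn;			/* defer until cum-ack reaches this */
+	uint32_t number_entries;
+	uint16_t streams[];		/* flexible array member */
+};
+
+static struct reset_list *
+make_reset_list(uint32_t tsn, const uint16_t *streams, uint32_t n)
+{
+	struct reset_list *l;
+
+	l = malloc(sizeof(*l) + n * sizeof(uint16_t));
+	if (l == NULL)
+		return (NULL);
+	l->tsn = tsn;
+	l->number_entries = n;
+	memcpy(l->streams, streams, n * sizeof(uint16_t));
+	return (l);
+}
+#endif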
+
+static int
+sctp_handle_stream_reset(struct sctp_tcb *stcb, struct sctp_stream_reset_out_req *sr_req)
+{
+	int chk_length, param_len, ptype;
+	uint32_t seq;
+	int num_req = 0;
+	int num_param = 0;
+	int ret_code = 0;
+	struct sctp_tmit_chunk *chk;
+	struct sctp_chunkhdr *ch;
+	struct sctp_paramhdr *ph;
+
+	/* now it may be a reset or a reset-response */
+	chk_length = ntohs(sr_req->ch.chunk_length);
+
+ /* setup for adding the response */
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ return (ret_code);
+ }
+ chk->rec.chunk_id.id = SCTP_STREAM_RESET;
+ chk->asoc = &stcb->asoc;
+ chk->no_fr_allowed = 0;
+ chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
+ chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
+ if (chk->data == NULL) {
+strres_nochunk:
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_a_chunk(stcb, chk);
+ return (ret_code);
+ }
+ chk->data->m_data += SCTP_MIN_OVERHEAD;
+
+ /* setup chunk parameters */
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->whoTo = stcb->asoc.primary_destination;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+ ch->chunk_type = SCTP_STREAM_RESET;
+ ch->chunk_flags = 0;
+ ch->chunk_length = htons(chk->send_size);
+ chk->data->m_pkthdr.len = chk->data->m_len = SCTP_SIZE32(chk->send_size);
+
+
+ ph = (struct sctp_paramhdr *)&sr_req->sr_req;
+ while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
+ param_len = ntohs(ph->param_length);
+ if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
+ /* bad param */
+ break;
+ }
+ ptype = ntohs(ph->param_type);
+ num_param++;
+ if (num_param > SCTP_MAX_RESET_PARAMS) {
+			/* already hit the max number of parameters, sorry.. */
+ break;
+ }
+ if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
+ struct sctp_stream_reset_out_request *req_out;
+
+ req_out = (struct sctp_stream_reset_out_request *)ph;
+ num_req++;
+ if (stcb->asoc.stream_reset_outstanding) {
+ seq = ntohl(req_out->response_seq);
+ if (seq == stcb->asoc.str_reset_seq_out) {
+ /* implicit ack */
+ sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL);
+ }
+ }
+ sctp_handle_str_reset_request_out(stcb, chk, req_out);
+ } else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
+ struct sctp_stream_reset_in_request *req_in;
+
+ num_req++;
+ req_in = (struct sctp_stream_reset_in_request *)ph;
+ sctp_handle_str_reset_request_in(stcb, chk, req_in);
+ } else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
+ struct sctp_stream_reset_tsn_request *req_tsn;
+
+ num_req++;
+ req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
+ if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
+ ret_code = 1;
+ goto strres_nochunk;
+ }
+ /* no more */
+ break;
+ } else if (ptype == SCTP_STR_RESET_RESPONSE) {
+ struct sctp_stream_reset_response *resp;
+ uint32_t result;
+
+ resp = (struct sctp_stream_reset_response *)ph;
+ seq = ntohl(resp->response_seq);
+ result = ntohl(resp->result);
+ if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
+ ret_code = 1;
+ goto strres_nochunk;
+ }
+ } else {
+ break;
+ }
+
+ ph = (struct sctp_paramhdr *)((caddr_t)ph + SCTP_SIZE32(param_len));
+ chk_length -= SCTP_SIZE32(param_len);
+ }
+ if (num_req == 0) {
+		/* we have no response; free the stuff */
+ goto strres_nochunk;
+ }
+ /* ok we have a chunk to link in */
+ TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
+ chk,
+ sctp_next);
+ stcb->asoc.ctrl_queue_cnt++;
+ return (ret_code);
+}
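+/*
+ * The request loop above is a standard SCTP TLV walk: read a parameter
+ * header, dispatch on type, then advance by the 32-bit-padded length.
+ * SCTP_SIZE32 rounds a length up to a multiple of 4; a sketch of the
+ * padding math (MY_SIZE32 is a hypothetical stand-in):
+ */
+#if 0
+#define MY_SIZE32(x)	(((x) + 3) & ~3)
+	/* e.g. a 10-byte parameter occupies MY_SIZE32(10) == 12 bytes */
+#endif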
+
+/*
+ * Handle a router or endpoint's report of a packet loss; there are two ways
+ * to handle this: either we get the whole packet and must dissect it
+ * ourselves (possibly with truncation and/or corruption) or it is a summary
+ * from a middle box that did the dissecting for us.
+ */
+static void
+sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
+ struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ uint32_t bottle_bw, on_queue;
+ uint16_t trunc_len;
+ unsigned int chlen;
+ unsigned int at;
+ struct sctp_chunk_desc desc;
+ struct sctp_chunkhdr *ch;
+
+ chlen = ntohs(cp->ch.chunk_length);
+ chlen -= sizeof(struct sctp_pktdrop_chunk);
+ /* XXX possible chlen underflow */
+ if (chlen == 0) {
+ ch = NULL;
+ if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
+ SCTP_STAT_INCR(sctps_pdrpbwrpt);
+ } else {
+ ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
+ chlen -= sizeof(struct sctphdr);
+ /* XXX possible chlen underflow */
+ memset(&desc, 0, sizeof(desc));
+ }
+ trunc_len = (uint16_t) ntohs(cp->trunc_len);
+ /* now the chunks themselves */
+ while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
+ desc.chunk_type = ch->chunk_type;
+ /* get amount we need to move */
+ at = ntohs(ch->chunk_length);
+ if (at < sizeof(struct sctp_chunkhdr)) {
+ /* corrupt chunk, maybe at the end? */
+ SCTP_STAT_INCR(sctps_pdrpcrupt);
+ break;
+ }
+ if (trunc_len == 0) {
+ /* we are supposed to have all of it */
+ if (at > chlen) {
+ /* corrupt skip it */
+ SCTP_STAT_INCR(sctps_pdrpcrupt);
+ break;
+ }
+ } else {
+ /* is there enough of it left ? */
+ if (desc.chunk_type == SCTP_DATA) {
+ if (chlen < (sizeof(struct sctp_data_chunk) +
+ sizeof(desc.data_bytes))) {
+ break;
+ }
+ } else {
+ if (chlen < sizeof(struct sctp_chunkhdr)) {
+ break;
+ }
+ }
+ }
+ if (desc.chunk_type == SCTP_DATA) {
+ /* can we get out the tsn? */
+ if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
+ SCTP_STAT_INCR(sctps_pdrpmbda);
+
+ if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
+ /* yep */
+ struct sctp_data_chunk *dcp;
+ uint8_t *ddp;
+ unsigned int iii;
+
+ dcp = (struct sctp_data_chunk *)ch;
+ ddp = (uint8_t *) (dcp + 1);
+ for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
+ desc.data_bytes[iii] = ddp[iii];
+ }
+ desc.tsn_ifany = dcp->dp.tsn;
+ } else {
+ /* nope we are done. */
+ SCTP_STAT_INCR(sctps_pdrpnedat);
+ break;
+ }
+ } else {
+ if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
+ SCTP_STAT_INCR(sctps_pdrpmbct);
+ }
+
+ if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
+ SCTP_STAT_INCR(sctps_pdrppdbrk);
+ break;
+ }
+ if (SCTP_SIZE32(at) > chlen) {
+ break;
+ }
+ chlen -= SCTP_SIZE32(at);
+ if (chlen < sizeof(struct sctp_chunkhdr)) {
+ /* done, none left */
+ break;
+ }
+ ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
+ }
+ /* Now update any rwnd --- possibly */
+ if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
+ /* From a peer, we get a rwnd report */
+ uint32_t a_rwnd;
+
+ SCTP_STAT_INCR(sctps_pdrpfehos);
+
+ bottle_bw = ntohl(cp->bottle_bw);
+ on_queue = ntohl(cp->current_onq);
+ if (bottle_bw && on_queue) {
+ /* a rwnd report is in here */
+ if (bottle_bw > on_queue)
+ a_rwnd = bottle_bw - on_queue;
+ else
+ a_rwnd = 0;
+
+ if (a_rwnd == 0)
+ stcb->asoc.peers_rwnd = 0;
+ else {
+ if (a_rwnd > stcb->asoc.total_flight) {
+ stcb->asoc.peers_rwnd =
+ a_rwnd - stcb->asoc.total_flight;
+ } else {
+ stcb->asoc.peers_rwnd = 0;
+ }
+ if (stcb->asoc.peers_rwnd <
+ stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ stcb->asoc.peers_rwnd = 0;
+ }
+ }
+ }
+ } else {
+ SCTP_STAT_INCR(sctps_pdrpfmbox);
+ }
+
+ /* now middle boxes in sat networks get a cwnd bump */
+ if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
+ (stcb->asoc.sat_t3_loss_recovery == 0) &&
+ (stcb->asoc.sat_network)) {
+ /*
+		 * This is debatable, but for sat networks it makes sense.
+		 * Note that if a T3 timer has gone off, we will prohibit any
+		 * changes to cwnd until we exit the T3 loss recovery.
+ */
+ uint32_t bw_avail;
+ int rtt, incr;
+
+#ifdef SCTP_CWND_MONITOR
+ int old_cwnd = net->cwnd;
+
+#endif
+ /* need real RTT for this calc */
+ rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
+ /* get bottle neck bw */
+ bottle_bw = ntohl(cp->bottle_bw);
+ /* and whats on queue */
+ on_queue = ntohl(cp->current_onq);
+ /*
+ * adjust the on-queue if our flight is more it could be
+ * that the router has not yet gotten data "in-flight" to it
+ */
+ if (on_queue < net->flight_size)
+ on_queue = net->flight_size;
+
+ /* calculate the available space */
+ bw_avail = (bottle_bw * rtt) / 1000;
+ if (bw_avail > bottle_bw) {
+ /*
+ * Cap the growth to no more than the bottle neck.
+ * This can happen as RTT slides up due to queues.
+ * It also means if you have more than a 1 second
+			 * RTT with an empty queue you will be limited to the
+ * bottle_bw per second no matter if other points
+ * have 1/2 the RTT and you could get more out...
+ */
+ bw_avail = bottle_bw;
+ }
+ if (on_queue > bw_avail) {
+ /*
+			 * No room for anything else; don't allow anything
+			 * else to be "added to the fire".
+ */
+ int seg_inflight, seg_onqueue, my_portion;
+
+ net->partial_bytes_acked = 0;
+
+ /* how much are we over queue size? */
+ incr = on_queue - bw_avail;
+ if (stcb->asoc.seen_a_sack_this_pkt) {
+ /*
+ * undo any cwnd adjustment that the sack
+ * might have made
+ */
+ net->cwnd = net->prev_cwnd;
+ }
+ /* Now how much of that is mine? */
+ seg_inflight = net->flight_size / net->mtu;
+ seg_onqueue = on_queue / net->mtu;
+ my_portion = (incr * seg_inflight) / seg_onqueue;
+
+ /* Have I made an adjustment already */
+ if (net->cwnd > net->flight_size) {
+ /*
+				 * for this flight I made an adjustment; we
+				 * need to decrease my portion by a share of
+				 * our previous adjustment.
+ */
+ int diff_adj;
+
+ diff_adj = net->cwnd - net->flight_size;
+ if (diff_adj > my_portion)
+ my_portion = 0;
+ else
+ my_portion -= diff_adj;
+ }
+ /*
+			 * back down to the previous cwnd (assume we have
+			 * had a sack before this packet), minus whatever
+			 * portion of the overage is my fault.
+ */
+ net->cwnd -= my_portion;
+
+ /* we will NOT back down more than 1 MTU */
+ if (net->cwnd <= net->mtu) {
+ net->cwnd = net->mtu;
+ }
+ /* force into CA */
+ net->ssthresh = net->cwnd - 1;
+ } else {
+ /*
+			 * Take 1/4 of the space left or max_burst,
+			 * whichever is less.
+ */
+ incr = min((bw_avail - on_queue) >> 2,
+ (int)stcb->asoc.max_burst * (int)net->mtu);
+ net->cwnd += incr;
+ }
+ if (net->cwnd > bw_avail) {
+ /* We can't exceed the pipe size */
+ net->cwnd = bw_avail;
+ }
+ if (net->cwnd < net->mtu) {
+ /* We always have 1 MTU */
+ net->cwnd = net->mtu;
+ }
+#ifdef SCTP_CWND_MONITOR
+ if (net->cwnd - old_cwnd != 0) {
+ /* log only changes */
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
+ SCTP_CWND_LOG_FROM_SAT);
+ }
+#endif
+ }
+}
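+/*
+ * Worked example of the sat-network adjustment above (illustrative
+ * numbers, assuming rtt is in milliseconds): with bottle_bw = 1000000
+ * bytes/sec and rtt = 200 ms, bw_avail = (1000000 * 200) / 1000 =
+ * 200000 bytes of pipe.  If the router reports on_queue = 250000, we
+ * are 50000 over; that overage is split pro-rata by
+ * seg_inflight / seg_onqueue and subtracted from cwnd (never below one
+ * MTU).  If instead on_queue = 100000, cwnd grows by
+ * min((200000 - 100000) / 4, max_burst * mtu).
+ */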
+
+extern int sctp_strict_init;
+extern int sctp_abort_if_one_2_one_hits_limit;
+
+/*
+ * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
+ * still contain IP/SCTP header - stcb: is the tcb found for this packet -
+ * offset: offset into the mbuf chain to first chunkhdr - length: is the
+ * length of the complete packet outputs: - length: modified to remaining
+ * length after control processing - netp: modified to new sctp_nets after
+ * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
+ * bad packet,...) otherwise return the tcb for this packet
+ */
+static struct sctp_tcb *
+sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
+ struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen)
+{
+ struct sctp_association *asoc;
+ uint32_t vtag_in;
+ int num_chunks = 0; /* number of control chunks processed */
+ int chk_length;
+ int ret;
+
+ /*
+	 * How big should this be, and should it be alloc'd? Let's try the
+	 * d-mtu-ceiling for now (2k) and hope that works ... until we get
+	 * into jumbograms and such..
+ */
+ uint8_t chunk_buf[DEFAULT_CHUNK_BUFFER];
+ struct sctp_tcb *locked_tcb = stcb;
+ int got_auth = 0;
+ uint32_t auth_offset = 0, auth_len = 0;
+ int auth_skipped = 0;
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
+ printf("sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
+ iphlen, *offset, length, stcb);
+ }
+#endif /* SCTP_DEBUG */
+
+ /* validate chunk header length... */
+ if (ntohs(ch->chunk_length) < sizeof(*ch)) {
+ return (NULL);
+ }
+ /*
+ * validate the verification tag
+ */
+ vtag_in = ntohl(sh->v_tag);
+
+ if (ch->chunk_type == SCTP_INITIATION) {
+ if (vtag_in != 0) {
+ /* protocol error- silently discard... */
+ SCTP_STAT_INCR(sctps_badvtag);
+ if (locked_tcb)
+ SCTP_TCB_UNLOCK(locked_tcb);
+ return (NULL);
+ }
+ } else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
+ /*
+ * If there is no stcb, skip the AUTH chunk and process
+		 * later after a stcb is found (to validate that the lookup
+		 * was valid).
+ */
+ if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
+ (stcb == NULL) && !sctp_auth_disable) {
+ /* save this chunk for later processing */
+ auth_skipped = 1;
+ auth_offset = *offset;
+ auth_len = ntohs(ch->chunk_length);
+
+ /* (temporarily) move past this chunk */
+ *offset += SCTP_SIZE32(auth_len);
+ if (*offset >= length) {
+ /* no more data left in the mbuf chain */
+ *offset = length;
+ return (NULL);
+ }
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_chunkhdr), chunk_buf);
+ }
+ if (ch->chunk_type == SCTP_COOKIE_ECHO) {
+ goto process_control_chunks;
+ }
+ /*
+		 * first check if it's an ASCONF with an unknown src addr;
+		 * we need to look inside to find the association
+ */
+ if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
+ /* inp's refcount may be reduced */
+ SCTP_INP_INCR_REF(inp);
+
+ stcb = sctp_findassociation_ep_asconf(m, iphlen,
+ *offset, sh, &inp, netp);
+ if (stcb == NULL) {
+ /*
+ * reduce inp's refcount if not reduced in
+ * sctp_findassociation_ep_asconf().
+ */
+ SCTP_INP_DECR_REF(inp);
+ }
+ /* now go back and verify any auth chunk to be sure */
+ if (auth_skipped && (stcb != NULL)) {
+ struct sctp_auth_chunk *auth;
+
+ auth = (struct sctp_auth_chunk *)
+ sctp_m_getptr(m, auth_offset,
+ auth_len, chunk_buf);
+ got_auth = 1;
+ auth_skipped = 0;
+ if (sctp_handle_auth(stcb, auth, m,
+ auth_offset)) {
+ /* auth HMAC failed so dump it */
+ *offset = length;
+ return (NULL);
+ } else {
+ /* remaining chunks are HMAC checked */
+ stcb->asoc.authenticated = 1;
+ }
+ }
+ }
+ if (stcb == NULL) {
+ /* no association, so it's out of the blue... */
+ sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL);
+ *offset = length;
+ if (locked_tcb)
+ SCTP_TCB_UNLOCK(locked_tcb);
+ return (NULL);
+ }
+ asoc = &stcb->asoc;
+ /* ABORT and SHUTDOWN can use either v_tag... */
+ if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
+ (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
+ (ch->chunk_type == SCTP_PACKET_DROPPED)) {
+ if ((vtag_in == asoc->my_vtag) ||
+ ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
+ (vtag_in == asoc->peer_vtag))) {
+ /* this is valid */
+ } else {
+ /* drop this packet... */
+ SCTP_STAT_INCR(sctps_badvtag);
+ if (locked_tcb)
+ SCTP_TCB_UNLOCK(locked_tcb);
+ return (NULL);
+ }
+ } else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
+ if (vtag_in != asoc->my_vtag) {
+ /*
+ * this could be a stale SHUTDOWN-ACK or the
+ * peer never got the SHUTDOWN-COMPLETE and
+ * is still hung; we have started a new asoc
+ * but it won't complete until the shutdown
+ * is completed
+ */
+ if (locked_tcb)
+ SCTP_TCB_UNLOCK(locked_tcb);
+ sctp_handle_ootb(m, iphlen, *offset, sh, inp,
+ NULL);
+ return (NULL);
+ }
+ } else {
+ /* for all other chunks, vtag must match */
+ if (vtag_in != asoc->my_vtag) {
+ /* invalid vtag... */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("invalid vtag: %xh, expect %xh\n", vtag_in, asoc->my_vtag);
+ }
+#endif /* SCTP_DEBUG */
+ SCTP_STAT_INCR(sctps_badvtag);
+ if (locked_tcb)
+ SCTP_TCB_UNLOCK(locked_tcb);
+ *offset = length;
+ return (NULL);
+ }
+ }
+ } /* end if !SCTP_COOKIE_ECHO */
+ /*
+ * process all control chunks...
+ */
+ if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
+ (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
+ (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
+ /* implied cookie-ack.. we must have lost the ack */
+ stcb->asoc.overall_error_count = 0;
+ sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
+ *netp);
+ }
+process_control_chunks:
+
+ while (IS_SCTP_CONTROL(ch)) {
+ /* validate chunk length */
+ chk_length = ntohs(ch->chunk_length);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT2) {
+ printf("sctp_process_control: processing a chunk type=%u, len=%u\n",
+ ch->chunk_type, chk_length);
+ }
+#endif /* SCTP_DEBUG */
+ if ((size_t)chk_length < sizeof(*ch) ||
+ (*offset + chk_length) > length) {
+ *offset = length;
+ if (locked_tcb)
+ SCTP_TCB_UNLOCK(locked_tcb);
+ return (NULL);
+ }
+ SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
+ /*
+		 * INIT-ACK only gets the init-ack "header" portion
+ * because we don't have to process the peer's COOKIE. All
+ * others get a complete chunk.
+ */
+ if (ch->chunk_type == SCTP_INITIATION_ACK) {
+ /* get an init-ack chunk */
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_init_ack_chunk), chunk_buf);
+ if (ch == NULL) {
+ *offset = length;
+ if (locked_tcb)
+ SCTP_TCB_UNLOCK(locked_tcb);
+ return (NULL);
+ }
+ } else {
+ /* get a complete chunk... */
+ if ((size_t)chk_length > sizeof(chunk_buf)) {
+ struct mbuf *oper;
+ struct sctp_paramhdr *phdr;
+
+ oper = NULL;
+ oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
+ 1, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ /* pre-reserve some space */
+ oper->m_data += sizeof(struct sctp_chunkhdr);
+ oper->m_len = sizeof(struct sctp_paramhdr);
+ oper->m_pkthdr.len = oper->m_len;
+ phdr = mtod(oper, struct sctp_paramhdr *);
+ phdr->param_type = htons(SCTP_CAUSE_OUT_OF_RESC);
+ phdr->param_length = htons(sizeof(struct sctp_paramhdr));
+ sctp_queue_op_err(stcb, oper);
+ }
+ if (locked_tcb)
+ SCTP_TCB_UNLOCK(locked_tcb);
+ return (NULL);
+ }
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+ chk_length, chunk_buf);
+ if (ch == NULL) {
+				printf("sctp_process_control: Can't get all the data....\n");
+ *offset = length;
+ if (locked_tcb)
+ SCTP_TCB_UNLOCK(locked_tcb);
+ return (NULL);
+ }
+ }
+ num_chunks++;
+ /* Save off the last place we got a control from */
+ if (stcb != NULL) {
+ if ((*netp != NULL) || (ch->chunk_type == SCTP_ASCONF)) {
+ /*
+ * allow last_control to be NULL if
+ * ASCONF... ASCONF processing will find the
+ * right net later
+ */
+ stcb->asoc.last_control_chunk_from = *netp;
+ }
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xB0, ch->chunk_type);
+#endif
+
+ /* check to see if this chunk required auth, but isn't */
+ if ((stcb != NULL) && !sctp_auth_disable &&
+ sctp_auth_is_required_chunk(ch->chunk_type,
+ stcb->asoc.local_auth_chunks) &&
+ !stcb->asoc.authenticated) {
+ /* "silently" ignore */
+ SCTP_STAT_INCR(sctps_recvauthmissing);
+ goto next_chunk;
+ }
+ switch (ch->chunk_type) {
+ case SCTP_INITIATION:
+ /* must be first and only chunk */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("SCTP_INIT\n");
+ }
+#endif /* SCTP_DEBUG */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ /* We are not interested anymore? */
+ if ((stcb) && (stcb->asoc.total_output_queue_size)) {
+ /*
+ * collision case where we are
+ * sending to them too
+ */
+ ;
+ } else {
+ if (locked_tcb)
+ SCTP_TCB_UNLOCK(locked_tcb);
+ *offset = length;
+ return (NULL);
+ }
+ }
+ if ((num_chunks > 1) ||
+ (sctp_strict_init && (length - *offset > SCTP_SIZE32(chk_length)))) {
+ *offset = length;
+ if (locked_tcb)
+ SCTP_TCB_UNLOCK(locked_tcb);
+ return (NULL);
+ }
+ if ((stcb != NULL) &&
+ (SCTP_GET_STATE(&stcb->asoc) ==
+ SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ sctp_send_shutdown_ack(stcb,
+ stcb->asoc.primary_destination);
+ *offset = length;
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
+ if (locked_tcb)
+ SCTP_TCB_UNLOCK(locked_tcb);
+ return (NULL);
+ }
+ sctp_handle_init(m, iphlen, *offset, sh,
+ (struct sctp_init_chunk *)ch, inp, stcb, *netp);
+ *offset = length;
+ if (locked_tcb)
+ SCTP_TCB_UNLOCK(locked_tcb);
+ return (NULL);
+ break;
+ case SCTP_INITIATION_ACK:
+ /* must be first and only chunk */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("SCTP_INIT-ACK\n");
+ }
+#endif /* SCTP_DEBUG */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ /* We are not interested anymore */
+ if ((stcb) && (stcb->asoc.total_output_queue_size)) {
+ ;
+ } else {
+ if (locked_tcb)
+ SCTP_TCB_UNLOCK(locked_tcb);
+ *offset = length;
+ if (stcb) {
+ sctp_free_assoc(inp, stcb, 0);
+ }
+ return (NULL);
+ }
+ }
+ if ((num_chunks > 1) ||
+ (sctp_strict_init && (length - *offset > SCTP_SIZE32(chk_length)))) {
+ *offset = length;
+ if (locked_tcb)
+ SCTP_TCB_UNLOCK(locked_tcb);
+ return (NULL);
+ }
+ ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
+ (struct sctp_init_ack_chunk *)ch, stcb, *netp);
+ /*
+ * Special case, I must call the output routine to
+ * get the cookie echoed
+ */
+ if ((stcb) && ret == 0)
+ sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
+ *offset = length;
+ if (locked_tcb)
+ SCTP_TCB_UNLOCK(locked_tcb);
+ return (NULL);
+ break;
+ case SCTP_SELECTIVE_ACK:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("SCTP_SACK\n");
+ }
+#endif /* SCTP_DEBUG */
+ SCTP_STAT_INCR(sctps_recvsacks);
+ {
+ struct sctp_sack_chunk *sack;
+ int abort_now = 0;
+ uint32_t a_rwnd, cum_ack;
+ uint16_t num_seg;
+ int nonce_sum_flag;
+
+ sack = (struct sctp_sack_chunk *)ch;
+
+ nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
+ cum_ack = ntohl(sack->sack.cum_tsn_ack);
+ num_seg = ntohs(sack->sack.num_gap_ack_blks);
+ a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
+ stcb->asoc.seen_a_sack_this_pkt = 1;
+ if ((stcb->asoc.pr_sctp_cnt == 0) &&
+ (num_seg == 0) &&
+ ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
+ (cum_ack == stcb->asoc.last_acked_seq)) &&
+ (stcb->asoc.saw_sack_with_frags == 0) &&
+ (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
+ ) {
+ /*
+					 * We have a SIMPLE sack with no
+					 * prior segments and data on the
+					 * sent queue to be acked. Use the
+					 * faster-path sack processing. We
+					 * also allow window-update sacks
+					 * with no missing segments to go
+					 * this way.
+ */
+ sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag, &abort_now);
+ } else {
+ sctp_handle_sack(sack, stcb, *netp, &abort_now);
+ }
+ if (abort_now) {
+ /* ABORT signal from sack processing */
+ *offset = length;
+ return (NULL);
+ }
+ }
+ break;
+ case SCTP_HEARTBEAT_REQUEST:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("SCTP_HEARTBEAT\n");
+ }
+#endif /* SCTP_DEBUG */
+ SCTP_STAT_INCR(sctps_recvheartbeat);
+ sctp_send_heartbeat_ack(stcb, m, *offset, chk_length,
+ *netp);
+
+ /* He's alive so give him credit */
+ stcb->asoc.overall_error_count = 0;
+ break;
+ case SCTP_HEARTBEAT_ACK:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("SCTP_HEARTBEAT-ACK\n");
+ }
+#endif /* SCTP_DEBUG */
+
+ /* He's alive so give him credit */
+ stcb->asoc.overall_error_count = 0;
+ SCTP_STAT_INCR(sctps_recvheartbeatack);
+ sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
+ stcb, *netp);
+ break;
+ case SCTP_ABORT_ASSOCIATION:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("SCTP_ABORT\n");
+ }
+#endif /* SCTP_DEBUG */
+ sctp_handle_abort((struct sctp_abort_chunk *)ch,
+ stcb, *netp);
+ *offset = length;
+ return (NULL);
+ break;
+ case SCTP_SHUTDOWN:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("SCTP_SHUTDOWN\n");
+ }
+#endif /* SCTP_DEBUG */
+ {
+ int abort_flag = 0;
+
+ sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
+ stcb, *netp, &abort_flag);
+ if (abort_flag) {
+ *offset = length;
+ return (NULL);
+ }
+ }
+ break;
+ case SCTP_SHUTDOWN_ACK:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("SCTP_SHUTDOWN-ACK\n");
+ }
+#endif /* SCTP_DEBUG */
+ sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
+ *offset = length;
+ return (NULL);
+ break;
+ case SCTP_OPERATION_ERROR:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("SCTP_OP-ERR\n");
+ }
+#endif /* SCTP_DEBUG */
+ if (sctp_handle_error(ch, stcb, *netp) < 0) {
+ *offset = length;
+ return (NULL);
+ }
+ break;
+ case SCTP_COOKIE_ECHO:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("SCTP_COOKIE-ECHO stcb is %p\n", stcb);
+ }
+#endif /* SCTP_DEBUG */
+ if ((stcb) && (stcb->asoc.total_output_queue_size)) {
+ ;
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
+ (stcb == NULL)) {
+ /* We are not interested anymore */
+ *offset = length;
+ return (NULL);
+ }
+ }
+ /*
+			 * First, are we accepting? We check this again here
+			 * since it is possible that a previous endpoint WAS
+			 * listening, responded to an INIT-ACK and then
+			 * closed. We then opened and bound.. and are now no
+			 * longer listening.
+ */
+ if (inp->sctp_socket->so_qlimit == 0) {
+ if ((stcb) && (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ /*
+				 * special case: is this a retran'd
+				 * COOKIE-ECHO or a restarting assoc
+				 * on a peeled-off or
+				 * one-to-one style socket?
+ */
+ goto process_cookie_anyway;
+ }
+ sctp_abort_association(inp, stcb, m, iphlen, sh,
+ NULL);
+ *offset = length;
+ return (NULL);
+ } else if (inp->sctp_socket->so_qlimit) {
+ /* we are accepting so check limits like TCP */
+ if (inp->sctp_socket->so_qlen >
+ inp->sctp_socket->so_qlimit) {
+ /* no space */
+ struct mbuf *oper;
+ struct sctp_paramhdr *phdr;
+
+ if (sctp_abort_if_one_2_one_hits_limit) {
+ oper = NULL;
+ oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
+ 1, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ oper->m_len =
+ oper->m_pkthdr.len =
+ sizeof(struct sctp_paramhdr);
+ phdr = mtod(oper,
+ struct sctp_paramhdr *);
+ phdr->param_type =
+ htons(SCTP_CAUSE_OUT_OF_RESC);
+ phdr->param_length =
+ htons(sizeof(struct sctp_paramhdr));
+ }
+ sctp_abort_association(inp, stcb, m,
+ iphlen, sh, oper);
+ }
+ *offset = length;
+ return (NULL);
+ }
+ }
+ process_cookie_anyway:
+ {
+ struct mbuf *ret_buf;
+
+ ret_buf =
+ sctp_handle_cookie_echo(m, iphlen,
+ *offset, sh,
+ (struct sctp_cookie_echo_chunk *)ch,
+ &inp, &stcb, netp,
+ auth_skipped,
+ auth_offset,
+ auth_len);
+
+ if (ret_buf == NULL) {
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("GAK, null buffer\n");
+ }
+#endif /* SCTP_DEBUG */
+ auth_skipped = 0;
+ *offset = length;
+ return (NULL);
+ }
+ /* if AUTH skipped, see if it verified... */
+ if (auth_skipped) {
+ got_auth = 1;
+ auth_skipped = 0;
+ }
+ if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
+ /*
+ * Restart the timer if we have
+ * pending data
+ */
+ struct sctp_tmit_chunk *chk;
+
+ chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ if (chk) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb,
+ chk->whoTo);
+ }
+ }
+ }
+ break;
+ case SCTP_COOKIE_ACK:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("SCTP_COOKIE-ACK\n");
+ }
+#endif /* SCTP_DEBUG */
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ /* We are not interested anymore */
+ if ((stcb) && (stcb->asoc.total_output_queue_size)) {
+ ;
+ } else {
+ sctp_free_assoc(inp, stcb, 0);
+ *offset = length;
+ return (NULL);
+ }
+ }
+ /* He's alive so give him credit */
+ stcb->asoc.overall_error_count = 0;
+ sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
+ break;
+ case SCTP_ECN_ECHO:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("SCTP_ECN-ECHO\n");
+ }
+#endif /* SCTP_DEBUG */
+ /* He's alive so give him credit */
+ stcb->asoc.overall_error_count = 0;
+ sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
+ stcb);
+ break;
+ case SCTP_ECN_CWR:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("SCTP_ECN-CWR\n");
+ }
+#endif /* SCTP_DEBUG */
+ /* He's alive so give him credit */
+ stcb->asoc.overall_error_count = 0;
+
+ sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb);
+ break;
+ case SCTP_SHUTDOWN_COMPLETE:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("SCTP_SHUTDOWN-COMPLETE\n");
+ }
+#endif /* SCTP_DEBUG */
+ /* must be first and only chunk */
+ if ((num_chunks > 1) ||
+ (length - *offset > SCTP_SIZE32(chk_length))) {
+ *offset = length;
+ if (locked_tcb)
+ SCTP_TCB_UNLOCK(locked_tcb);
+
+ return (NULL);
+ }
+ sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
+ stcb, *netp);
+ *offset = length;
+ return (NULL);
+ break;
+ case SCTP_ASCONF:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("SCTP_ASCONF\n");
+ }
+#endif /* SCTP_DEBUG */
+ /* He's alive so give him credit */
+ stcb->asoc.overall_error_count = 0;
+
+ sctp_handle_asconf(m, *offset,
+ (struct sctp_asconf_chunk *)ch, stcb);
+ break;
+ case SCTP_ASCONF_ACK:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("SCTP_ASCONF-ACK\n");
+ }
+#endif /* SCTP_DEBUG */
+ /* He's alive so give him credit */
+ stcb->asoc.overall_error_count = 0;
+
+ sctp_handle_asconf_ack(m, *offset,
+ (struct sctp_asconf_ack_chunk *)ch, stcb, *netp);
+ break;
+ case SCTP_FORWARD_CUM_TSN:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("SCTP_FWD-TSN\n");
+ }
+#endif /* SCTP_DEBUG */
+ /* He's alive so give him credit */
+ {
+ int abort_flag = 0;
+
+ stcb->asoc.overall_error_count = 0;
+ *fwd_tsn_seen = 1;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ /* We are not interested anymore */
+ sctp_free_assoc(inp, stcb, 0);
+ *offset = length;
+ return (NULL);
+ }
+ sctp_handle_forward_tsn(stcb,
+ (struct sctp_forward_tsn_chunk *)ch, &abort_flag);
+ if (abort_flag) {
+ *offset = length;
+ return (NULL);
+ } else {
+ stcb->asoc.overall_error_count = 0;
+ }
+
+ }
+ break;
+ case SCTP_STREAM_RESET:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("SCTP_STREAM_RESET\n");
+ }
+#endif /* SCTP_DEBUG */
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+ chk_length, chunk_buf);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ /* We are not interested anymore */
+ sctp_free_assoc(inp, stcb, 0);
+ *offset = length;
+ return (NULL);
+ }
+ if (stcb->asoc.peer_supports_strreset == 0) {
+ /*
+ * hmm, peer should have announced this, but
+ * we will turn it on since he is sending us
+ * a stream reset.
+ */
+ stcb->asoc.peer_supports_strreset = 1;
+ }
+ if (sctp_handle_stream_reset(stcb, (struct sctp_stream_reset_out_req *)ch)) {
+ /* stop processing */
+ *offset = length;
+ return (NULL);
+ }
+ break;
+ case SCTP_PACKET_DROPPED:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("SCTP_PACKET_DROPPED\n");
+ }
+#endif /* SCTP_DEBUG */
+ /* re-get it all please */
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+ chk_length, chunk_buf);
+
+ sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
+ stcb, *netp);
+
+ break;
+
+ case SCTP_AUTHENTICATION:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("SCTP_AUTHENTICATION\n");
+ }
+#endif /* SCTP_DEBUG */
+ if (sctp_auth_disable)
+ goto unknown_chunk;
+
+ if (stcb == NULL) {
+ /* save the first AUTH for later processing */
+ if (auth_skipped == 0) {
+ auth_offset = *offset;
+ auth_len = chk_length;
+ auth_skipped = 1;
+ }
+ /* skip this chunk (temporarily) */
+ goto next_chunk;
+ }
+ if (got_auth == 1) {
+ /* skip this chunk... it's already auth'd */
+ goto next_chunk;
+ }
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+ chk_length, chunk_buf);
+ got_auth = 1;
+ if (sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
+ m, *offset)) {
+ /* auth HMAC failed so dump the packet */
+ *offset = length;
+ return (stcb);
+ } else {
+ /* remaining chunks are HMAC checked */
+ stcb->asoc.authenticated = 1;
+ }
+ break;
+
+ default:
+ unknown_chunk:
+ /* it's an unknown chunk! */
+ if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
+ struct mbuf *mm;
+ struct sctp_paramhdr *phd;
+
+ mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
+ 1, M_DONTWAIT, 1, MT_DATA);
+ if (mm) {
+ phd = mtod(mm, struct sctp_paramhdr *);
+ /*
+ * We cheat and use param type since
+					 * we did not bother to define an
+					 * error cause struct. They are the
+ * same basic format with different
+ * names.
+ */
+ phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
+ phd->param_length = htons(chk_length + sizeof(*phd));
+ mm->m_len = sizeof(*phd);
+ mm->m_next = sctp_m_copym(m, *offset, SCTP_SIZE32(chk_length),
+ M_DONTWAIT);
+ if (mm->m_next) {
+ mm->m_pkthdr.len = SCTP_SIZE32(chk_length) + sizeof(*phd);
+ sctp_queue_op_err(stcb, mm);
+ } else {
+ sctp_m_freem(mm);
+ }
+ }
+ }
+ if ((ch->chunk_type & 0x80) == 0) {
+ /* discard this packet */
+ *offset = length;
+ return (stcb);
+ } /* else skip this bad chunk and continue... */
+ break;
+ } /* switch (ch->chunk_type) */
+
+
+next_chunk:
+ /* get the next chunk */
+ *offset += SCTP_SIZE32(chk_length);
+ if (*offset >= length) {
+ /* no more data left in the mbuf chain */
+ break;
+ }
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_chunkhdr), chunk_buf);
+ if (ch == NULL) {
+ if (locked_tcb)
+ SCTP_TCB_UNLOCK(locked_tcb);
+ *offset = length;
+ return (NULL);
+ }
+ } /* while */
+ return (stcb);
+}
+
+
+/*
+ * Process the ECN bits: we have something set, so we must look to see
+ * whether it is ECN(0), ECN(1), or CE.
+ */
+static __inline void
+sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
+ uint8_t ecn_bits)
+{
+ if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
+ ;
+ } else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
+ /*
+		 * we only add to the nonce sum for ECT1; ECT0 does not
+		 * change the NS bit (which we have yet to find a way to
+		 * send).
+ */
+
+ /* ECN Nonce stuff */
+ stcb->asoc.receiver_nonce_sum++;
+ stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;
+
+ /*
+ * Drag up the last_echo point if cumack is larger since we
+ * don't want the point falling way behind by more than
+		 * 2^31 and then having it be incorrect.
+ */
+ if (compare_with_wrap(stcb->asoc.cumulative_tsn,
+ stcb->asoc.last_echo_tsn, MAX_TSN)) {
+ stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
+ }
+ } else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
+ /*
+ * Drag up the last_echo point if cumack is larger since we
+ * don't want the point falling way behind by more than
+		 * 2^31 and then having it be incorrect.
+ */
+ if (compare_with_wrap(stcb->asoc.cumulative_tsn,
+ stcb->asoc.last_echo_tsn, MAX_TSN)) {
+ stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
+ }
+ }
+}
+
+static __inline void
+sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
+ uint32_t high_tsn, uint8_t ecn_bits)
+{
+ if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
+ /*
+ * we possibly must notify the sender that a congestion
+	 * window reduction is in order. We do this by adding an ECNE
+ * chunk to the output chunk queue. The incoming CWR will
+ * remove this chunk.
+ */
+ if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn,
+ MAX_TSN)) {
+ /* Yep, we need to add a ECNE */
+ sctp_send_ecn_echo(stcb, net, high_tsn);
+ stcb->asoc.last_echo_tsn = high_tsn;
+ }
+ }
+}
+
+/*
+ * common input chunk processing (v4 and v6)
+ */
+int
+sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
+ int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
+ struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
+ uint8_t ecn_bits)
+{
+ /*
+ * Control chunk processing
+ */
+ uint32_t high_tsn;
+ int fwd_tsn_seen = 0, data_processed = 0;
+ struct mbuf *m = *mm;
+ int abort_flag = 0;
+ int un_sent;
+
+ SCTP_STAT_INCR(sctps_recvdatagrams);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xE0, 1);
+ sctp_auditing(0, inp, stcb, net);
+#endif
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
+		printf("Ok, Common input processing called, m:%p iphlen:%d offset:%d\n",
+		    (void *)m, iphlen, offset);
+ }
+#endif /* SCTP_DEBUG */
+
+ if (stcb) {
+ /* always clear this before beginning a packet */
+ stcb->asoc.authenticated = 0;
+ stcb->asoc.seen_a_sack_this_pkt = 0;
+ }
+ if (IS_SCTP_CONTROL(ch)) {
+ /* process the control portion of the SCTP packet */
+ stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
+ inp, stcb, &net, &fwd_tsn_seen);
+ if (stcb) {
+ /*
+ * This covers us if the cookie-echo was there and
+ * it changes our INP.
+ */
+ inp = stcb->sctp_ep;
+ }
+ } else {
+ /*
+ * no control chunks, so pre-process DATA chunks (these
+ * checks are taken care of by control processing)
+ */
+
+ /*
+ * if DATA only packet, and auth is required, then punt...
+ * can't have authenticated without any AUTH (control)
+ * chunks
+ */
+ if ((stcb != NULL) && !sctp_auth_disable &&
+ sctp_auth_is_required_chunk(SCTP_DATA,
+ stcb->asoc.local_auth_chunks)) {
+ /* "silently" ignore */
+ SCTP_STAT_INCR(sctps_recvauthmissing);
+ SCTP_TCB_UNLOCK(stcb);
+ return (1);
+ }
+ if (stcb == NULL) {
+ /* out of the blue DATA chunk */
+ sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL);
+ return (1);
+ }
+ if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
+ /* v_tag mismatch! */
+ SCTP_STAT_INCR(sctps_badvtag);
+ SCTP_TCB_UNLOCK(stcb);
+ return (1);
+ }
+ }
+
+ if (stcb == NULL) {
+ /*
+ * no valid TCB for this packet, or we found it's a bad
+ * packet while processing control, or we're done with this
+ * packet (done or skip rest of data), so we drop it...
+ */
+ return (1);
+ }
+ /*
+ * DATA chunk processing
+ */
+ /* plow through the data chunks while length > offset */
+
+ /*
+ * Rest should be DATA only. Check authentication state if AUTH for
+ * DATA is required.
+ */
+ if ((stcb != NULL) && !sctp_auth_disable &&
+ sctp_auth_is_required_chunk(SCTP_DATA,
+ stcb->asoc.local_auth_chunks) &&
+ !stcb->asoc.authenticated) {
+ /* "silently" ignore */
+ SCTP_STAT_INCR(sctps_recvauthmissing);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_AUTH1)
+ printf("Data chunk requires AUTH, skipped\n");
+#endif
+ SCTP_TCB_UNLOCK(stcb);
+ return (1);
+ }
+ if (length > offset) {
+ int retval;
+
+ /*
+ * First check to make sure our state is correct. We would
+ * not get here unless we really did have a tag, so we don't
+ * abort if this happens, just dump the chunk silently.
+ */
+ switch (SCTP_GET_STATE(&stcb->asoc)) {
+ case SCTP_STATE_COOKIE_ECHOED:
+ /*
+			 * we consider that data with valid tags in this
+			 * state shows us the cookie-ack was lost; imply it
+			 * was there.
+ */
+ stcb->asoc.overall_error_count = 0;
+ sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
+ break;
+ case SCTP_STATE_COOKIE_WAIT:
+ /*
+ * We consider OOTB any data sent during asoc setup.
+ */
+ sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL);
+ SCTP_TCB_UNLOCK(stcb);
+ return (1);
+ break;
+ case SCTP_STATE_EMPTY: /* should not happen */
+ case SCTP_STATE_INUSE: /* should not happen */
+ case SCTP_STATE_SHUTDOWN_RECEIVED: /* This is a peer error */
+ case SCTP_STATE_SHUTDOWN_ACK_SENT:
+ default:
+ SCTP_TCB_UNLOCK(stcb);
+ return (1);
+ break;
+ case SCTP_STATE_OPEN:
+ case SCTP_STATE_SHUTDOWN_SENT:
+ break;
+ }
+ /* take care of ECN, part 1. */
+ if (stcb->asoc.ecn_allowed &&
+ (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
+ sctp_process_ecn_marked_a(stcb, net, ecn_bits);
+ }
+ /* plow through the data chunks while length > offset */
+ retval = sctp_process_data(mm, iphlen, &offset, length, sh,
+ inp, stcb, net, &high_tsn);
+ if (retval == 2) {
+ /*
+			 * The association aborted; NO UNLOCK needed since
+ * the association is destroyed.
+ */
+ return (0);
+ }
+ data_processed = 1;
+ if (retval == 0) {
+ /* take care of ecn part 2. */
+ if (stcb->asoc.ecn_allowed &&
+ (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
+ sctp_process_ecn_marked_b(stcb, net, high_tsn,
+ ecn_bits);
+ }
+ }
+ /*
+ * Anything important needs to have been m_copy'ed in
+ * process_data
+ */
+ }
+ if ((data_processed == 0) && (fwd_tsn_seen)) {
+ int was_a_gap = 0;
+
+ if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
+ stcb->asoc.cumulative_tsn, MAX_TSN)) {
+ /* there was a gap before this data was processed */
+ was_a_gap = 1;
+ }
+ sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
+ if (abort_flag) {
+ /* Again, we aborted so NO UNLOCK needed */
+ return (0);
+ }
+ }
+ /* trigger send of any chunks in queue... */
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xE0, 2);
+ sctp_auditing(1, inp, stcb, net);
+#endif
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
+ printf("Check for chunk output prw:%d tqe:%d tf=%d\n",
+ stcb->asoc.peers_rwnd,
+ TAILQ_EMPTY(&stcb->asoc.control_send_queue),
+ stcb->asoc.total_flight);
+ }
+#endif
+ un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
+
+ if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) ||
+ ((un_sent) &&
+ (stcb->asoc.peers_rwnd > 0 ||
+ (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("Calling chunk OUTPUT\n");
+ }
+#endif
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("chunk OUTPUT returns\n");
+ }
+#endif
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xE0, 3);
+ sctp_auditing(2, inp, stcb, net);
+#endif
+ SCTP_TCB_UNLOCK(stcb);
+ return (0);
+}
+
+extern int sctp_no_csum_on_loopback;
+
+
+void
+sctp_input(struct mbuf *m, int off)
+{
+#ifdef SCTP_MBUF_LOGGING
+ struct mbuf *mat;
+
+#endif
+ int iphlen;
+ int s;
+ uint8_t ecn_bits;
+ struct ip *ip;
+ struct sctphdr *sh;
+ struct sctp_inpcb *inp = NULL;
+
+ uint32_t check, calc_check;
+ struct sctp_nets *net;
+ struct sctp_tcb *stcb = NULL;
+ struct sctp_chunkhdr *ch;
+ int refcount_up = 0;
+ int length, mlen, offset;
+
+
+ iphlen = off;
+ net = NULL;
+ SCTP_STAT_INCR(sctps_recvpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
+
+ /*
+	 * Strip IP options; we don't allow any in or out.
+ */
+#ifdef SCTP_MBUF_LOGGING
+ /* Log in any input mbufs */
+ mat = m;
+ while (mat) {
+ if (mat->m_flags & M_EXT) {
+ sctp_log_mb(mat, SCTP_MBUF_INPUT);
+ }
+ mat = mat->m_next;
+ }
+#endif
+ if ((size_t)iphlen > sizeof(struct ip)) {
+ ip_stripoptions(m, (struct mbuf *)0);
+ iphlen = sizeof(struct ip);
+ }
+ /*
+ * Get IP, SCTP, and first chunk header together in first mbuf.
+ */
+ ip = mtod(m, struct ip *);
+ offset = iphlen + sizeof(*sh) + sizeof(*ch);
+ if (m->m_len < offset) {
+ if ((m = m_pullup(m, offset)) == 0) {
+ SCTP_STAT_INCR(sctps_hdrops);
+ return;
+ }
+ ip = mtod(m, struct ip *);
+ }
+ sh = (struct sctphdr *)((caddr_t)ip + iphlen);
+ ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
+
+ /* SCTP does not allow broadcasts or multicasts */
+ if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
+ goto bad;
+ }
+ if (((ch->chunk_type == SCTP_INITIATION) ||
+ (ch->chunk_type == SCTP_INITIATION_ACK) ||
+ (ch->chunk_type == SCTP_COOKIE_ECHO)) &&
+ (in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))) {
+ /*
+	 * We only look at broadcast if it's a front state; all
+	 * others we will not have a tcb for anyway.
+ */
+ goto bad;
+ }
+ /* destination port of 0 is illegal, based on RFC2960. */
+ if (sh->dest_port == 0) {
+ SCTP_STAT_INCR(sctps_hdrops);
+ goto bad;
+ }
+ /* validate SCTP checksum */
+ if ((sctp_no_csum_on_loopback == 0) ||
+ (m->m_pkthdr.rcvif == NULL) ||
+ (m->m_pkthdr.rcvif->if_type != IFT_LOOP)) {
+ /*
+ * we do NOT validate things from the loopback if the sysctl
+ * is set to 1.
+ */
+ check = sh->checksum; /* save incoming checksum */
+ if ((check == 0) && (sctp_no_csum_on_loopback)) {
+ /*
+ * special hook for where we got a local address
+			 * somehow routed across a non-IFT_LOOP type
+ * interface
+ */
+ if (ip->ip_src.s_addr == ip->ip_dst.s_addr)
+ goto sctp_skip_csum_4;
+ }
+ sh->checksum = 0; /* prepare for calc */
+ calc_check = sctp_calculate_sum(m, &mlen, iphlen);
+ if (calc_check != check) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
+				printf("Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
+				    calc_check, check, (void *)m, mlen, iphlen);
+ }
+#endif
+
+ stcb = sctp_findassociation_addr(m, iphlen,
+ offset - sizeof(*ch),
+ sh, ch, &inp, &net);
+ if ((inp) && (stcb)) {
+ sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR);
+ } else if ((inp != NULL) && (stcb == NULL)) {
+ refcount_up = 1;
+ }
+ SCTP_STAT_INCR(sctps_badsum);
+ SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
+ goto bad;
+ }
+ sh->checksum = calc_check;
+ } else {
+sctp_skip_csum_4:
+ mlen = m->m_pkthdr.len;
+ }
+ /* validate mbuf chain length with IP payload length */
+ if (mlen < (ip->ip_len - iphlen)) {
+ SCTP_STAT_INCR(sctps_hdrops);
+ goto bad;
+ }
+ /*
+	 * Locate pcb and tcb for the datagram; sctp_findassociation_addr() wants
+ * IP/SCTP/first chunk header...
+ */
+ stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
+ sh, ch, &inp, &net);
+ /* inp's ref-count increased && stcb locked */
+ if (inp == NULL) {
+ struct sctp_init_chunk *init_chk, chunk_buf;
+
+ SCTP_STAT_INCR(sctps_noport);
+#ifdef ICMP_BANDLIM
+ /*
+ * we use the bandwidth limiting to protect against sending
+ * too many ABORTS all at once. In this case these count the
+ * same as an ICMP message.
+ */
+ if (badport_bandlim(0) < 0)
+ goto bad;
+#endif /* ICMP_BANDLIM */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
+		printf("Sending an ABORT from packet entry!\n");
+ }
+#endif
+ if (ch->chunk_type == SCTP_INITIATION) {
+ /*
+ * we do a trick here to get the INIT tag, dig in
+ * and get the tag from the INIT and put it in the
+ * common header.
+ */
+ init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
+ iphlen + sizeof(*sh), sizeof(*init_chk),
+ (uint8_t *) & chunk_buf);
+ if (init_chk != NULL)
+ sh->v_tag = init_chk->init.initiate_tag;
+ }
+ sctp_send_abort(m, iphlen, sh, 0, NULL);
+ goto bad;
+ } else if (stcb == NULL) {
+ refcount_up = 1;
+ }
+#ifdef IPSEC
+ /*
+ * I very much doubt any of the IPSEC stuff will work but I have no
+ * idea, so I will leave it in place.
+ */
+
+ if (ipsec4_in_reject_so(m, inp->ip_inp.inp.inp_socket)) {
+ ipsecstat.in_polvio++;
+ SCTP_STAT_INCR(sctps_hdrops);
+ goto bad;
+ }
+#endif /* IPSEC */
+
+
+
+ /*
+ * common chunk processing
+ */
+ length = ip->ip_len + iphlen;
+ offset -= sizeof(struct sctp_chunkhdr);
+
+ ecn_bits = ip->ip_tos;
+ s = splnet();
+
+ sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
+ inp, stcb, net, ecn_bits);
+ /* inp's ref-count reduced && stcb unlocked */
+ splx(s);
+ if (m) {
+ sctp_m_freem(m);
+ }
+ if ((inp) && (refcount_up)) {
+ /* reduce ref-count */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ return;
+bad:
+ if (stcb)
+ SCTP_TCB_UNLOCK(stcb);
+
+ if ((inp) && (refcount_up)) {
+ /* reduce ref-count */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if (m) {
+ sctp_m_freem(m);
+ }
+ return;
+}
diff --git a/sys/netinet/sctp_input.h b/sys/netinet/sctp_input.h
new file mode 100644
index 0000000..e6be0f7
--- /dev/null
+++ b/sys/netinet/sctp_input.h
@@ -0,0 +1,57 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_input.h,v 1.6 2005/03/06 16:04:17 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_input_h__
+#define __sctp_input_h__
+
+
+
+
+#if defined(_KERNEL)
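+/*
+ * Common (v4 and v6) input entry point. The unnamed parameters below are,
+ * in order: the mbuf chain, the IP header length, the chunk offset, the
+ * total packet length, the SCTP header, the first chunk header, the
+ * endpoint, the association, the remote transport address, and the ECN
+ * bits (names as in the definition in sctp_input.c).
+ */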
+int
+sctp_common_input_processing(struct mbuf **, int, int, int,
+ struct sctphdr *, struct sctp_chunkhdr *, struct sctp_inpcb *,
+ struct sctp_tcb *, struct sctp_nets *, uint8_t);
+
+
+struct sctp_stream_reset_out_request *
+ sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk);
+
+void
+ sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list);
+
+
+#endif
+#endif
diff --git a/sys/netinet/sctp_lock_bsd.h b/sys/netinet/sctp_lock_bsd.h
new file mode 100644
index 0000000..4cab124
--- /dev/null
+++ b/sys/netinet/sctp_lock_bsd.h
@@ -0,0 +1,355 @@
+#ifndef __sctp_lock_bsd_h__
+#define __sctp_lock_bsd_h__
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * General locking concepts: The goal of our locking is, of course, to
+ * provide consistency and yet minimize overhead. We will attempt to use
+ * non-recursive locks which are supposed to be quite inexpensive. Now in
+ * order to do this the goal is that most functions are not aware of locking.
+ * Once we have a TCB we lock it and unlock when we are through. This means
+ * that the TCB lock is kind-of a "global" lock when working on an
+ * association. Caution must be used when asserting a TCB_LOCK since if we
+ * recurse we deadlock.
+ *
+ * Most other locks (INP and INFO) attempt to localize the locking i.e. we try
+ * to contain the lock and unlock within the function that needs to lock it.
+ * This sometimes means we do extra locks and unlocks and lose a bit of
+ * efficiency, but if the performance statements about non-recursive locks
+ * are true this should not be a problem. One issue that arises with this
+ * lock-only-when-needed approach is that if an implicit association setup
+ * is done we have a problem. If at the time I look up an association I get
+ * NULL back for the tcb, by the time I call to create the association some
+ * other processor could have created it. This is what the CREATE lock on
+ * the endpoint is for. Places where we will be implicitly creating the
+ * association OR just creating an association (the connect call) will
+ * assert the CREATE_INP lock. This assures us that during all the lookups
+ * of INP and INFO, if another creator is also locking/looking up, we can
+ * gate the two to synchronize. So the CREATE_INP lock is another one we
+ * must lock with extreme caution to make sure we don't hit a re-entrancy
+ * issue.
+ *
+ * For non-FreeBSD 5.x we provide a bunch of EMPTY lock macros so we can
+ * blatantly put locks everywhere and they reduce to nothing on
+ * NetBSD/OpenBSD and FreeBSD 4.x
+ *
+ */
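+/*
+ * As an illustrative sketch only (the lookup call is elided), a
+ * lookup-then-create path under these rules would look roughly like:
+ *
+ *	SCTP_INP_INFO_RLOCK();
+ *	stcb = ...lookup, returns the TCB locked...;
+ *	SCTP_INP_INFO_RUNLOCK();
+ *	if (stcb == NULL) {
+ *		SCTP_ASOC_CREATE_LOCK(inp);
+ *		...re-check, then create the association...
+ *		SCTP_ASOC_CREATE_UNLOCK(inp);
+ *	}
+ *	...work on the association...
+ *	SCTP_TCB_UNLOCK(stcb);
+ */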
+
+/*
+ * When working with the global SCTP lists we lock and unlock the INP_INFO
+ * lock. So when we go to look up an association we will want to do a
+ * SCTP_INP_INFO_RLOCK() and then when we want to add a new association to
+ * the sctppcbinfo lists we will do a SCTP_INP_INFO_WLOCK().
+ */
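+/*
+ * For example (sketch only; the list manipulation is elided), adding a new
+ * association would bracket the insert as:
+ *
+ *	SCTP_INP_INFO_WLOCK();
+ *	...insert into the sctppcbinfo lists...
+ *	SCTP_INP_INFO_WUNLOCK();
+ */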
+
+__FBSDID("$FreeBSD$");
+
+#define SCTP_IPI_COUNT_INIT()
+
+#define SCTP_STATLOG_INIT_LOCK()
+#define SCTP_STATLOG_LOCK()
+#define SCTP_STATLOG_UNLOCK()
+#define SCTP_STATLOG_DESTROY()
+
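+/*
+ * SCTP_STATLOG_GETREF(x) atomically reserves the next slot in the circular
+ * cwnd log: x receives the pre-increment index, and when the end of the
+ * log is reached the index wraps back to the start and the rolled flag is
+ * set.
+ */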
+#define SCTP_STATLOG_GETREF(x) { \
+ x = atomic_fetchadd_int(&global_sctp_cwnd_log_at, 1); \
+ if(x == SCTP_STAT_LOG_SIZE) { \
+ global_sctp_cwnd_log_at = 1; \
+ x = 0; \
+ global_sctp_cwnd_log_rolled = 1; \
+ } \
+}
+
+#define SCTP_INP_INFO_LOCK_INIT() \
+ mtx_init(&sctppcbinfo.ipi_ep_mtx, "sctp-info", "inp_info", MTX_DEF)
+
+
+#define SCTP_INP_INFO_RLOCK() do { \
+ mtx_lock(&sctppcbinfo.ipi_ep_mtx); \
+} while (0)
+
+
+#define SCTP_INP_INFO_WLOCK() do { \
+ mtx_lock(&sctppcbinfo.ipi_ep_mtx); \
+} while (0)
+
+
+
+#define SCTP_IPI_ADDR_INIT() \
+ mtx_init(&sctppcbinfo.ipi_addr_mtx, "sctp-addr-wq", "sctp_addr_wq", MTX_DEF)
+
+#define SCTP_IPI_ADDR_DESTROY() \
+ mtx_destroy(&sctppcbinfo.ipi_addr_mtx)
+
+#define SCTP_IPI_ADDR_LOCK() do { \
+ mtx_lock(&sctppcbinfo.ipi_addr_mtx); \
+} while (0)
+
+#define SCTP_IPI_ADDR_UNLOCK() mtx_unlock(&sctppcbinfo.ipi_addr_mtx)
+
+#define SCTP_INP_INFO_RUNLOCK() mtx_unlock(&sctppcbinfo.ipi_ep_mtx)
+#define SCTP_INP_INFO_WUNLOCK() mtx_unlock(&sctppcbinfo.ipi_ep_mtx)
+
+/*
+ * The INP locks we will use for locking an SCTP endpoint, so for example if
+ * we want to change something at the endpoint level for example random_store
+ * or cookie secrets we lock the INP level.
+ */
+
+#define SCTP_INP_READ_INIT(_inp) \
+ mtx_init(&(_inp)->inp_rdata_mtx, "sctp-read", "inpr", MTX_DEF | MTX_DUPOK)
+
+#define SCTP_INP_READ_DESTROY(_inp) \
+ mtx_destroy(&(_inp)->inp_rdata_mtx)
+
+#define SCTP_INP_READ_LOCK(_inp) do { \
+ mtx_lock(&(_inp)->inp_rdata_mtx); \
+} while (0)
+
+
+#define SCTP_INP_READ_UNLOCK(_inp) mtx_unlock(&(_inp)->inp_rdata_mtx)
+
+
+#define SCTP_INP_LOCK_INIT(_inp) \
+ mtx_init(&(_inp)->inp_mtx, "sctp-inp", "inp", MTX_DEF | MTX_DUPOK)
+#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
+ mtx_init(&(_inp)->inp_create_mtx, "sctp-create", "inp_create", \
+ MTX_DEF | MTX_DUPOK)
+
+#define SCTP_INP_LOCK_DESTROY(_inp) \
+ mtx_destroy(&(_inp)->inp_mtx)
+
+#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
+ mtx_destroy(&(_inp)->inp_create_mtx)
+
+
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_INP_RLOCK(_inp) do { \
+ sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_INP);\
+ mtx_lock(&(_inp)->inp_mtx); \
+} while (0)
+
+#define SCTP_INP_WLOCK(_inp) do { \
+ sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_INP);\
+ mtx_lock(&(_inp)->inp_mtx); \
+} while (0)
+
+#else
+
+#define SCTP_INP_RLOCK(_inp) do { \
+ mtx_lock(&(_inp)->inp_mtx); \
+} while (0)
+
+#define SCTP_INP_WLOCK(_inp) do { \
+ mtx_lock(&(_inp)->inp_mtx); \
+} while (0)
+
+#endif
+
+
+#define SCTP_TCB_SEND_LOCK_INIT(_tcb) \
+ mtx_init(&(_tcb)->tcb_send_mtx, "sctp-send-tcb", "tcbs", MTX_DEF | MTX_DUPOK)
+
+#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb) mtx_destroy(&(_tcb)->tcb_send_mtx)
+
+#define SCTP_TCB_SEND_LOCK(_tcb) do { \
+ mtx_lock(&(_tcb)->tcb_send_mtx); \
+} while (0)
+
+#define SCTP_TCB_SEND_UNLOCK(_tcb) mtx_unlock(&(_tcb)->tcb_send_mtx)
+
+
+#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
+#define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1)
+
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_ASOC_CREATE_LOCK(_inp) \
+ do { \
+ sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_CREATE); \
+ mtx_lock(&(_inp)->inp_create_mtx); \
+ } while (0)
+#else
+
+#define SCTP_ASOC_CREATE_LOCK(_inp) \
+ do { \
+ mtx_lock(&(_inp)->inp_create_mtx); \
+ } while (0)
+#endif
+
+#define SCTP_INP_RUNLOCK(_inp) mtx_unlock(&(_inp)->inp_mtx)
+#define SCTP_INP_WUNLOCK(_inp) mtx_unlock(&(_inp)->inp_mtx)
+#define SCTP_ASOC_CREATE_UNLOCK(_inp) mtx_unlock(&(_inp)->inp_create_mtx)
+
+/*
+ * For the majority of things (once we have found the association) we will
+ * lock the actual association mutex. This will protect all the association
+ * level queues and streams and such. We will need to lock the socket layer
+ * when we stuff data up into the receiving sb_mb. I.e. we will need to do an
+ * extra SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
+ */
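+/*
+ * E.g. (sketch only), delivering data into the receive buffer nests the
+ * locks as:
+ *
+ *	SCTP_TCB_LOCK(stcb);
+ *	...
+ *	SOCKBUF_LOCK(&so->so_rcv);
+ *	...append to so_rcv.sb_mb...
+ *	SOCKBUF_UNLOCK(&so->so_rcv);
+ *	...
+ *	SCTP_TCB_UNLOCK(stcb);
+ */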
+
+#define SCTP_TCB_LOCK_INIT(_tcb) \
+ mtx_init(&(_tcb)->tcb_mtx, "sctp-tcb", "tcb", MTX_DEF | MTX_DUPOK)
+
+#define SCTP_TCB_LOCK_DESTROY(_tcb) mtx_destroy(&(_tcb)->tcb_mtx)
+
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_TCB_LOCK(_tcb) do { \
+ sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB); \
+ mtx_lock(&(_tcb)->tcb_mtx); \
+} while (0)
+
+#else
+#define SCTP_TCB_LOCK(_tcb) do { \
+ mtx_lock(&(_tcb)->tcb_mtx); \
+} while (0)
+
+#endif
+
+
+#define SCTP_TCB_TRYLOCK(_tcb) mtx_trylock(&(_tcb)->tcb_mtx)
+
+#define SCTP_TCB_UNLOCK(_tcb) mtx_unlock(&(_tcb)->tcb_mtx)
+
+#define SCTP_TCB_UNLOCK_IFOWNED(_tcb) do { \
+ if (mtx_owned(&(_tcb)->tcb_mtx)) \
+ mtx_unlock(&(_tcb)->tcb_mtx); \
+ } while (0)
+
+
+
+#ifdef INVARIANTS
+#define SCTP_TCB_LOCK_ASSERT(_tcb) do { \
+ if (mtx_owned(&(_tcb)->tcb_mtx) == 0) \
+ panic("Don't own TCB lock"); \
+ } while (0)
+#else
+#define SCTP_TCB_LOCK_ASSERT(_tcb)
+#endif
+
+#define SCTP_ITERATOR_LOCK_INIT() \
+ mtx_init(&sctppcbinfo.it_mtx, "sctp-it", "iterator", MTX_DEF)
+
+#ifdef INVARIANTS
+#define SCTP_ITERATOR_LOCK() \
+ do { \
+ if (mtx_owned(&sctppcbinfo.it_mtx)) \
+ panic("Iterator Lock"); \
+ mtx_lock(&sctppcbinfo.it_mtx); \
+ } while (0)
+#else
+#define SCTP_ITERATOR_LOCK() \
+ do { \
+ mtx_lock(&sctppcbinfo.it_mtx); \
+ } while (0)
+
+#endif
+
+#define SCTP_ITERATOR_UNLOCK() mtx_unlock(&sctppcbinfo.it_mtx)
+#define SCTP_ITERATOR_LOCK_DESTROY() mtx_destroy(&sctppcbinfo.it_mtx)
+
+
+#define SCTP_INCR_EP_COUNT() \
+ do { \
+ atomic_add_int(&sctppcbinfo.ipi_count_ep, 1); \
+ } while (0)
+
+#define SCTP_DECR_EP_COUNT() \
+ do { \
+ atomic_add_int(&sctppcbinfo.ipi_count_ep,-1); \
+ } while (0)
+
+#define SCTP_INCR_ASOC_COUNT() \
+ do { \
+ atomic_add_int(&sctppcbinfo.ipi_count_asoc, 1); \
+ } while (0)
+
+#define SCTP_DECR_ASOC_COUNT() \
+ do { \
+ atomic_add_int(&sctppcbinfo.ipi_count_asoc, -1); \
+ } while (0)
+
+#define SCTP_INCR_LADDR_COUNT() \
+ do { \
+ atomic_add_int(&sctppcbinfo.ipi_count_laddr, 1); \
+ } while (0)
+
+#define SCTP_DECR_LADDR_COUNT() \
+ do { \
+ atomic_add_int(&sctppcbinfo.ipi_count_laddr, -1); \
+ } while (0)
+
+#define SCTP_INCR_RADDR_COUNT() \
+ do { \
+ atomic_add_int(&sctppcbinfo.ipi_count_raddr,1); \
+ } while (0)
+
+#define SCTP_DECR_RADDR_COUNT() \
+ do { \
+ atomic_add_int(&sctppcbinfo.ipi_count_raddr,-1); \
+ } while (0)
+
+#define SCTP_INCR_CHK_COUNT() \
+ do { \
+ atomic_add_int(&sctppcbinfo.ipi_count_chunk, 1); \
+ } while (0)
+
+#define SCTP_DECR_CHK_COUNT() \
+ do { \
+ if(sctppcbinfo.ipi_count_chunk == 0) \
+ panic("chunk count to 0?"); \
+ atomic_add_int(&sctppcbinfo.ipi_count_chunk,-1); \
+ } while (0)
+
+#define SCTP_INCR_READQ_COUNT() \
+ do { \
+ atomic_add_int(&sctppcbinfo.ipi_count_readq,1); \
+ } while (0)
+
+#define SCTP_DECR_READQ_COUNT() \
+ do { \
+ atomic_add_int(&sctppcbinfo.ipi_count_readq, -1); \
+ } while (0)
+
+#define SCTP_INCR_STRMOQ_COUNT() \
+ do { \
+ atomic_add_int(&sctppcbinfo.ipi_count_strmoq, 1); \
+ } while (0)
+
+#define SCTP_DECR_STRMOQ_COUNT() \
+ do { \
+ atomic_add_int(&sctppcbinfo.ipi_count_strmoq,-1); \
+ } while (0)
+
+
+
+
+
+#endif
diff --git a/sys/netinet/sctp_os.h b/sys/netinet/sctp_os.h
new file mode 100644
index 0000000..f969348
--- /dev/null
+++ b/sys/netinet/sctp_os.h
@@ -0,0 +1,66 @@
+/*-
+ * Copyright (c) 2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+__FBSDID("$FreeBSD$");
+#ifndef __sctp_os_h__
+#define __sctp_os_h__
+
+/*
+ * General kernel memory allocation:
+ * SCTP_MALLOC(element, type, size, name)
+ * SCTP_FREE(element)
+ * Kernel memory allocation for "soname" - memory must be zeroed.
+ * SCTP_MALLOC_SONAME(name, type, size)
+ * SCTP_FREE_SONAME(name)
+ */
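+/*
+ * Example usage (illustrative only; struct foo is made up):
+ *
+ *	struct foo *p;
+ *	SCTP_MALLOC(p, struct foo *, sizeof(*p), "foo");
+ *	...
+ *	SCTP_FREE(p);
+ */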
+
+/*
+ * Zone(pool) allocation routines: MUST be defined for each OS.
+ * zone = zone/pool pointer.
+ * name = string name of the zone/pool.
+ * size = size of each zone/pool element.
+ * number = number of elements in zone/pool.
+ *
+ * sctp_zone_t
+ * SCTP_ZONE_INIT(zone, name, size, number)
+ * SCTP_ZONE_GET(zone)
+ * SCTP_ZONE_FREE(zone, element)
+ * SCTP_ZONE_DESTROY(zone)
+ */
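+/*
+ * Example usage (illustrative only; the zone name, element type, and limit
+ * are made up):
+ *
+ *	sctp_zone_t my_zone;
+ *	SCTP_ZONE_INIT(my_zone, "sctp_example", sizeof(struct example), 1000);
+ *	elem = SCTP_ZONE_GET(my_zone);
+ *	...
+ *	SCTP_ZONE_FREE(my_zone, elem);
+ */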
+
+/*
+ * Functions:
+ * sctp_read_random(void *buffer, uint32_t bytes)
+ */
+
+#include <netinet/sctp_os_bsd.h>
+
+
+
+#endif
diff --git a/sys/netinet/sctp_os_bsd.h b/sys/netinet/sctp_os_bsd.h
new file mode 100644
index 0000000..bd3e8bf
--- /dev/null
+++ b/sys/netinet/sctp_os_bsd.h
@@ -0,0 +1,89 @@
+/*-
+ * Copyright (c) 2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+__FBSDID("$FreeBSD$");
+#ifndef __sctp_os_bsd_h__
+#define __sctp_os_bsd_h__
+
+/*
+ * includes
+ */
+#include <sys/random.h>
+
+
+/*
+ * mbuf type abstraction
+ */
+typedef struct mbuf *sctp_mbuf_t;
+
+/*
+ * general memory allocation
+ */
+#define SCTP_MALLOC(var, type, size, name) \
+ do { \
+ MALLOC(var, type, size, M_PCB, M_NOWAIT); \
+ } while (0)
+
+#define SCTP_FREE(var) FREE(var, M_PCB)
+
+#define SCTP_MALLOC_SONAME(var, type, size) \
+ do { \
+ MALLOC(var, type, size, M_SONAME, M_WAITOK | M_ZERO); \
+ } while (0)
+
+#define SCTP_FREE_SONAME(var) FREE(var, M_SONAME)
+
+/*
+ * zone allocation functions
+ */
+#include <vm/uma.h>
+/* SCTP_ZONE_INIT: initialize the zone */
+typedef struct uma_zone *sctp_zone_t;
+
+#define UMA_ZFLAG_FULL 0x0020
+#define SCTP_ZONE_INIT(zone, name, size, number) { \
+ zone = uma_zcreate(name, size, NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,\
+ UMA_ZFLAG_FULL); \
+ uma_zone_set_max(zone, number); \
+}
+
+/* SCTP_ZONE_GET: allocate element from the zone */
+#define SCTP_ZONE_GET(zone) \
+	uma_zalloc(zone, M_NOWAIT)
+
+/* SCTP_ZONE_FREE: free element from the zone */
+#define SCTP_ZONE_FREE(zone, element) \
+	uma_zfree(zone, element)
+
+/*
+ * Functions
+ */
+#define sctp_read_random(buf, len) read_random(buf, len)
+
+#endif
diff --git a/sys/netinet/sctp_output.c b/sys/netinet/sctp_output.c
new file mode 100644
index 0000000..ea91c3a
--- /dev/null
+++ b/sys/netinet/sctp_output.c
@@ -0,0 +1,10378 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_ipsec.h"
+#include "opt_compat.h"
+#include "opt_inet6.h"
+#include "opt_inet.h"
+#include "opt_sctp.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/domain.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/resourcevar.h>
+#include <sys/uio.h>
+#ifdef INET6
+#include <sys/domain.h>
+#endif
+
+#include <sys/limits.h>
+#include <machine/cpu.h>
+
+#include <net/if.h>
+#include <net/if_types.h>
+
+#include <net/if_var.h>
+
+#include <net/route.h>
+
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#include <netinet/in_pcb.h>
+#include <netinet/in_var.h>
+#include <netinet/ip_var.h>
+
+#ifdef INET6
+#include <netinet/ip6.h>
+#include <netinet6/ip6_var.h>
+#include <netinet6/scope6_var.h>
+#include <netinet6/nd6.h>
+
+#include <netinet6/in6_pcb.h>
+
+#include <netinet/icmp6.h>
+
+#endif /* INET6 */
+
+
+
+#ifndef in6pcb
+#define in6pcb inpcb
+#endif
+
+#ifdef IPSEC
+#include <netinet6/ipsec.h>
+#include <netkey/key.h>
+#endif /* IPSEC */
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_auth.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_bsd_addr.h>
+
+#ifdef SCTP_DEBUG
+extern uint32_t sctp_debug_on;
+
+#endif
+
+
+
+#define SCTP_MAX_GAPS_INARRAY 4
+struct sack_track {
+ uint8_t right_edge; /* mergable on the right edge */
+ uint8_t left_edge; /* mergable on the left edge */
+ uint8_t num_entries;
+ uint8_t spare;
+ struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
+};
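+
+/*
+ * sack_array below is indexed by one byte of the SACK mapping array; bit n
+ * of the index set means TSN offset n within that byte was received. Each
+ * entry pre-computes the gap ack blocks implied by that bit pattern; e.g.
+ * index 0x05 (bits 0 and 2 set) yields the two blocks {0,0} and {2,2}.
+ * right_edge is set when bit 0 is set and left_edge when bit 7 is set,
+ * i.e. when the first/last block may merge with a neighbouring byte's
+ * blocks.
+ */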
+
+struct sack_track sack_array[256] = {
+ {0, 0, 0, 0, /* 0x00 */
+ {{0, 0},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x01 */
+ {{0, 0},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x02 */
+ {{1, 1},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x03 */
+ {{0, 1},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x04 */
+ {{2, 2},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x05 */
+ {{0, 0},
+ {2, 2},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x06 */
+ {{1, 2},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x07 */
+ {{0, 2},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x08 */
+ {{3, 3},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x09 */
+ {{0, 0},
+ {3, 3},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x0a */
+ {{1, 1},
+ {3, 3},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x0b */
+ {{0, 1},
+ {3, 3},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x0c */
+ {{2, 3},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x0d */
+ {{0, 0},
+ {2, 3},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x0e */
+ {{1, 3},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x0f */
+ {{0, 3},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x10 */
+ {{4, 4},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x11 */
+ {{0, 0},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x12 */
+ {{1, 1},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x13 */
+ {{0, 1},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x14 */
+ {{2, 2},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x15 */
+ {{0, 0},
+ {2, 2},
+ {4, 4},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x16 */
+ {{1, 2},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x17 */
+ {{0, 2},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x18 */
+ {{3, 4},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x19 */
+ {{0, 0},
+ {3, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x1a */
+ {{1, 1},
+ {3, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x1b */
+ {{0, 1},
+ {3, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x1c */
+ {{2, 4},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x1d */
+ {{0, 0},
+ {2, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x1e */
+ {{1, 4},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x1f */
+ {{0, 4},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x20 */
+ {{5, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x21 */
+ {{0, 0},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x22 */
+ {{1, 1},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x23 */
+ {{0, 1},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x24 */
+ {{2, 2},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x25 */
+ {{0, 0},
+ {2, 2},
+ {5, 5},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x26 */
+ {{1, 2},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x27 */
+ {{0, 2},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x28 */
+ {{3, 3},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x29 */
+ {{0, 0},
+ {3, 3},
+ {5, 5},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x2a */
+ {{1, 1},
+ {3, 3},
+ {5, 5},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x2b */
+ {{0, 1},
+ {3, 3},
+ {5, 5},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x2c */
+ {{2, 3},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x2d */
+ {{0, 0},
+ {2, 3},
+ {5, 5},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x2e */
+ {{1, 3},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x2f */
+ {{0, 3},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x30 */
+ {{4, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x31 */
+ {{0, 0},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x32 */
+ {{1, 1},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x33 */
+ {{0, 1},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x34 */
+ {{2, 2},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x35 */
+ {{0, 0},
+ {2, 2},
+ {4, 5},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x36 */
+ {{1, 2},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x37 */
+ {{0, 2},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x38 */
+ {{3, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x39 */
+ {{0, 0},
+ {3, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x3a */
+ {{1, 1},
+ {3, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x3b */
+ {{0, 1},
+ {3, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x3c */
+ {{2, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x3d */
+ {{0, 0},
+ {2, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x3e */
+ {{1, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x3f */
+ {{0, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x40 */
+ {{6, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x41 */
+ {{0, 0},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x42 */
+ {{1, 1},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x43 */
+ {{0, 1},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x44 */
+ {{2, 2},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x45 */
+ {{0, 0},
+ {2, 2},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x46 */
+ {{1, 2},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x47 */
+ {{0, 2},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x48 */
+ {{3, 3},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x49 */
+ {{0, 0},
+ {3, 3},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x4a */
+ {{1, 1},
+ {3, 3},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x4b */
+ {{0, 1},
+ {3, 3},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x4c */
+ {{2, 3},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x4d */
+ {{0, 0},
+ {2, 3},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x4e */
+ {{1, 3},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x4f */
+ {{0, 3},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x50 */
+ {{4, 4},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x51 */
+ {{0, 0},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x52 */
+ {{1, 1},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x53 */
+ {{0, 1},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x54 */
+ {{2, 2},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 4, 0, /* 0x55 */
+ {{0, 0},
+ {2, 2},
+ {4, 4},
+ {6, 6}
+ }
+ },
+ {0, 0, 3, 0, /* 0x56 */
+ {{1, 2},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x57 */
+ {{0, 2},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x58 */
+ {{3, 4},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x59 */
+ {{0, 0},
+ {3, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x5a */
+ {{1, 1},
+ {3, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x5b */
+ {{0, 1},
+ {3, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x5c */
+ {{2, 4},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x5d */
+ {{0, 0},
+ {2, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x5e */
+ {{1, 4},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x5f */
+ {{0, 4},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x60 */
+ {{5, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x61 */
+ {{0, 0},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x62 */
+ {{1, 1},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x63 */
+ {{0, 1},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x64 */
+ {{2, 2},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x65 */
+ {{0, 0},
+ {2, 2},
+ {5, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x66 */
+ {{1, 2},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x67 */
+ {{0, 2},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x68 */
+ {{3, 3},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x69 */
+ {{0, 0},
+ {3, 3},
+ {5, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x6a */
+ {{1, 1},
+ {3, 3},
+ {5, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x6b */
+ {{0, 1},
+ {3, 3},
+ {5, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x6c */
+ {{2, 3},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x6d */
+ {{0, 0},
+ {2, 3},
+ {5, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x6e */
+ {{1, 3},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x6f */
+ {{0, 3},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x70 */
+ {{4, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x71 */
+ {{0, 0},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x72 */
+ {{1, 1},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x73 */
+ {{0, 1},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x74 */
+ {{2, 2},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x75 */
+ {{0, 0},
+ {2, 2},
+ {4, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x76 */
+ {{1, 2},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x77 */
+ {{0, 2},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x78 */
+ {{3, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x79 */
+ {{0, 0},
+ {3, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x7a */
+ {{1, 1},
+ {3, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x7b */
+ {{0, 1},
+ {3, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x7c */
+ {{2, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x7d */
+ {{0, 0},
+ {2, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x7e */
+ {{1, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x7f */
+ {{0, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0x80 */
+ {{7, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0x81 */
+ {{0, 0},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x82 */
+ {{1, 1},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0x83 */
+ {{0, 1},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x84 */
+ {{2, 2},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x85 */
+ {{0, 0},
+ {2, 2},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x86 */
+ {{1, 2},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0x87 */
+ {{0, 2},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x88 */
+ {{3, 3},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x89 */
+ {{0, 0},
+ {3, 3},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0x8a */
+ {{1, 1},
+ {3, 3},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x8b */
+ {{0, 1},
+ {3, 3},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x8c */
+ {{2, 3},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x8d */
+ {{0, 0},
+ {2, 3},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x8e */
+ {{1, 3},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0x8f */
+ {{0, 3},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x90 */
+ {{4, 4},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x91 */
+ {{0, 0},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0x92 */
+ {{1, 1},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x93 */
+ {{0, 1},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0x94 */
+ {{2, 2},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0x95 */
+ {{0, 0},
+ {2, 2},
+ {4, 4},
+ {7, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0x96 */
+ {{1, 2},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x97 */
+ {{0, 2},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x98 */
+ {{3, 4},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x99 */
+ {{0, 0},
+ {3, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0x9a */
+ {{1, 1},
+ {3, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x9b */
+ {{0, 1},
+ {3, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x9c */
+ {{2, 4},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x9d */
+ {{0, 0},
+ {2, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x9e */
+ {{1, 4},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0x9f */
+ {{0, 4},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xa0 */
+ {{5, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xa1 */
+ {{0, 0},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xa2 */
+ {{1, 1},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xa3 */
+ {{0, 1},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xa4 */
+ {{2, 2},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0xa5 */
+ {{0, 0},
+ {2, 2},
+ {5, 5},
+ {7, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0xa6 */
+ {{1, 2},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xa7 */
+ {{0, 2},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xa8 */
+ {{3, 3},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0xa9 */
+ {{0, 0},
+ {3, 3},
+ {5, 5},
+ {7, 7}
+ }
+ },
+ {0, 1, 4, 0, /* 0xaa */
+ {{1, 1},
+ {3, 3},
+ {5, 5},
+ {7, 7}
+ }
+ },
+ {1, 1, 4, 0, /* 0xab */
+ {{0, 1},
+ {3, 3},
+ {5, 5},
+ {7, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0xac */
+ {{2, 3},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0xad */
+ {{0, 0},
+ {2, 3},
+ {5, 5},
+ {7, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0xae */
+ {{1, 3},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xaf */
+ {{0, 3},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xb0 */
+ {{4, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xb1 */
+ {{0, 0},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xb2 */
+ {{1, 1},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xb3 */
+ {{0, 1},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xb4 */
+ {{2, 2},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0xb5 */
+ {{0, 0},
+ {2, 2},
+ {4, 5},
+ {7, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0xb6 */
+ {{1, 2},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xb7 */
+ {{0, 2},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xb8 */
+ {{3, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xb9 */
+ {{0, 0},
+ {3, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xba */
+ {{1, 1},
+ {3, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xbb */
+ {{0, 1},
+ {3, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xbc */
+ {{2, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xbd */
+ {{0, 0},
+ {2, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xbe */
+ {{1, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xbf */
+ {{0, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xc0 */
+ {{6, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xc1 */
+ {{0, 0},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xc2 */
+ {{1, 1},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xc3 */
+ {{0, 1},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xc4 */
+ {{2, 2},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xc5 */
+ {{0, 0},
+ {2, 2},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xc6 */
+ {{1, 2},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xc7 */
+ {{0, 2},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xc8 */
+ {{3, 3},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xc9 */
+ {{0, 0},
+ {3, 3},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xca */
+ {{1, 1},
+ {3, 3},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xcb */
+ {{0, 1},
+ {3, 3},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xcc */
+ {{2, 3},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xcd */
+ {{0, 0},
+ {2, 3},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xce */
+ {{1, 3},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xcf */
+ {{0, 3},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xd0 */
+ {{4, 4},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xd1 */
+ {{0, 0},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xd2 */
+ {{1, 1},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xd3 */
+ {{0, 1},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xd4 */
+ {{2, 2},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0xd5 */
+ {{0, 0},
+ {2, 2},
+ {4, 4},
+ {6, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0xd6 */
+ {{1, 2},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xd7 */
+ {{0, 2},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xd8 */
+ {{3, 4},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xd9 */
+ {{0, 0},
+ {3, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xda */
+ {{1, 1},
+ {3, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xdb */
+ {{0, 1},
+ {3, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xdc */
+ {{2, 4},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xdd */
+ {{0, 0},
+ {2, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xde */
+ {{1, 4},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xdf */
+ {{0, 4},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xe0 */
+ {{5, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xe1 */
+ {{0, 0},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xe2 */
+ {{1, 1},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xe3 */
+ {{0, 1},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xe4 */
+ {{2, 2},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xe5 */
+ {{0, 0},
+ {2, 2},
+ {5, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xe6 */
+ {{1, 2},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xe7 */
+ {{0, 2},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xe8 */
+ {{3, 3},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xe9 */
+ {{0, 0},
+ {3, 3},
+ {5, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xea */
+ {{1, 1},
+ {3, 3},
+ {5, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xeb */
+ {{0, 1},
+ {3, 3},
+ {5, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xec */
+ {{2, 3},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xed */
+ {{0, 0},
+ {2, 3},
+ {5, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xee */
+ {{1, 3},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xef */
+ {{0, 3},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xf0 */
+ {{4, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xf1 */
+ {{0, 0},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xf2 */
+ {{1, 1},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xf3 */
+ {{0, 1},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xf4 */
+ {{2, 2},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xf5 */
+ {{0, 0},
+ {2, 2},
+ {4, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xf6 */
+ {{1, 2},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xf7 */
+ {{0, 2},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xf8 */
+ {{3, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xf9 */
+ {{0, 0},
+ {3, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xfa */
+ {{1, 1},
+ {3, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xfb */
+ {{0, 1},
+ {3, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xfc */
+ {{2, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xfd */
+ {{0, 0},
+ {2, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xfe */
+ {{1, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 1, 0, /* 0xff */
+ {{0, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ }
+};
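+
+/*
+ * Each entry above is indexed by one byte of the (presumably SACK) mapping
+ * array, noted as 0x00-0xff in the entry comments, and pre-computes the gap
+ * descriptors for that bit pattern as {start, end} bit offsets within the
+ * byte.  The first two fields flag whether bit 0 and bit 7 are set, i.e.
+ * whether a gap abuts the low or high edge of the byte and may merge with a
+ * neighboring byte's gap; the third field is the number of gap entries in
+ * use.  For example 0xa4 (10100100) has bits 2, 5 and 7 set, giving the
+ * three gaps {2, 2}, {5, 5} and {7, 7}.
+ */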
+
+
+
+
+extern int sctp_peer_chunk_oh;
+
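+/*
+ * Scan the control mbuf(s) for a cmsg of the given IPPROTO_SCTP type and,
+ * if found, copy cpsize bytes of its payload into "data"; returns 1 on
+ * success, 0 when the cmsg is absent or malformed.  A minimal, hypothetical
+ * use, pulling the send parameters out of a sendmsg() control chain:
+ *
+ *	struct sctp_sndrcvinfo srcv;
+ *
+ *	if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control,
+ *	    sizeof(srcv)))
+ *		... use srcv.sinfo_stream, srcv.sinfo_flags, etc. ...
+ */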
+static int
+sctp_find_cmsg(int c_type, void *data, struct mbuf *control, int cpsize)
+{
+ struct cmsghdr cmh;
+ int tlen, at;
+
+ tlen = control->m_len;
+ at = 0;
+ /*
+ * Independent of how many mbufs, find the c_type inside the control
+ * structure and copy out the data.
+ */
+ while (at < tlen) {
+ if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
+			/* not enough room for one more; we are done. */
+ return (0);
+ }
+ m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
+ if ((cmh.cmsg_len + at) > tlen) {
+ /*
+			 * this is really messed up since there is not enough
+ * data here to cover the cmsg header. We are done.
+ */
+ return (0);
+ }
+ if ((cmh.cmsg_level == IPPROTO_SCTP) &&
+ (c_type == cmh.cmsg_type)) {
+ /* found the one we want, copy it out */
+ at += CMSG_ALIGN(sizeof(struct cmsghdr));
+ if ((int)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) {
+ /*
+ * space of cmsg_len after header not big
+ * enough
+ */
+ return (0);
+ }
+ m_copydata(control, at, cpsize, data);
+ return (1);
+ } else {
+ at += CMSG_ALIGN(cmh.cmsg_len);
+ if (cmh.cmsg_len == 0) {
+ break;
+ }
+ }
+ }
+ /* not found */
+ return (0);
+}
+
+
+extern int sctp_mbuf_threshold_count;
+
+
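+/*
+ * Get an mbuf (with a pkthdr when want_header is set) able to hold
+ * space_needed bytes.  If the request will not fit in a short chain of
+ * plain mbufs (sctp_mbuf_threshold_count of them, or a single mbuf when
+ * allonebuf is set), a cluster is attached instead, sized up through the
+ * 2k, page, 9k and 16k classes.  A minimal sketch of a typical call, as
+ * used for building an INIT below:
+ *
+ *	m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
+ *	if (m == NULL)
+ *		return;		(no memory; the caller retries later)
+ */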
+__inline struct mbuf *
+sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
+ int how, int allonebuf, int type)
+{
+ struct mbuf *m = NULL;
+ int aloc_size;
+ int index = 0;
+ int mbuf_threshold;
+
+ if (want_header) {
+ MGETHDR(m, how, type);
+ } else {
+ MGET(m, how, type);
+ }
+ if (m == NULL) {
+ return (NULL);
+ }
+ if (allonebuf == 0)
+ mbuf_threshold = sctp_mbuf_threshold_count;
+ else
+ mbuf_threshold = 1;
+
+
+ if (space_needed > (((mbuf_threshold - 1) * MLEN) + MHLEN)) {
+try_again:
+ index = 4;
+ if (space_needed <= MCLBYTES) {
+ aloc_size = MCLBYTES;
+ } else if (space_needed <= MJUMPAGESIZE) {
+ aloc_size = MJUMPAGESIZE;
+ index = 5;
+ } else if (space_needed <= MJUM9BYTES) {
+ aloc_size = MJUM9BYTES;
+ index = 6;
+ } else {
+ aloc_size = MJUM16BYTES;
+ index = 7;
+ }
+ m_cljget(m, how, aloc_size);
+ if (m == NULL) {
+ return (NULL);
+ }
+ if ((m->m_flags & M_EXT) == 0) {
+ if ((aloc_size != MCLBYTES) &&
+ (allonebuf == 0)) {
+ aloc_size -= 10;
+ goto try_again;
+ }
+ sctp_m_freem(m);
+ return (NULL);
+ }
+ }
+ m->m_len = 0;
+ m->m_next = m->m_nextpkt = NULL;
+#ifdef SCTP_MBUF_LOGGING
+ if (m->m_flags & M_EXT) {
+ sctp_log_mb(m, SCTP_MBUF_IALLOC);
+ }
+#endif
+
+ if (want_header) {
+ m->m_pkthdr.len = 0;
+ }
+ return (m);
+}
+
+
+
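+/*
+ * Build the STATE-COOKIE parameter for an INIT-ACK.  The resulting mbuf
+ * chain is a paramhdr plus the state cookie passed in, followed by a copy
+ * of the received INIT, a copy of our INIT-ACK and finally an HMAC
+ * signature over the whole cookie; param_length is filled in once the
+ * total size is known.
+ */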
+static struct mbuf *
+sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
+ struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in)
+{
+ struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
+ struct sctp_state_cookie *stc;
+ struct sctp_paramhdr *ph;
+ uint8_t *signature;
+ int sig_offset;
+ uint16_t cookie_sz;
+
+
+ mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
+ sizeof(struct sctp_paramhdr)), 0, M_DONTWAIT, 1, MT_DATA);
+ if (mret == NULL) {
+ return (NULL);
+ }
+ copy_init = sctp_m_copym(init, init_offset, M_COPYALL, M_DONTWAIT);
+ if (copy_init == NULL) {
+ sctp_m_freem(mret);
+ return (NULL);
+ }
+ copy_initack = sctp_m_copym(initack, initack_offset, M_COPYALL,
+ M_DONTWAIT);
+ if (copy_initack == NULL) {
+ sctp_m_freem(mret);
+ sctp_m_freem(copy_init);
+ return (NULL);
+ }
+ /* easy side we just drop it on the end */
+ ph = mtod(mret, struct sctp_paramhdr *);
+ mret->m_len = sizeof(struct sctp_state_cookie) +
+ sizeof(struct sctp_paramhdr);
+ stc = (struct sctp_state_cookie *)((caddr_t)ph +
+ sizeof(struct sctp_paramhdr));
+ ph->param_type = htons(SCTP_STATE_COOKIE);
+ ph->param_length = 0; /* fill in at the end */
+ /* Fill in the stc cookie data */
+ *stc = *stc_in;
+
+ /* tack the INIT and then the INIT-ACK onto the chain */
+ cookie_sz = 0;
+	for (m_at = mret; m_at; m_at = m_at->m_next) {
+ cookie_sz += m_at->m_len;
+ if (m_at->m_next == NULL) {
+ m_at->m_next = copy_init;
+ break;
+ }
+ }
+
+ for (m_at = copy_init; m_at; m_at = m_at->m_next) {
+ cookie_sz += m_at->m_len;
+ if (m_at->m_next == NULL) {
+ m_at->m_next = copy_initack;
+ break;
+ }
+ }
+
+ for (m_at = copy_initack; m_at; m_at = m_at->m_next) {
+ cookie_sz += m_at->m_len;
+ if (m_at->m_next == NULL) {
+ break;
+ }
+ }
+ sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_DONTWAIT, 1, MT_DATA);
+ if (sig == NULL) {
+ /* no space, so free the entire chain */
+ sctp_m_freem(mret);
+ return (NULL);
+ }
+ sig->m_len = 0;
+ m_at->m_next = sig;
+ sig_offset = 0;
+ signature = (uint8_t *) (mtod(sig, caddr_t)+sig_offset);
+ /* Time to sign the cookie */
+ sctp_hmac_m(SCTP_HMAC,
+ (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
+ SCTP_SECRET_SIZE, mret, sizeof(struct sctp_paramhdr),
+ (uint8_t *) signature);
+ sig->m_len += SCTP_SIGNATURE_SIZE;
+ cookie_sz += SCTP_SIGNATURE_SIZE;
+
+ ph->param_length = htons(cookie_sz);
+ return (mret);
+}
+
+
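+/*
+ * Choose the ECT codepoint for an outgoing chunk.  When ECN nonces are in
+ * use, one pseudo-random bit per call is consumed from hb_random_values
+ * (refilled via sctp_select_initial_TSN() when exhausted); a 1 bit sends
+ * ECT1 and records the nonce on the chunk, a 0 bit sends ECT0.  Without
+ * nonce support (ours or the peer's) we always send ECT0.
+ */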
+static __inline uint8_t
+sctp_get_ect(struct sctp_tcb *stcb,
+ struct sctp_tmit_chunk *chk)
+{
+ uint8_t this_random;
+
+ /* Huh? */
+ if (sctp_ecn_enable == 0)
+ return (0);
+
+ if (sctp_ecn_nonce == 0)
+ /* no nonce, always return ECT0 */
+ return (SCTP_ECT0_BIT);
+
+ if (stcb->asoc.peer_supports_ecn_nonce == 0) {
+		/* Peer does NOT support it, so we send an ECT0 only */
+ return (SCTP_ECT0_BIT);
+ }
+ if (chk == NULL)
+ return (SCTP_ECT0_BIT);
+
+ if (((stcb->asoc.hb_random_idx == 3) &&
+ (stcb->asoc.hb_ect_randombit > 7)) ||
+ (stcb->asoc.hb_random_idx > 3)) {
+ uint32_t rndval;
+
+ rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
+ memcpy(stcb->asoc.hb_random_values, &rndval,
+ sizeof(stcb->asoc.hb_random_values));
+ this_random = stcb->asoc.hb_random_values[0];
+ stcb->asoc.hb_random_idx = 0;
+ stcb->asoc.hb_ect_randombit = 0;
+ } else {
+ if (stcb->asoc.hb_ect_randombit > 7) {
+ stcb->asoc.hb_ect_randombit = 0;
+ stcb->asoc.hb_random_idx++;
+ }
+ this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
+ }
+ if ((this_random >> stcb->asoc.hb_ect_randombit) & 0x01) {
+ if (chk != NULL)
+ /* ECN Nonce stuff */
+ chk->rec.data.ect_nonce = SCTP_ECT1_BIT;
+ stcb->asoc.hb_ect_randombit++;
+ return (SCTP_ECT1_BIT);
+ } else {
+ stcb->asoc.hb_ect_randombit++;
+ return (SCTP_ECT0_BIT);
+ }
+}
+
+extern int sctp_no_csum_on_loopback;
+
+static int
+sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb, /* may be NULL */
+ struct sctp_nets *net,
+ struct sockaddr *to,
+ struct mbuf *m,
+ uint32_t auth_offset,
+ struct sctp_auth_chunk *auth,
+ int nofragment_flag,
+ int ecn_ok,
+ struct sctp_tmit_chunk *chk,
+ int out_of_asoc_ok)
+/* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
+{
+	/*
+	 * Given an mbuf chain (via m_next) that holds a packet header WITH an
+	 * SCTPHDR but no IP header, an endpoint inp and an sa structure:
+	 * - fill in the HMAC digest of any AUTH chunk in the packet
+	 * - calculate the SCTP checksum and fill it in
+	 * - prepend an IP header; if boundall use INADDR_ANY, if
+	 *   boundspecific do source address selection
+	 * - set the fragmentation option for IPv4
+	 * - on return from IP output, check/adjust the mtu size of the
+	 *   output interface and the smallest_mtu size as well
+	 */
+ struct sctphdr *sctphdr;
+ int o_flgs;
+ uint32_t csum;
+ int ret;
+ unsigned int have_mtu;
+ struct route *ro;
+
+ if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
+ sctp_m_freem(m);
+ return (EFAULT);
+ }
+ if ((m->m_flags & M_PKTHDR) == 0) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("Software error: sctp_lowlevel_chunk_output() called with non pkthdr!\n");
+ }
+#endif
+ sctp_m_freem(m);
+ return (EFAULT);
+ }
+ /* fill in the HMAC digest for any AUTH chunk in the packet */
+ if ((auth != NULL) && (stcb != NULL)) {
+ sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb);
+ }
+ /* Calculate the csum and fill in the length of the packet */
+ sctphdr = mtod(m, struct sctphdr *);
+ have_mtu = 0;
+ if (sctp_no_csum_on_loopback &&
+ (stcb) &&
+ (stcb->asoc.loopback_scope)) {
+ sctphdr->checksum = 0;
+ /*
+ * This can probably now be taken out since my audit shows
+ * no more bad pktlen's coming in. But we will wait a while
+ * yet.
+ */
+ m->m_pkthdr.len = sctp_calculate_len(m);
+ } else {
+ sctphdr->checksum = 0;
+ csum = sctp_calculate_sum(m, &m->m_pkthdr.len, 0);
+ sctphdr->checksum = csum;
+ }
+ if (to->sa_family == AF_INET) {
+ struct ip *ip;
+ struct route iproute;
+ uint8_t tos_value;
+
+ M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
+ if (m == NULL) {
+ /* failed to prepend data, give up */
+ return (ENOMEM);
+ }
+ ip = mtod(m, struct ip *);
+ ip->ip_v = IPVERSION;
+ ip->ip_hl = (sizeof(struct ip) >> 2);
+ if (net) {
+ tos_value = net->tos_flowlabel & 0x000000ff;
+ } else {
+ tos_value = inp->ip_inp.inp.inp_ip_tos;
+ }
+ if (nofragment_flag) {
+#if defined(WITH_CONVERT_IP_OFF) || defined(__FreeBSD__) || defined(__APPLE__)
+ ip->ip_off = IP_DF;
+#else
+ ip->ip_off = htons(IP_DF);
+#endif
+ } else
+ ip->ip_off = 0;
+
+
+ /* FreeBSD has a function for ip_id's */
+ ip->ip_id = ip_newid();
+
+ ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
+ ip->ip_len = m->m_pkthdr.len;
+ if (stcb) {
+ if ((stcb->asoc.ecn_allowed) && ecn_ok) {
+ /* Enable ECN */
+ ip->ip_tos = ((u_char)(tos_value & 0xfc) | sctp_get_ect(stcb, chk));
+ } else {
+ /* No ECN */
+ ip->ip_tos = (u_char)(tos_value & 0xfc);
+ }
+ } else {
+ /* no association at all */
+ ip->ip_tos = (tos_value & 0xfc);
+ }
+ ip->ip_p = IPPROTO_SCTP;
+ ip->ip_sum = 0;
+ if (net == NULL) {
+ ro = &iproute;
+ memset(&iproute, 0, sizeof(iproute));
+ memcpy(&ro->ro_dst, to, to->sa_len);
+ } else {
+ ro = (struct route *)&net->ro;
+ }
+ /* Now the address selection part */
+ ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
+
+ /* call the routine to select the src address */
+ if (net) {
+ if (net->src_addr_selected == 0) {
+ /* Cache the source address */
+ ((struct sockaddr_in *)&net->ro._s_addr)->sin_addr = sctp_ipv4_source_address_selection(inp,
+ stcb,
+ ro, net, out_of_asoc_ok);
+ if (ro->ro_rt)
+ net->src_addr_selected = 1;
+ }
+ ip->ip_src = ((struct sockaddr_in *)&net->ro._s_addr)->sin_addr;
+ } else {
+ ip->ip_src = sctp_ipv4_source_address_selection(inp,
+ stcb, ro, net, out_of_asoc_ok);
+ }
+
+ /*
+ * If source address selection fails and we find no route
+		 * then the ip_output should fail as well with a
+ * NO_ROUTE_TO_HOST type error. We probably should catch
+ * that somewhere and abort the association right away
+ * (assuming this is an INIT being sent).
+ */
+ if ((ro->ro_rt == NULL)) {
+ /*
+ * src addr selection failed to find a route (or
+ * valid source addr), so we can't get there from
+ * here!
+ */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("low_level_output: dropped v4 packet- no valid source addr\n");
+ printf("Destination was %x\n", (uint32_t) (ntohl(ip->ip_dst.s_addr)));
+ }
+#endif /* SCTP_DEBUG */
+ if (net) {
+ if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb)
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
+ stcb,
+ SCTP_FAILED_THRESHOLD,
+ (void *)net);
+ net->dest_state &= ~SCTP_ADDR_REACHABLE;
+ net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
+ if (stcb) {
+ if (net == stcb->asoc.primary_destination) {
+ /* need a new primary */
+ struct sctp_nets *alt;
+
+ alt = sctp_find_alternate_net(stcb, net, 0);
+ if (alt != net) {
+ if (sctp_set_primary_addr(stcb,
+ (struct sockaddr *)NULL,
+ alt) == 0) {
+ net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
+ net->src_addr_selected = 0;
+ }
+ }
+ }
+ }
+ }
+ sctp_m_freem(m);
+ return (EHOSTUNREACH);
+ } else {
+ have_mtu = ro->ro_rt->rt_ifp->if_mtu;
+ }
+ if (inp->sctp_socket) {
+ o_flgs = (IP_RAWOUTPUT | (inp->sctp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST)));
+ } else {
+ o_flgs = IP_RAWOUTPUT;
+ }
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
+ printf("Calling ipv4 output routine from low level src addr:%x\n",
+ (uint32_t) (ntohl(ip->ip_src.s_addr)));
+ printf("Destination is %x\n", (uint32_t) (ntohl(ip->ip_dst.s_addr)));
+ printf("RTP route is %p through\n", ro->ro_rt);
+ }
+#endif
+
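+	/*
+	 * If the MTU we track for this destination is smaller than the
+	 * outgoing interface's, temporarily lower if_mtu around the
+	 * ip_output() call so fragmentation honors the association's view
+	 * of the path; the saved value is restored right after the send.
+	 */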
+ if ((have_mtu) && (net) && (have_mtu > net->mtu)) {
+ ro->ro_rt->rt_ifp->if_mtu = net->mtu;
+ }
+ if (ro != &iproute) {
+ memcpy(&iproute, ro, sizeof(*ro));
+ }
+ ret = ip_output(m, inp->ip_inp.inp.inp_options,
+ ro, o_flgs, inp->ip_inp.inp.inp_moptions
+ ,(struct inpcb *)NULL
+ );
+ if ((ro->ro_rt) && (have_mtu) && (net) && (have_mtu > net->mtu)) {
+ ro->ro_rt->rt_ifp->if_mtu = have_mtu;
+ }
+ SCTP_STAT_INCR(sctps_sendpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
+ if (ret)
+ SCTP_STAT_INCR(sctps_senderrors);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
+ printf("Ip output returns %d\n", ret);
+ }
+#endif
+ if (net == NULL) {
+			/* free temporary routes */
+ if (ro->ro_rt)
+ RTFREE(ro->ro_rt);
+ } else {
+ /* PMTU check versus smallest asoc MTU goes here */
+ if (ro->ro_rt != NULL) {
+ if (ro->ro_rt->rt_rmx.rmx_mtu &&
+ (stcb->asoc.smallest_mtu > ro->ro_rt->rt_rmx.rmx_mtu)) {
+ sctp_mtu_size_reset(inp, &stcb->asoc,
+ ro->ro_rt->rt_rmx.rmx_mtu);
+ }
+ } else {
+ /* route was freed */
+ net->src_addr_selected = 0;
+ }
+ }
+ return (ret);
+ }
+#ifdef INET6
+ else if (to->sa_family == AF_INET6) {
+ uint32_t flowlabel;
+ struct ip6_hdr *ip6h;
+
+ struct route_in6 ip6route;
+
+ struct ifnet *ifp;
+ u_char flowTop;
+ uint16_t flowBottom;
+ u_char tosBottom, tosTop;
+ struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
+ struct sockaddr_in6 lsa6_storage;
+ int prev_scope = 0;
+ int error;
+ u_short prev_port = 0;
+
+ if (net != NULL) {
+ flowlabel = net->tos_flowlabel;
+ } else {
+ flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
+ }
+ M_PREPEND(m, sizeof(struct ip6_hdr), M_DONTWAIT);
+ if (m == NULL) {
+ /* failed to prepend data, give up */
+ return (ENOMEM);
+ }
+ ip6h = mtod(m, struct ip6_hdr *);
+
+ /*
+ * We assume here that inp_flow is in host byte order within
+ * the TCB!
+ */
+ flowBottom = flowlabel & 0x0000ffff;
+ flowTop = ((flowlabel & 0x000f0000) >> 16);
+ tosTop = (((flowlabel & 0xf0) >> 4) | IPV6_VERSION);
+ /* protect *sin6 from overwrite */
+ sin6 = (struct sockaddr_in6 *)to;
+ tmp = *sin6;
+ sin6 = &tmp;
+
+ /* KAME hack: embed scopeid */
+ if (sa6_embedscope(sin6, ip6_use_defzone) != 0)
+ return (EINVAL);
+ if (net == NULL) {
+ memset(&ip6route, 0, sizeof(ip6route));
+ ro = (struct route *)&ip6route;
+ memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
+ } else {
+ ro = (struct route *)&net->ro;
+ }
+ if (stcb != NULL) {
+ if ((stcb->asoc.ecn_allowed) && ecn_ok) {
+ /* Enable ECN */
+ tosBottom = (((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) | sctp_get_ect(stcb, chk)) << 4);
+ } else {
+ /* No ECN */
+ tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
+ }
+ } else {
+			/* we could get no asoc if it is an O-O-T-B packet */
+ tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
+ }
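+		/*
+		 * ip6_flow packs version(4) | traffic class(8) | flow
+		 * label(20): tosTop carries the version nibble plus the
+		 * upper TC bits, tosBottom the lower TC bits (including the
+		 * ECT codepoint), and flowTop/flowBottom the 20-bit flow
+		 * label.
+		 */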
+ ip6h->ip6_flow = htonl(((tosTop << 24) | ((tosBottom | flowTop) << 16) | flowBottom));
+ ip6h->ip6_nxt = IPPROTO_SCTP;
+ ip6h->ip6_plen = m->m_pkthdr.len;
+ ip6h->ip6_dst = sin6->sin6_addr;
+
+ /*
+		 * Do SRC address selection here: we can only reuse the KAME
+		 * src-addr-sel to a limited degree, since we can try its
+		 * selection but the result may not be bound.
+ */
+ bzero(&lsa6_tmp, sizeof(lsa6_tmp));
+ lsa6_tmp.sin6_family = AF_INET6;
+ lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
+ lsa6 = &lsa6_tmp;
+ if (net) {
+ if (net->src_addr_selected == 0) {
+ /* Cache the source address */
+ ((struct sockaddr_in6 *)&net->ro._s_addr)->sin6_addr = sctp_ipv6_source_address_selection(inp,
+ stcb, ro, net, out_of_asoc_ok);
+
+ if (ro->ro_rt)
+ net->src_addr_selected = 1;
+ }
+ lsa6->sin6_addr = ((struct sockaddr_in6 *)&net->ro._s_addr)->sin6_addr;
+ } else {
+ lsa6->sin6_addr = sctp_ipv6_source_address_selection(
+ inp, stcb, ro, net, out_of_asoc_ok);
+ }
+ lsa6->sin6_port = inp->sctp_lport;
+
+ if ((ro->ro_rt == NULL)) {
+ /*
+ * src addr selection failed to find a route (or
+ * valid source addr), so we can't get there from
+ * here!
+ */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("low_level_output: dropped v6 pkt- no valid source addr\n");
+ }
+#endif
+ sctp_m_freem(m);
+ if (net) {
+ if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb)
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
+ stcb,
+ SCTP_FAILED_THRESHOLD,
+ (void *)net);
+ net->dest_state &= ~SCTP_ADDR_REACHABLE;
+ net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
+ if (stcb) {
+ if (net == stcb->asoc.primary_destination) {
+ /* need a new primary */
+ struct sctp_nets *alt;
+
+ alt = sctp_find_alternate_net(stcb, net, 0);
+ if (alt != net) {
+ if (sctp_set_primary_addr(stcb,
+ (struct sockaddr *)NULL,
+ alt) == 0) {
+ net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
+ net->src_addr_selected = 0;
+ }
+ }
+ }
+ }
+ }
+ return (EHOSTUNREACH);
+ }
+ /*
+ * XXX: sa6 may not have a valid sin6_scope_id in the
+ * non-SCOPEDROUTING case.
+ */
+ bzero(&lsa6_storage, sizeof(lsa6_storage));
+ lsa6_storage.sin6_family = AF_INET6;
+ lsa6_storage.sin6_len = sizeof(lsa6_storage);
+ if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
+ sctp_m_freem(m);
+ return (error);
+ }
+ /* XXX */
+ lsa6_storage.sin6_addr = lsa6->sin6_addr;
+ lsa6_storage.sin6_port = inp->sctp_lport;
+ lsa6 = &lsa6_storage;
+ ip6h->ip6_src = lsa6->sin6_addr;
+
+ /*
+ * We set the hop limit now since there is a good chance
+ * that our ro pointer is now filled
+ */
+ ip6h->ip6_hlim = in6_selecthlim((struct in6pcb *)&inp->ip_inp.inp,
+ (ro ?
+ (ro->ro_rt ? (ro->ro_rt->rt_ifp) : (NULL)) :
+ (NULL)));
+ o_flgs = 0;
+ ifp = ro->ro_rt->rt_ifp;
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
+ /* Copy to be sure something bad is not happening */
+ sin6->sin6_addr = ip6h->ip6_dst;
+ lsa6->sin6_addr = ip6h->ip6_src;
+
+ printf("Calling ipv6 output routine from low level\n");
+ printf("src: ");
+ sctp_print_address((struct sockaddr *)lsa6);
+ printf("dst: ");
+ sctp_print_address((struct sockaddr *)sin6);
+ }
+#endif /* SCTP_DEBUG */
+ if (net) {
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ /* preserve the port and scope for link local send */
+ prev_scope = sin6->sin6_scope_id;
+ prev_port = sin6->sin6_port;
+ }
+ ret = ip6_output(m, ((struct in6pcb *)inp)->in6p_outputopts,
+ (struct route_in6 *)ro,
+ o_flgs,
+ ((struct in6pcb *)inp)->in6p_moptions,
+ &ifp
+ ,NULL
+ );
+ if (net) {
+ /* for link local this must be done */
+ sin6->sin6_scope_id = prev_scope;
+ sin6->sin6_port = prev_port;
+ }
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
+ printf("return from send is %d\n", ret);
+ }
+#endif				/* SCTP_DEBUG */
+ SCTP_STAT_INCR(sctps_sendpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
+ if (ret)
+ SCTP_STAT_INCR(sctps_senderrors);
+ if (net == NULL) {
+		/* Now if we had a temp route, free it */
+ if (ro->ro_rt) {
+ RTFREE(ro->ro_rt);
+ }
+ } else {
+ /* PMTU check versus smallest asoc MTU goes here */
+ if (ro->ro_rt == NULL) {
+ /* Route was freed */
+ net->src_addr_selected = 0;
+ }
+ if (ro->ro_rt != NULL) {
+ if (ro->ro_rt->rt_rmx.rmx_mtu &&
+ (stcb->asoc.smallest_mtu > ro->ro_rt->rt_rmx.rmx_mtu)) {
+ sctp_mtu_size_reset(inp,
+ &stcb->asoc,
+ ro->ro_rt->rt_rmx.rmx_mtu);
+ }
+ } else if (ifp) {
+ if (ND_IFINFO(ifp)->linkmtu &&
+ (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
+ sctp_mtu_size_reset(inp,
+ &stcb->asoc,
+ ND_IFINFO(ifp)->linkmtu);
+ }
+ }
+ }
+ return (ret);
+ }
+#endif
+ else {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("Unknown protocol (TSNH) type %d\n", ((struct sockaddr *)to)->sa_family);
+ }
+#endif
+ sctp_m_freem(m);
+ return (EFAULT);
+ }
+}
+
+
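+/*
+ * Build and send an INIT for the given association.  The INIT timer is
+ * started first so a lost packet will be retransmitted.  The chunk then
+ * carries, in order: the supported address types, an optional adaptation
+ * layer indication, an optional cookie-preserve request, ECN capable (if
+ * enabled), PR-SCTP support, the supported chunk extension list, ECN nonce
+ * (if enabled), the AUTH parameters (RANDOM, HMAC-ALGO and CHUNKS, unless
+ * auth is disabled) and finally our address list.
+ */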
+void
+sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
+{
+ struct mbuf *m, *m_at, *m_last;
+ struct sctp_nets *net;
+ struct sctp_init_msg *initm;
+ struct sctp_supported_addr_param *sup_addr;
+ struct sctp_ecn_supported_param *ecn;
+ struct sctp_prsctp_supported_param *prsctp;
+ struct sctp_ecn_nonce_supported_param *ecn_nonce;
+ struct sctp_supported_chunk_types_param *pr_supported;
+ int cnt_inits_to = 0;
+ int padval, ret;
+ int num_ext;
+ int p_len;
+
+ /* INIT's always go to the primary (and usually ONLY address) */
+ m_last = NULL;
+ net = stcb->asoc.primary_destination;
+ if (net == NULL) {
+ net = TAILQ_FIRST(&stcb->asoc.nets);
+ if (net == NULL) {
+ /* TSNH */
+ return;
+ }
+ /* we confirm any address we send an INIT to */
+ net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
+ sctp_set_primary_addr(stcb, NULL, net);
+ } else {
+ /* we confirm any address we send an INIT to */
+ net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
+ }
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
+ printf("Sending INIT\n");
+ }
+#endif
+ if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) {
+ /*
+ * special hook, if we are sending to link local it will not
+ * show up in our private address count.
+ */
+ struct sockaddr_in6 *sin6l;
+
+ sin6l = &net->ro._l_addr.sin6;
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr))
+ cnt_inits_to = 1;
+ }
+ if (callout_pending(&net->rxt_timer.timer)) {
+ /* This case should not happen */
+ return;
+ }
+ /* start the INIT timer */
+ if (sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net)) {
+ /* we are hosed since I can't start the INIT timer? */
+ return;
+ }
+ m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
+ if (m == NULL) {
+ /* No memory, INIT timer will re-attempt. */
+ return;
+ }
+ m->m_pkthdr.len = 0;
+ m->m_data += SCTP_MIN_OVERHEAD;
+ m->m_len = sizeof(struct sctp_init_msg);
+	/* Now let's put the SCTP header in place */
+ initm = mtod(m, struct sctp_init_msg *);
+ initm->sh.src_port = inp->sctp_lport;
+ initm->sh.dest_port = stcb->rport;
+ initm->sh.v_tag = 0;
+ initm->sh.checksum = 0; /* calculate later */
+ /* now the chunk header */
+ initm->msg.ch.chunk_type = SCTP_INITIATION;
+ initm->msg.ch.chunk_flags = 0;
+ /* fill in later from mbuf we build */
+ initm->msg.ch.chunk_length = 0;
+ /* place in my tag */
+ initm->msg.init.initiate_tag = htonl(stcb->asoc.my_vtag);
+ /* set up some of the credits. */
+ initm->msg.init.a_rwnd = htonl(max(inp->sctp_socket->so_rcv.sb_hiwat,
+ SCTP_MINIMAL_RWND));
+
+ initm->msg.init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
+ initm->msg.init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
+ initm->msg.init.initial_tsn = htonl(stcb->asoc.init_seq_number);
+ /* now the address restriction */
+ sup_addr = (struct sctp_supported_addr_param *)((caddr_t)initm +
+ sizeof(*initm));
+ sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
+	/* we support 2 types: IPv4 and IPv6 */
+ sup_addr->ph.param_length = htons(sizeof(*sup_addr) +
+ sizeof(uint16_t));
+ sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
+ sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS);
+ m->m_len += sizeof(*sup_addr) + sizeof(uint16_t);
+
+ if (inp->sctp_ep.adaptation_layer_indicator) {
+ struct sctp_adaptation_layer_indication *ali;
+
+ ali = (struct sctp_adaptation_layer_indication *)(
+ (caddr_t)sup_addr + sizeof(*sup_addr) + sizeof(uint16_t));
+ ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
+ ali->ph.param_length = htons(sizeof(*ali));
+ ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
+ m->m_len += sizeof(*ali);
+ ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali +
+ sizeof(*ali));
+ } else {
+ ecn = (struct sctp_ecn_supported_param *)((caddr_t)sup_addr +
+ sizeof(*sup_addr) + sizeof(uint16_t));
+ }
+
+ /* now any cookie time extensions */
+ if (stcb->asoc.cookie_preserve_req) {
+ struct sctp_cookie_perserve_param *cookie_preserve;
+
+ cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn);
+ cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
+ cookie_preserve->ph.param_length = htons(
+ sizeof(*cookie_preserve));
+ cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
+ m->m_len += sizeof(*cookie_preserve);
+ ecn = (struct sctp_ecn_supported_param *)(
+ (caddr_t)cookie_preserve + sizeof(*cookie_preserve));
+ stcb->asoc.cookie_preserve_req = 0;
+ }
+ /* ECN parameter */
+ if (sctp_ecn_enable == 1) {
+ ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
+ ecn->ph.param_length = htons(sizeof(*ecn));
+ m->m_len += sizeof(*ecn);
+ prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
+ sizeof(*ecn));
+ } else {
+ prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
+ }
+ /* And now tell the peer we do pr-sctp */
+ prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
+ prsctp->ph.param_length = htons(sizeof(*prsctp));
+ m->m_len += sizeof(*prsctp);
+
+ /* And now tell the peer we do all the extensions */
+ pr_supported = (struct sctp_supported_chunk_types_param *)
+ ((caddr_t)prsctp + sizeof(*prsctp));
+ pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
+ num_ext = 0;
+ pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
+ pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
+ pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
+ pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
+ pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
+ if (!sctp_auth_disable)
+ pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
+ p_len = sizeof(*pr_supported) + num_ext;
+ pr_supported->ph.param_length = htons(p_len);
+ bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
+ m->m_len += SCTP_SIZE32(p_len);
+
+ /* ECN nonce: And now tell the peer we support ECN nonce */
+ if (sctp_ecn_nonce) {
+ ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
+ ((caddr_t)pr_supported + SCTP_SIZE32(p_len));
+ ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
+ ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
+ m->m_len += sizeof(*ecn_nonce);
+ }
+ /* add authentication parameters */
+ if (!sctp_auth_disable) {
+ struct sctp_auth_random *random;
+ struct sctp_auth_hmac_algo *hmacs;
+ struct sctp_auth_chunk_list *chunks;
+
+ /* attach RANDOM parameter, if available */
+ if (stcb->asoc.authinfo.random != NULL) {
+ random = (struct sctp_auth_random *)(mtod(m, caddr_t)+m->m_len);
+ random->ph.param_type = htons(SCTP_RANDOM);
+ p_len = sizeof(*random) + stcb->asoc.authinfo.random_len;
+ random->ph.param_length = htons(p_len);
+ bcopy(stcb->asoc.authinfo.random->key, random->random_data,
+ stcb->asoc.authinfo.random_len);
+ /* zero out any padding required */
+ bzero((caddr_t)random + p_len, SCTP_SIZE32(p_len) - p_len);
+ m->m_len += SCTP_SIZE32(p_len);
+ }
+ /* add HMAC_ALGO parameter */
+ hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+m->m_len);
+ p_len = sctp_serialize_hmaclist(stcb->asoc.local_hmacs,
+ (uint8_t *) hmacs->hmac_ids);
+ if (p_len > 0) {
+ p_len += sizeof(*hmacs);
+ hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
+ hmacs->ph.param_length = htons(p_len);
+ /* zero out any padding required */
+ bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
+ m->m_len += SCTP_SIZE32(p_len);
+ }
+ /* add CHUNKS parameter */
+ chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+m->m_len);
+ p_len = sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks,
+ chunks->chunk_types);
+ if (p_len > 0) {
+ p_len += sizeof(*chunks);
+ chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
+ chunks->ph.param_length = htons(p_len);
+ /* zero out any padding required */
+ bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
+ m->m_len += SCTP_SIZE32(p_len);
+ }
+ }
+ m_at = m;
+ /* now the addresses */
+ {
+ struct sctp_scoping scp;
+
+ /*
+ * To optimize this we could put the scoping stuff into a
+ * structure and remove the individual uint8's from the
+ * assoc structure. Then we could just pass in the address
+ * within the stcb.. but for now this is a quick hack to get
+ * the address stuff teased apart.
+ */
+ scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal;
+ scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal;
+ scp.loopback_scope = stcb->asoc.loopback_scope;
+ scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope;
+ scp.local_scope = stcb->asoc.local_scope;
+ scp.site_scope = stcb->asoc.site_scope;
+
+ m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
+ }
+
+
+	/* calculate the size and update pkt header and chunk header */
+ m->m_pkthdr.len = 0;
+ for (m_at = m; m_at; m_at = m_at->m_next) {
+ if (m_at->m_next == NULL)
+ m_last = m_at;
+ m->m_pkthdr.len += m_at->m_len;
+ }
+ initm->msg.ch.chunk_length = htons((m->m_pkthdr.len -
+ sizeof(struct sctphdr)));
+ /*
+	 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
+	 * here since the timer will drive a retransmission.
+ */
+
+ /* I don't expect this to execute but we will be safe here */
+ padval = m->m_pkthdr.len % 4;
+ if ((padval) && (m_last)) {
+ /*
+ * The compiler worries that m_last may not be set even
+ * though I think it is impossible :-> however we add m_last
+ * here just in case.
+ */
+ int ret;
+
+ ret = sctp_add_pad_tombuf(m_last, (4 - padval));
+ if (ret) {
+ /* Houston we have a problem, no space */
+ sctp_m_freem(m);
+ return;
+ }
+ m->m_pkthdr.len += padval;
+ }
+ ret = sctp_lowlevel_chunk_output(inp, stcb, net,
+ (struct sockaddr *)&net->ro._l_addr,
+ m, 0, NULL, 0, 0, NULL, 0);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
+ SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
+}
+
+struct mbuf *
+sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
+ int param_offset, int *abort_processing, struct sctp_chunkhdr *cp)
+{
+ /*
+	 * Given an mbuf containing an INIT or INIT-ACK, with param_offset
+	 * equal to the beginning of the params (i.e. iphlen +
+	 * sizeof(struct sctp_init_msg)), parse through the parameters to the
+	 * end of the mbuf, verifying that all parameters are known.
+	 *
+	 * For unknown parameters, build and return an mbuf with
+	 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
+	 * processing this chunk, stop and set *abort_processing to 1.
+	 *
+	 * By having param_offset pre-set to where the parameters begin, it
+	 * is hoped that this routine may be reused in the future by new
+	 * features.
+ */
+ struct sctp_paramhdr *phdr, params;
+
+ struct mbuf *mat, *op_err;
+ char tempbuf[2048];
+ int at, limit, pad_needed;
+ uint16_t ptype, plen;
+ int err_at;
+
+ *abort_processing = 0;
+ mat = in_initpkt;
+ err_at = 0;
+ limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
+ at = param_offset;
+ op_err = NULL;
+
+ phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
+ while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
+ ptype = ntohs(phdr->param_type);
+ plen = ntohs(phdr->param_length);
+ limit -= SCTP_SIZE32(plen);
+ if (plen < sizeof(struct sctp_paramhdr)) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
+ printf("sctp_output.c:Impossible length in parameter < %d\n", plen);
+ }
+#endif
+ *abort_processing = 1;
+ break;
+ }
+ /*
+ * All parameters for all chunks that we know/understand are
+		 * listed here. We process them in other places and take
+		 * appropriate stop actions per the upper bits. However, this
+		 * is the generic routine processors can call to get back
+		 * an operr to either incorporate (init-ack) or send.
+ */
+ if ((ptype == SCTP_HEARTBEAT_INFO) ||
+ (ptype == SCTP_IPV4_ADDRESS) ||
+ (ptype == SCTP_IPV6_ADDRESS) ||
+ (ptype == SCTP_STATE_COOKIE) ||
+ (ptype == SCTP_UNRECOG_PARAM) ||
+ (ptype == SCTP_COOKIE_PRESERVE) ||
+ (ptype == SCTP_SUPPORTED_ADDRTYPE) ||
+ (ptype == SCTP_PRSCTP_SUPPORTED) ||
+ (ptype == SCTP_ADD_IP_ADDRESS) ||
+ (ptype == SCTP_DEL_IP_ADDRESS) ||
+ (ptype == SCTP_ECN_CAPABLE) ||
+ (ptype == SCTP_ULP_ADAPTATION) ||
+ (ptype == SCTP_ERROR_CAUSE_IND) ||
+ (ptype == SCTP_RANDOM) ||
+ (ptype == SCTP_CHUNK_LIST) ||
+		    (ptype == SCTP_SET_PRIM_ADDR) ||
+		    (ptype == SCTP_SUCCESS_REPORT) ||
+ (ptype == SCTP_SUPPORTED_CHUNK_EXT) ||
+ (ptype == SCTP_ECN_NONCE_SUPPORTED)
+ ) {
+			/* no, skip it */
+ at += SCTP_SIZE32(plen);
+ } else if (ptype == SCTP_HOSTNAME_ADDRESS) {
+ /* We can NOT handle HOST NAME addresses!! */
+ int l_len;
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
+ printf("Can't handle hostname addresses.. abort processing\n");
+ }
+#endif
+ *abort_processing = 1;
+ if (op_err == NULL) {
+				/* Ok need to try to get an mbuf */
+ l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
+ l_len += plen;
+ l_len += sizeof(struct sctp_paramhdr);
+ op_err = sctp_get_mbuf_for_msg(l_len, 1, M_DONTWAIT, 1, MT_DATA);
+ if (op_err) {
+ op_err->m_len = 0;
+ op_err->m_pkthdr.len = 0;
+ /*
+ * pre-reserve space for ip and sctp
+ * header and chunk hdr
+ */
+ op_err->m_data += sizeof(struct ip6_hdr);
+ op_err->m_data += sizeof(struct sctphdr);
+ op_err->m_data += sizeof(struct sctp_chunkhdr);
+ }
+ }
+ if (op_err) {
+ /* If we have space */
+ struct sctp_paramhdr s;
+
+ if (err_at % 4) {
+ uint32_t cpthis = 0;
+
+ pad_needed = 4 - (err_at % 4);
+ m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
+ err_at += pad_needed;
+ }
+ s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
+ s.param_length = htons(sizeof(s) + plen);
+ m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
+ err_at += sizeof(s);
+				if (plen > sizeof(tempbuf)) {
+					plen = sizeof(tempbuf);
+				}
+				phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, plen);
+ if (phdr == NULL) {
+ sctp_m_freem(op_err);
+ /*
+ * we are out of memory but we still
+ * need to have a look at what to do
+ * (the system is in trouble
+ * though).
+ */
+ return (NULL);
+ }
+ m_copyback(op_err, err_at, plen, (caddr_t)phdr);
+ err_at += plen;
+ }
+ return (op_err);
+ } else {
+ /*
+			 * we do not recognize the parameter; figure out what
+			 * we do.
+ */
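+			/*
+			 * The upper two bits of the parameter type encode
+			 * the standard handling for an unknown parameter:
+			 * 0x4000 set means report it back in an
+			 * UNRECOGNIZED_PARAMETER cause, 0x8000 set means
+			 * skip it and continue processing, clear means stop
+			 * processing the chunk.
+			 */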
+ if ((ptype & 0x4000) == 0x4000) {
+ /* Report bit is set?? */
+ if (op_err == NULL) {
+ int l_len;
+
+ /* Ok need to try to get an mbuf */
+ l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
+ l_len += plen;
+ l_len += sizeof(struct sctp_paramhdr);
+ op_err = sctp_get_mbuf_for_msg(l_len, 1, M_DONTWAIT, 1, MT_DATA);
+ if (op_err) {
+ op_err->m_len = 0;
+ op_err->m_pkthdr.len = 0;
+ op_err->m_data += sizeof(struct ip6_hdr);
+ op_err->m_data += sizeof(struct sctphdr);
+ op_err->m_data += sizeof(struct sctp_chunkhdr);
+ }
+ }
+ if (op_err) {
+ /* If we have space */
+ struct sctp_paramhdr s;
+
+ if (err_at % 4) {
+ uint32_t cpthis = 0;
+
+ pad_needed = 4 - (err_at % 4);
+ m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
+ err_at += pad_needed;
+ }
+ s.param_type = htons(SCTP_UNRECOG_PARAM);
+ s.param_length = htons(sizeof(s) + plen);
+ m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
+ err_at += sizeof(s);
+ if (plen > sizeof(tempbuf)) {
+ plen = sizeof(tempbuf);
+ }
+ phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, plen);
+ if (phdr == NULL) {
+ sctp_m_freem(op_err);
+ /*
+ * we are out of memory but
+ * we still need to have a
+ * look at what to do (the
+ * system is in trouble
+ * though).
+ */
+ goto more_processing;
+ }
+ m_copyback(op_err, err_at, plen, (caddr_t)phdr);
+ err_at += plen;
+ }
+ }
+ more_processing:
+ if ((ptype & 0x8000) == 0x0000) {
+ return (op_err);
+ } else {
+ /* skip this chunk and continue processing */
+ at += SCTP_SIZE32(plen);
+ }
+
+ }
+ phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
+ }
+ return (op_err);
+}
+
+static int
+sctp_are_there_new_addresses(struct sctp_association *asoc,
+ struct mbuf *in_initpkt, int iphlen, int offset)
+{
+ /*
+	 * Given an INIT packet, look through the packet to verify that
+	 * there are NO new addresses. As we go through the parameters, add
+	 * reports of any un-understood parameters that require an error.
+	 * Also, we must return (1) to drop the packet if we see an
+	 * un-understood parameter that tells us to drop the chunk.
+ */
+ struct sockaddr_in sin4, *sa4;
+ struct sockaddr_in6 sin6, *sa6;
+ struct sockaddr *sa_touse;
+ struct sockaddr *sa;
+ struct sctp_paramhdr *phdr, params;
+ struct ip *iph;
+ struct mbuf *mat;
+ uint16_t ptype, plen;
+ int err_at;
+ uint8_t fnd;
+ struct sctp_nets *net;
+
+ memset(&sin4, 0, sizeof(sin4));
+ memset(&sin6, 0, sizeof(sin6));
+ sin4.sin_family = AF_INET;
+ sin4.sin_len = sizeof(sin4);
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_len = sizeof(sin6);
+
+ sa_touse = NULL;
+ /* First what about the src address of the pkt ? */
+ iph = mtod(in_initpkt, struct ip *);
+ if (iph->ip_v == IPVERSION) {
+ /* source addr is IPv4 */
+ sin4.sin_addr = iph->ip_src;
+ sa_touse = (struct sockaddr *)&sin4;
+ } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
+ /* source addr is IPv6 */
+ struct ip6_hdr *ip6h;
+
+ ip6h = mtod(in_initpkt, struct ip6_hdr *);
+ sin6.sin6_addr = ip6h->ip6_src;
+ sa_touse = (struct sockaddr *)&sin6;
+ } else {
+ return (1);
+ }
+
+ fnd = 0;
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ sa = (struct sockaddr *)&net->ro._l_addr;
+ if (sa->sa_family == sa_touse->sa_family) {
+ if (sa->sa_family == AF_INET) {
+ sa4 = (struct sockaddr_in *)sa;
+ if (sa4->sin_addr.s_addr ==
+ sin4.sin_addr.s_addr) {
+ fnd = 1;
+ break;
+ }
+ } else if (sa->sa_family == AF_INET6) {
+ sa6 = (struct sockaddr_in6 *)sa;
+ if (SCTP6_ARE_ADDR_EQUAL(&sa6->sin6_addr,
+ &sin6.sin6_addr)) {
+ fnd = 1;
+ break;
+ }
+ }
+ }
+ }
+ if (fnd == 0) {
+		/* New address added! no need to look further. */
+ return (1);
+ }
+	/* Ok so far, let's munge through the rest of the packet */
+ mat = in_initpkt;
+ err_at = 0;
+ sa_touse = NULL;
+ offset += sizeof(struct sctp_init_chunk);
+ phdr = sctp_get_next_param(mat, offset, &params, sizeof(params));
+ while (phdr) {
+ ptype = ntohs(phdr->param_type);
+ plen = ntohs(phdr->param_length);
+ if (ptype == SCTP_IPV4_ADDRESS) {
+ struct sctp_ipv4addr_param *p4, p4_buf;
+
+ phdr = sctp_get_next_param(mat, offset,
+ (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
+ if (plen != sizeof(struct sctp_ipv4addr_param) ||
+ phdr == NULL) {
+ return (1);
+ }
+ p4 = (struct sctp_ipv4addr_param *)phdr;
+ sin4.sin_addr.s_addr = p4->addr;
+ sa_touse = (struct sockaddr *)&sin4;
+ } else if (ptype == SCTP_IPV6_ADDRESS) {
+ struct sctp_ipv6addr_param *p6, p6_buf;
+
+ phdr = sctp_get_next_param(mat, offset,
+ (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
+ if (plen != sizeof(struct sctp_ipv6addr_param) ||
+ phdr == NULL) {
+ return (1);
+ }
+ p6 = (struct sctp_ipv6addr_param *)phdr;
+ memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
+ sizeof(p6->addr));
+			sa_touse = (struct sockaddr *)&sin6;
+ }
+ if (sa_touse) {
+ /* ok, sa_touse points to one to check */
+ fnd = 0;
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ sa = (struct sockaddr *)&net->ro._l_addr;
+ if (sa->sa_family != sa_touse->sa_family) {
+ continue;
+ }
+ if (sa->sa_family == AF_INET) {
+ sa4 = (struct sockaddr_in *)sa;
+ if (sa4->sin_addr.s_addr ==
+ sin4.sin_addr.s_addr) {
+ fnd = 1;
+ break;
+ }
+ } else if (sa->sa_family == AF_INET6) {
+ sa6 = (struct sockaddr_in6 *)sa;
+ if (SCTP6_ARE_ADDR_EQUAL(
+ &sa6->sin6_addr, &sin6.sin6_addr)) {
+ fnd = 1;
+ break;
+ }
+ }
+ }
+ if (!fnd) {
+ /* New addr added! no need to look further */
+ return (1);
+ }
+ }
+ offset += SCTP_SIZE32(plen);
+ phdr = sctp_get_next_param(mat, offset, &params, sizeof(params));
+ }
+ return (0);
+}
+
+/*
+ * Given an mbuf chain that was sent to us containing an INIT, build an
+ * INIT-ACK with COOKIE and send it back. We assume that in_initpkt has done
+ * a pullup to include the IPv6/IPv4 header, SCTP header and initial part of
+ * the INIT message (i.e. the struct sctp_init_msg).
+ */
+void
+sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh,
+ struct sctp_init_chunk *init_chk)
+{
+ struct sctp_association *asoc;
+ struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *m_last;
+ struct sctp_init_msg *initackm_out;
+ struct sctp_ecn_supported_param *ecn;
+ struct sctp_prsctp_supported_param *prsctp;
+ struct sctp_ecn_nonce_supported_param *ecn_nonce;
+ struct sctp_supported_chunk_types_param *pr_supported;
+ struct sockaddr_storage store;
+ struct sockaddr_in *sin;
+ struct sockaddr_in6 *sin6;
+ struct route *ro;
+ struct ip *iph;
+ struct ip6_hdr *ip6;
+ struct sockaddr *to;
+ struct sctp_state_cookie stc;
+ struct sctp_nets *net = NULL;
+ int cnt_inits_to = 0;
+ uint16_t his_limit, i_want;
+ int abort_flag, padval, sz_of;
+ int num_ext;
+ int p_len;
+
+ if (stcb) {
+ asoc = &stcb->asoc;
+ } else {
+ asoc = NULL;
+ }
+ m_last = NULL;
+ if ((asoc != NULL) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
+ (sctp_are_there_new_addresses(asoc, init_pkt, iphlen, offset))) {
+ /* new addresses, out of here in non-cookie-wait states */
+ /*
+		 * Send an ABORT; we don't add the new-address error clause,
+		 * though we even set the T bit and copy in the 0 tag... this
+		 * looks no different than if no listener was present.
+ */
+ sctp_send_abort(init_pkt, iphlen, sh, 0, NULL);
+ return;
+ }
+ abort_flag = 0;
+ op_err = sctp_arethere_unrecognized_parameters(init_pkt,
+ (offset + sizeof(struct sctp_init_chunk)),
+ &abort_flag, (struct sctp_chunkhdr *)init_chk);
+ if (abort_flag) {
+ sctp_send_abort(init_pkt, iphlen, sh, init_chk->init.initiate_tag, op_err);
+ return;
+ }
+ m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
+ if (m == NULL) {
+ /* No memory, INIT timer will re-attempt. */
+ if (op_err)
+ sctp_m_freem(op_err);
+ return;
+ }
+ m->m_data += SCTP_MIN_OVERHEAD;
+ m->m_pkthdr.rcvif = 0;
+ m->m_len = sizeof(struct sctp_init_msg);
+
+ /* the time I built cookie */
+ SCTP_GETTIME_TIMEVAL(&stc.time_entered);
+
+ /* populate any tie tags */
+ if (asoc != NULL) {
+ /* unlock before tag selections */
+ if (asoc->my_vtag_nonce == 0)
+ asoc->my_vtag_nonce = sctp_select_a_tag(inp);
+ stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
+
+ if (asoc->peer_vtag_nonce == 0)
+ asoc->peer_vtag_nonce = sctp_select_a_tag(inp);
+ stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
+
+ stc.cookie_life = asoc->cookie_life;
+ net = asoc->primary_destination;
+ } else {
+ stc.tie_tag_my_vtag = 0;
+ stc.tie_tag_peer_vtag = 0;
+ /* life I will award this cookie */
+ stc.cookie_life = inp->sctp_ep.def_cookie_life;
+ }
+
+ /* copy in the ports for later check */
+ stc.myport = sh->dest_port;
+ stc.peerport = sh->src_port;
+
+ /*
+	 * If we wanted to honor cookie life extensions, we would add to
+ * stc.cookie_life. For now we should NOT honor any extension
+ */
+ stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ struct inpcb *in_inp;
+
+		/* It's a V6 socket */
+ in_inp = (struct inpcb *)inp;
+ stc.ipv6_addr_legal = 1;
+ /* Now look at the binding flag to see if V4 will be legal */
+ if (
+ (in_inp->inp_flags & IN6P_IPV6_V6ONLY)
+ == 0) {
+ stc.ipv4_addr_legal = 1;
+ } else {
+ /* V4 addresses are NOT legal on the association */
+ stc.ipv4_addr_legal = 0;
+ }
+ } else {
+		/* It's a V4 socket, no V6 */
+ stc.ipv4_addr_legal = 1;
+ stc.ipv6_addr_legal = 0;
+ }
+
+#ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
+ stc.ipv4_scope = 1;
+#else
+ stc.ipv4_scope = 0;
+#endif
+ /* now for scope setup */
+ memset((caddr_t)&store, 0, sizeof(store));
+ sin = (struct sockaddr_in *)&store;
+ sin6 = (struct sockaddr_in6 *)&store;
+ if (net == NULL) {
+ to = (struct sockaddr *)&store;
+ iph = mtod(init_pkt, struct ip *);
+ if (iph->ip_v == IPVERSION) {
+ struct in_addr addr;
+ struct route iproute;
+
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(struct sockaddr_in);
+ sin->sin_port = sh->src_port;
+ sin->sin_addr = iph->ip_src;
+ /* lookup address */
+ stc.address[0] = sin->sin_addr.s_addr;
+ stc.address[1] = 0;
+ stc.address[2] = 0;
+ stc.address[3] = 0;
+ stc.addr_type = SCTP_IPV4_ADDRESS;
+ /* local from address */
+ memset(&iproute, 0, sizeof(iproute));
+ ro = &iproute;
+ memcpy(&ro->ro_dst, sin, sizeof(*sin));
+ addr = sctp_ipv4_source_address_selection(inp, NULL,
+ ro, NULL, 0);
+ if (ro->ro_rt) {
+ RTFREE(ro->ro_rt);
+ }
+ stc.laddress[0] = addr.s_addr;
+ stc.laddress[1] = 0;
+ stc.laddress[2] = 0;
+ stc.laddress[3] = 0;
+ stc.laddr_type = SCTP_IPV4_ADDRESS;
+ /* scope_id is only for v6 */
+ stc.scope_id = 0;
+#ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
+ if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
+ stc.ipv4_scope = 1;
+ }
+#else
+ stc.ipv4_scope = 1;
+#endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
+ /* Must use the address in this case */
+ if (sctp_is_address_on_local_host((struct sockaddr *)sin)) {
+ stc.loopback_scope = 1;
+ stc.ipv4_scope = 1;
+ stc.site_scope = 1;
+ stc.local_scope = 1;
+ }
+ } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
+ struct in6_addr addr;
+
+ struct route_in6 iproute6;
+
+ ip6 = mtod(init_pkt, struct ip6_hdr *);
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+ sin6->sin6_port = sh->src_port;
+ sin6->sin6_addr = ip6->ip6_src;
+ /* lookup address */
+ memcpy(&stc.address, &sin6->sin6_addr,
+ sizeof(struct in6_addr));
+ sin6->sin6_scope_id = 0;
+ stc.addr_type = SCTP_IPV6_ADDRESS;
+ stc.scope_id = 0;
+ if (sctp_is_address_on_local_host((struct sockaddr *)sin6)) {
+ stc.loopback_scope = 1;
+ stc.local_scope = 1;
+ stc.site_scope = 1;
+ stc.ipv4_scope = 1;
+ } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ /*
+				 * If the new destination is a LINK_LOCAL, we
+				 * must have both site and local scope in
+				 * common. Don't set local scope though, since
+				 * we must depend on the source to be added
+				 * implicitly. We cannot assume that just
+				 * because we share one link all links are
+				 * common.
+ */
+ stc.local_scope = 0;
+ stc.site_scope = 1;
+ stc.ipv4_scope = 1;
+ /*
+				 * We start counting for the private address
+				 * stuff at 1, since the link-local address we
+				 * source from won't show up in our scoped
+				 * count.
+ */
+ cnt_inits_to = 1;
+ /* pull out the scope_id from incoming pkt */
+ /* FIX ME: does this have scope from rcvif? */
+ (void)sa6_recoverscope(sin6);
+ sa6_embedscope(sin6, ip6_use_defzone);
+ stc.scope_id = sin6->sin6_scope_id;
+ } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
+ /*
+ * If the new destination is SITE_LOCAL then
+ * we must have site scope in common.
+ */
+ stc.site_scope = 1;
+ }
+ /* local from address */
+ memset(&iproute6, 0, sizeof(iproute6));
+ ro = (struct route *)&iproute6;
+ memcpy(&ro->ro_dst, sin6, sizeof(*sin6));
+ addr = sctp_ipv6_source_address_selection(inp, NULL,
+ ro, NULL, 0);
+ if (ro->ro_rt) {
+ RTFREE(ro->ro_rt);
+ }
+ memcpy(&stc.laddress, &addr, sizeof(struct in6_addr));
+ stc.laddr_type = SCTP_IPV6_ADDRESS;
+ }
+ } else {
+ /* set the scope per the existing tcb */
+ struct sctp_nets *lnet;
+
+ stc.loopback_scope = asoc->loopback_scope;
+ stc.ipv4_scope = asoc->ipv4_local_scope;
+ stc.site_scope = asoc->site_scope;
+ stc.local_scope = asoc->local_scope;
+ TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
+ if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
+ if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
+ /*
+ * if we have a LL address, start
+ * counting at 1.
+ */
+ cnt_inits_to = 1;
+ }
+ }
+ }
+
+ /* use the net pointer */
+ to = (struct sockaddr *)&net->ro._l_addr;
+ if (to->sa_family == AF_INET) {
+ sin = (struct sockaddr_in *)to;
+ stc.address[0] = sin->sin_addr.s_addr;
+ stc.address[1] = 0;
+ stc.address[2] = 0;
+ stc.address[3] = 0;
+ stc.addr_type = SCTP_IPV4_ADDRESS;
+ if (net->src_addr_selected == 0) {
+ /*
+ * strange case here, the INIT should have
+				 * done the selection.
+ */
+ net->ro._s_addr.sin.sin_addr =
+ sctp_ipv4_source_address_selection(inp,
+ stcb, (struct route *)&net->ro, net, 0);
+ net->src_addr_selected = 1;
+
+ }
+ stc.laddress[0] = net->ro._s_addr.sin.sin_addr.s_addr;
+ stc.laddress[1] = 0;
+ stc.laddress[2] = 0;
+ stc.laddress[3] = 0;
+ stc.laddr_type = SCTP_IPV4_ADDRESS;
+ } else if (to->sa_family == AF_INET6) {
+ sin6 = (struct sockaddr_in6 *)to;
+ memcpy(&stc.address, &sin6->sin6_addr,
+ sizeof(struct in6_addr));
+ stc.addr_type = SCTP_IPV6_ADDRESS;
+ if (net->src_addr_selected == 0) {
+ /*
+ * strange case here, the INIT should have
+				 * done the selection.
+ */
+ net->ro._s_addr.sin6.sin6_addr =
+ sctp_ipv6_source_address_selection(inp,
+ stcb, (struct route *)&net->ro, net, 0);
+ net->src_addr_selected = 1;
+ }
+ memcpy(&stc.laddress, &net->ro._l_addr.sin6.sin6_addr,
+ sizeof(struct in6_addr));
+ stc.laddr_type = SCTP_IPV6_ADDRESS;
+ }
+ }
+	/* Now let's put the SCTP header in place */
+ initackm_out = mtod(m, struct sctp_init_msg *);
+ initackm_out->sh.src_port = inp->sctp_lport;
+ initackm_out->sh.dest_port = sh->src_port;
+ initackm_out->sh.v_tag = init_chk->init.initiate_tag;
+ /* Save it off for quick ref */
+ stc.peers_vtag = init_chk->init.initiate_tag;
+ initackm_out->sh.checksum = 0; /* calculate later */
+ /* who are we */
+ memcpy(stc.identification, SCTP_VERSION_STRING,
+ min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
+ /* now the chunk header */
+ initackm_out->msg.ch.chunk_type = SCTP_INITIATION_ACK;
+ initackm_out->msg.ch.chunk_flags = 0;
+ /* fill in later from mbuf we build */
+ initackm_out->msg.ch.chunk_length = 0;
+ /* place in my tag */
+ if ((asoc != NULL) &&
+ ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
+ /* re-use the v-tags and init-seq here */
+ initackm_out->msg.init.initiate_tag = htonl(asoc->my_vtag);
+ initackm_out->msg.init.initial_tsn = htonl(asoc->init_seq_number);
+ } else {
+ initackm_out->msg.init.initiate_tag = htonl(sctp_select_a_tag(inp));
+ /* get a TSN to use too */
+ initackm_out->msg.init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
+ }
+	/* save away my tag too */
+ stc.my_vtag = initackm_out->msg.init.initiate_tag;
+
+ /* set up some of the credits. */
+ initackm_out->msg.init.a_rwnd = htonl(max(inp->sctp_socket->so_rcv.sb_hiwat, SCTP_MINIMAL_RWND));
+ /* set what I want */
+ his_limit = ntohs(init_chk->init.num_inbound_streams);
+ /* choose what I want */
+ if (asoc != NULL) {
+ if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
+ i_want = asoc->streamoutcnt;
+ } else {
+ i_want = inp->sctp_ep.pre_open_stream_count;
+ }
+ } else {
+ i_want = inp->sctp_ep.pre_open_stream_count;
+ }
+ if (his_limit < i_want) {
+ /* I Want more :< */
+ initackm_out->msg.init.num_outbound_streams = init_chk->init.num_inbound_streams;
+ } else {
+ /* I can have what I want :> */
+ initackm_out->msg.init.num_outbound_streams = htons(i_want);
+ }
+	/* tell him his limit. */
+ initackm_out->msg.init.num_inbound_streams =
+ htons(inp->sctp_ep.max_open_streams_intome);
+ /* setup the ECN pointer */
+
+ if (inp->sctp_ep.adaptation_layer_indicator) {
+ struct sctp_adaptation_layer_indication *ali;
+
+ ali = (struct sctp_adaptation_layer_indication *)(
+ (caddr_t)initackm_out + sizeof(*initackm_out));
+ ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
+ ali->ph.param_length = htons(sizeof(*ali));
+ ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
+ m->m_len += sizeof(*ali);
+ ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali +
+ sizeof(*ali));
+ } else {
+ ecn = (struct sctp_ecn_supported_param *)(
+ (caddr_t)initackm_out + sizeof(*initackm_out));
+ }
+
+ /* ECN parameter */
+ if (sctp_ecn_enable == 1) {
+ ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
+ ecn->ph.param_length = htons(sizeof(*ecn));
+ m->m_len += sizeof(*ecn);
+
+ prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
+ sizeof(*ecn));
+ } else {
+ prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
+ }
+ /* And now tell the peer we do pr-sctp */
+ prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
+ prsctp->ph.param_length = htons(sizeof(*prsctp));
+ m->m_len += sizeof(*prsctp);
+
+ /* And now tell the peer we do all the extensions */
+ pr_supported = (struct sctp_supported_chunk_types_param *)
+ ((caddr_t)prsctp + sizeof(*prsctp));
+
+ pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
+ num_ext = 0;
+ pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
+ pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
+ pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
+ pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
+ pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
+ if (!sctp_auth_disable)
+ pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
+ p_len = sizeof(*pr_supported) + num_ext;
+ pr_supported->ph.param_length = htons(p_len);
+ bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
+ m->m_len += SCTP_SIZE32(p_len);
+
+ /* ECN nonce: And now tell the peer we support ECN nonce */
+ if (sctp_ecn_nonce) {
+ ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
+ ((caddr_t)pr_supported + SCTP_SIZE32(p_len));
+ ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
+ ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
+ m->m_len += sizeof(*ecn_nonce);
+ }
+ /* add authentication parameters */
+ if (!sctp_auth_disable) {
+ struct sctp_auth_random *random;
+ struct sctp_auth_hmac_algo *hmacs;
+ struct sctp_auth_chunk_list *chunks;
+ uint16_t random_len;
+
+ /* generate and add RANDOM parameter */
+ random_len = sctp_auth_random_len;
+ random = (struct sctp_auth_random *)(mtod(m, caddr_t)+m->m_len);
+ random->ph.param_type = htons(SCTP_RANDOM);
+ p_len = sizeof(*random) + random_len;
+ random->ph.param_length = htons(p_len);
+ sctp_read_random(random->random_data, random_len);
+ /* zero out any padding required */
+ bzero((caddr_t)random + p_len, SCTP_SIZE32(p_len) - p_len);
+ m->m_len += SCTP_SIZE32(p_len);
+
+ /* add HMAC_ALGO parameter */
+ hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+m->m_len);
+ p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
+ (uint8_t *) hmacs->hmac_ids);
+ if (p_len > 0) {
+ p_len += sizeof(*hmacs);
+ hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
+ hmacs->ph.param_length = htons(p_len);
+ /* zero out any padding required */
+ bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
+ m->m_len += SCTP_SIZE32(p_len);
+ }
+ /* add CHUNKS parameter */
+ chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+m->m_len);
+ p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
+ chunks->chunk_types);
+ if (p_len > 0) {
+ p_len += sizeof(*chunks);
+ chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
+ chunks->ph.param_length = htons(p_len);
+ /* zero out any padding required */
+ bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
+ m->m_len += SCTP_SIZE32(p_len);
+ }
+ }
+ m_at = m;
+ /* now the addresses */
+ {
+ struct sctp_scoping scp;
+
+ /*
+ * To optimize this we could put the scoping stuff into a
+ * structure and remove the individual uint8's from the stc
+ * structure. Then we could just pass in the address within
+ * the stc.. but for now this is a quick hack to get the
+ * address stuff teased apart.
+ */
+ scp.ipv4_addr_legal = stc.ipv4_addr_legal;
+ scp.ipv6_addr_legal = stc.ipv6_addr_legal;
+ scp.loopback_scope = stc.loopback_scope;
+ scp.ipv4_local_scope = stc.ipv4_scope;
+ scp.local_scope = stc.local_scope;
+ scp.site_scope = stc.site_scope;
+ m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
+ }
+
+ /* tack on the operational error if present */
+ if (op_err) {
+ if (op_err->m_pkthdr.len % 4) {
+ /* must add a pad to the param */
+ uint32_t cpthis = 0;
+ int padlen;
+
+ padlen = 4 - (op_err->m_pkthdr.len % 4);
+ m_copyback(op_err, op_err->m_pkthdr.len, padlen, (caddr_t)&cpthis);
+ }
+ while (m_at->m_next != NULL) {
+ m_at = m_at->m_next;
+ }
+ m_at->m_next = op_err;
+ while (m_at->m_next != NULL) {
+ m_at = m_at->m_next;
+ }
+ }
+ /* Get total size of init packet */
+ sz_of = SCTP_SIZE32(ntohs(init_chk->ch.chunk_length));
+ /* pre-calculate the size and update the packet header and chunk header */
+ m->m_pkthdr.len = 0;
+ for (m_tmp = m; m_tmp; m_tmp = m_tmp->m_next) {
+ m->m_pkthdr.len += m_tmp->m_len;
+ if (m_tmp->m_next == NULL) {
+ /* m_tmp should now point to last one */
+ break;
+ }
+ }
+ /*
+ * Figure now the size of the cookie. We know the size of the
+ * INIT-ACK. The Cookie is going to be the size of INIT, INIT-ACK,
+ * COOKIE-STRUCTURE and SIGNATURE.
+ */
+
+ /*
+ * take our earlier INIT calc and add in the size we just calculated,
+ * minus the size of the sctphdr (it's not included in the chunk size)
+ */
+
+ /* add once for the INIT-ACK */
+ sz_of += (m->m_pkthdr.len - sizeof(struct sctphdr));
+
+ /* add a second time for the INIT-ACK in the cookie */
+ sz_of += (m->m_pkthdr.len - sizeof(struct sctphdr));
+
+ /* Now add the cookie header and cookie message struct */
+ sz_of += sizeof(struct sctp_state_cookie_param);
+ /* ...and add the size of our signature */
+ sz_of += SCTP_SIGNATURE_SIZE;
+ initackm_out->msg.ch.chunk_length = htons(sz_of);
+
+ /* Now we must build a cookie */
+ m_cookie = sctp_add_cookie(inp, init_pkt, offset, m,
+ sizeof(struct sctphdr), &stc);
+ if (m_cookie == NULL) {
+ /* memory problem */
+ sctp_m_freem(m);
+ return;
+ }
+ /* Now append the cookie to the end and update the space/size */
+ m_tmp->m_next = m_cookie;
+ for (; m_tmp; m_tmp = m_tmp->m_next) {
+ m->m_pkthdr.len += m_tmp->m_len;
+ if (m_tmp->m_next == NULL) {
+ /* m_tmp should now point to last one */
+ m_last = m_tmp;
+ break;
+ }
+ }
+
+ /*
+ * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
+ * here since the timer will drive a retransmission.
+ */
+ padval = m->m_pkthdr.len % 4;
+ if ((padval) && (m_last)) {
+ /* see my previous comments on m_last */
+ int ret;
+
+ ret = sctp_add_pad_tombuf(m_last, (4 - padval));
+ if (ret) {
+ /* Houston we have a problem, no space */
+ sctp_m_freem(m);
+ return;
+ }
+ m->m_pkthdr.len += padval;
+ }
+ sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
+ NULL, 0);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+}
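+
+/*
+ * A minimal sketch (an illustration, not part of the code above) of the
+ * parameter padding rule used repeatedly while building the INIT-ACK:
+ * every parameter is rounded up to a 4-byte boundary and the pad bytes
+ * are zeroed. SCTP_SIZE32() is assumed to round up to the next
+ * multiple of 4.
+ */
+#if 0
+static size_t
+pad_param_to_word(uint8_t *param, size_t p_len)
+{
+    size_t padded = (p_len + 3) & ~(size_t)3;   /* SCTP_SIZE32(p_len) */
+
+    memset(param + p_len, 0, padded - p_len);   /* zero the pad */
+    return (padded);    /* bytes actually consumed in the mbuf */
+}
+#endif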
+
+
+void
+sctp_insert_on_wheel(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_stream_out *strq, int holds_lock)
+{
+ struct sctp_stream_out *stre, *strn;
+
+ if (holds_lock == 0)
+ SCTP_TCB_SEND_LOCK(stcb);
+ if ((strq->next_spoke.tqe_next) ||
+ (strq->next_spoke.tqe_prev)) {
+ /* already on wheel */
+ goto outof_here;
+ }
+ stre = TAILQ_FIRST(&asoc->out_wheel);
+ if (stre == NULL) {
+ /* only one on wheel */
+ TAILQ_INSERT_HEAD(&asoc->out_wheel, strq, next_spoke);
+ goto outof_here;
+ }
+ for (; stre; stre = strn) {
+ strn = TAILQ_NEXT(stre, next_spoke);
+ if (stre->stream_no > strq->stream_no) {
+ TAILQ_INSERT_BEFORE(stre, strq, next_spoke);
+ goto outof_here;
+ } else if (stre->stream_no == strq->stream_no) {
+ /* huh, should not happen */
+ goto outof_here;
+ } else if (strn == NULL) {
+ /* next one is null */
+ TAILQ_INSERT_AFTER(&asoc->out_wheel, stre, strq,
+ next_spoke);
+ }
+ }
+outof_here:
+ if (holds_lock == 0)
+ SCTP_TCB_SEND_UNLOCK(stcb);
+}
+
+static void
+sctp_remove_from_wheel(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_stream_out *strq)
+{
+ /* take off and then setup so we know it is not on the wheel */
+ SCTP_TCB_SEND_LOCK(stcb);
+ TAILQ_REMOVE(&asoc->out_wheel, strq, next_spoke);
+ strq->next_spoke.tqe_next = NULL;
+ strq->next_spoke.tqe_prev = NULL;
+ SCTP_TCB_SEND_UNLOCK(stcb);
+}
+
+
+static void
+sctp_prune_prsctp(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_sndrcvinfo *srcv,
+ int dataout)
+{
+ int freed_spc = 0;
+ struct sctp_tmit_chunk *chk, *nchk;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if ((asoc->peer_supports_prsctp) &&
+ (asoc->sent_queue_cnt_removeable > 0)) {
+ TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+ /*
+ * Look for chunks marked with the PR_SCTP flag AND
+ * the buffer space flag. If the one being sent is
+ * equal or greater priority then purge the old one
+ * and free some space.
+ */
+ if (PR_SCTP_BUF_ENABLED(chk->flags)) {
+ /*
+ * This one is PR-SCTP AND buffer space
+ * limited type
+ */
+ if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
+ /*
+ * Lower numbers equate to higher
+ * priority, so if the one we are
+ * looking at has a larger or equal
+ * priority we want to drop the data
+ * and NOT retransmit it.
+ */
+ if (chk->data) {
+ /*
+ * We release the book_size
+ * if the mbuf is here
+ */
+ int ret_spc;
+ int cause;
+
+ if (chk->sent > SCTP_DATAGRAM_UNSENT)
+ cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT;
+ else
+ cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT;
+ ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
+ cause,
+ &asoc->sent_queue);
+ freed_spc += ret_spc;
+ if (freed_spc >= dataout) {
+ return;
+ }
+ } /* if chunk was present */
+ } /* if of sufficient priority */
+ } /* if chunk has enabled */
+ } /* tailqforeach */
+
+ chk = TAILQ_FIRST(&asoc->send_queue);
+ while (chk) {
+ nchk = TAILQ_NEXT(chk, sctp_next);
+ /* Here we must move to the sent queue and mark */
+ if (PR_SCTP_TTL_ENABLED(chk->flags)) {
+ if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
+ if (chk->data) {
+ /*
+ * We release the book_size
+ * if the mbuf is here
+ */
+ int ret_spc;
+
+ ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
+ SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT,
+ &asoc->send_queue);
+
+ freed_spc += ret_spc;
+ if (freed_spc >= dataout) {
+ return;
+ }
+ } /* end if chk->data */
+ } /* end if right class */
+ } /* end if chk pr-sctp */
+ chk = nchk;
+ } /* end while (chk) */
+ } /* if enabled in asoc */
+}
+
+__inline int
+sctp_get_frag_point(struct sctp_tcb *stcb,
+ struct sctp_association *asoc)
+{
+ int siz, ovh;
+
+ /*
+ * For endpoints that have both v6 and v4 addresses we must reserve
+ * room for the ipv6 header, for those that are only dealing with V4
+ * we use a larger frag point.
+ */
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ovh = SCTP_MED_OVERHEAD;
+ } else {
+ ovh = SCTP_MED_V4_OVERHEAD;
+ }
+
+ if (stcb->sctp_ep->sctp_frag_point > asoc->smallest_mtu)
+ siz = asoc->smallest_mtu - ovh;
+ else
+ siz = (stcb->sctp_ep->sctp_frag_point - ovh);
+ /*
+ * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
+ */
+ /* A data chunk MUST fit in a cluster */
+ /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
+ /* } */
+
+ /* adjust for an AUTH chunk if DATA requires auth */
+ if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
+ siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+
+ if (siz % 4) {
+ /* make it an even word boundary please */
+ siz -= (siz % 4);
+ }
+ return (siz);
+}
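+
+/*
+ * A worked sketch of the arithmetic above, assuming ovh already covers
+ * the IP + SCTP header overhead: clamp to the smallest path MTU,
+ * subtract any AUTH chunk overhead, then round down to a word
+ * boundary. For example, smallest_mtu 1500, ovh 48, auth 0 gives 1452.
+ */
+#if 0
+static int
+frag_point_sketch(int frag_point, int smallest_mtu, int ovh, int auth_len)
+{
+    int siz;
+
+    if (frag_point > smallest_mtu)
+        siz = smallest_mtu - ovh;
+    else
+        siz = frag_point - ovh;
+    siz -= auth_len;    /* room for an AUTH chunk when DATA needs auth */
+    siz -= siz % 4;     /* even word boundary */
+    return (siz);
+}
+#endif
+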
+extern unsigned int sctp_max_chunks_on_queue;
+
+static void
+sctp_set_prsctp_policy(struct sctp_tcb *stcb,
+ struct sctp_stream_queue_pending *sp)
+{
+ sp->pr_sctp_on = 0;
+ if (stcb->asoc.peer_supports_prsctp) {
+ /*
+ * We assume that the user wants PR_SCTP_TTL if the user
+ * provides a positive lifetime but does not specify any
+ * PR_SCTP policy. This is a BAD assumption and causes
+ * problems at least with the U-Vancouver MPI folks. I will
+ * change this to be no policy means NO PR-SCTP.
+ */
+ if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
+ sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
+ sp->pr_sctp_on = 1;
+ } else {
+ goto sctp_no_policy;
+ }
+ switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
+ case CHUNK_FLAGS_PR_SCTP_BUF:
+ /*
+ * Time to live is a priority stored in tv_sec when
+ * doing the buffer drop thing.
+ */
+ sp->ts.tv_sec = sp->timetolive;
+ sp->ts.tv_usec = 0;
+ break;
+ case CHUNK_FLAGS_PR_SCTP_TTL:
+ {
+ struct timeval tv;
+
+ SCTP_GETTIME_TIMEVAL(&sp->ts);
+ tv.tv_sec = sp->timetolive / 1000;
+ tv.tv_usec = (sp->timetolive * 1000) % 1000000;
+ timevaladd(&sp->ts, &tv);
+ }
+ break;
+ case CHUNK_FLAGS_PR_SCTP_RTX:
+ /*
+ * Time to live is the number of retransmissions
+ * stored in tv_sec.
+ */
+ sp->ts.tv_sec = sp->timetolive;
+ sp->ts.tv_usec = 0;
+ break;
+ default:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
+ printf("Unknown PR_SCTP policy %u.\n", PR_SCTP_POLICY(sp->sinfo_flags));
+ }
+#endif
+ break;
+ }
+ }
+sctp_no_policy:
+ if (sp->sinfo_flags & SCTP_UNORDERED)
+ sp->act_flags |= SCTP_DATA_UNORDERED;
+
+}
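+
+/*
+ * A sketch of the TTL case above: the user-supplied lifetime is in
+ * milliseconds, and the expiry becomes "now" plus that lifetime as a
+ * timeval. The usec term is written as (ttl_ms % 1000) * 1000, which
+ * matches the expression above while avoiding 32-bit overflow for
+ * very large lifetimes.
+ */
+#if 0
+static void
+ttl_to_expiry(const struct timeval *now, uint32_t ttl_ms,
+    struct timeval *expiry)
+{
+    struct timeval tv;
+
+    tv.tv_sec = ttl_ms / 1000;
+    tv.tv_usec = (ttl_ms % 1000) * 1000;    /* ms -> usec */
+    *expiry = *now;
+    timevaladd(expiry, &tv);
+}
+#endif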
+
+
+static int
+sctp_msg_append(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ struct mbuf *m,
+ struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
+{
+ int error = 0, holds_lock;
+ struct mbuf *at;
+ struct sctp_stream_queue_pending *sp = NULL;
+ struct sctp_stream_out *strm;
+
+ /*
+ * Given an mbuf chain, put it into the association send queue and
+ * place it on the wheel
+ */
+ holds_lock = hold_stcb_lock;
+ if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
+ /* Invalid stream number */
+ error = EINVAL;
+ goto out_now;
+ }
+ if ((stcb->asoc.stream_locked) &&
+ (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
+ error = EAGAIN;
+ goto out_now;
+ }
+ strm = &stcb->asoc.strmout[srcv->sinfo_stream];
+ /* Now can we send this? */
+ if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
+ (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
+ (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
+ (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
+ /* got data while shutting down */
+ error = ECONNRESET;
+ goto out_now;
+ }
+ sp = (struct sctp_stream_queue_pending *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_strmoq);
+ if (sp == NULL) {
+ error = ENOMEM;
+ goto out_now;
+ }
+ SCTP_INCR_STRMOQ_COUNT();
+ sp->act_flags = 0;
+ sp->sinfo_flags = srcv->sinfo_flags;
+ sp->timetolive = srcv->sinfo_timetolive;
+ sp->ppid = srcv->sinfo_ppid;
+ sp->context = srcv->sinfo_context;
+ sp->strseq = 0;
+ if (sp->sinfo_flags & SCTP_ADDR_OVER) {
+ sp->net = net;
+ sp->addr_over = 1;
+ } else {
+ sp->net = stcb->asoc.primary_destination;
+ sp->addr_over = 0;
+ }
+ atomic_add_int(&sp->net->ref_count, 1);
+ SCTP_GETTIME_TIMEVAL(&sp->ts);
+ sp->stream = srcv->sinfo_stream;
+ sp->msg_is_complete = 1;
+ sp->some_taken = 0;
+ sp->data = m;
+ sp->tail_mbuf = NULL;
+ sp->length = 0;
+ at = m;
+ sctp_set_prsctp_policy(stcb, sp);
+ while (at) {
+ if (at->m_next == NULL)
+ sp->tail_mbuf = at;
+ sp->length += at->m_len;
+ at = at->m_next;
+ }
+ if (sp->data->m_flags & M_PKTHDR) {
+ sp->data->m_pkthdr.len = sp->length;
+ } else {
+ /* Get an HDR in front please */
+ at = sctp_get_mbuf_for_msg(1, 1, M_DONTWAIT, 1, MT_DATA);
+ if (at) {
+ at->m_pkthdr.len = sp->length;
+ at->m_len = 0;
+ at->m_next = sp->data;
+ sp->data = at;
+ }
+ }
+ if (holds_lock == 0) {
+ printf("Msg append gets a lock\n");
+ SCTP_TCB_LOCK(stcb);
+ }
+ sctp_snd_sb_alloc(stcb, sp->length);
+ stcb->asoc.stream_queue_cnt++;
+ TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
+ if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
+ sp->strseq = strm->next_sequence_sent;
+ strm->next_sequence_sent++;
+ }
+ if ((strm->next_spoke.tqe_next == NULL) &&
+ (strm->next_spoke.tqe_prev == NULL)) {
+ /* Not on wheel, insert */
+ sctp_insert_on_wheel(stcb, &stcb->asoc, strm, 0);
+ }
+ m = NULL;
+ if (hold_stcb_lock == 0) {
+ printf("msg append frees the lock\n");
+ SCTP_TCB_UNLOCK(stcb);
+ }
+out_now:
+ if (m) {
+ sctp_m_freem(m);
+ }
+ return (error);
+}
+
+
+static struct mbuf *
+sctp_copy_mbufchain(struct mbuf *clonechain,
+ struct mbuf *outchain,
+ struct mbuf **endofchain,
+ int can_take_mbuf,
+ int sizeofcpy,
+ uint8_t copy_by_ref)
+{
+ struct mbuf *m;
+ struct mbuf *appendchain;
+ caddr_t cp;
+ int len;
+
+ if (endofchain == NULL) {
+ /* error */
+error_out:
+ if (outchain)
+ sctp_m_freem(outchain);
+ return (NULL);
+ }
+ if (can_take_mbuf) {
+ appendchain = clonechain;
+ } else {
+ if (!copy_by_ref && (sizeofcpy <= ((((sctp_mbuf_threshold_count - 1) * MLEN) + MHLEN)))) {
+ /* It's not in a cluster */
+ if (*endofchain == NULL) {
+ /* let's get an mbuf cluster */
+ if (outchain == NULL) {
+ /* This is the general case */
+ new_mbuf:
+ outchain = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_HEADER);
+ if (outchain == NULL) {
+ goto error_out;
+ }
+ outchain->m_len = 0;
+ *endofchain = outchain;
+ /* get the prepend space */
+ outchain->m_data += (SCTP_FIRST_MBUF_RESV + 4);
+ } else {
+ /*
+ * We really should not get a NULL
+ * in endofchain
+ */
+ /* find end */
+ m = outchain;
+ while (m) {
+ if (m->m_next == NULL) {
+ *endofchain = m;
+ break;
+ }
+ m = m->m_next;
+ }
+ /* sanity */
+ if (*endofchain == NULL) {
+ /*
+ * huh, TSNH XXX maybe we
+ * should panic
+ */
+ sctp_m_freem(outchain);
+ goto new_mbuf;
+ }
+ }
+ /* get the space left at the new end */
+ len = M_TRAILINGSPACE(*endofchain);
+ } else {
+ /* how much is left at the end? */
+ len = M_TRAILINGSPACE(*endofchain);
+ }
+ /* Find the end of the data, for appending */
+ cp = (mtod((*endofchain), caddr_t)+(*endofchain)->m_len);
+
+ /* Now let's copy it out */
+ if (len >= sizeofcpy) {
+ /* It all fits, copy it in */
+ m_copydata(clonechain, 0, sizeofcpy, cp);
+ (*endofchain)->m_len += sizeofcpy;
+ if (outchain->m_flags & M_PKTHDR)
+ outchain->m_pkthdr.len += sizeofcpy;
+ } else {
+ /* fill up the end of the chain */
+ if (len > 0) {
+ m_copydata(clonechain, 0, len, cp);
+ (*endofchain)->m_len += len;
+ if (outchain->m_flags & M_PKTHDR)
+ outchain->m_pkthdr.len += len;
+ /* now we need another one */
+ sizeofcpy -= len;
+ }
+ m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_HEADER);
+ if (m == NULL) {
+ /* We failed */
+ goto error_out;
+ }
+ (*endofchain)->m_next = m;
+ *endofchain = m;
+ cp = mtod((*endofchain), caddr_t);
+ m_copydata(clonechain, len, sizeofcpy, cp);
+ (*endofchain)->m_len += sizeofcpy;
+ if (outchain->m_flags & M_PKTHDR) {
+ outchain->m_pkthdr.len += sizeofcpy;
+ }
+ }
+ return (outchain);
+ } else {
+ /* copy the old-fashioned way */
+ /*
+ * Supposedly m_copypacket is an optimization, use
+ * it if we can
+ */
+ if (clonechain->m_flags & M_PKTHDR) {
+ appendchain = m_copypacket(clonechain, M_DONTWAIT);
+ } else {
+ appendchain = m_copy(clonechain, 0, M_COPYALL);
+ }
+
+ }
+ }
+ if (appendchain == NULL) {
+ /* error */
+ if (outchain)
+ sctp_m_freem(outchain);
+ return (NULL);
+ }
+ /* if outchain is null, check our special reservation flag */
+ if (outchain == NULL) {
+ /*
+ * need a lead mbuf in this one if we don't have space for:
+ * - Ethernet header (12+2+2)
+ * - IP header (20/40)
+ * - SCTP common header (12)
+ */
+ if (M_LEADINGSPACE(appendchain) < (SCTP_FIRST_MBUF_RESV)) {
+ outchain = sctp_get_mbuf_for_msg(8, 1, M_DONTWAIT, 1, MT_HEADER);
+ if (outchain) {
+ /*
+ * if we can't get one here we have a
+ * problem anyway :o We reserve all of
+ * the mbuf for prepends.
+ */
+ outchain->m_pkthdr.len = 0;
+ outchain->m_len = 0;
+ outchain->m_next = NULL;
+ MH_ALIGN(outchain, 4);
+ *endofchain = outchain;
+ }
+ }
+ }
+ if (outchain) {
+ /* tack on to the end */
+ if (*endofchain != NULL) {
+ (*endofchain)->m_next = appendchain;
+ } else {
+ m = outchain;
+ while (m) {
+ if (m->m_next == NULL) {
+ m->m_next = appendchain;
+ break;
+ }
+ m = m->m_next;
+ }
+ }
+ if (outchain->m_flags & M_PKTHDR) {
+ int append_tot;
+
+ m = appendchain;
+ append_tot = 0;
+ while (m) {
+ append_tot += m->m_len;
+ if (m->m_next == NULL) {
+ *endofchain = m;
+ }
+ m = m->m_next;
+ }
+ outchain->m_pkthdr.len += append_tot;
+ } else {
+ /*
+ * save off the end and update the end-chain position
+ */
+ m = appendchain;
+ while (m) {
+ if (m->m_next == NULL) {
+ *endofchain = m;
+ break;
+ }
+ m = m->m_next;
+ }
+ }
+ return (outchain);
+ } else {
+ /* save off the end and update the end-chain position */
+ m = appendchain;
+ while (m) {
+ if (m->m_next == NULL) {
+ *endofchain = m;
+ break;
+ }
+ m = m->m_next;
+ }
+ return (appendchain);
+ }
+}
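+
+/*
+ * The tail-finding walk above appears several times; a sketch, for a
+ * plain mbuf chain, of computing the total length and the last mbuf
+ * in a single pass.
+ */
+#if 0
+static int
+chain_len_and_tail(struct mbuf *m, struct mbuf **tail)
+{
+    int len = 0;
+
+    for (; m != NULL; m = m->m_next) {
+        len += m->m_len;
+        if (m->m_next == NULL)
+            *tail = m;  /* tail is left untouched for an empty chain */
+    }
+    return (len);
+}
+#endif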
+
+int
+sctp_med_chunk_output(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int *num_out,
+ int *reason_code,
+ int control_only, int *cwnd_full, int from_where,
+ struct timeval *now, int *now_filled, int frag_point);
+
+static void
+sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
+ uint32_t val)
+{
+ struct sctp_copy_all *ca;
+ struct mbuf *m;
+ int ret = 0;
+ int added_control = 0;
+ int un_sent, do_chunk_output = 1;
+ struct sctp_association *asoc;
+
+ ca = (struct sctp_copy_all *)ptr;
+ if (ca->m == NULL) {
+ return;
+ }
+ if (ca->inp != inp) {
+ /* TSNH */
+ return;
+ }
+ if ((ca->m) && (ca->m->m_pkthdr.len)) {
+ m = m_copym(ca->m, 0, M_COPYALL, M_DONTWAIT);
+ if (m == NULL) {
+ /* can't copy so we are done */
+ ca->cnt_failed++;
+ return;
+ }
+ } else {
+ m = NULL;
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
+ /* Abort this assoc with m as the user defined reason */
+ if (m) {
+ struct sctp_paramhdr *ph;
+
+ M_PREPEND(m, sizeof(struct sctp_paramhdr), M_DONTWAIT);
+ if (m) {
+ ph = mtod(m, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(m->m_pkthdr.len);
+ }
+ /*
+ * We add one here to keep the assoc from
+ * disappearing on us.
+ */
+ atomic_add_16(&stcb->asoc.refcnt, 1);
+ sctp_abort_an_association(inp, stcb,
+ SCTP_RESPONSE_TO_USER_REQ,
+ m);
+ /*
+ * sctp_abort_an_association calls sctp_free_asoc();
+ * free_asoc will NOT free the assoc since we
+ * incremented the refcnt. We do this to prevent it
+ * being freed and things getting tricky, since we
+ * could end up (from free_asoc) calling inpcb_free,
+ * which would take a recursive lock on the
+ * iterator lock. But as a consequence the
+ * stcb comes back to us un-locked; since
+ * free_asoc returns with either no TCB or the TCB
+ * unlocked, we must relock to unlock in the
+ * iterator timer :-0
+ */
+ SCTP_TCB_LOCK(stcb);
+ atomic_add_16(&stcb->asoc.refcnt, -1);
+ goto no_chunk_output;
+ }
+ } else {
+ if (m) {
+ ret = sctp_msg_append(stcb, stcb->asoc.primary_destination, m,
+ &ca->sndrcv, 1);
+ }
+ asoc = &stcb->asoc;
+ if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
+ /* shutdown this assoc */
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if (asoc->locked_on_sending) {
+ goto abort_anyway;
+ }
+ /*
+ * there is nothing queued to send, so I'm
+ * done...
+ */
+ if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ /*
+ * only send SHUTDOWN the first time
+ * through
+ */
+ sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
+ asoc->state = SCTP_STATE_SHUTDOWN_SENT;
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ added_control = 1;
+ do_chunk_output = 0;
+ }
+ } else {
+ /*
+ * we still got (or just got) data to send,
+ * so set SHUTDOWN_PENDING
+ */
+ /*
+ * XXX sockets draft says that SCTP_EOF
+ * should be sent with no data. currently,
+ * we will allow user data to be sent first
+ * and move to SHUTDOWN-PENDING
+ */
+ if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ if (asoc->locked_on_sending) {
+ /*
+ * Locked to send out the
+ * data
+ */
+ struct sctp_stream_queue_pending *sp;
+
+ sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
+ if (sp) {
+ if ((sp->length == 0) && (sp->msg_is_complete == 0))
+ asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+ }
+ }
+ asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+ abort_anyway:
+ atomic_add_16(&stcb->asoc.refcnt, 1);
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_RESPONSE_TO_USER_REQ,
+ NULL);
+ atomic_add_16(&stcb->asoc.refcnt, -1);
+ goto no_chunk_output;
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ }
+ }
+
+ }
+ }
+ un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
+ ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk)));
+
+ if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
+ (stcb->asoc.total_flight > 0) &&
+ (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
+ ) {
+ do_chunk_output = 0;
+ }
+ if (do_chunk_output)
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND);
+ else if (added_control) {
+ int num_out = 0, reason = 0, cwnd_full = 0, now_filled = 0;
+ struct timeval now;
+ int frag_point;
+
+ frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
+ sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
+ &reason, 1, &cwnd_full, 1, &now, &now_filled, frag_point);
+ }
+no_chunk_output:
+ if (ret) {
+ ca->cnt_failed++;
+ } else {
+ ca->cnt_sent++;
+ }
+}
+
+static void
+sctp_sendall_completes(void *ptr, uint32_t val)
+{
+ struct sctp_copy_all *ca;
+
+ ca = (struct sctp_copy_all *)ptr;
+ /*
+ * Do a notify here? Kacheong suggests that the notify be done at
+ * the send time, so you would push up a notification if any send
+ * failed. Don't know if this is feasible since the only failures we
+ * have are "memory" related, and if you cannot get an mbuf to send
+ * the data you surely can't get an mbuf to send up a notification
+ * that you can't send the data :->
+ */
+
+ /* now free everything */
+ sctp_m_freem(ca->m);
+ SCTP_FREE(ca);
+}
+
+
+#define MC_ALIGN(m, len) do { \
+ (m)->m_data += (MCLBYTES - (len)) & ~(sizeof(long) - 1); \
+} while (0)
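+
+/*
+ * MC_ALIGN positions len bytes of data at the end of an mbuf cluster,
+ * rounded down to a long-word boundary, so the maximum leading space
+ * remains for headers prepended later. E.g. with MCLBYTES = 2048 and
+ * len = 100, m_data advances by (2048 - 100) & ~(sizeof(long) - 1).
+ */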
+
+
+
+static struct mbuf *
+sctp_copy_out_all(struct uio *uio, int len)
+{
+ struct mbuf *ret, *at;
+ int left, willcpy, cancpy, error;
+
+ ret = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_WAIT, 1, MT_DATA);
+ if (ret == NULL) {
+ /* TSNH */
+ return (NULL);
+ }
+ left = len;
+ ret->m_len = 0;
+ ret->m_pkthdr.len = len;
+ /* save space for the data chunk header */
+ cancpy = M_TRAILINGSPACE(ret);
+ willcpy = min(cancpy, left);
+ at = ret;
+ while (left > 0) {
+ /* Align data to the end */
+ error = uiomove(mtod(at, caddr_t), willcpy, uio);
+ if (error) {
+ err_out_now:
+ sctp_m_freem(at);
+ return (NULL);
+ }
+ at->m_len = willcpy;
+ at->m_nextpkt = at->m_next = 0;
+ left -= willcpy;
+ if (left > 0) {
+ at->m_next = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 1, MT_DATA);
+ if (at->m_next == NULL) {
+ goto err_out_now;
+ }
+ at = at->m_next;
+ at->m_len = 0;
+ cancpy = M_TRAILINGSPACE(at);
+ willcpy = min(cancpy, left);
+ }
+ }
+ return (ret);
+}
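+
+/*
+ * The invariant of the loop above, stated as a sketch: each pass moves
+ * min(M_TRAILINGSPACE(at), left) bytes from the uio into the current
+ * mbuf and the chain grows one mbuf at a time, so on success exactly
+ * len bytes have been copied when left reaches 0.
+ */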
+
+static int
+sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
+ struct sctp_sndrcvinfo *srcv)
+{
+ int ret;
+ struct sctp_copy_all *ca;
+
+ SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
+ "CopyAll");
+ if (ca == NULL) {
+ sctp_m_freem(m);
+ return (ENOMEM);
+ }
+ memset(ca, 0, sizeof(struct sctp_copy_all));
+
+ ca->inp = inp;
+ ca->sndrcv = *srcv;
+ /*
+ * take off the sendall flag, it would be bad if we failed to do
+ * this :-0
+ */
+ ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
+ /* get length and mbuf chain */
+ if (uio) {
+ ca->sndlen = uio->uio_resid;
+ ca->m = sctp_copy_out_all(uio, ca->sndlen);
+ if (ca->m == NULL) {
+ out_no_mem:
+ SCTP_FREE(ca);
+ return (ENOMEM);
+ }
+ } else {
+ if ((m->m_flags & M_PKTHDR) == 0) {
+ struct mbuf *mat;
+
+ mat = m;
+ ca->sndlen = 0;
+ while (mat) {
+ ca->sndlen += mat->m_len;
+ mat = mat->m_next;
+ }
+ mat = sctp_get_mbuf_for_msg(1, 1, M_WAIT, 1, MT_DATA);
+ if (mat == NULL) {
+ sctp_m_freem(m);
+ goto out_no_mem;
+ }
+ /* We MUST have a header on the front */
+ mat->m_next = m;
+ mat->m_len = 0;
+ mat->m_pkthdr.len = ca->sndlen;
+ ca->m = mat;
+ } else {
+ ca->sndlen = m->m_pkthdr.len;
+ ca->m = m;
+ }
+ }
+ ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator,
+ SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES, SCTP_ASOC_ANY_STATE,
+ (void *)ca, 0,
+ sctp_sendall_completes, inp, 1);
+ if (ret) {
+#ifdef SCTP_DEBUG
+ printf("Failed to initiate iterator for sendall\n");
+#endif
+ SCTP_FREE(ca);
+ return (EFAULT);
+ }
+ return (0);
+}
+
+
+void
+sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk, *nchk;
+
+ chk = TAILQ_FIRST(&asoc->control_send_queue);
+ while (chk) {
+ nchk = TAILQ_NEXT(chk, sctp_next);
+ if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
+ TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ asoc->ctrl_queue_cnt--;
+ if (chk->whoTo)
+ sctp_free_remote_addr(chk->whoTo);
+ sctp_free_a_chunk(stcb, chk);
+ }
+ chk = nchk;
+ }
+}
+
+void
+sctp_toss_old_asconf(struct sctp_tcb *stcb)
+{
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *chk, *chk_tmp;
+
+ asoc = &stcb->asoc;
+ for (chk = TAILQ_FIRST(&asoc->control_send_queue); chk != NULL;
+ chk = chk_tmp) {
+ /* get next chk */
+ chk_tmp = TAILQ_NEXT(chk, sctp_next);
+ /* find SCTP_ASCONF chunk in queue (only one ever in queue) */
+ if (chk->rec.chunk_id.id == SCTP_ASCONF) {
+ TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ asoc->ctrl_queue_cnt--;
+ if (chk->whoTo)
+ sctp_free_remote_addr(chk->whoTo);
+ sctp_free_a_chunk(stcb, chk);
+ }
+ }
+}
+
+
+static __inline void
+sctp_clean_up_datalist(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_tmit_chunk **data_list,
+ int bundle_at,
+ struct sctp_nets *net)
+{
+ int i;
+ struct sctp_tmit_chunk *tp1;
+
+ for (i = 0; i < bundle_at; i++) {
+ /* off of the send queue */
+ if (i) {
+ /*
+ * For any chunk NOT 0 we zap the time; chunk 0
+ * gets zapped or set based on whether an RTO
+ * measurement is needed.
+ */
+ data_list[i]->do_rtt = 0;
+ }
+ /* record time */
+ data_list[i]->sent_rcv_time = net->last_sent_time;
+ data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
+ TAILQ_REMOVE(&asoc->send_queue,
+ data_list[i],
+ sctp_next);
+ /* on to the sent queue */
+ tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
+ if ((tp1) && (compare_with_wrap(tp1->rec.data.TSN_seq,
+ data_list[i]->rec.data.TSN_seq, MAX_TSN))) {
+ struct sctp_tmit_chunk *tpp;
+
+ /* need to move back */
+ back_up_more:
+ tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
+ if (tpp == NULL) {
+ TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
+ goto all_done;
+ }
+ tp1 = tpp;
+ if (compare_with_wrap(tp1->rec.data.TSN_seq,
+ data_list[i]->rec.data.TSN_seq, MAX_TSN)) {
+ goto back_up_more;
+ }
+ TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
+ } else {
+ TAILQ_INSERT_TAIL(&asoc->sent_queue,
+ data_list[i],
+ sctp_next);
+ }
+all_done:
+ /* This does not lower until the cum-ack passes it */
+ asoc->sent_queue_cnt++;
+ asoc->send_queue_cnt--;
+ if ((asoc->peers_rwnd <= 0) &&
+ (asoc->total_flight == 0) &&
+ (bundle_at == 1)) {
+ /* Mark the chunk as being a window probe */
+ SCTP_STAT_INCR(sctps_windowprobed);
+ data_list[i]->rec.data.state_flags |= SCTP_WINDOW_PROBE;
+ } else {
+ data_list[i]->rec.data.state_flags &= ~SCTP_WINDOW_PROBE;
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xC2, 3);
+#endif
+ data_list[i]->sent = SCTP_DATAGRAM_SENT;
+ data_list[i]->snd_count = 1;
+ data_list[i]->rec.data.chunk_was_revoked = 0;
+ net->flight_size += data_list[i]->book_size;
+ asoc->total_flight += data_list[i]->book_size;
+ asoc->total_flight_count++;
+#ifdef SCTP_LOG_RWND
+ sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
+ asoc->peers_rwnd, data_list[i]->send_size, sctp_peer_chunk_oh);
+#endif
+ asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
+ (uint32_t) (data_list[i]->send_size + sctp_peer_chunk_oh));
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ }
+}
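+
+/*
+ * The sent-queue insertion above orders chunks by TSN modulo 2^32. A
+ * sketch of serial-number comparison in the style of RFC 1982, which
+ * compare_with_wrap() is assumed to implement for MAX_TSN:
+ */
+#if 0
+static int
+tsn_gt(uint32_t a, uint32_t b)
+{
+    /* true when a is "newer" than b, allowing for 32-bit wrap */
+    return ((a != b) && ((uint32_t)(a - b) < (1U << 31)));
+}
+#endif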
+
+static __inline void
+sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk, *nchk;
+
+ for (chk = TAILQ_FIRST(&asoc->control_send_queue);
+ chk; chk = nchk) {
+ nchk = TAILQ_NEXT(chk, sctp_next);
+ if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
+ (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
+ (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
+ (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
+ (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
+ (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
+ /* Stray chunks must be cleaned up */
+ clean_up_anyway:
+ TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ asoc->ctrl_queue_cnt--;
+ sctp_free_remote_addr(chk->whoTo);
+ sctp_free_a_chunk(stcb, chk);
+ } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
+ /* special handling, we must look into the param */
+ if (chk != asoc->str_reset) {
+ goto clean_up_anyway;
+ }
+ }
+ }
+}
+
+extern int sctp_min_split_point;
+
+static __inline int
+sctp_can_we_split_this(struct sctp_tcb *stcb,
+ struct sctp_stream_queue_pending *sp,
+ int goal_mtu, int frag_point, int eeor_on)
+{
+ /*
+ * Make a decision on whether I should split a msg into multiple parts.
+ */
+ if (goal_mtu < sctp_min_split_point) {
+ /* not enough room left to be worth a split */
+ return (0);
+ }
+ if (sp->msg_is_complete == 0) {
+ if (eeor_on) {
+ /*
+ * If we are doing EEOR we need to always send it if
+ * the entire thing fits.
+ */
+ if (goal_mtu >= sp->length)
+ return (sp->length);
+ } else {
+ if (goal_mtu >= sp->length) {
+ /*
+ * If we cannot fill the amount needed there
+ * is no sense in splitting the chunk.
+ */
+ return (0);
+ }
+ }
+ /*
+ * If we reach here sp->length is larger than the goal_mtu.
+ * Do we wish to split it, for the sake of packing the
+ * packet together?
+ */
+ if (goal_mtu >= min(sctp_min_split_point, stcb->asoc.smallest_mtu)) {
+ /* It's ok to split it */
+ return (min(goal_mtu, frag_point));
+ }
+ } else {
+ /* We can always split a complete message to make it fit */
+ if (goal_mtu >= sp->length)
+ /* Take it all */
+ return (sp->length);
+
+ return (min(goal_mtu, frag_point));
+ }
+ /* Nope, can't split */
+ return (0);
+
+}
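+
+/*
+ * Usage sketch: with goal_mtu = 1200, frag_point = 1452 and a complete
+ * 4000-byte message, the function returns min(1200, 1452) = 1200 and
+ * the caller moves a 1200-byte fragment. An incomplete message shorter
+ * than goal_mtu moves nothing unless EEOR mode is on.
+ */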
+
+static int
+sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
+ struct sctp_stream_out *strq,
+ int goal_mtu,
+ int frag_point,
+ int *locked,
+ int *giveup,
+ int eeor_mode)
+{
+ /* Move from the stream to the send_queue keeping track of the total */
+ struct sctp_association *asoc;
+ struct sctp_stream_queue_pending *sp;
+ struct sctp_tmit_chunk *chk;
+ struct sctp_data_chunk *dchkh;
+ int to_move;
+ uint8_t rcv_flags = 0;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ asoc = &stcb->asoc;
+ sp = TAILQ_FIRST(&strq->outqueue);
+
+ if (sp == NULL) {
+ *locked = 0;
+ SCTP_TCB_SEND_LOCK(stcb);
+ if (strq->last_msg_incomplete) {
+ printf("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
+ strq->stream_no, strq->last_msg_incomplete);
+ strq->last_msg_incomplete = 0;
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ return (0);
+ }
+ SCTP_TCB_SEND_LOCK(stcb);
+ if ((sp->length == 0) && (sp->msg_is_complete == 0)) {
+ /* Must wait for more data, must be last msg */
+ *locked = 1;
+ *giveup = 1;
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ return (0);
+ } else if (sp->length == 0) {
+ /* This should not happen */
+ panic("sp length is 0?");
+ }
+ if ((goal_mtu >= sp->length) && (sp->msg_is_complete)) {
+ /* It all fits and it's a complete msg, no brainer */
+ to_move = min(sp->length, frag_point);
+ if (to_move == sp->length) {
+ /* Getting it all */
+ if (sp->some_taken) {
+ rcv_flags |= SCTP_DATA_LAST_FRAG;
+ } else {
+ rcv_flags |= SCTP_DATA_NOT_FRAG;
+ }
+ } else {
+ /* Not getting it all, frag point overrides */
+ if (sp->some_taken == 0) {
+ rcv_flags |= SCTP_DATA_FIRST_FRAG;
+ }
+ sp->some_taken = 1;
+ }
+ } else {
+ to_move = sctp_can_we_split_this(stcb,
+ sp, goal_mtu, frag_point, eeor_mode);
+ if (to_move) {
+ if (to_move >= sp->length) {
+ to_move = sp->length;
+ }
+ if (sp->some_taken == 0) {
+ rcv_flags |= SCTP_DATA_FIRST_FRAG;
+ }
+ sp->some_taken = 1;
+ } else {
+ if (sp->some_taken) {
+ *locked = 1;
+ }
+ *giveup = 1;
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ return (0);
+ }
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ /* If we reach here, we can copy out a chunk */
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* No chunk memory */
+out_gu:
+ *giveup = 1;
+ return (0);
+ }
+ /* clear it */
+ memset(chk, 0, sizeof(*chk));
+ chk->rec.data.rcv_flags = rcv_flags;
+ SCTP_TCB_SEND_LOCK(stcb);
+ sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
+ if (sp->data->m_flags & M_EXT) {
+ chk->copy_by_ref = 1;
+ } else {
+ chk->copy_by_ref = 0;
+ }
+ if (to_move >= sp->length) {
+ /* we can steal the whole thing */
+ chk->data = sp->data;
+ chk->last_mbuf = sp->tail_mbuf;
+ /* register the stealing */
+ sp->data = sp->tail_mbuf = NULL;
+ } else {
+ struct mbuf *m;
+
+ chk->data = m_copym(sp->data, 0, to_move, M_DONTWAIT);
+ chk->last_mbuf = NULL;
+ if (chk->data == NULL) {
+ sctp_free_a_chunk(stcb, chk);
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ goto out_gu;
+ }
+ /* Pull off the data */
+ m_adj(sp->data, to_move);
+ /*
+ * Now let's work our way down and compact it
+ */
+ m = sp->data;
+ while (m && (m->m_len == 0)) {
+ sp->data = m->m_next;
+ m->m_next = NULL;
+ if (sp->tail_mbuf == m) {
+ /* freeing tail */
+ sp->tail_mbuf = sp->data;
+ }
+ sctp_m_free(m);
+ m = sp->data;
+ }
+ }
+ if (to_move > sp->length) {
+ panic("Huh, how can to_move be larger?");
+ } else
+ sp->length -= to_move;
+
+ /* Update the new length */
+ if (sp->data && (sp->data->m_flags & M_PKTHDR)) {
+ /* update length */
+ sp->data->m_pkthdr.len = sp->length;
+ }
+ if (M_LEADINGSPACE(chk->data) < sizeof(struct sctp_data_chunk)) {
+ /* Not enough room for a chunk header, get some */
+ struct mbuf *m;
+
+ m = sctp_get_mbuf_for_msg(1, 1, M_DONTWAIT, 0, MT_DATA);
+ if (m == NULL) {
+ printf("We will Panic maybe, out of mbufs\n");
+ } else {
+ m->m_len = 0;
+ m->m_next = chk->data;
+ chk->data = m;
+ chk->data->m_pkthdr.len = to_move;
+ MH_ALIGN(chk->data, 4);
+ }
+ }
+ M_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT);
+ if (chk->data == NULL) {
+ /* HELP */
+ sctp_free_a_chunk(stcb, chk);
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ goto out_gu;
+ }
+ chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk));
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+
+ /*
+ * get last_mbuf and counts of mbuf usage. This is ugly but hopefully
+ * it's only one mbuf.
+ */
+ if (chk->last_mbuf == NULL) {
+ chk->last_mbuf = chk->data;
+ while (chk->last_mbuf->m_next != NULL) {
+ chk->last_mbuf = chk->last_mbuf->m_next;
+ }
+ }
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->pad_inplace = 0;
+ chk->no_fr_allowed = 0;
+ chk->rec.data.stream_seq = sp->strseq;
+ chk->rec.data.stream_number = sp->stream;
+ chk->rec.data.payloadtype = sp->ppid;
+ chk->rec.data.context = sp->context;
+ chk->rec.data.doing_fast_retransmit = 0;
+ chk->rec.data.ect_nonce = 0; /* ECN Nonce */
+
+ chk->rec.data.timetodrop = sp->ts;
+ chk->flags = sp->act_flags;
+ chk->addr_over = sp->addr_over;
+
+ chk->whoTo = net;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+
+ chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
+ dchkh = mtod(chk->data, struct sctp_data_chunk *);
+ /*
+ * Put the rest of the things in place now. Size was done earlier in
+ * previous loop prior to padding.
+ */
+ dchkh->ch.chunk_type = SCTP_DATA;
+ dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
+ dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
+ dchkh->dp.stream_id = htons(strq->stream_no);
+ dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
+ dchkh->dp.protocol_id = chk->rec.data.payloadtype;
+ dchkh->ch.chunk_length = htons(chk->send_size);
+ /*
+ * Now advance the chk->send_size by the actual pad needed.
+ */
+ if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
+ /* need a pad */
+ struct mbuf *lm;
+ int pads;
+
+ pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
+ if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
+ chk->pad_inplace = 1;
+ }
+ if ((lm = chk->last_mbuf->m_next) != NULL) {
+ /* pad added an mbuf */
+ chk->last_mbuf = lm;
+ }
+ if (chk->data->m_flags & M_PKTHDR) {
+ chk->data->m_pkthdr.len += pads;
+ }
+ chk->send_size += pads;
+ }
+ /* We only re-set the policy if it is on */
+ if (sp->pr_sctp_on)
+ sctp_set_prsctp_policy(stcb, sp);
+
+ if (sp->msg_is_complete && (sp->length == 0)) {
+ /* All done, pull and kill the message */
+ asoc->stream_queue_cnt--;
+ TAILQ_REMOVE(&strq->outqueue, sp, next);
+ sctp_free_remote_addr(sp->net);
+ if (sp->data) {
+ sctp_m_freem(sp->data);
+ sp->data = NULL;
+ }
+ sctp_free_a_strmoq(stcb, sp);
+
+ /* we can't be locked to it */
+ *locked = 0;
+ stcb->asoc.locked_on_sending = NULL;
+ } else {
+ /* more to go, we are locked */
+ *locked = 1;
+ }
+ asoc->chunks_on_out_queue++;
+ if (sp->pr_sctp_on) {
+ asoc->pr_sctp_cnt++;
+ chk->pr_sctp_on = 1;
+ } else {
+ chk->pr_sctp_on = 0;
+ }
+ TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
+ asoc->send_queue_cnt++;
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ return (to_move);
+}
+
+
+static struct sctp_stream_out *
+sctp_select_a_stream(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+ struct sctp_stream_out *strq;
+
+ /* Find the next stream to use */
+ if (asoc->last_out_stream == NULL) {
+ strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel);
+ if (asoc->last_out_stream == NULL) {
+ /* huh nothing on the wheel, TSNH */
+ return (NULL);
+ }
+ goto done_it;
+ }
+ strq = TAILQ_NEXT(asoc->last_out_stream, next_spoke);
+done_it:
+ if (strq == NULL) {
+ strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel);
+ }
+ return (strq);
+
+}
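+
+/*
+ * Selection is a plain round robin over the wheel: continue from the
+ * spoke after last_out_stream and wrap to the head of the TAILQ when
+ * the end is reached.
+ */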
+
+static void
+sctp_fill_outqueue(struct sctp_tcb *stcb,
+ struct sctp_nets *net, int frag_point, int eeor_mode)
+{
+ struct sctp_association *asoc;
+ struct sctp_stream_out *strq, *strqn;
+ int goal_mtu, moved_how_much, total_moved = 0;
+ int locked, giveup;
+ struct sctp_stream_queue_pending *sp;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ asoc = &stcb->asoc;
+#ifdef AF_INET6
+ if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
+ goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
+ } else {
+ /* ?? not sure what else to do */
+ goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
+ }
+#else
+ goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
+#endif
+ /* Need an allowance for the data chunk header too */
+ goal_mtu -= sizeof(struct sctp_data_chunk);
+
+ /* must make even word boundary */
+ goal_mtu &= 0xfffffffc;
+ if (asoc->locked_on_sending) {
+ /* We are stuck on one stream until the message completes. */
+ strqn = strq = asoc->locked_on_sending;
+ locked = 1;
+ } else {
+ strqn = strq = sctp_select_a_stream(stcb, asoc);
+ locked = 0;
+ }
+
+ while ((goal_mtu > 0) && strq) {
+ sp = TAILQ_FIRST(&strq->outqueue);
+ /*
+ * If CMT is off, we must validate that the stream in
+ * question has its first item pointed towards the
+ * network destination requested by the caller. Note
+ * that if we turn out to be locked to a stream
+ * (assigning TSNs), then we must stop, since we
+ * cannot look for another stream with data to send
+ * to that destination. In CMT's case, by skipping
+ * this check, we will send one data packet towards
+ * the requested net.
+ if (sp == NULL) {
+ break;
+ }
+ if ((sp->net != net) && (sctp_cmt_on_off == 0)) {
+ /* none for this network */
+ if (locked) {
+ break;
+ } else {
+ strq = sctp_select_a_stream(stcb, asoc);
+ if (strq == NULL)
+ /* none left */
+ break;
+ if (strqn == strq) {
+ /* I have circled */
+ break;
+ }
+ continue;
+ }
+ }
+ giveup = 0;
+ moved_how_much = sctp_move_to_outqueue(stcb, net, strq, goal_mtu, frag_point, &locked,
+ &giveup, eeor_mode);
+ asoc->last_out_stream = strq;
+ if (locked) {
+ asoc->locked_on_sending = strq;
+ if ((moved_how_much == 0) || (giveup))
+ /* no more to move for now */
+ break;
+ } else {
+ asoc->locked_on_sending = NULL;
+ if (TAILQ_FIRST(&strq->outqueue) == NULL) {
+ sctp_remove_from_wheel(stcb, asoc, strq);
+ }
+ if (giveup) {
+ break;
+ }
+ strq = sctp_select_a_stream(stcb, asoc);
+ if (strq == NULL) {
+ break;
+ }
+ }
+ total_moved += moved_how_much;
+ goal_mtu -= moved_how_much;
+ goal_mtu &= 0xfffffffc;
+ }
+ if (total_moved == 0) {
+ if ((sctp_cmt_on_off == 0) &&
+ (net == stcb->asoc.primary_destination)) {
+ /* ran dry for the primary network */
+ SCTP_STAT_INCR(sctps_primary_randry);
+ } else if (sctp_cmt_on_off) {
+ /* ran dry with CMT on */
+ SCTP_STAT_INCR(sctps_cmt_randry);
+ }
+ }
+}
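+
+/*
+ * A note on the masking above: goal_mtu &= 0xfffffffc rounds the
+ * remaining budget down to a 4-byte boundary, matching the chunk
+ * padding rule; e.g. a budget of 1466 bytes becomes 1464.
+ */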
+
+__inline void
+sctp_fix_ecn_echo(struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk;
+
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ }
+ }
+}
+
+static void
+sctp_move_to_an_alt(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_nets *net)
+{
+ struct sctp_tmit_chunk *chk;
+ struct sctp_nets *a_net;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ a_net = sctp_find_alternate_net(stcb, net, 0);
+ if ((a_net != net) &&
+ ((a_net->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE)) {
+ /*
+ * We only proceed if a valid alternate is found that is not
+ * this one and is reachable. Here we must move all chunks
+ * queued in the send queue off of the destination address
+ * to our alternate.
+ */
+ TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
+ if (chk->whoTo == net) {
+ /* Move the chunk to our alternate */
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = a_net;
+ atomic_add_int(&a_net->ref_count, 1);
+ }
+ }
+ }
+}
+
+extern int sctp_early_fr;
+
+int
+sctp_med_chunk_output(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int *num_out,
+ int *reason_code,
+ int control_only, int *cwnd_full, int from_where,
+ struct timeval *now, int *now_filled, int frag_point)
+{
+ /*
+ * Ok this is the generic chunk service queue. We must do the
+ * following:
+ * - Service the stream queue that is next, moving any message
+ *   (note I must get a complete message, i.e. FIRST/MIDDLE and
+ *   LAST, to the out queue in one pass) and assigning TSNs.
+ * - Check to see if the cwnd/rwnd allows any output; if so, go
+ *   ahead and formulate and send the low level chunks, making
+ *   sure to combine any control in the control chunk queue also.
+ */
+ struct sctp_nets *net;
+ struct mbuf *outchain, *endoutchain;
+ struct sctp_tmit_chunk *chk, *nchk;
+ struct sctphdr *shdr;
+
+ /* temp arrays for unlinking */
+ struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
+ int no_fragmentflg, error;
+ int one_chunk, hbflag;
+ int asconf, cookie, no_out_cnt;
+ int bundle_at, ctl_cnt, no_data_chunks, cwnd_full_ind, eeor_mode;
+ unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
+ struct sctp_nets *start_at, *old_startat = NULL, *send_start_at;
+ int tsns_sent = 0;
+ uint32_t auth_offset = 0;
+ struct sctp_auth_chunk *auth = NULL;
+
+ *num_out = 0;
+ cwnd_full_ind = 0;
+
+ if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
+ (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
+ (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
+ eeor_mode = 1;
+ } else {
+ eeor_mode = 0;
+ }
+ ctl_cnt = no_out_cnt = asconf = cookie = 0;
+ /*
+ * First let's prime the pump. For each destination, if there is room
+ * in the flight size, attempt to pull an MTU's worth out of the
+ * stream queues into the general send_queue
+ */
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xC2, 2);
+#endif
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ hbflag = 0;
+ if ((control_only) || (asoc->stream_reset_outstanding))
+ no_data_chunks = 1;
+ else
+ no_data_chunks = 0;
+
+ /* Nothing possible to send? */
+ if (TAILQ_EMPTY(&asoc->control_send_queue) &&
+ TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->out_wheel)) {
+ *reason_code = 9;
+ return (0);
+ }
+ if (asoc->peers_rwnd == 0) {
+ /* No room in peers rwnd */
+ *cwnd_full = 1;
+ *reason_code = 1;
+ if (asoc->total_flight > 0) {
+ /* we are allowed one chunk in flight */
+ no_data_chunks = 1;
+ }
+ }
+ if ((no_data_chunks == 0) && (!TAILQ_EMPTY(&asoc->out_wheel))) {
+ if (sctp_cmt_on_off) {
+ /*
+ * for CMT we start at the next one past the one we
+ * last added data to.
+ */
+ if (TAILQ_FIRST(&asoc->send_queue) != NULL) {
+ goto skip_the_fill_from_streams;
+ }
+ if (asoc->last_net_data_came_from) {
+ net = TAILQ_NEXT(asoc->last_net_data_came_from, sctp_next);
+ if (net == NULL) {
+ net = TAILQ_FIRST(&asoc->nets);
+ }
+ } else {
+ /* back to start */
+ net = TAILQ_FIRST(&asoc->nets);
+ }
+
+ } else {
+ net = asoc->primary_destination;
+ if (net == NULL) {
+ /* TSNH */
+ net = TAILQ_FIRST(&asoc->nets);
+ }
+ }
+ start_at = net;
+one_more_time:
+ for (; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
+ if (old_startat && (old_startat == net)) {
+ break;
+ }
+ if ((sctp_cmt_on_off == 0) && (net->ref_count < 2)) {
+ /* nothing can be in queue for this guy */
+ continue;
+ }
+ if (net->flight_size >= net->cwnd) {
+ /* skip this network, no room */
+ cwnd_full_ind++;
+ continue;
+ }
+ /*
+ * @@@ JRI : this for loop we are in takes in each
+ * net, if it's got space in cwnd and has data sent
+ * to it (when CMT is off) then it calls
+ * sctp_fill_outqueue for the net. This gets data on
+ * the send queue for that network.
+ *
+ * In sctp_fill_outqueue TSN's are assigned and data is
+ * copied out of the stream buffers. Note mostly
+ * copy by reference (we hope).
+ */
+#ifdef SCTP_CWND_LOGGING
+ sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
+#endif
+ sctp_fill_outqueue(stcb, net, frag_point, eeor_mode);
+ }
+ if (start_at != TAILQ_FIRST(&asoc->nets)) {
+ /* got to pick up the beginning stuff. */
+ old_startat = start_at;
+ start_at = net = TAILQ_FIRST(&asoc->nets);
+ goto one_more_time;
+ }
+ }
+skip_the_fill_from_streams:
+ *cwnd_full = cwnd_full_ind;
+ /* now service each destination and send out what we can for it */
+ /* Nothing to send? */
+ if ((TAILQ_FIRST(&asoc->control_send_queue) == NULL) &&
+ (TAILQ_FIRST(&asoc->send_queue) == NULL)) {
+ *reason_code = 8;
+ return (0);
+ }
+ chk = TAILQ_FIRST(&asoc->send_queue);
+ if (chk) {
+ send_start_at = chk->whoTo;
+ } else {
+ send_start_at = TAILQ_FIRST(&asoc->nets);
+ }
+ old_startat = NULL;
+again_one_more_time:
+ for (net = send_start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
+ /* how much can we send? */
+ /* printf("Examine for sending net:%x\n", (uint32_t)net); */
+ if (old_startat && (old_startat == net)) {
+ /* through the list completely. */
+ break;
+ }
+ tsns_sent = 0;
+ if (net->ref_count < 2) {
+ /*
+ * Ref-count of 1 so we cannot have data or control
+ * queued to this address. Skip it.
+ */
+ continue;
+ }
+ ctl_cnt = bundle_at = 0;
+ endoutchain = outchain = NULL;
+ no_fragmentflg = 1;
+ one_chunk = 0;
+
+ if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
+ /*
+ * if we have a route and an ifp check to see if we
+ * have room to send to this guy
+ */
+ struct ifnet *ifp;
+
+ ifp = net->ro.ro_rt->rt_ifp;
+ if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
+ SCTP_STAT_INCR(sctps_ifnomemqueued);
+#ifdef SCTP_LOG_MAXBURST
+ sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
+#endif
+ continue;
+ }
+ }
+ if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
+ mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
+ } else {
+ mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
+ }
+ mx_mtu = mtu;
+ to_out = 0;
+ if (mtu > asoc->peers_rwnd) {
+ if (asoc->total_flight > 0) {
+ /* We have a packet in flight somewhere */
+ r_mtu = asoc->peers_rwnd;
+ } else {
+ /* We are always allowed to send one MTU out */
+ one_chunk = 1;
+ r_mtu = mtu;
+ }
+ } else {
+ r_mtu = mtu;
+ }
+ /************************/
+ /* Control transmission */
+ /************************/
+ /* Now first lets go through the control queue */
+ for (chk = TAILQ_FIRST(&asoc->control_send_queue);
+ chk; chk = nchk) {
+ nchk = TAILQ_NEXT(chk, sctp_next);
+ if (chk->whoTo != net) {
+ /*
+ * No, not sent to the network we are
+ * looking at
+ */
+ continue;
+ }
+ if (chk->data == NULL) {
+ continue;
+ }
+ if ((chk->data->m_flags & M_PKTHDR) == 0) {
+ /*
+ * NOTE: the chk queue MUST have the PKTHDR
+ * flag set on it with a total in the
+ * m_pkthdr.len field!! else the chunk will
+ * ALWAYS be skipped
+ */
+ continue;
+ }
+ if (chk->sent != SCTP_DATAGRAM_UNSENT) {
+ /*
+ * It must be unsent. Cookies and ASCONFs
+ * hang around, but their timers will force
+ * retransmission when marked for resend.
+ */
+ continue;
+ }
+ /*
+ * if no AUTH is yet included and this chunk
+ * requires it, make sure to account for it. We
+ * don't apply the size until the AUTH chunk is
+ * actually added below in case there is no room for
+ * this chunk. NOTE: we overload the use of "omtu"
+ * here
+ */
+ if ((auth == NULL) &&
+ sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
+ stcb->asoc.peer_auth_chunks)) {
+ omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+ } else
+ omtu = 0;
+ /* Here we do NOT factor the r_mtu */
+ if ((chk->data->m_pkthdr.len < (int)(mtu - omtu)) ||
+ (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
+ /*
+ * We probably should glom the mbuf chain
+ * from the chk->data for control but the
+ * problem is it becomes yet one more level
+ * of tracking to do if for some reason
+ * output fails. Then I have got to
+ * reconstruct the merged control chain.. el
+ * yucko.. for now we take the easy way and
+ * do the copy
+ */
+ /*
+ * Add an AUTH chunk, if chunk requires it
+ * save the offset into the chain for AUTH
+ */
+ if ((auth == NULL) &&
+ (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
+ stcb->asoc.peer_auth_chunks))) {
+ outchain = sctp_add_auth_chunk(outchain,
+ &endoutchain,
+ &auth,
+ &auth_offset,
+ stcb,
+ chk->rec.chunk_id.id);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ }
+ outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
+ (int)chk->rec.chunk_id.can_take_data,
+ chk->data->m_pkthdr.len, chk->copy_by_ref);
+ if (outchain == NULL) {
+ *reason_code = 8;
+ return (ENOMEM);
+ }
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ /* update our MTU size */
+ if (mtu > (chk->data->m_pkthdr.len + omtu))
+ mtu -= (chk->data->m_pkthdr.len + omtu);
+ else
+ mtu = 0;
+ to_out += (chk->data->m_pkthdr.len + omtu);
+ /* Do clear IP_DF ? */
+ if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
+ no_fragmentflg = 0;
+ }
+ if (chk->rec.chunk_id.can_take_data)
+ chk->data = NULL;
+ /* Mark things to be removed, if needed */
+ if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
+ (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
+ (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
+ (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
+ (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
+ (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
+
+ if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST)
+ hbflag = 1;
+ /* remove these chunks at the end */
+ if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) {
+ /* turn off the timer */
+ if (callout_pending(&stcb->asoc.dack_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
+ inp, stcb, net);
+ }
+ }
+ ctl_cnt++;
+ } else {
+ /*
+ * Other chunks, since they have
+ * timers running (i.e. COOKIE or
+ * ASCONF) we just "trust" that it
+ * gets sent or retransmitted.
+ */
+ ctl_cnt++;
+ if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
+ cookie = 1;
+ no_out_cnt = 1;
+ } else if (chk->rec.chunk_id.id == SCTP_ASCONF) {
+ /*
+ * set hb flag since we can
+ * use these for RTO
+ */
+ hbflag = 1;
+ asconf = 1;
+ }
+ chk->sent = SCTP_DATAGRAM_SENT;
+ chk->snd_count++;
+ }
+ if (mtu == 0) {
+ /*
+ * Ok we are out of room but we can
+ * output without affecting the
+ * flight size since this little guy
+ * is a control only packet.
+ */
+ if (asconf) {
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
+ asconf = 0;
+ }
+ if (cookie) {
+ sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
+ cookie = 0;
+ }
+ M_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT);
+ if (outchain == NULL) {
+ /* no memory */
+ error = ENOBUFS;
+ goto error_out_again;
+ }
+ shdr = mtod(outchain, struct sctphdr *);
+ shdr->src_port = inp->sctp_lport;
+ shdr->dest_port = stcb->rport;
+ shdr->v_tag = htonl(stcb->asoc.peer_vtag);
+ shdr->checksum = 0;
+ auth_offset += sizeof(struct sctphdr);
+ if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
+ (struct sockaddr *)&net->ro._l_addr,
+ outchain, auth_offset, auth,
+ no_fragmentflg, 0, NULL, asconf))) {
+ if (error == ENOBUFS) {
+ asoc->ifp_had_enobuf = 1;
+ }
+ SCTP_STAT_INCR(sctps_lowlevelerr);
+ if (from_where == 0) {
+ SCTP_STAT_INCR(sctps_lowlevelerrusr);
+ }
+ error_out_again:
+ /* error, could not output */
+ if (hbflag) {
+ if (*now_filled == 0) {
+ SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
+ *now_filled = 1;
+ *now = net->last_sent_time;
+ } else {
+ net->last_sent_time = *now;
+ }
+ hbflag = 0;
+ }
+ if (error == EHOSTUNREACH) {
+ /*
+ * Destination went
+ * unreachable
+ * during this send
+ */
+ sctp_move_to_an_alt(stcb, asoc, net);
+ }
+ sctp_clean_up_ctl(stcb, asoc);
+ *reason_code = 7;
+ return (error);
+ } else
+ asoc->ifp_had_enobuf = 0;
+ /* Only HB or ASCONF advances time */
+ if (hbflag) {
+ if (*now_filled == 0) {
+ SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
+ *now_filled = 1;
+ *now = net->last_sent_time;
+ } else {
+ net->last_sent_time = *now;
+ }
+ hbflag = 0;
+ }
+ /*
+ * increase the number we sent; if a
+ * cookie is sent we don't tell them
+ * any was sent out.
+ */
+ outchain = endoutchain = NULL;
+ auth = NULL;
+ auth_offset = 0;
+ if (!no_out_cnt)
+ *num_out += ctl_cnt;
+ /* recalc a clean slate and setup */
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ mtu = (net->mtu - SCTP_MIN_OVERHEAD);
+ } else {
+ mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
+ }
+ to_out = 0;
+ no_fragmentflg = 1;
+ }
+ }
+ }
+ /*********************/
+ /* Data transmission */
+ /*********************/
+ /*
+ * if AUTH for DATA is required and no AUTH has been added
+ * yet, account for this in the mtu now... if no data can be
+ * bundled, this adjustment won't matter anyway since the
+ * packet will be going out...
+ */
+ if ((auth == NULL) &&
+ sctp_auth_is_required_chunk(SCTP_DATA,
+ stcb->asoc.peer_auth_chunks)) {
+ mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+ }
+ /* now lets add any data within the MTU constraints */
+ if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
+ if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
+ omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
+ else
+ omtu = 0;
+ } else {
+ if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
+ omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
+ else
+ omtu = 0;
+ }
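+ /*
+ * Editor's note: a quick worked example, assuming a 1500-byte MTU:
+ * IPv4 leaves 1500 - (20 + 12) = 1468 bytes of chunk space, and
+ * IPv6 leaves 1500 - (40 + 12) = 1448 bytes.
+ */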
+ if (((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) ||
+ (cookie)) {
+ for (chk = TAILQ_FIRST(&asoc->send_queue); chk; chk = nchk) {
+ if (no_data_chunks) {
+ /* let only control go out */
+ *reason_code = 1;
+ break;
+ }
+ if (net->flight_size >= net->cwnd) {
+ /* skip this net, no room for data */
+ *reason_code = 2;
+ break;
+ }
+ nchk = TAILQ_NEXT(chk, sctp_next);
+ if (chk->whoTo != net) {
+ /* No, not sent to this net */
+ continue;
+ }
+ if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
+ /*
+ * Strange: we have a chunk that is
+ * too big for its destination and
+ * yet no fragment-OK flag.
+ * Something went wrong when the
+ * PMTU changed and we did not mark
+ * this chunk. Work around it here
+ * by letting IP fragment it and
+ * printing a warning; this really
+ * should not happen...
+ */
+#ifdef SCTP_DEBUG
+ printf("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
+ chk->send_size, mtu);
+#endif
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ }
+ if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
+ ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
+ /* ok we will add this one */
+
+ /*
+ * Add an AUTH chunk, if the chunk
+ * requires it, and save the offset
+ * into the chain for AUTH
+ */
+ if ((auth == NULL) &&
+ (sctp_auth_is_required_chunk(SCTP_DATA,
+ stcb->asoc.peer_auth_chunks))) {
+
+ outchain = sctp_add_auth_chunk(outchain,
+ &endoutchain,
+ &auth,
+ &auth_offset,
+ stcb,
+ SCTP_DATA);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ }
+ outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
+ chk->send_size, chk->copy_by_ref);
+ if (outchain == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
+ printf("No memory?\n");
+ }
+#endif
+ if (!callout_pending(&net->rxt_timer.timer)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+ }
+ *reason_code = 3;
+ return (ENOMEM);
+ }
+ /* update our MTU size */
+ /* Do clear IP_DF ? */
+ if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
+ no_fragmentflg = 0;
+ }
+ /* unsigned subtraction of mtu */
+ if (mtu > chk->send_size)
+ mtu -= chk->send_size;
+ else
+ mtu = 0;
+ /* unsigned subtraction of r_mtu */
+ if (r_mtu > chk->send_size)
+ r_mtu -= chk->send_size;
+ else
+ r_mtu = 0;
+
+ to_out += chk->send_size;
+ if (to_out > mx_mtu) {
+#ifdef INVARIANTS
+ panic("gag");
+#else
+ printf("Exceeding mtu of %d out size is %d\n",
+ mx_mtu, to_out);
+#endif
+ }
+ data_list[bundle_at++] = chk;
+ if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
+ mtu = 0;
+ break;
+ }
+ if (chk->sent == SCTP_DATAGRAM_UNSENT) {
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
+ SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
+ } else {
+ SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
+ }
+ if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
+ ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
+ SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
+ }
+ if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
+ break;
+ }
+ } else {
+ /*
+ * Must be sent in order of the
+ * TSN's (on a network)
+ */
+ break;
+ }
+ } /* for () */
+ } /* if asoc.state OPEN */
+ /* Is there something to send for this destination? */
+ if (outchain) {
+ /* We may need to start a control timer or two */
+ if (asconf) {
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
+ asconf = 0;
+ }
+ if (cookie) {
+ sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
+ cookie = 0;
+ }
+ /* must start a send timer if data is being sent */
+ if (bundle_at && (!callout_pending(&net->rxt_timer.timer))) {
+ /*
+ * no timer running on this destination
+ * restart it.
+ */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+ }
+ /* Now send it, if there is anything to send :> */
+ M_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT);
+ if (outchain == NULL) {
+ /* out of mbufs */
+ error = ENOBUFS;
+ goto errored_send;
+ }
+ shdr = mtod(outchain, struct sctphdr *);
+ shdr->src_port = inp->sctp_lport;
+ shdr->dest_port = stcb->rport;
+ shdr->v_tag = htonl(stcb->asoc.peer_vtag);
+ shdr->checksum = 0;
+ auth_offset += sizeof(struct sctphdr);
+ if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
+ (struct sockaddr *)&net->ro._l_addr,
+ outchain,
+ auth_offset,
+ auth,
+ no_fragmentflg,
+ bundle_at,
+ data_list[0],
+ asconf))) {
+ /* error, we could not output */
+ if (error == ENOBUFS) {
+ asoc->ifp_had_enobuf = 1;
+ }
+ SCTP_STAT_INCR(sctps_lowlevelerr);
+ if (from_where == 0) {
+ SCTP_STAT_INCR(sctps_lowlevelerrusr);
+ }
+ errored_send:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
+ printf("Gak send error %d\n", error);
+ }
+#endif
+ if (hbflag) {
+ if (*now_filled == 0) {
+ SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
+ *now_filled = 1;
+ *now = net->last_sent_time;
+ } else {
+ net->last_sent_time = *now;
+ }
+ hbflag = 0;
+ }
+ if (error == EHOSTUNREACH) {
+ /*
+ * Destination went unreachable
+ * during this send
+ */
+ sctp_move_to_an_alt(stcb, asoc, net);
+ }
+ sctp_clean_up_ctl(stcb, asoc);
+ *reason_code = 6;
+ return (error);
+ } else {
+ asoc->ifp_had_enobuf = 0;
+ }
+ outchain = endoutchain = NULL;
+ auth = NULL;
+ auth_offset = 0;
+ if (bundle_at || hbflag) {
+ /* For data/asconf and hb set time */
+ if (*now_filled == 0) {
+ SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
+ *now_filled = 1;
+ *now = net->last_sent_time;
+ } else {
+ net->last_sent_time = *now;
+ }
+ }
+ if (!no_out_cnt) {
+ *num_out += (ctl_cnt + bundle_at);
+ }
+ if (bundle_at) {
+ /* if (!net->rto_pending) { */
+ /* setup for a RTO measurement */
+ /* net->rto_pending = 1; */
+ tsns_sent = data_list[0]->rec.data.TSN_seq;
+
+ data_list[0]->do_rtt = 1;
+ /* } else { */
+ /* data_list[0]->do_rtt = 0; */
+ /* } */
+ SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
+ sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
+ if (sctp_early_fr) {
+ if (net->flight_size < net->cwnd) {
+ /* start or restart it */
+ if (callout_pending(&net->fr_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net);
+ }
+ SCTP_STAT_INCR(sctps_earlyfrstrout);
+ sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net);
+ } else {
+ /* stop it if its running */
+ if (callout_pending(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpout);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net);
+ }
+ }
+ }
+ }
+ if (one_chunk) {
+ break;
+ }
+ }
+#ifdef SCTP_CWND_LOGGING
+ sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
+#endif
+ }
+ if (old_startat == NULL) {
+ old_startat = send_start_at;
+ send_start_at = TAILQ_FIRST(&asoc->nets);
+ goto again_one_more_time;
+ }
+ /*
+ * At the end there should be no NON timed chunks hanging on this
+ * queue.
+ */
+#ifdef SCTP_CWND_LOGGING
+ sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
+#endif
+ if ((*num_out == 0) && (*reason_code == 0)) {
+ *reason_code = 4;
+ } else {
+ *reason_code = 5;
+ }
+ sctp_clean_up_ctl(stcb, asoc);
+ return (0);
+}
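+/*
+ * Editor's note: the *reason_code values set above, for reference:
+ * 1 = only control was allowed out, 2 = no cwnd room for data,
+ * 3 = out of mbufs while bundling data, 4 = nothing was sent,
+ * 5 = something was sent, 6 = low-level send error on a data packet,
+ * 7 = low-level send error on a control-only packet.
+ */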
+
+void
+sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
+{
+ /*
+ * Prepend a OPERATIONAL_ERROR chunk header and put on the end of
+ * the control chunk queue.
+ */
+ struct sctp_chunkhdr *hdr;
+ struct sctp_tmit_chunk *chk;
+ struct mbuf *mat;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(op_err);
+ return;
+ }
+ chk->copy_by_ref = 0;
+ M_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT);
+ if (op_err == NULL) {
+ sctp_free_a_chunk(stcb, chk);
+ return;
+ }
+ chk->send_size = 0;
+ mat = op_err;
+ while (mat != NULL) {
+ chk->send_size += mat->m_len;
+ mat = mat->m_next;
+ }
+ chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->data = op_err;
+ chk->whoTo = chk->asoc->primary_destination;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ hdr = mtod(op_err, struct sctp_chunkhdr *);
+ hdr->chunk_type = SCTP_OPERATION_ERROR;
+ hdr->chunk_flags = 0;
+ hdr->chunk_length = htons(chk->send_size);
+ TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
+ chk,
+ sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+}
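+/*
+ * Editor's note: a minimal usage sketch for sctp_queue_op_err(), not
+ * part of the original change. The cause code
+ * SCTP_CAUSE_PROTOCOL_VIOLATION and the helper name are illustrative
+ * assumptions; any error cause built as a TLV in an mbuf can be
+ * queued this way.
+ */
+#if 0
+static void
+example_queue_op_err(struct sctp_tcb *stcb)
+{
+	struct mbuf *op_err;
+	struct sctp_paramhdr *ph;
+
+	/* build a minimal error-cause TLV in its own mbuf */
+	op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 1,
+	    M_DONTWAIT, 1, MT_DATA);
+	if (op_err == NULL)
+		return;
+	op_err->m_len = sizeof(struct sctp_paramhdr);
+	ph = mtod(op_err, struct sctp_paramhdr *);
+	ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+	ph->param_length = htons(sizeof(struct sctp_paramhdr));
+	/* sctp_queue_op_err() prepends the chunk header and queues it */
+	sctp_queue_op_err(stcb, op_err);
+}
+#endif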
+
+int
+sctp_send_cookie_echo(struct mbuf *m,
+ int offset,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ /*
+ * pull out the cookie and put it at the front of the control chunk
+ * queue.
+ */
+ int at;
+ struct mbuf *cookie, *mat;
+ struct sctp_paramhdr parm, *phdr;
+ struct sctp_chunkhdr *hdr;
+ struct sctp_tmit_chunk *chk;
+ uint16_t ptype, plen;
+
+ /* First find the cookie in the param area */
+ cookie = NULL;
+ at = offset + sizeof(struct sctp_init_chunk);
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ do {
+ phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
+ if (phdr == NULL) {
+ return (-3);
+ }
+ ptype = ntohs(phdr->param_type);
+ plen = ntohs(phdr->param_length);
+ if (ptype == SCTP_STATE_COOKIE) {
+ int pad;
+
+ /* found the cookie */
+ if ((pad = (plen % 4))) {
+ plen += 4 - pad;
+ }
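+ /* e.g. a 102-byte cookie parameter rounds up to 104 bytes */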
+ cookie = sctp_m_copym(m, at, plen, M_DONTWAIT);
+ if (cookie == NULL) {
+ /* No memory */
+ return (-2);
+ }
+ break;
+ }
+ at += SCTP_SIZE32(plen);
+ } while (phdr);
+ if (cookie == NULL) {
+ /* Did not find the cookie */
+ return (-3);
+ }
+ /* ok, we got the cookie; let's change it into a COOKIE-ECHO chunk */
+
+ /* first the change from param to cookie */
+ hdr = mtod(cookie, struct sctp_chunkhdr *);
+ hdr->chunk_type = SCTP_COOKIE_ECHO;
+ hdr->chunk_flags = 0;
+ /* now we MUST have a PKTHDR on it */
+ if ((cookie->m_flags & M_PKTHDR) != M_PKTHDR) {
+ /* we hope this happens rarely */
+ mat = sctp_get_mbuf_for_msg(8, 1, M_DONTWAIT, 1, MT_HEADER);
+ if (mat == NULL) {
+ sctp_m_freem(cookie);
+ return (-4);
+ }
+ mat->m_len = 0;
+ mat->m_pkthdr.rcvif = 0;
+ mat->m_next = cookie;
+ cookie = mat;
+ }
+ cookie->m_pkthdr.len = plen;
+ /* get the chunk stuff now and place it in the FRONT of the queue */
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(cookie);
+ return (-5);
+ }
+ chk->copy_by_ref = 0;
+ chk->send_size = cookie->m_pkthdr.len;
+ chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
+ chk->rec.chunk_id.can_take_data = 0;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->data = cookie;
+ chk->whoTo = chk->asoc->primary_destination;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+ return (0);
+}
+
+void
+sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
+ struct mbuf *m,
+ int offset,
+ int chk_length,
+ struct sctp_nets *net)
+{
+ /*
+ * Take a HEARTBEAT request, turn it into a HEARTBEAT-ACK,
+ * and queue it for sending.
+ */
+ struct mbuf *outchain;
+ struct sctp_chunkhdr *chdr;
+ struct sctp_tmit_chunk *chk;
+
+
+ if (net == NULL)
+ /* must have a net pointer */
+ return;
+
+ outchain = sctp_m_copym(m, offset, chk_length, M_DONTWAIT);
+ if (outchain == NULL) {
+ /* gak out of memory */
+ return;
+ }
+ chdr = mtod(outchain, struct sctp_chunkhdr *);
+ chdr->chunk_type = SCTP_HEARTBEAT_ACK;
+ chdr->chunk_flags = 0;
+ if ((outchain->m_flags & M_PKTHDR) != M_PKTHDR) {
+ /* should not happen but we are cautious. */
+ struct mbuf *tmp;
+
+ tmp = sctp_get_mbuf_for_msg(1, 1, M_DONTWAIT, 1, MT_HEADER);
+ if (tmp == NULL) {
+ return;
+ }
+ tmp->m_len = 0;
+ tmp->m_pkthdr.rcvif = 0;
+ tmp->m_next = outchain;
+ outchain = tmp;
+ }
+ outchain->m_pkthdr.len = chk_length;
+ if (chk_length % 4) {
+ /* need pad */
+ uint32_t cpthis = 0;
+ int padlen;
+
+ padlen = 4 - (outchain->m_pkthdr.len % 4);
+ m_copyback(outchain, outchain->m_pkthdr.len, padlen,
+ (caddr_t)&cpthis);
+ }
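+ /* like all chunks, the HB-ACK must end on a 4-byte boundary */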
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(outchain);
+ return;
+ }
+ chk->copy_by_ref = 0;
+ chk->send_size = chk_length;
+ chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->data = outchain;
+ chk->whoTo = net;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+}
+
+int
+sctp_send_cookie_ack(struct sctp_tcb *stcb)
+{
+ /* formulate and queue a cookie-ack back to sender */
+ struct mbuf *cookie_ack;
+ struct sctp_chunkhdr *hdr;
+ struct sctp_tmit_chunk *chk;
+
+ cookie_ack = NULL;
+ SCTP_TCB_LOCK_ASSERT(stcb);
+
+ cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 1, M_DONTWAIT, 1, MT_HEADER);
+ if (cookie_ack == NULL) {
+ /* no mbuf's */
+ return (-1);
+ }
+ cookie_ack->m_data += SCTP_MIN_OVERHEAD;
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(cookie_ack);
+ return (-1);
+ }
+ chk->copy_by_ref = 0;
+ chk->send_size = sizeof(struct sctp_chunkhdr);
+ chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->data = cookie_ack;
+ if (chk->asoc->last_control_chunk_from != NULL) {
+ chk->whoTo = chk->asoc->last_control_chunk_from;
+ } else {
+ chk->whoTo = chk->asoc->primary_destination;
+ }
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
+ hdr->chunk_type = SCTP_COOKIE_ACK;
+ hdr->chunk_flags = 0;
+ hdr->chunk_length = htons(chk->send_size);
+ cookie_ack->m_pkthdr.len = cookie_ack->m_len = chk->send_size;
+ cookie_ack->m_pkthdr.rcvif = 0;
+ TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+ return (0);
+}
+
+
+int
+sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ /* formulate and queue a SHUTDOWN-ACK back to the sender */
+ struct mbuf *m_shutdown_ack;
+ struct sctp_shutdown_ack_chunk *ack_cp;
+ struct sctp_tmit_chunk *chk;
+
+ m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 1, M_DONTWAIT, 1, MT_HEADER);
+ if (m_shutdown_ack == NULL) {
+ /* no mbuf's */
+ return (-1);
+ }
+ m_shutdown_ack->m_data += SCTP_MIN_OVERHEAD;
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(m_shutdown_ack);
+ return (-1);
+ }
+ chk->copy_by_ref = 0;
+
+ chk->send_size = sizeof(struct sctp_chunkhdr);
+ chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->data = m_shutdown_ack;
+ chk->whoTo = net;
+ atomic_add_int(&net->ref_count, 1);
+
+ ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
+ ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
+ ack_cp->ch.chunk_flags = 0;
+ ack_cp->ch.chunk_length = htons(chk->send_size);
+ m_shutdown_ack->m_pkthdr.len = m_shutdown_ack->m_len = chk->send_size;
+ m_shutdown_ack->m_pkthdr.rcvif = 0;
+ TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+ return (0);
+}
+
+int
+sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ /* formulate and queue a SHUTDOWN to the sender */
+ struct mbuf *m_shutdown;
+ struct sctp_shutdown_chunk *shutdown_cp;
+ struct sctp_tmit_chunk *chk;
+
+ m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 1, M_DONTWAIT, 1, MT_HEADER);
+ if (m_shutdown == NULL) {
+ /* no mbuf's */
+ return (-1);
+ }
+ m_shutdown->m_data += SCTP_MIN_OVERHEAD;
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(m_shutdown);
+ return (-1);
+ }
+ chk->copy_by_ref = 0;
+ chk->send_size = sizeof(struct sctp_shutdown_chunk);
+ chk->rec.chunk_id.id = SCTP_SHUTDOWN;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->data = m_shutdown;
+ chk->whoTo = net;
+ atomic_add_int(&net->ref_count, 1);
+
+ shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
+ shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
+ shutdown_cp->ch.chunk_flags = 0;
+ shutdown_cp->ch.chunk_length = htons(chk->send_size);
+ shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
+ m_shutdown->m_pkthdr.len = m_shutdown->m_len = chk->send_size;
+ m_shutdown->m_pkthdr.rcvif = 0;
+ TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+ return (0);
+}
+
+int
+sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ /*
+ * Formulate and queue an ASCONF to the peer; the ASCONF
+ * parameters should already be queued on the assoc queue.
+ */
+ struct sctp_tmit_chunk *chk;
+ struct mbuf *m_asconf;
+ struct sctp_asconf_chunk *acp;
+
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ /* compose an ASCONF chunk, maximum length is PMTU */
+ m_asconf = sctp_compose_asconf(stcb);
+ if (m_asconf == NULL) {
+ return (-1);
+ }
+ acp = mtod(m_asconf, struct sctp_asconf_chunk *);
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(m_asconf);
+ return (-1);
+ }
+ chk->copy_by_ref = 0;
+ chk->data = m_asconf;
+ chk->send_size = m_asconf->m_pkthdr.len;
+ chk->rec.chunk_id.id = SCTP_ASCONF;
+ chk->rec.chunk_id.can_take_data = 0;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->whoTo = chk->asoc->primary_destination;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+ return (0);
+}
+
+int
+sctp_send_asconf_ack(struct sctp_tcb *stcb, uint32_t retrans)
+{
+ /*
+ * Formulate and queue an ASCONF-ACK back to the sender; the
+ * ASCONF-ACK must be stored in the tcb.
+ */
+ struct sctp_tmit_chunk *chk;
+ struct mbuf *m_ack;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ /* is there an asconf-ack mbuf chain to send? */
+ if (stcb->asoc.last_asconf_ack_sent == NULL) {
+ return (-1);
+ }
+ /* copy the asconf_ack */
+ /*
+ * Supposedly m_copypacket() is an optimization; use it if we can.
+ */
+ if (stcb->asoc.last_asconf_ack_sent->m_flags & M_PKTHDR) {
+ m_ack = m_copypacket(stcb->asoc.last_asconf_ack_sent, M_DONTWAIT);
+ } else
+ m_ack = m_copy(stcb->asoc.last_asconf_ack_sent, 0, M_COPYALL);
+
+ if (m_ack == NULL) {
+ /* couldn't copy it */
+
+ return (-1);
+ }
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ if (m_ack)
+ sctp_m_freem(m_ack);
+ return (-1);
+ }
+ chk->copy_by_ref = 0;
+ /* figure out where it goes to */
+ if (retrans) {
+ /* we're doing a retransmission */
+ if (stcb->asoc.used_alt_asconfack > 2) {
+ /* tried alternate nets already, go back */
+ chk->whoTo = NULL;
+ } else {
+ /* need to try an alternate net */
+ chk->whoTo = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
+ stcb->asoc.used_alt_asconfack++;
+ }
+ if (chk->whoTo == NULL) {
+ /* no alternate */
+ if (stcb->asoc.last_control_chunk_from == NULL)
+ chk->whoTo = stcb->asoc.primary_destination;
+ else
+ chk->whoTo = stcb->asoc.last_control_chunk_from;
+ stcb->asoc.used_alt_asconfack = 0;
+ }
+ } else {
+ /* normal case */
+ if (stcb->asoc.last_control_chunk_from == NULL)
+ chk->whoTo = stcb->asoc.primary_destination;
+ else
+ chk->whoTo = stcb->asoc.last_control_chunk_from;
+ stcb->asoc.used_alt_asconfack = 0;
+ }
+ chk->data = m_ack;
+ chk->send_size = m_ack->m_pkthdr.len;
+ chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+ return (0);
+}
+
+
+static int
+sctp_chunk_retransmission(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int *cnt_out, struct timeval *now, int *now_filled)
+{
+ /*
+ * send out one MTU of retransmission. If fast_retransmit is
+ * happening we ignore the cwnd. Otherwise we obey the cwnd and
+ * rwnd. For a Cookie or Asconf in the control chunk queue we
+ * retransmit them by themselves.
+ *
+ * For data chunks we will pick out the lowest TSNs in the sent_queue
+ * marked for resend and bundle them all together (up to the
+ * destination's MTU). The address to send to should have been
+ * selected/changed where the retransmission was marked (i.e. in FR
+ * or t3-timeout routines).
+ */
+ struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
+ struct sctp_tmit_chunk *chk, *fwd;
+ struct mbuf *m, *endofchain;
+ struct sctphdr *shdr;
+ int asconf;
+ struct sctp_nets *net;
+ uint32_t tsns_sent = 0;
+ int no_fragmentflg, bundle_at, cnt_thru;
+ unsigned int mtu;
+ int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
+ struct sctp_auth_chunk *auth = NULL;
+ uint32_t auth_offset = 0;
+ uint32_t dmtu = 0;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ tmr_started = ctl_cnt = bundle_at = error = 0;
+ no_fragmentflg = 1;
+ asconf = 0;
+ fwd_tsn = 0;
+ *cnt_out = 0;
+ fwd = NULL;
+ endofchain = m = NULL;
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xC3, 1);
+#endif
+ if (TAILQ_EMPTY(&asoc->sent_queue)) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("SCTP hits empty queue with cnt set to %d?\n",
+ asoc->sent_queue_retran_cnt);
+ }
+#endif
+ asoc->sent_queue_cnt = 0;
+ asoc->sent_queue_cnt_removeable = 0;
+ }
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
+ (chk->rec.chunk_id.id == SCTP_ASCONF) ||
+ (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
+ (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
+ if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
+ if (chk != asoc->str_reset) {
+ /*
+ * not eligible for retransmission
+ * if it is not ours
+ */
+ continue;
+ }
+ }
+ ctl_cnt++;
+ if (chk->rec.chunk_id.id == SCTP_ASCONF) {
+ no_fragmentflg = 1;
+ asconf = 1;
+ }
+ if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
+ fwd_tsn = 1;
+ fwd = chk;
+ }
+ /*
+ * Add an AUTH chunk, if the chunk requires it,
+ * and save the offset into the chain for AUTH.
+ */
+ if ((auth == NULL) &&
+ (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
+ stcb->asoc.peer_auth_chunks))) {
+ m = sctp_add_auth_chunk(m, &endofchain,
+ &auth, &auth_offset,
+ stcb,
+ chk->rec.chunk_id.id);
+ }
+ m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
+ break;
+ }
+ }
+ one_chunk = 0;
+ cnt_thru = 0;
+ /* do we have control chunks to retransmit? */
+ if (m != NULL) {
+ /* Start a timer whether we succeed or fail */
+ if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
+ sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
+ } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
+
+ M_PREPEND(m, sizeof(struct sctphdr), M_DONTWAIT);
+ if (m == NULL) {
+ return (ENOBUFS);
+ }
+ shdr = mtod(m, struct sctphdr *);
+ shdr->src_port = inp->sctp_lport;
+ shdr->dest_port = stcb->rport;
+ shdr->v_tag = htonl(stcb->asoc.peer_vtag);
+ shdr->checksum = 0;
+ auth_offset += sizeof(struct sctphdr);
+ chk->snd_count++; /* update our count */
+
+ if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
+ (struct sockaddr *)&chk->whoTo->ro._l_addr, m, auth_offset,
+ auth, no_fragmentflg, 0, NULL, asconf))) {
+ SCTP_STAT_INCR(sctps_lowlevelerr);
+ return (error);
+ }
+ m = endofchain = NULL;
+ auth = NULL;
+ auth_offset = 0;
+ /*
+ * We don't want to mark net->last_sent_time here, since we
+ * use that for HBs and retransmissions cannot measure RTT.
+ */
+ /* SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
+ *cnt_out += 1;
+ chk->sent = SCTP_DATAGRAM_SENT;
+ sctp_ucount_decr(asoc->sent_queue_retran_cnt);
+ if (fwd_tsn == 0) {
+ return (0);
+ } else {
+ /* Clean up the fwd-tsn list */
+ sctp_clean_up_ctl(stcb, asoc);
+ return (0);
+ }
+ }
+ /*
+ * Ok, it is just data retransmission we need to do, possibly
+ * along with a FWD-TSN.
+ */
+ if (TAILQ_EMPTY(&asoc->sent_queue)) {
+ return (-1);
+ }
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
+ /* not yet open, resend the cookie and that is it */
+ return (1);
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(20, inp, stcb, NULL);
+#endif
+ TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+ if (chk->sent != SCTP_DATAGRAM_RESEND) {
+ /* not marked for retransmission */
+ continue;
+ }
+ /* pick up the net */
+ net = chk->whoTo;
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ mtu = (net->mtu - SCTP_MIN_OVERHEAD);
+ } else {
+ mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
+ }
+
+ if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
+ /* No room in peers rwnd */
+ uint32_t tsn;
+
+ tsn = asoc->last_acked_seq + 1;
+ if (tsn == chk->rec.data.TSN_seq) {
+ /*
+ * We make a special exception for this
+ * case: the peer has no rwnd but is missing
+ * the lowest chunk, which is probably what
+ * is holding up the rwnd.
+ */
+ goto one_chunk_around;
+ }
+ return (1);
+ }
+one_chunk_around:
+ if (asoc->peers_rwnd < mtu) {
+ one_chunk = 1;
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xC3, 2);
+#endif
+ bundle_at = 0;
+ m = NULL;
+ net->fast_retran_ip = 0;
+ if (chk->rec.data.doing_fast_retransmit == 0) {
+ /*
+ * If no FR is in progress, skip destinations whose
+ * flight_size >= cwnd.
+ */
+ if (net->flight_size >= net->cwnd) {
+ continue;
+ }
+ } else {
+ /*
+ * Mark the destination net to have FR recovery
+ * limits put on it.
+ */
+ net->fast_retran_ip = 1;
+ }
+
+ /*
+ * if no AUTH is yet included and this chunk requires it,
+ * make sure to account for it. We don't apply the size
+ * until the AUTH chunk is actually added below in case
+ * there is no room for this chunk.
+ */
+ if ((auth == NULL) &&
+ sctp_auth_is_required_chunk(SCTP_DATA,
+ stcb->asoc.peer_auth_chunks)) {
+ dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+ } else
+ dmtu = 0;
+
+ if ((chk->send_size <= (mtu - dmtu)) ||
+ (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
+ /* ok we will add this one */
+ if ((auth == NULL) &&
+ (sctp_auth_is_required_chunk(SCTP_DATA,
+ stcb->asoc.peer_auth_chunks))) {
+ m = sctp_add_auth_chunk(m, &endofchain,
+ &auth, &auth_offset,
+ stcb, SCTP_DATA);
+ }
+ m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
+ if (m == NULL) {
+ return (ENOMEM);
+ }
+ /* Do clear IP_DF ? */
+ if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
+ no_fragmentflg = 0;
+ }
+ /* update our MTU size */
+ if (mtu > (chk->send_size + dmtu))
+ mtu -= (chk->send_size + dmtu);
+ else
+ mtu = 0;
+ data_list[bundle_at++] = chk;
+ if (one_chunk && (asoc->total_flight <= 0)) {
+ SCTP_STAT_INCR(sctps_windowprobed);
+ chk->rec.data.state_flags |= SCTP_WINDOW_PROBE;
+ }
+ }
+ if (one_chunk == 0) {
+ /*
+ * Now, are there any more chunks forward of chk to
+ * pick up?
+ */
+ fwd = TAILQ_NEXT(chk, sctp_next);
+ while (fwd) {
+ if (fwd->sent != SCTP_DATAGRAM_RESEND) {
+ /* Nope, not for retran */
+ fwd = TAILQ_NEXT(fwd, sctp_next);
+ continue;
+ }
+ if (fwd->whoTo != net) {
+ /* Nope, not the net in question */
+ fwd = TAILQ_NEXT(fwd, sctp_next);
+ continue;
+ }
+ if ((auth == NULL) &&
+ sctp_auth_is_required_chunk(SCTP_DATA,
+ stcb->asoc.peer_auth_chunks)) {
+ dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+ } else
+ dmtu = 0;
+ if (fwd->send_size <= (mtu - dmtu)) {
+ if ((auth == NULL) &&
+ (sctp_auth_is_required_chunk(SCTP_DATA,
+ stcb->asoc.peer_auth_chunks))) {
+ m = sctp_add_auth_chunk(m,
+ &endofchain,
+ &auth, &auth_offset,
+ stcb,
+ SCTP_DATA);
+ }
+ m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
+ if (m == NULL) {
+ return (ENOMEM);
+ }
+ /* Do clear IP_DF ? */
+ if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
+ no_fragmentflg = 0;
+ }
+ /* update our MTU size */
+ if (mtu > (fwd->send_size + dmtu))
+ mtu -= (fwd->send_size + dmtu);
+ else
+ mtu = 0;
+ data_list[bundle_at++] = fwd;
+ if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
+ break;
+ }
+ fwd = TAILQ_NEXT(fwd, sctp_next);
+ } else {
+ /* can't fit so we are done */
+ break;
+ }
+ }
+ }
+ /* Is there something to send for this destination? */
+ if (m) {
+ /*
+ * Whether we fail or succeed we should start a
+ * timer. A failure is like a lost IP packet :-)
+ */
+ if (!callout_pending(&net->rxt_timer.timer)) {
+ /*
+ * no timer running on this destination
+ * restart it.
+ */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+ tmr_started = 1;
+ }
+ M_PREPEND(m, sizeof(struct sctphdr), M_DONTWAIT);
+ if (m == NULL) {
+ return (ENOBUFS);
+ }
+ shdr = mtod(m, struct sctphdr *);
+ shdr->src_port = inp->sctp_lport;
+ shdr->dest_port = stcb->rport;
+ shdr->v_tag = htonl(stcb->asoc.peer_vtag);
+ shdr->checksum = 0;
+ auth_offset += sizeof(struct sctphdr);
+ /* Now lets send it, if there is anything to send :> */
+ if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
+ (struct sockaddr *)&net->ro._l_addr, m, auth_offset,
+ auth, no_fragmentflg, 0, NULL, asconf))) {
+ /* error, we could not output */
+ SCTP_STAT_INCR(sctps_lowlevelerr);
+ return (error);
+ }
+ m = endofchain = NULL;
+ auth = NULL;
+ auth_offset = 0;
+ /* For HB's */
+ /*
+ * We don't want to mark net->last_sent_time here,
+ * since we use that for HBs and retransmissions
+ * cannot measure RTT.
+ */
+ /* SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
+
+ /* For auto-close */
+ cnt_thru++;
+ if (*now_filled == 0) {
+ SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
+ *now = asoc->time_last_sent;
+ *now_filled = 1;
+ } else {
+ asoc->time_last_sent = *now;
+ }
+ *cnt_out += bundle_at;
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xC4, bundle_at);
+#endif
+ if (bundle_at) {
+ tsns_sent = data_list[0]->rec.data.TSN_seq;
+ }
+ for (i = 0; i < bundle_at; i++) {
+ SCTP_STAT_INCR(sctps_sendretransdata);
+ data_list[i]->sent = SCTP_DATAGRAM_SENT;
+ /*
+ * When we have a revoked data, and we
+ * retransmit it, then we clear the revoked
+ * flag since this flag dictates if we
+ * subtracted from the fs
+ */
+ data_list[i]->rec.data.chunk_was_revoked = 0;
+ data_list[i]->snd_count++;
+ sctp_ucount_decr(asoc->sent_queue_retran_cnt);
+ /* record the time */
+ data_list[i]->sent_rcv_time = asoc->time_last_sent;
+ if (asoc->sent_queue_retran_cnt < 0) {
+ asoc->sent_queue_retran_cnt = 0;
+ }
+ net->flight_size += data_list[i]->book_size;
+ asoc->total_flight += data_list[i]->book_size;
+ if (data_list[i]->book_size_scale) {
+ /*
+ * need to double the book size on
+ * this one
+ */
+ data_list[i]->book_size_scale = 0;
+ data_list[i]->book_size *= 2;
+ } else {
+ sctp_ucount_incr(asoc->total_flight_count);
+#ifdef SCTP_LOG_RWND
+ sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
+ asoc->peers_rwnd, data_list[i]->send_size, sctp_peer_chunk_oh);
+#endif
+ asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
+ (uint32_t) (data_list[i]->send_size +
+ sctp_peer_chunk_oh));
+ }
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ if ((i == 0) &&
+ (data_list[i]->rec.data.doing_fast_retransmit)) {
+ SCTP_STAT_INCR(sctps_sendfastretrans);
+ if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
+ (tmr_started == 0)) {
+ /*
+ * ok we just fast-retrans'd
+ * the lowest TSN, i.e the
+ * first on the list. In
+ * this case we want to give
+ * some more time to get a
+ * SACK back without a
+ * t3-expiring.
+ */
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+ }
+ }
+ }
+#ifdef SCTP_CWND_LOGGING
+ sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
+#endif
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(21, inp, stcb, NULL);
+#endif
+ } else {
+ /* None will fit */
+ return (1);
+ }
+ if (asoc->sent_queue_retran_cnt <= 0) {
+ /* all done we have no more to retran */
+ asoc->sent_queue_retran_cnt = 0;
+ break;
+ }
+ if (one_chunk) {
+ /* No more room in rwnd */
+ return (1);
+ }
+ /* stop the for loop here. we sent out a packet */
+ break;
+ }
+ return (0);
+}
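+/*
+ * Editor's note on the return convention above: 0 means one packet of
+ * retransmissions (or a control chunk) went out, a positive value means
+ * nothing more can go out right now (no rwnd/cwnd room, or an errno
+ * from the low-level send), and a negative value means the
+ * retransmission count was stale and normal output should proceed.
+ */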
+
+
+static int
+sctp_timer_validation(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int ret)
+{
+ struct sctp_nets *net;
+
+ /* Validate that a timer is running somewhere */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (callout_pending(&net->rxt_timer.timer)) {
+ /* Here is a timer */
+ return (ret);
+ }
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ /* Gak, we did not have a timer somewhere */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
+ printf("Deadlock avoided starting timer on a dest at retran\n");
+ }
+#endif
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
+ return (ret);
+}
+
+int
+sctp_chunk_output(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ int from_where)
+{
+ /*
+ * Ok, this is the generic chunk service queue. We must do the
+ * following:
+ * - See if there are retransmits pending; if so we must do these
+ *   first and return.
+ * - Service the stream queue that is next, moving any message
+ *   (note: a complete message, i.e. FIRST/MIDDLE and LAST, must go
+ *   to the out queue in one pass) and assigning TSNs.
+ * - Check to see if the cwnd/rwnd allows any output; if so,
+ *   formulate and send the low-level chunks, making sure to
+ *   combine any control in the control chunk queue as well.
+ */
+ struct sctp_association *asoc;
+ struct sctp_nets *net;
+ int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0,
+ burst_cnt = 0, burst_limit = 0;
+ struct timeval now;
+ int now_filled = 0;
+ int cwnd_full = 0;
+ int nagle_on = 0;
+ int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
+ int un_sent = 0;
+
+ asoc = &stcb->asoc;
+ if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
+ nagle_on = 0;
+ } else {
+ nagle_on = 1;
+ }
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+
+ un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
+
+
+ if ((un_sent <= 0) &&
+ (TAILQ_EMPTY(&asoc->control_send_queue)) &&
+ (asoc->sent_queue_retran_cnt == 0)) {
+ /* Nothing to do unless there is something left to be sent */
+ return (error);
+ }
+ /*
+ * Do we have something to send (data or control) AND a sack timer
+ * running? If so, piggy-back the sack.
+ */
+ if (callout_pending(&stcb->asoc.dack_timer.timer)) {
+ sctp_send_sack(stcb);
+ callout_stop(&stcb->asoc.dack_timer.timer);
+ }
+ while (asoc->sent_queue_retran_cnt) {
+ /*
+ * Ok, it is retransmission time only, we send out only ONE
+ * packet with a single call off to the retran code.
+ */
+ if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
+ /* if its not from a HB then do it */
+ ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled);
+ } else {
+ /*
+ * it's from any other place; we don't allow
+ * retransmission output (only control)
+ */
+ ret = 1;
+ }
+ if (ret > 0) {
+ /* Can't send anymore */
+ /*
+ * Now let's push out control by calling med-level
+ * output once; this assures that we WILL send HBs
+ * if queued too.
+ */
+ (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
+ &cwnd_full, from_where,
+ &now, &now_filled, frag_point);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(8, inp, stcb, NULL);
+#endif
+ return (sctp_timer_validation(inp, stcb, asoc, ret));
+ }
+ if (ret < 0) {
+ /*
+ * The count was off; no retransmission is actually
+ * pending, so fall through to normal transmission.
+ */
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(9, inp, stcb, NULL);
+#endif
+ break;
+ }
+ if (from_where == SCTP_OUTPUT_FROM_T3) {
+ /* Only one transmission allowed out of a timeout */
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(10, inp, stcb, NULL);
+#endif
+ /* Push out any control */
+ (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, &cwnd_full, from_where,
+ &now, &now_filled, frag_point);
+ return (ret);
+ }
+ if ((num_out == 0) && (ret == 0)) {
+ /* No more retrans to send */
+ break;
+ }
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(12, inp, stcb, NULL);
+#endif
+ /* Check for bad destinations, if they exist move chunks around. */
+ burst_limit = asoc->max_burst;
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
+ SCTP_ADDR_NOT_REACHABLE) {
+ /*
+ * If possible, move things off of this address. We
+ * may still send below due to the dormant state, but
+ * we try to find an alternate address to send to;
+ * if we have one, we move all queued data on the
+ * out wheel to that alternate address.
+ */
+ if (net->ref_count > 1)
+ sctp_move_to_an_alt(stcb, asoc, net);
+ } else {
+ /*
+ * if ((asoc->sat_network) || (net->addr_is_local))
+ * { burst_limit = asoc->max_burst *
+ * SCTP_SAT_NETWORK_BURST_INCR; }
+ */
+ if (sctp_use_cwnd_based_maxburst) {
+ if ((net->flight_size + (burst_limit * net->mtu)) < net->cwnd) {
+ int old_cwnd;
+
+ if (net->ssthresh < net->cwnd)
+ net->ssthresh = net->cwnd;
+ old_cwnd = net->cwnd;
+ net->cwnd = (net->flight_size + (burst_limit * net->mtu));
+
+#ifdef SCTP_CWND_MONITOR
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
+#endif
+
+#ifdef SCTP_LOG_MAXBURST
+ sctp_log_maxburst(stcb, net, 0, burst_limit, SCTP_MAX_BURST_APPLIED);
+#endif
+ SCTP_STAT_INCR(sctps_maxburstqueued);
+ }
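+ /*
+ * e.g. flight_size 0, max_burst 4, MTU 1500:
+ * cwnd is clamped to 6000 bytes this cycle.
+ */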
+ net->fast_retran_ip = 0;
+ } else {
+ if (net->flight_size == 0) {
+ /* Should be decaying the cwnd here */
+ ;
+ }
+ }
+ }
+
+ }
+ burst_cnt = 0;
+ cwnd_full = 0;
+ do {
+ error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
+ &reason_code, 0, &cwnd_full, from_where,
+ &now, &now_filled, frag_point);
+ if (error) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("Error %d was returned from med-c-op\n", error);
+ }
+#endif
+#ifdef SCTP_LOG_MAXBURST
+ sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
+#endif
+#ifdef SCTP_CWND_LOGGING
+ sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
+ sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
+#endif
+
+ break;
+ }
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
+ printf("m-c-o put out %d\n", num_out);
+ }
+#endif
+ tot_out += num_out;
+ burst_cnt++;
+#ifdef SCTP_CWND_LOGGING
+ sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
+ if (num_out == 0) {
+ sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
+ }
+#endif
+ if (nagle_on) {
+ /*
+ * When Nagle is on, we look at how much is unsent;
+ * if it is smaller than an MTU and we have data in
+ * flight, we stop.
+ */
+ un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
+ ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count)
+ * sizeof(struct sctp_data_chunk)));
+ if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
+ (stcb->asoc.total_flight > 0)) {
+ break;
+ }
+ }
+ if (TAILQ_EMPTY(&asoc->control_send_queue) &&
+ TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->out_wheel)) {
+ /* Nothing left to send */
+ break;
+ }
+ if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
+ /* Nothing left to send */
+ break;
+ }
+ } while (num_out && (sctp_use_cwnd_based_maxburst ||
+ (burst_cnt < burst_limit)));
+
+ if (sctp_use_cwnd_based_maxburst == 0) {
+ if (burst_cnt >= burst_limit) {
+ SCTP_STAT_INCR(sctps_maxburstqueued);
+ asoc->burst_limit_applied = 1;
+#ifdef SCTP_LOG_MAXBURST
+ sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
+#endif
+ } else {
+ asoc->burst_limit_applied = 0;
+ }
+ }
+#ifdef SCTP_CWND_LOGGING
+ sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
+#endif
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("Ok, we have put out %d chunks\n", tot_out);
+ }
+#endif
+ /*
+ * Now we need to clean up the control chunk chain if an ECNE is on
+ * it. It must be marked as UNSENT again so the next call will
+ * continue to send it until such time as we get a CWR to remove it.
+ */
+ if (stcb->asoc.ecn_echo_cnt_onq)
+ sctp_fix_ecn_echo(asoc);
+ return (error);
+}
+
+
+int
+sctp_output(struct sctp_inpcb *inp, struct mbuf *m, struct sockaddr *addr,
+    struct mbuf *control, struct thread *p, int flags)
+{
+ if (inp == NULL) {
+ return (EINVAL);
+ }
+ if (inp->sctp_socket == NULL) {
+ return (EINVAL);
+ }
+ return (sctp_sosend(inp->sctp_socket,
+ addr,
+ (struct uio *)NULL,
+ m,
+ control,
+ flags,
+ p));
+}
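+/*
+ * Editor's note: sctp_output() is a thin wrapper, so a call such as
+ *
+ *	error = sctp_output(inp, m, addr, NULL, td, 0);
+ *
+ * is equivalent to sctp_sosend() on inp->sctp_socket with the same
+ * mbuf chain and address.
+ */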
+
+void
+send_forward_tsn(struct sctp_tcb *stcb,
+ struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk;
+ struct sctp_forward_tsn_chunk *fwdtsn;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
+ /* mark it as unsent */
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ /* Do we correct its output location? */
+ if (chk->whoTo != asoc->primary_destination) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = asoc->primary_destination;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ }
+ goto sctp_fill_in_rest;
+ }
+ }
+ /* Ok if we reach here we must build one */
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ return;
+ }
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
+ chk->rec.chunk_id.can_take_data = 0;
+ chk->asoc = asoc;
+ chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
+ if (chk->data == NULL) {
+ /* chk->whoTo is not set yet, so there is no reference to drop */
+ sctp_free_a_chunk(stcb, chk);
+ return;
+ }
+ chk->data->m_data += SCTP_MIN_OVERHEAD;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->whoTo = asoc->primary_destination;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
+ asoc->ctrl_queue_cnt++;
+sctp_fill_in_rest:
+ /*
+ * Here we go through and fill out the part that deals with
+ * stream/seq of the ones we skip.
+ */
+ chk->data->m_pkthdr.len = chk->data->m_len = 0;
+ {
+ struct sctp_tmit_chunk *at, *tp1, *last;
+ struct sctp_strseq *strseq;
+ unsigned int cnt_of_space, i, ovh;
+ unsigned int space_needed;
+ unsigned int cnt_of_skipped = 0;
+
+ TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
+ if (at->sent != SCTP_FORWARD_TSN_SKIP) {
+ /* no more to look at */
+ break;
+ }
+ if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
+ /* We don't report these */
+ continue;
+ }
+ cnt_of_skipped++;
+ }
+ space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
+ (cnt_of_skipped * sizeof(struct sctp_strseq)));
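+ /* e.g. skipping 3 ordered chunks needs 8 + 3 * 4 = 20 bytes */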
+
+ cnt_of_space = M_TRAILINGSPACE(chk->data);
+
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ovh = SCTP_MIN_OVERHEAD;
+ } else {
+ ovh = SCTP_MIN_V4_OVERHEAD;
+ }
+ if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
+ /* trim to a mtu size */
+ cnt_of_space = asoc->smallest_mtu - ovh;
+ }
+ if (cnt_of_space < space_needed) {
+ /*
+ * ok, we must trim down the chunk by lowering the
+ * advanced peer ack point.
+ */
+ cnt_of_skipped = ((cnt_of_space -
+ sizeof(struct sctp_forward_tsn_chunk)) /
+ sizeof(struct sctp_strseq));
+ /*
+ * Go through and find the TSN that will be the one
+ * we report.
+ */
+ at = TAILQ_FIRST(&asoc->sent_queue);
+ for (i = 0; i < cnt_of_skipped; i++) {
+ tp1 = TAILQ_NEXT(at, sctp_next);
+ at = tp1;
+ }
+ last = at;
+ /*
+ * last now points to last one I can report, update
+ * peer ack point
+ */
+ asoc->advanced_peer_ack_point = last->rec.data.TSN_seq;
+ space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
+ (cnt_of_skipped * sizeof(struct sctp_strseq)));
+ }
+ chk->send_size = space_needed;
+ /* Setup the chunk */
+ fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
+ fwdtsn->ch.chunk_length = htons(chk->send_size);
+ fwdtsn->ch.chunk_flags = 0;
+ fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
+ fwdtsn->new_cumulative_tsn = htonl(asoc->advanced_peer_ack_point);
+ chk->send_size = (sizeof(struct sctp_forward_tsn_chunk) +
+ (cnt_of_skipped * sizeof(struct sctp_strseq)));
+ chk->data->m_pkthdr.len = chk->data->m_len = chk->send_size;
+ fwdtsn++;
+ /*
+ * Move pointer to after the fwdtsn and transfer to the
+ * strseq pointer.
+ */
+ strseq = (struct sctp_strseq *)fwdtsn;
+ /*
+ * Now populate the strseq list. This is done blindly
+ * without pulling out duplicate stream info. This is
+ * inefficient but won't harm the process since the peer
+ * will look at these in sequence and will thus release
+ * anything. It could mean we exceed the PMTU and chop off
+ * some that we could have included, but this is unlikely
+ * (e.g. 1432/4 would mean 300+ stream/seq pairs would have
+ * to be reported in one FWD-TSN). With a bit of work we
+ * can later FIX this to optimize and pull out duplicates,
+ * but that adds more overhead. So for now... not!
+ */
+ at = TAILQ_FIRST(&asoc->sent_queue);
+ for (i = 0; i < cnt_of_skipped; i++) {
+ tp1 = TAILQ_NEXT(at, sctp_next);
+ if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
+ /* We don't report these */
+ i--;
+ at = tp1;
+ continue;
+ }
+ strseq->stream = ntohs(at->rec.data.stream_number);
+ strseq->sequence = ntohs(at->rec.data.stream_seq);
+ strseq++;
+ at = tp1;
+ }
+ }
+ return;
+
+}
+
+void
+sctp_send_sack(struct sctp_tcb *stcb)
+{
+ /*
+ * Queue up a SACK in the control queue. We must first check to see
+ * if a SACK is somehow already on the control queue. If so, we
+ * reuse it, removing the old one from the queue.
+ */
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *chk, *a_chk;
+ struct sctp_sack_chunk *sack;
+ struct sctp_gap_ack_block *gap_descriptor;
+ struct sack_track *selector;
+ int mergeable = 0;
+ int offset;
+ caddr_t limit;
+ uint32_t *dup;
+ int limit_reached = 0;
+ unsigned int i, jstart, siz, j;
+ unsigned int num_gap_blocks = 0, space;
+ int num_dups = 0;
+ int space_req;
+
+
+ a_chk = NULL;
+ asoc = &stcb->asoc;
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (asoc->last_data_chunk_from == NULL) {
+ /* Hmm we never received anything */
+ return;
+ }
+ sctp_set_rwnd(stcb, asoc);
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) {
+ /* Hmm, found a sack already on queue, remove it */
+ TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
+ /* it comes off the queue here and is re-queued below */
+ asoc->ctrl_queue_cnt--;
+ a_chk = chk;
+ if (a_chk->data) {
+ sctp_m_freem(a_chk->data);
+ a_chk->data = NULL;
+ }
+ sctp_free_remote_addr(a_chk->whoTo);
+ a_chk->whoTo = NULL;
+ break;
+ }
+ }
+ if (a_chk == NULL) {
+ sctp_alloc_a_chunk(stcb, a_chk);
+ if (a_chk == NULL) {
+ /* No memory so we drop the idea, and set a timer */
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL);
+ sctp_timer_start(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL);
+ return;
+ }
+ a_chk->copy_by_ref = 0;
+ a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK;
+ a_chk->rec.chunk_id.can_take_data = 1;
+ }
+ a_chk->asoc = asoc;
+ a_chk->snd_count = 0;
+ a_chk->send_size = 0; /* fill in later */
+ a_chk->sent = SCTP_DATAGRAM_UNSENT;
+
+ if ((asoc->numduptsns) ||
+ (asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)
+ ) {
+ /*
+ * Ok, we have some duplicates or the destination for the
+ * sack is unreachable; let's see if we can select an
+ * alternate to asoc->last_data_chunk_from.
+ */
+ if ((!(asoc->last_data_chunk_from->dest_state &
+ SCTP_ADDR_NOT_REACHABLE)) &&
+ (asoc->used_alt_onsack > asoc->numnets)) {
+ /* We used an alt last time, don't this time */
+ a_chk->whoTo = NULL;
+ } else {
+ asoc->used_alt_onsack++;
+ a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
+ }
+ if (a_chk->whoTo == NULL) {
+ /* Nope, no alternate */
+ a_chk->whoTo = asoc->last_data_chunk_from;
+ asoc->used_alt_onsack = 0;
+ }
+ } else {
+ /*
+ * No duplicates so we use the last place we received data
+ * from.
+ */
+ asoc->used_alt_onsack = 0;
+ a_chk->whoTo = asoc->last_data_chunk_from;
+ }
+ if (a_chk->whoTo) {
+ atomic_add_int(&a_chk->whoTo->ref_count, 1);
+ }
+ if (asoc->highest_tsn_inside_map == asoc->cumulative_tsn) {
+ /* no gaps */
+ space_req = sizeof(struct sctp_sack_chunk);
+ } else {
+ /* gaps get a cluster */
+ space_req = MCLBYTES;
+ }
+ /* Ok, now let's formulate an mbuf with our sack */
+ a_chk->data = sctp_get_mbuf_for_msg(space_req, 1, M_DONTWAIT, 1, MT_DATA);
+ if ((a_chk->data == NULL) ||
+ (a_chk->whoTo == NULL)) {
+ /* rats, no mbuf memory */
+ if (a_chk->data) {
+ /* was a problem with the destination */
+ sctp_m_freem(a_chk->data);
+ a_chk->data = NULL;
+ }
+ if (a_chk->whoTo)
+ atomic_subtract_int(&a_chk->whoTo->ref_count, 1);
+ sctp_free_a_chunk(stcb, a_chk);
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL);
+ sctp_timer_start(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL);
+ return;
+ }
+ /* ok, let's go through and fill it in */
+ a_chk->data->m_data += SCTP_MIN_OVERHEAD;
+ space = M_TRAILINGSPACE(a_chk->data);
+ if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
+ space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
+ }
+ limit = mtod(a_chk->data, caddr_t);
+ limit += space;
+
+ sack = mtod(a_chk->data, struct sctp_sack_chunk *);
+ sack->ch.chunk_type = SCTP_SELECTIVE_ACK;
+ /* 0x01 is used by nonce for ecn */
+ sack->ch.chunk_flags = (asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM);
+ if (sctp_cmt_on_off && sctp_cmt_use_dac) {
+ /*
+ * CMT DAC algorithm: if 2 (i.e., binary 10) packets have
+ * been received, the high bit is set to 1, else 0. Reset
+ * pkts_rcvd.
+ */
+ sack->ch.chunk_flags |= (asoc->cmt_dac_pkts_rcvd << 6);
+ asoc->cmt_dac_pkts_rcvd = 0;
+ }
+ sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
+ sack->sack.a_rwnd = htonl(asoc->my_rwnd);
+ asoc->my_last_reported_rwnd = asoc->my_rwnd;
+
+ /* reset the reader's interpretation */
+ stcb->freed_by_sorcv_sincelast = 0;
+
+ gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
+
+
+ siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
+ if (asoc->cumulative_tsn < asoc->mapping_array_base_tsn) {
+ offset = 1;
+ /*
+ * The cum-ack is behind the mapping array base, so we start
+ * at the first entry and use all entries.
+ */
+ jstart = 0;
+ } else {
+ offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
+ /*
+ * we skip the first one when the cum-ack is at or above the
+ * mapping array base.
+ */
+ jstart = 1;
+ }
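+ /*
+ * Editor's note: with this offset, each reported gap start/end works
+ * out to (TSN - cumulative_tsn), the relative form the SACK gap-ack
+ * block encoding requires.
+ */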
+ if (compare_with_wrap(asoc->highest_tsn_inside_map, asoc->cumulative_tsn, MAX_TSN)) {
+ /* we have a gap .. maybe */
+ for (i = 0; i < siz; i++) {
+ selector = &sack_array[asoc->mapping_array[i]];
+ if (mergeable && selector->right_edge) {
+ /*
+ * Backup, left and right edges were ok to
+ * merge.
+ */
+ num_gap_blocks--;
+ gap_descriptor--;
+ }
+ if (selector->num_entries == 0)
+ mergeable = 0;
+ else {
+ for (j = jstart; j < selector->num_entries; j++) {
+ if (mergeable && selector->right_edge) {
+ /*
+ * do a merge by NOT setting
+ * the left side
+ */
+ mergeable = 0;
+ } else {
+ /*
+ * no merge, set the left
+ * side
+ */
+ mergeable = 0;
+ gap_descriptor->start = htons((selector->gaps[j].start + offset));
+ }
+ gap_descriptor->end = htons((selector->gaps[j].end + offset));
+ num_gap_blocks++;
+ gap_descriptor++;
+ if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
+ /* no more room */
+ limit_reached = 1;
+ break;
+ }
+ }
+ if (selector->left_edge) {
+ mergeable = 1;
+ }
+ }
+ jstart = 0;
+ offset += 8;
+ }
+ if (num_gap_blocks == 0) {
+ /* reneged all chunks */
+ asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
+ }
+ }
+ /* now we must add any dups we are going to report. */
+ if ((limit_reached == 0) && (asoc->numduptsns)) {
+ dup = (uint32_t *) gap_descriptor;
+ for (i = 0; i < asoc->numduptsns; i++) {
+ *dup = htonl(asoc->dup_tsns[i]);
+ dup++;
+ num_dups++;
+ if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
+ /* no more room */
+ break;
+ }
+ }
+ asoc->numduptsns = 0;
+ }
+ /*
+ * now that the chunk is prepared queue it to the control chunk
+ * queue.
+ */
+ a_chk->send_size = (sizeof(struct sctp_sack_chunk) +
+ (num_gap_blocks * sizeof(struct sctp_gap_ack_block)) +
+ (num_dups * sizeof(int32_t)));
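+ /* e.g. 2 gap blocks + 1 dup TSN: 16 + 2 * 4 + 1 * 4 = 28 bytes */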
+ a_chk->data->m_pkthdr.len = a_chk->data->m_len = a_chk->send_size;
+ sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
+ sack->sack.num_dup_tsns = htons(num_dups);
+ sack->ch.chunk_length = htons(a_chk->send_size);
+ TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
+ asoc->ctrl_queue_cnt++;
+ SCTP_STAT_INCR(sctps_sendsacks);
+ return;
+}
+
+
+void
+sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr)
+{
+ struct mbuf *m_abort;
+ struct mbuf *m_out = NULL, *m_end = NULL;
+ struct sctp_abort_chunk *abort = NULL;
+ int sz;
+ uint32_t auth_offset = 0;
+ struct sctp_auth_chunk *auth = NULL;
+ struct sctphdr *shdr;
+
+ /*
+ * Add an AUTH chunk, if the chunk requires it, and save the offset
+ * into the chain for AUTH.
+ */
+ if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
+ stcb->asoc.peer_auth_chunks)) {
+ m_out = sctp_add_auth_chunk(m_out, &m_end, &auth, &auth_offset,
+ stcb, SCTP_ABORT_ASSOCIATION);
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 1, M_DONTWAIT, 1, MT_HEADER);
+ if (m_abort == NULL) {
+ /* no mbuf's */
+ if (m_out)
+ sctp_m_freem(m_out);
+ return;
+ }
+ /* link in any error */
+ m_abort->m_next = operr;
+ sz = 0;
+ if (operr) {
+ struct mbuf *n;
+
+ n = operr;
+ while (n) {
+ sz += n->m_len;
+ n = n->m_next;
+ }
+ }
+ m_abort->m_len = sizeof(*abort);
+ m_abort->m_pkthdr.len = m_abort->m_len + sz;
+ m_abort->m_pkthdr.rcvif = 0;
+ if (m_out == NULL) {
+ /* NO Auth chunk prepended, so reserve space in front */
+ m_abort->m_data += SCTP_MIN_OVERHEAD;
+ m_out = m_abort;
+ } else {
+ /* Put AUTH chunk at the front of the chain */
+ m_out->m_pkthdr.len += m_abort->m_pkthdr.len;
+ m_end->m_next = m_abort;
+ }
+
+ /* fill in the ABORT chunk */
+ abort = mtod(m_abort, struct sctp_abort_chunk *);
+ abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
+ abort->ch.chunk_flags = 0;
+ abort->ch.chunk_length = htons(sizeof(*abort) + sz);
+
+ /* prepend and fill in the SCTP header */
+ M_PREPEND(m_out, sizeof(struct sctphdr), M_DONTWAIT);
+ if (m_out == NULL) {
+ /* TSNH: no memory */
+ return;
+ }
+ shdr = mtod(m_out, struct sctphdr *);
+ shdr->src_port = stcb->sctp_ep->sctp_lport;
+ shdr->dest_port = stcb->rport;
+ shdr->v_tag = htonl(stcb->asoc.peer_vtag);
+ shdr->checksum = 0;
+ auth_offset += sizeof(struct sctphdr);
+
+ sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb,
+ stcb->asoc.primary_destination,
+ (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr,
+ m_out, auth_offset, auth, 1, 0, NULL, 0);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+}
+
+int
+sctp_send_shutdown_complete(struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ /* formulate and SEND a SHUTDOWN-COMPLETE */
+ struct mbuf *m_shutdown_comp;
+ struct sctp_shutdown_complete_msg *comp_cp;
+
+ m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_complete_msg), 1, M_DONTWAIT, 1, MT_HEADER);
+ if (m_shutdown_comp == NULL) {
+ /* no mbuf's */
+ return (-1);
+ }
+ m_shutdown_comp->m_data += sizeof(struct ip6_hdr);
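+ /* reserve room up front for the IP header (the IPv6 size covers IPv4 too) */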
+ comp_cp = mtod(m_shutdown_comp, struct sctp_shutdown_complete_msg *);
+ comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
+ comp_cp->shut_cmp.ch.chunk_flags = 0;
+ comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
+ comp_cp->sh.src_port = stcb->sctp_ep->sctp_lport;
+ comp_cp->sh.dest_port = stcb->rport;
+ comp_cp->sh.v_tag = htonl(stcb->asoc.peer_vtag);
+ comp_cp->sh.checksum = 0;
+
+ m_shutdown_comp->m_pkthdr.len = m_shutdown_comp->m_len = sizeof(struct sctp_shutdown_complete_msg);
+ m_shutdown_comp->m_pkthdr.rcvif = 0;
+ sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
+ (struct sockaddr *)&net->ro._l_addr,
+ m_shutdown_comp, 0, NULL, 1, 0, NULL, 0);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ return (0);
+}
+
+int
+sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh)
+{
+ /* formulate and SEND a SHUTDOWN-COMPLETE */
+ struct mbuf *mout;
+ struct ip *iph, *iph_out;
+ struct ip6_hdr *ip6, *ip6_out;
+ int offset_out;
+ struct sctp_shutdown_complete_msg *comp_cp;
+
+ mout = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_complete_msg), 1, M_DONTWAIT, 1, MT_HEADER);
+ if (mout == NULL) {
+ /* no mbuf's */
+ return (-1);
+ }
+ iph = mtod(m, struct ip *);
+ iph_out = NULL;
+ ip6_out = NULL;
+ offset_out = 0;
+ if (iph->ip_v == IPVERSION) {
+ mout->m_len = sizeof(struct ip) +
+ sizeof(struct sctp_shutdown_complete_msg);
+ mout->m_next = NULL;
+ iph_out = mtod(mout, struct ip *);
+
+ /* Fill in the IP header for the SHUTDOWN-COMPLETE */
+ iph_out->ip_v = IPVERSION;
+ iph_out->ip_hl = (sizeof(struct ip) / 4);
+ iph_out->ip_tos = (u_char)0;
+ iph_out->ip_id = 0;
+ iph_out->ip_off = 0;
+ iph_out->ip_ttl = MAXTTL;
+ iph_out->ip_p = IPPROTO_SCTP;
+ iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
+ iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
+
+ /* let IP layer calculate this */
+ iph_out->ip_sum = 0;
+ offset_out += sizeof(*iph_out);
+ comp_cp = (struct sctp_shutdown_complete_msg *)(
+ (caddr_t)iph_out + offset_out);
+ } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
+ ip6 = (struct ip6_hdr *)iph;
+ mout->m_len = sizeof(struct ip6_hdr) +
+ sizeof(struct sctp_shutdown_complete_msg);
+ mout->m_next = NULL;
+ ip6_out = mtod(mout, struct ip6_hdr *);
+
+ /* Fill in the IPv6 header for the SHUTDOWN-COMPLETE */
+ ip6_out->ip6_flow = ip6->ip6_flow;
+ ip6_out->ip6_hlim = ip6_defhlim;
+ ip6_out->ip6_nxt = IPPROTO_SCTP;
+ ip6_out->ip6_src = ip6->ip6_dst;
+ ip6_out->ip6_dst = ip6->ip6_src;
+ ip6_out->ip6_plen = mout->m_len;
+ offset_out += sizeof(*ip6_out);
+ comp_cp = (struct sctp_shutdown_complete_msg *)(
+ (caddr_t)ip6_out + offset_out);
+ } else {
+ /* Currently not supported; don't leak the mbuf. */
+ sctp_m_freem(mout);
+ return (-1);
+ }
+
+ /* Now fill in the SHUTDOWN-COMPLETE header and chunk. */
+ comp_cp->sh.src_port = sh->dest_port;
+ comp_cp->sh.dest_port = sh->src_port;
+ comp_cp->sh.checksum = 0;
+ comp_cp->sh.v_tag = sh->v_tag;
+ comp_cp->shut_cmp.ch.chunk_flags = SCTP_HAD_NO_TCB;
+ comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
+ comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
+
+ mout->m_pkthdr.len = mout->m_len;
+ /* add checksum */
+ if ((sctp_no_csum_on_loopback) &&
+ (m->m_pkthdr.rcvif) &&
+ (m->m_pkthdr.rcvif->if_type == IFT_LOOP)) {
+ comp_cp->sh.checksum = 0;
+ } else {
+ comp_cp->sh.checksum = sctp_calculate_sum(mout, NULL, offset_out);
+ }
+
+ /* zap the rcvif, it should be null */
+ mout->m_pkthdr.rcvif = 0;
+ /* zap the stack pointer to the route */
+ if (iph_out != NULL) {
+ struct route ro;
+
+ bzero(&ro, sizeof ro);
+ /* set IPv4 length */
+ iph_out->ip_len = mout->m_pkthdr.len;
+ /* out it goes */
+ ip_output(mout, 0, &ro, IP_RAWOUTPUT, NULL, NULL);
+ /* Free the route if we got one back */
+ if (ro.ro_rt)
+ RTFREE(ro.ro_rt);
+ } else if (ip6_out != NULL) {
+ struct route_in6 ro;
+
+
+ bzero(&ro, sizeof(ro));
+ ip6_output(mout, NULL, &ro, 0, NULL, NULL, NULL);
+ /* Free the route if we got one back */
+ if (ro.ro_rt)
+ RTFREE(ro.ro_rt);
+ }
+ SCTP_STAT_INCR(sctps_sendpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ return (0);
+}
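+
+/*
+ * Note: this variant handles the case where no TCB exists (e.g. a
+ * SHUTDOWN-ACK arriving for an unknown association), so the reply is
+ * built by swapping the addresses and ports of the inbound packet and
+ * reflecting its verification tag; SCTP_HAD_NO_TCB (the T bit) tells
+ * the peer the tag is reflected, per the out-of-the-blue rules of
+ * RFC 2960 section 8.4.
+ */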
+
+static struct sctp_nets *
+sctp_select_hb_destination(struct sctp_tcb *stcb, struct timeval *now)
+{
+ struct sctp_nets *net, *hnet;
+ int ms_goneby, highest_ms, state_override = 0;
+
+ SCTP_GETTIME_TIMEVAL(now);
+ highest_ms = 0;
+ hnet = NULL;
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (
+ ((net->dest_state & SCTP_ADDR_NOHB) && ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) ||
+ (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)
+ ) {
+ /*
+ * Skip this guy from consideration if HB is off AND
+ * it is confirmed.
+ */
+ continue;
+ }
+ if (sctp_destination_is_reachable(stcb, (struct sockaddr *)&net->ro._l_addr) == 0) {
+ /* skip this dest net from consideration */
+ continue;
+ }
+ if (net->last_sent_time.tv_sec) {
+ /* We have sent to this address, so compute the elapsed time. */
+ ms_goneby = (now->tv_sec - net->last_sent_time.tv_sec) * 1000;
+ } else
+ /* Never been sent to */
+ ms_goneby = 0x7fffffff;
+ /*
+ * When the address state is unconfirmed but still
+ * considered reachable, we HB at a higher rate. Once it
+ * goes confirmed OR reaches the "unreachable" state, we
+ * cut it back to HB at a more normal pace.
+ */
+ if ((net->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED) {
+ state_override = 1;
+ } else {
+ state_override = 0;
+ }
+
+ if ((((unsigned int)ms_goneby >= net->RTO) || (state_override)) &&
+ (ms_goneby > highest_ms)) {
+ highest_ms = ms_goneby;
+ hnet = net;
+ }
+ }
+ if (hnet &&
+ ((hnet->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED)) {
+ state_override = 1;
+ } else {
+ state_override = 0;
+ }
+
+ if (highest_ms && (((unsigned int)highest_ms >= hnet->RTO) || state_override)) {
+ /*
+ * Found the one with longest delay bounds OR it is
+ * unconfirmed and still not marked unreachable.
+ */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
+ printf("net:%p is the hb winner -",
+ hnet);
+ if (hnet)
+ sctp_print_address((struct sockaddr *)&hnet->ro._l_addr);
+ else
+ printf(" none\n");
+ }
+#endif
+ /* update the timer now */
+ hnet->last_sent_time = *now;
+ return (hnet);
+ }
+ /* Nothing to HB */
+ return (NULL);
+}
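+
+/*
+ * Worked example of the selection above (illustrative only): with two
+ * destinations A (RTO 1000ms, last sent to 2500ms ago) and B (RTO
+ * 3000ms, last sent to 500ms ago), only A satisfies ms_goneby >= RTO,
+ * so A is the HB winner.  An UNCONFIRMED (but not unreachable) address
+ * sets state_override and is eligible regardless of its RTO, which
+ * gives it the faster HB rate described above.
+ */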
+
+int
+sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
+{
+ struct sctp_tmit_chunk *chk;
+ struct sctp_nets *net;
+ struct sctp_heartbeat_chunk *hb;
+ struct timeval now;
+ struct sockaddr_in *sin;
+ struct sockaddr_in6 *sin6;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (user_req == 0) {
+ net = sctp_select_hb_destination(stcb, &now);
+ if (net == NULL) {
+ /*
+ * All are busy or there is none to send to; just
+ * start the timer again.
+ */
+ if (stcb->asoc.state == 0) {
+ return (0);
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
+ stcb->sctp_ep,
+ stcb,
+ net);
+ return (0);
+ }
+ } else {
+ net = u_net;
+ if (net == NULL) {
+ return (0);
+ }
+ SCTP_GETTIME_TIMEVAL(&now);
+ }
+ sin = (struct sockaddr_in *)&net->ro._l_addr;
+ if (sin->sin_family != AF_INET) {
+ if (sin->sin_family != AF_INET6) {
+ /* huh */
+ return (0);
+ }
+ }
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
+ printf("Gak, can't get a chunk for hb\n");
+ }
+#endif
+ return (0);
+ }
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->asoc = &stcb->asoc;
+ chk->send_size = sizeof(struct sctp_heartbeat_chunk);
+
+ chk->data = sctp_get_mbuf_for_msg(chk->send_size, 1, M_DONTWAIT, 1, MT_HEADER);
+ if (chk->data == NULL) {
+ sctp_free_a_chunk(stcb, chk);
+ return (0);
+ }
+ chk->data->m_data += SCTP_MIN_OVERHEAD;
+ chk->data->m_pkthdr.len = chk->data->m_len = chk->send_size;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->whoTo = net;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ /* Now we have a mbuf that we can fill in with the details */
+ hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
+
+ /* fill out chunk header */
+ hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
+ hb->ch.chunk_flags = 0;
+ hb->ch.chunk_length = htons(chk->send_size);
+ /* Fill out hb parameter */
+ hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
+ hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
+ hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
+ hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
+ /* Record whether our user requested this one. */
+ hb->heartbeat.hb_info.user_req = user_req;
+ hb->heartbeat.hb_info.addr_family = sin->sin_family;
+ hb->heartbeat.hb_info.addr_len = sin->sin_len;
+ if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
+ /*
+ * we only take from the entropy pool if the address is not
+ * confirmed.
+ */
+ net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
+ net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
+ } else {
+ net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
+ net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
+ }
+ if (sin->sin_family == AF_INET) {
+ memcpy(hb->heartbeat.hb_info.address, &sin->sin_addr, sizeof(sin->sin_addr));
+ } else if (sin->sin_family == AF_INET6) {
+ /* We leave the scope the way it is in our lookup table. */
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ memcpy(hb->heartbeat.hb_info.address, &sin6->sin6_addr, sizeof(sin6->sin6_addr));
+ } else {
+ /* cannot happen; the address family was checked above */
+ return (0);
+ }
+ /* ok we have a destination that needs a beat */
+ /* Let's do the threshold management, Qiaobing style. */
+ if (sctp_threshold_management(stcb->sctp_ep, stcb, net,
+ stcb->asoc.max_send_times)) {
+ /*
+ * We have lost the association; in a way this is quite bad,
+ * since we are effectively charged one send attempt even
+ * though we never actually sent. This is the down side to
+ * Q's style as defined in the RFC, as opposed to my
+ * alternate style.
+ */
+ atomic_subtract_int(&chk->whoTo->ref_count, 1);
+ if (chk->data != NULL) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_a_chunk(stcb, chk);
+ return (-1);
+ }
+ net->hb_responded = 0;
+ TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
+ stcb->asoc.ctrl_queue_cnt++;
+ SCTP_STAT_INCR(sctps_sendheartbeat);
+ /*
+ * The caller should now call the medium-level routine directly
+ * to put out the chunk. It will always tumble out control
+ * chunks (aka HB) but it may even tumble out data too.
+ */
+ return (1);
+}
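+
+/*
+ * The HEARTBEAT info parameter built above carries the send time
+ * (time_value_1/2), so an RTT for this destination can be computed
+ * when the peer echoes it back in a HEARTBEAT-ACK.  The random values
+ * are only drawn for UNCONFIRMED addresses; they are remembered on the
+ * net (heartbeat_random1/2) so the matching HEARTBEAT-ACK can be
+ * validated when confirming the address.
+ */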
+
+void
+sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
+ uint32_t high_tsn)
+{
+ struct sctp_association *asoc;
+ struct sctp_ecne_chunk *ecne;
+ struct sctp_tmit_chunk *chk;
+
+ asoc = &stcb->asoc;
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
+ /* found a previous ECN_ECHO update it if needed */
+ ecne = mtod(chk->data, struct sctp_ecne_chunk *);
+ ecne->tsn = htonl(high_tsn);
+ return;
+ }
+ }
+ /* nope could not find one to update so we must build one */
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ return;
+ }
+ chk->copy_by_ref = 0;
+ SCTP_STAT_INCR(sctps_sendecne);
+ chk->rec.chunk_id.id = SCTP_ECN_ECHO;
+ chk->rec.chunk_id.can_take_data = 0;
+ chk->asoc = &stcb->asoc;
+ chk->send_size = sizeof(struct sctp_ecne_chunk);
+ chk->data = sctp_get_mbuf_for_msg(chk->send_size, 1, M_DONTWAIT, 1, MT_HEADER);
+ if (chk->data == NULL) {
+ sctp_free_a_chunk(stcb, chk);
+ return;
+ }
+ chk->data->m_data += SCTP_MIN_OVERHEAD;
+ chk->data->m_pkthdr.len = chk->data->m_len = chk->send_size;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->whoTo = net;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ stcb->asoc.ecn_echo_cnt_onq++;
+ ecne = mtod(chk->data, struct sctp_ecne_chunk *);
+ ecne->ch.chunk_type = SCTP_ECN_ECHO;
+ ecne->ch.chunk_flags = 0;
+ ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
+ ecne->tsn = htonl(high_tsn);
+ TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
+ asoc->ctrl_queue_cnt++;
+}
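+
+/*
+ * Note the coalescing above: if an ECN-ECHO is already sitting on the
+ * control queue we just rewrite its TSN instead of queueing a second
+ * chunk, so several CE-marked packets arriving before the next send
+ * still cost only one chunk on the wire.
+ */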
+
+void
+sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
+ struct mbuf *m, int iphlen, int bad_crc)
+{
+ struct sctp_association *asoc;
+ struct sctp_pktdrop_chunk *drp;
+ struct sctp_tmit_chunk *chk;
+ uint8_t *datap;
+ int len;
+ unsigned int small_one;
+ struct ip *iph;
+
+ long spc;
+
+ asoc = &stcb->asoc;
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (asoc->peer_supports_pktdrop == 0) {
+ /*
+ * peer must declare support before I send one.
+ */
+ return;
+ }
+ if (stcb->sctp_socket == NULL) {
+ return;
+ }
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ return;
+ }
+ chk->copy_by_ref = 0;
+ iph = mtod(m, struct ip *);
+ if (iph == NULL) {
+ return;
+ }
+ if (iph->ip_v == IPVERSION) {
+ /* IPv4 */
+ len = chk->send_size = iph->ip_len;
+ } else {
+ struct ip6_hdr *ip6h;
+
+ /* IPv6 */
+ ip6h = mtod(m, struct ip6_hdr *);
+ len = chk->send_size = ntohs(ip6h->ip6_plen);
+ }
+ if ((len + iphlen) > m->m_pkthdr.len) {
+ /* huh */
+ chk->send_size = len = m->m_pkthdr.len - iphlen;
+ }
+ chk->asoc = &stcb->asoc;
+ chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
+ if (chk->data == NULL) {
+jump_out:
+ sctp_free_a_chunk(stcb, chk);
+ return;
+ }
+ chk->data->m_data += SCTP_MIN_OVERHEAD;
+ drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
+ if (drp == NULL) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ goto jump_out;
+ }
+ small_one = asoc->smallest_mtu;
+ if (small_one > MCLBYTES) {
+ /* Only one cluster worth of data MAX */
+ small_one = MCLBYTES;
+ }
+ chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
+ sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
+ if (chk->book_size > small_one) {
+ drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
+ drp->trunc_len = htons(chk->send_size);
+ chk->send_size = small_one - (SCTP_MED_OVERHEAD +
+ sizeof(struct sctp_pktdrop_chunk) +
+ sizeof(struct sctphdr));
+ len = chk->send_size;
+ } else {
+ /* no truncation needed */
+ drp->ch.chunk_flags = 0;
+ drp->trunc_len = htons(0);
+ }
+ if (bad_crc) {
+ drp->ch.chunk_flags |= SCTP_BADCRC;
+ }
+ chk->send_size += sizeof(struct sctp_pktdrop_chunk);
+ chk->data->m_pkthdr.len = chk->data->m_len = chk->send_size;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ if (net) {
+ /* we should hit here */
+ chk->whoTo = net;
+ } else {
+ chk->whoTo = asoc->primary_destination;
+ }
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
+ chk->rec.chunk_id.can_take_data = 1;
+ drp->ch.chunk_type = SCTP_PACKET_DROPPED;
+ drp->ch.chunk_length = htons(chk->send_size);
+ spc = stcb->sctp_socket->so_rcv.sb_hiwat;
+ if (spc < 0) {
+ spc = 0;
+ }
+ drp->bottle_bw = htonl(spc);
+ if (asoc->my_rwnd) {
+ drp->current_onq = htonl(asoc->size_on_reasm_queue +
+ asoc->size_on_all_streams +
+ asoc->my_rwnd_control_len +
+ stcb->sctp_socket->so_rcv.sb_cc);
+ } else {
+ /*
+ * If my rwnd is 0, possibly from mbuf depletion as well as
+ * space used, tell the peer there is NO space aka onq == bw
+ */
+ drp->current_onq = htonl(spc);
+ }
+ drp->reserved = 0;
+ datap = drp->data;
+ m_copydata(m, iphlen, len, (caddr_t)datap);
+ TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
+ asoc->ctrl_queue_cnt++;
+}
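+
+/*
+ * Truncation arithmetic (sketch): the PKTDROP report must fit in one
+ * cluster and within the smallest path MTU.  E.g. with small_one =
+ * 1500 and a 1400-byte dropped datagram, book_size (data + pktdrop
+ * header + sctphdr + SCTP_MED_OVERHEAD, rounded to 32-bit words)
+ * exceeds 1500, so SCTP_PACKET_TRUNCATED is set, trunc_len records the
+ * original 1400 bytes, and send_size is clipped to whatever fits.
+ */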
+
+void
+sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn)
+{
+ struct sctp_association *asoc;
+ struct sctp_cwr_chunk *cwr;
+ struct sctp_tmit_chunk *chk;
+
+ asoc = &stcb->asoc;
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if (chk->rec.chunk_id.id == SCTP_ECN_CWR) {
+ /* found a previous ECN_CWR update it if needed */
+ cwr = mtod(chk->data, struct sctp_cwr_chunk *);
+ if (compare_with_wrap(high_tsn, ntohl(cwr->tsn),
+ MAX_TSN)) {
+ cwr->tsn = htonl(high_tsn);
+ }
+ return;
+ }
+ }
+ /* nope could not find one to update so we must build one */
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ return;
+ }
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_ECN_CWR;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->asoc = &stcb->asoc;
+ chk->send_size = sizeof(struct sctp_cwr_chunk);
+ chk->data = sctp_get_mbuf_for_msg(chk->send_size, 1, M_DONTWAIT, 1, MT_HEADER);
+ if (chk->data == NULL) {
+ sctp_free_a_chunk(stcb, chk);
+ return;
+ }
+ chk->data->m_data += SCTP_MIN_OVERHEAD;
+ chk->data->m_pkthdr.len = chk->data->m_len = chk->send_size;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->whoTo = net;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ cwr = mtod(chk->data, struct sctp_cwr_chunk *);
+ cwr->ch.chunk_type = SCTP_ECN_CWR;
+ cwr->ch.chunk_flags = 0;
+ cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
+ cwr->tsn = htonl(high_tsn);
+ TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
+ asoc->ctrl_queue_cnt++;
+}
+
+void
+sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
+ int number_entries, uint16_t * list,
+ uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
+{
+ int len, old_len, i;
+ struct sctp_stream_reset_out_request *req_out;
+ struct sctp_chunkhdr *ch;
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+
+
+ old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+ /* get to new offset for the param. */
+ req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
+ /* now how long will this param be? */
+ len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
+ req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
+ req_out->ph.param_length = htons(len);
+ req_out->request_seq = htonl(seq);
+ req_out->response_seq = htonl(resp_seq);
+ req_out->send_reset_at_tsn = htonl(last_sent);
+ if (number_entries) {
+ for (i = 0; i < number_entries; i++) {
+ req_out->list_of_streams[i] = htons(list[i]);
+ }
+ }
+ if (SCTP_SIZE32(len) > len) {
+ /*
+ * Need to worry about the pad we may end up adding to the
+ * end. This is easy since the struct is either aligned to 4
+ * bytes or 2 bytes off.
+ */
+ req_out->list_of_streams[number_entries] = 0;
+ }
+ /* now fix the chunk length */
+ ch->chunk_length = htons(len + old_len);
+ chk->send_size = len + old_len;
+ chk->book_size = SCTP_SIZE32(chk->send_size);
+ chk->data->m_pkthdr.len = chk->data->m_len = SCTP_SIZE32(chk->send_size);
+ return;
+}
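+
+/*
+ * Padding example: with an odd number_entries the parameter ends 2
+ * bytes short of a 4-byte boundary, so one extra zeroed uint16_t is
+ * written as pad.  Note that chunk_length and send_size advertise the
+ * unpadded length (as SCTP requires), while book_size and m_len are
+ * rounded up via SCTP_SIZE32 to cover the pad bytes actually sent.
+ */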
+
+
+void
+sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
+ int number_entries, uint16_t * list,
+ uint32_t seq)
+{
+ int len, old_len, i;
+ struct sctp_stream_reset_in_request *req_in;
+ struct sctp_chunkhdr *ch;
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+
+
+ old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+ /* get to new offset for the param. */
+ req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
+ /* now how long will this param be? */
+ len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
+ req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
+ req_in->ph.param_length = htons(len);
+ req_in->request_seq = htonl(seq);
+ if (number_entries) {
+ for (i = 0; i < number_entries; i++) {
+ req_in->list_of_streams[i] = htons(list[i]);
+ }
+ }
+ if (SCTP_SIZE32(len) > len) {
+ /*
+ * Need to worry about the pad we may end up adding to the
+ * end. This is easy since the struct is either aligned to 4
+ * bytes or 2 bytes off.
+ */
+ req_in->list_of_streams[number_entries] = 0;
+ }
+ /* now fix the chunk length */
+ ch->chunk_length = htons(len + old_len);
+ chk->send_size = len + old_len;
+ chk->book_size = SCTP_SIZE32(chk->send_size);
+ chk->data->m_pkthdr.len = chk->data->m_len = SCTP_SIZE32(chk->send_size);
+ return;
+}
+
+
+void
+sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
+ uint32_t seq)
+{
+ int len, old_len;
+ struct sctp_stream_reset_tsn_request *req_tsn;
+ struct sctp_chunkhdr *ch;
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+
+
+ old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+ /* get to new offset for the param. */
+ req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
+ /* now how long will this param be? */
+ len = sizeof(struct sctp_stream_reset_tsn_request);
+ req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
+ req_tsn->ph.param_length = htons(len);
+ req_tsn->request_seq = htonl(seq);
+
+ /* now fix the chunk length */
+ ch->chunk_length = htons(len + old_len);
+ chk->send_size = len + old_len;
+ chk->book_size = SCTP_SIZE32(chk->send_size);
+ chk->data->m_pkthdr.len = chk->data->m_len = SCTP_SIZE32(chk->send_size);
+ return;
+}
+
+void
+sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
+ uint32_t resp_seq, uint32_t result)
+{
+ int len, old_len;
+ struct sctp_stream_reset_response *resp;
+ struct sctp_chunkhdr *ch;
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+
+
+ old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+ /* get to new offset for the param. */
+ resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
+ /* now how long will this param be? */
+ len = sizeof(struct sctp_stream_reset_response);
+ resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
+ resp->ph.param_length = htons(len);
+ resp->response_seq = htonl(resp_seq);
+ resp->result = htonl(result);
+
+ /* now fix the chunk length */
+ ch->chunk_length = htons(len + old_len);
+ chk->send_size = len + old_len;
+ chk->book_size = SCTP_SIZE32(chk->send_size);
+ chk->data->m_pkthdr.len = chk->data->m_len = SCTP_SIZE32(chk->send_size);
+ return;
+
+}
+
+
+void
+sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
+ uint32_t resp_seq, uint32_t result,
+ uint32_t send_una, uint32_t recv_next)
+{
+ int len, old_len;
+ struct sctp_stream_reset_response_tsn *resp;
+ struct sctp_chunkhdr *ch;
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+
+
+ old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+ /* get to new offset for the param. */
+ resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
+ /* now how long will this param be? */
+ len = sizeof(struct sctp_stream_reset_response_tsn);
+ resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
+ resp->ph.param_length = htons(len);
+ resp->response_seq = htonl(resp_seq);
+ resp->result = htonl(result);
+ resp->senders_next_tsn = htonl(send_una);
+ resp->receivers_next_tsn = htonl(recv_next);
+
+ /* now fix the chunk length */
+ ch->chunk_length = htons(len + old_len);
+ chk->send_size = len + old_len;
+ chk->book_size = SCTP_SIZE32(chk->send_size);
+ chk->data->m_pkthdr.len = chk->data->m_len = SCTP_SIZE32(chk->send_size);
+ return;
+}
+
+
+int
+sctp_send_str_reset_req(struct sctp_tcb *stcb,
+ int number_entries, uint16_t * list,
+ uint8_t send_out_req, uint32_t resp_seq,
+ uint8_t send_in_req,
+ uint8_t send_tsn_req)
+{
+
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *chk;
+ struct sctp_chunkhdr *ch;
+ uint32_t seq;
+
+ asoc = &stcb->asoc;
+ if (asoc->stream_reset_outstanding) {
+ /*
+ * Already one pending, must get ACK back to clear the flag.
+ */
+ return (EBUSY);
+ }
+ if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0)) {
+ /* nothing to do */
+ return (EINVAL);
+ }
+ if (send_tsn_req && (send_out_req || send_in_req)) {
+ /* error, can't do that */
+ return (EINVAL);
+ }
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ return (ENOMEM);
+ }
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_STREAM_RESET;
+ chk->rec.chunk_id.can_take_data = 0;
+ chk->asoc = &stcb->asoc;
+ chk->send_size = sizeof(struct sctp_chunkhdr);
+ chk->book_size = SCTP_SIZE32(chk->send_size);
+
+ chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
+ if (chk->data == NULL) {
+ sctp_free_a_chunk(stcb, chk);
+ return (ENOMEM);
+ }
+ chk->data->m_data += SCTP_MIN_OVERHEAD;
+
+ /* setup chunk parameters */
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->whoTo = asoc->primary_destination;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+ ch->chunk_type = SCTP_STREAM_RESET;
+ ch->chunk_flags = 0;
+ ch->chunk_length = htons(chk->send_size);
+ chk->data->m_pkthdr.len = chk->data->m_len = SCTP_SIZE32(chk->send_size);
+
+ seq = stcb->asoc.str_reset_seq_out;
+ if (send_out_req) {
+ sctp_add_stream_reset_out(chk, number_entries, list,
+ seq, resp_seq, (stcb->asoc.sending_seq - 1));
+ asoc->stream_reset_out_is_outstanding = 1;
+ seq++;
+ asoc->stream_reset_outstanding++;
+ }
+ if (send_in_req) {
+ sctp_add_stream_reset_in(chk, number_entries, list, seq);
+ asoc->stream_reset_outstanding++;
+ }
+ if (send_tsn_req) {
+ sctp_add_stream_reset_tsn(chk, seq);
+ asoc->stream_reset_outstanding++;
+ }
+ asoc->str_reset = chk;
+
+ /* insert the chunk for sending */
+ TAILQ_INSERT_TAIL(&asoc->control_send_queue,
+ chk,
+ sctp_next);
+ asoc->ctrl_queue_cnt++;
+ sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
+ return (0);
+}
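+
+/*
+ * Sequence numbering (sketch): with str_reset_seq_out == N, an OUT
+ * request uses seq N and a simultaneous IN request then uses N + 1;
+ * stream_reset_outstanding counts the pending requests, which is why
+ * a second sctp_send_str_reset_req() returns EBUSY until the response
+ * clears them.
+ */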
+
+void
+sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
+ struct mbuf *err_cause)
+{
+ /*
+ * Formulate the abort message, and send it back down.
+ */
+ struct mbuf *mout;
+ struct sctp_abort_msg *abm;
+ struct ip *iph, *iph_out;
+ struct ip6_hdr *ip6, *ip6_out;
+ int iphlen_out;
+
+ /* don't respond to ABORT with ABORT */
+ if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
+ if (err_cause)
+ sctp_m_freem(err_cause);
+ return;
+ }
+ mout = sctp_get_mbuf_for_msg((sizeof(struct ip6_hdr) + sizeof(struct sctp_abort_msg)),
+ 1, M_DONTWAIT, 1, MT_HEADER);
+ if (mout == NULL) {
+ if (err_cause)
+ sctp_m_freem(err_cause);
+ return;
+ }
+ iph = mtod(m, struct ip *);
+ iph_out = NULL;
+ ip6_out = NULL;
+ if (iph->ip_v == IPVERSION) {
+ iph_out = mtod(mout, struct ip *);
+ mout->m_len = sizeof(*iph_out) + sizeof(*abm);
+ mout->m_next = err_cause;
+
+ /* Fill in the IP header for the ABORT */
+ iph_out->ip_v = IPVERSION;
+ iph_out->ip_hl = (sizeof(struct ip) / 4);
+ iph_out->ip_tos = (u_char)0;
+ iph_out->ip_id = 0;
+ iph_out->ip_off = 0;
+ iph_out->ip_ttl = MAXTTL;
+ iph_out->ip_p = IPPROTO_SCTP;
+ iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
+ iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
+ /* let IP layer calculate this */
+ iph_out->ip_sum = 0;
+
+ iphlen_out = sizeof(*iph_out);
+ abm = (struct sctp_abort_msg *)((caddr_t)iph_out + iphlen_out);
+ } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
+ ip6 = (struct ip6_hdr *)iph;
+ ip6_out = mtod(mout, struct ip6_hdr *);
+ mout->m_len = sizeof(*ip6_out) + sizeof(*abm);
+ mout->m_next = err_cause;
+
+ /* Fill in the IP6 header for the ABORT */
+ ip6_out->ip6_flow = ip6->ip6_flow;
+ ip6_out->ip6_hlim = ip6_defhlim;
+ ip6_out->ip6_nxt = IPPROTO_SCTP;
+ ip6_out->ip6_src = ip6->ip6_dst;
+ ip6_out->ip6_dst = ip6->ip6_src;
+
+ iphlen_out = sizeof(*ip6_out);
+ abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out);
+ } else {
+ /* Currently not supported; don't leak the mbufs. */
+ sctp_m_freem(mout);
+ if (err_cause)
+ sctp_m_freem(err_cause);
+ return;
+ }
+
+ abm->sh.src_port = sh->dest_port;
+ abm->sh.dest_port = sh->src_port;
+ abm->sh.checksum = 0;
+ if (vtag == 0) {
+ abm->sh.v_tag = sh->v_tag;
+ abm->msg.ch.chunk_flags = SCTP_HAD_NO_TCB;
+ } else {
+ abm->sh.v_tag = htonl(vtag);
+ abm->msg.ch.chunk_flags = 0;
+ }
+ abm->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION;
+
+ if (err_cause) {
+ struct mbuf *m_tmp = err_cause;
+ int err_len = 0;
+
+ /* get length of the err_cause chain */
+ while (m_tmp != NULL) {
+ err_len += m_tmp->m_len;
+ m_tmp = m_tmp->m_next;
+ }
+ mout->m_pkthdr.len = mout->m_len + err_len;
+ if (err_len % 4) {
+ /* need pad at end of chunk */
+ uint32_t cpthis = 0;
+ int padlen;
+
+ padlen = 4 - (mout->m_pkthdr.len % 4);
+ m_copyback(mout, mout->m_pkthdr.len, padlen, (caddr_t)&cpthis);
+ }
+ abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch) + err_len);
+ } else {
+ mout->m_pkthdr.len = mout->m_len;
+ abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch));
+ }
+
+ /* add checksum */
+ if ((sctp_no_csum_on_loopback) &&
+ (m->m_pkthdr.rcvif) &&
+ (m->m_pkthdr.rcvif->if_type == IFT_LOOP)) {
+ abm->sh.checksum = 0;
+ } else {
+ abm->sh.checksum = sctp_calculate_sum(mout, NULL, iphlen_out);
+ }
+
+ /* zap the rcvif, it should be null */
+ mout->m_pkthdr.rcvif = 0;
+ if (iph_out != NULL) {
+ struct route ro;
+
+ /* zap the stack pointer to the route */
+ bzero(&ro, sizeof ro);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
+ printf("sctp_send_abort calling ip_output:\n");
+ sctp_print_address_pkt(iph_out, &abm->sh);
+ }
+#endif
+ /* set IPv4 length */
+ iph_out->ip_len = mout->m_pkthdr.len;
+ /* out it goes */
+ (void)ip_output(mout, 0, &ro, IP_RAWOUTPUT, NULL, NULL);
+ /* Free the route if we got one back */
+ if (ro.ro_rt)
+ RTFREE(ro.ro_rt);
+ } else if (ip6_out != NULL) {
+ struct route_in6 ro;
+
+
+ /* zap the stack pointer to the route */
+ bzero(&ro, sizeof(ro));
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
+ printf("sctp_send_abort calling ip6_output:\n");
+ sctp_print_address_pkt((struct ip *)ip6_out, &abm->sh);
+ }
+#endif
+ ip6_output(mout, NULL, &ro, 0, NULL, NULL, NULL);
+ /* Free the route if we got one back */
+ if (ro.ro_rt)
+ RTFREE(ro.ro_rt);
+ }
+ SCTP_STAT_INCR(sctps_sendpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
+}
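+
+/*
+ * Verification tag handling above: vtag == 0 means the caller had no
+ * TCB, so the inbound packet's tag is reflected and SCTP_HAD_NO_TCB
+ * (the T bit) is set; a non-zero vtag is our own and goes out with
+ * chunk_flags 0.
+ */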
+
+void
+sctp_send_operr_to(struct mbuf *m, int iphlen,
+ struct mbuf *scm,
+ uint32_t vtag)
+{
+ struct sctphdr *ihdr;
+ struct sctphdr *ohdr;
+ struct sctp_chunkhdr *ophdr;
+
+ struct ip *iph;
+
+#ifdef SCTP_DEBUG
+ struct sockaddr_in6 lsa6, fsa6;
+
+#endif
+ uint32_t val;
+
+ iph = mtod(m, struct ip *);
+ ihdr = (struct sctphdr *)((caddr_t)iph + iphlen);
+ if (!(scm->m_flags & M_PKTHDR)) {
+ /* must be a pkthdr */
+ printf("Huh, not a packet header in send_operr\n");
+ sctp_m_freem(scm);
+ return;
+ }
+ M_PREPEND(scm, (sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr)), M_DONTWAIT);
+ if (scm == NULL) {
+ /* can't send because we can't add a mbuf */
+ return;
+ }
+ ohdr = mtod(scm, struct sctphdr *);
+ ohdr->src_port = ihdr->dest_port;
+ ohdr->dest_port = ihdr->src_port;
+ ohdr->v_tag = vtag;
+ ohdr->checksum = 0;
+ ophdr = (struct sctp_chunkhdr *)(ohdr + 1);
+ ophdr->chunk_type = SCTP_OPERATION_ERROR;
+ ophdr->chunk_flags = 0;
+ ophdr->chunk_length = htons(scm->m_pkthdr.len - sizeof(struct sctphdr));
+ if (scm->m_pkthdr.len % 4) {
+ /* need padding */
+ uint32_t cpthis = 0;
+ int padlen;
+
+ padlen = 4 - (scm->m_pkthdr.len % 4);
+ m_copyback(scm, scm->m_pkthdr.len, padlen, (caddr_t)&cpthis);
+ }
+ if ((sctp_no_csum_on_loopback) &&
+ (m->m_pkthdr.rcvif) &&
+ (m->m_pkthdr.rcvif->if_type == IFT_LOOP)) {
+ val = 0;
+ } else {
+ val = sctp_calculate_sum(scm, NULL, 0);
+ }
+ ohdr->checksum = val;
+ if (iph->ip_v == IPVERSION) {
+ /* V4 */
+ struct ip *out;
+ struct route ro;
+
+ M_PREPEND(scm, sizeof(struct ip), M_DONTWAIT);
+ if (scm == NULL)
+ return;
+ bzero(&ro, sizeof ro);
+ out = mtod(scm, struct ip *);
+ out->ip_v = iph->ip_v;
+ out->ip_hl = (sizeof(struct ip) / 4);
+ out->ip_tos = iph->ip_tos;
+ out->ip_id = iph->ip_id;
+ out->ip_off = 0;
+ out->ip_ttl = MAXTTL;
+ out->ip_p = IPPROTO_SCTP;
+ out->ip_sum = 0;
+ out->ip_src = iph->ip_dst;
+ out->ip_dst = iph->ip_src;
+ out->ip_len = scm->m_pkthdr.len;
+ (void)ip_output(scm, 0, &ro, IP_RAWOUTPUT, NULL, NULL);
+ SCTP_STAT_INCR(sctps_sendpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
+ /* Free the route if we got one back */
+ if (ro.ro_rt)
+ RTFREE(ro.ro_rt);
+ } else {
+ /* V6 */
+ struct route_in6 ro;
+
+ struct ip6_hdr *out6, *in6;
+
+ M_PREPEND(scm, sizeof(struct ip6_hdr), M_DONTWAIT);
+ if (scm == NULL)
+ return;
+ bzero(&ro, sizeof ro);
+ in6 = mtod(m, struct ip6_hdr *);
+ out6 = mtod(scm, struct ip6_hdr *);
+ out6->ip6_flow = in6->ip6_flow;
+ out6->ip6_hlim = ip6_defhlim;
+ out6->ip6_nxt = IPPROTO_SCTP;
+ out6->ip6_src = in6->ip6_dst;
+ out6->ip6_dst = in6->ip6_src;
+
+#ifdef SCTP_DEBUG
+ bzero(&lsa6, sizeof(lsa6));
+ lsa6.sin6_len = sizeof(lsa6);
+ lsa6.sin6_family = AF_INET6;
+ lsa6.sin6_addr = out6->ip6_src;
+ bzero(&fsa6, sizeof(fsa6));
+ fsa6.sin6_len = sizeof(fsa6);
+ fsa6.sin6_family = AF_INET6;
+ fsa6.sin6_addr = out6->ip6_dst;
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
+ printf("sctp_operr_to calling ipv6 output:\n");
+ printf("src: ");
+ sctp_print_address((struct sockaddr *)&lsa6);
+ printf("dst ");
+ sctp_print_address((struct sockaddr *)&fsa6);
+ }
+#endif /* SCTP_DEBUG */
+ ip6_output(scm, NULL, &ro, 0, NULL, NULL, NULL);
+ SCTP_STAT_INCR(sctps_sendpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
+ /* Free the route if we got one back */
+ if (ro.ro_rt)
+ RTFREE(ro.ro_rt);
+ }
+}
+
+
+
+static struct mbuf *
+sctp_copy_resume(struct sctp_stream_queue_pending *sp,
+ struct uio *uio,
+ struct sctp_sndrcvinfo *srcv,
+ int max_send_len,
+ int user_marks_eor,
+ int *error,
+ uint32_t * sndout,
+ struct mbuf **new_tail)
+{
+ int left, cancpy, willcpy, need_hdr = 0;
+ struct mbuf *m, *prev, *head;
+
+ left = min(uio->uio_resid, max_send_len);
+ /* Always get a header just in case */
+ need_hdr = 1;
+
+ head = sctp_get_mbuf_for_msg(left, need_hdr, M_WAIT, 0, MT_DATA);
+ if (head == NULL) {
+ /* even with M_WAIT the allocation can fail */
+ *error = ENOMEM;
+ return (NULL);
+ }
+ cancpy = M_TRAILINGSPACE(head);
+ willcpy = min(cancpy, left);
+ *error = uiomove(mtod(head, caddr_t), willcpy, uio);
+ if (*error) {
+ sctp_m_freem(head);
+ return (NULL);
+ }
+ *sndout += willcpy;
+ left -= willcpy;
+ head->m_len = willcpy;
+ m = head;
+ *new_tail = head;
+ while (left > 0) {
+ /* move in user data */
+ m->m_next = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 0, MT_DATA);
+ if (m->m_next == NULL) {
+ sctp_m_freem(head);
+ *new_tail = NULL;
+ *error = ENOMEM;
+ return (NULL);
+ }
+ prev = m;
+ m = m->m_next;
+ cancpy = M_TRAILINGSPACE(m);
+ willcpy = min(cancpy, left);
+ *error = uiomove(mtod(m, caddr_t), willcpy, uio);
+ if (*error) {
+ sctp_m_freem(head);
+ /* keep the error uiomove() gave us; don't clobber it */
+ *new_tail = NULL;
+ return (NULL);
+ }
+ m->m_len = willcpy;
+ left -= willcpy;
+ *sndout += willcpy;
+ *new_tail = m;
+ if (left == 0) {
+ m->m_next = NULL;
+ }
+ }
+ return (head);
+}
+
+static int
+sctp_copy_one(struct sctp_stream_queue_pending *sp,
+ struct uio *uio,
+ int resv_upfront)
+{
+ int left, cancpy, willcpy, error;
+ struct mbuf *m, *head;
+ int cpsz = 0;
+
+ /* First one gets a header */
+ left = sp->length;
+ head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 1, M_WAIT, 0, MT_DATA);
+ if (m == NULL) {
+ return (ENOMEM);
+ }
+ /*
+ * Set up this first mbuf now; that way, if a later allocation
+ * fails, we won't have a bad count.
+ */
+ m->m_data += resv_upfront;
+ cancpy = M_TRAILINGSPACE(m);
+ willcpy = min(cancpy, left);
+ while (left > 0) {
+ /* move in user data */
+ error = uiomove(mtod(m, caddr_t), willcpy, uio);
+ if (error) {
+ sctp_m_freem(head);
+ return (error);
+ }
+ m->m_len = willcpy;
+ left -= willcpy;
+ cpsz += willcpy;
+ if (left > 0) {
+ m->m_next = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 0, MT_DATA);
+ if (m->m_next == NULL) {
+ /*
+ * the head goes back to caller, he can free
+ * the rest
+ */
+ sctp_m_freem(head);
+ return (ENOMEM);
+ }
+ m = m->m_next;
+ cancpy = M_TRAILINGSPACE(m);
+ willcpy = min(cancpy, left);
+ } else {
+ sp->tail_mbuf = m;
+ m->m_next = NULL;
+ }
+ }
+ sp->data = head;
+ sp->length = cpsz;
+ return (0);
+}
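+
+/*
+ * Note on the copy loops above: sctp_copy_one() reserves resv_upfront
+ * bytes (room for the DATA chunk header) at the front of the first
+ * mbuf, so the header can later be prepended in place without another
+ * allocation; sctp_copy_resume() continues a partially copied message,
+ * returning the new chain and its tail so the caller can splice it
+ * onto sp->data.
+ */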
+
+
+
+static struct sctp_stream_queue_pending *
+sctp_copy_it_in(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_sndrcvinfo *srcv,
+ struct uio *uio,
+ struct sctp_nets *net,
+ int max_send_len,
+ int user_marks_eor,
+ int *errno,
+ int non_blocking)
+{
+ /*
+ * This routine must be very careful in its work. Protocol
+ * processing is up and running so care must be taken to spl...()
+ * when you need to do something that may effect the stcb/asoc. The
+ * sb is locked however. When data is copied the protocol processing
+ * should be enabled since this is a slower operation...
+ */
+ struct sctp_stream_queue_pending *sp = NULL;
+ int resv_in_first;
+
+ *errno = 0;
+ /*
+ * Unless explicit-EOR mode is on, we must make a send FIT in one call.
+ */
+ if (((user_marks_eor == 0) && non_blocking) &&
+ (uio->uio_resid > stcb->sctp_socket->so_snd.sb_hiwat)) {
+ /* It will NEVER fit */
+ *errno = EMSGSIZE;
+ goto out_now;
+ }
+ /* Now can we send this? */
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
+ (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
+ /* got data while shutting down */
+ *errno = ECONNRESET;
+ goto out_now;
+ }
+ sp = (struct sctp_stream_queue_pending *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_strmoq);
+ if (sp == NULL) {
+ *errno = ENOMEM;
+ goto out_now;
+ }
+ SCTP_INCR_STRMOQ_COUNT();
+ sp->act_flags = 0;
+ sp->sinfo_flags = srcv->sinfo_flags;
+ sp->timetolive = srcv->sinfo_timetolive;
+ sp->ppid = srcv->sinfo_ppid;
+ sp->context = srcv->sinfo_context;
+ sp->strseq = 0;
+ SCTP_GETTIME_TIMEVAL(&sp->ts);
+
+ sp->stream = srcv->sinfo_stream;
+ sp->length = min(uio->uio_resid, max_send_len);
+ if ((sp->length == uio->uio_resid) &&
+ ((user_marks_eor == 0) ||
+ (srcv->sinfo_flags & SCTP_EOF) ||
+ (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))
+ ) {
+ sp->msg_is_complete = 1;
+ } else {
+ sp->msg_is_complete = 0;
+ }
+ sp->some_taken = 0;
+ resv_in_first = sizeof(struct sctp_data_chunk);
+ sp->data = sp->tail_mbuf = NULL;
+ *errno = sctp_copy_one(sp, uio, resv_in_first);
+ if (*errno) {
+ sctp_free_a_strmoq(stcb, sp);
+ sp->data = NULL;
+ sp->net = NULL;
+ sp = NULL;
+ } else {
+ if (sp->sinfo_flags & SCTP_ADDR_OVER) {
+ sp->net = net;
+ sp->addr_over = 1;
+ } else {
+ sp->net = asoc->primary_destination;
+ sp->addr_over = 0;
+ }
+ atomic_add_int(&sp->net->ref_count, 1);
+ sp->data->m_pkthdr.len = sp->length;
+ sctp_set_prsctp_policy(stcb, sp);
+ }
+out_now:
+ return (sp);
+}
+
+
+int
+sctp_sosend(struct socket *so,
+ struct sockaddr *addr,
+ struct uio *uio,
+ struct mbuf *top,
+ struct mbuf *control,
+ int flags,
+ struct thread *p)
+{
+ struct sctp_inpcb *inp;
+ int s, error, use_rcvinfo = 0;
+ struct sctp_sndrcvinfo srcv;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ s = splnet();
+ if (control) {
+ /* process cmsg snd/rcv info (maybe an assoc-id) */
+ if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control,
+ sizeof(srcv))) {
+ /* got one */
+ use_rcvinfo = 1;
+ }
+ }
+ error = sctp_lower_sosend(so, addr, uio, top, control, flags,
+ use_rcvinfo, &srcv, p);
+ splx(s);
+ return (error);
+}
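+
+/*
+ * Userland sketch of the cmsg path handled above (illustrative only;
+ * names per the SCTP sockets API draft): a sender can attach a struct
+ * sctp_sndrcvinfo to a message, which sctp_find_cmsg() then digs out:
+ *
+ *	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
+ *	struct cmsghdr *cmh = (struct cmsghdr *)cbuf;
+ *	struct sctp_sndrcvinfo *sinfo;
+ *
+ *	cmh->cmsg_level = IPPROTO_SCTP;
+ *	cmh->cmsg_type = SCTP_SNDRCV;
+ *	cmh->cmsg_len = CMSG_LEN(sizeof(*sinfo));
+ *	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
+ *	memset(sinfo, 0, sizeof(*sinfo));
+ *	sinfo->sinfo_stream = 1;
+ *	sinfo->sinfo_ppid = htonl(42);
+ *	... then sendmsg(2) with msg_control = cbuf ...
+ */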
+
+
+extern unsigned int sctp_add_more_threshold;
+int
+sctp_lower_sosend(struct socket *so,
+ struct sockaddr *addr,
+ struct uio *uio,
+ struct mbuf *top,
+ struct mbuf *control,
+ int flags,
+ int use_rcvinfo,
+ struct sctp_sndrcvinfo *srcv,
+ struct thread *p)
+{
+ unsigned int sndlen, max_len;
+ int error, len;
+ int s, queue_only = 0, queue_only_for_init = 0;
+ int free_cnt_applied = 0;
+ int un_sent = 0;
+ int now_filled = 0;
+ struct sctp_block_entry be;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb = NULL;
+ struct timeval now;
+ struct sctp_nets *net;
+ struct sctp_association *asoc;
+ struct sctp_inpcb *t_inp;
+ int create_lock_applied = 0;
+ int nagle_applies = 0;
+ int some_on_control = 0;
+ int got_all_of_the_send = 0;
+ int hold_tcblock = 0;
+ int non_blocking = 0;
+
+ error = 0;
+ net = NULL;
+ stcb = NULL;
+ asoc = NULL;
+ t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
+ if (uio)
+ sndlen = uio->uio_resid;
+ else
+ sndlen = top->m_pkthdr.len;
+
+ s = splnet();
+ hold_tcblock = 0;
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (inp->sctp_socket->so_qlimit)) {
+ /* The listener can NOT send */
+ error = EFAULT;
+ splx(s);
+ goto out_unlocked;
+ }
+ if ((use_rcvinfo) && srcv) {
+ if (srcv->sinfo_flags & SCTP_SENDALL) {
+ /* it's a sendall */
+ error = sctp_sendall(inp, uio, top, srcv);
+ top = NULL;
+ splx(s);
+ goto out_unlocked;
+ }
+ }
+ /* now we must find the assoc */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ SCTP_INP_RUNLOCK(inp);
+ error = ENOTCONN;
+ splx(s);
+ goto out_unlocked;
+ }
+ hold_tcblock = 0;
+ SCTP_INP_RUNLOCK(inp);
+ if (addr)
+ /* Must locate the net structure if addr given */
+ net = sctp_findnet(stcb, addr);
+ else
+ net = stcb->asoc.primary_destination;
+
+ } else if (use_rcvinfo && srcv && srcv->sinfo_assoc_id) {
+ stcb = sctp_findassociation_ep_asocid(inp, srcv->sinfo_assoc_id, 0);
+ if (stcb) {
+ if (addr)
+ /*
+ * Must locate the net structure if addr
+ * given
+ */
+ net = sctp_findnet(stcb, addr);
+ else
+ net = stcb->asoc.primary_destination;
+ }
+ hold_tcblock = 0;
+ } else if (addr) {
+ /*
+ * Since we did not use findep we must increment it, and if
+ * we don't find a tcb decrement it.
+ */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_INCR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ } else {
+ hold_tcblock = 1;
+ }
+ }
+ if ((stcb == NULL) && (addr)) {
+ /* Possible implicit send? */
+ SCTP_ASOC_CREATE_LOCK(inp);
+ create_lock_applied = 1;
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+ /* Should I really unlock ? */
+ error = EFAULT;
+ splx(s);
+ goto out_unlocked;
+
+ }
+ if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
+ (addr->sa_family == AF_INET6)) {
+ error = EINVAL;
+ splx(s);
+ goto out_unlocked;
+ }
+ }
+ if (stcb == NULL) {
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+ error = ENOTCONN;
+ splx(s);
+ goto out_unlocked;
+ } else if (addr == NULL) {
+ error = ENOENT;
+ splx(s);
+ goto out_unlocked;
+ } else {
+ /*
+ * UDP style, we must go ahead and start the INIT
+ * process
+ */
+ if ((use_rcvinfo) && (srcv) &&
+ (srcv->sinfo_flags & SCTP_ABORT)) {
+ /* User asks to abort a non-existent asoc */
+ error = ENOENT;
+ splx(s);
+ goto out_unlocked;
+ }
+ /* get an asoc/stcb struct */
+ stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0);
+ if (stcb == NULL) {
+ /* Error is setup for us in the call */
+ splx(s);
+ goto out_unlocked;
+ }
+ if (create_lock_applied) {
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ create_lock_applied = 0;
+ } else {
+ printf("Huh-3? create lock should have been on??\n");
+ }
+ /*
+ * Turn on queue only flag to prevent data from
+ * being sent
+ */
+ queue_only = 1;
+ asoc = &stcb->asoc;
+ asoc->state = SCTP_STATE_COOKIE_WAIT;
+ SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
+
+ /* initialize authentication params for the assoc */
+ sctp_initialize_auth_params(inp, stcb);
+
+ if (control) {
+ /*
+ * see if a init structure exists in cmsg
+ * headers
+ */
+ struct sctp_initmsg initm;
+ int i;
+
+ if (sctp_find_cmsg(SCTP_INIT, (void *)&initm, control,
+ sizeof(initm))) {
+ /*
+ * we have an INIT override of the
+ * default
+ */
+ if (initm.sinit_max_attempts)
+ asoc->max_init_times = initm.sinit_max_attempts;
+ if (initm.sinit_num_ostreams)
+ asoc->pre_open_streams = initm.sinit_num_ostreams;
+ if (initm.sinit_max_instreams)
+ asoc->max_inbound_streams = initm.sinit_max_instreams;
+ if (initm.sinit_max_init_timeo)
+ asoc->initial_init_rto_max = initm.sinit_max_init_timeo;
+ if (asoc->streamoutcnt < asoc->pre_open_streams) {
+ /* Default is NOT correct */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("Ok, defout:%d pre_open:%d\n",
+ asoc->streamoutcnt, asoc->pre_open_streams);
+ }
+#endif
+ SCTP_FREE(asoc->strmout);
+ asoc->strmout = NULL;
+ asoc->streamoutcnt = asoc->pre_open_streams;
+ /*
+ * What happens if this
+ * fails? .. we panic ...
+ */
+ {
+ struct sctp_stream_out *tmp_str;
+ int had_lock = 0;
+
+ if (hold_tcblock) {
+ had_lock = 1;
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_MALLOC(tmp_str,
+ struct sctp_stream_out *,
+ asoc->streamoutcnt *
+ sizeof(struct sctp_stream_out),
+ "StreamsOut");
+ if (had_lock) {
+ SCTP_TCB_LOCK(stcb);
+ }
+ asoc->strmout = tmp_str;
+ }
+ for (i = 0; i < asoc->streamoutcnt; i++) {
+ /*
+ * The inbound side must be set to 0xffff. Also
+ * NOTE: when we get the INIT-ACK back (for the
+ * INIT sender) we MUST reduce the count
+ * (streamoutcnt), but first check if we sent to
+ * any of the upper streams that were dropped
+ * (if some were). Those that were dropped must
+ * be notified to the upper layer as failed to
+ * send.
+ */
+ asoc->strmout[i].next_sequence_sent = 0x0;
+ TAILQ_INIT(&asoc->strmout[i].outqueue);
+ asoc->strmout[i].stream_no = i;
+ asoc->strmout[i].last_msg_incomplete = 0;
+ asoc->strmout[i].next_spoke.tqe_next = 0;
+ asoc->strmout[i].next_spoke.tqe_prev = 0;
+ }
+ }
+ }
+ }
+ hold_tcblock = 1;
+ /* out with the INIT */
+ queue_only_for_init = 1;
+ /*
+ * we may want to dig in after this call and adjust
+ * the MTU value. It defaulted to 1500 (constant)
+ * but the ro structure may now have an update and
+ * thus we may need to change it BEFORE we append
+ * the message.
+ */
+ net = stcb->asoc.primary_destination;
+ asoc = &stcb->asoc;
+ }
+ }
+ if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) {
+ non_blocking = 1;
+ }
+ asoc = &stcb->asoc;
+ /* would we block? */
+ if (non_blocking) {
+ if ((so->so_snd.sb_hiwat <
+ (sndlen + stcb->asoc.total_output_queue_size)) ||
+ (stcb->asoc.chunks_on_out_queue >
+ sctp_max_chunks_on_queue)) {
+ error = EWOULDBLOCK;
+ splx(s);
+ goto out_unlocked;
+ }
+ }
+ /* Keep the stcb from being freed under our feet */
+ atomic_add_16(&stcb->asoc.refcnt, 1);
+ free_cnt_applied = 1;
+
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ error = ECONNRESET;
+ goto out_unlocked;
+ }
+ if (create_lock_applied) {
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ create_lock_applied = 0;
+ }
+ if (asoc->stream_reset_outstanding) {
+ /*
+ * Can't queue any data while stream reset is underway.
+ */
+ error = EAGAIN;
+ goto out_unlocked;
+ }
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
+ queue_only = 1;
+ }
+ if ((use_rcvinfo == 0) || (srcv == NULL)) {
+ /* Grab the default stuff from the asoc */
+ srcv = &stcb->asoc.def_send;
+ }
+ /* we are now done with all control */
+ if (control) {
+ sctp_m_freem(control);
+ control = NULL;
+ }
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
+ (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
+ if ((use_rcvinfo) &&
+ (srcv->sinfo_flags & SCTP_ABORT)) {
+ ;
+ } else {
+ error = ECONNRESET;
+ splx(s);
+ goto out_unlocked;
+ }
+ }
+ /* Ok, we will attempt a msgsnd :> */
+ if (p) {
+ p->td_proc->p_stats->p_ru.ru_msgsnd++;
+ }
+ if (stcb) {
+ if (net && ((srcv->sinfo_flags & SCTP_ADDR_OVER))) {
+ /* we take the override or the unconfirmed */
+ ;
+ } else {
+ net = stcb->asoc.primary_destination;
+ }
+ }
+ if ((net->flight_size > net->cwnd) && (sctp_cmt_on_off == 0)) {
+ /*
+ * CMT: Added check for CMT above. net above is the primary
+ * dest. If CMT is ON, sender should always attempt to send
+ * with the output routine sctp_fill_outqueue() that loops
+ * through all destination addresses. Therefore, if CMT is
+ * ON, queue_only is NOT set to 1 here, so that
+ * sctp_chunk_output() can be called below.
+ */
+ queue_only = 1;
+
+ } else if (asoc->ifp_had_enobuf) {
+ SCTP_STAT_INCR(sctps_ifnomemqueued);
+ if (net->flight_size > (net->mtu * 2))
+ queue_only = 1;
+ asoc->ifp_had_enobuf = 0;
+ } else {
+ un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
+ ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk)));
+ }
+ /* Are we aborting? */
+ if (srcv->sinfo_flags & SCTP_ABORT) {
+ struct mbuf *mm;
+ int tot_demand, tot_out, max;
+
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
+ /* It has to be up before we abort. */
+ error = EINVAL;
+ goto out;
+ }
+ if (hold_tcblock) {
+ SCTP_TCB_UNLOCK(stcb);
+ hold_tcblock = 0;
+ }
+ if (top) {
+ mm = sctp_get_mbuf_for_msg(1, 1, M_WAIT, 1, MT_DATA);
+ if (top->m_flags & M_PKTHDR)
+ tot_out = top->m_pkthdr.len;
+ else {
+ struct mbuf *cntm;
+
+ tot_out = 0;
+ cntm = top;
+ while (cntm) {
+ tot_out += cntm->m_len;
+ cntm = cntm->m_next;
+ }
+ }
+ tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
+ } else {
+ /* Must fit in a MTU */
+ tot_out = uio->uio_resid;
+ tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
+ mm = sctp_get_mbuf_for_msg(tot_demand, 1, M_WAIT, 1, MT_DATA);
+ }
+ if (mm == NULL) {
+ error = ENOMEM;
+ goto out;
+ }
+ max = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
+ max -= sizeof(struct sctp_abort_msg);
+ if (tot_out > max) {
+ tot_out = max;
+ }
+ if (mm) {
+ struct sctp_paramhdr *ph;
+
+ /* now move forward the data pointer */
+ ph = mtod(mm, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out));
+ ph++;
+ mm->m_pkthdr.len = tot_out + sizeof(struct sctp_paramhdr);
+ mm->m_len = mm->m_pkthdr.len;
+ if (top == NULL) {
+ error = uiomove((caddr_t)ph, (int)tot_out, uio);
+ if (error) {
+ /*
+ * If we can't copy the user's data we
+ * still abort; we just don't get to
+ * send the user's note. :-0
+ */
+ sctp_m_freem(mm);
+ mm = NULL;
+ }
+ } else {
+ mm->m_next = top;
+ }
+ }
+ if (hold_tcblock == 0) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ }
+ atomic_add_16(&stcb->asoc.refcnt, -1);
+ free_cnt_applied = 0;
+ /* release this lock, otherwise we hang on ourselves */
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_RESPONSE_TO_USER_REQ,
+ mm);
+ /* the association is gone now; do not touch the stcb again */
+ hold_tcblock = 0;
+ stcb = NULL;
+ goto out_unlocked;
+ }
+ /* Calculate the maximum we can send */
+ if (so->so_snd.sb_hiwat > stcb->asoc.total_output_queue_size) {
+ max_len = so->so_snd.sb_hiwat - stcb->asoc.total_output_queue_size;
+ } else {
+ max_len = 0;
+ }
+ if (hold_tcblock) {
+ SCTP_TCB_UNLOCK(stcb);
+ hold_tcblock = 0;
+ }
+ splx(s);
+ /* Is the stream no. valid? */
+ if (srcv->sinfo_stream >= asoc->streamoutcnt) {
+ /* Invalid stream number */
+ error = EINVAL;
+ goto out_unlocked;
+ }
+ if (asoc->strmout == NULL) {
+ /* huh? software error */
+ error = EFAULT;
+ goto out_unlocked;
+ }
+ len = 0;
+ if (max_len < sctp_add_more_threshold) {
+ /* No room right now! */
+ SOCKBUF_LOCK(&so->so_snd);
+ while (so->so_snd.sb_hiwat < (stcb->asoc.total_output_queue_size + sctp_add_more_threshold)) {
+#ifdef SCTP_BLK_LOGGING
+ sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA,
+ so, asoc, uio->uio_resid);
+#endif
+ be.error = 0;
+ stcb->block_entry = &be;
+ error = sbwait(&so->so_snd);
+ stcb->block_entry = NULL;
+ if (error || so->so_error || be.error) {
+ if (error == 0) {
+ if (so->so_error)
+ error = so->so_error;
+ if (be.error) {
+ error = be.error;
+ }
+ }
+ SOCKBUF_UNLOCK(&so->so_snd);
+ goto out_unlocked;
+ }
+#ifdef SCTP_BLK_LOGGING
+ sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
+ so, asoc, stcb->asoc.total_output_queue_size);
+#endif
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ goto out_unlocked;
+ }
+ }
+ if (so->so_snd.sb_hiwat > stcb->asoc.total_output_queue_size) {
+ max_len = so->so_snd.sb_hiwat - stcb->asoc.total_output_queue_size;
+ } else {
+ max_len = 0;
+ }
+ SOCKBUF_UNLOCK(&so->so_snd);
+ }
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ goto out_unlocked;
+ }
+ if (top == NULL) {
+ struct sctp_stream_queue_pending *sp;
+ struct sctp_stream_out *strm;
+ uint32_t sndout, initial_out;
+ int user_marks_eor;
+
+ if (uio->uio_resid == 0) {
+ if (srcv->sinfo_flags & SCTP_EOF) {
+ got_all_of_the_send = 1;
+ goto dataless_eof;
+ } else {
+ error = EINVAL;
+ goto out;
+ }
+ }
+ initial_out = uio->uio_resid;
+
+ if ((asoc->stream_locked) &&
+ (asoc->stream_locked_on != srcv->sinfo_stream)) {
+ error = EAGAIN;
+ goto out;
+ }
+ strm = &stcb->asoc.strmout[srcv->sinfo_stream];
+ user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
+ if (strm->last_msg_incomplete == 0) {
+ sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error, non_blocking);
+ if ((sp == NULL) || (error)) {
+ goto out;
+ }
+ SCTP_TCB_SEND_LOCK(stcb);
+ if (sp->msg_is_complete) {
+ strm->last_msg_incomplete = 0;
+ asoc->stream_locked = 0;
+ } else {
+ /*
+ * Just got locked to this guy in case of an
+ * interrupt.
+ */
+ strm->last_msg_incomplete = 1;
+ asoc->stream_locked = 1;
+ asoc->stream_locked_on = srcv->sinfo_stream;
+ }
+ sctp_snd_sb_alloc(stcb, sp->length);
+
+ asoc->stream_queue_cnt++;
+ TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
+ if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
+ sp->strseq = strm->next_sequence_sent;
+ strm->next_sequence_sent++;
+ }
+ if ((strm->next_spoke.tqe_next == NULL) &&
+ (strm->next_spoke.tqe_prev == NULL)) {
+ /* Not on wheel, insert */
+ sctp_insert_on_wheel(stcb, asoc, strm, 1);
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ } else {
+ sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
+ }
+ while (uio->uio_resid > 0) {
+ /* How much room do we have? */
+ struct mbuf *new_tail, *mm;
+
+ if (so->so_snd.sb_hiwat > stcb->asoc.total_output_queue_size)
+ max_len = so->so_snd.sb_hiwat - stcb->asoc.total_output_queue_size;
+ else
+ max_len = 0;
+
+ if ((max_len > sctp_add_more_threshold) ||
+ (uio->uio_resid && (uio->uio_resid < max_len))) {
+ sndout = 0;
+ new_tail = NULL;
+ if (hold_tcblock) {
+ SCTP_TCB_UNLOCK(stcb);
+ hold_tcblock = 0;
+ }
+ mm = sctp_copy_resume(sp, uio, srcv, max_len, user_marks_eor, &error, &sndout, &new_tail);
+ if ((mm == NULL) || error) {
+ goto out;
+ }
+ /* Update the mbuf and count */
+ SCTP_TCB_SEND_LOCK(stcb);
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ /*
+ * we need to get out. Peer probably
+ * aborted.
+ */
+ sctp_m_freem(mm);
+ if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED)
+ error = ECONNRESET;
+ goto out;
+ }
+ if (sp->tail_mbuf) {
+ /* tack it to the end */
+ sp->tail_mbuf->m_next = mm;
+ sp->tail_mbuf = new_tail;
+ } else {
+ /* A stolen mbuf */
+ sp->data = mm;
+ sp->tail_mbuf = new_tail;
+ }
+ sctp_snd_sb_alloc(stcb, sndout);
+ sp->length += sndout;
+ len += sndout;
+ /* Did we reach EOR? */
+ if ((uio->uio_resid == 0) &&
+ ((user_marks_eor == 0) ||
+ (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))
+ ) {
+ sp->msg_is_complete = 1;
+ } else {
+ sp->msg_is_complete = 0;
+ }
+ if (sp->data->m_flags & M_PKTHDR) {
+ /* update length */
+ sp->data->m_pkthdr.len = sp->length;
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+ if (uio->uio_resid == 0) {
+ /* got it all? */
+ continue;
+ }
+ /* PR-SCTP? */
+ if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
+ /*
+ * This is ugly but we must assure locking
+ * order
+ */
+ if (hold_tcblock == 0) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ }
+ sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
+ if (so->so_snd.sb_hiwat > stcb->asoc.total_output_queue_size)
+ max_len = so->so_snd.sb_hiwat - stcb->asoc.total_output_queue_size;
+ else
+ max_len = 0;
+ if (max_len > 0) {
+ continue;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ hold_tcblock = 0;
+ }
+ /* wait for space now */
+ if (non_blocking) {
+ /* Non-blocking I/O: get out now. */
+ goto skip_out_eof;
+ }
+ if ((net->flight_size > net->cwnd) &&
+ (sctp_cmt_on_off == 0)) {
+ queue_only = 1;
+
+ } else if (asoc->ifp_had_enobuf) {
+ SCTP_STAT_INCR(sctps_ifnomemqueued);
+ if (net->flight_size > (net->mtu * 2)) {
+ queue_only = 1;
+ } else {
+ queue_only = 0;
+ }
+ asoc->ifp_had_enobuf = 0;
+ un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
+ ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) *
+ sizeof(struct sctp_data_chunk)));
+ } else {
+ un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
+ ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) *
+ sizeof(struct sctp_data_chunk)));
+ queue_only = 0;
+ }
+ if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
+ (stcb->asoc.total_flight > 0) &&
+ (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
+ ) {
+
+ /*
+ * Ok, Nagle is set on and we have data
+ * outstanding. Don't send anything and let
+ * SACKs drive out the data unless we have
+ * a "full" segment to send.
+ */
+#ifdef SCTP_NAGLE_LOGGING
+ sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
+#endif
+ SCTP_STAT_INCR(sctps_naglequeued);
+ nagle_applies = 1;
+ } else {
+#ifdef SCTP_NAGLE_LOGGING
+ if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
+ sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
+#endif
+ SCTP_STAT_INCR(sctps_naglesent);
+ nagle_applies = 0;
+ }
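+ /*
+ * Nagle example (illustrative): with smallest_mtu 1500, 4000
+ * bytes in flight and only un_sent = 200 bytes queued, the
+ * test above holds the send; the small chunk rides out later,
+ * either when SACKs empty the flight or when a full MTU's
+ * worth has accumulated.
+ */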
+ /* What about the INIT, send it maybe */
+#ifdef SCTP_BLK_LOGGING
+ sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, nagle_applies, un_sent);
+ sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size, stcb->asoc.total_flight,
+ stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
+#endif
+ if (queue_only_for_init) {
+ if (hold_tcblock == 0) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ }
+ sctp_send_initiate(inp, stcb);
+ queue_only_for_init = 0;
+ queue_only = 1;
+ SCTP_TCB_UNLOCK(stcb);
+ hold_tcblock = 0;
+ }
+ if ((queue_only == 0) && (nagle_applies == 0)
+ ) {
+ /*
+ * need to start chunk output before
+ * blocking.. note that if a lock is already
+ * applied, then the input via the net is
+ * happening and I don't need to start
+ * output :-D
+ */
+ if (hold_tcblock == 0) {
+ if (SCTP_TCB_TRYLOCK(stcb)) {
+ hold_tcblock = 1;
+ sctp_chunk_output(inp,
+ stcb,
+ SCTP_OUTPUT_FROM_USR_SEND);
+
+ }
+ } else {
+ sctp_chunk_output(inp,
+ stcb,
+ SCTP_OUTPUT_FROM_USR_SEND);
+ }
+ if (hold_tcblock == 1) {
+ SCTP_TCB_UNLOCK(stcb);
+ hold_tcblock = 0;
+ }
+ }
+ SOCKBUF_LOCK(&so->so_snd);
+ /*
+ * This is a bit strange, but I think it will work.
+ * The total_output_queue_size is locked and
+ * protected by the TCB_LOCK, which we just
+ * released. There is a race that can occur between
+ * releasing it above, and me getting the socket
+	 * lock, where SACKs come in but we have not put the
+ * SB_WAIT on the so_snd buffer to get the wakeup.
+ * After the LOCK is applied the sack_processing
+ * will also need to LOCK the so->so_snd to do the
+ * actual sowwakeup(). So once we have the socket
+ * buffer lock if we recheck the size we KNOW we
+ * will get to sleep safely with the wakeup flag in
+ * place.
+ */
+ if (so->so_snd.sb_hiwat < (stcb->asoc.total_output_queue_size + sctp_add_more_threshold)) {
+#ifdef SCTP_BLK_LOGGING
+ sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
+ so, asoc, uio->uio_resid);
+#endif
+ be.error = 0;
+ stcb->block_entry = &be;
+ error = sbwait(&so->so_snd);
+ stcb->block_entry = NULL;
+
+ if (error || so->so_error || be.error) {
+ if (error == 0) {
+ if (so->so_error)
+ error = so->so_error;
+ if (be.error) {
+ error = be.error;
+ }
+ }
+ SOCKBUF_UNLOCK(&so->so_snd);
+ goto out_unlocked;
+ }
+#ifdef SCTP_BLK_LOGGING
+ sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
+ so, asoc, stcb->asoc.total_output_queue_size);
+#endif
+ }
+ SOCKBUF_UNLOCK(&so->so_snd);
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ goto out_unlocked;
+ }
+ }
+ SCTP_TCB_SEND_LOCK(stcb);
+ if (sp->msg_is_complete == 0) {
+ strm->last_msg_incomplete = 1;
+ asoc->stream_locked = 1;
+ asoc->stream_locked_on = srcv->sinfo_stream;
+ } else {
+ strm->last_msg_incomplete = 0;
+ asoc->stream_locked = 0;
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ if (uio->uio_resid == 0) {
+ got_all_of_the_send = 1;
+ }
+ } else if (top) {
+ /* We send in a 0, since we do NOT have any locks */
+ error = sctp_msg_append(stcb, net, top, srcv, 0);
+ top = NULL;
+ }
+ if (error) {
+ goto out;
+ }
+dataless_eof:
+ /* EOF thing ? */
+ if ((srcv->sinfo_flags & SCTP_EOF) &&
+ (got_all_of_the_send == 1) &&
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)
+ ) {
+ error = 0;
+ if (hold_tcblock == 0) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ }
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if (asoc->locked_on_sending) {
+ goto abort_anyway;
+ }
+ /* there is nothing queued to send, so I'm done... */
+ if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ /* only send SHUTDOWN the first time through */
+ sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
+ asoc->state = SCTP_STATE_SHUTDOWN_SENT;
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ }
+ } else {
+ /*
+ * we still got (or just got) data to send, so set
+ * SHUTDOWN_PENDING
+ */
+ /*
+ * XXX sockets draft says that SCTP_EOF should be
+ * sent with no data. currently, we will allow user
+ * data to be sent first and move to
+ * SHUTDOWN-PENDING
+ */
+ if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ if (hold_tcblock == 0) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ }
+ if (asoc->locked_on_sending) {
+ /* Locked to send out the data */
+ struct sctp_stream_queue_pending *sp;
+
+ sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
+ if (sp) {
+ if ((sp->length == 0) && (sp->msg_is_complete == 0))
+ asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+ }
+ }
+ asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+ abort_anyway:
+ if (free_cnt_applied) {
+ atomic_add_16(&stcb->asoc.refcnt, -1);
+ free_cnt_applied = 0;
+ }
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_RESPONSE_TO_USER_REQ,
+ NULL);
+ /*
+ * now relock the stcb so everything
+ * is sane
+ */
+ hold_tcblock = 0;
+ stcb = NULL;
+ goto out;
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ }
+ }
+ }
+skip_out_eof:
+ if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
+ some_on_control = 1;
+ }
+ if ((net->flight_size > net->cwnd) &&
+ (sctp_cmt_on_off == 0)) {
+ queue_only = 1;
+ } else if (asoc->ifp_had_enobuf) {
+ SCTP_STAT_INCR(sctps_ifnomemqueued);
+ if (net->flight_size > (net->mtu * 2)) {
+ queue_only = 1;
+ } else {
+ queue_only = 0;
+ }
+ asoc->ifp_had_enobuf = 0;
+ un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
+ ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) *
+ sizeof(struct sctp_data_chunk)));
+ } else {
+ un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
+ ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) *
+ sizeof(struct sctp_data_chunk)));
+ queue_only = 0;
+ }
+ if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
+ (stcb->asoc.total_flight > 0) &&
+ (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
+ ) {
+
+ /*
+ * Ok, Nagle is set on and we have data outstanding. Don't
+	 * send anything and let SACKs drive out the data unless we
+ * have a "full" segment to send.
+ */
+#ifdef SCTP_NAGLE_LOGGING
+ sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
+#endif
+ SCTP_STAT_INCR(sctps_naglequeued);
+ nagle_applies = 1;
+ } else {
+#ifdef SCTP_NAGLE_LOGGING
+ if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
+ sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
+#endif
+ SCTP_STAT_INCR(sctps_naglesent);
+ nagle_applies = 0;
+ }
+ if (queue_only_for_init) {
+ if (hold_tcblock == 0) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ }
+ sctp_send_initiate(inp, stcb);
+ queue_only_for_init = 0;
+ queue_only = 1;
+ }
+ if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
+ /* we can attempt to send too. */
+ s = splnet();
+ if (hold_tcblock == 0) {
+ /*
+ * If there is activity recv'ing sacks no need to
+ * send
+ */
+ if (SCTP_TCB_TRYLOCK(stcb)) {
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND);
+ hold_tcblock = 1;
+ }
+ } else {
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND);
+ }
+ splx(s);
+ } else if ((queue_only == 0) &&
+ (stcb->asoc.peers_rwnd == 0) &&
+ (stcb->asoc.total_flight == 0)) {
+ /* We get to have a probe outstanding */
+ if (hold_tcblock == 0) {
+ hold_tcblock = 1;
+ SCTP_TCB_LOCK(stcb);
+ }
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND);
+ } else if (some_on_control) {
+ int num_out, reason, cwnd_full, frag_point;
+
+ /* Here we do control only */
+ if (hold_tcblock == 0) {
+ hold_tcblock = 1;
+ SCTP_TCB_LOCK(stcb);
+ }
+ frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
+ sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
+ &reason, 1, &cwnd_full, 1, &now, &now_filled, frag_point);
+ }
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
+ printf("USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d \n",
+ queue_only, stcb->asoc.peers_rwnd, un_sent,
+ stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
+ stcb->asoc.total_output_queue_size);
+ }
+#endif
+out:
+out_unlocked:
+
+ if (create_lock_applied) {
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ create_lock_applied = 0;
+ }
+ if ((stcb) && hold_tcblock) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ if ((stcb) && (free_cnt_applied)) {
+ atomic_add_16(&stcb->asoc.refcnt, -1);
+ }
+#ifdef INVARIANTS
+ if (stcb) {
+ if (mtx_owned(&stcb->tcb_mtx)) {
+ panic("Leaving with tcb mtx owned?");
+ }
+ if (mtx_owned(&stcb->tcb_send_mtx)) {
+ panic("Leaving with tcb send mtx owned?");
+ }
+ }
+#endif
+ if (top) {
+ sctp_m_freem(top);
+ }
+ if (control) {
+ sctp_m_freem(control);
+ }
+ return (error);
+}
+
+
+/*
+ * generate an AUTHentication chunk, if required
+ */
+struct mbuf *
+sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
+ struct sctp_auth_chunk **auth_ret, uint32_t * offset,
+ struct sctp_tcb *stcb, uint8_t chunk)
+{
+ struct mbuf *m_auth;
+ struct sctp_auth_chunk *auth;
+ int chunk_len;
+
+ if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
+ (stcb == NULL))
+ return (m);
+
+ /* sysctl disabled auth? */
+ if (sctp_auth_disable)
+ return (m);
+
+ /* peer doesn't do auth... */
+ if (!stcb->asoc.peer_supports_auth) {
+ return (m);
+ }
+ /* does the requested chunk require auth? */
+ if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
+ return (m);
+ }
+ m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 1, M_DONTWAIT, 1, MT_HEADER);
+ if (m_auth == NULL) {
+		/* no mbufs */
+ return (m);
+ }
+ /* reserve some space if this will be the first mbuf */
+ if (m == NULL)
+ m_auth->m_data += SCTP_MIN_OVERHEAD;
+ /* fill in the AUTH chunk details */
+ auth = mtod(m_auth, struct sctp_auth_chunk *);
+ bzero(auth, sizeof(*auth));
+ auth->ch.chunk_type = SCTP_AUTHENTICATION;
+ auth->ch.chunk_flags = 0;
+ chunk_len = sizeof(*auth) +
+ sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
+ auth->ch.chunk_length = htons(chunk_len);
+ auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
+ /* key id and hmac digest will be computed and filled in upon send */
+
+ /* save the offset where the auth was inserted into the chain */
+ if (m != NULL)
+ *offset = m->m_pkthdr.len;
+ else
+ *offset = 0;
+
+ /* update length and return pointer to the auth chunk */
+ m_auth->m_pkthdr.len = m_auth->m_len = chunk_len;
+ m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
+ if (auth_ret != NULL)
+ *auth_ret = auth;
+
+ return (m);
+}
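+
+/*
+ * Illustrative caller sketch, not part of this change: roughly how a send
+ * path could thread an AUTH chunk into an outgoing chain when the chunk
+ * type being bundled requires authentication.  The local names
+ * ("outchain", "endofchain", "auth_offset") and the wrapper itself are
+ * hypothetical; only sctp_add_auth_chunk() above is real.
+ */
+#if 0
+static struct mbuf *
+example_bundle_with_auth(struct sctp_tcb *stcb, struct mbuf *outchain,
+    uint8_t chunk_type)
+{
+	struct mbuf *endofchain = NULL;
+	struct sctp_auth_chunk *auth = NULL;
+	uint32_t auth_offset = 0;
+
+	/* Adds an AUTH chunk only if the peer requires it for this type. */
+	outchain = sctp_add_auth_chunk(outchain, &endofchain, &auth,
+	    &auth_offset, stcb, chunk_type);
+	/*
+	 * "auth" and "auth_offset" would later be handed to the code that
+	 * fills in the key id and HMAC just before the packet is sent.
+	 */
+	return (outchain);
+}
+#endif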
diff --git a/sys/netinet/sctp_output.h b/sys/netinet/sctp_output.h
new file mode 100644
index 0000000..0915fff
--- /dev/null
+++ b/sys/netinet/sctp_output.h
@@ -0,0 +1,171 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_output.h,v 1.14 2005/03/06 16:04:18 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_output_h__
+#define __sctp_output_h__
+
+#include <netinet/sctp_header.h>
+
+#if defined(_KERNEL)
+void sctp_send_initiate(struct sctp_inpcb *, struct sctp_tcb *);
+
+void
+sctp_send_initiate_ack(struct sctp_inpcb *, struct sctp_tcb *,
+ struct mbuf *, int, int, struct sctphdr *, struct sctp_init_chunk *);
+
+struct mbuf *
+sctp_arethere_unrecognized_parameters(struct mbuf *, int, int *,
+ struct sctp_chunkhdr *);
+void sctp_queue_op_err(struct sctp_tcb *, struct mbuf *);
+
+int
+sctp_send_cookie_echo(struct mbuf *, int, struct sctp_tcb *,
+ struct sctp_nets *);
+int sctp_send_cookie_ack(struct sctp_tcb *);
+
+void
+sctp_send_heartbeat_ack(struct sctp_tcb *, struct mbuf *, int, int,
+ struct sctp_nets *);
+
+
+int sctp_send_shutdown(struct sctp_tcb *, struct sctp_nets *);
+
+int sctp_send_shutdown_ack(struct sctp_tcb *, struct sctp_nets *);
+
+int sctp_send_shutdown_complete(struct sctp_tcb *, struct sctp_nets *);
+
+int sctp_send_shutdown_complete2(struct mbuf *, int, struct sctphdr *);
+
+int sctp_send_asconf(struct sctp_tcb *, struct sctp_nets *);
+
+int sctp_send_asconf_ack(struct sctp_tcb *, uint32_t);
+
+int sctp_get_frag_point(struct sctp_tcb *, struct sctp_association *);
+
+void sctp_toss_old_cookies(struct sctp_tcb *, struct sctp_association *);
+
+void sctp_toss_old_asconf(struct sctp_tcb *);
+
+void sctp_fix_ecn_echo(struct sctp_association *);
+
+int
+sctp_output(struct sctp_inpcb *, struct mbuf *, struct sockaddr *,
+ struct mbuf *, struct thread *, int);
+
+
+void
+sctp_insert_on_wheel(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_stream_out *strq, int holdslock);
+
+int sctp_chunk_output(struct sctp_inpcb *, struct sctp_tcb *, int);
+void sctp_send_abort_tcb(struct sctp_tcb *, struct mbuf *);
+
+void send_forward_tsn(struct sctp_tcb *, struct sctp_association *);
+
+void sctp_send_sack(struct sctp_tcb *);
+
+int sctp_send_hb(struct sctp_tcb *, int, struct sctp_nets *);
+
+void sctp_send_ecn_echo(struct sctp_tcb *, struct sctp_nets *, uint32_t);
+
+
+void
+sctp_send_packet_dropped(struct sctp_tcb *, struct sctp_nets *, struct mbuf *,
+ int, int);
+
+
+
+void sctp_send_cwr(struct sctp_tcb *, struct sctp_nets *, uint32_t);
+
+
+struct mbuf *
+sctp_get_mbuf_for_msg(unsigned int space_needed,
+ int want_header, int how, int allonebuf, int type);
+
+void
+sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
+ int number_entries, uint16_t * list,
+ uint32_t seq, uint32_t resp_seq, uint32_t last_sent);
+
+void
+sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
+ int number_entries, uint16_t * list,
+ uint32_t seq);
+
+void
+sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
+ uint32_t seq);
+
+void
+sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
+ uint32_t resp_seq, uint32_t result);
+
+void
+sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
+ uint32_t resp_seq, uint32_t result,
+ uint32_t send_una, uint32_t recv_next);
+
+int
+sctp_send_str_reset_req(struct sctp_tcb *stcb,
+ int number_entries, uint16_t * list,
+ uint8_t send_out_req, uint32_t resp_seq,
+ uint8_t send_in_req,
+ uint8_t send_tsn_req);
+
+
+void
+sctp_send_abort(struct mbuf *, int, struct sctphdr *, uint32_t,
+ struct mbuf *);
+
+void sctp_send_operr_to(struct mbuf *, int, struct mbuf *, uint32_t);
+
+int
+sctp_sosend(struct socket *so,
+ struct sockaddr *addr,
+ struct uio *uio,
+ struct mbuf *top,
+ struct mbuf *control,
+ int flags,
+ struct thread *p
+);
+
+#endif
+#endif
diff --git a/sys/netinet/sctp_pcb.c b/sys/netinet/sctp_pcb.c
new file mode 100644
index 0000000..977dfe7
--- /dev/null
+++ b/sys/netinet/sctp_pcb.c
@@ -0,0 +1,5283 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_pcb.c,v 1.38 2005/03/06 16:04:18 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_ipsec.h"
+#include "opt_compat.h"
+#include "opt_inet6.h"
+#include "opt_inet.h"
+#include "opt_sctp.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/domain.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+
+#include <sys/callout.h>
+
+#include <sys/limits.h>
+#include <machine/cpu.h>
+
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#include <netinet/in_pcb.h>
+#include <netinet/in_var.h>
+#include <netinet/ip_var.h>
+
+#ifdef INET6
+#include <netinet/ip6.h>
+#include <netinet6/ip6_var.h>
+#include <netinet6/scope6_var.h>
+#include <netinet6/in6_pcb.h>
+#endif /* INET6 */
+
+#ifdef IPSEC
+#include <netinet6/ipsec.h>
+#include <netkey/key.h>
+#endif /* IPSEC */
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_timer.h>
+
+
+#ifdef SCTP_DEBUG
+uint32_t sctp_debug_on = 0;
+
+#endif /* SCTP_DEBUG */
+
+
+extern int sctp_pcbtblsize;
+extern int sctp_hashtblsize;
+extern int sctp_chunkscale;
+
+struct sctp_epinfo sctppcbinfo;
+
+/* FIX: we don't handle multiple link local scopes */
+/* "scopeless" replacement IN6_ARE_ADDR_EQUAL */
+int
+SCTP6_ARE_ADDR_EQUAL(struct in6_addr *a, struct in6_addr *b)
+{
+ struct in6_addr tmp_a, tmp_b;
+
+ /* use a copy of a and b */
+ tmp_a = *a;
+ tmp_b = *b;
+ in6_clearscope(&tmp_a);
+ in6_clearscope(&tmp_b);
+ return (IN6_ARE_ADDR_EQUAL(&tmp_a, &tmp_b));
+}
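+
+/*
+ * Illustrative sketch, not part of this change: two copies of the same
+ * link-local address that differ only in their embedded scope compare
+ * equal here, where IN6_ARE_ADDR_EQUAL() on the originals would not.
+ * The wrapper below is hypothetical.
+ */
+#if 0
+static int
+example_scopeless_compare(struct sockaddr_in6 *a, struct sockaddr_in6 *b)
+{
+	/* Compares the addresses only, ignoring any embedded scope. */
+	return (SCTP6_ARE_ADDR_EQUAL(&a->sin6_addr, &b->sin6_addr));
+}
+#endif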
+
+
+void
+sctp_fill_pcbinfo(struct sctp_pcbinfo *spcb)
+{
+ /*
+ * We really don't need to lock this, but I will just because it
+ * does not hurt.
+ */
+ SCTP_INP_INFO_RLOCK();
+ spcb->ep_count = sctppcbinfo.ipi_count_ep;
+ spcb->asoc_count = sctppcbinfo.ipi_count_asoc;
+ spcb->laddr_count = sctppcbinfo.ipi_count_laddr;
+ spcb->raddr_count = sctppcbinfo.ipi_count_raddr;
+ spcb->chk_count = sctppcbinfo.ipi_count_chunk;
+ spcb->readq_count = sctppcbinfo.ipi_count_readq;
+ spcb->stream_oque = sctppcbinfo.ipi_count_strmoq;
+ spcb->free_chunks = sctppcbinfo.ipi_free_chunks;
+
+ SCTP_INP_INFO_RUNLOCK();
+}
+
+
+/*
+ * Notes on locks for FreeBSD 5 and up. For all association lookups that
+ * have a definite ep, the INP structure is assumed to be locked for
+ * reading. If we need to go find the INP (usually when a **inp is passed)
+ * then we must lock the INFO structure first and, if needed, lock the INP
+ * too. Note that if we lock it we must also unlock it before returning.
+ */
+
+
+/*
+ * Given an endpoint, look and find in its association list any association
+ * with the "to" address given. This can be a "from" address, too, for
+ * inbound packets. For outbound packets it is a true "to" address.
+ */
+
+static struct sctp_tcb *
+sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from,
+ struct sockaddr *to, struct sctp_nets **netp)
+{
+	/**** ASSUMES THE CALLER holds the INP_INFO_RLOCK */
+
+ /*
+	 * Note: in this module care must be taken when observing what "to"
+	 * is used for. In most of the rest of the code the TO field
+	 * represents my peer and the FROM field represents my address. For
+	 * this module it is the reverse of that.
+ */
+ /*
+ * If we support the TCP model, then we must now dig through to see
+ * if we can find our endpoint in the list of tcp ep's.
+ */
+ uint16_t lport, rport;
+ struct sctppcbhead *ephead;
+ struct sctp_inpcb *inp;
+ struct sctp_laddr *laddr;
+ struct sctp_tcb *stcb;
+ struct sctp_nets *net;
+
+ if ((to == NULL) || (from == NULL)) {
+ return (NULL);
+ }
+ if (to->sa_family == AF_INET && from->sa_family == AF_INET) {
+ lport = ((struct sockaddr_in *)to)->sin_port;
+ rport = ((struct sockaddr_in *)from)->sin_port;
+ } else if (to->sa_family == AF_INET6 && from->sa_family == AF_INET6) {
+ lport = ((struct sockaddr_in6 *)to)->sin6_port;
+ rport = ((struct sockaddr_in6 *)from)->sin6_port;
+ } else {
+ return NULL;
+ }
+ ephead = &sctppcbinfo.sctp_tcpephash[SCTP_PCBHASH_ALLADDR(
+ (lport + rport), sctppcbinfo.hashtcpmark)];
+ /*
+	 * Ok, now for each of the endpoints in this bucket we must check:
+	 * - Does the remote port match?
+	 * - Do the single association's addresses match this address (to)?
+	 * If so we update inp_p to point to this ep and return the tcb from
+	 * it.
+ */
+ LIST_FOREACH(inp, ephead, sctp_hash) {
+ if (lport != inp->sctp_lport) {
+ continue;
+ }
+ SCTP_INP_RLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ /* check to see if the ep has one of the addresses */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+ /* We are NOT bound all, so look further */
+ int match = 0;
+
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+
+ if (laddr->ifa == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_PCB1) {
+ printf("An ounce of prevention is worth a pound of cure\n");
+ }
+#endif
+ continue;
+ }
+ if (laddr->ifa->ifa_addr == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_PCB1) {
+ printf("ifa with a NULL address\n");
+ }
+#endif
+ continue;
+ }
+ if (laddr->ifa->ifa_addr->sa_family ==
+ to->sa_family) {
+ /* see if it matches */
+ struct sockaddr_in *intf_addr, *sin;
+
+ intf_addr = (struct sockaddr_in *)
+ laddr->ifa->ifa_addr;
+ sin = (struct sockaddr_in *)to;
+ if (from->sa_family == AF_INET) {
+ if (sin->sin_addr.s_addr ==
+ intf_addr->sin_addr.s_addr) {
+ match = 1;
+ break;
+ }
+ } else {
+ struct sockaddr_in6 *intf_addr6;
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)
+ to;
+ intf_addr6 = (struct sockaddr_in6 *)
+ laddr->ifa->ifa_addr;
+
+ if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
+ &intf_addr6->sin6_addr)) {
+ match = 1;
+ break;
+ }
+ }
+ }
+ }
+ if (match == 0) {
+ /* This endpoint does not have this address */
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ }
+ /*
+		 * Ok, if we hit here the ep has the address; does it hold
+ * the tcb?
+ */
+
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ SCTP_TCB_LOCK(stcb);
+ if (stcb->rport != rport) {
+ /* remote port does not match. */
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ /* Does this TCB have a matching address? */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+
+ if (net->ro._l_addr.sa.sa_family != from->sa_family) {
+ /* not the same family, can't be a match */
+ continue;
+ }
+ if (from->sa_family == AF_INET) {
+ struct sockaddr_in *sin, *rsin;
+
+ sin = (struct sockaddr_in *)&net->ro._l_addr;
+ rsin = (struct sockaddr_in *)from;
+ if (sin->sin_addr.s_addr ==
+ rsin->sin_addr.s_addr) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ /* Update the endpoint pointer */
+ *inp_p = inp;
+ SCTP_INP_RUNLOCK(inp);
+ return (stcb);
+ }
+ } else {
+ struct sockaddr_in6 *sin6, *rsin6;
+
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ rsin6 = (struct sockaddr_in6 *)from;
+ if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
+ &rsin6->sin6_addr)) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ /* Update the endpoint pointer */
+ *inp_p = inp;
+ SCTP_INP_RUNLOCK(inp);
+ return (stcb);
+ }
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ }
+ return (NULL);
+}
+
+/*
+ * Rules for use:
+ *
+ * 1) If I return a NULL you must decrement any INP ref count.
+ * 2) If I find an stcb, both will be locked (locked_tcb and stcb) but the
+ *    decrement will be done (if locked_tcb == NULL).
+ * 3) The decrement happens on return ONLY if locked_tcb == NULL.
+ *
+ * A caller sketch illustrating these rules follows the function below.
+ */
+
+struct sctp_tcb *
+sctp_findassociation_ep_addr(struct sctp_inpcb **inp_p, struct sockaddr *remote,
+ struct sctp_nets **netp, struct sockaddr *local, struct sctp_tcb *locked_tcb)
+{
+ struct sctpasochead *head;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+ struct sctp_nets *net;
+ uint16_t rport;
+
+ inp = *inp_p;
+ if (remote->sa_family == AF_INET) {
+ rport = (((struct sockaddr_in *)remote)->sin_port);
+ } else if (remote->sa_family == AF_INET6) {
+ rport = (((struct sockaddr_in6 *)remote)->sin6_port);
+ } else {
+ return (NULL);
+ }
+ if (locked_tcb) {
+ /*
+		 * UN-lock so we can do proper locking here; this occurs when
+ * called from load_addresses_from_init.
+ */
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ SCTP_INP_INFO_RLOCK();
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+ /*
+ * Now either this guy is our listener or it's the
+ * connector. If it is the one that issued the connect, then
+		 * its only chance is to be the first TCB in the list. If
+ * it is the acceptor, then do the special_lookup to hash
+ * and find the real inp.
+ */
+ if ((inp->sctp_socket) && (inp->sctp_socket->so_qlimit)) {
+ /* to is peer addr, from is my addr */
+ stcb = sctp_tcb_special_locate(inp_p, remote, local,
+ netp);
+ if ((stcb != NULL) && (locked_tcb == NULL)) {
+ /* we have a locked tcb, lower refcount */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if ((locked_tcb != NULL) && (locked_tcb != stcb)) {
+ SCTP_INP_RLOCK(locked_tcb->sctp_ep);
+ SCTP_TCB_LOCK(locked_tcb);
+ SCTP_INP_RUNLOCK(locked_tcb->sctp_ep);
+ }
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ } else {
+ SCTP_INP_WLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ goto null_return;
+ }
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ goto null_return;
+ }
+ SCTP_TCB_LOCK(stcb);
+ if (stcb->rport != rport) {
+ /* remote port does not match. */
+ SCTP_TCB_UNLOCK(stcb);
+ goto null_return;
+ }
+ /* now look at the list of remote addresses */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+#ifdef INVARIANTS
+ if (net == (TAILQ_NEXT(net, sctp_next))) {
+ panic("Corrupt net list");
+ }
+#endif
+ if (net->ro._l_addr.sa.sa_family !=
+ remote->sa_family) {
+ /* not the same family */
+ continue;
+ }
+ if (remote->sa_family == AF_INET) {
+ struct sockaddr_in *sin, *rsin;
+
+ sin = (struct sockaddr_in *)
+ &net->ro._l_addr;
+ rsin = (struct sockaddr_in *)remote;
+ if (sin->sin_addr.s_addr ==
+ rsin->sin_addr.s_addr) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ if (locked_tcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ } else if (locked_tcb != stcb) {
+ SCTP_TCB_LOCK(locked_tcb);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ }
+ } else if (remote->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6, *rsin6;
+
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ rsin6 = (struct sockaddr_in6 *)remote;
+ if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
+ &rsin6->sin6_addr)) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ if (locked_tcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ } else if (locked_tcb != stcb) {
+ SCTP_TCB_LOCK(locked_tcb);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ }
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ } else {
+ SCTP_INP_WLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ goto null_return;
+ }
+ head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(rport,
+ inp->sctp_hashmark)];
+ if (head == NULL) {
+ goto null_return;
+ }
+ LIST_FOREACH(stcb, head, sctp_tcbhash) {
+ if (stcb->rport != rport) {
+ /* remote port does not match */
+ continue;
+ }
+ /* now look at the list of remote addresses */
+ SCTP_TCB_LOCK(stcb);
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+#ifdef INVARIANTS
+ if (net == (TAILQ_NEXT(net, sctp_next))) {
+ panic("Corrupt net list");
+ }
+#endif
+ if (net->ro._l_addr.sa.sa_family !=
+ remote->sa_family) {
+ /* not the same family */
+ continue;
+ }
+ if (remote->sa_family == AF_INET) {
+ struct sockaddr_in *sin, *rsin;
+
+ sin = (struct sockaddr_in *)
+ &net->ro._l_addr;
+ rsin = (struct sockaddr_in *)remote;
+ if (sin->sin_addr.s_addr ==
+ rsin->sin_addr.s_addr) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ if (locked_tcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ } else if (locked_tcb != stcb) {
+ SCTP_TCB_LOCK(locked_tcb);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ }
+ } else if (remote->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6, *rsin6;
+
+ sin6 = (struct sockaddr_in6 *)
+ &net->ro._l_addr;
+ rsin6 = (struct sockaddr_in6 *)remote;
+ if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
+ &rsin6->sin6_addr)) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ if (locked_tcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ } else if (locked_tcb != stcb) {
+ SCTP_TCB_LOCK(locked_tcb);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ }
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ }
+null_return:
+ /* clean up for returning null */
+ if (locked_tcb) {
+ SCTP_TCB_LOCK(locked_tcb);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ /* not found */
+ return (NULL);
+}
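+
+/*
+ * Illustrative caller sketch, not part of this change, showing the
+ * "rules for use" above: the caller takes an INP reference before the
+ * lookup, drops it itself on a NULL return, and on success (with
+ * locked_tcb == NULL) only has to unlock the returned stcb.  The
+ * wrapper and its arguments are hypothetical.
+ */
+#if 0
+static void
+example_ep_addr_lookup(struct sctp_inpcb *inp, struct sockaddr *remote)
+{
+	struct sctp_tcb *stcb;
+	struct sctp_nets *net;
+
+	SCTP_INP_INCR_REF(inp);
+	stcb = sctp_findassociation_ep_addr(&inp, remote, &net, NULL, NULL);
+	if (stcb == NULL) {
+		/* Rule 1: on NULL we must drop the INP ref ourselves. */
+		SCTP_INP_WLOCK(inp);
+		SCTP_INP_DECR_REF(inp);
+		SCTP_INP_WUNLOCK(inp);
+		return;
+	}
+	/* Rules 2/3: stcb is locked and the ref was already dropped. */
+	SCTP_TCB_UNLOCK(stcb);
+}
+#endif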
+
+/*
+ * Find an association for a specific endpoint using the association id given
+ * out in the COMM_UP notification
+ */
+
+struct sctp_tcb *
+sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock)
+{
+ /*
+	 * Use the assoc_id to find an endpoint.
+ */
+ struct sctpasochead *head;
+ struct sctp_tcb *stcb;
+ uint32_t id;
+
+ if (asoc_id == 0 || inp == NULL) {
+ return (NULL);
+ }
+ SCTP_INP_INFO_RLOCK();
+ id = (uint32_t) asoc_id;
+ head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(id,
+ sctppcbinfo.hashasocmark)];
+ if (head == NULL) {
+ /* invalid id TSNH */
+ SCTP_INP_INFO_RUNLOCK();
+ return (NULL);
+ }
+ LIST_FOREACH(stcb, head, sctp_asocs) {
+ SCTP_INP_RLOCK(stcb->sctp_ep);
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ SCTP_INP_RUNLOCK(stcb->sctp_ep);
+ SCTP_INP_INFO_RUNLOCK();
+ return (NULL);
+ }
+ if (stcb->asoc.assoc_id == id) {
+ /* candidate */
+ if (inp != stcb->sctp_ep) {
+ /*
+ * some other guy has the same id active (id
+ * collision ??).
+ */
+ SCTP_INP_RUNLOCK(stcb->sctp_ep);
+ continue;
+ }
+ if (want_lock) {
+ SCTP_TCB_LOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(stcb->sctp_ep);
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ }
+ SCTP_INP_RUNLOCK(stcb->sctp_ep);
+ }
+	/* Ok, if we missed here, let's try the restart hash */
+ head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(id, sctppcbinfo.hashrestartmark)];
+ if (head == NULL) {
+ /* invalid id TSNH */
+ SCTP_INP_INFO_RUNLOCK();
+ return (NULL);
+ }
+ LIST_FOREACH(stcb, head, sctp_tcbrestarhash) {
+ SCTP_INP_RLOCK(stcb->sctp_ep);
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ SCTP_INP_RUNLOCK(stcb->sctp_ep);
+ SCTP_INP_INFO_RUNLOCK();
+ return (NULL);
+ }
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(stcb->sctp_ep);
+ if (stcb->asoc.assoc_id == id) {
+ /* candidate */
+ if (inp != stcb->sctp_ep) {
+ /*
+ * some other guy has the same id active (id
+ * collision ??).
+ */
+ SCTP_TCB_UNLOCK(stcb);
+ continue;
+ }
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_INFO_RUNLOCK();
+ return (NULL);
+}
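+
+/*
+ * Illustrative sketch, not part of this change: looking up an
+ * association by the id handed out in a COMM_UP notification.  With
+ * want_lock set the stcb comes back TCB-locked.  The wrapper is
+ * hypothetical.
+ */
+#if 0
+static void
+example_asocid_lookup(struct sctp_inpcb *inp, sctp_assoc_t asoc_id)
+{
+	struct sctp_tcb *stcb;
+
+	stcb = sctp_findassociation_ep_asocid(inp, asoc_id, 1);
+	if (stcb != NULL) {
+		/* ... work with the association ... */
+		SCTP_TCB_UNLOCK(stcb);
+	}
+}
+#endif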
+
+
+static struct sctp_inpcb *
+sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head,
+ uint16_t lport)
+{
+ struct sctp_inpcb *inp;
+ struct sockaddr_in *sin;
+ struct sockaddr_in6 *sin6;
+ struct sctp_laddr *laddr;
+
+ /*
+	 * Endpoint probe expects that the INP_INFO lock is held.
+ */
+ if (nam->sa_family == AF_INET) {
+ sin = (struct sockaddr_in *)nam;
+ sin6 = NULL;
+ } else if (nam->sa_family == AF_INET6) {
+ sin6 = (struct sockaddr_in6 *)nam;
+ sin = NULL;
+ } else {
+ /* unsupported family */
+ return (NULL);
+ }
+ if (head == NULL)
+ return (NULL);
+ LIST_FOREACH(inp, head, sctp_hash) {
+ SCTP_INP_RLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) &&
+ (inp->sctp_lport == lport)) {
+ /* got it */
+ if ((nam->sa_family == AF_INET) &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ (((struct inpcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY)
+ ) {
+				/* IPv4 on an IPv6 socket with ONLY IPv6 set */
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ /* A V6 address and the endpoint is NOT bound V6 */
+ if (nam->sa_family == AF_INET6 &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ SCTP_INP_RUNLOCK(inp);
+ return (inp);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+
+ if ((nam->sa_family == AF_INET) &&
+ (sin->sin_addr.s_addr == INADDR_ANY)) {
+ /* Can't hunt for one that has no address specified */
+ return (NULL);
+ } else if ((nam->sa_family == AF_INET6) &&
+ (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))) {
+ /* Can't hunt for one that has no address specified */
+ return (NULL);
+ }
+ /*
+	 * Ok, not bound to all, so see if we can find an EP bound to this
+ * address.
+ */
+ LIST_FOREACH(inp, head, sctp_hash) {
+ SCTP_INP_RLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ /*
+ * Ok this could be a likely candidate, look at all of its
+ * addresses
+ */
+ if (inp->sctp_lport != lport) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_PCB1) {
+ printf("An ounce of prevention is worth a pound of cure\n");
+ }
+#endif
+ continue;
+ }
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_PCB1) {
+ printf("Ok laddr->ifa:%p is possible, ",
+ laddr->ifa);
+ }
+#endif
+ if (laddr->ifa->ifa_addr == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_PCB1) {
+					printf("Huh, IFA has an ifa_addr=NULL, ");
+ }
+#endif
+ continue;
+ }
+ if (laddr->ifa->ifa_addr->sa_family == nam->sa_family) {
+ /* possible, see if it matches */
+ struct sockaddr_in *intf_addr;
+
+ intf_addr = (struct sockaddr_in *)
+ laddr->ifa->ifa_addr;
+ if (nam->sa_family == AF_INET) {
+ if (sin->sin_addr.s_addr ==
+ intf_addr->sin_addr.s_addr) {
+ SCTP_INP_RUNLOCK(inp);
+ return (inp);
+ }
+ } else if (nam->sa_family == AF_INET6) {
+ struct sockaddr_in6 *intf_addr6;
+
+ intf_addr6 = (struct sockaddr_in6 *)
+ laddr->ifa->ifa_addr;
+ if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
+ &intf_addr6->sin6_addr)) {
+ SCTP_INP_RUNLOCK(inp);
+ return (inp);
+ }
+ }
+ }
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ return (NULL);
+}
+
+
+struct sctp_inpcb *
+sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock)
+{
+ /*
+ * First we check the hash table to see if someone has this port
+ * bound with just the port.
+ */
+ struct sctp_inpcb *inp;
+ struct sctppcbhead *head;
+ struct sockaddr_in *sin;
+ struct sockaddr_in6 *sin6;
+ int lport;
+
+ if (nam->sa_family == AF_INET) {
+ sin = (struct sockaddr_in *)nam;
+ lport = ((struct sockaddr_in *)nam)->sin_port;
+ } else if (nam->sa_family == AF_INET6) {
+ sin6 = (struct sockaddr_in6 *)nam;
+ lport = ((struct sockaddr_in6 *)nam)->sin6_port;
+ } else {
+ /* unsupported family */
+ return (NULL);
+ }
+ /*
+ * I could cheat here and just cast to one of the types but we will
+	 * do it right. It also provides the check against an unsupported
+	 * type.
+ */
+ /* Find the head of the ALLADDR chain */
+ if (have_lock == 0) {
+ SCTP_INP_INFO_RLOCK();
+
+ }
+ head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
+ sctppcbinfo.hashmark)];
+ inp = sctp_endpoint_probe(nam, head, lport);
+
+ /*
+ * If the TCP model exists it could be that the main listening
+	 * endpoint is gone but a connected socket for this guy still
+	 * exists. If so we can return the first one that we find. This may
+	 * NOT be the correct one, but sctp_findassociation_ep_addr has
+ * further code to look at all TCP models.
+ */
+ if (inp == NULL && find_tcp_pool) {
+ unsigned int i;
+
+ for (i = 0; i < sctppcbinfo.hashtblsize; i++) {
+ /*
+ * This is real gross, but we do NOT have a remote
+ * port at this point depending on who is calling.
+ * We must therefore look for ANY one that matches
+ * our local port :/
+ */
+ head = &sctppcbinfo.sctp_tcpephash[i];
+ if (LIST_FIRST(head)) {
+ inp = sctp_endpoint_probe(nam, head, lport);
+ if (inp) {
+ /* Found one */
+ break;
+ }
+ }
+ }
+ }
+ if (inp) {
+ SCTP_INP_INCR_REF(inp);
+ }
+ if (have_lock == 0) {
+ SCTP_INP_INFO_RUNLOCK();
+ }
+ return (inp);
+}
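+
+/*
+ * Illustrative sketch, not part of this change: sctp_pcb_findep() bumps
+ * the INP reference on success, so a caller that is done with the
+ * endpoint must drop that reference again.  The wrapper is hypothetical.
+ */
+#if 0
+static void
+example_findep(struct sockaddr *nam)
+{
+	struct sctp_inpcb *inp;
+
+	/* have_lock == 0: the routine takes the INFO read lock itself. */
+	inp = sctp_pcb_findep(nam, 0, 0);
+	if (inp != NULL) {
+		/* ... inspect the endpoint ... */
+		SCTP_INP_WLOCK(inp);
+		SCTP_INP_DECR_REF(inp);
+		SCTP_INP_WUNLOCK(inp);
+	}
+}
+#endif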
+
+/*
+ * Find an association for an endpoint given a pointer to the address you
+ * want to send to and the endpoint pointer. The address can be IPv4 or
+ * IPv6. We may need to change the *to to some other struct like an mbuf...
+ */
+struct sctp_tcb *
+sctp_findassociation_addr_sa(struct sockaddr *to, struct sockaddr *from,
+ struct sctp_inpcb **inp_p, struct sctp_nets **netp, int find_tcp_pool)
+{
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *retval;
+
+ SCTP_INP_INFO_RLOCK();
+ if (find_tcp_pool) {
+ if (inp_p != NULL) {
+ retval = sctp_tcb_special_locate(inp_p, from, to, netp);
+ } else {
+ retval = sctp_tcb_special_locate(&inp, from, to, netp);
+ }
+ if (retval != NULL) {
+ SCTP_INP_INFO_RUNLOCK();
+ return (retval);
+ }
+ }
+ inp = sctp_pcb_findep(to, 0, 1);
+ if (inp_p != NULL) {
+ *inp_p = inp;
+ }
+ SCTP_INP_INFO_RUNLOCK();
+
+ if (inp == NULL) {
+ return (NULL);
+ }
+ /*
+	 * Ok, we have an endpoint; now let's find the assoc for it (if
+	 * any). We place the source address, or "from", in the "to" of the
+	 * find-endpoint call, since in reality this chain is used from the
+	 * inbound packet side.
+ */
+ if (inp_p != NULL) {
+ retval = sctp_findassociation_ep_addr(inp_p, from, netp, to, NULL);
+ } else {
+ retval = sctp_findassociation_ep_addr(&inp, from, netp, to, NULL);
+ }
+ return retval;
+}
+
+
+/*
+ * This routine will grub through the mbuf that is an INIT or INIT-ACK and
+ * find all addresses that the sender has specified in any address list.
+ * Each address will be used to look up the TCB and see if one exists.
+ */
+static struct sctp_tcb *
+sctp_findassociation_special_addr(struct mbuf *m, int iphlen, int offset,
+ struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp,
+ struct sockaddr *dest)
+{
+ struct sockaddr_in sin4;
+ struct sockaddr_in6 sin6;
+ struct sctp_paramhdr *phdr, parm_buf;
+ struct sctp_tcb *retval;
+ uint32_t ptype, plen;
+
+ memset(&sin4, 0, sizeof(sin4));
+ memset(&sin6, 0, sizeof(sin6));
+ sin4.sin_len = sizeof(sin4);
+ sin4.sin_family = AF_INET;
+ sin4.sin_port = sh->src_port;
+ sin6.sin6_len = sizeof(sin6);
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_port = sh->src_port;
+
+ retval = NULL;
+ offset += sizeof(struct sctp_init_chunk);
+
+ phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
+ while (phdr != NULL) {
+ /* now we must see if we want the parameter */
+ ptype = ntohs(phdr->param_type);
+ plen = ntohs(phdr->param_length);
+ if (plen == 0) {
+ break;
+ }
+ if (ptype == SCTP_IPV4_ADDRESS &&
+ plen == sizeof(struct sctp_ipv4addr_param)) {
+ /* Get the rest of the address */
+ struct sctp_ipv4addr_param ip4_parm, *p4;
+
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&ip4_parm, plen);
+ if (phdr == NULL) {
+ return (NULL);
+ }
+ p4 = (struct sctp_ipv4addr_param *)phdr;
+ memcpy(&sin4.sin_addr, &p4->addr, sizeof(p4->addr));
+ /* look it up */
+ retval = sctp_findassociation_ep_addr(inp_p,
+ (struct sockaddr *)&sin4, netp, dest, NULL);
+ if (retval != NULL) {
+ return (retval);
+ }
+ } else if (ptype == SCTP_IPV6_ADDRESS &&
+ plen == sizeof(struct sctp_ipv6addr_param)) {
+ /* Get the rest of the address */
+ struct sctp_ipv6addr_param ip6_parm, *p6;
+
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&ip6_parm, plen);
+ if (phdr == NULL) {
+ return (NULL);
+ }
+ p6 = (struct sctp_ipv6addr_param *)phdr;
+ memcpy(&sin6.sin6_addr, &p6->addr, sizeof(p6->addr));
+ /* look it up */
+ retval = sctp_findassociation_ep_addr(inp_p,
+ (struct sockaddr *)&sin6, netp, dest, NULL);
+ if (retval != NULL) {
+ return (retval);
+ }
+ }
+ offset += SCTP_SIZE32(plen);
+ phdr = sctp_get_next_param(m, offset, &parm_buf,
+ sizeof(parm_buf));
+ }
+ return (NULL);
+}
+
+
+static struct sctp_tcb *
+sctp_findassoc_by_vtag(struct sockaddr *from, uint32_t vtag,
+ struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint16_t rport,
+ uint16_t lport, int skip_src_check)
+{
+ /*
+ * Use my vtag to hash. If we find it we then verify the source addr
+	 * is in the assoc. If all goes well we save a bit of work on
+	 * receipt of a packet.
+ */
+ struct sctpasochead *head;
+ struct sctp_nets *net;
+ struct sctp_tcb *stcb;
+
+ *netp = NULL;
+ *inp_p = NULL;
+ SCTP_INP_INFO_RLOCK();
+ head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(vtag,
+ sctppcbinfo.hashasocmark)];
+ if (head == NULL) {
+ /* invalid vtag */
+ SCTP_INP_INFO_RUNLOCK();
+ return (NULL);
+ }
+ LIST_FOREACH(stcb, head, sctp_asocs) {
+ SCTP_INP_RLOCK(stcb->sctp_ep);
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ SCTP_INP_RUNLOCK(stcb->sctp_ep);
+ SCTP_INP_INFO_RUNLOCK();
+ return (NULL);
+ }
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(stcb->sctp_ep);
+ if (stcb->asoc.my_vtag == vtag) {
+ /* candidate */
+ if (stcb->rport != rport) {
+ /*
+ * we could remove this if vtags are unique
+ * across the system.
+ */
+ SCTP_TCB_UNLOCK(stcb);
+ continue;
+ }
+ if (stcb->sctp_ep->sctp_lport != lport) {
+ /*
+ * we could remove this if vtags are unique
+ * across the system.
+ */
+ SCTP_TCB_UNLOCK(stcb);
+ continue;
+ }
+ if (skip_src_check) {
+ *netp = NULL; /* unknown */
+ *inp_p = stcb->sctp_ep;
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ }
+ net = sctp_findnet(stcb, from);
+ if (net) {
+				/* yep, it's him. */
+ *netp = net;
+ SCTP_STAT_INCR(sctps_vtagexpress);
+ *inp_p = stcb->sctp_ep;
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ } else {
+ /*
+ * not him, this should only happen in rare
+ * cases so I peg it.
+ */
+ SCTP_STAT_INCR(sctps_vtagbogus);
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_INFO_RUNLOCK();
+ return (NULL);
+}
+
+/*
+ * Find an association with the pointer to the inbound IP packet. This can be
+ * an IPv4 or IPv6 packet.
+ */
+struct sctp_tcb *
+sctp_findassociation_addr(struct mbuf *m, int iphlen, int offset,
+ struct sctphdr *sh, struct sctp_chunkhdr *ch,
+ struct sctp_inpcb **inp_p, struct sctp_nets **netp)
+{
+ int find_tcp_pool;
+ struct ip *iph;
+ struct sctp_tcb *retval;
+ struct sockaddr_storage to_store, from_store;
+ struct sockaddr *to = (struct sockaddr *)&to_store;
+ struct sockaddr *from = (struct sockaddr *)&from_store;
+ struct sctp_inpcb *inp;
+
+
+ iph = mtod(m, struct ip *);
+ if (iph->ip_v == IPVERSION) {
+		/* it's IPv4 */
+ struct sockaddr_in *from4;
+
+ from4 = (struct sockaddr_in *)&from_store;
+ bzero(from4, sizeof(*from4));
+ from4->sin_family = AF_INET;
+ from4->sin_len = sizeof(struct sockaddr_in);
+ from4->sin_addr.s_addr = iph->ip_src.s_addr;
+ from4->sin_port = sh->src_port;
+ } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
+		/* it's IPv6 */
+ struct ip6_hdr *ip6;
+ struct sockaddr_in6 *from6;
+
+ ip6 = mtod(m, struct ip6_hdr *);
+ from6 = (struct sockaddr_in6 *)&from_store;
+ bzero(from6, sizeof(*from6));
+ from6->sin6_family = AF_INET6;
+ from6->sin6_len = sizeof(struct sockaddr_in6);
+ from6->sin6_addr = ip6->ip6_src;
+ from6->sin6_port = sh->src_port;
+		/* Get the scopes set properly in the sin6 addrs */
+ /* we probably don't need these operations */
+ (void)sa6_recoverscope(from6);
+ sa6_embedscope(from6, ip6_use_defzone);
+ } else {
+ /* Currently not supported. */
+ return (NULL);
+ }
+ if (sh->v_tag) {
+ /* we only go down this path if vtag is non-zero */
+ retval = sctp_findassoc_by_vtag(from, ntohl(sh->v_tag),
+ inp_p, netp, sh->src_port, sh->dest_port, 0);
+ if (retval) {
+ return (retval);
+ }
+ }
+ if (iph->ip_v == IPVERSION) {
+		/* it's IPv4 */
+ struct sockaddr_in *to4;
+
+ to4 = (struct sockaddr_in *)&to_store;
+ bzero(to4, sizeof(*to4));
+ to4->sin_family = AF_INET;
+ to4->sin_len = sizeof(struct sockaddr_in);
+ to4->sin_addr.s_addr = iph->ip_dst.s_addr;
+ to4->sin_port = sh->dest_port;
+ } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
+		/* it's IPv6 */
+ struct ip6_hdr *ip6;
+ struct sockaddr_in6 *to6;
+
+ ip6 = mtod(m, struct ip6_hdr *);
+ to6 = (struct sockaddr_in6 *)&to_store;
+ bzero(to6, sizeof(*to6));
+ to6->sin6_family = AF_INET6;
+ to6->sin6_len = sizeof(struct sockaddr_in6);
+ to6->sin6_addr = ip6->ip6_dst;
+ to6->sin6_port = sh->dest_port;
+		/* Get the scopes set properly in the sin6 addrs */
+ /* we probably don't need these operations */
+ (void)sa6_recoverscope(to6);
+ sa6_embedscope(to6, ip6_use_defzone);
+ }
+ find_tcp_pool = 0;
+ /*
+	 * FIX FIX? I think we only need to look in the TCP pool if it's an
+	 * INIT or COOKIE-ECHO. We really don't need to find it that way if
+	 * it's an INIT-ACK or COOKIE-ACK, since these in both one-2-one and
+	 * one-2-N would be in the main pool anyway.
+ */
+ if ((ch->chunk_type != SCTP_INITIATION) &&
+ (ch->chunk_type != SCTP_INITIATION_ACK) &&
+ (ch->chunk_type != SCTP_COOKIE_ACK) &&
+ (ch->chunk_type != SCTP_COOKIE_ECHO)) {
+ /* Other chunk types go to the tcp pool. */
+ find_tcp_pool = 1;
+ }
+ if (inp_p) {
+ retval = sctp_findassociation_addr_sa(to, from, inp_p, netp,
+ find_tcp_pool);
+ inp = *inp_p;
+ } else {
+ retval = sctp_findassociation_addr_sa(to, from, &inp, netp,
+ find_tcp_pool);
+ }
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_PCB1) {
+ printf("retval:%p inp:%p\n", retval, inp);
+ }
+#endif
+ if (retval == NULL && inp) {
+		/* Found an EP but not this address */
+ if ((ch->chunk_type == SCTP_INITIATION) ||
+ (ch->chunk_type == SCTP_INITIATION_ACK)) {
+ /*
+			 * Special hook: we do NOT return inp or an
+			 * association that is linked to an existing
+			 * association that is under the TCP pool (i.e. no
+			 * listener exists). The endpoint-finding routine
+			 * will always find a listener before examining the
+ * TCP pool.
+ */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
+ if (inp_p) {
+ *inp_p = NULL;
+ }
+ return (NULL);
+ }
+ retval = sctp_findassociation_special_addr(m, iphlen,
+ offset, sh, &inp, netp, to);
+ if (inp_p != NULL) {
+ *inp_p = inp;
+ }
+ }
+ }
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_PCB1) {
+ printf("retval is %p\n", retval);
+ }
+#endif
+ return (retval);
+}
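+
+/*
+ * Illustrative sketch, not part of this change: how an input path might
+ * resolve an inbound packet to its association.  "m", "iphlen",
+ * "offset", "sh" and "ch" are assumed to come from the caller's header
+ * parsing; the wrapper is hypothetical and elides the INP ref handling
+ * needed on the NULL path.
+ */
+#if 0
+static void
+example_input_lookup(struct mbuf *m, int iphlen, int offset,
+    struct sctphdr *sh, struct sctp_chunkhdr *ch)
+{
+	struct sctp_inpcb *inp = NULL;
+	struct sctp_nets *net = NULL;
+	struct sctp_tcb *stcb;
+
+	stcb = sctp_findassociation_addr(m, iphlen, offset, sh, ch,
+	    &inp, &net);
+	if (stcb != NULL) {
+		/* ... process the chunk against stcb/net ... */
+		SCTP_TCB_UNLOCK(stcb);
+	}
+}
+#endif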
+
+/*
+ * Look up an association by an ASCONF lookup address.
+ * If the lookup address is 0.0.0.0 or ::0, use the vtag to do the lookup.
+ */
+struct sctp_tcb *
+sctp_findassociation_ep_asconf(struct mbuf *m, int iphlen, int offset,
+ struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp)
+{
+ struct sctp_tcb *stcb;
+ struct sockaddr_in *sin;
+ struct sockaddr_in6 *sin6;
+ struct sockaddr_storage local_store, remote_store;
+ struct ip *iph;
+ struct sctp_paramhdr parm_buf, *phdr;
+ int ptype;
+ int zero_address = 0;
+
+
+ memset(&local_store, 0, sizeof(local_store));
+ memset(&remote_store, 0, sizeof(remote_store));
+
+	/* First get the destination address set up. */
+ iph = mtod(m, struct ip *);
+ if (iph->ip_v == IPVERSION) {
+		/* it's IPv4 */
+ sin = (struct sockaddr_in *)&local_store;
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(*sin);
+ sin->sin_port = sh->dest_port;
+ sin->sin_addr.s_addr = iph->ip_dst.s_addr;
+ } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
+		/* it's IPv6 */
+ struct ip6_hdr *ip6;
+
+ ip6 = mtod(m, struct ip6_hdr *);
+ sin6 = (struct sockaddr_in6 *)&local_store;
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(*sin6);
+ sin6->sin6_port = sh->dest_port;
+ sin6->sin6_addr = ip6->ip6_dst;
+ } else {
+ return NULL;
+ }
+
+ phdr = sctp_get_next_param(m, offset + sizeof(struct sctp_asconf_chunk),
+ &parm_buf, sizeof(struct sctp_paramhdr));
+ if (phdr == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("findassociation_ep_asconf: failed to get asconf lookup addr\n");
+ }
+#endif /* SCTP_DEBUG */
+ return NULL;
+ }
+ ptype = (int)((uint32_t) ntohs(phdr->param_type));
+ /* get the correlation address */
+ if (ptype == SCTP_IPV6_ADDRESS) {
+ /* ipv6 address param */
+ struct sctp_ipv6addr_param *p6, p6_buf;
+
+ if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv6addr_param)) {
+ return NULL;
+ }
+ p6 = (struct sctp_ipv6addr_param *)sctp_get_next_param(m,
+ offset + sizeof(struct sctp_asconf_chunk),
+ &p6_buf.ph, sizeof(*p6));
+ if (p6 == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("findassociation_ep_asconf: failed to get asconf v6 lookup addr\n");
+ }
+#endif /* SCTP_DEBUG */
+ return (NULL);
+ }
+ sin6 = (struct sockaddr_in6 *)&remote_store;
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(*sin6);
+ sin6->sin6_port = sh->src_port;
+ memcpy(&sin6->sin6_addr, &p6->addr, sizeof(struct in6_addr));
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+ zero_address = 1;
+ } else if (ptype == SCTP_IPV4_ADDRESS) {
+ /* ipv4 address param */
+ struct sctp_ipv4addr_param *p4, p4_buf;
+
+ if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv4addr_param)) {
+ return NULL;
+ }
+ p4 = (struct sctp_ipv4addr_param *)sctp_get_next_param(m,
+ offset + sizeof(struct sctp_asconf_chunk),
+ &p4_buf.ph, sizeof(*p4));
+ if (p4 == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
+ printf("findassociation_ep_asconf: failed to get asconf v4 lookup addr\n");
+ }
+#endif /* SCTP_DEBUG */
+ return (NULL);
+ }
+ sin = (struct sockaddr_in *)&remote_store;
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(*sin);
+ sin->sin_port = sh->src_port;
+ memcpy(&sin->sin_addr, &p4->addr, sizeof(struct in_addr));
+ if (sin->sin_addr.s_addr == INADDR_ANY)
+ zero_address = 1;
+ } else {
+ /* invalid address param type */
+ return NULL;
+ }
+
+ if (zero_address) {
+ stcb = sctp_findassoc_by_vtag(NULL, ntohl(sh->v_tag), inp_p,
+ netp, sh->src_port, sh->dest_port, 1);
+ /*
+ * printf("findassociation_ep_asconf: zero lookup address
+ * finds stcb 0x%x\n", (uint32_t)stcb);
+ */
+ } else {
+ stcb = sctp_findassociation_ep_addr(inp_p,
+ (struct sockaddr *)&remote_store, netp,
+ (struct sockaddr *)&local_store, NULL);
+ }
+ return (stcb);
+}
+
+
+extern int sctp_max_burst_default;
+
+extern unsigned int sctp_delayed_sack_time_default;
+extern unsigned int sctp_heartbeat_interval_default;
+extern unsigned int sctp_pmtu_raise_time_default;
+extern unsigned int sctp_shutdown_guard_time_default;
+extern unsigned int sctp_secret_lifetime_default;
+
+extern unsigned int sctp_rto_max_default;
+extern unsigned int sctp_rto_min_default;
+extern unsigned int sctp_rto_initial_default;
+extern unsigned int sctp_init_rto_max_default;
+extern unsigned int sctp_valid_cookie_life_default;
+extern unsigned int sctp_init_rtx_max_default;
+extern unsigned int sctp_assoc_rtx_max_default;
+extern unsigned int sctp_path_rtx_max_default;
+extern unsigned int sctp_nr_outgoing_streams_default;
+
+/*
+ * Allocate a sctp_inpcb and set up a temporary binding to a port/all
+ * addresses. This way if we don't get a bind we by default pick an
+ * ephemeral port with all addresses bound.
+ */
+int
+sctp_inpcb_alloc(struct socket *so)
+{
+ /*
+ * we get called when a new endpoint starts up. We need to allocate
+ * the sctp_inpcb structure from the zone and init it. Mark it as
+ * unbound and find a port that we can use as an ephemeral with
+	 * INADDR_ANY. If the user binds later, no problem; we can then add
+	 * in the specific addresses. And set up the default parameters for
+	 * the EP.
+ */
+ int i, error;
+ struct sctp_inpcb *inp;
+
+ struct sctp_pcb *m;
+ struct timeval time;
+ sctp_sharedkey_t *null_key;
+
+ error = 0;
+
+ SCTP_INP_INFO_WLOCK();
+ inp = (struct sctp_inpcb *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_ep);
+ if (inp == NULL) {
+ printf("Out of SCTP-INPCB structures - no resources\n");
+ SCTP_INP_INFO_WUNLOCK();
+ return (ENOBUFS);
+ }
+ /* zap it */
+ bzero(inp, sizeof(*inp));
+
+ /* bump generations */
+ /* setup socket pointers */
+ inp->sctp_socket = so;
+ inp->ip_inp.inp.inp_socket = so;
+
+ inp->partial_delivery_point = so->so_rcv.sb_hiwat >> SCTP_PARTIAL_DELIVERY_SHIFT;
+ inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
+
+#ifdef IPSEC
+ {
+ struct inpcbpolicy *pcb_sp = NULL;
+
+ error = ipsec_init_pcbpolicy(so, &pcb_sp);
+ /* Arrange to share the policy */
+ inp->ip_inp.inp.inp_sp = pcb_sp;
+ ((struct in6pcb *)(&inp->ip_inp.inp))->in6p_sp = pcb_sp;
+ }
+ if (error != 0) {
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return error;
+ }
+#endif /* IPSEC */
+ SCTP_INCR_EP_COUNT();
+ inp->ip_inp.inp.inp_ip_ttl = ip_defttl;
+ SCTP_INP_INFO_WUNLOCK();
+
+ so->so_pcb = (caddr_t)inp;
+
+ if ((so->so_type == SOCK_DGRAM) ||
+ (so->so_type == SOCK_SEQPACKET)) {
+ /* UDP style socket */
+ inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE |
+ SCTP_PCB_FLAGS_UNBOUND);
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
+ /* Be sure it is NON-BLOCKING IO for UDP */
+ /* so->so_state |= SS_NBIO; */
+ } else if (so->so_type == SOCK_STREAM) {
+ /* TCP style socket */
+ inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
+ SCTP_PCB_FLAGS_UNBOUND);
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
+ /* Be sure we have blocking IO by default */
+ so->so_state &= ~SS_NBIO;
+ } else {
+ /*
+		 * unsupported socket type (RAW, etc.) - in case we missed it
+ * in protosw
+ */
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
+ return (EOPNOTSUPP);
+ }
+ inp->sctp_tcbhash = hashinit(sctp_pcbtblsize,
+ M_PCB,
+ &inp->sctp_hashmark);
+ if (inp->sctp_tcbhash == NULL) {
+ printf("Out of SCTP-INPCB->hashinit - no resources\n");
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
+ return (ENOBUFS);
+ }
+ SCTP_INP_INFO_WLOCK();
+ SCTP_INP_LOCK_INIT(inp);
+ SCTP_INP_READ_INIT(inp);
+ SCTP_ASOC_CREATE_LOCK_INIT(inp);
+ /* lock the new ep */
+ SCTP_INP_WLOCK(inp);
+
+ /* add it to the info area */
+ LIST_INSERT_HEAD(&sctppcbinfo.listhead, inp, sctp_list);
+ SCTP_INP_INFO_WUNLOCK();
+
+ TAILQ_INIT(&inp->read_queue);
+ LIST_INIT(&inp->sctp_addr_list);
+ LIST_INIT(&inp->sctp_asoc_list);
+
+ /* Init the timer structure for signature change */
+ callout_init(&inp->sctp_ep.signature_change.timer, 1);
+ inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NEWCOOKIE;
+
+ /* now init the actual endpoint default data */
+ m = &inp->sctp_ep;
+
+ /* setup the base timeout information */
+ m->sctp_timeoutticks[SCTP_TIMER_SEND] = SEC_TO_TICKS(SCTP_SEND_SEC); /* needed ? */
+ m->sctp_timeoutticks[SCTP_TIMER_INIT] = SEC_TO_TICKS(SCTP_INIT_SEC); /* needed ? */
+ m->sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sctp_delayed_sack_time_default);
+ m->sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(sctp_heartbeat_interval_default);
+ m->sctp_timeoutticks[SCTP_TIMER_PMTU] = SEC_TO_TICKS(sctp_pmtu_raise_time_default);
+ m->sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] = SEC_TO_TICKS(sctp_shutdown_guard_time_default);
+ m->sctp_timeoutticks[SCTP_TIMER_SIGNATURE] = SEC_TO_TICKS(sctp_secret_lifetime_default);
+ /* all max/min max are in ms */
+ m->sctp_maxrto = sctp_rto_max_default;
+ m->sctp_minrto = sctp_rto_min_default;
+ m->initial_rto = sctp_rto_initial_default;
+ m->initial_init_rto_max = sctp_init_rto_max_default;
+
+ m->max_open_streams_intome = MAX_SCTP_STREAMS;
+
+ m->max_init_times = sctp_init_rtx_max_default;
+ m->max_send_times = sctp_assoc_rtx_max_default;
+ m->def_net_failure = sctp_path_rtx_max_default;
+ m->sctp_sws_sender = SCTP_SWS_SENDER_DEF;
+ m->sctp_sws_receiver = SCTP_SWS_RECEIVER_DEF;
+ m->max_burst = sctp_max_burst_default;
+	/* number of streams to pre-open on an association */
+ m->pre_open_stream_count = sctp_nr_outgoing_streams_default;
+
+ /* Add adaptation cookie */
+ m->adaptation_layer_indicator = 0x504C5253;
+
+ /* seed random number generator */
+ m->random_counter = 1;
+ m->store_at = SCTP_SIGNATURE_SIZE;
+ sctp_read_random(m->random_numbers, sizeof(m->random_numbers));
+ sctp_fill_random_store(m);
+
+ /* Minimum cookie size */
+ m->size_of_a_cookie = (sizeof(struct sctp_init_msg) * 2) +
+ sizeof(struct sctp_state_cookie);
+ m->size_of_a_cookie += SCTP_SIGNATURE_SIZE;
+
+ /* Setup the initial secret */
+ SCTP_GETTIME_TIMEVAL(&time);
+ m->time_of_secret_change = time.tv_sec;
+
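+	/*
+	 * sctp_select_initial_TSN() doubles as a random 32-bit word
+	 * source here; the secret is just random key material.
+	 */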
+ for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
+ m->secret_key[0][i] = sctp_select_initial_TSN(m);
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
+
+	/* How long is a cookie good for? */
+ m->def_cookie_life = sctp_valid_cookie_life_default;
+
+ /*
+ * Initialize authentication parameters
+ */
+ m->local_hmacs = sctp_default_supported_hmaclist();
+ m->local_auth_chunks = sctp_alloc_chunklist();
+ sctp_auth_set_default_chunks(m->local_auth_chunks);
+ LIST_INIT(&m->shared_keys);
+ /* add default NULL key as key id 0 */
+ null_key = sctp_alloc_sharedkey();
+ sctp_insert_sharedkey(&m->shared_keys, null_key);
+ SCTP_INP_WUNLOCK(inp);
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 12);
+#endif
+ return (error);
+}
+
+
+void
+sctp_move_pcb_and_assoc(struct sctp_inpcb *old_inp, struct sctp_inpcb *new_inp,
+ struct sctp_tcb *stcb)
+{
+ struct sctp_nets *net;
+ uint16_t lport, rport;
+ struct sctppcbhead *head;
+ struct sctp_laddr *laddr, *oladdr;
+
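+	/*
+	 * Lock ordering: drop the TCB lock first so the INP-info lock and
+	 * both endpoint locks can be taken in the canonical order, then
+	 * re-take the TCB lock.
+	 */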
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_INFO_WLOCK();
+ SCTP_INP_WLOCK(old_inp);
+ SCTP_INP_WLOCK(new_inp);
+ SCTP_TCB_LOCK(stcb);
+
+ new_inp->sctp_ep.time_of_secret_change =
+ old_inp->sctp_ep.time_of_secret_change;
+ memcpy(new_inp->sctp_ep.secret_key, old_inp->sctp_ep.secret_key,
+ sizeof(old_inp->sctp_ep.secret_key));
+ new_inp->sctp_ep.current_secret_number =
+ old_inp->sctp_ep.current_secret_number;
+ new_inp->sctp_ep.last_secret_number =
+ old_inp->sctp_ep.last_secret_number;
+ new_inp->sctp_ep.size_of_a_cookie = old_inp->sctp_ep.size_of_a_cookie;
+
+ /* make it so new data pours into the new socket */
+ stcb->sctp_socket = new_inp->sctp_socket;
+ stcb->sctp_ep = new_inp;
+
+ /* Copy the port across */
+ lport = new_inp->sctp_lport = old_inp->sctp_lport;
+ rport = stcb->rport;
+ /* Pull the tcb from the old association */
+ LIST_REMOVE(stcb, sctp_tcbhash);
+ LIST_REMOVE(stcb, sctp_tcblist);
+
+ /* Now insert the new_inp into the TCP connected hash */
+ head = &sctppcbinfo.sctp_tcpephash[SCTP_PCBHASH_ALLADDR((lport + rport),
+ sctppcbinfo.hashtcpmark)];
+
+ LIST_INSERT_HEAD(head, new_inp, sctp_hash);
+
+ /* Now move the tcb into the endpoint list */
+ LIST_INSERT_HEAD(&new_inp->sctp_asoc_list, stcb, sctp_tcblist);
+	/*
+	 * Question: do we even need to worry about the ep-hash since we
+	 * only have one connection? Probably not, so let's get rid of it
+	 * and not use up any kernel memory for it.
+	 */
+
+ /* Ok. Let's restart timer. */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, new_inp,
+ stcb, net);
+ }
+
+ SCTP_INP_INFO_WUNLOCK();
+ if (new_inp->sctp_tcbhash != NULL) {
+ SCTP_FREE(new_inp->sctp_tcbhash);
+ new_inp->sctp_tcbhash = NULL;
+ }
+ if ((new_inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+ /* Subset bound, so copy in the laddr list from the old_inp */
+ LIST_FOREACH(oladdr, &old_inp->sctp_addr_list, sctp_nxt_addr) {
+ laddr = (struct sctp_laddr *)SCTP_ZONE_GET(
+ sctppcbinfo.ipi_zone_laddr);
+ if (laddr == NULL) {
+ /*
+ * Gak, what can we do? This assoc is really
+ * HOSED. We probably should send an abort
+ * here.
+ */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_PCB1) {
+ printf("Association hosed in TCP model, out of laddr memory\n");
+ }
+#endif /* SCTP_DEBUG */
+ continue;
+ }
+ SCTP_INCR_LADDR_COUNT();
+ bzero(laddr, sizeof(*laddr));
+ laddr->ifa = oladdr->ifa;
+ LIST_INSERT_HEAD(&new_inp->sctp_addr_list, laddr,
+ sctp_nxt_addr);
+ new_inp->laddr_count++;
+ }
+ }
+	/*
+	 * Any running timers need to be adjusted now; we really don't care
+	 * whether they are running or not, just blast the new_inp into all
+	 * of them.
+	 */
+
+ stcb->asoc.hb_timer.ep = (void *)new_inp;
+ stcb->asoc.dack_timer.ep = (void *)new_inp;
+ stcb->asoc.asconf_timer.ep = (void *)new_inp;
+ stcb->asoc.strreset_timer.ep = (void *)new_inp;
+ stcb->asoc.shut_guard_timer.ep = (void *)new_inp;
+ stcb->asoc.autoclose_timer.ep = (void *)new_inp;
+ stcb->asoc.delayed_event_timer.ep = (void *)new_inp;
+ /* now what about the nets? */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ net->pmtu_timer.ep = (void *)new_inp;
+ net->rxt_timer.ep = (void *)new_inp;
+ net->fr_timer.ep = (void *)new_inp;
+ }
+ SCTP_INP_WUNLOCK(new_inp);
+ SCTP_INP_WUNLOCK(old_inp);
+}
+
+static int
+sctp_isport_inuse(struct sctp_inpcb *inp, uint16_t lport)
+{
+ struct sctppcbhead *head;
+ struct sctp_inpcb *t_inp;
+
+ head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
+ sctppcbinfo.hashmark)];
+
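+	/*
+	 * Walk every endpoint hashed to this port and apply the v4/v6
+	 * conflict rules: a v6-only binding collides only with another v6
+	 * binding, a dual v4/v6 binding collides with everything, and a
+	 * v4-only binding collides unless the other side is v6-only.
+	 */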
+ LIST_FOREACH(t_inp, head, sctp_hash) {
+ if (t_inp->sctp_lport != lport) {
+ continue;
+ }
+ /* This one is in use. */
+ /* check the v6/v4 binding issue */
+ if ((t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ (((struct inpcb *)t_inp)->inp_flags & IN6P_IPV6_V6ONLY)
+ ) {
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ /* collision in V6 space */
+ return (1);
+ } else {
+ /* inp is BOUND_V4 no conflict */
+ continue;
+ }
+ } else if (t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ /* t_inp is bound v4 and v6, conflict always */
+ return (1);
+ } else {
+ /* t_inp is bound only V4 */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ (((struct inpcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY)
+ ) {
+ /* no conflict */
+ continue;
+ }
+ /* else fall through to conflict */
+ }
+ return (1);
+ }
+ return (0);
+}
+
+
+
+int
+sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
+{
+ /* bind a ep to a socket address */
+ struct sctppcbhead *head;
+ struct sctp_inpcb *inp, *inp_tmp;
+ struct inpcb *ip_inp;
+ int bindall;
+ uint16_t lport;
+ int error;
+
+ lport = 0;
+ error = 0;
+ bindall = 1;
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ ip_inp = (struct inpcb *)so->so_pcb;
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_PCB1) {
+ if (addr) {
+ printf("Bind called port:%d\n",
+ ntohs(((struct sockaddr_in *)addr)->sin_port));
+ printf("Addr :");
+ sctp_print_address(addr);
+ }
+ }
+#endif /* SCTP_DEBUG */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
+ /* already did a bind, subsequent binds NOT allowed ! */
+ return (EINVAL);
+ }
+ if (addr != NULL) {
+ if (addr->sa_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ /* IPV6_V6ONLY socket? */
+ if (
+ (ip_inp->inp_flags & IN6P_IPV6_V6ONLY)
+ ) {
+ return (EINVAL);
+ }
+ if (addr->sa_len != sizeof(*sin))
+ return (EINVAL);
+
+ sin = (struct sockaddr_in *)addr;
+ lport = sin->sin_port;
+
+ if (sin->sin_addr.s_addr != INADDR_ANY) {
+ bindall = 0;
+ }
+ } else if (addr->sa_family == AF_INET6) {
+ /* Only for pure IPv6 Address. (No IPv4 Mapped!) */
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)addr;
+
+ if (addr->sa_len != sizeof(*sin6))
+ return (EINVAL);
+
+ lport = sin6->sin6_port;
+ if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ bindall = 0;
+ /* KAME hack: embed scopeid */
+ if (sa6_embedscope(sin6, ip6_use_defzone) != 0)
+ return (EINVAL);
+ }
+ /* this must be cleared for ifa_ifwithaddr() */
+ sin6->sin6_scope_id = 0;
+ } else {
+ return (EAFNOSUPPORT);
+ }
+ }
+ SCTP_INP_INFO_WLOCK();
+ SCTP_INP_WLOCK(inp);
+ /* increase our count due to the unlock we do */
+ SCTP_INP_INCR_REF(inp);
+ if (lport) {
+		/*
+		 * Did the caller specify a port? If so we must see if an ep
+		 * already has this one bound.
+		 */
+ /* got to be root to get at low ports */
+ if (ntohs(lport) < IPPORT_RESERVED) {
+ if (p && (error =
+ suser_cred(p->td_ucred, 0)
+ )) {
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return (error);
+ }
+ }
+		if (p == NULL) {
+			/* without a thread we cannot check credentials */
+			SCTP_INP_DECR_REF(inp);
+			SCTP_INP_WUNLOCK(inp);
+			SCTP_INP_INFO_WUNLOCK();
+			return (EINVAL);
+		}
+ SCTP_INP_WUNLOCK(inp);
+ inp_tmp = sctp_pcb_findep(addr, 0, 1);
+ if (inp_tmp != NULL) {
+			/*
+			 * The lookup returned with the ref count raised.
+			 * Note that we are not bound, so inp_tmp should
+			 * NEVER be inp; and it is inp_tmp that got the
+			 * reference bump, so we must lower it.
+			 */
+ SCTP_INP_DECR_REF(inp_tmp);
+ SCTP_INP_DECR_REF(inp);
+ /* unlock info */
+ SCTP_INP_INFO_WUNLOCK();
+ return (EADDRNOTAVAIL);
+ }
+ SCTP_INP_WLOCK(inp);
+ if (bindall) {
+			/* verify that the lport is not in use by a singleton */
+ if (sctp_isport_inuse(inp, lport)) {
+ /* Sorry someone already has this one bound */
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return (EADDRNOTAVAIL);
+ }
+ }
+ } else {
+		/*
+		 * Get any port, but make sure no one has any address with
+		 * this port bound.
+		 */
+
+		/*
+		 * Set up the inp to the top (I could use the union but this
+		 * is just as easy).
+		 */
+ uint32_t port_guess;
+ uint16_t port_attempt;
+ int not_done = 1;
+
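+		/*
+		 * One 32-bit random value yields three candidate ports: the
+		 * low 16 bits, the high 16 bits, and their sum. Each
+		 * candidate is bumped above IPPORT_RESERVED before the
+		 * in-use check; if all three fail we draw a new random
+		 * value.
+		 */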
+ while (not_done) {
+ port_guess = sctp_select_initial_TSN(&inp->sctp_ep);
+ port_attempt = (port_guess & 0x0000ffff);
+ if (port_attempt == 0) {
+ goto next_half;
+ }
+ if (port_attempt < IPPORT_RESERVED) {
+ port_attempt += IPPORT_RESERVED;
+ }
+ if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) {
+ /* got a port we can use */
+ not_done = 0;
+ continue;
+ }
+ /* try upper half */
+ next_half:
+ port_attempt = ((port_guess >> 16) & 0x0000ffff);
+ if (port_attempt == 0) {
+ goto last_try;
+ }
+ if (port_attempt < IPPORT_RESERVED) {
+ port_attempt += IPPORT_RESERVED;
+ }
+ if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) {
+ /* got a port we can use */
+ not_done = 0;
+ continue;
+ }
+			/* try the two halves added together */
+ last_try:
+ port_attempt = (((port_guess >> 16) & 0x0000ffff) +
+ (port_guess & 0x0000ffff));
+ if (port_attempt == 0) {
+ /* get a new random number */
+ continue;
+ }
+ if (port_attempt < IPPORT_RESERVED) {
+ port_attempt += IPPORT_RESERVED;
+ }
+ if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) {
+ /* got a port we can use */
+ not_done = 0;
+ continue;
+ }
+ }
+ /* we don't get out of the loop until we have a port */
+ lport = htons(port_attempt);
+ }
+ SCTP_INP_DECR_REF(inp);
+ if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE |
+ SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+		/*
+		 * This really should not happen: the caller did a
+		 * non-blocking bind and then a close at the same time.
+		 */
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return (EINVAL);
+ }
+	/* we look clear to give out this port, so let's set up the binding */
+ if (bindall) {
+ /* binding to all addresses, so just set in the proper flags */
+ inp->sctp_flags |= SCTP_PCB_FLAGS_BOUNDALL;
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF);
+ /* set the automatic addr changes from kernel flag */
+ if (sctp_auto_asconf == 0) {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
+ } else {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
+ }
+ } else {
+		/*
+		 * Bind-specific: make sure the bound-all flag is off and
+		 * add a new address structure to the sctp_addr_list inside
+		 * the ep structure.
+		 *
+		 * We will need to allocate one and insert it at the head.
+		 * The socketopt call can insert new addresses in there as
+		 * well; it will also have to do the embed-scope KAME hack
+		 * (before adding).
+		 */
+ struct ifaddr *ifa;
+ struct sockaddr_storage store_sa;
+
+ memset(&store_sa, 0, sizeof(store_sa));
+ if (addr->sa_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)&store_sa;
+ memcpy(sin, addr, sizeof(struct sockaddr_in));
+ sin->sin_port = 0;
+ } else if (addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&store_sa;
+ memcpy(sin6, addr, sizeof(struct sockaddr_in6));
+ sin6->sin6_port = 0;
+ }
+		/*
+		 * First find the interface with the bound address. We need
+		 * to zero out the port to find the address (yuck!); we
+		 * can't do this earlier since sctp_pcb_findep() needs the
+		 * port.
+		 */
+ ifa = sctp_find_ifa_by_addr((struct sockaddr *)&store_sa);
+ if (ifa == NULL) {
+ /* Can't find an interface with that address */
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return (EADDRNOTAVAIL);
+ }
+ if (addr->sa_family == AF_INET6) {
+ struct in6_ifaddr *ifa6;
+
+ ifa6 = (struct in6_ifaddr *)ifa;
+ /*
+ * allow binding of deprecated addresses as per RFC
+ * 2462 and ipng discussion
+ */
+ if (ifa6->ia6_flags & (IN6_IFF_DETACHED |
+ IN6_IFF_ANYCAST |
+ IN6_IFF_NOTREADY)) {
+ /* Can't bind a non-existent addr. */
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return (EINVAL);
+ }
+ }
+ /* we're not bound all */
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUNDALL;
+ /* set the automatic addr changes from kernel flag */
+ if (sctp_auto_asconf == 0) {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
+ } else {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
+ }
+ /* allow bindx() to send ASCONF's for binding changes */
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF);
+ /* add this address to the endpoint list */
+ error = sctp_insert_laddr(&inp->sctp_addr_list, ifa);
+ if (error != 0) {
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return (error);
+ }
+ inp->laddr_count++;
+ }
+ /* find the bucket */
+ head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
+ sctppcbinfo.hashmark)];
+ /* put it in the bucket */
+ LIST_INSERT_HEAD(head, inp, sctp_hash);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_PCB1) {
+ printf("Main hash to bind at head:%p, bound port:%d\n", head, ntohs(lport));
+ }
+#endif
+ /* set in the port */
+ inp->sctp_lport = lport;
+
+ /* turn off just the unbound flag */
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND;
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return (0);
+}
+
+
+static void
+sctp_iterator_inp_being_freed(struct sctp_inpcb *inp, struct sctp_inpcb *inp_next)
+{
+ struct sctp_iterator *it;
+
+	/*
+	 * We enter with only the ITERATOR_LOCK in place and a write lock
+	 * on the inp_info stuff.
+	 */
+
+ /*
+ * Go through all iterators, we must do this since it is possible
+ * that some iterator does NOT have the lock, but is waiting for it.
+ * And the one that had the lock has either moved in the last
+ * iteration or we just cleared it above. We need to find all of
+ * those guys. The list of iterators should never be very big
+ * though.
+ */
+ LIST_FOREACH(it, &sctppcbinfo.iteratorhead, sctp_nxt_itr) {
+ if (it == inp->inp_starting_point_for_iterator)
+ /* skip this guy, he's special */
+ continue;
+ if (it->inp == inp) {
+			/*
+			 * This is tricky and we DON'T lock the iterator.
+			 * The reason is that it is running but waiting for
+			 * us, since inp->inp_starting_point_for_iterator
+			 * holds the lock on us (the one we skipped above).
+			 * So it is not running but waiting for
+			 * inp->inp_starting_point_for_iterator to be
+			 * released by the one that does have our INP in a
+			 * lock.
+			 */
+ if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
+ it->inp = NULL;
+ it->stcb = NULL;
+ } else {
+ /* set him up to do the next guy not me */
+ it->inp = inp_next;
+ it->stcb = NULL;
+ }
+ }
+ }
+ it = inp->inp_starting_point_for_iterator;
+ if (it) {
+ if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
+ it->inp = NULL;
+ } else {
+ it->inp = inp_next;
+ }
+ it->stcb = NULL;
+ }
+}
+
+/* release sctp_inpcb unbind the port */
+void
+sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from)
+{
+	/*
+	 * Here we free an endpoint. We must find it (if it is in the hash
+	 * table) and remove it from there. Then we must also find it in
+	 * the overall list and remove it from there. After all removals
+	 * are complete any timer has to be stopped. Then start the actual
+	 * freeing: a) any local lists, b) any associations, c) the hash of
+	 * all associations, d) finally the ep itself.
+	 */
+ struct sctp_pcb *m;
+ struct sctp_inpcb *inp_save;
+ struct sctp_tcb *asoc, *nasoc;
+ struct sctp_laddr *laddr, *nladdr;
+ struct inpcb *ip_pcb;
+ struct socket *so;
+
+ struct sctp_queued_to_read *sq;
+
+ int s, cnt;
+ sctp_sharedkey_t *shared_key;
+
+ s = splnet();
+
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 0);
+#endif
+
+ SCTP_ITERATOR_LOCK();
+ so = inp->sctp_socket;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ /* been here before.. eeks.. get out of here */
+ splx(s);
+ printf("This conflict in free SHOULD not be happening!\n");
+ SCTP_ITERATOR_UNLOCK();
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 1);
+#endif
+ return;
+ }
+ SCTP_ASOC_CREATE_LOCK(inp);
+ SCTP_INP_INFO_WLOCK();
+
+ SCTP_INP_WLOCK(inp);
+ /*
+ * First time through we have the socket lock, after that no more.
+ */
+ if (from == 1) {
+		/*
+		 * Once we are in we can remove the flag. from == 1 is only
+		 * passed from the actual closing routines that are called
+		 * via the sockets layer.
+		 */
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_CLOSE_IP;
+ }
+ sctp_timer_stop(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
+
+ if (inp->control) {
+ sctp_m_freem(inp->control);
+ inp->control = NULL;
+ }
+ if (inp->pkt) {
+ sctp_m_freem(inp->pkt);
+ inp->pkt = NULL;
+ }
+ m = &inp->sctp_ep;
+ ip_pcb = &inp->ip_inp.inp; /* we could just cast the main pointer
+ * here but I will be nice :> (i.e.
+ * ip_pcb = ep;) */
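+	/*
+	 * immediate == 0 means a graceful close: each association is
+	 * aborted or moved toward SHUTDOWN as appropriate, and if any
+	 * remain in that state the endpoint is unhashed but not yet
+	 * freed.
+	 */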
+ if (immediate == 0) {
+ int cnt_in_sd;
+
+ cnt_in_sd = 0;
+ for ((asoc = LIST_FIRST(&inp->sctp_asoc_list)); asoc != NULL;
+ asoc = nasoc) {
+ nasoc = LIST_NEXT(asoc, sctp_tcblist);
+ if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ /* Skip guys being freed */
+ asoc->sctp_socket = NULL;
+ cnt_in_sd++;
+ continue;
+ }
+ if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
+ /* Just abandon things in the front states */
+ if (asoc->asoc.total_output_queue_size == 0) {
+ sctp_free_assoc(inp, asoc, 1);
+ continue;
+ }
+ }
+ SCTP_TCB_LOCK(asoc);
+ /* Disconnect the socket please */
+ asoc->sctp_socket = NULL;
+ asoc->asoc.state |= SCTP_STATE_CLOSED_SOCKET;
+ if ((asoc->asoc.size_on_reasm_queue > 0) ||
+ (asoc->asoc.control_pdapi) ||
+ (asoc->asoc.size_on_all_streams > 0) ||
+ (so && (so->so_rcv.sb_cc > 0))
+ ) {
+ /* Left with Data unread */
+ struct mbuf *op_err;
+
+ op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (op_err) {
+ /* Fill in the user initiated abort */
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ op_err->m_len =
+ sizeof(struct sctp_paramhdr) + sizeof(uint32_t);
+ ph = mtod(op_err,
+ struct sctp_paramhdr *);
+ ph->param_type = htons(
+ SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(op_err->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x30000004);
+ }
+ sctp_send_abort_tcb(asoc, op_err);
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ sctp_free_assoc(inp, asoc, 1);
+ continue;
+ } else if (TAILQ_EMPTY(&asoc->asoc.send_queue) &&
+ TAILQ_EMPTY(&asoc->asoc.sent_queue) &&
+ (asoc->asoc.stream_queue_cnt == 0)
+ ) {
+ if (asoc->asoc.locked_on_sending) {
+ goto abort_anyway;
+ }
+ if ((SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ /*
+ * there is nothing queued to send,
+ * so I send shutdown
+ */
+ sctp_send_shutdown(asoc, asoc->asoc.primary_destination);
+ asoc->asoc.state = SCTP_STATE_SHUTDOWN_SENT;
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, asoc->sctp_ep, asoc,
+ asoc->asoc.primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc,
+ asoc->asoc.primary_destination);
+ sctp_chunk_output(inp, asoc, SCTP_OUTPUT_FROM_SHUT_TMR);
+ }
+ } else {
+ /* mark into shutdown pending */
+ struct sctp_stream_queue_pending *sp;
+
+ asoc->asoc.state |= SCTP_STATE_SHUTDOWN_PENDING;
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc,
+ asoc->asoc.primary_destination);
+ if (asoc->asoc.locked_on_sending) {
+ sp = TAILQ_LAST(&((asoc->asoc.locked_on_sending)->outqueue),
+ sctp_streamhead);
+ if (sp == NULL) {
+					printf("Error, sp is NULL, locked on sending is %p strm:%d\n",
+					    asoc->asoc.locked_on_sending,
+					    asoc->asoc.locked_on_sending->stream_no);
+ } else {
+ if ((sp->length == 0) && (sp->msg_is_complete == 0))
+ asoc->asoc.state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+ }
+ }
+ if (TAILQ_EMPTY(&asoc->asoc.send_queue) &&
+ TAILQ_EMPTY(&asoc->asoc.sent_queue) &&
+ (asoc->asoc.state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+ struct mbuf *op_err;
+
+ abort_anyway:
+ op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (op_err) {
+ /*
+ * Fill in the user
+ * initiated abort
+ */
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ op_err->m_len =
+ (sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t));
+ ph = mtod(op_err,
+ struct sctp_paramhdr *);
+ ph->param_type = htons(
+ SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(op_err->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x30000005);
+ }
+ sctp_send_abort_tcb(asoc, op_err);
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ sctp_free_assoc(inp, asoc, 1);
+ continue;
+ }
+ }
+ cnt_in_sd++;
+ SCTP_TCB_UNLOCK(asoc);
+ }
+ /* now is there some left in our SHUTDOWN state? */
+ if (cnt_in_sd) {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) !=
+ SCTP_PCB_FLAGS_UNBOUND) {
+				/*
+				 * Ok, this one has been bound. Its port is
+				 * somewhere in the sctppcbinfo hash table;
+				 * remove it!
+				 *
+				 * Note we depend on lookup by vtag to find
+				 * associations that are dying. This frees
+				 * the port so we don't have to block its
+				 * usage. The SCTP_PCB_FLAGS_UNBOUND flag
+				 * will prevent us from doing this again.
+				 */
+ LIST_REMOVE(inp, sctp_hash);
+ inp->sctp_flags |= SCTP_PCB_FLAGS_UNBOUND;
+ }
+ splx(s);
+
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_ITERATOR_UNLOCK();
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 2);
+#endif
+ return;
+ }
+ }
+ inp->sctp_socket = NULL;
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) !=
+ SCTP_PCB_FLAGS_UNBOUND) {
+		/*
+		 * Ok, this one has been bound. Its port is somewhere in
+		 * the sctppcbinfo hash table. Remove it!
+		 */
+ LIST_REMOVE(inp, sctp_hash);
+ inp->sctp_flags |= SCTP_PCB_FLAGS_UNBOUND;
+ }
+	/*
+	 * If there is a timer running to kill us, forget it, since it may
+	 * contend on the INP lock, which would cause us to die...
+	 */
+ cnt = 0;
+ for ((asoc = LIST_FIRST(&inp->sctp_asoc_list)); asoc != NULL;
+ asoc = nasoc) {
+ nasoc = LIST_NEXT(asoc, sctp_tcblist);
+ if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ cnt++;
+ continue;
+ }
+ /* Free associations that are NOT killing us */
+ SCTP_TCB_LOCK(asoc);
+ if ((SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_COOKIE_WAIT) &&
+ ((asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) {
+ struct mbuf *op_err;
+ uint32_t *ippp;
+
+ op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (op_err) {
+ /* Fill in the user initiated abort */
+ struct sctp_paramhdr *ph;
+
+ op_err->m_len = (sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t));
+ ph = mtod(op_err, struct sctp_paramhdr *);
+ ph->param_type = htons(
+ SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(op_err->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x30000006);
+
+ }
+ sctp_send_abort_tcb(asoc, op_err);
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ } else if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ cnt++;
+ SCTP_TCB_UNLOCK(asoc);
+ continue;
+ }
+ if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ sctp_free_assoc(inp, asoc, 2);
+ }
+ if (cnt) {
+ /* Ok we have someone out there that will kill us */
+ callout_stop(&inp->sctp_ep.signature_change.timer);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_ITERATOR_UNLOCK();
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 3);
+#endif
+ return;
+ }
+ if ((inp->refcount) || (inp->sctp_flags & SCTP_PCB_FLAGS_CLOSE_IP)) {
+ callout_stop(&inp->sctp_ep.signature_change.timer);
+ sctp_timer_start(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_ITERATOR_UNLOCK();
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 4);
+#endif
+ return;
+ }
+ callout_stop(&inp->sctp_ep.signature_change.timer);
+ inp->sctp_ep.signature_change.type = 0;
+ inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_ALLGONE;
+
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 5);
+#endif
+
+ callout_stop(&inp->sctp_ep.signature_change.timer);
+ inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NONE;
+ /* Clear the read queue */
+ while ((sq = TAILQ_FIRST(&inp->read_queue)) != NULL) {
+ TAILQ_REMOVE(&inp->read_queue, sq, next);
+ sctp_free_remote_addr(sq->whoFrom);
+ if (so)
+ so->so_rcv.sb_cc -= sq->length;
+ if (sq->data) {
+ sctp_m_freem(sq->data);
+ sq->data = NULL;
+ }
+ /*
+ * no need to free the net count, since at this point all
+ * assoc's are gone.
+ */
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, sq);
+ SCTP_DECR_READQ_COUNT();
+ }
+ /* Now the sctp_pcb things */
+ /*
+ * free each asoc if it is not already closed/free. we can't use the
+ * macro here since le_next will get freed as part of the
+ * sctp_free_assoc() call.
+ */
+ cnt = 0;
+ if (so) {
+#ifdef IPSEC
+ ipsec4_delete_pcbpolicy(ip_pcb);
+#endif /* IPSEC */
+
+ /* Unlocks not needed since the socket is gone now */
+ }
+ if (ip_pcb->inp_options) {
+ (void)sctp_m_free(ip_pcb->inp_options);
+ ip_pcb->inp_options = 0;
+ }
+ if (ip_pcb->inp_moptions) {
+ ip_freemoptions(ip_pcb->inp_moptions);
+ ip_pcb->inp_moptions = 0;
+ }
+#ifdef INET6
+ if (ip_pcb->inp_vflag & INP_IPV6) {
+ struct in6pcb *in6p;
+
+ in6p = (struct in6pcb *)inp;
+ ip6_freepcbopts(in6p->in6p_outputopts);
+ }
+#endif /* INET6 */
+ ip_pcb->inp_vflag = 0;
+ /* free up authentication fields */
+ if (inp->sctp_ep.local_auth_chunks != NULL)
+ sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
+ if (inp->sctp_ep.local_hmacs != NULL)
+ sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
+
+ shared_key = LIST_FIRST(&inp->sctp_ep.shared_keys);
+ while (shared_key) {
+ LIST_REMOVE(shared_key, next);
+ sctp_free_sharedkey(shared_key);
+ shared_key = LIST_FIRST(&inp->sctp_ep.shared_keys);
+ }
+
+ inp_save = LIST_NEXT(inp, sctp_list);
+ LIST_REMOVE(inp, sctp_list);
+
+ /* fix any iterators only after out of the list */
+ sctp_iterator_inp_being_freed(inp, inp_save);
+	/*
+	 * If we have an address list, the following will free the list of
+	 * ifaddr's that are set into this ep. Again, macro limitations
+	 * here: LIST_FOREACH would be a bad idea since entries are freed
+	 * as we go.
+	 */
+ for ((laddr = LIST_FIRST(&inp->sctp_addr_list)); laddr != NULL;
+ laddr = nladdr) {
+ nladdr = LIST_NEXT(laddr, sctp_nxt_addr);
+ LIST_REMOVE(laddr, sctp_nxt_addr);
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr);
+ SCTP_DECR_LADDR_COUNT();
+ }
+ /* Now lets see about freeing the EP hash table. */
+ if (inp->sctp_tcbhash != NULL) {
+ SCTP_FREE(inp->sctp_tcbhash);
+ inp->sctp_tcbhash = 0;
+ }
+ /* Now we must put the ep memory back into the zone pool */
+ SCTP_INP_LOCK_DESTROY(inp);
+ SCTP_INP_READ_DESTROY(inp);
+ SCTP_ASOC_CREATE_LOCK_DESTROY(inp);
+ SCTP_INP_INFO_WUNLOCK();
+
+ SCTP_ITERATOR_UNLOCK();
+
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
+ SCTP_DECR_EP_COUNT();
+
+ splx(s);
+}
+
+
+struct sctp_nets *
+sctp_findnet(struct sctp_tcb *stcb, struct sockaddr *addr)
+{
+ struct sctp_nets *net;
+
+ /* locate the address */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (sctp_cmpaddr(addr, (struct sockaddr *)&net->ro._l_addr))
+ return (net);
+ }
+ return (NULL);
+}
+
+
+/*
+ * Adds a remote endpoint address, done with the INIT/INIT-ACK as well as
+ * when an ASCONF arrives that adds it. It will also initialize all the
+ * cwnd stats.
+ */
+int
+sctp_is_address_on_local_host(struct sockaddr *addr)
+{
+ struct ifnet *ifn;
+ struct ifaddr *ifa;
+
+ TAILQ_FOREACH(ifn, &ifnet, if_list) {
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ if (addr->sa_family == ifa->ifa_addr->sa_family) {
+ /* same family */
+ if (addr->sa_family == AF_INET) {
+ struct sockaddr_in *sin, *sin_c;
+
+ sin = (struct sockaddr_in *)addr;
+ sin_c = (struct sockaddr_in *)
+ ifa->ifa_addr;
+ if (sin->sin_addr.s_addr ==
+ sin_c->sin_addr.s_addr) {
+ /*
+ * we are on the same
+ * machine
+ */
+ return (1);
+ }
+ } else if (addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6, *sin_c6;
+
+ sin6 = (struct sockaddr_in6 *)addr;
+ sin_c6 = (struct sockaddr_in6 *)
+ ifa->ifa_addr;
+ if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
+ &sin_c6->sin6_addr)) {
+ /*
+ * we are on the same
+ * machine
+ */
+ return (1);
+ }
+ }
+ }
+ }
+ }
+ return (0);
+}
+
+int
+sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
+ int set_scope, int from)
+{
+	/*
+	 * The following is redundant to the same lines in
+	 * sctp_aloc_assoc(), but is needed since others call the add
+	 * address function directly.
+	 */
+ struct sctp_nets *net, *netfirst;
+ int addr_inscope;
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_PCB1) {
+ printf("Adding an address (from:%d) to the peer: ", from);
+ sctp_print_address(newaddr);
+ }
+#endif
+
+ netfirst = sctp_findnet(stcb, newaddr);
+ if (netfirst) {
+		/*
+		 * Lie and return ok; we don't want to make the association
+		 * go away because of this behavior. It happens in the TCP
+		 * model with a connected socket: the assoc does not reach
+		 * the hash table until after it is built, so it can't be
+		 * found. Mark it reachable, since the initial creation will
+		 * have been cleared and the NOT_IN_ASSOC flag will have
+		 * been added... and we don't want to end up removing it
+		 * back out.
+		 */
+ if (netfirst->dest_state & SCTP_ADDR_UNCONFIRMED) {
+ netfirst->dest_state = (SCTP_ADDR_REACHABLE |
+ SCTP_ADDR_UNCONFIRMED);
+ } else {
+ netfirst->dest_state = SCTP_ADDR_REACHABLE;
+ }
+
+ return (0);
+ }
+ addr_inscope = 1;
+ if (newaddr->sa_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)newaddr;
+ if (sin->sin_addr.s_addr == 0) {
+ /* Invalid address */
+ return (-1);
+ }
+		/* zero out the sin_zero padding area */
+ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
+
+ /* assure len is set */
+ sin->sin_len = sizeof(struct sockaddr_in);
+ if (set_scope) {
+#ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
+ stcb->ipv4_local_scope = 1;
+#else
+ if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
+ stcb->asoc.ipv4_local_scope = 1;
+ }
+#endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
+
+ if (sctp_is_address_on_local_host(newaddr)) {
+ stcb->asoc.loopback_scope = 1;
+ stcb->asoc.ipv4_local_scope = 1;
+ stcb->asoc.local_scope = 1;
+ stcb->asoc.site_scope = 1;
+ }
+ } else {
+ if (from == 8) {
+ /* From connectx */
+ if (sctp_is_address_on_local_host(newaddr)) {
+ stcb->asoc.loopback_scope = 1;
+ stcb->asoc.ipv4_local_scope = 1;
+ stcb->asoc.local_scope = 1;
+ stcb->asoc.site_scope = 1;
+ }
+ }
+ /* Validate the address is in scope */
+ if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) &&
+ (stcb->asoc.ipv4_local_scope == 0)) {
+ addr_inscope = 0;
+ }
+ }
+ } else if (newaddr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)newaddr;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ /* Invalid address */
+ return (-1);
+ }
+ /* assure len is set */
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+ if (set_scope) {
+ if (sctp_is_address_on_local_host(newaddr)) {
+ stcb->asoc.loopback_scope = 1;
+ stcb->asoc.local_scope = 1;
+ stcb->asoc.ipv4_local_scope = 1;
+ stcb->asoc.site_scope = 1;
+ } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ /*
+ * If the new destination is a LINK_LOCAL we
+ * must have common site scope. Don't set
+ * the local scope since we may not share
+ * all links, only loopback can do this.
+ * Links on the local network would also be
+ * on our private network for v4 too.
+ */
+ stcb->asoc.ipv4_local_scope = 1;
+ stcb->asoc.site_scope = 1;
+ } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
+ /*
+ * If the new destination is SITE_LOCAL then
+ * we must have site scope in common.
+ */
+ stcb->asoc.site_scope = 1;
+ }
+ } else {
+ if (from == 8) {
+ /* From connectx */
+ if (sctp_is_address_on_local_host(newaddr)) {
+ stcb->asoc.loopback_scope = 1;
+ stcb->asoc.ipv4_local_scope = 1;
+ stcb->asoc.local_scope = 1;
+ stcb->asoc.site_scope = 1;
+ }
+ }
+ /* Validate the address is in scope */
+ if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr) &&
+ (stcb->asoc.loopback_scope == 0)) {
+ addr_inscope = 0;
+ } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) &&
+ (stcb->asoc.local_scope == 0)) {
+ addr_inscope = 0;
+ } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr) &&
+ (stcb->asoc.site_scope == 0)) {
+ addr_inscope = 0;
+ }
+ }
+ } else {
+ /* not supported family type */
+ return (-1);
+ }
+ net = (struct sctp_nets *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_net);
+ if (net == NULL) {
+ return (-1);
+ }
+ SCTP_INCR_RADDR_COUNT();
+ bzero(net, sizeof(*net));
+ memcpy(&net->ro._l_addr, newaddr, newaddr->sa_len);
+ if (newaddr->sa_family == AF_INET) {
+ ((struct sockaddr_in *)&net->ro._l_addr)->sin_port = stcb->rport;
+ } else if (newaddr->sa_family == AF_INET6) {
+ ((struct sockaddr_in6 *)&net->ro._l_addr)->sin6_port = stcb->rport;
+ }
+ net->addr_is_local = sctp_is_address_on_local_host(newaddr);
+ net->failure_threshold = stcb->asoc.def_net_failure;
+ if (addr_inscope == 0) {
+ net->dest_state = (SCTP_ADDR_REACHABLE |
+ SCTP_ADDR_OUT_OF_SCOPE);
+ } else {
+ if (from == 8)
+ /* 8 is passed by connect_x */
+ net->dest_state = SCTP_ADDR_REACHABLE;
+ else
+ net->dest_state = SCTP_ADDR_REACHABLE |
+ SCTP_ADDR_UNCONFIRMED;
+ }
+ net->RTO = stcb->asoc.initial_rto;
+ stcb->asoc.numnets++;
+	net->ref_count = 1;
+ net->tos_flowlabel = 0;
+#ifdef AF_INET
+ if (newaddr->sa_family == AF_INET)
+ net->tos_flowlabel = stcb->asoc.default_tos;
+#endif
+#ifdef AF_INET6
+ if (newaddr->sa_family == AF_INET6)
+ net->tos_flowlabel = stcb->asoc.default_flowlabel;
+#endif
+ /* Init the timer structure */
+ callout_init(&net->rxt_timer.timer, 1);
+ callout_init(&net->fr_timer.timer, 1);
+ callout_init(&net->pmtu_timer.timer, 1);
+
+ /* Now generate a route for this guy */
+ /* KAME hack: embed scopeid */
+ if (newaddr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ (void)sa6_embedscope(sin6, ip6_use_defzone);
+ sin6->sin6_scope_id = 0;
+ }
+ rtalloc_ign((struct route *)&net->ro, 0UL);
+ if (newaddr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ (void)sa6_recoverscope(sin6);
+ }
+ if ((net->ro.ro_rt) &&
+ (net->ro.ro_rt->rt_ifp)) {
+ net->mtu = net->ro.ro_rt->rt_ifp->if_mtu;
+ if (from == 1) {
+ stcb->asoc.smallest_mtu = net->mtu;
+ }
+ /* start things off to match mtu of interface please. */
+ net->ro.ro_rt->rt_rmx.rmx_mtu = net->ro.ro_rt->rt_ifp->if_mtu;
+ } else {
+ net->mtu = stcb->asoc.smallest_mtu;
+ }
+
+ if (stcb->asoc.smallest_mtu > net->mtu) {
+ stcb->asoc.smallest_mtu = net->mtu;
+ }
+	/*
+	 * Start cwnd at min(4 * MTU, max(2 * MTU, SCTP_INITIAL_CWND)),
+	 * i.e. the standard initial window, limited to at most 4 MTUs of
+	 * sending.
+	 */
+ net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
+
+	/* we always get at LEAST 2 MTUs */
+ if (net->cwnd < (2 * net->mtu)) {
+ net->cwnd = 2 * net->mtu;
+ }
+ net->ssthresh = stcb->asoc.peers_rwnd;
+
+#if defined(SCTP_CWND_MONITOR) || defined(SCTP_CWND_LOGGING)
+ sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
+#endif
+
+ /*
+ * CMT: CUC algo - set find_pseudo_cumack to TRUE (1) at beginning
+ * of assoc (2005/06/27, iyengar@cis.udel.edu)
+ */
+ net->find_pseudo_cumack = 1;
+ net->find_rtx_pseudo_cumack = 1;
+ net->src_addr_selected = 0;
+ netfirst = TAILQ_FIRST(&stcb->asoc.nets);
+ if (net->ro.ro_rt == NULL) {
+ /* Since we have no route put it at the back */
+ TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next);
+ } else if (netfirst == NULL) {
+ /* We are the first one in the pool. */
+ TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
+ } else if (netfirst->ro.ro_rt == NULL) {
+ /*
+ * First one has NO route. Place this one ahead of the first
+ * one.
+ */
+ TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
+ } else if (net->ro.ro_rt->rt_ifp != netfirst->ro.ro_rt->rt_ifp) {
+ /*
+ * This one has a different interface than the one at the
+ * top of the list. Place it ahead.
+ */
+ TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
+ } else {
+		/*
+		 * Ok, we have the same interface as the first one. Move
+		 * forward until we find either a) one with a NULL route
+		 * (insert ahead of that), b) one with a different ifp
+		 * (insert after that), or c) the end of the list (insert
+		 * at the tail).
+		 */
+ struct sctp_nets *netlook;
+
+ do {
+ netlook = TAILQ_NEXT(netfirst, sctp_next);
+ if (netlook == NULL) {
+ /* End of the list */
+ TAILQ_INSERT_TAIL(&stcb->asoc.nets, net,
+ sctp_next);
+ break;
+ } else if (netlook->ro.ro_rt == NULL) {
+ /* next one has NO route */
+ TAILQ_INSERT_BEFORE(netfirst, net, sctp_next);
+ break;
+ } else if (netlook->ro.ro_rt->rt_ifp !=
+ net->ro.ro_rt->rt_ifp) {
+ TAILQ_INSERT_AFTER(&stcb->asoc.nets, netlook,
+ net, sctp_next);
+ break;
+ }
+ /* Shift forward */
+ netfirst = netlook;
+ } while (netlook != NULL);
+ }
+
+ /* got to have a primary set */
+	if (stcb->asoc.primary_destination == NULL) {
+ stcb->asoc.primary_destination = net;
+ } else if ((stcb->asoc.primary_destination->ro.ro_rt == NULL) &&
+ (net->ro.ro_rt) &&
+ ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) {
+ /* No route to current primary adopt new primary */
+ stcb->asoc.primary_destination = net;
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb,
+ net);
+ /* Validate primary is first */
+ net = TAILQ_FIRST(&stcb->asoc.nets);
+ if ((net != stcb->asoc.primary_destination) &&
+ (stcb->asoc.primary_destination)) {
+		/*
+		 * The first one on the list is NOT the primary.
+		 * sctp_cmpaddr() is much more efficient if the primary is
+		 * first on the list, so make it so.
+		 */
+ TAILQ_REMOVE(&stcb->asoc.nets,
+ stcb->asoc.primary_destination, sctp_next);
+ TAILQ_INSERT_HEAD(&stcb->asoc.nets,
+ stcb->asoc.primary_destination, sctp_next);
+ }
+ return (0);
+}
+
+
+/*
+ * Allocate an association and add it to the endpoint. The caller must be
+ * careful to add all additional addresses right away, once they are known,
+ * or else the assoc may experience a blackout scenario.
+ */
+struct sctp_tcb *
+sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
+ int for_a_init, int *error, uint32_t override_tag)
+{
+ struct sctp_tcb *stcb;
+ struct sctp_association *asoc;
+ struct sctpasochead *head;
+ uint16_t rport;
+ int err;
+
+ /*
+ * Assumption made here: Caller has done a
+ * sctp_findassociation_ep_addr(ep, addr's); to make sure the
+ * address does not exist already.
+ */
+ if (sctppcbinfo.ipi_count_asoc >= SCTP_MAX_NUM_OF_ASOC) {
+ /* Hit max assoc, sorry no more */
+ *error = ENOBUFS;
+ return (NULL);
+ }
+ SCTP_INP_RLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
+		/*
+		 * If it's in the TCP pool, it is NOT allowed to create an
+		 * association. The parent listener needs to call
+		 * sctp_aloc_assoc... or the one-to-many socket. If a
+		 * peeled-off or connected one does this, it's an error.
+		 */
+ SCTP_INP_RUNLOCK(inp);
+ *error = EINVAL;
+ return (NULL);
+ }
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_PCB3) {
+ printf("Allocate an association for peer:");
+ if (firstaddr)
+ sctp_print_address(firstaddr);
+ else
+ printf("None\n");
+ printf("Port:%d\n",
+ ntohs(((struct sockaddr_in *)firstaddr)->sin_port));
+ }
+#endif /* SCTP_DEBUG */
+ if (firstaddr->sa_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)firstaddr;
+ if ((sin->sin_port == 0) || (sin->sin_addr.s_addr == 0)) {
+ /* Invalid address */
+ SCTP_INP_RUNLOCK(inp);
+ *error = EINVAL;
+ return (NULL);
+ }
+ rport = sin->sin_port;
+ } else if (firstaddr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)firstaddr;
+ if ((sin6->sin6_port == 0) ||
+ (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))) {
+ /* Invalid address */
+ SCTP_INP_RUNLOCK(inp);
+ *error = EINVAL;
+ return (NULL);
+ }
+ rport = sin6->sin6_port;
+ } else {
+ /* not supported family type */
+ SCTP_INP_RUNLOCK(inp);
+ *error = EINVAL;
+ return (NULL);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
+		/*
+		 * If you have not performed a bind, then we need to do the
+		 * ephemeral bind for you.
+		 */
+ if ((err = sctp_inpcb_bind(inp->sctp_socket,
+ (struct sockaddr *)NULL,
+ (struct thread *)NULL
+ ))) {
+ /* bind error, probably perm */
+ *error = err;
+ return (NULL);
+ }
+ }
+ stcb = (struct sctp_tcb *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_asoc);
+ if (stcb == NULL) {
+ /* out of memory? */
+ *error = ENOMEM;
+ return (NULL);
+ }
+ SCTP_INCR_ASOC_COUNT();
+
+ bzero(stcb, sizeof(*stcb));
+ asoc = &stcb->asoc;
+ SCTP_TCB_LOCK_INIT(stcb);
+ SCTP_TCB_SEND_LOCK_INIT(stcb);
+	/* set up back pointers */
+ stcb->sctp_ep = inp;
+ stcb->sctp_socket = inp->sctp_socket;
+ if ((err = sctp_init_asoc(inp, asoc, for_a_init, override_tag))) {
+ /* failed */
+ SCTP_TCB_LOCK_DESTROY(stcb);
+ SCTP_TCB_SEND_LOCK_DESTROY(stcb);
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
+ SCTP_DECR_ASOC_COUNT();
+ *error = err;
+ return (NULL);
+ }
+ /* and the port */
+ stcb->rport = rport;
+ SCTP_INP_INFO_WLOCK();
+ SCTP_INP_WLOCK(inp);
+ if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+ /* inpcb freed while alloc going on */
+ SCTP_TCB_LOCK_DESTROY(stcb);
+ SCTP_TCB_SEND_LOCK_DESTROY(stcb);
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_DECR_ASOC_COUNT();
+ *error = EINVAL;
+ return (NULL);
+ }
+ SCTP_TCB_LOCK(stcb);
+
+ /* now that my_vtag is set, add it to the hash */
+ head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
+ sctppcbinfo.hashasocmark)];
+ /* put it in the bucket in the vtag hash of assoc's for the system */
+ LIST_INSERT_HEAD(head, stcb, sctp_asocs);
+ SCTP_INP_INFO_WUNLOCK();
+
+ if ((err = sctp_add_remote_addr(stcb, firstaddr, 1, 1))) {
+ /* failure.. memory error? */
+ if (asoc->strmout)
+ SCTP_FREE(asoc->strmout);
+ if (asoc->mapping_array)
+ SCTP_FREE(asoc->mapping_array);
+
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
+ SCTP_DECR_ASOC_COUNT();
+ SCTP_TCB_LOCK_DESTROY(stcb);
+ SCTP_TCB_SEND_LOCK_DESTROY(stcb);
+ *error = ENOBUFS;
+ return (NULL);
+ }
+ /* Init all the timers */
+ callout_init(&asoc->hb_timer.timer, 1);
+ callout_init(&asoc->dack_timer.timer, 1);
+ callout_init(&asoc->asconf_timer.timer, 1);
+ callout_init(&asoc->strreset_timer.timer, 1);
+ callout_init(&asoc->shut_guard_timer.timer, 1);
+ callout_init(&asoc->autoclose_timer.timer, 1);
+ callout_init(&asoc->delayed_event_timer.timer, 1);
+ LIST_INSERT_HEAD(&inp->sctp_asoc_list, stcb, sctp_tcblist);
+ /* now file the port under the hash as well */
+ if (inp->sctp_tcbhash != NULL) {
+ head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(stcb->rport,
+ inp->sctp_hashmark)];
+ LIST_INSERT_HEAD(head, stcb, sctp_tcbhash);
+ }
+ SCTP_INP_WUNLOCK(inp);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_PCB1) {
+ printf("Association %p now allocated\n", stcb);
+ }
+#endif
+ return (stcb);
+}
+
+
+void
+sctp_remove_net(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ struct sctp_association *asoc;
+
+ asoc = &stcb->asoc;
+ asoc->numnets--;
+ TAILQ_REMOVE(&asoc->nets, net, sctp_next);
+ sctp_free_remote_addr(net);
+ if (net == asoc->primary_destination) {
+ /* Reset primary */
+ struct sctp_nets *lnet;
+
+ lnet = TAILQ_FIRST(&asoc->nets);
+ /* Try to find a confirmed primary */
+ asoc->primary_destination = sctp_find_alternate_net(stcb, lnet,
+ 0);
+ }
+ if (net == asoc->last_data_chunk_from) {
+ /* Reset primary */
+ asoc->last_data_chunk_from = TAILQ_FIRST(&asoc->nets);
+ }
+ if (net == asoc->last_control_chunk_from) {
+ /* Clear net */
+ asoc->last_control_chunk_from = NULL;
+ }
+/* if (net == asoc->asconf_last_sent_to) {*/
+ /* Reset primary */
+/* asoc->asconf_last_sent_to = TAILQ_FIRST(&asoc->nets);*/
+/* }*/
+}
+
+/*
+ * Remove a remote endpoint address from an association; it will fail if
+ * the address does not exist.
+ */
+int
+sctp_del_remote_addr(struct sctp_tcb *stcb, struct sockaddr *remaddr)
+{
+	/*
+	 * Here we need to remove a remote address. This is quite simple:
+	 * we first find it in the list of addresses for the association
+	 * (stcb->asoc.nets) and then, if it is there, we do a TAILQ_REMOVE
+	 * on that item. Note we do not allow it to be removed if there
+	 * are no other addresses.
+	 */
+ struct sctp_association *asoc;
+ struct sctp_nets *net, *net_tmp;
+
+ asoc = &stcb->asoc;
+
+ /* locate the address */
+ for (net = TAILQ_FIRST(&asoc->nets); net != NULL; net = net_tmp) {
+ net_tmp = TAILQ_NEXT(net, sctp_next);
+ if (net->ro._l_addr.sa.sa_family != remaddr->sa_family) {
+ continue;
+ }
+ if (sctp_cmpaddr((struct sockaddr *)&net->ro._l_addr,
+ remaddr)) {
+ /* we found the guy */
+ if (asoc->numnets < 2) {
+ /* Must have at LEAST two remote addresses */
+ return (-1);
+ } else {
+ sctp_remove_net(stcb, net);
+ return (0);
+ }
+ }
+ }
+ /* not found. */
+ return (-2);
+}
+
+
+static void
+sctp_add_vtag_to_timewait(struct sctp_inpcb *inp, uint32_t tag)
+{
+ struct sctpvtaghead *chain;
+ struct sctp_tagblock *twait_block;
+ struct timeval now;
+ int set, i;
+
+ SCTP_GETTIME_TIMEVAL(&now);
+ chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
+ set = 0;
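+	/*
+	 * Each chain bucket holds blocks of SCTP_NUMBER_IN_VTAG_BLOCK
+	 * slots. We scan for a free slot, expiring stale tags on the way,
+	 * and only allocate a new block when no slot is found.
+	 */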
+ if (!LIST_EMPTY(chain)) {
+ /* Block(s) present, lets find space, and expire on the fly */
+ LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
+ for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
+ if ((twait_block->vtag_block[i].v_tag == 0) &&
+ !set) {
+ twait_block->vtag_block[i].tv_sec_at_expire =
+ now.tv_sec + SCTP_TIME_WAIT;
+ twait_block->vtag_block[i].v_tag = tag;
+ set = 1;
+				} else if ((twait_block->vtag_block[i].v_tag) &&
+				    ((long)twait_block->vtag_block[i].tv_sec_at_expire <
+				    now.tv_sec)) {
+					/* Audit: this entry has expired, clear it */
+					twait_block->vtag_block[i].tv_sec_at_expire = 0;
+					twait_block->vtag_block[i].v_tag = 0;
+					if (set == 0) {
+						/* Reuse this slot for my new tag */
+						twait_block->vtag_block[i].tv_sec_at_expire = now.tv_sec + SCTP_TIME_WAIT;
+						twait_block->vtag_block[i].v_tag = tag;
+						set = 1;
+					}
+				}
+ }
+ if (set) {
+				/*
+				 * We stop at the block where we placed our
+				 * tag; later blocks get audited on another
+				 * pass.
+				 */
+ break;
+ }
+ }
+ }
+ /* Need to add a new block to chain */
+ if (!set) {
+ SCTP_MALLOC(twait_block, struct sctp_tagblock *,
+ sizeof(struct sctp_tagblock), "TimeWait");
+ if (twait_block == NULL) {
+ return;
+ }
+		memset(twait_block, 0, sizeof(struct sctp_tagblock));
+ LIST_INSERT_HEAD(chain, twait_block, sctp_nxt_tagblock);
+ twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec +
+ SCTP_TIME_WAIT;
+ twait_block->vtag_block[0].v_tag = tag;
+ }
+}
+
+
+static void
+sctp_iterator_asoc_being_freed(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
+{
+ struct sctp_iterator *it;
+
+	/*
+	 * We unlock the tcb lock so we avoid a deadlock scenario where
+	 * the iterator is waiting on the TCB lock while the TCB-lock
+	 * holder is waiting on the iterator lock.
+	 */
+ it = stcb->asoc.stcb_starting_point_for_iterator;
+ if (it == NULL) {
+ return;
+ }
+ if (it->inp != stcb->sctp_ep) {
+ /* hmm, focused on the wrong one? */
+ return;
+ }
+ if (it->stcb != stcb) {
+ return;
+ }
+ it->stcb = LIST_NEXT(stcb, sctp_tcblist);
+ if (it->stcb == NULL) {
+		/* done with all asoc's on this endpoint */
+ if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
+ it->inp = NULL;
+ } else {
+ it->inp = LIST_NEXT(inp, sctp_list);
+ }
+ }
+}
+
+/*
+ * Free the association after un-hashing the remote port.
+ */
+int
+sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfree)
+{
+ int i;
+ struct sctp_association *asoc;
+ struct sctp_nets *net, *prev;
+ struct sctp_laddr *laddr;
+ struct sctp_tmit_chunk *chk;
+ struct sctp_asconf_addr *aparam;
+ struct sctp_stream_reset_list *liste;
+ struct sctp_queued_to_read *sq;
+ struct sctp_stream_queue_pending *sp;
+ sctp_sharedkey_t *shared_key;
+ struct socket *so;
+ int ccnt = 0;
+ int s, cnt = 0;
+
+	/* first, let's purge the entry from the hash table. */
+ s = splnet();
+
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, stcb, 6);
+#endif
+ if (stcb->asoc.state == 0) {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 7);
+#endif
+ splx(s);
+ /* there is no asoc, really TSNH :-0 */
+ return (1);
+ }
+ asoc = &stcb->asoc;
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE))
+ /* nothing around */
+ so = NULL;
+ else
+ so = inp->sctp_socket;
+
+	/*
+	 * We use timer-based freeing if a reader or writer is in the way.
+	 * So we first check if we are actually being called from a timer;
+	 * if so, we abort early when a reader or writer is still in the
+	 * way.
+	 */
+ if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
+ (from_inpcbfree == 0)) {
+ /*
+ * is it the timer driving us? if so are the reader/writers
+ * gone?
+ */
+ if (stcb->asoc.refcnt) {
+ /* nope, reader or writer in the way */
+ sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL);
+ /* no asoc destroyed */
+ SCTP_TCB_UNLOCK(stcb);
+ splx(s);
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, stcb, 8);
+#endif
+ return (0);
+ }
+ }
+ /* now clean up any other timers */
+ callout_stop(&asoc->hb_timer.timer);
+ callout_stop(&asoc->dack_timer.timer);
+ callout_stop(&asoc->strreset_timer.timer);
+ callout_stop(&asoc->asconf_timer.timer);
+ callout_stop(&asoc->autoclose_timer.timer);
+ callout_stop(&asoc->shut_guard_timer.timer);
+ callout_stop(&asoc->delayed_event_timer.timer);
+
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ callout_stop(&net->fr_timer.timer);
+ callout_stop(&net->rxt_timer.timer);
+ callout_stop(&net->pmtu_timer.timer);
+ }
+
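+	/*
+	 * Mark the assoc ABOUT_TO_BE_FREED first; if a reader or writer
+	 * still holds a reference the actual free is pushed onto the
+	 * ASOCKILL timer, which re-enters this function later.
+	 */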
+ stcb->asoc.state |= SCTP_STATE_ABOUT_TO_BE_FREED;
+ if ((from_inpcbfree != 2) && (stcb->asoc.refcnt)) {
+ /* reader or writer in the way */
+ sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL);
+ SCTP_TCB_UNLOCK(stcb);
+ splx(s);
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, stcb, 9);
+#endif
+ /* no asoc destroyed */
+ return (0);
+ }
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, stcb, 10);
+#endif
+ /* Now the read queue needs to be cleaned up */
+ SCTP_INP_READ_LOCK(inp);
+ TAILQ_FOREACH(sq, &inp->read_queue, next) {
+ if (sq->stcb == stcb) {
+ sq->do_not_ref_stcb = 1;
+ sq->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
+ if ((from_inpcbfree == 0) && so) {
+ /*
+ * Only if we have a socket lock do we do
+ * this
+ */
+ if ((sq->held_length) ||
+ (sq->end_added == 0) ||
+ ((sq->length == 0) && (sq->end_added == 0))) {
+ /* Held for PD-API */
+ sq->held_length = 0;
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT)) {
+ /*
+ * need to change to PD-API
+ * aborted
+ */
+ stcb->asoc.control_pdapi = sq;
+ sctp_notify_partial_delivery_indication(stcb,
+ SCTP_PARTIAL_DELIVERY_ABORTED, 1);
+ stcb->asoc.control_pdapi = NULL;
+ } else {
+ /*
+ * need to get the reader to
+ * remove it
+ */
+ sq->length = 0;
+ if (sq->data) {
+ struct mbuf *m;
+
+ m = sq->data;
+ while (m) {
+ sctp_sbfree(sq, stcb, &stcb->sctp_socket->so_rcv, m);
+ m = sctp_m_free(m);
+ }
+ sq->data = NULL;
+ sq->tail_mbuf = NULL;
+ }
+ }
+ }
+ }
+ sq->end_added = 1;
+ cnt++;
+ }
+ }
+ SCTP_INP_READ_UNLOCK(inp);
+ if (stcb->block_entry) {
+ stcb->block_entry->error = ECONNRESET;
+ stcb->block_entry = NULL;
+ }
+ if ((from_inpcbfree == 0) && so) {
+ sctp_sorwakeup(inp, so);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+		/*
+		 * For TCP type we need special handling when we are
+		 * connected. We also include the peeled-off ones too.
+		 */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_CONNECTED;
+ inp->sctp_flags |= SCTP_PCB_FLAGS_WAS_CONNECTED;
+ if (so) {
+ SOCK_LOCK(so);
+ if (so->so_rcv.sb_cc == 0) {
+ so->so_state &= ~(SS_ISCONNECTING |
+ SS_ISDISCONNECTING |
+ SS_ISCONFIRMING |
+ SS_ISCONNECTED);
+ }
+ SOCK_UNLOCK(so);
+ sctp_sowwakeup(inp, so);
+ sctp_sorwakeup(inp, so);
+ wakeup(&so->so_timeo);
+ }
+ }
+ }
+	/*
+	 * When we reach here, no others want to kill the assoc yet and we
+	 * own the lock. It is possible an abort comes in while we do the
+	 * lock exchange below to grab all the locks for the final
+	 * teardown. To prevent this we raise the refcount, which makes
+	 * any such attempt start a timer and bail out above, assuring us
+	 * exclusive killing of the asoc. Note that after getting back the
+	 * TCB lock we drop the count again and stop any timer a passing
+	 * stranger may have started :-S
+	 */
+ if (from_inpcbfree == 0) {
+ atomic_add_16(&stcb->asoc.refcnt, 1);
+
+ SCTP_TCB_UNLOCK(stcb);
+
+ SCTP_ITERATOR_LOCK();
+ SCTP_INP_INFO_WLOCK();
+ SCTP_INP_WLOCK(inp);
+ SCTP_TCB_LOCK(stcb);
+ }
+ /* Stop any timer someone may have started */
+ callout_stop(&asoc->strreset_timer.timer);
+	/*
+	 * Make it invalid too; that way if it's about to run it will
+	 * abort and return.
+	 */
+ asoc->strreset_timer.type = SCTP_TIMER_TYPE_NONE;
+ sctp_iterator_asoc_being_freed(inp, stcb);
+ /* re-increment the lock */
+ if (from_inpcbfree == 0) {
+ atomic_add_16(&stcb->asoc.refcnt, -1);
+ }
+	/* now re-stop the timers to be sure - this is paranoia at its finest! */
+ callout_stop(&asoc->hb_timer.timer);
+ callout_stop(&asoc->dack_timer.timer);
+ callout_stop(&asoc->strreset_timer.timer);
+ callout_stop(&asoc->asconf_timer.timer);
+ callout_stop(&asoc->shut_guard_timer.timer);
+ callout_stop(&asoc->autoclose_timer.timer);
+ callout_stop(&asoc->delayed_event_timer.timer);
+
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ callout_stop(&net->fr_timer.timer);
+ callout_stop(&net->rxt_timer.timer);
+ callout_stop(&net->pmtu_timer.timer);
+ }
+ asoc->state = 0;
+ if (inp->sctp_tcbhash) {
+ LIST_REMOVE(stcb, sctp_tcbhash);
+ }
+ if (stcb->asoc.in_restart_hash) {
+ LIST_REMOVE(stcb, sctp_tcbrestarhash);
+ }
+ /* Now lets remove it from the list of ALL associations in the EP */
+ LIST_REMOVE(stcb, sctp_tcblist);
+ if (from_inpcbfree == 0) {
+ SCTP_INP_INCR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_ITERATOR_UNLOCK();
+ }
+ /* pull from vtag hash */
+ LIST_REMOVE(stcb, sctp_asocs);
+ sctp_add_vtag_to_timewait(inp, asoc->my_vtag);
+
+ if (from_inpcbfree == 0) {
+ SCTP_INP_INFO_WUNLOCK();
+ }
+ prev = NULL;
+ /*
+ * The chunk lists and such SHOULD be empty but we check them just
+ * in case.
+ */
+ /* anything on the wheel needs to be removed */
+ for (i = 0; i < asoc->streamoutcnt; i++) {
+ struct sctp_stream_out *outs;
+
+ outs = &asoc->strmout[i];
+ /* now clean up any chunks here */
+ sp = TAILQ_FIRST(&outs->outqueue);
+ while (sp) {
+ TAILQ_REMOVE(&outs->outqueue, sp, next);
+ if (sp->data) {
+ sctp_m_freem(sp->data);
+ sp->data = NULL;
+ sp->tail_mbuf = NULL;
+ }
+ sctp_free_remote_addr(sp->net);
+ sctp_free_spbufspace(stcb, asoc, sp);
+ /* Free the zone stuff */
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_strmoq, sp);
+ SCTP_DECR_STRMOQ_COUNT();
+ sp = TAILQ_FIRST(&outs->outqueue);
+ }
+ }
+
+ while ((sp = TAILQ_FIRST(&asoc->free_strmoq)) != NULL) {
+ TAILQ_REMOVE(&asoc->free_strmoq, sp, next);
+ if (sp->data) {
+ sctp_m_freem(sp->data);
+ sp->data = NULL;
+ sp->tail_mbuf = NULL;
+ }
+ /* Free the zone stuff */
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_strmoq, sp);
+ SCTP_DECR_STRMOQ_COUNT();
+ atomic_add_int(&sctppcbinfo.ipi_free_strmoq, -1);
+ }
+
+ while ((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) {
+ TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
+ SCTP_FREE(liste);
+ }
+
+ sq = TAILQ_FIRST(&asoc->pending_reply_queue);
+ while (sq) {
+ TAILQ_REMOVE(&asoc->pending_reply_queue, sq, next);
+ if (sq->data) {
+ sctp_m_freem(sq->data);
+ sq->data = NULL;
+ }
+ sctp_free_remote_addr(sq->whoFrom);
+ sq->whoFrom = NULL;
+ sq->stcb = NULL;
+ /* Free the ctl entry */
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, sq);
+ SCTP_DECR_READQ_COUNT();
+ sq = TAILQ_FIRST(&asoc->pending_reply_queue);
+ }
+
+ chk = TAILQ_FIRST(&asoc->free_chunks);
+ while (chk) {
+ TAILQ_REMOVE(&asoc->free_chunks, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ ccnt++;
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
+ SCTP_DECR_CHK_COUNT();
+ atomic_subtract_int(&sctppcbinfo.ipi_free_chunks, 1);
+ asoc->free_chunk_cnt--;
+ chk = TAILQ_FIRST(&asoc->free_chunks);
+ }
+ /* pending send queue SHOULD be empty */
+ if (!TAILQ_EMPTY(&asoc->send_queue)) {
+ chk = TAILQ_FIRST(&asoc->send_queue);
+ while (chk) {
+ TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ ccnt++;
+ sctp_free_remote_addr(chk->whoTo);
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
+ SCTP_DECR_CHK_COUNT();
+ chk = TAILQ_FIRST(&asoc->send_queue);
+ }
+ }
+/*
+ if(ccnt) {
+ printf("Freed %d from send_queue\n", ccnt);
+ ccnt = 0;
+ }
+*/
+ /* sent queue SHOULD be empty */
+ if (!TAILQ_EMPTY(&asoc->sent_queue)) {
+ chk = TAILQ_FIRST(&asoc->sent_queue);
+ while (chk) {
+ TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ ccnt++;
+ sctp_free_remote_addr(chk->whoTo);
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
+ SCTP_DECR_CHK_COUNT();
+ chk = TAILQ_FIRST(&asoc->sent_queue);
+ }
+ }
+/*
+ if(ccnt) {
+ printf("Freed %d from sent_queue\n", ccnt);
+ ccnt = 0;
+ }
+*/
+ /* control queue MAY not be empty */
+ if (!TAILQ_EMPTY(&asoc->control_send_queue)) {
+ chk = TAILQ_FIRST(&asoc->control_send_queue);
+ while (chk) {
+ TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ ccnt++;
+ sctp_free_remote_addr(chk->whoTo);
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
+ SCTP_DECR_CHK_COUNT();
+ chk = TAILQ_FIRST(&asoc->control_send_queue);
+ }
+ }
+/*
+ if(ccnt) {
+ printf("Freed %d from ctrl_queue\n", ccnt);
+ ccnt = 0;
+ }
+*/
+ if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ while (chk) {
+ TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_remote_addr(chk->whoTo);
+ ccnt++;
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
+ SCTP_DECR_CHK_COUNT();
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ }
+ }
+/*
+ if(ccnt) {
+ printf("Freed %d from reasm_queue\n", ccnt);
+ ccnt = 0;
+ }
+*/
+ if (asoc->mapping_array) {
+ SCTP_FREE(asoc->mapping_array);
+ asoc->mapping_array = NULL;
+ }
+ /* the stream outs */
+ if (asoc->strmout) {
+ SCTP_FREE(asoc->strmout);
+ asoc->strmout = NULL;
+ }
+ asoc->streamoutcnt = 0;
+ if (asoc->strmin) {
+ struct sctp_queued_to_read *ctl;
+ int i;
+
+ for (i = 0; i < asoc->streamincnt; i++) {
+ if (!TAILQ_EMPTY(&asoc->strmin[i].inqueue)) {
+				/* We have something on the stream-in queue */
+ ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
+ while (ctl) {
+ TAILQ_REMOVE(&asoc->strmin[i].inqueue,
+ ctl, next);
+ sctp_free_remote_addr(ctl->whoFrom);
+ if (ctl->data) {
+ sctp_m_freem(ctl->data);
+ ctl->data = NULL;
+ }
+ /*
+ * We don't free the address here
+				 * since all the nets were freed
+ * above.
+ */
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, ctl);
+ SCTP_DECR_READQ_COUNT();
+ ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
+ }
+ }
+ }
+ SCTP_FREE(asoc->strmin);
+ asoc->strmin = NULL;
+ }
+ asoc->streamincnt = 0;
+ while (!TAILQ_EMPTY(&asoc->nets)) {
+ net = TAILQ_FIRST(&asoc->nets);
+ /* pull from list */
+ if ((sctppcbinfo.ipi_count_raddr == 0) || (prev == net)) {
+#ifdef INVARIANTS
+ panic("no net's left alloc'ed, or list points to itself");
+#endif
+ break;
+ }
+ prev = net;
+ TAILQ_REMOVE(&asoc->nets, net, sctp_next);
+ sctp_free_remote_addr(net);
+ }
+
+ /* local addresses, if any */
+ while (!LIST_EMPTY(&asoc->sctp_local_addr_list)) {
+ laddr = LIST_FIRST(&asoc->sctp_local_addr_list);
+ LIST_REMOVE(laddr, sctp_nxt_addr);
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr);
+ SCTP_DECR_LADDR_COUNT();
+ }
+ /* pending asconf (address) parameters */
+ while (!TAILQ_EMPTY(&asoc->asconf_queue)) {
+ aparam = TAILQ_FIRST(&asoc->asconf_queue);
+ TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
+ SCTP_FREE(aparam);
+ }
+ if (asoc->last_asconf_ack_sent != NULL) {
+ sctp_m_freem(asoc->last_asconf_ack_sent);
+ asoc->last_asconf_ack_sent = NULL;
+ }
+ /* clean up auth stuff */
+ if (asoc->local_hmacs)
+ sctp_free_hmaclist(asoc->local_hmacs);
+ if (asoc->peer_hmacs)
+ sctp_free_hmaclist(asoc->peer_hmacs);
+
+ if (asoc->local_auth_chunks)
+ sctp_free_chunklist(asoc->local_auth_chunks);
+ if (asoc->peer_auth_chunks)
+ sctp_free_chunklist(asoc->peer_auth_chunks);
+
+ sctp_free_authinfo(&asoc->authinfo);
+
+ shared_key = LIST_FIRST(&asoc->shared_keys);
+ while (shared_key) {
+ LIST_REMOVE(shared_key, next);
+ sctp_free_sharedkey(shared_key);
+ shared_key = LIST_FIRST(&asoc->shared_keys);
+ }
+
+ /* Insert new items here :> */
+
+ /* Get rid of LOCK */
+ SCTP_TCB_LOCK_DESTROY(stcb);
+ SCTP_TCB_SEND_LOCK_DESTROY(stcb);
+ /* now clean up the tasoc itself */
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
+ SCTP_DECR_ASOC_COUNT();
+
+ if (from_inpcbfree == 0) {
+ SCTP_INP_RLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+			/*
+			 * If it's NOT the inp_free calling us AND sctp_close
+			 * has been called, we call back...
+			 */
+			SCTP_INP_RUNLOCK(inp);
+			/*
+			 * This will start the kill timer (if we are the
+			 * last one) since we still hold an increment. But
+			 * this is the only safe way to do this since
+			 * otherwise if the socket closes at the same time
+			 * we are here we might collide in the cleanup.
+			 */
+ sctp_inpcb_free(inp, 0, 0);
+ SCTP_INP_DECR_REF(inp);
+ } else {
+ /* The socket is still open. */
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_RUNLOCK(inp);
+ }
+ }
+ splx(s);
+ /* destroyed the asoc */
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 11);
+#endif
+ return (1);
+}
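+
+/*
+ * For reference, the teardown order above is: quiesce and invalidate the
+ * timers, unhash the TCB (ep hash, restart hash, ep list and vtag hash),
+ * park the vtag in time-wait, drain every queue back to its zone, free
+ * the nets and local addresses, and only then destroy the TCB locks and
+ * the TCB itself.
+ */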
+
+
+
+/*
+ * determine if a destination is "reachable" based upon the addresses bound
+ * to the current endpoint (e.g. only v4 or v6 currently bound)
+ */
+/*
+ * FIX: if we allow assoc-level bindx(), then this needs to be fixed to use
+ * assoc level v4/v6 flags, as the assoc *may* not have the same address
+ * types bound as its endpoint
+ */
+int
+sctp_destination_is_reachable(struct sctp_tcb *stcb, struct sockaddr *destaddr)
+{
+ struct sctp_inpcb *inp;
+ int answer;
+
+ /*
+	 * No locks here; the TCB, in all cases, is already locked and an
+	 * assoc is up. There is either an INP lock applied by the caller (in
+ * asconf case when deleting an address) or NOT in the HB case,
+ * however if HB then the INP increment is up and the INP will not
+ * be removed (on top of the fact that we have a TCB lock). So we
+ * only want to read the sctp_flags, which is either bound-all or
+ * not.. no protection needed since once an assoc is up you can't be
+ * changing your binding.
+ */
+ inp = stcb->sctp_ep;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /* if bound all, destination is not restricted */
+ /*
+ * RRS: Question during lock work: Is this correct? If you
+		 * are bound-all you still might need to obey the V4/V6
+ * flags??? IMO this bound-all stuff needs to be removed!
+ */
+ return (1);
+ }
+ /* NOTE: all "scope" checks are done when local addresses are added */
+ if (destaddr->sa_family == AF_INET6) {
+ answer = inp->ip_inp.inp.inp_vflag & INP_IPV6;
+ } else if (destaddr->sa_family == AF_INET) {
+ answer = inp->ip_inp.inp.inp_vflag & INP_IPV4;
+ } else {
+ /* invalid family, so it's unreachable */
+ answer = 0;
+ }
+ return (answer);
+}
+
+/*
+ * update the inp_vflags on an endpoint
+ */
+static void
+sctp_update_ep_vflag(struct sctp_inpcb *inp)
+{
+ struct sctp_laddr *laddr;
+
+ /* first clear the flag */
+ inp->ip_inp.inp.inp_vflag = 0;
+ /* set the flag based on addresses on the ep list */
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_PCB1) {
+ printf("An ounce of prevention is worth a pound of cure\n");
+ }
+#endif /* SCTP_DEBUG */
+ continue;
+ }
+		if (laddr->ifa->ifa_addr == NULL) {
+			continue;
+		}
+ if (laddr->ifa->ifa_addr->sa_family == AF_INET6) {
+ inp->ip_inp.inp.inp_vflag |= INP_IPV6;
+ } else if (laddr->ifa->ifa_addr->sa_family == AF_INET) {
+ inp->ip_inp.inp.inp_vflag |= INP_IPV4;
+ }
+ }
+}
+
+/*
+ * Add the address to the endpoint local address list. There is nothing to be
+ * done if we are bound to all addresses.
+ */
+int
+sctp_add_local_addr_ep(struct sctp_inpcb *inp, struct ifaddr *ifa)
+{
+ struct sctp_laddr *laddr;
+ int fnd, error;
+
+ fnd = 0;
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /* You are already bound to all. You have it already */
+ return (0);
+ }
+ if (ifa->ifa_addr->sa_family == AF_INET6) {
+ struct in6_ifaddr *ifa6;
+
+ ifa6 = (struct in6_ifaddr *)ifa;
+ if (ifa6->ia6_flags & (IN6_IFF_DETACHED |
+ IN6_IFF_DEPRECATED | IN6_IFF_ANYCAST | IN6_IFF_NOTREADY))
+ /* Can't bind a non-existent addr. */
+ return (-1);
+ }
+ /* first, is it already present? */
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == ifa) {
+ fnd = 1;
+ break;
+ }
+ }
+
+ if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) && (fnd == 0)) {
+ /* Not bound to all */
+ error = sctp_insert_laddr(&inp->sctp_addr_list, ifa);
+ if (error != 0)
+ return (error);
+ inp->laddr_count++;
+ /* update inp_vflag flags */
+ if (ifa->ifa_addr->sa_family == AF_INET6) {
+ inp->ip_inp.inp.inp_vflag |= INP_IPV6;
+ } else if (ifa->ifa_addr->sa_family == AF_INET) {
+ inp->ip_inp.inp.inp_vflag |= INP_IPV4;
+ }
+ }
+ return (0);
+}
+
+
+/*
+ * select a new (hopefully reachable) destination net (should only be used
+ * when we deleted an ep addr that is the only usable source address to reach
+ * the destination net)
+ */
+static void
+sctp_select_primary_destination(struct sctp_tcb *stcb)
+{
+ struct sctp_nets *net;
+
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ /* for now, we'll just pick the first reachable one we find */
+ if (net->dest_state & SCTP_ADDR_UNCONFIRMED)
+ continue;
+ if (sctp_destination_is_reachable(stcb,
+ (struct sockaddr *)&net->ro._l_addr)) {
+ /* found a reachable destination */
+ stcb->asoc.primary_destination = net;
+ }
+ }
+	/* I can't get there from here! ...we're gonna die shortly... */
+}
+
+
+/*
+ * Delete the address from the endpoint local address list. There is nothing
+ * to be done if we are bound to all addresses.
+ */
+int
+sctp_del_local_addr_ep(struct sctp_inpcb *inp, struct ifaddr *ifa)
+{
+ struct sctp_laddr *laddr;
+ int fnd;
+
+ fnd = 0;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /* You are already bound to all. You have it already */
+ return (EINVAL);
+ }
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == ifa) {
+ fnd = 1;
+ break;
+ }
+ }
+ if (fnd && (inp->laddr_count < 2)) {
+ /* can't delete unless there are at LEAST 2 addresses */
+ return (-1);
+ }
+ if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) && (fnd)) {
+ /*
+ * clean up any use of this address go through our
+ * associations and clear any last_used_address that match
+ * this one for each assoc, see if a new primary_destination
+ * is needed
+ */
+ struct sctp_tcb *stcb;
+
+ /* clean up "next_addr_touse" */
+ if (inp->next_addr_touse == laddr)
+ /* delete this address */
+ inp->next_addr_touse = NULL;
+
+ /* clean up "last_used_address" */
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ if (stcb->asoc.last_used_address == laddr)
+ /* delete this address */
+ stcb->asoc.last_used_address = NULL;
+ } /* for each tcb */
+
+ /* remove it from the ep list */
+ sctp_remove_laddr(laddr);
+ inp->laddr_count--;
+ /* update inp_vflag flags */
+ sctp_update_ep_vflag(inp);
+ /* select a new primary destination if needed */
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ /*
+ * presume caller (sctp_asconf.c) already owns INP
+ * lock
+ */
+ SCTP_TCB_LOCK(stcb);
+ if (sctp_destination_is_reachable(stcb,
+ (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr) == 0) {
+ sctp_select_primary_destination(stcb);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } /* for each tcb */
+ }
+ return (0);
+}
+
+/*
+ * Add the addr to the TCB local address list. For the BOUNDALL or dynamic
+ * case, this is a "pending" address list (e.g. addresses waiting for an
+ * ASCONF-ACK response). For the subset-binding, static case, this is a
+ * "valid" address list.
+ */
+int
+sctp_add_local_addr_assoc(struct sctp_tcb *stcb, struct ifaddr *ifa)
+{
+ struct sctp_inpcb *inp;
+ struct sctp_laddr *laddr;
+ int error;
+
+ /*
+	 * Assumes the TCB is locked.. and possibly the INP. May need to
+	 * confirm/fix that if we need it and that is not the case.
+ */
+ inp = stcb->sctp_ep;
+ if (ifa->ifa_addr->sa_family == AF_INET6) {
+ struct in6_ifaddr *ifa6;
+
+ ifa6 = (struct in6_ifaddr *)ifa;
+ if (ifa6->ia6_flags & (IN6_IFF_DETACHED |
+ /* IN6_IFF_DEPRECATED | */
+ IN6_IFF_ANYCAST |
+ IN6_IFF_NOTREADY))
+ /* Can't bind a non-existent addr. */
+ return (-1);
+ }
+ /* does the address already exist? */
+ LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == ifa) {
+ return (-1);
+ }
+ }
+
+ /* add to the list */
+ error = sctp_insert_laddr(&stcb->asoc.sctp_local_addr_list, ifa);
+ if (error != 0)
+ return (error);
+ return (0);
+}
+
+/*
+ * insert an laddr entry with the given ifa for the desired list
+ */
+int
+sctp_insert_laddr(struct sctpladdr *list, struct ifaddr *ifa)
+{
+ struct sctp_laddr *laddr;
+ int s;
+
+ s = splnet();
+
+ laddr = (struct sctp_laddr *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr);
+ if (laddr == NULL) {
+ /* out of memory? */
+ splx(s);
+ return (EINVAL);
+ }
+ SCTP_INCR_LADDR_COUNT();
+ bzero(laddr, sizeof(*laddr));
+ laddr->ifa = ifa;
+ /* insert it */
+ LIST_INSERT_HEAD(list, laddr, sctp_nxt_addr);
+
+ splx(s);
+ return (0);
+}
+
+/*
+ * Remove an laddr entry from the local address list (on an assoc)
+ */
+void
+sctp_remove_laddr(struct sctp_laddr *laddr)
+{
+ int s;
+
+ s = splnet();
+ /* remove from the list */
+ LIST_REMOVE(laddr, sctp_nxt_addr);
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr);
+ SCTP_DECR_LADDR_COUNT();
+ splx(s);
+}
+
+/*
+ * Remove an address from the TCB local address list
+ */
+int
+sctp_del_local_addr_assoc(struct sctp_tcb *stcb, struct ifaddr *ifa)
+{
+ struct sctp_inpcb *inp;
+ struct sctp_laddr *laddr;
+
+ /*
+ * This is called by asconf work. It is assumed that a) The TCB is
+ * locked and b) The INP is locked. This is true in as much as I can
+	 * trace through the entire asconf code where I did these locks.
+	 * Again, the ASCONF code is a bit different in that it often does
+	 * lock the INP during its work. This must be so, since we don't
+	 * want other procs looking up things while what they are looking
+	 * up is changing :-D
+ */
+
+ inp = stcb->sctp_ep;
+ /* if subset bound and don't allow ASCONF's, can't delete last */
+ if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) &&
+ (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF) == 0)) {
+ if (stcb->asoc.numnets < 2) {
+ /* can't delete last address */
+ return (-1);
+ }
+ }
+ LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
+ /* remove the address if it exists */
+ if (laddr->ifa == NULL)
+ continue;
+ if (laddr->ifa == ifa) {
+ sctp_remove_laddr(laddr);
+ return (0);
+ }
+ }
+
+ /* address not found! */
+ return (-1);
+}
+
+/*
+ * Remove an address from the TCB local address list; lookup using a sockaddr
+ * address
+ */
+int
+sctp_del_local_addr_assoc_sa(struct sctp_tcb *stcb, struct sockaddr *sa)
+{
+ struct sctp_inpcb *inp;
+ struct sctp_laddr *laddr;
+ struct sockaddr *l_sa;
+
+ /*
+	 * I cannot find a caller for this function. As such we NEED TO
+	 * DELETE this code. If we do find a caller, the caller MUST
+ * have locked the TCB at the least and probably the INP as well.
+ */
+ inp = stcb->sctp_ep;
+ /* if subset bound and don't allow ASCONF's, can't delete last */
+ if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) &&
+ (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF) == 0)) {
+ if (stcb->asoc.numnets < 2) {
+ /* can't delete last address */
+ return (-1);
+ }
+ }
+ LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
+ /* make sure the address exists */
+ if (laddr->ifa == NULL)
+ continue;
+ if (laddr->ifa->ifa_addr == NULL)
+ continue;
+
+ l_sa = laddr->ifa->ifa_addr;
+ if (l_sa->sa_family == AF_INET6) {
+ /* IPv6 address */
+ struct sockaddr_in6 *sin1, *sin2;
+
+ sin1 = (struct sockaddr_in6 *)l_sa;
+ sin2 = (struct sockaddr_in6 *)sa;
+ if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
+ sizeof(struct in6_addr)) == 0) {
+ /* matched */
+ sctp_remove_laddr(laddr);
+ return (0);
+ }
+ } else if (l_sa->sa_family == AF_INET) {
+ /* IPv4 address */
+ struct sockaddr_in *sin1, *sin2;
+
+ sin1 = (struct sockaddr_in *)l_sa;
+ sin2 = (struct sockaddr_in *)sa;
+ if (sin1->sin_addr.s_addr == sin2->sin_addr.s_addr) {
+ /* matched */
+ sctp_remove_laddr(laddr);
+ return (0);
+ }
+ } else {
+ /* invalid family */
+ return (-1);
+ }
+ } /* end foreach */
+ /* address not found! */
+ return (-1);
+}
+
+static char sctp_pcb_initialized = 0;
+
+/*
+ * Temporarily remove for __APPLE__ until we use the Tiger equivalents
+ */
+/* sysctl */
+static int sctp_max_number_of_assoc = SCTP_MAX_NUM_OF_ASOC;
+static int sctp_scale_up_for_address = SCTP_SCALE_FOR_ADDR;
+
+
+void
+sctp_pcb_init()
+{
+ /*
+ * SCTP initialization for the PCB structures should be called by
+	 * the sctp_init() function.
+ */
+ int i;
+
+ if (sctp_pcb_initialized != 0) {
+ /* error I was called twice */
+ return;
+ }
+ sctp_pcb_initialized = 1;
+
+ bzero(&sctpstat, sizeof(struct sctpstat));
+
+ /* init the empty list of (All) Endpoints */
+ LIST_INIT(&sctppcbinfo.listhead);
+
+ /* init the iterator head */
+ LIST_INIT(&sctppcbinfo.iteratorhead);
+
+ /* init the hash table of endpoints */
+ TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", &sctp_hashtblsize);
+ TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", &sctp_pcbtblsize);
+ TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", &sctp_chunkscale);
+
+ sctppcbinfo.sctp_asochash = hashinit((sctp_hashtblsize * 31),
+ M_PCB,
+ &sctppcbinfo.hashasocmark);
+
+ sctppcbinfo.sctp_ephash = hashinit(sctp_hashtblsize,
+ M_PCB,
+ &sctppcbinfo.hashmark);
+
+ sctppcbinfo.sctp_tcpephash = hashinit(sctp_hashtblsize,
+ M_PCB,
+ &sctppcbinfo.hashtcpmark);
+
+ sctppcbinfo.hashtblsize = sctp_hashtblsize;
+
+ /*
+ * init the small hash table we use to track restarted asoc's
+ */
+ sctppcbinfo.sctp_restarthash = hashinit(SCTP_STACK_VTAG_HASH_SIZE,
+ M_PCB,
+ &sctppcbinfo.hashrestartmark);
+
+ /* init the zones */
+ /*
+ * FIX ME: Should check for NULL returns, but if it does fail we are
+	 * doomed to panic anyway... add later maybe.
+ */
+ SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_ep, "sctp_ep",
+ sizeof(struct sctp_inpcb), maxsockets);
+
+ SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_asoc, "sctp_asoc",
+ sizeof(struct sctp_tcb), sctp_max_number_of_assoc);
+
+ SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_laddr, "sctp_laddr",
+ sizeof(struct sctp_laddr),
+ (sctp_max_number_of_assoc * sctp_scale_up_for_address));
+
+ SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_net, "sctp_raddr",
+ sizeof(struct sctp_nets),
+ (sctp_max_number_of_assoc * sctp_scale_up_for_address));
+
+ SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_chunk, "sctp_chunk",
+ sizeof(struct sctp_tmit_chunk),
+ (sctp_max_number_of_assoc * sctp_chunkscale));
+
+ SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_readq, "sctp_readq",
+ sizeof(struct sctp_queued_to_read),
+ (sctp_max_number_of_assoc * sctp_chunkscale));
+
+ SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_strmoq, "sctp_stream_msg_out",
+ sizeof(struct sctp_stream_queue_pending),
+ (sctp_max_number_of_assoc * sctp_chunkscale));
+
+ /* Master Lock INIT for info structure */
+ SCTP_INP_INFO_LOCK_INIT();
+ SCTP_STATLOG_INIT_LOCK();
+ SCTP_ITERATOR_LOCK_INIT();
+ SCTP_IPI_COUNT_INIT();
+ SCTP_IPI_ADDR_INIT();
+ LIST_INIT(&sctppcbinfo.addr_wq);
+
+ /* not sure if we need all the counts */
+ sctppcbinfo.ipi_count_ep = 0;
+ /* assoc/tcb zone info */
+ sctppcbinfo.ipi_count_asoc = 0;
+ /* local addrlist zone info */
+ sctppcbinfo.ipi_count_laddr = 0;
+ /* remote addrlist zone info */
+ sctppcbinfo.ipi_count_raddr = 0;
+ /* chunk info */
+ sctppcbinfo.ipi_count_chunk = 0;
+
+ /* socket queue zone info */
+ sctppcbinfo.ipi_count_readq = 0;
+
+	/* stream out queue count */
+ sctppcbinfo.ipi_count_strmoq = 0;
+
+ sctppcbinfo.ipi_free_strmoq = 0;
+ sctppcbinfo.ipi_free_chunks = 0;
+
+
+ callout_init(&sctppcbinfo.addr_wq_timer.timer, 1);
+
+ /* port stuff */
+ sctppcbinfo.lastlow = ipport_firstauto;
+ /* Init the TIMEWAIT list */
+ for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) {
+ LIST_INIT(&sctppcbinfo.vtag_timewait[i]);
+ }
+
+}
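+
+/*
+ * The three sizing knobs fetched above with TUNABLE_INT_FETCH() are
+ * loader tunables, so they can be set from /boot/loader.conf before the
+ * stack initializes.  A minimal sketch (the values are examples only,
+ * not recommendations):
+ *
+ *   net.inet.sctp.tcbhashsize=1024
+ *   net.inet.sctp.pcbhashsize=256
+ *   net.inet.sctp.chunkscale=10
+ */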
+
+
+int
+sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
+ int iphlen, int offset, int limit, struct sctphdr *sh,
+ struct sockaddr *altsa)
+{
+ /*
+ * grub through the INIT pulling addresses and loading them to the
+ * nets structure in the asoc. The from address in the mbuf should
+ * also be loaded (if it is not already). This routine can be called
+ * with either INIT or INIT-ACK's as long as the m points to the IP
+ * packet and the offset points to the beginning of the parameters.
+ */
+ struct sctp_inpcb *inp, *l_inp;
+ struct sctp_nets *net, *net_tmp;
+ struct ip *iph;
+ struct sctp_paramhdr *phdr, parm_buf;
+ struct sctp_tcb *stcb_tmp;
+ uint16_t ptype, plen;
+ struct sockaddr *sa;
+ struct sockaddr_storage dest_store;
+ struct sockaddr *local_sa = (struct sockaddr *)&dest_store;
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+ uint8_t store[384];
+ struct sctp_auth_random *random = NULL;
+ uint16_t random_len = 0;
+ struct sctp_auth_hmac_algo *hmacs = NULL;
+ uint16_t hmacs_len = 0;
+ struct sctp_auth_chunk_list *chunks = NULL;
+ uint16_t num_chunks = 0;
+ sctp_key_t *new_key;
+ uint32_t keylen;
+ int got_random = 0, got_hmacs = 0, got_chklist = 0;
+
+ /* First get the destination address setup too. */
+ memset(&sin, 0, sizeof(sin));
+ memset(&sin6, 0, sizeof(sin6));
+
+ sin.sin_family = AF_INET;
+ sin.sin_len = sizeof(sin);
+ sin.sin_port = stcb->rport;
+
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_len = sizeof(struct sockaddr_in6);
+ sin6.sin6_port = stcb->rport;
+ if (altsa == NULL) {
+ iph = mtod(m, struct ip *);
+ if (iph->ip_v == IPVERSION) {
+ /* its IPv4 */
+ struct sockaddr_in *sin_2;
+
+ sin_2 = (struct sockaddr_in *)(local_sa);
+ memset(sin_2, 0, sizeof(sin));
+ sin_2->sin_family = AF_INET;
+ sin_2->sin_len = sizeof(sin);
+ sin_2->sin_port = sh->dest_port;
+ sin_2->sin_addr.s_addr = iph->ip_dst.s_addr;
+ sin.sin_addr = iph->ip_src;
+ sa = (struct sockaddr *)&sin;
+ } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
+ /* its IPv6 */
+ struct ip6_hdr *ip6;
+ struct sockaddr_in6 *sin6_2;
+
+ ip6 = mtod(m, struct ip6_hdr *);
+ sin6_2 = (struct sockaddr_in6 *)(local_sa);
+ memset(sin6_2, 0, sizeof(sin6));
+ sin6_2->sin6_family = AF_INET6;
+ sin6_2->sin6_len = sizeof(struct sockaddr_in6);
+ sin6_2->sin6_port = sh->dest_port;
+ sin6.sin6_addr = ip6->ip6_src;
+ sa = (struct sockaddr *)&sin6;
+ } else {
+ sa = NULL;
+ }
+ } else {
+ /*
+ * For cookies we use the src address NOT from the packet
+ * but from the original INIT
+ */
+ sa = altsa;
+ }
+ /* Turn off ECN until we get through all params */
+ stcb->asoc.ecn_allowed = 0;
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ /* mark all addresses that we have currently on the list */
+ net->dest_state |= SCTP_ADDR_NOT_IN_ASSOC;
+ }
+ /* does the source address already exist? if so skip it */
+ l_inp = inp = stcb->sctp_ep;
+
+ atomic_add_16(&stcb->asoc.refcnt, 1);
+ stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net_tmp, local_sa, stcb);
+ atomic_add_16(&stcb->asoc.refcnt, -1);
+
+ if ((stcb_tmp == NULL && inp == stcb->sctp_ep) || inp == NULL) {
+ /* we must add the source address */
+ /* no scope set here since we have a tcb already. */
+ if ((sa->sa_family == AF_INET) &&
+ (stcb->asoc.ipv4_addr_legal)) {
+ if (sctp_add_remote_addr(stcb, sa, 0, 2)) {
+ return (-1);
+ }
+ } else if ((sa->sa_family == AF_INET6) &&
+ (stcb->asoc.ipv6_addr_legal)) {
+ if (sctp_add_remote_addr(stcb, sa, 0, 3)) {
+ return (-2);
+ }
+ }
+ } else {
+ if (net_tmp != NULL && stcb_tmp == stcb) {
+ net_tmp->dest_state &= ~SCTP_ADDR_NOT_IN_ASSOC;
+ } else if (stcb_tmp != stcb) {
+ /* It belongs to another association? */
+ SCTP_TCB_UNLOCK(stcb_tmp);
+ return (-3);
+ }
+ }
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-4);
+ }
+ /* now we must go through each of the params. */
+ phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
+ while (phdr) {
+ ptype = ntohs(phdr->param_type);
+ plen = ntohs(phdr->param_length);
+ /*
+ * printf("ptype => %0x, plen => %d\n", (uint32_t)ptype,
+ * (int)plen);
+ */
+ if (offset + plen > limit) {
+ break;
+ }
+ if (plen == 0) {
+ break;
+ }
+ if (ptype == SCTP_IPV4_ADDRESS) {
+ if (stcb->asoc.ipv4_addr_legal) {
+ struct sctp_ipv4addr_param *p4, p4_buf;
+
+ /* ok get the v4 address and check/add */
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
+ if (plen != sizeof(struct sctp_ipv4addr_param) ||
+ phdr == NULL) {
+ return (-5);
+ }
+ p4 = (struct sctp_ipv4addr_param *)phdr;
+ sin.sin_addr.s_addr = p4->addr;
+ sa = (struct sockaddr *)&sin;
+ inp = stcb->sctp_ep;
+ atomic_add_16(&stcb->asoc.refcnt, 1);
+ stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
+ local_sa, stcb);
+ atomic_add_16(&stcb->asoc.refcnt, -1);
+
+ if ((stcb_tmp == NULL && inp == stcb->sctp_ep) ||
+ inp == NULL) {
+ /* we must add the source address */
+ /*
+ * no scope set since we have a tcb
+ * already
+ */
+
+ /*
+ * we must validate the state again
+ * here
+ */
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-7);
+ }
+ if (sctp_add_remote_addr(stcb, sa, 0, 4)) {
+ return (-8);
+ }
+ } else if (stcb_tmp == stcb) {
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-10);
+ }
+ if (net != NULL) {
+ /* clear flag */
+ net->dest_state &=
+ ~SCTP_ADDR_NOT_IN_ASSOC;
+ }
+ } else {
+ /*
+ * strange, address is in another
+ * assoc? straighten out locks.
+ */
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-12);
+ }
+ return (-13);
+ }
+ }
+ } else if (ptype == SCTP_IPV6_ADDRESS) {
+ if (stcb->asoc.ipv6_addr_legal) {
+ /* ok get the v6 address and check/add */
+ struct sctp_ipv6addr_param *p6, p6_buf;
+
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
+ if (plen != sizeof(struct sctp_ipv6addr_param) ||
+ phdr == NULL) {
+ return (-14);
+ }
+ p6 = (struct sctp_ipv6addr_param *)phdr;
+ memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
+ sizeof(p6->addr));
+ sa = (struct sockaddr *)&sin6;
+ inp = stcb->sctp_ep;
+ atomic_add_16(&stcb->asoc.refcnt, 1);
+ stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
+ local_sa, stcb);
+ atomic_add_16(&stcb->asoc.refcnt, -1);
+ if (stcb_tmp == NULL && (inp == stcb->sctp_ep ||
+ inp == NULL)) {
+ /*
+ * we must validate the state again
+ * here
+ */
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-16);
+ }
+ /*
+ * we must add the address, no scope
+ * set
+ */
+ if (sctp_add_remote_addr(stcb, sa, 0, 5)) {
+ return (-17);
+ }
+ } else if (stcb_tmp == stcb) {
+ /*
+ * we must validate the state again
+ * here
+ */
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-19);
+ }
+ if (net != NULL) {
+ /* clear flag */
+ net->dest_state &=
+ ~SCTP_ADDR_NOT_IN_ASSOC;
+ }
+ } else {
+ /*
+ * strange, address is in another
+ * assoc? straighten out locks.
+ */
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-21);
+ }
+ return (-22);
+ }
+ }
+ } else if (ptype == SCTP_ECN_CAPABLE) {
+ stcb->asoc.ecn_allowed = 1;
+ } else if (ptype == SCTP_ULP_ADAPTATION) {
+ if (stcb->asoc.state != SCTP_STATE_OPEN) {
+ struct sctp_adaptation_layer_indication ai,
+ *aip;
+
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&ai, sizeof(ai));
+ aip = (struct sctp_adaptation_layer_indication *)phdr;
+ sctp_ulp_notify(SCTP_NOTIFY_ADAPTATION_INDICATION,
+ stcb, ntohl(aip->indication), NULL);
+ }
+ } else if (ptype == SCTP_SET_PRIM_ADDR) {
+ struct sctp_asconf_addr_param lstore, *fee;
+ struct sctp_asconf_addrv4_param *fii;
+ int lptype;
+ struct sockaddr *lsa = NULL;
+
+ stcb->asoc.peer_supports_asconf = 1;
+ if (plen > sizeof(lstore)) {
+ return (-23);
+ }
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&lstore, plen);
+ if (phdr == NULL) {
+ return (-24);
+ }
+ fee = (struct sctp_asconf_addr_param *)phdr;
+ lptype = ntohs(fee->addrp.ph.param_type);
+ if (lptype == SCTP_IPV4_ADDRESS) {
+ if (plen !=
+ sizeof(struct sctp_asconf_addrv4_param)) {
+ printf("Sizeof setprim in init/init ack not %d but %d - ignored\n",
+ (int)sizeof(struct sctp_asconf_addrv4_param),
+ plen);
+ } else {
+ fii = (struct sctp_asconf_addrv4_param *)fee;
+ sin.sin_addr.s_addr = fii->addrp.addr;
+ lsa = (struct sockaddr *)&sin;
+ }
+ } else if (lptype == SCTP_IPV6_ADDRESS) {
+ if (plen !=
+ sizeof(struct sctp_asconf_addr_param)) {
+ printf("Sizeof setprim (v6) in init/init ack not %d but %d - ignored\n",
+ (int)sizeof(struct sctp_asconf_addr_param),
+ plen);
+ } else {
+ memcpy(sin6.sin6_addr.s6_addr,
+ fee->addrp.addr,
+ sizeof(fee->addrp.addr));
+ lsa = (struct sockaddr *)&sin6;
+ }
+ }
+ if (lsa) {
+				sctp_set_primary_addr(stcb, lsa, NULL);
+ }
+ } else if (ptype == SCTP_PRSCTP_SUPPORTED) {
+ /* Peer supports pr-sctp */
+ stcb->asoc.peer_supports_prsctp = 1;
+ } else if (ptype == SCTP_SUPPORTED_CHUNK_EXT) {
+ /* A supported extension chunk */
+ struct sctp_supported_chunk_types_param *pr_supported;
+ uint8_t local_store[128];
+ int num_ent, i;
+
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&local_store, plen);
+ if (phdr == NULL) {
+ return (-25);
+ }
+ stcb->asoc.peer_supports_asconf = 0;
+ stcb->asoc.peer_supports_prsctp = 0;
+ stcb->asoc.peer_supports_pktdrop = 0;
+ stcb->asoc.peer_supports_strreset = 0;
+ stcb->asoc.peer_supports_auth = 0;
+ pr_supported = (struct sctp_supported_chunk_types_param *)phdr;
+ num_ent = plen - sizeof(struct sctp_paramhdr);
+ for (i = 0; i < num_ent; i++) {
+ switch (pr_supported->chunk_types[i]) {
+ case SCTP_ASCONF:
+ case SCTP_ASCONF_ACK:
+ stcb->asoc.peer_supports_asconf = 1;
+ break;
+ case SCTP_FORWARD_CUM_TSN:
+ stcb->asoc.peer_supports_prsctp = 1;
+ break;
+ case SCTP_PACKET_DROPPED:
+ stcb->asoc.peer_supports_pktdrop = 1;
+ break;
+ case SCTP_STREAM_RESET:
+ stcb->asoc.peer_supports_strreset = 1;
+ break;
+ case SCTP_AUTHENTICATION:
+ stcb->asoc.peer_supports_auth = 1;
+ break;
+ default:
+ /* one I have not learned yet */
+ break;
+
+ }
+ }
+ } else if (ptype == SCTP_ECN_NONCE_SUPPORTED) {
+ /* Peer supports ECN-nonce */
+ stcb->asoc.peer_supports_ecn_nonce = 1;
+ stcb->asoc.ecn_nonce_allowed = 1;
+ } else if (ptype == SCTP_RANDOM) {
+ if (plen > sizeof(store))
+ break;
+ if (got_random) {
+ /* already processed a RANDOM */
+ goto next_param;
+ }
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)store,
+ plen);
+ if (phdr == NULL)
+ return (-26);
+ random = (struct sctp_auth_random *)phdr;
+ random_len = plen - sizeof(*random);
+ /* enforce the random length */
+ if (random_len != SCTP_AUTH_RANDOM_SIZE_REQUIRED) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_AUTH1)
+ printf("SCTP: invalid RANDOM len\n");
+#endif
+ return (-27);
+ }
+ got_random = 1;
+ } else if (ptype == SCTP_HMAC_LIST) {
+ int num_hmacs;
+ int i;
+
+ if (plen > sizeof(store))
+ break;
+ if (got_hmacs) {
+ /* already processed a HMAC list */
+ goto next_param;
+ }
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)store,
+ plen);
+ if (phdr == NULL)
+ return (-28);
+ hmacs = (struct sctp_auth_hmac_algo *)phdr;
+ hmacs_len = plen - sizeof(*hmacs);
+ num_hmacs = hmacs_len / sizeof(hmacs->hmac_ids[0]);
+ /* validate the hmac list */
+ if (sctp_verify_hmac_param(hmacs, num_hmacs)) {
+ return (-29);
+ }
+ if (stcb->asoc.peer_hmacs != NULL)
+ sctp_free_hmaclist(stcb->asoc.peer_hmacs);
+ stcb->asoc.peer_hmacs = sctp_alloc_hmaclist(num_hmacs);
+ if (stcb->asoc.peer_hmacs != NULL) {
+ for (i = 0; i < num_hmacs; i++) {
+ sctp_auth_add_hmacid(stcb->asoc.peer_hmacs,
+ ntohs(hmacs->hmac_ids[i]));
+ }
+ }
+ got_hmacs = 1;
+ } else if (ptype == SCTP_CHUNK_LIST) {
+ int i;
+
+ if (plen > sizeof(store))
+ break;
+ if (got_chklist) {
+ /* already processed a Chunks list */
+ goto next_param;
+ }
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)store,
+ plen);
+ if (phdr == NULL)
+ return (-30);
+ chunks = (struct sctp_auth_chunk_list *)phdr;
+ num_chunks = plen - sizeof(*chunks);
+ if (stcb->asoc.peer_auth_chunks != NULL)
+ sctp_clear_chunklist(stcb->asoc.peer_auth_chunks);
+ else
+ stcb->asoc.peer_auth_chunks = sctp_alloc_chunklist();
+ for (i = 0; i < num_chunks; i++) {
+ sctp_auth_add_chunk(chunks->chunk_types[i],
+ stcb->asoc.peer_auth_chunks);
+ }
+ got_chklist = 1;
+ } else if ((ptype == SCTP_HEARTBEAT_INFO) ||
+ (ptype == SCTP_STATE_COOKIE) ||
+ (ptype == SCTP_UNRECOG_PARAM) ||
+ (ptype == SCTP_COOKIE_PRESERVE) ||
+ (ptype == SCTP_SUPPORTED_ADDRTYPE) ||
+ (ptype == SCTP_ADD_IP_ADDRESS) ||
+ (ptype == SCTP_DEL_IP_ADDRESS) ||
+ (ptype == SCTP_ERROR_CAUSE_IND) ||
+ (ptype == SCTP_SUCCESS_REPORT)) {
+ /* don't care */ ;
+ } else {
+ if ((ptype & 0x8000) == 0x0000) {
+ /*
+ * must stop processing the rest of the
+ * param's. Any report bits were handled
+ * with the call to
+ * sctp_arethere_unrecognized_parameters()
+ * when the INIT or INIT-ACK was first seen.
+ */
+ break;
+ }
+ }
+next_param:
+ offset += SCTP_SIZE32(plen);
+ if (offset >= limit) {
+ break;
+ }
+ phdr = sctp_get_next_param(m, offset, &parm_buf,
+ sizeof(parm_buf));
+ }
+ /* Now check to see if we need to purge any addresses */
+ for (net = TAILQ_FIRST(&stcb->asoc.nets); net != NULL; net = net_tmp) {
+ net_tmp = TAILQ_NEXT(net, sctp_next);
+ if ((net->dest_state & SCTP_ADDR_NOT_IN_ASSOC) ==
+ SCTP_ADDR_NOT_IN_ASSOC) {
+ /* This address has been removed from the asoc */
+ /* remove and free it */
+ stcb->asoc.numnets--;
+ TAILQ_REMOVE(&stcb->asoc.nets, net, sctp_next);
+ sctp_free_remote_addr(net);
+ if (net == stcb->asoc.primary_destination) {
+ stcb->asoc.primary_destination = NULL;
+ sctp_select_primary_destination(stcb);
+ }
+ }
+ }
+ /* validate authentication required parameters */
+ if (got_random && got_hmacs) {
+ stcb->asoc.peer_supports_auth = 1;
+ } else {
+ stcb->asoc.peer_supports_auth = 0;
+ }
+ if (!sctp_asconf_auth_nochk && stcb->asoc.peer_supports_asconf &&
+ !stcb->asoc.peer_supports_auth) {
+ return (-31);
+ }
+ /* concatenate the full random key */
+ keylen = random_len + num_chunks + hmacs_len;
+ new_key = sctp_alloc_key(keylen);
+ if (new_key != NULL) {
+ /* copy in the RANDOM */
+ if (random != NULL)
+ bcopy(random->random_data, new_key->key, random_len);
+ /* append in the AUTH chunks */
+ if (chunks != NULL)
+ bcopy(chunks->chunk_types, new_key->key + random_len,
+ num_chunks);
+ /* append in the HMACs */
+ if (hmacs != NULL)
+ bcopy(hmacs->hmac_ids, new_key->key + random_len + num_chunks,
+ hmacs_len);
+ } else {
+ return (-32);
+ }
+ if (stcb->asoc.authinfo.peer_random != NULL)
+ sctp_free_key(stcb->asoc.authinfo.peer_random);
+ stcb->asoc.authinfo.peer_random = new_key;
+#ifdef SCTP_AUTH_DRAFT_04
+ /* don't include the chunks and hmacs for draft -04 */
+ stcb->asoc.authinfo.peer_random->keylen = random_len;
+#endif
+ sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.assoc_keyid);
+ sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.recv_keyid);
+
+ return (0);
+}
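+
+/*
+ * The parameter walk above follows the usual SCTP TLV pattern: read a
+ * struct sctp_paramhdr, dispatch on the type, then advance by the length
+ * rounded up to a 4-byte boundary (SCTP_SIZE32).  Below is a minimal
+ * stand-alone sketch of that idiom over a flat buffer; the helper is
+ * hypothetical and compiled out, and real code must go through
+ * sctp_get_next_param() so mbuf chains are handled correctly.
+ */
+#if 0
+static void
+walk_params(uint8_t *buf, int limit)
+{
+	int offset = 0;
+
+	while (offset + (int)sizeof(struct sctp_paramhdr) <= limit) {
+		struct sctp_paramhdr *ph;
+		uint16_t ptype, plen;
+
+		ph = (struct sctp_paramhdr *)(buf + offset);
+		ptype = ntohs(ph->param_type);
+		plen = ntohs(ph->param_length);
+		if (plen < sizeof(struct sctp_paramhdr) ||
+		    offset + plen > limit) {
+			/* malformed or truncated parameter, stop */
+			break;
+		}
+		printf("param type %u, len %u\n", ptype, plen);
+		/* advance to the next parameter, padded to 4 bytes */
+		offset += (plen + 3) & ~3;
+	}
+}
+#endif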
+
+int
+sctp_set_primary_addr(struct sctp_tcb *stcb, struct sockaddr *sa,
+ struct sctp_nets *net)
+{
+ /* make sure the requested primary address exists in the assoc */
+ if (net == NULL && sa)
+ net = sctp_findnet(stcb, sa);
+
+ if (net == NULL) {
+ /* didn't find the requested primary address! */
+ return (-1);
+ } else {
+ /* set the primary address */
+ if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
+ /* Must be confirmed */
+ return (-1);
+ }
+ stcb->asoc.primary_destination = net;
+ net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
+ net = TAILQ_FIRST(&stcb->asoc.nets);
+ if (net != stcb->asoc.primary_destination) {
+ /*
+			 * first one on the list is NOT the primary.
+			 * sctp_cmpaddr() is much more efficient if the
+			 * primary is the first on the list, so make it so.
+ */
+ TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
+ TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
+ }
+ return (0);
+ }
+}
+
+
+int
+sctp_is_vtag_good(struct sctp_inpcb *inp, uint32_t tag, struct timeval *now)
+{
+ /*
+	 * This function serves two purposes. It checks whether a TAG can
+	 * be re-used, returning 1 if it is ok and 0 if that tag should not
+	 * be used. As a secondary function it purges out old tags that can
+	 * be removed.
+ */
+ struct sctpasochead *head;
+ struct sctpvtaghead *chain;
+ struct sctp_tagblock *twait_block;
+ struct sctp_tcb *stcb;
+ int i;
+
+ SCTP_INP_INFO_WLOCK();
+ chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
+ /* First is the vtag in use ? */
+
+ head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(tag,
+ sctppcbinfo.hashasocmark)];
+ if (head == NULL) {
+ goto check_restart;
+ }
+ LIST_FOREACH(stcb, head, sctp_asocs) {
+
+ if (stcb->asoc.my_vtag == tag) {
+ /*
+			 * We should remove this "if" and always return 0 if
+			 * we want vtags unique across all endpoints. For
+			 * now, uniqueness within an endpoint is ok.
+ */
+ if (inp == stcb->sctp_ep) {
+ /* bad tag, in use */
+ SCTP_INP_INFO_WUNLOCK();
+ return (0);
+ }
+ }
+ }
+check_restart:
+ /* Now lets check the restart hash */
+ head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(tag,
+ sctppcbinfo.hashrestartmark)];
+ if (head == NULL) {
+ goto check_time_wait;
+ }
+ LIST_FOREACH(stcb, head, sctp_tcbrestarhash) {
+ if (stcb->asoc.assoc_id == tag) {
+ /* candidate */
+ if (inp == stcb->sctp_ep) {
+ /* bad tag, in use */
+ SCTP_INP_INFO_WUNLOCK();
+ return (0);
+ }
+ }
+ }
+check_time_wait:
+ /* Now what about timed wait ? */
+ if (!LIST_EMPTY(chain)) {
+ /*
+ * Block(s) are present, lets see if we have this tag in the
+ * list
+ */
+ LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
+ for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
+ if (twait_block->vtag_block[i].v_tag == 0) {
+ /* not used */
+ continue;
+				} else if ((long)twait_block->vtag_block[i].tv_sec_at_expire <
+				    now->tv_sec) {
+					/* Audit: this guy's time-wait has expired, reclaim the slot */
+ twait_block->vtag_block[i].tv_sec_at_expire = 0;
+ twait_block->vtag_block[i].v_tag = 0;
+ } else if (twait_block->vtag_block[i].v_tag ==
+ tag) {
+ /* Bad tag, sorry :< */
+ SCTP_INP_INFO_WUNLOCK();
+ return (0);
+ }
+ }
+ }
+ }
+ /* Not found, ok to use the tag */
+ SCTP_INP_INFO_WUNLOCK();
+ return (1);
+}
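+
+/*
+ * Shape of the time-wait store consulted above (declared in sctp_pcb.h):
+ * a vtag hashes to chain vtag_timewait[tag % SCTP_STACK_VTAG_HASH_SIZE],
+ * which is a list of sctp_tagblock's, each holding
+ * SCTP_NUMBER_IN_VTAG_BLOCK (v_tag, tv_sec_at_expire) slots; a zero
+ * v_tag marks a free slot.
+ */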
+
+
+/*
+ * Delete the address from the endpoint local address list. Lookup using a
+ * sockaddr address (i.e. not an ifaddr)
+ */
+int
+sctp_del_local_addr_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa)
+{
+ struct sctp_laddr *laddr;
+ struct sockaddr *l_sa;
+ int found = 0;
+
+ /*
+ * Here is another function I cannot find a caller for. As such we
+	 * SHOULD delete it if we have no users. If we find a user, that user
+ * MUST have the INP locked.
+ *
+ */
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /* You are already bound to all. You have it already */
+ return (EINVAL);
+ }
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ /* make sure the address exists */
+ if (laddr->ifa == NULL)
+ continue;
+ if (laddr->ifa->ifa_addr == NULL)
+ continue;
+
+ l_sa = laddr->ifa->ifa_addr;
+ if (l_sa->sa_family == AF_INET6) {
+ /* IPv6 address */
+ struct sockaddr_in6 *sin1, *sin2;
+
+ sin1 = (struct sockaddr_in6 *)l_sa;
+ sin2 = (struct sockaddr_in6 *)sa;
+ if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
+ sizeof(struct in6_addr)) == 0) {
+ /* matched */
+ found = 1;
+ break;
+ }
+ } else if (l_sa->sa_family == AF_INET) {
+ /* IPv4 address */
+ struct sockaddr_in *sin1, *sin2;
+
+ sin1 = (struct sockaddr_in *)l_sa;
+ sin2 = (struct sockaddr_in *)sa;
+ if (sin1->sin_addr.s_addr == sin2->sin_addr.s_addr) {
+ /* matched */
+ found = 1;
+ break;
+ }
+ } else {
+ /* invalid family */
+ return (-1);
+ }
+ }
+
+ if (found && inp->laddr_count < 2) {
+ /* can't delete unless there are at LEAST 2 addresses */
+ return (-1);
+ }
+ if (found && (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+ /*
+		 * remove it from the ep list; this should NOT be done until
+		 * it's really gone from the interface list and we won't be
+		 * receiving more of these. Probably right away. If we do
+		 * allow a removal of an address from an association
+		 * (sub-set bind) then this should NOT be called until all
+		 * the ASCONFs come back from this association.
+ */
+ sctp_remove_laddr(laddr);
+ return (0);
+ } else {
+ return (-1);
+ }
+}
+
+static sctp_assoc_t reneged_asoc_ids[256];
+static uint8_t reneged_at = 0;
+
+extern int sctp_do_drain;
+
+static void
+sctp_drain_mbufs(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
+{
+ /*
+ * We must hunt this association for MBUF's past the cumack (i.e.
+ * out of order data that we can renege on).
+ */
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *chk, *nchk;
+ uint32_t cumulative_tsn_p1, tsn;
+ struct sctp_queued_to_read *ctl, *nctl;
+ int cnt, strmat, gap;
+
+ /* We look for anything larger than the cum-ack + 1 */
+
+ if (sctp_do_drain == 0) {
+ return;
+ }
+ asoc = &stcb->asoc;
+ if (asoc->cumulative_tsn == asoc->highest_tsn_inside_map) {
+ /* none we can reneg on. */
+ return;
+ }
+ cumulative_tsn_p1 = asoc->cumulative_tsn + 1;
+ cnt = 0;
+ /* First look in the re-assembly queue */
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ while (chk) {
+ /* Get the next one */
+ nchk = TAILQ_NEXT(chk, sctp_next);
+ if (compare_with_wrap(chk->rec.data.TSN_seq,
+ cumulative_tsn_p1, MAX_TSN)) {
+ /* Yep it is above cum-ack */
+ cnt++;
+ tsn = chk->rec.data.TSN_seq;
+ if (tsn >= asoc->mapping_array_base_tsn) {
+ gap = tsn - asoc->mapping_array_base_tsn;
+ } else {
+ gap = (MAX_TSN - asoc->mapping_array_base_tsn) +
+ tsn + 1;
+ }
+ asoc->size_on_reasm_queue = sctp_sbspace_sub(asoc->size_on_reasm_queue, chk->send_size);
+ sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+ SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
+ TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_remote_addr(chk->whoTo);
+ sctp_free_a_chunk(stcb, chk);
+ }
+ chk = nchk;
+ }
+	/* Ok that was fun, now we will drain all the inbound streams. */
+ for (strmat = 0; strmat < asoc->streamincnt; strmat++) {
+ ctl = TAILQ_FIRST(&asoc->strmin[strmat].inqueue);
+ while (ctl) {
+ nctl = TAILQ_NEXT(ctl, next);
+ if (compare_with_wrap(ctl->sinfo_tsn,
+ cumulative_tsn_p1, MAX_TSN)) {
+ /* Yep it is above cum-ack */
+ cnt++;
+ tsn = ctl->sinfo_tsn;
+ if (tsn >= asoc->mapping_array_base_tsn) {
+ gap = tsn -
+ asoc->mapping_array_base_tsn;
+ } else {
+ gap = (MAX_TSN -
+ asoc->mapping_array_base_tsn) +
+ tsn + 1;
+ }
+ asoc->size_on_all_streams = sctp_sbspace_sub(asoc->size_on_all_streams, ctl->length);
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+
+ SCTP_UNSET_TSN_PRESENT(asoc->mapping_array,
+ gap);
+ TAILQ_REMOVE(&asoc->strmin[strmat].inqueue,
+ ctl, next);
+ if (ctl->data) {
+ sctp_m_freem(ctl->data);
+ ctl->data = NULL;
+ }
+ sctp_free_remote_addr(ctl->whoFrom);
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, ctl);
+ SCTP_DECR_READQ_COUNT();
+ }
+ ctl = nctl;
+ }
+ }
+ /*
+ * Question, should we go through the delivery queue? The only
+ * reason things are on here is the app not reading OR a p-d-api up.
+ * An attacker COULD send enough in to initiate the PD-API and then
+ * send a bunch of stuff to other streams... these would wind up on
+ * the delivery queue.. and then we would not get to them. But in
+ * order to do this I then have to back-track and un-deliver
+ * sequence numbers in streams.. el-yucko. I think for now we will
+ * NOT look at the delivery queue and leave it to be something to
+ * consider later. An alternative would be to abort the P-D-API with
+ * a notification and then deliver the data.... Or another method
+ * might be to keep track of how many times the situation occurs and
+ * if we see a possible attack underway just abort the association.
+ */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_PCB1) {
+ if (cnt) {
+ printf("Freed %d chunks from reneg harvest\n", cnt);
+ }
+ }
+#endif /* SCTP_DEBUG */
+ if (cnt) {
+ /*
+ * Now do we need to find a new
+ * asoc->highest_tsn_inside_map?
+ */
+ if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
+ gap = asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn;
+ } else {
+ gap = (MAX_TSN - asoc->mapping_array_base_tsn) +
+ asoc->highest_tsn_inside_map + 1;
+ }
+ if (gap >= (asoc->mapping_array_size << 3)) {
+ /*
+ * Something bad happened or cum-ack and high were
+ * behind the base, but if so earlier checks should
+			 * have found NO data... weird... we will start at
+ * end of mapping array.
+ */
+ printf("Gap was larger than array?? %d set to max:%d maparraymax:%x\n",
+ (int)gap,
+ (int)(asoc->mapping_array_size << 3),
+ (int)asoc->highest_tsn_inside_map);
+ gap = asoc->mapping_array_size << 3;
+ }
+ while (gap > 0) {
+ if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
+ /* found the new highest */
+ asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn + gap;
+ break;
+ }
+ gap--;
+ }
+ if (gap == 0) {
+ /* Nothing left in map */
+ memset(asoc->mapping_array, 0, asoc->mapping_array_size);
+ asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
+ asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
+ }
+ asoc->last_revoke_count = cnt;
+ callout_stop(&stcb->asoc.dack_timer.timer);
+ sctp_send_sack(stcb);
+ reneged_asoc_ids[reneged_at] = sctp_get_associd(stcb);
+ reneged_at++;
+ }
+ /*
+	 * Another issue: in un-setting the TSN's in the mapping array we
+	 * DID NOT adjust the highest_tsn marker. This will cause one of two
+	 * things to occur. It may cause us to do extra work in checking for
+	 * our mapping array movement. More importantly it may cause us to
+	 * SACK every datagram. This may not be a bad thing though, since we
+	 * will recover once our cum-ack advances past all this stuff we
+	 * dumped.
+ */
+}
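+
+/*
+ * A worked example of the wrap-aware "gap" computation used above.  The
+ * mapping array is indexed by the distance of a TSN from
+ * mapping_array_base_tsn; with base = 0xfffffffe:
+ *
+ *   TSN 0xfffffffe -> gap = 0xfffffffe - base          = 0
+ *   TSN 0xffffffff -> gap = 0xffffffff - base          = 1
+ *   TSN 0x00000000 -> gap = (MAX_TSN - base) + 0x0 + 1 = 2
+ *   TSN 0x00000001 -> gap = (MAX_TSN - base) + 0x1 + 1 = 3
+ *
+ * which is why the second branch is taken whenever the TSN is
+ * numerically below the base.
+ */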
+
+void
+sctp_drain()
+{
+ /*
+ * We must walk the PCB lists for ALL associations here. The system
+ * is LOW on MBUF's and needs help. This is where reneging will
+ * occur. We really hope this does NOT happen!
+ */
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+
+ SCTP_INP_INFO_RLOCK();
+ LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
+ /* For each endpoint */
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ /* For each association */
+ SCTP_TCB_LOCK(stcb);
+ sctp_drain_mbufs(inp, stcb);
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ SCTP_INP_INFO_RUNLOCK();
+}
+
+/*
+ * start a new iterator
+ * iterates through all endpoints and associations based on the pcb_state
+ * flags and asoc_state. "af" (mandatory) is executed for all matching
+ * assocs and "ef" (optional) is executed when the iterator completes.
+ * "inpf" (optional) is executed for each new endpoint as it is being
+ * iterated through.
+ */
+int
+sctp_initiate_iterator(inp_func inpf, asoc_func af, uint32_t pcb_state,
+ uint32_t pcb_features, uint32_t asoc_state, void *argp, uint32_t argi,
+ end_func ef, struct sctp_inpcb *s_inp, uint8_t chunk_output_off)
+{
+ struct sctp_iterator *it = NULL;
+ int s;
+
+ if (af == NULL) {
+ return (-1);
+ }
+ SCTP_MALLOC(it, struct sctp_iterator *, sizeof(struct sctp_iterator),
+ "Iterator");
+ if (it == NULL) {
+ return (ENOMEM);
+ }
+ memset(it, 0, sizeof(*it));
+ it->function_assoc = af;
+ it->function_inp = inpf;
+ it->function_atend = ef;
+ it->pointer = argp;
+ it->val = argi;
+ it->pcb_flags = pcb_state;
+ it->pcb_features = pcb_features;
+ it->asoc_state = asoc_state;
+ it->no_chunk_output = chunk_output_off;
+ if (s_inp) {
+ it->inp = s_inp;
+ it->iterator_flags = SCTP_ITERATOR_DO_SINGLE_INP;
+ } else {
+ SCTP_INP_INFO_RLOCK();
+ it->inp = LIST_FIRST(&sctppcbinfo.listhead);
+ SCTP_INP_INFO_RUNLOCK();
+ it->iterator_flags = SCTP_ITERATOR_DO_ALL_INP;
+
+ }
+ /* Init the timer */
+ callout_init(&it->tmr.timer, 1);
+ /* add to the list of all iterators */
+ SCTP_INP_INFO_WLOCK();
+ LIST_INSERT_HEAD(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
+ SCTP_INP_INFO_WUNLOCK();
+ s = splnet();
+ sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR, (struct sctp_inpcb *)it,
+ NULL, NULL);
+ splx(s);
+ return (0);
+}
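+
+/*
+ * A minimal sketch (compiled out) of how a caller might drive the
+ * iterator.  The callback names here are hypothetical, their signatures
+ * are assumed from the asoc_func/end_func typedefs, and the zero flag
+ * arguments are only meant to show the shape of the call:
+ */
+#if 0
+static void
+my_asoc_func(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
+    uint32_t val)
+{
+	/* runs once for every matching association */
+}
+
+static void
+my_end_func(void *ptr, uint32_t val)
+{
+	/* runs once when the iterator has visited everything */
+}
+
+static void
+kick_iterator(void)
+{
+	(void)sctp_initiate_iterator(NULL, my_asoc_func, 0, 0, 0, NULL, 0,
+	    my_end_func, NULL, 0);
+}
+#endif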
+
+
+/*
+ * Callout/Timer routines for OSes that don't have them
+ */
diff --git a/sys/netinet/sctp_pcb.h b/sys/netinet/sctp_pcb.h
new file mode 100644
index 0000000..1e0750f
--- /dev/null
+++ b/sys/netinet/sctp_pcb.h
@@ -0,0 +1,504 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_pcb.h,v 1.21 2005/07/16 01:18:47 suz Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_pcb_h__
+#define __sctp_pcb_h__
+
+
+
+/*
+ * We must have V6 so the size of the proto can be calculated. Otherwise we
+ * would not allocate enough for Net/Open BSD :-<
+ */
+
+#if defined(_KERNEL)
+#include <net/pfil.h>
+#endif
+
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/if_var.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <netinet/ip6.h>
+#include <netinet6/ip6_var.h>
+#include <netinet6/ip6protosw.h>
+#include <netinet6/in6_var.h>
+#include <netinet6/in6_pcb.h>
+
+#ifndef in6pcb
+#define in6pcb inpcb
+#endif
+
+#include <netinet/sctp.h>
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_constants.h>
+
+LIST_HEAD(sctppcbhead, sctp_inpcb);
+LIST_HEAD(sctpasochead, sctp_tcb);
+LIST_HEAD(sctpladdr, sctp_laddr);
+LIST_HEAD(sctpvtaghead, sctp_tagblock);
+TAILQ_HEAD(sctp_readhead, sctp_queued_to_read);
+TAILQ_HEAD(sctp_streamhead, sctp_stream_queue_pending);
+
+#include <netinet/sctp_structs.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctp_auth.h>
+
+/*
+ * PCB flags (in sctp_flags bitmask)
+ */
+#define SCTP_PCB_FLAGS_UDPTYPE 0x00000001
+#define SCTP_PCB_FLAGS_TCPTYPE 0x00000002
+#define SCTP_PCB_FLAGS_BOUNDALL 0x00000004
+#define SCTP_PCB_FLAGS_ACCEPTING 0x00000008
+#define SCTP_PCB_FLAGS_UNBOUND 0x00000010
+#define SCTP_PCB_FLAGS_CLOSE_IP 0x00040000
+#define SCTP_PCB_FLAGS_WAS_CONNECTED 0x00080000
+#define SCTP_PCB_FLAGS_WAS_ABORTED 0x00100000
+/* TCP model support */
+
+#define SCTP_PCB_FLAGS_CONNECTED 0x00200000
+#define SCTP_PCB_FLAGS_IN_TCPPOOL 0x00400000
+#define SCTP_PCB_FLAGS_DONT_WAKE 0x00800000
+#define SCTP_PCB_FLAGS_WAKEOUTPUT 0x01000000
+#define SCTP_PCB_FLAGS_WAKEINPUT 0x02000000
+#define SCTP_PCB_FLAGS_BOUND_V6 0x04000000
+#define SCTP_PCB_FLAGS_NEEDS_MAPPED_V4 0x08000000
+#define SCTP_PCB_FLAGS_BLOCKING_IO 0x10000000
+#define SCTP_PCB_FLAGS_SOCKET_GONE 0x20000000
+#define SCTP_PCB_FLAGS_SOCKET_ALLGONE 0x40000000
+/* flags to copy to new PCB */
+#define SCTP_PCB_COPY_FLAGS 0x0e000004
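+
+/*
+ * For reference, SCTP_PCB_COPY_FLAGS (0x0e000004) decodes against the
+ * bits above as BOUNDALL (0x00000004) | WAKEINPUT (0x02000000) |
+ * BOUND_V6 (0x04000000) | NEEDS_MAPPED_V4 (0x08000000), i.e. the
+ * binding and address-family state a new PCB presumably inherits.
+ */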
+
+
+/*
+ * PCB Features (in sctp_features bitmask)
+ */
+#define SCTP_PCB_FLAGS_EXT_RCVINFO 0x00000004
+#define SCTP_PCB_FLAGS_DONOT_HEARTBEAT 0x00000008
+#define SCTP_PCB_FLAGS_FRAG_INTERLEAVE 0x00000010
+#define SCTP_PCB_FLAGS_DO_ASCONF 0x00000020
+#define SCTP_PCB_FLAGS_AUTO_ASCONF 0x00000040
+/* socket options */
+#define SCTP_PCB_FLAGS_NODELAY 0x00000100
+#define SCTP_PCB_FLAGS_AUTOCLOSE 0x00000200
+#define SCTP_PCB_FLAGS_RECVDATAIOEVNT 0x00000400
+#define SCTP_PCB_FLAGS_RECVASSOCEVNT 0x00000800
+#define SCTP_PCB_FLAGS_RECVPADDREVNT 0x00001000
+#define SCTP_PCB_FLAGS_RECVPEERERR 0x00002000
+#define SCTP_PCB_FLAGS_RECVSENDFAILEVNT 0x00004000
+#define SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT 0x00008000
+#define SCTP_PCB_FLAGS_ADAPTATIONEVNT 0x00010000
+#define SCTP_PCB_FLAGS_PDAPIEVNT 0x00020000
+#define SCTP_PCB_FLAGS_AUTHEVNT 0x00040000
+#define SCTP_PCB_FLAGS_STREAM_RESETEVNT 0x00080000
+#define SCTP_PCB_FLAGS_NO_FRAGMENT 0x00100000
+#define SCTP_PCB_FLAGS_EXPLICIT_EOR 0x00200000
+
+
+#define SCTP_PCBHASH_ALLADDR(port, mask) (port & mask)
+#define SCTP_PCBHASH_ASOC(tag, mask) (tag & mask)
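+
+/*
+ * hashinit() hands back power-of-two sized tables, so these macros
+ * reduce to a cheap masked modulo.  For example, with a vtag of
+ * 0x12345678 and a 1024-bucket table (mask 0x3ff):
+ *
+ *   SCTP_PCBHASH_ASOC(0x12345678, 0x3ff) = 0x12345678 & 0x3ff = 0x278
+ */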
+
+struct sctp_laddr {
+ LIST_ENTRY(sctp_laddr) sctp_nxt_addr; /* next in list */
+ struct ifaddr *ifa;
+ int action; /* Only used in delayed asconf stuff */
+};
+
+struct sctp_block_entry {
+ int error;
+};
+
+struct sctp_timewait {
+ uint32_t tv_sec_at_expire; /* the seconds from boot to expire */
+ uint32_t v_tag; /* the vtag that can not be reused */
+};
+
+struct sctp_tagblock {
+ LIST_ENTRY(sctp_tagblock) sctp_nxt_tagblock;
+ struct sctp_timewait vtag_block[SCTP_NUMBER_IN_VTAG_BLOCK];
+};
+
+
+struct sctp_epinfo {
+ struct sctpasochead *sctp_asochash;
+ u_long hashasocmark;
+
+ struct sctppcbhead *sctp_ephash;
+ u_long hashmark;
+
+ struct sctpasochead *sctp_restarthash;
+ u_long hashrestartmark;
+ /*
+ * The TCP model represents a substantial overhead in that we get an
+ * additional hash table to keep explicit connections in. The
+ * listening TCP endpoint will exist in the usual ephash above and
+ * accept only INIT's. It will be incapable of sending off an INIT.
+	 * When a dg arrives we must look in the normal ephash. If we find a
+	 * TCP endpoint, that will tell us to go to the specific endpoint
+	 * hash and re-hash to find the right assoc/socket. If we find a UDP
+ * model socket we then must complete the lookup. If this fails,
+ * i.e. no association can be found then we must continue to see if
+ * a sctp_peeloff()'d socket is in the tcpephash (a spun off socket
+ * acts like a TCP model connected socket).
+ */
+ struct sctppcbhead *sctp_tcpephash;
+ u_long hashtcpmark;
+ uint32_t hashtblsize;
+
+ struct sctppcbhead listhead;
+ struct sctpladdr addr_wq;
+
+ struct sctpiterators iteratorhead;
+
+ /* ep zone info */
+ sctp_zone_t ipi_zone_ep;
+ sctp_zone_t ipi_zone_asoc;
+ sctp_zone_t ipi_zone_laddr;
+ sctp_zone_t ipi_zone_net;
+ sctp_zone_t ipi_zone_chunk;
+ sctp_zone_t ipi_zone_readq;
+ sctp_zone_t ipi_zone_strmoq;
+
+ struct mtx ipi_ep_mtx;
+ struct mtx it_mtx;
+ struct mtx ipi_addr_mtx;
+ uint32_t ipi_count_ep;
+
+ /* assoc/tcb zone info */
+ uint32_t ipi_count_asoc;
+
+ /* local addrlist zone info */
+ uint32_t ipi_count_laddr;
+
+ /* remote addrlist zone info */
+ uint32_t ipi_count_raddr;
+
+ /* chunk structure list for output */
+ uint32_t ipi_count_chunk;
+
+ /* socket queue zone info */
+ uint32_t ipi_count_readq;
+
+ /* socket queue zone info */
+ uint32_t ipi_count_strmoq;
+
+ /* system wide number of free chunks hanging around */
+ uint32_t ipi_free_chunks;
+ uint32_t ipi_free_strmoq;
+
+ struct sctpvtaghead vtag_timewait[SCTP_STACK_VTAG_HASH_SIZE];
+
+
+ struct sctp_timer addr_wq_timer;
+
+ /* for port allocations */
+ uint16_t lastport;
+ uint16_t lastlow;
+ uint16_t lasthi;
+
+};
+
+extern struct sctpstat sctpstat;
+
+/*
+ * Here we have all the relevant information for each SCTP entity created. We
+ * will need to modify this as appropriate. We also need to figure out how to
+ * access /dev/random.
+ */
+struct sctp_pcb {
+ unsigned int time_of_secret_change; /* number of seconds from
+ * timeval.tv_sec */
+ uint32_t secret_key[SCTP_HOW_MANY_SECRETS][SCTP_NUMBER_OF_SECRETS];
+ unsigned int size_of_a_cookie;
+
+ unsigned int sctp_timeoutticks[SCTP_NUM_TMRS];
+ unsigned int sctp_minrto;
+ unsigned int sctp_maxrto;
+ unsigned int initial_rto;
+
+ int initial_init_rto_max;
+
+ uint32_t sctp_sws_sender;
+ uint32_t sctp_sws_receiver;
+
+ /* authentication related fields */
+ struct sctp_keyhead shared_keys;
+ sctp_auth_chklist_t *local_auth_chunks;
+ sctp_hmaclist_t *local_hmacs;
+ uint16_t default_keyid;
+
+ /* various thresholds */
+	/* Max times I will retransmit an INIT to a peer */
+ uint16_t max_init_times;
+
+ /* Max times I will send before we consider someone dead */
+ uint16_t max_send_times;
+
+ uint16_t def_net_failure;
+
+	/* number of streams to pre-open on an association */
+ uint16_t pre_open_stream_count;
+ uint16_t max_open_streams_intome;
+
+ /* random number generator */
+ uint32_t random_counter;
+ uint8_t random_numbers[SCTP_SIGNATURE_ALOC_SIZE];
+ uint8_t random_store[SCTP_SIGNATURE_ALOC_SIZE];
+
+	/*
+	 * This timer is kept running per endpoint. When it fires it will
+	 * change the secret key. The default is once an hour.
+	 */
+ struct sctp_timer signature_change;
+ int def_cookie_life;
+ /* defaults to 0 */
+ int auto_close_time;
+ uint32_t initial_sequence_debug;
+ uint32_t adaptation_layer_indicator;
+ char store_at;
+ uint8_t max_burst;
+ char current_secret_number;
+ char last_secret_number;
+};
+
+#ifndef SCTP_ALIGNMENT
+#define SCTP_ALIGNMENT 32
+#endif
+
+#ifndef SCTP_ALIGNM1
+#define SCTP_ALIGNM1 (SCTP_ALIGNMENT-1)
+#endif
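+/*
+ * (size + SCTP_ALIGNM1) & ~SCTP_ALIGNM1 is the usual idiom for rounding
+ * size up to the next multiple of SCTP_ALIGNMENT; the align[] member of
+ * struct sctp_inpcb below uses it to pad the embedded inpcb.
+ */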
+
+#define sctp_lport ip_inp.inp.inp_lport
+
+struct sctp_inpcb {
+	/*
+	 * put an inpcb in front of it all, kind of a waste but we need it
+	 * for compatibility with all the other stuff.
+	 */
+ union {
+ struct inpcb inp;
+ char align[(sizeof(struct in6pcb) + SCTP_ALIGNM1) &
+ ~SCTP_ALIGNM1];
+ } ip_inp;
+
+
+ /* Socket buffer lock protects read_queue and of course sb_cc */
+ struct sctp_readhead read_queue;
+
+ LIST_ENTRY(sctp_inpcb) sctp_list; /* lists all endpoints */
+ /* hash of all endpoints for model */
+ LIST_ENTRY(sctp_inpcb) sctp_hash;
+ /* count of local addresses bound, 0 if bound all */
+ int laddr_count;
+ /* list of addrs in use by the EP */
+ struct sctpladdr sctp_addr_list;
+ /* used for source address selection rotation */
+ struct sctp_laddr *next_addr_touse;
+ struct ifnet *next_ifn_touse;
+ /* back pointer to our socket */
+ struct socket *sctp_socket;
+ uint32_t sctp_flags; /* INP state flag set */
+ uint32_t sctp_features; /* Feature flags */
+ struct sctp_pcb sctp_ep;/* SCTP ep data */
+ /* head of the hash of all associations */
+ struct sctpasochead *sctp_tcbhash;
+ u_long sctp_hashmark;
+ /* head of the list of all associations */
+ struct sctpasochead sctp_asoc_list;
+ struct sctp_iterator *inp_starting_point_for_iterator;
+ uint32_t sctp_frag_point;
+ uint32_t partial_delivery_point;
+ uint32_t sctp_context;
+ struct sctp_sndrcvinfo def_send;
+	/*
+	 * These three (pkt, pkt_last and control) are here for the
+	 * sosend_dgram routine. However, I don't think anyone in the
+	 * current FreeBSD kernel calls this. So they are candidates with
+	 * sctp_sendm for de-supporting.
+	 */
+ struct mbuf *pkt, *pkt_last;
+ struct mbuf *control;
+ struct mtx inp_mtx;
+ struct mtx inp_create_mtx;
+ struct mtx inp_rdata_mtx;
+ int32_t refcount;
+};
+
+struct sctp_tcb {
+ struct socket *sctp_socket; /* back pointer to socket */
+ struct sctp_inpcb *sctp_ep; /* back pointer to ep */
+ LIST_ENTRY(sctp_tcb) sctp_tcbhash; /* next link in hash
+ * table */
+ LIST_ENTRY(sctp_tcb) sctp_tcblist; /* list of all of the
+ * TCB's */
+ LIST_ENTRY(sctp_tcb) sctp_tcbrestarhash; /* next link in restart
+ * hash table */
+ LIST_ENTRY(sctp_tcb) sctp_asocs; /* vtag hash list */
+ struct sctp_block_entry *block_entry; /* pointer locked by socket
+ * send buffer */
+ struct sctp_association asoc;
+	/*
+	 * freed_by_sorcv_sincelast is protected by the sockbuf_lock NOT the
+	 * tcb_lock. It is special in this way to help avoid extra mutex
+	 * calls in the reading of data.
+	 */
+ uint32_t freed_by_sorcv_sincelast;
+ uint16_t rport; /* remote port in network format */
+ uint16_t resv;
+ struct mtx tcb_mtx;
+ struct mtx tcb_send_mtx;
+};
+
+
+
+#include <netinet/sctp_lock_bsd.h>
+
+
+
+#if defined(_KERNEL)
+
+extern struct sctp_epinfo sctppcbinfo;
+extern int sctp_auto_asconf;
+
+int SCTP6_ARE_ADDR_EQUAL(struct in6_addr *a, struct in6_addr *b);
+
+void sctp_fill_pcbinfo(struct sctp_pcbinfo *);
+
+struct sctp_nets *sctp_findnet(struct sctp_tcb *, struct sockaddr *);
+
+struct sctp_inpcb *sctp_pcb_findep(struct sockaddr *, int, int);
+
+int sctp_inpcb_bind(struct socket *, struct sockaddr *, struct thread *);
+
+
+struct sctp_tcb *
+sctp_findassociation_addr(struct mbuf *, int, int,
+ struct sctphdr *, struct sctp_chunkhdr *, struct sctp_inpcb **,
+ struct sctp_nets **);
+
+struct sctp_tcb *
+sctp_findassociation_addr_sa(struct sockaddr *,
+ struct sockaddr *, struct sctp_inpcb **, struct sctp_nets **, int);
+
+void
+sctp_move_pcb_and_assoc(struct sctp_inpcb *, struct sctp_inpcb *,
+ struct sctp_tcb *);
+
+/*
+ * For this call ep_addr, the "to" is the destination endpoint address of the
+ * peer (relative to outbound). The "from" field is only used if the TCP
+ * model is enabled and helps distinguish amongst the subset bound
+ * (non-boundall). The TCP model MAY change the actual ep field, this is why
+ * it is passed.
+ */
+struct sctp_tcb *
+sctp_findassociation_ep_addr(struct sctp_inpcb **,
+ struct sockaddr *, struct sctp_nets **, struct sockaddr *,
+ struct sctp_tcb *);
+
+struct sctp_tcb *
+sctp_findassociation_ep_asocid(struct sctp_inpcb *,
+ sctp_assoc_t, int);
+
+struct sctp_tcb *
+sctp_findassociation_ep_asconf(struct mbuf *, int, int,
+ struct sctphdr *, struct sctp_inpcb **, struct sctp_nets **);
+
+int sctp_inpcb_alloc(struct socket *);
+
+int sctp_is_address_on_local_host(struct sockaddr *addr);
+
+void sctp_inpcb_free(struct sctp_inpcb *, int, int);
+
+struct sctp_tcb *
+sctp_aloc_assoc(struct sctp_inpcb *, struct sockaddr *,
+ int, int *, uint32_t);
+
+int sctp_free_assoc(struct sctp_inpcb *, struct sctp_tcb *, int);
+
+int sctp_add_local_addr_ep(struct sctp_inpcb *, struct ifaddr *);
+
+int sctp_insert_laddr(struct sctpladdr *, struct ifaddr *);
+
+void sctp_remove_laddr(struct sctp_laddr *);
+
+int sctp_del_local_addr_ep(struct sctp_inpcb *, struct ifaddr *);
+
+int sctp_del_local_addr_ep_sa(struct sctp_inpcb *, struct sockaddr *);
+
+int sctp_add_remote_addr(struct sctp_tcb *, struct sockaddr *, int, int);
+
+void sctp_remove_net(struct sctp_tcb *, struct sctp_nets *);
+
+int sctp_del_remote_addr(struct sctp_tcb *, struct sockaddr *);
+
+void sctp_pcb_init(void);
+
+int sctp_add_local_addr_assoc(struct sctp_tcb *, struct ifaddr *);
+
+int sctp_del_local_addr_assoc(struct sctp_tcb *, struct ifaddr *);
+
+int sctp_del_local_addr_assoc_sa(struct sctp_tcb *, struct sockaddr *);
+
+int
+sctp_load_addresses_from_init(struct sctp_tcb *, struct mbuf *, int, int,
+ int, struct sctphdr *, struct sockaddr *);
+
+int
+sctp_set_primary_addr(struct sctp_tcb *, struct sockaddr *,
+ struct sctp_nets *);
+
+int sctp_is_vtag_good(struct sctp_inpcb *, uint32_t, struct timeval *);
+
+/* void sctp_drain(void); */
+
+int sctp_destination_is_reachable(struct sctp_tcb *, struct sockaddr *);
+
+/*
+ * A NULL inpcb in the last arg indicates run on ALL ep's. A specific inp in
+ * the last arg indicates run on ONLY the assoc's of the specified endpoint.
+ */
+int
+sctp_initiate_iterator(inp_func inpf, asoc_func af, uint32_t, uint32_t,
+ uint32_t, void *, uint32_t, end_func ef, struct sctp_inpcb *, uint8_t co_off);
+
+
+
+#endif /* _KERNEL */
+#endif /* !__sctp_pcb_h__ */
diff --git a/sys/netinet/sctp_peeloff.c b/sys/netinet/sctp_peeloff.c
new file mode 100644
index 0000000..cfa4a2d
--- /dev/null
+++ b/sys/netinet/sctp_peeloff.c
@@ -0,0 +1,240 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/* $KAME: sctp_peeloff.c,v 1.13 2005/03/06 16:04:18 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_ipsec.h"
+#include "opt_inet6.h"
+#include "opt_inet.h"
+
+#include "opt_sctp.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/domain.h>
+#include <sys/proc.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/sysctl.h>
+#include <sys/syslog.h>
+#include <net/if.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#ifdef INET6
+#include <netinet/ip6.h>
+#endif
+#include <netinet/in_pcb.h>
+#include <netinet/in_var.h>
+#include <netinet/ip_var.h>
+#ifdef INET6
+#include <netinet6/ip6_var.h>
+#endif
+#include <netinet/ip_icmp.h>
+#include <netinet/icmp_var.h>
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_peeloff.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_auth.h>
+
+#ifdef IPSEC
+#include <netinet6/ipsec.h>
+#include <netkey/key.h>
+#endif /* IPSEC */
+
+
+#ifdef SCTP_DEBUG
+extern uint32_t sctp_debug_on;
+
+#endif /* SCTP_DEBUG */
+
+
+int
+sctp_can_peel_off(struct socket *head, sctp_assoc_t assoc_id)
+{
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+
+ inp = (struct sctp_inpcb *)head->so_pcb;
+ if (inp == NULL) {
+ return (EFAULT);
+ }
+ stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1);
+ if (stcb == NULL) {
+ return (ENOTCONN);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ /* We are clear to peel this one off */
+ return (0);
+}
+
+int
+sctp_do_peeloff(struct socket *head, struct socket *so, sctp_assoc_t assoc_id)
+{
+ struct sctp_inpcb *inp, *n_inp;
+ struct sctp_tcb *stcb;
+
+ inp = (struct sctp_inpcb *)head->so_pcb;
+ if (inp == NULL)
+ return (EFAULT);
+ stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1);
+ if (stcb == NULL)
+ return (ENOTCONN);
+
+ n_inp = (struct sctp_inpcb *)so->so_pcb;
+ n_inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE |
+ SCTP_PCB_FLAGS_CONNECTED |
+ SCTP_PCB_FLAGS_IN_TCPPOOL | /* Turn on Blocking IO */
+ (SCTP_PCB_COPY_FLAGS & inp->sctp_flags));
+ n_inp->sctp_socket = so;
+ n_inp->sctp_features = inp->sctp_features;
+ n_inp->sctp_frag_point = inp->sctp_frag_point;
+ n_inp->partial_delivery_point = inp->partial_delivery_point;
+ n_inp->sctp_context = inp->sctp_context;
+ n_inp->inp_starting_point_for_iterator = NULL;
+
+ /*
+ * Now we must move it from one hash table to another and get the
+ * stcb in the right place.
+ */
+ sctp_move_pcb_and_assoc(inp, n_inp, stcb);
+
+ sctp_pull_off_control_to_new_inp(inp, n_inp, stcb);
+
+ SCTP_TCB_UNLOCK(stcb);
+ return (0);
+}
+
+struct socket *
+sctp_get_peeloff(struct socket *head, sctp_assoc_t assoc_id, int *error)
+{
+ struct socket *newso;
+ struct sctp_inpcb *inp, *n_inp;
+ struct sctp_tcb *stcb;
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_PEEL1) {
+ printf("SCTP peel-off called\n");
+ }
+#endif /* SCTP_DEBUG */
+
+ inp = (struct sctp_inpcb *)head->so_pcb;
+ if (inp == NULL) {
+ *error = EFAULT;
+ return (NULL);
+ }
+ stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1);
+ if (stcb == NULL) {
+ *error = ENOTCONN;
+ return (NULL);
+ }
+	newso = sonewconn(head, SS_ISCONNECTED);
+ if (newso == NULL) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_PEEL1) {
+ printf("sctp_peeloff:sonewconn failed err\n");
+ }
+#endif /* SCTP_DEBUG */
+ *error = ENOMEM;
+ SCTP_TCB_UNLOCK(stcb);
+ return (NULL);
+ }
+ n_inp = (struct sctp_inpcb *)newso->so_pcb;
+ SOCK_LOCK(head);
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_WLOCK(n_inp);
+ n_inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE |
+ SCTP_PCB_FLAGS_CONNECTED |
+ SCTP_PCB_FLAGS_IN_TCPPOOL | /* Turn on Blocking IO */
+ (SCTP_PCB_COPY_FLAGS & inp->sctp_flags));
+ n_inp->sctp_features = inp->sctp_features;
+ n_inp->sctp_frag_point = inp->sctp_frag_point;
+ n_inp->partial_delivery_point = inp->partial_delivery_point;
+ n_inp->sctp_context = inp->sctp_context;
+ n_inp->inp_starting_point_for_iterator = NULL;
+
+ /* copy in the authentication parameters from the original endpoint */
+ if (n_inp->sctp_ep.local_hmacs)
+ sctp_free_hmaclist(n_inp->sctp_ep.local_hmacs);
+ n_inp->sctp_ep.local_hmacs =
+ sctp_copy_hmaclist(inp->sctp_ep.local_hmacs);
+ if (n_inp->sctp_ep.local_auth_chunks)
+ sctp_free_chunklist(n_inp->sctp_ep.local_auth_chunks);
+ n_inp->sctp_ep.local_auth_chunks =
+ sctp_copy_chunklist(inp->sctp_ep.local_auth_chunks);
+ (void)sctp_copy_skeylist(&inp->sctp_ep.shared_keys,
+ &n_inp->sctp_ep.shared_keys);
+
+ n_inp->sctp_socket = newso;
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
+ sctp_feature_off(n_inp, SCTP_PCB_FLAGS_AUTOCLOSE);
+ n_inp->sctp_ep.auto_close_time = 0;
+ sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, n_inp, stcb, NULL);
+ }
+ /* Turn off any non-blocking semantic. */
+ newso->so_state &= ~SS_NBIO;
+ newso->so_state |= SS_ISCONNECTED;
+ /* We remove it right away */
+#ifdef SCTP_LOCK_LOGGING
+ sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK);
+#endif
+ TAILQ_REMOVE(&head->so_comp, newso, so_list);
+ head->so_qlen--;
+ SOCK_UNLOCK(head);
+ /*
+ * Now we must move it from one hash table to another and get the
+ * stcb in the right place.
+ */
+ SCTP_INP_WUNLOCK(n_inp);
+ SCTP_INP_WUNLOCK(inp);
+ sctp_move_pcb_and_assoc(inp, n_inp, stcb);
+	/*
+	 * And now the final hack. We move data on the pending side, i.e.
+	 * head, to the new socket buffer. Let the GRUBBING begin :-0
+	 */
+ sctp_pull_off_control_to_new_inp(inp, n_inp, stcb);
+
+ SCTP_TCB_UNLOCK(stcb);
+ return (newso);
+}
diff --git a/sys/netinet/sctp_peeloff.h b/sys/netinet/sctp_peeloff.h
new file mode 100644
index 0000000..226b5d6
--- /dev/null
+++ b/sys/netinet/sctp_peeloff.h
@@ -0,0 +1,56 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_peeloff.h,v 1.6 2005/03/06 16:04:18 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_peeloff_h__
+#define __sctp_peeloff_h__
+
+#include <sys/types.h>
+#include <sys/socketvar.h>
+#include <sys/socket.h>
+
+
+
+
+#if defined(_KERNEL)
+
+int sctp_can_peel_off(struct socket *, sctp_assoc_t);
+int sctp_do_peeloff(struct socket *, struct socket *, sctp_assoc_t);
+struct socket *sctp_get_peeloff(struct socket *, sctp_assoc_t, int *);
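+/*
+ * Illustrative calling sequence (a sketch, not the actual syscall code):
+ * a caller first validates the association, then hands a freshly created
+ * socket "so" to sctp_do_peeloff():
+ *
+ *	error = sctp_can_peel_off(head, assoc_id);
+ *	if (error == 0) {
+ *		... create "so" from head's protocol/type ...
+ *		error = sctp_do_peeloff(head, so, assoc_id);
+ *	}
+ */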
+
+
+
+#endif /* _KERNEL */
+
+#endif
diff --git a/sys/netinet/sctp_structs.h b/sys/netinet/sctp_structs.h
new file mode 100644
index 0000000..1d43de0
--- /dev/null
+++ b/sys/netinet/sctp_structs.h
@@ -0,0 +1,892 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_structs.h,v 1.13 2005/03/06 16:04:18 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_structs_h__
+#define __sctp_structs_h__
+
+#include <sys/queue.h>
+
+#include <sys/callout.h>
+#include <sys/socket.h>
+
+#ifdef IPSEC
+#include <netinet6/ipsec.h>
+#include <netkey/key.h>
+#endif
+
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctp_auth.h>
+
+struct sctp_timer {
+ struct callout timer;
+ int type;
+ /*
+ * Depending on the timer type these will be setup and cast with the
+ * appropriate entity.
+ */
+ void *ep;
+ void *tcb;
+ void *net;
+
+ /* for sanity checking */
+ void *self;
+ uint32_t ticks;
+};
+
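+/*
+ * Mirrors the user-visible struct sctp_sndrcvinfo field for field but
+ * without its reserve padding, presumably to keep the in-kernel footprint
+ * small.
+ */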
+struct sctp_nonpad_sndrcvinfo {
+ uint16_t sinfo_stream;
+ uint16_t sinfo_ssn;
+ uint16_t sinfo_flags;
+ uint32_t sinfo_ppid;
+ uint32_t sinfo_context;
+ uint32_t sinfo_timetolive;
+ uint32_t sinfo_tsn;
+ uint32_t sinfo_cumtsn;
+ sctp_assoc_t sinfo_assoc_id;
+};
+
+
+/*
+ * This is the information we track on each interface that we know about from
+ * the distant end.
+ */
+TAILQ_HEAD(sctpnetlisthead, sctp_nets);
+
+struct sctp_stream_reset_list {
+ TAILQ_ENTRY(sctp_stream_reset_list) next_resp;
+ uint32_t tsn;
+ int number_entries;
+ struct sctp_stream_reset_out_request req;
+};
+
+TAILQ_HEAD(sctp_resethead, sctp_stream_reset_list);
+
+/*
+ * Users of the iterator need to malloc an iterator with a call to
+ * sctp_initiate_iterator(inp_func, assoc_func, pcb_flags, pcb_features,
+ * asoc_state, void-ptr-arg, uint32-arg, end_func, inp);
+ *
+ * Use the following two defines if you don't care what pcb flags are on the
+ * EP and/or you don't care what state the association is in. An illustrative
+ * call sketch follows the typedefs below.
+ *
+ * Note that if you specify an INP as the last argument then ONLY each
+ * association of that single INP will be executed upon. Note that the pcb
+ * flags STILL apply, so if the inp you specify has pcb_flags different from
+ * what you put in pcb_flags, nothing will happen. Use SCTP_PCB_ANY_FLAGS to
+ * assure the inp you specify gets processed.
+ */
+#define SCTP_PCB_ANY_FLAGS 0x00000000
+#define SCTP_PCB_ANY_FEATURES 0x00000000
+#define SCTP_ASOC_ANY_STATE 0x00000000
+
+typedef void (*asoc_func) (struct sctp_inpcb *, struct sctp_tcb *, void *ptr,
+ uint32_t val);
+typedef void (*inp_func) (struct sctp_inpcb *, void *ptr, uint32_t val);
+typedef void (*end_func) (void *ptr, uint32_t val);
+
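+/*
+ * Illustrative sketch (not part of the original code): a minimal caller
+ * that counts every association on every endpoint. The names count_assoc,
+ * count_done and assoc_counter are hypothetical.
+ *
+ *	static uint32_t assoc_counter;
+ *
+ *	static void
+ *	count_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ *	    void *ptr, uint32_t val)
+ *	{
+ *		(*(uint32_t *)ptr)++;
+ *	}
+ *
+ *	static void
+ *	count_done(void *ptr, uint32_t val)
+ *	{
+ *		printf("saw %u assocs\n", *(uint32_t *)ptr);
+ *	}
+ *
+ *	sctp_initiate_iterator(NULL, count_assoc, SCTP_PCB_ANY_FLAGS,
+ *	    SCTP_PCB_ANY_FEATURES, SCTP_ASOC_ANY_STATE, &assoc_counter, 0,
+ *	    count_done, NULL, 0);
+ */
+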
+struct sctp_iterator {
+ LIST_ENTRY(sctp_iterator) sctp_nxt_itr;
+ struct sctp_timer tmr;
+ struct sctp_inpcb *inp; /* current endpoint */
+	struct sctp_tcb *stcb;	/* current assoc */
+ asoc_func function_assoc; /* per assoc function */
+ inp_func function_inp; /* per endpoint function */
+ end_func function_atend;/* iterator completion function */
+ void *pointer; /* pointer for apply func to use */
+ uint32_t val; /* value for apply func to use */
+ uint32_t pcb_flags; /* endpoint flags being checked */
+ uint32_t pcb_features; /* endpoint features being checked */
+ uint32_t asoc_state; /* assoc state being checked */
+ uint32_t iterator_flags;
+ uint8_t no_chunk_output;
+};
+
+/* iterator_flags values */
+#define SCTP_ITERATOR_DO_ALL_INP 0x00000001
+#define SCTP_ITERATOR_DO_SINGLE_INP 0x00000002
+
+LIST_HEAD(sctpiterators, sctp_iterator);
+
+struct sctp_copy_all {
+ struct sctp_inpcb *inp; /* ep */
+ struct mbuf *m;
+ struct sctp_sndrcvinfo sndrcv;
+ int sndlen;
+ int cnt_sent;
+ int cnt_failed;
+};
+
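+/*
+ * One piece of storage big enough for either address family; callers pick
+ * the appropriate view through .sin, .sin6 or the generic .sa.
+ */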
+union sctp_sockstore {
+#ifdef AF_INET
+ struct sockaddr_in sin;
+#endif
+#ifdef AF_INET6
+ struct sockaddr_in6 sin6;
+#endif
+ struct sockaddr sa;
+};
+
+struct sctp_nets {
+ TAILQ_ENTRY(sctp_nets) sctp_next; /* next link */
+
+ /*
+ * Things on the top half may be able to be split into a common
+ * structure shared by all.
+ */
+ struct sctp_timer pmtu_timer;
+
+ /*
+ * The following two in combination equate to a route entry for v6
+ * or v4.
+ */
+ struct sctp_route {
+ struct rtentry *ro_rt;
+ union sctp_sockstore _l_addr; /* remote peer addr */
+ union sctp_sockstore _s_addr; /* our selected src addr */
+ } ro;
+ /* mtu discovered so far */
+ uint32_t mtu;
+ uint32_t ssthresh; /* not sure about this one for split */
+
+ /* smoothed average things for RTT and RTO itself */
+ int lastsa;
+ int lastsv;
+ unsigned int RTO;
+
+ /* This is used for SHUTDOWN/SHUTDOWN-ACK/SEND or INIT timers */
+ struct sctp_timer rxt_timer;
+ struct sctp_timer fr_timer; /* for early fr */
+
+ /* last time in seconds I sent to it */
+ struct timeval last_sent_time;
+ int ref_count;
+
+ /* Congestion stats per destination */
+ /*
+ * flight size variables and such, sorry Vern, I could not avoid
+ * this if I wanted performance :>
+ */
+ uint32_t flight_size;
+ uint32_t cwnd; /* actual cwnd */
+ uint32_t prev_cwnd; /* cwnd before any processing */
+ uint32_t partial_bytes_acked; /* in CA tracks when to incr a MTU */
+ uint32_t rtt_variance;
+ uint32_t prev_rtt;
+	/* tracking variables to avoid the alloc/free in sack processing */
+ unsigned int net_ack;
+ unsigned int net_ack2;
+
+ /*
+ * CMT variables (iyengar@cis.udel.edu)
+ */
+ uint32_t this_sack_highest_newack; /* tracks highest TSN newly
+ * acked for a given dest in
+ * the current SACK. Used in
+ * SFR and HTNA algos */
+ uint32_t pseudo_cumack; /* CMT CUC algorithm. Maintains next expected
+ * pseudo-cumack for this destination */
+ uint32_t rtx_pseudo_cumack; /* CMT CUC algorithm. Maintains next
+ * expected pseudo-cumack for this
+ * destination */
+
+ /* CMT fast recovery variables */
+ uint32_t fast_recovery_tsn;
+ uint32_t heartbeat_random1;
+ uint32_t heartbeat_random2;
+ uint32_t tos_flowlabel;
+
+ /* if this guy is ok or not ... status */
+ uint16_t dest_state;
+ /* number of transmit failures to down this guy */
+ uint16_t failure_threshold;
+ /* error stats on destination */
+ uint16_t error_count;
+
+ uint8_t fast_retran_loss_recovery;
+ uint8_t will_exit_fast_recovery;
+ /* Flags that probably can be combined into dest_state */
+	uint8_t rto_variance_dir;	/* increasing = 1, decreasing = 0 */
+	uint8_t rto_pending;	/* is a segment marked for RTO update? (if
+				 * we split) */
+ uint8_t fast_retran_ip; /* fast retransmit in progress */
+ uint8_t hb_responded;
+ uint8_t saw_newack; /* CMT's SFR algorithm flag */
+ uint8_t src_addr_selected; /* if we split we move */
+ uint8_t indx_of_eligible_next_to_use;
+ uint8_t addr_is_local; /* its a local address (if known) could move
+ * in split */
+
+ /*
+ * CMT variables (iyengar@cis.udel.edu)
+ */
+ uint8_t find_pseudo_cumack; /* CMT CUC algorithm. Flag used to
+ * find a new pseudocumack. This flag
+ * is set after a new pseudo-cumack
+ * has been received and indicates
+ * that the sender should find the
+ * next pseudo-cumack expected for
+ * this destination */
+ uint8_t find_rtx_pseudo_cumack; /* CMT CUCv2 algorithm. Flag used to
+ * find a new rtx-pseudocumack. This
+ * flag is set after a new
+ * rtx-pseudo-cumack has been received
+ * and indicates that the sender
+ * should find the next
+ * rtx-pseudo-cumack expected for this
+ * destination */
+ uint8_t new_pseudo_cumack; /* CMT CUC algorithm. Flag used to
+ * indicate if a new pseudo-cumack or
+ * rtx-pseudo-cumack has been received */
+#ifdef SCTP_HIGH_SPEED
+ uint8_t last_hs_used; /* index into the last HS table entry we used */
+#endif
+};
+
+
+struct sctp_data_chunkrec {
+ uint32_t TSN_seq; /* the TSN of this transmit */
+ uint16_t stream_seq; /* the stream sequence number of this transmit */
+ uint16_t stream_number; /* the stream number of this guy */
+ uint32_t payloadtype;
+ uint32_t context; /* from send */
+
+ /* ECN Nonce: Nonce Value for this chunk */
+ uint8_t ect_nonce;
+
+ /*
+ * part of the Highest sacked algorithm to be able to stroke counts
+ * on ones that are FR'd.
+ */
+ uint32_t fast_retran_tsn; /* sending_seq at the time of FR */
+ struct timeval timetodrop; /* time we drop it from queue */
+ uint8_t doing_fast_retransmit;
+ uint8_t rcv_flags; /* flags pulled from data chunk on inbound for
+ * outbound holds sending flags. */
+ uint8_t state_flags;
+ uint8_t chunk_was_revoked;
+};
+
+TAILQ_HEAD(sctpchunk_listhead, sctp_tmit_chunk);
+
+/* The lower byte is used to enumerate PR_SCTP policies */
+#define CHUNK_FLAGS_PR_SCTP_TTL SCTP_PR_SCTP_TTL
+#define CHUNK_FLAGS_PR_SCTP_BUF SCTP_PR_SCTP_BUF
+#define CHUNK_FLAGS_PR_SCTP_RTX SCTP_PR_SCTP_RTX
+
+/* The upper byte is used as a bit mask */
+#define CHUNK_FLAGS_FRAGMENT_OK 0x0100
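+/*
+ * So, illustratively, (flags & 0x00ff) recovers the PR-SCTP policy while
+ * (flags & CHUNK_FLAGS_FRAGMENT_OK) tests the upper-byte bit mask.
+ */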
+
+struct chk_id {
+ uint16_t id;
+ uint16_t can_take_data;
+};
+
+
+struct sctp_tmit_chunk {
+ union {
+ struct sctp_data_chunkrec data;
+ struct chk_id chunk_id;
+ } rec;
+ struct sctp_association *asoc; /* bp to asoc this belongs to */
+ struct timeval sent_rcv_time; /* filled in if RTT being calculated */
+ struct mbuf *data; /* pointer to mbuf chain of data */
+ struct mbuf *last_mbuf; /* pointer to last mbuf in chain */
+ struct sctp_nets *whoTo;
+ TAILQ_ENTRY(sctp_tmit_chunk) sctp_next; /* next link */
+ int32_t sent; /* the send status */
+ uint16_t snd_count; /* number of times I sent */
+ uint16_t flags; /* flags, such as FRAGMENT_OK */
+ uint16_t send_size;
+ uint16_t book_size;
+ uint16_t mbcnt;
+ uint8_t pad_inplace;
+ uint8_t do_rtt;
+ uint8_t book_size_scale;
+ uint8_t addr_over; /* flag which is set if the dest address for
+ * this chunk is overridden by user. Used for
+ * CMT (iyengar@cis.udel.edu, 2005/06/21) */
+ uint8_t no_fr_allowed;
+ uint8_t pr_sctp_on;
+ uint8_t copy_by_ref;
+};
+
+/*
+ * The first part of this structure MUST be the entire sinfo structure. Maybe
+ * I should have made it a sub structure... we can circle back later and do
+ * that if we want.
+ */
+struct sctp_queued_to_read {	/* sinfo structure plus more */
+ uint16_t sinfo_stream; /* off the wire */
+ uint16_t sinfo_ssn; /* off the wire */
+	uint16_t sinfo_flags;	/* SCTP_UNORDERED from wire; use SCTP_EOF
+				 * for EOR */
+ uint32_t sinfo_ppid; /* off the wire */
+ uint32_t sinfo_context; /* pick this up from assoc def context? */
+ uint32_t sinfo_timetolive; /* not used by kernel */
+ uint32_t sinfo_tsn; /* Use this in reassembly as first TSN */
+ uint32_t sinfo_cumtsn; /* Use this in reassembly as last TSN */
+ sctp_assoc_t sinfo_assoc_id; /* our assoc id */
+ /* Non sinfo stuff */
+ uint32_t length; /* length of data */
+ uint32_t held_length; /* length held in sb */
+ struct sctp_nets *whoFrom; /* where it came from */
+ struct mbuf *data; /* front of the mbuf chain of data with
+ * PKT_HDR */
+ struct mbuf *tail_mbuf; /* used for multi-part data */
+ struct sctp_tcb *stcb; /* assoc, used for window update */
+ TAILQ_ENTRY(sctp_queued_to_read) next;
+ uint16_t port_from;
+ uint8_t do_not_ref_stcb;
+ uint8_t end_added;
+};
+
+/* This data structure will be on the outbound
+ * stream queues. Data will be pulled off from
+ * the front of the mbuf data and chunk-ified
+ * by the output routines. We will custom
+ * fit every chunk we pull to the send/sent
+ * queue to make up the next full packet
+ * if we can. An entry cannot be removed
+ * from the stream_out queue until
+ * the msg_is_complete flag is set. This
+ * means at times data/tail_mbuf MIGHT
+ * be NULL. If that occurs it happens
+ * for one of two reasons: either the user
+ * is blocked on a send() call and has not
+ * awoken to copy more data down, OR
+ * the user is in the explicit MSG_EOR mode
+ * and wrote some data but has not completed
+ * sending.
+ */
+struct sctp_stream_queue_pending {
+ struct mbuf *data;
+ struct mbuf *tail_mbuf;
+ struct timeval ts;
+ struct sctp_nets *net;
+ TAILQ_ENTRY(sctp_stream_queue_pending) next;
+ uint32_t length;
+ uint32_t timetolive;
+ uint32_t ppid;
+ uint32_t context;
+ uint16_t sinfo_flags;
+ uint16_t stream;
+ uint16_t strseq;
+ uint8_t msg_is_complete;
+ uint8_t some_taken;
+ uint8_t addr_over;
+ uint8_t act_flags;
+ uint8_t pr_sctp_on;
+ uint8_t resv;
+};
+
+/*
+ * this struct contains info that is used to track inbound stream data and
+ * help with ordering.
+ */
+TAILQ_HEAD(sctpwheelunrel_listhead, sctp_stream_in);
+struct sctp_stream_in {
+ struct sctp_readhead inqueue;
+ TAILQ_ENTRY(sctp_stream_in) next_spoke;
+ uint16_t stream_no;
+ uint16_t last_sequence_delivered; /* used for re-order */
+};
+
+/* This struct is used to track the traffic on outbound streams */
+TAILQ_HEAD(sctpwheel_listhead, sctp_stream_out);
+struct sctp_stream_out {
+ struct sctp_streamhead outqueue;
+ TAILQ_ENTRY(sctp_stream_out) next_spoke; /* next link in wheel */
+ uint16_t stream_no;
+ uint16_t next_sequence_sent; /* next one I expect to send out */
+ uint8_t last_msg_incomplete;
+};
+
+/* used to keep track of the addresses yet to try to add/delete */
+TAILQ_HEAD(sctp_asconf_addrhead, sctp_asconf_addr);
+struct sctp_asconf_addr {
+ TAILQ_ENTRY(sctp_asconf_addr) next;
+ struct sctp_asconf_addr_param ap;
+ struct ifaddr *ifa; /* save the ifa for add/del ip */
+ uint8_t sent; /* has this been sent yet? */
+};
+
+struct sctp_scoping {
+ uint8_t ipv4_addr_legal;
+ uint8_t ipv6_addr_legal;
+ uint8_t loopback_scope;
+ uint8_t ipv4_local_scope;
+ uint8_t local_scope;
+ uint8_t site_scope;
+};
+
+/*
+ * Here we have information about each individual association that we track.
+ * In production we would probably be more dynamic, but for ease of
+ * implementation we will have a fixed array that we hunt for in a linear
+ * fashion.
+ */
+struct sctp_association {
+ /* association state */
+ int state;
+ /* queue of pending addrs to add/delete */
+ struct sctp_asconf_addrhead asconf_queue;
+ struct timeval time_entered; /* time we entered state */
+ struct timeval time_last_rcvd;
+ struct timeval time_last_sent;
+ struct timeval time_last_sat_advance;
+ struct sctp_sndrcvinfo def_send; /* default send parameters */
+
+ /* timers and such */
+ struct sctp_timer hb_timer; /* hb timer */
+ struct sctp_timer dack_timer; /* Delayed ack timer */
+ struct sctp_timer asconf_timer; /* Asconf */
+ struct sctp_timer strreset_timer; /* stream reset */
+ struct sctp_timer shut_guard_timer; /* guard */
+ struct sctp_timer autoclose_timer; /* automatic close timer */
+ struct sctp_timer delayed_event_timer; /* timer for delayed events */
+
+ /* list of local addresses when add/del in progress */
+ struct sctpladdr sctp_local_addr_list;
+ struct sctpnetlisthead nets;
+
+ /* Free chunk list */
+ struct sctpchunk_listhead free_chunks;
+
+ /* Free stream output control list */
+ struct sctp_streamhead free_strmoq;
+
+ /* Control chunk queue */
+ struct sctpchunk_listhead control_send_queue;
+
+	/*
+	 * Once a TSN hits the wire it is moved to the sent_queue. We
+	 * maintain two counts here (don't know if any but retran_cnt is
+	 * needed). The idea is that the sent_queue_retran_cnt reflects how
+	 * many chunks have been marked for retransmission by either T3-rxt
+	 * or FR.
+	 */
+ struct sctpchunk_listhead sent_queue;
+ struct sctpchunk_listhead send_queue;
+
+
+ /* re-assembly queue for fragmented chunks on the inbound path */
+ struct sctpchunk_listhead reasmqueue;
+
+ /*
+ * this queue is used when we reach a condition that we can NOT put
+ * data into the socket buffer. We track the size of this queue and
+ * set our rwnd to the space in the socket minus also the
+ * size_on_delivery_queue.
+ */
+ struct sctpwheel_listhead out_wheel;
+
+ /*
+ * This pointer will be set to NULL most of the time. But when we
+ * have a fragmented message, where we could not get out all of the
+ * message at the last send then this will point to the stream to go
+ * get data from.
+ */
+ struct sctp_stream_out *locked_on_sending;
+
+ /* If an iterator is looking at me, this is it */
+ struct sctp_iterator *stcb_starting_point_for_iterator;
+
+ /* ASCONF destination address last sent to */
+/* struct sctp_nets *asconf_last_sent_to;*/
+/* Peter, grepping for the above shows only one strange set.
+ * I don't think we need it, so I have commented it out.
+ */
+
+ /* ASCONF save the last ASCONF-ACK so we can resend it if necessary */
+ struct mbuf *last_asconf_ack_sent;
+
+ /*
+ * pointer to last stream reset queued to control queue by us with
+ * requests.
+ */
+ struct sctp_tmit_chunk *str_reset;
+ /*
+ * if Source Address Selection happening, this will rotate through
+ * the link list.
+ */
+ struct sctp_laddr *last_used_address;
+
+ /* stream arrays */
+ struct sctp_stream_in *strmin;
+ struct sctp_stream_out *strmout;
+ uint8_t *mapping_array;
+ /* primary destination to use */
+ struct sctp_nets *primary_destination;
+ /* For CMT */
+ struct sctp_nets *last_net_data_came_from;
+ /* last place I got a data chunk from */
+ struct sctp_nets *last_data_chunk_from;
+ /* last place I got a control from */
+ struct sctp_nets *last_control_chunk_from;
+
+ /* circular looking for output selection */
+ struct sctp_stream_out *last_out_stream;
+
+ /*
+ * wait to the point the cum-ack passes req->send_reset_at_tsn for
+ * any req on the list.
+ */
+ struct sctp_resethead resetHead;
+
+ /* queue of chunks waiting to be sent into the local stack */
+ struct sctp_readhead pending_reply_queue;
+
+ uint32_t cookie_preserve_req;
+ /* ASCONF next seq I am sending out, inits at init-tsn */
+ uint32_t asconf_seq_out;
+ /* ASCONF last received ASCONF from peer, starts at peer's TSN-1 */
+ uint32_t asconf_seq_in;
+
+ /* next seq I am sending in str reset messages */
+ uint32_t str_reset_seq_out;
+
+ /* next seq I am expecting in str reset messages */
+ uint32_t str_reset_seq_in;
+
+
+ /* various verification tag information */
+	uint32_t my_vtag;	/* The tag to be used. If assoc is
+				 * re-initiated by remote end, and I have
+				 * unlocked, this will be regenerated to a
+				 * new random value. */
+ uint32_t peer_vtag; /* The peers last tag */
+
+ uint32_t my_vtag_nonce;
+ uint32_t peer_vtag_nonce;
+
+ uint32_t assoc_id;
+
+ /* This is the SCTP fragmentation threshold */
+ uint32_t smallest_mtu;
+
+ /*
+ * Special hook for Fast retransmit, allows us to track the highest
+ * TSN that is NEW in this SACK if gap ack blocks are present.
+ */
+ uint32_t this_sack_highest_gap;
+
+ /*
+ * The highest consecutive TSN that has been acked by peer on my
+ * sends
+ */
+ uint32_t last_acked_seq;
+
+ /* The next TSN that I will use in sending. */
+ uint32_t sending_seq;
+
+ /* Original seq number I used ??questionable to keep?? */
+ uint32_t init_seq_number;
+
+
+ /* The Advanced Peer Ack Point, as required by the PR-SCTP */
+ /* (A1 in Section 4.2) */
+ uint32_t advanced_peer_ack_point;
+
+	/*
+	 * The highest consecutive TSN at the bottom of the mapping array
+	 * (for his sends).
+	 */
+ uint32_t cumulative_tsn;
+	/*
+	 * Used to track the mapping array and its offset bits. This MAY be
+	 * lower than cumulative_tsn.
+	 */
+ uint32_t mapping_array_base_tsn;
+ /*
+ * used to track highest TSN we have received and is listed in the
+ * mapping array.
+ */
+ uint32_t highest_tsn_inside_map;
+
+ uint32_t last_echo_tsn;
+ uint32_t last_cwr_tsn;
+ uint32_t fast_recovery_tsn;
+ uint32_t sat_t3_recovery_tsn;
+ uint32_t tsn_last_delivered;
+	/*
+	 * For the pd-api we should re-write this a bit more efficiently.
+	 * We could have multiple sctp_queued_to_read's that we are building
+	 * at once. Now we only do this when we get ready to deliver to the
+	 * socket buffer. Note that we depend on the fact that the struct is
+	 * "stuck" on the read queue until we finish all the pd-api.
+	 */
+ struct sctp_queued_to_read *control_pdapi;
+
+ uint32_t tsn_of_pdapi_last_delivered;
+ uint32_t pdapi_ppid;
+ uint32_t context;
+ uint32_t last_reset_action[SCTP_MAX_RESET_PARAMS];
+ uint32_t last_sending_seq[SCTP_MAX_RESET_PARAMS];
+ uint32_t last_base_tsnsent[SCTP_MAX_RESET_PARAMS];
+ /*
+ * window state information and smallest MTU that I use to bound
+ * segmentation
+ */
+ uint32_t peers_rwnd;
+ uint32_t my_rwnd;
+ uint32_t my_last_reported_rwnd;
+ uint32_t my_rwnd_control_len;
+
+ uint32_t total_output_queue_size;
+
+ uint32_t sb_cc; /* shadow of sb_cc in one-2-one */
+ uint32_t sb_mbcnt; /* shadow of sb_mbcnt in one-2-one */
+ /* 32 bit nonce stuff */
+ uint32_t nonce_resync_tsn;
+ uint32_t nonce_wait_tsn;
+ uint32_t default_flowlabel;
+ uint32_t pr_sctp_cnt;
+ int ctrl_queue_cnt; /* could be removed REM */
+ /*
+ * All outbound datagrams queue into this list from the individual
+ * stream queue. Here they get assigned a TSN and then await
+ * sending. The stream seq comes when it is first put in the
+ * individual str queue
+ */
+ unsigned int stream_queue_cnt;
+ unsigned int send_queue_cnt;
+ unsigned int sent_queue_cnt;
+ unsigned int sent_queue_cnt_removeable;
+ /*
+ * Number on sent queue that are marked for retran until this value
+ * is 0 we only send one packet of retran'ed data.
+ */
+ unsigned int sent_queue_retran_cnt;
+
+ unsigned int size_on_reasm_queue;
+ unsigned int cnt_on_reasm_queue;
+ /* amount of data (bytes) currently in flight (on all destinations) */
+ unsigned int total_flight;
+ /* Total book size in flight */
+ unsigned int total_flight_count; /* count of chunks used with
+ * book total */
+	/* count of destination nets and list of destination nets */
+ unsigned int numnets;
+
+ /* Total error count on this association */
+ unsigned int overall_error_count;
+
+ unsigned int cnt_msg_on_sb;
+
+ /* All stream count of chunks for delivery */
+ unsigned int size_on_all_streams;
+ unsigned int cnt_on_all_streams;
+
+ /* Heart Beat delay in ticks */
+ unsigned int heart_beat_delay;
+
+ /* autoclose */
+ unsigned int sctp_autoclose_ticks;
+
+ /* how many preopen streams we have */
+ unsigned int pre_open_streams;
+
+ /* How many streams I support coming into me */
+ unsigned int max_inbound_streams;
+
+ /* the cookie life I award for any cookie, in seconds */
+ unsigned int cookie_life;
+ /* time to delay acks for */
+ unsigned int delayed_ack;
+
+ unsigned int numduptsns;
+ int dup_tsns[SCTP_MAX_DUP_TSNS];
+ unsigned int initial_init_rto_max; /* initial RTO for INIT's */
+ unsigned int initial_rto; /* initial send RTO */
+ unsigned int minrto; /* per assoc RTO-MIN */
+ unsigned int maxrto; /* per assoc RTO-MAX */
+
+ /* authentication fields */
+ sctp_auth_chklist_t *local_auth_chunks;
+ sctp_auth_chklist_t *peer_auth_chunks;
+ sctp_hmaclist_t *local_hmacs; /* local HMACs supported */
+ sctp_hmaclist_t *peer_hmacs; /* peer HMACs supported */
+ struct sctp_keyhead shared_keys; /* assoc's shared keys */
+ sctp_authinfo_t authinfo; /* randoms, cached keys */
+ uint16_t peer_hmac_id; /* peer HMAC id to send */
+	/*
+	 * refcnt to block freeing when a sender or receiver is off copying
+	 * user data in.
+	 */
+ uint16_t refcnt;
+
+ /*
+ * Being that we have no bag to collect stale cookies, and that we
+ * really would not want to anyway.. we will count them in this
+ * counter. We of course feed them to the pigeons right away (I have
+ * always thought of pigeons as flying rats).
+ */
+ uint16_t stale_cookie_count;
+
+ /*
+ * For the partial delivery API, if up, invoked this is what last
+ * TSN I delivered
+ */
+ uint16_t str_of_pdapi;
+ uint16_t ssn_of_pdapi;
+
+ /* counts of actual built streams. Allocation may be more however */
+ /* could re-arrange to optimize space here. */
+ uint16_t streamincnt;
+ uint16_t streamoutcnt;
+
+ /* my maximum number of retrans of INIT and SEND */
+	/* copied from SCTP but should be individually settable */
+ uint16_t max_init_times;
+ uint16_t max_send_times;
+
+ uint16_t def_net_failure;
+
+	/*
+	 * lock flag: 0 is ok to send, 1+ (doubles as a retran count) is
+	 * awaiting ACK
+	 */
+ uint16_t asconf_sent; /* possibly removable REM */
+ uint16_t mapping_array_size;
+
+ uint16_t last_strm_seq_delivered;
+ uint16_t last_strm_no_delivered;
+
+ uint16_t chunks_on_out_queue; /* total chunks floating around,
+ * locked by send socket buffer */
+ uint16_t last_revoke_count;
+ int16_t num_send_timers_up;
+
+ uint16_t stream_locked_on;
+ uint16_t ecn_echo_cnt_onq;
+
+ uint16_t free_chunk_cnt;
+ uint16_t free_strmoq_cnt;
+
+ uint8_t stream_locked;
+ uint8_t authenticated; /* packet authenticated ok */
+ /*
+ * This flag indicates that we need to send the first SACK. If in
+ * place it says we have NOT yet sent a SACK and need to.
+ */
+ uint8_t first_ack_sent;
+
+ /* max burst after fast retransmit completes */
+ uint8_t max_burst;
+
+ uint8_t sat_network; /* RTT is in range of sat net or greater */
+ uint8_t sat_network_lockout; /* lockout code */
+ uint8_t burst_limit_applied; /* Burst limit in effect at last send? */
+ /* flag goes on when we are doing a partial delivery api */
+ uint8_t hb_random_values[4];
+ uint8_t fragmented_delivery_inprogress;
+ uint8_t fragment_flags;
+ uint8_t last_flags_delivered;
+ uint8_t hb_ect_randombit;
+ uint8_t hb_random_idx;
+ uint8_t hb_is_disabled; /* is the hb disabled? */
+ uint8_t default_tos;
+
+ /* ECN Nonce stuff */
+ uint8_t receiver_nonce_sum; /* nonce I sum and put in my sack */
+ uint8_t ecn_nonce_allowed; /* Tells us if ECN nonce is on */
+ uint8_t nonce_sum_check;/* On off switch used during re-sync */
+ uint8_t nonce_wait_for_ecne; /* flag when we expect a ECN */
+ uint8_t peer_supports_ecn_nonce;
+
+ /*
+ * This value, plus all other ack'd but above cum-ack is added
+ * together to cross check against the bit that we have yet to
+ * define (probably in the SACK). When the cum-ack is updated, this
+ * sum is updated as well.
+ */
+ uint8_t nonce_sum_expect_base;
+ /* Flag to tell if ECN is allowed */
+ uint8_t ecn_allowed;
+
+ /* flag to indicate if peer can do asconf */
+ uint8_t peer_supports_asconf;
+ /* pr-sctp support flag */
+ uint8_t peer_supports_prsctp;
+ /* peer authentication support flag */
+ uint8_t peer_supports_auth;
+ /* stream resets are supported by the peer */
+ uint8_t peer_supports_strreset;
+
+ /*
+ * packet drop's are supported by the peer, we don't really care
+ * about this but we bookkeep it anyway.
+ */
+ uint8_t peer_supports_pktdrop;
+
+ /* Do we allow V6/V4? */
+ uint8_t ipv4_addr_legal;
+ uint8_t ipv6_addr_legal;
+ /* Address scoping flags */
+ /* scope value for IPv4 */
+ uint8_t ipv4_local_scope;
+ /* scope values for IPv6 */
+ uint8_t local_scope;
+ uint8_t site_scope;
+ /* loopback scope */
+ uint8_t loopback_scope;
+ /* flags to handle send alternate net tracking */
+ uint8_t used_alt_onsack;
+ uint8_t used_alt_asconfack;
+ uint8_t fast_retran_loss_recovery;
+ uint8_t sat_t3_loss_recovery;
+ uint8_t dropped_special_cnt;
+ uint8_t seen_a_sack_this_pkt;
+ uint8_t stream_reset_outstanding;
+ uint8_t stream_reset_out_is_outstanding;
+ uint8_t delayed_connection;
+ uint8_t ifp_had_enobuf;
+ uint8_t saw_sack_with_frags;
+ uint8_t in_restart_hash;
+ uint8_t assoc_up_sent;
+ /* CMT variables */
+ uint8_t cmt_dac_pkts_rcvd;
+ uint8_t sctp_cmt_on_off;
+ uint8_t iam_blocking;
+	/*
+	 * The mapping array is used to track out of order sequences above
+	 * last_acked_seq. 0 indicates packet missing, 1 indicates packet
+	 * rec'd. We slide it up every time we raise last_acked_seq and zero
+	 * trailing locations out. If I get a TSN above the array
+	 * mappingArraySz, I discard the datagram and let retransmit happen.
+	 */
+};
+
+#endif
diff --git a/sys/netinet/sctp_timer.c b/sys/netinet/sctp_timer.c
new file mode 100644
index 0000000..af59fa4
--- /dev/null
+++ b/sys/netinet/sctp_timer.c
@@ -0,0 +1,1736 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_timer.c,v 1.29 2005/03/06 16:04:18 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_ipsec.h"
+#include "opt_compat.h"
+#include "opt_inet6.h"
+#include "opt_inet.h"
+#include "opt_sctp.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/domain.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#ifdef INET6
+#include <sys/domain.h>
+#endif
+
+#include <sys/limits.h>
+
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#define _IP_VHL
+#include <netinet/ip.h>
+#include <netinet/in_pcb.h>
+#include <netinet/in_var.h>
+#include <netinet/ip_var.h>
+
+#ifdef INET6
+#include <netinet/ip6.h>
+#include <netinet6/ip6_var.h>
+#include <netinet6/scope6_var.h>
+#endif /* INET6 */
+
+#include <netinet/sctp_pcb.h>
+
+#ifdef IPSEC
+#include <netinet6/ipsec.h>
+#include <netkey/key.h>
+#endif /* IPSEC */
+#ifdef INET6
+#include <netinet6/sctp6_var.h>
+#endif
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_input.h>
+
+#include <netinet/sctp.h>
+#include <netinet/sctp_uio.h>
+
+
+#ifdef SCTP_DEBUG
+extern uint32_t sctp_debug_on;
+
+#endif /* SCTP_DEBUG */
+
+
+extern unsigned int sctp_early_fr_msec;
+
+void
+sctp_early_fr_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_tmit_chunk *chk, *tp2;
+ struct timeval now, min_wait, tv;
+ unsigned int cur_rtt, cnt = 0, cnt_resend = 0;
+
+	/* an early FR is occurring. */
+ SCTP_GETTIME_TIMEVAL(&now);
+ /* get cur rto in micro-seconds */
+ if (net->lastsa == 0) {
+ /* Hmm no rtt estimate yet? */
+ cur_rtt = stcb->asoc.initial_rto >> 2;
+	} else {
+		cur_rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
+	}
+ if (cur_rtt < sctp_early_fr_msec) {
+ cur_rtt = sctp_early_fr_msec;
+ }
+ cur_rtt *= 1000;
+ tv.tv_sec = cur_rtt / 1000000;
+ tv.tv_usec = cur_rtt % 1000000;
+ min_wait = now;
+ timevalsub(&min_wait, &tv);
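+	/*
+	 * min_wait is now (now - max(cur_rtt, sctp_early_fr_msec)): only
+	 * chunks sent at or before this instant have had a full RTT to be
+	 * acked and are eligible for early-FR marking.
+	 */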
+ if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
+ /*
+ * if we hit here, we don't have enough seconds on the clock
+ * to account for the RTO. We just let the lower seconds be
+ * the bounds and don't worry about it. This may mean we
+ * will mark a lot more than we should.
+ */
+ min_wait.tv_sec = min_wait.tv_usec = 0;
+ }
+ chk = TAILQ_LAST(&stcb->asoc.sent_queue, sctpchunk_listhead);
+ for (; chk != NULL; chk = tp2) {
+ tp2 = TAILQ_PREV(chk, sctpchunk_listhead, sctp_next);
+ if (chk->whoTo != net) {
+ continue;
+ }
+ if (chk->sent == SCTP_DATAGRAM_RESEND)
+ cnt_resend++;
+ else if ((chk->sent > SCTP_DATAGRAM_UNSENT) &&
+ (chk->sent < SCTP_DATAGRAM_RESEND)) {
+ /* pending, may need retran */
+ if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) {
+				/*
+				 * this chunk was sent more recently than
+				 * our wait boundary; skip it and keep
+				 * looking at older chunks.
+				 */
+ continue;
+ } else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) {
+ /*
+ * we must look at the micro seconds to
+ * know.
+ */
+ if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
+ /*
+ * ok it was sent after our boundary
+ * time.
+ */
+ continue;
+ }
+ }
+#ifdef SCTP_EARLYFR_LOGGING
+ sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
+ 4, SCTP_FR_MARKED_EARLY);
+#endif
+ SCTP_STAT_INCR(sctps_earlyfrmrkretrans);
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ /* double book size since we are doing an early FR */
+ chk->book_size_scale++;
+ cnt += chk->send_size;
+ if ((cnt + net->flight_size) > net->cwnd) {
+ /* Mark all we could possibly resend */
+ break;
+ }
+ }
+ }
+ if (cnt) {
+#ifdef SCTP_CWND_MONITOR
+ int old_cwnd;
+
+ old_cwnd = net->cwnd;
+#endif
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR);
+ /*
+ * make a small adjustment to cwnd and force to CA.
+ */
+
+ if (net->cwnd > net->mtu)
+ /* drop down one MTU after sending */
+ net->cwnd -= net->mtu;
+ if (net->cwnd < net->ssthresh)
+ /* still in SS move to CA */
+ net->ssthresh = net->cwnd - 1;
+#ifdef SCTP_CWND_MONITOR
+ sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
+#endif
+ } else if (cnt_resend) {
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR);
+ }
+ /* Restart it? */
+ if (net->flight_size < net->cwnd) {
+ SCTP_STAT_INCR(sctps_earlyfrstrtmr);
+ sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
+ }
+}
+
+void
+sctp_audit_retranmission_queue(struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk;
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
+ printf("Audit invoked on send queue cnt:%d onqueue:%d\n",
+ asoc->sent_queue_retran_cnt,
+ asoc->sent_queue_cnt);
+ }
+#endif /* SCTP_DEBUG */
+ asoc->sent_queue_retran_cnt = 0;
+ asoc->sent_queue_cnt = 0;
+ TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+ if (chk->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+ }
+ asoc->sent_queue_cnt++;
+ }
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if (chk->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+ }
+ }
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
+ printf("Audit completes retran:%d onqueue:%d\n",
+ asoc->sent_queue_retran_cnt,
+ asoc->sent_queue_cnt);
+ }
+#endif /* SCTP_DEBUG */
+}
+
+int
+sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net, uint16_t threshold)
+{
+ if (net) {
+ net->error_count++;
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
+ printf("Error count for %p now %d thresh:%d\n",
+ net, net->error_count,
+ net->failure_threshold);
+ }
+#endif /* SCTP_DEBUG */
+ if (net->error_count > net->failure_threshold) {
+ /* We had a threshold failure */
+ if (net->dest_state & SCTP_ADDR_REACHABLE) {
+ net->dest_state &= ~SCTP_ADDR_REACHABLE;
+ net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
+ if (net == stcb->asoc.primary_destination) {
+ net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
+ }
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
+ stcb,
+ SCTP_FAILED_THRESHOLD,
+ (void *)net);
+ }
+ }
+ /*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE
+ *********ROUTING CODE
+ */
+ /*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE
+ *********ROUTING CODE
+ */
+ }
+ if (stcb == NULL)
+ return (0);
+
+ if (net) {
+ if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
+ stcb->asoc.overall_error_count++;
+ }
+ } else {
+ stcb->asoc.overall_error_count++;
+ }
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
+ printf("Overall error count for %p now %d thresh:%u state:%x\n",
+ &stcb->asoc,
+ stcb->asoc.overall_error_count,
+ (uint32_t) threshold,
+ ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state));
+ }
+#endif /* SCTP_DEBUG */
+	/*
+	 * We specifically do not do >= to give the assoc one more chance
+	 * before we fail it.
+	 */
+ if (stcb->asoc.overall_error_count > threshold) {
+ /* Abort notification sends a ULP notify */
+ struct mbuf *oper;
+
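+		/*
+		 * Build an operational error cause (protocol violation) to
+		 * carry in the ABORT; the trailing 32-bit marker records
+		 * which code path gave up on the association.
+		 */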
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len = sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x40000001);
+ }
+ sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper);
+ return (1);
+ }
+ return (0);
+}
+
+struct sctp_nets *
+sctp_find_alternate_net(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ int highest_ssthresh)
+{
+ /* Find and return an alternate network if possible */
+ struct sctp_nets *alt, *mnet, *hthresh = NULL;
+ int once;
+ uint32_t val = 0;
+
+ if (stcb->asoc.numnets == 1) {
+ /* No others but net */
+ return (TAILQ_FIRST(&stcb->asoc.nets));
+ }
+ if (highest_ssthresh) {
+ TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
+ if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
+ (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)
+ ) {
+ /*
+ * will skip ones that are not-reachable or
+ * unconfirmed
+ */
+ continue;
+ }
+			if (mnet->ssthresh > val) {
+ hthresh = mnet;
+ val = mnet->ssthresh;
+ } else if (val == mnet->ssthresh) {
+ uint32_t rndval;
+ uint8_t this_random;
+
+ if (stcb->asoc.hb_random_idx > 3) {
+ rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
+ memcpy(stcb->asoc.hb_random_values, &rndval,
+ sizeof(stcb->asoc.hb_random_values));
+ this_random = stcb->asoc.hb_random_values[0];
+ stcb->asoc.hb_random_idx = 0;
+ stcb->asoc.hb_ect_randombit = 0;
+ } else {
+ this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
+ stcb->asoc.hb_random_idx++;
+ stcb->asoc.hb_ect_randombit = 0;
+ }
+ if (this_random % 2) {
+ hthresh = mnet;
+ val = mnet->ssthresh;
+ }
+ }
+ }
+ if (hthresh) {
+ return (hthresh);
+ }
+ }
+ mnet = net;
+ once = 0;
+
+ if (mnet == NULL) {
+ mnet = TAILQ_FIRST(&stcb->asoc.nets);
+ }
+ do {
+ alt = TAILQ_NEXT(mnet, sctp_next);
+ if (alt == NULL) {
+ once++;
+ if (once > 1) {
+ break;
+ }
+ alt = TAILQ_FIRST(&stcb->asoc.nets);
+ }
+ if (alt->ro.ro_rt == NULL) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&alt->ro._l_addr;
+ if (sin6->sin6_family == AF_INET6) {
+ (void)sa6_embedscope(sin6, ip6_use_defzone);
+ }
+ rtalloc_ign((struct route *)&alt->ro, 0UL);
+ if (sin6->sin6_family == AF_INET6) {
+ (void)sa6_recoverscope(sin6);
+ }
+ alt->src_addr_selected = 0;
+ }
+ if (
+ ((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
+ (alt->ro.ro_rt != NULL) &&
+ (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))
+ ) {
+ /* Found a reachable address */
+ break;
+ }
+ mnet = alt;
+ } while (alt != NULL);
+
+ if (alt == NULL) {
+ /* Case where NO in-service network exists (dormant state) */
+ /* we rotate destinations */
+ once = 0;
+ mnet = net;
+ do {
+ alt = TAILQ_NEXT(mnet, sctp_next);
+ if (alt == NULL) {
+ once++;
+ if (once > 1) {
+ break;
+ }
+ alt = TAILQ_FIRST(&stcb->asoc.nets);
+ }
+ if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
+ (alt != net)) {
+ /* Found an alternate address */
+ break;
+ }
+ mnet = alt;
+ } while (alt != NULL);
+ }
+ if (alt == NULL) {
+ return (net);
+ }
+ return (alt);
+}
+
+static void
+sctp_backoff_on_timeout(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ int win_probe,
+ int num_marked)
+{
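+ /*
+ * Standard timer backoff: double the RTO, capped at the
+ * association's maximum. On a real timeout (not a window probe)
+ * with chunks marked, also set ssthresh to half the old cwnd
+ * (floored at two MTUs) and collapse cwnd to one MTU, per the
+ * RFC 2960 T3 rules.
+ */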
+ net->RTO <<= 1;
+ if (net->RTO > stcb->asoc.maxrto) {
+ net->RTO = stcb->asoc.maxrto;
+ }
+ if ((win_probe == 0) && num_marked) {
+ /* We don't apply penalty to window probe scenarios */
+#ifdef SCTP_CWND_MONITOR
+ int old_cwnd = net->cwnd;
+
+#endif
+ net->ssthresh = net->cwnd >> 1;
+ if (net->ssthresh < (net->mtu << 1)) {
+ net->ssthresh = (net->mtu << 1);
+ }
+ net->cwnd = net->mtu;
+ /* floor of 1 mtu */
+ if (net->cwnd < net->mtu)
+ net->cwnd = net->mtu;
+#ifdef SCTP_CWND_MONITOR
+ sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
+#endif
+
+ net->partial_bytes_acked = 0;
+ }
+}
+
+extern int sctp_peer_chunk_oh;
+
+static int
+sctp_mark_all_for_resend(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ struct sctp_nets *alt,
+ int window_probe,
+ int *num_marked)
+{
+
+ /*
+ * Mark all chunks (well, not quite all) that were sent to *net for
+ * retransmission. Move them to alt as their destination as well...
+ * We only mark chunks that have been outstanding long enough to
+ * have received feedback.
+ */
+ struct sctp_tmit_chunk *chk, *tp2, *could_be_sent = NULL;
+ struct sctp_nets *lnets;
+ struct timeval now, min_wait, tv;
+ int cur_rtt;
+ int orig_rwnd, audit_tf, num_mk, fir;
+ unsigned int cnt_mk;
+ uint32_t orig_flight;
+ uint32_t tsnlast, tsnfirst;
+
+ /*
+ * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being used,
+ * then pick dest with largest ssthresh for any retransmission.
+ * (iyengar@cis.udel.edu, 2005/08/12)
+ */
+ if (sctp_cmt_on_off) {
+ alt = sctp_find_alternate_net(stcb, net, 1);
+ /*
+ * CUCv2: If a different dest is picked for the
+ * retransmission, then new (rtx-)pseudo_cumack needs to be
+ * tracked for orig dest. Let CUCv2 track new (rtx-)
+ * pseudo-cumack always.
+ */
+ net->find_pseudo_cumack = 1;
+ net->find_rtx_pseudo_cumack = 1;
+ }
+ /* none in flight now */
+ audit_tf = 0;
+ fir = 0;
+ /*
+ * figure out how long a data chunk must be pending before we can
+ * mark it ..
+ */
+ SCTP_GETTIME_TIMEVAL(&now);
+ /* get cur rto in microseconds */
+ cur_rtt = (((net->lastsa >> 2) + net->lastsv) >> 1);
+ cur_rtt *= 1000;
+#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
+ sctp_log_fr(cur_rtt,
+ stcb->asoc.peers_rwnd,
+ window_probe,
+ SCTP_FR_T3_MARK_TIME);
+ sctp_log_fr(net->flight_size,
+ callout_pending(&net->fr_timer.timer),
+ callout_active(&net->fr_timer.timer),
+ SCTP_FR_CWND_REPORT);
+ sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT);
+#endif
+ tv.tv_sec = cur_rtt / 1000000;
+ tv.tv_usec = cur_rtt % 1000000;
+ min_wait = now;
+ timevalsub(&min_wait, &tv);
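+ /*
+ * min_wait = now - cur_rtt: only chunks sent before this boundary
+ * have been outstanding long enough to be marked.
+ */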
+ if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
+ /*
+ * if we hit here, we don't have enough seconds on the clock
+ * to account for the RTO. We just let the lower seconds be
+ * the bounds and don't worry about it. This may mean we
+ * will mark a lot more than we should.
+ */
+ min_wait.tv_sec = min_wait.tv_usec = 0;
+ }
+#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
+ sctp_log_fr(cur_rtt, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
+ sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
+#endif
+ /*
+ * Our rwnd will be incorrect here since we are not adding back the
+ * cnt * mbuf but we will fix that down below.
+ */
+ orig_rwnd = stcb->asoc.peers_rwnd;
+ orig_flight = net->flight_size;
+ net->rto_pending = 0;
+ net->fast_retran_ip = 0;
+ /* Now on to each chunk */
+ num_mk = cnt_mk = 0;
+ tsnfirst = tsnlast = 0;
+ chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ for (; chk != NULL; chk = tp2) {
+ tp2 = TAILQ_NEXT(chk, sctp_next);
+ if ((compare_with_wrap(stcb->asoc.last_acked_seq,
+ chk->rec.data.TSN_seq,
+ MAX_TSN)) ||
+ (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
+ /* Strange case our list got out of order? */
+ printf("Our list is out of order?\n");
+ panic("Out of order list");
+ }
+ if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
+ /*
+ * found one to mark: If it is less than
+ * DATAGRAM_ACKED it MUST not be a skipped or marked
+ * TSN but instead one that is either already set
+ * for retransmission OR one that needs
+ * retransmission.
+ */
+
+ /* validate it's been outstanding long enough */
+#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
+ sctp_log_fr(chk->rec.data.TSN_seq,
+ chk->sent_rcv_time.tv_sec,
+ chk->sent_rcv_time.tv_usec,
+ SCTP_FR_T3_MARK_TIME);
+#endif
+ if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) {
+ /*
+ * we have reached a chunk that was sent
+ * within the last RTT (after min_wait); it
+ * has not been outstanding long enough, so
+ * skip it.
+ */
+#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
+ sctp_log_fr(0,
+ chk->sent_rcv_time.tv_sec,
+ chk->sent_rcv_time.tv_usec,
+ SCTP_FR_T3_STOPPED);
+#endif
+ continue;
+ } else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) &&
+ (window_probe == 0)) {
+ /*
+ * we must look at the microseconds to
+ * decide.
+ */
+ if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
+ /*
+ * ok it was sent after our boundary
+ * time.
+ */
+#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
+ sctp_log_fr(0,
+ chk->sent_rcv_time.tv_sec,
+ chk->sent_rcv_time.tv_usec,
+ SCTP_FR_T3_STOPPED);
+#endif
+ continue;
+ }
+ }
+ if (PR_SCTP_TTL_ENABLED(chk->flags)) {
+ /* Is it expired? */
+ if ((now.tv_sec > chk->rec.data.timetodrop.tv_sec) ||
+ ((chk->rec.data.timetodrop.tv_sec == now.tv_sec) &&
+ (now.tv_usec > chk->rec.data.timetodrop.tv_usec))) {
+ /* Yes so drop it */
+ if (chk->data) {
+ sctp_release_pr_sctp_chunk(stcb,
+ chk,
+ (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
+ &stcb->asoc.sent_queue);
+ }
+ }
+ continue;
+ }
+ if (PR_SCTP_RTX_ENABLED(chk->flags)) {
+ /* Has it been retransmitted more than the limit (kept in timetodrop.tv_sec)? */
+ if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) {
+ if (chk->data) {
+ sctp_release_pr_sctp_chunk(stcb,
+ chk,
+ (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
+ &stcb->asoc.sent_queue);
+ }
+ }
+ continue;
+ }
+ if (chk->sent != SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ num_mk++;
+ if (fir == 0) {
+ fir = 1;
+ tsnfirst = chk->rec.data.TSN_seq;
+ }
+ tsnlast = chk->rec.data.TSN_seq;
+#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
+ sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
+ 0, SCTP_FR_T3_MARKED);
+
+#endif
+ }
+ if (stcb->asoc.total_flight_count > 0)
+ stcb->asoc.total_flight_count--;
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ SCTP_STAT_INCR(sctps_markedretrans);
+ net->flight_size -= chk->book_size;
+ stcb->asoc.peers_rwnd += chk->send_size;
+ stcb->asoc.peers_rwnd += sctp_peer_chunk_oh;
+
+ /* reset the TSN for striking and other FR stuff */
+ chk->rec.data.doing_fast_retransmit = 0;
+ /* Clear any timing so NO RTT measurement is done on this chunk */
+ chk->do_rtt = 0;
+ if (alt != net) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->no_fr_allowed = 1;
+ chk->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+ } else {
+ chk->no_fr_allowed = 0;
+ if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
+ chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
+ } else {
+ chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
+ }
+ }
+ if (sctp_cmt_on_off == 1) {
+ chk->no_fr_allowed = 1;
+ }
+ } else if (chk->sent == SCTP_DATAGRAM_ACKED) {
+ /* remember highest acked one */
+ could_be_sent = chk;
+ }
+ if (chk->sent == SCTP_DATAGRAM_RESEND) {
+ cnt_mk++;
+ }
+ }
+#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
+ sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
+#endif
+
+ if (stcb->asoc.total_flight >= (orig_flight - net->flight_size)) {
+ stcb->asoc.total_flight -= (orig_flight - net->flight_size);
+ } else {
+ stcb->asoc.total_flight = 0;
+ stcb->asoc.total_flight_count = 0;
+ audit_tf = 1;
+ }
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
+ if (num_mk) {
+ printf("LAST TSN marked was %x\n", tsnlast);
+ printf("Num marked for retransmission was %d peer-rwd:%ld\n",
+ num_mk, (u_long)stcb->asoc.peers_rwnd);
+ }
+ }
+#endif
+ *num_marked = num_mk;
+ if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) {
+ /* fix it so we retransmit the highest acked anyway */
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ cnt_mk++;
+ could_be_sent->sent = SCTP_DATAGRAM_RESEND;
+ }
+ if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
+#ifdef INVARIANTS
+ printf("Local Audit says there are %d for retran asoc cnt:%d\n",
+ cnt_mk, stcb->asoc.sent_queue_retran_cnt);
+#endif
+#ifndef SCTP_AUDITING_ENABLED
+ stcb->asoc.sent_queue_retran_cnt = cnt_mk;
+#endif
+ }
+ /* Now check for an ECN Echo that may be stranded */
+ TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
+ if ((chk->whoTo == net) &&
+ (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = alt;
+ if (chk->sent != SCTP_DATAGRAM_RESEND) {
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ }
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ }
+ if (audit_tf) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
+ printf("Audit total flight due to negative value net:%p\n",
+ net);
+ }
+#endif /* SCTP_DEBUG */
+ stcb->asoc.total_flight = 0;
+ stcb->asoc.total_flight_count = 0;
+ /* Clear all networks flight size */
+ TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
+ lnets->flight_size = 0;
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
+ printf("Net:%p c-f cwnd:%d ssthresh:%d\n",
+ lnets, lnets->cwnd, lnets->ssthresh);
+ }
+#endif /* SCTP_DEBUG */
+ }
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if (chk->sent < SCTP_DATAGRAM_RESEND) {
+ stcb->asoc.total_flight += chk->book_size;
+ chk->whoTo->flight_size += chk->book_size;
+ stcb->asoc.total_flight_count++;
+ }
+ }
+ }
+ /*
+ * Set up the ECN nonce re-sync point. We do this since
+ * retransmissions are NOT set up for ECN. This means that, due to
+ * Karn's rule, we don't know the total of the peer's ECN bits.
+ */
+ chk = TAILQ_FIRST(&stcb->asoc.send_queue);
+ if (chk == NULL) {
+ stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
+ } else {
+ stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq;
+ }
+ stcb->asoc.nonce_wait_for_ecne = 0;
+ stcb->asoc.nonce_sum_check = 0;
+ /* we currently always return 0 from this function */
+ return (0);
+}
+
+static void
+sctp_move_all_chunks_to_alt(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ struct sctp_nets *alt)
+{
+ struct sctp_association *asoc;
+ struct sctp_stream_out *outs;
+ struct sctp_tmit_chunk *chk;
+ struct sctp_stream_queue_pending *sp;
+
+ if (net == alt)
+ /* nothing to do */
+ return;
+
+ asoc = &stcb->asoc;
+
+ /*
+ * now go through all the streams checking for chunks sent to our bad
+ * network.
+ */
+ TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
+ /* now clean up any chunks here */
+ TAILQ_FOREACH(sp, &outs->outqueue, next) {
+ if (sp->net == net) {
+ sctp_free_remote_addr(sp->net);
+ sp->net = alt;
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ }
+ }
+ /* Now check the pending queue */
+ TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
+ if (chk->whoTo == net) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ }
+
+}
+
+int
+sctp_t3rxt_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
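+ /*
+ * T3-rtx expired: decide whether this is a window probe, pick an
+ * alternate destination, mark outstanding chunks for resend, end
+ * any fast-retransmit recovery, back off the RTO/cwnd and run
+ * threshold management (which may destroy the association).
+ */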
+ struct sctp_nets *alt;
+ int win_probe, num_mk;
+
+#ifdef SCTP_FR_LOGGING
+ sctp_log_fr(sctps_datadropchklmt.sctps_senddata, 0, 0, SCTP_FR_T3_TIMEOUT);
+#ifdef SCTP_CWND_LOGGING
+ {
+ struct sctp_nets *lnet;
+
+ TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+ if (net == lnet) {
+ sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3);
+ } else {
+ sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3);
+ }
+ }
+ }
+#endif
+#endif
+ /* Find an alternate and mark those for retransmission */
+ if ((stcb->asoc.peers_rwnd == 0) &&
+ (stcb->asoc.total_flight < net->mtu)) {
+ SCTP_STAT_INCR(sctps_timowindowprobe);
+ win_probe = 1;
+ } else {
+ win_probe = 0;
+ }
+ alt = sctp_find_alternate_net(stcb, net, 0);
+ sctp_mark_all_for_resend(stcb, net, alt, win_probe, &num_mk);
+ /* FR Loss recovery just ended with the T3. */
+ stcb->asoc.fast_retran_loss_recovery = 0;
+
+ /* CMT FR loss recovery ended with the T3 */
+ net->fast_retran_loss_recovery = 0;
+
+ /*
+ * setup the sat loss recovery that prevents satellite cwnd advance.
+ */
+ stcb->asoc.sat_t3_loss_recovery = 1;
+ stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;
+
+ /* Backoff the timer and cwnd */
+ sctp_backoff_on_timeout(stcb, net, win_probe, num_mk);
+ if (win_probe == 0) {
+ /* Not a window probe, so do normal threshold management */
+ if (sctp_threshold_management(inp, stcb, net,
+ stcb->asoc.max_send_times)) {
+ /* Association was destroyed */
+ return (1);
+ } else {
+ if (net != stcb->asoc.primary_destination) {
+ /* send an immediate HB if our RTO is stale */
+ struct timeval now;
+ unsigned int ms_goneby;
+
+ SCTP_GETTIME_TIMEVAL(&now);
+ if (net->last_sent_time.tv_sec) {
+ ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
+ } else {
+ ms_goneby = 0;
+ }
+ if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
+ /*
+ * no recent feedback in an RTO or
+ * more; request an RTT update
+ */
+ sctp_send_hb(stcb, 1, net);
+ }
+ }
+ }
+ } else {
+ /*
+ * For a window probe we don't penalize the net but only
+ * the association. This may fail the association if SACKs are
+ * not coming back. If SACKs are coming with the rwnd locked at
+ * 0, we will continue to hold things, waiting for the rwnd to
+ * rise.
+ */
+ if (sctp_threshold_management(inp, stcb, NULL,
+ stcb->asoc.max_send_times)) {
+ /* Association was destroyed */
+ return (1);
+ }
+ }
+ if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
+ /* Move all pending over too */
+ sctp_move_all_chunks_to_alt(stcb, net, alt);
+ /* Was it our primary? */
+ if ((stcb->asoc.primary_destination == net) && (alt != net)) {
+ /*
+ * Yes, note it as such and find an alternate. Note:
+ * this means the HB code must use this flag to restore
+ * the primary if it goes active again, AND if someone
+ * does a change-primary then this flag must be cleared
+ * from any net structures.
+ */
+ if (sctp_set_primary_addr(stcb,
+ (struct sockaddr *)NULL,
+ alt) == 0) {
+ net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
+ net->src_addr_selected = 0;
+ }
+ }
+ }
+ /*
+ * Special case for the cookie-echoed case: we don't do output but must
+ * await the COOKIE-ACK before retransmission
+ */
+ if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
+ /*
+ * Here we just reset the timer and start again since we
+ * have not established the asoc
+ */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+ return (0);
+ }
+ if (stcb->asoc.peer_supports_prsctp) {
+ struct sctp_tmit_chunk *lchk;
+
+ lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
+ /* C3. See if we need to send a Fwd-TSN */
+ if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point,
+ stcb->asoc.last_acked_seq, MAX_TSN)) {
+ /*
+ * ISSUE with ECN, see FWD-TSN processing for notes
+ * on issues that will occur when the ECN NONCE
+ * stuff is put into SCTP for cross checking.
+ */
+ send_forward_tsn(stcb, &stcb->asoc);
+ if (lchk) {
+ /* Assure a timer is up */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
+ }
+ }
+ }
+#ifdef SCTP_CWND_MONITOR
+ sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX);
+#endif
+ return (0);
+}
+
+int
+sctp_t1init_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ /* bump the thresholds */
+ if (stcb->asoc.delayed_connection) {
+ /*
+ * special hook for delayed connection. The library did NOT
+ * complete the rest of its sends.
+ */
+ stcb->asoc.delayed_connection = 0;
+ sctp_send_initiate(inp, stcb);
+ return (0);
+ }
+ if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) {
+ return (0);
+ }
+ if (sctp_threshold_management(inp, stcb, net,
+ stcb->asoc.max_init_times)) {
+ /* Association was destroyed */
+ return (1);
+ }
+ stcb->asoc.dropped_special_cnt = 0;
+ sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0);
+ if (stcb->asoc.initial_init_rto_max < net->RTO) {
+ net->RTO = stcb->asoc.initial_init_rto_max;
+ }
+ if (stcb->asoc.numnets > 1) {
+ /* If we have more than one addr use it */
+ struct sctp_nets *alt;
+
+ alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0);
+ if ((alt != NULL) && (alt != stcb->asoc.primary_destination)) {
+ sctp_move_all_chunks_to_alt(stcb, stcb->asoc.primary_destination, alt);
+ stcb->asoc.primary_destination = alt;
+ }
+ }
+ /* Send out a new init */
+ sctp_send_initiate(inp, stcb);
+ return (0);
+}
+
+/*
+ * For cookie and asconf we actually need to find and mark for resend, then
+ * increment the resend counter (after all the threshold management stuff of
+ * course).
+ */
+int
+sctp_cookie_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_nets *alt;
+ struct sctp_tmit_chunk *cookie;
+
+ /* first before all else we must find the cookie */
+ TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
+ if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
+ break;
+ }
+ }
+ if (cookie == NULL) {
+ if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
+ /* FOOBAR! */
+ struct mbuf *oper;
+
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ oper->m_len = sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(oper->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x40000002);
+ }
+ sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR,
+ oper);
+ } else {
+#ifdef INVARIANTS
+ panic("Cookie timer expires in wrong state?");
+#else
+ printf("Strange, cookie timer expired in state %d, not cookie-echoed\n", SCTP_GET_STATE(&stcb->asoc));
+ return (0);
+#endif
+ }
+ return (0);
+ }
+ /* Ok we found the cookie, threshold management next */
+ if (sctp_threshold_management(inp, stcb, cookie->whoTo,
+ stcb->asoc.max_init_times)) {
+ /* Assoc is over */
+ return (1);
+ }
+ /*
+ * cleared threshold management, now let's back off the address & select
+ * an alternate
+ */
+ stcb->asoc.dropped_special_cnt = 0;
+ sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0);
+ alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
+ if (alt != cookie->whoTo) {
+ sctp_free_remote_addr(cookie->whoTo);
+ cookie->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ /* Now mark the retran info */
+ if (cookie->sent != SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ }
+ cookie->sent = SCTP_DATAGRAM_RESEND;
+ /*
+ * The output routine will now kick out the cookie again. Note we
+ * don't mark any data chunks for retransmission, so FR (or a send
+ * timer) will need to kick in to move these.
+ */
+ return (0);
+}
+
+int
+sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_nets *alt;
+ struct sctp_tmit_chunk *strrst = NULL, *chk = NULL;
+
+ if (stcb->asoc.stream_reset_outstanding == 0) {
+ return (0);
+ }
+ /* find the existing STRRESET, we use the seq number we sent out on */
+ sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst);
+ if (strrst == NULL) {
+ return (0);
+ }
+ /* do threshold management */
+ if (sctp_threshold_management(inp, stcb, strrst->whoTo,
+ stcb->asoc.max_send_times)) {
+ /* Assoc is over */
+ return (1);
+ }
+ /*
+ * cleared threshold management, now let's back off the address & select
+ * an alternate
+ */
+ sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0);
+ alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0);
+ sctp_free_remote_addr(strrst->whoTo);
+ strrst->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+
+ /* See if an ECN Echo is also stranded */
+ TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
+ if ((chk->whoTo == net) &&
+ (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
+ sctp_free_remote_addr(chk->whoTo);
+ if (chk->sent != SCTP_DATAGRAM_RESEND) {
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ }
+ chk->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ }
+ if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
+ /*
+ * If the address went un-reachable, we need to move to
+ * alternates for ALL chk's in queue
+ */
+ sctp_move_all_chunks_to_alt(stcb, net, alt);
+ }
+ /* mark the retran info */
+ if (strrst->sent != SCTP_DATAGRAM_RESEND)
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ strrst->sent = SCTP_DATAGRAM_RESEND;
+
+ /* restart the timer */
+ sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
+ return (0);
+}
+
+int
+sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_nets *alt;
+ struct sctp_tmit_chunk *asconf, *chk;
+
+ /* is this the first send, or a retransmission? */
+ if (stcb->asoc.asconf_sent == 0) {
+ /* compose a new ASCONF chunk and send it */
+ sctp_send_asconf(stcb, net);
+ } else {
+ /* Retransmission of the existing ASCONF needed... */
+
+ /* find the existing ASCONF */
+ TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
+ sctp_next) {
+ if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
+ break;
+ }
+ }
+ if (asconf == NULL) {
+ return (0);
+ }
+ /* do threshold management */
+ if (sctp_threshold_management(inp, stcb, asconf->whoTo,
+ stcb->asoc.max_send_times)) {
+ /* Assoc is over */
+ return (1);
+ }
+ /*
+ * PETER? FIX? How will the following code ever run? If the
+ * max_send_times is hit, threshold management will blow away
+ * the association?
+ */
+ if (asconf->snd_count > stcb->asoc.max_send_times) {
+ /*
+ * Something is rotten: the peer is not responding to
+ * ASCONFs but maybe is to data etc., e.g. it is not
+ * properly handling the chunk type upper bits. Mark
+ * this peer as ASCONF-incapable and clean up.
+ */
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
+ printf("asconf_timer: Peer has not responded to our repeated ASCONFs\n");
+ }
+#endif /* SCTP_DEBUG */
+ sctp_asconf_cleanup(stcb, net);
+ return (0);
+ }
+ /*
+ * cleared threshold management, now let's back off the address
+ * & select an alternate
+ */
+ sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0);
+ alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0);
+ sctp_free_remote_addr(asconf->whoTo);
+ asconf->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+
+ /* See if a ECN Echo is also stranded */
+ TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
+ if ((chk->whoTo == net) &&
+ (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = alt;
+ if (chk->sent != SCTP_DATAGRAM_RESEND) {
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ }
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ }
+ if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
+ /*
+ * If the address went un-reachable, we need to move
+ * to alternates for ALL chk's in queue
+ */
+ sctp_move_all_chunks_to_alt(stcb, net, alt);
+ }
+ /* mark the retran info */
+ if (asconf->sent != SCTP_DATAGRAM_RESEND)
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ asconf->sent = SCTP_DATAGRAM_RESEND;
+ }
+ return (0);
+}
+
+/*
+ * For the shutdown and shutdown-ack, we do not keep one around on the
+ * control queue. This means we must generate a new one and call the general
+ * chunk output routine, AFTER having done threshold management.
+ */
+int
+sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_nets *alt;
+
+ /* first threshold management */
+ if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
+ /* Assoc is over */
+ return (1);
+ }
+ /* second select an alternative */
+ alt = sctp_find_alternate_net(stcb, net, 0);
+
+ /* third generate a shutdown into the queue for the alternate net */
+ if (alt) {
+ sctp_send_shutdown(stcb, alt);
+ } else {
+ /*
+ * if alt is NULL, there is no destination to send to
+ */
+ return (0);
+ }
+ /* fourth restart timer */
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
+ return (0);
+}
+
+int
+sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_nets *alt;
+
+ /* first threshold management */
+ if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
+ /* Assoc is over */
+ return (1);
+ }
+ /* second select an alternative */
+ alt = sctp_find_alternate_net(stcb, net, 0);
+
+ /* third generate a shutdown-ack into the queue for the alternate net */
+ sctp_send_shutdown_ack(stcb, alt);
+
+ /* fourth restart timer */
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
+ return (0);
+}
+
+static void
+sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb)
+{
+ struct sctp_stream_out *outs;
+ struct sctp_stream_queue_pending *sp;
+ unsigned int chks_in_queue = 0;
+ int being_filled = 0;
+
+ /*
+ * This function is ONLY called when the send/sent queues are empty.
+ */
+ if ((stcb == NULL) || (inp == NULL))
+ return;
+
+ if (stcb->asoc.sent_queue_retran_cnt) {
+ printf("Hmm, sent_queue_retran_cnt is non-zero %d\n",
+ stcb->asoc.sent_queue_retran_cnt);
+ stcb->asoc.sent_queue_retran_cnt = 0;
+ }
+ SCTP_TCB_SEND_LOCK(stcb);
+ if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
+ int i, cnt = 0;
+
+ /* Check to see if a spoke fell off the wheel */
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
+ sctp_insert_on_wheel(stcb, &stcb->asoc, &stcb->asoc.strmout[i], 1);
+ cnt++;
+ }
+ }
+ if (cnt) {
+ /* yep, we lost a spoke or two */
+ printf("Found an additional %d streams NOT on outwheel, corrected\n", cnt);
+ } else {
+ /* no spokes lost, */
+ stcb->asoc.total_output_queue_size = 0;
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ return;
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ /* Check to see if some data queued, if so report it */
+ TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
+ if (!TAILQ_EMPTY(&outs->outqueue)) {
+ TAILQ_FOREACH(sp, &outs->outqueue, next) {
+ if (sp->msg_is_complete)
+ being_filled++;
+ chks_in_queue++;
+ }
+ }
+ }
+ if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
+ printf("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
+ stcb->asoc.stream_queue_cnt, chks_in_queue);
+ }
+ if (chks_in_queue) {
+ /* call the output queue function */
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3);
+ if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
+ (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
+ /*
+ * Probably should go in and make it go back through
+ * and add fragments allowed
+ */
+ if (being_filled == 0) {
+ printf("Still nothing moved %d chunks are stuck\n",
+ chks_in_queue);
+ }
+ }
+ } else {
+ printf("Found no chunks on any queue tot:%lu\n",
+ (u_long)stcb->asoc.total_output_queue_size);
+ stcb->asoc.total_output_queue_size = 0;
+ }
+}
+
+int
+sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net, int cnt_of_unconf)
+{
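+ /*
+ * HB timer: back off the net if it never answered the previous
+ * HB, audit the stream queues if data appears stuck, then send a
+ * new HB (or, while unconfirmed addresses remain, up to max_burst
+ * extra HBs to confirm them).
+ */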
+ if (net) {
+ if (net->hb_responded == 0) {
+ sctp_backoff_on_timeout(stcb, net, 1, 0);
+ }
+ /* Zero PBA, if it needs it */
+ if (net->partial_bytes_acked) {
+ net->partial_bytes_acked = 0;
+ }
+ }
+ if ((stcb->asoc.total_output_queue_size > 0) &&
+ (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
+ (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
+ sctp_audit_stream_queues_for_size(inp, stcb);
+ }
+ /* Send a new HB, this will do threshold management, pick a new dest */
+ if (cnt_of_unconf == 0) {
+ if (sctp_send_hb(stcb, 0, NULL) < 0) {
+ return (1);
+ }
+ } else {
+ /*
+ * this will send out extra HBs, up to max_burst, if there are
+ * any unconfirmed addresses.
+ */
+ int cnt_sent = 0;
+
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
+ (net->dest_state & SCTP_ADDR_REACHABLE)) {
+ cnt_sent++;
+ if (sctp_send_hb(stcb, 1, net) == 0) {
+ break;
+ }
+ if (cnt_sent >= stcb->asoc.max_burst)
+ break;
+ }
+ }
+ }
+ return (0);
+}
+
+int
+sctp_is_hb_timer_running(struct sctp_tcb *stcb)
+{
+ if (callout_pending(&stcb->asoc.hb_timer.timer)) {
+ /* it's running */
+ return (1);
+ } else {
+ /* nope */
+ return (0);
+ }
+}
+
+int
+sctp_is_sack_timer_running(struct sctp_tcb *stcb)
+{
+ if (callout_pending(&stcb->asoc.dack_timer.timer)) {
+ /* it's running */
+ return (1);
+ } else {
+ /* nope */
+ return (0);
+ }
+}
+
+
+#define SCTP_NUMBER_OF_MTU_SIZES 18
+static uint32_t mtu_sizes[] = {
+ 68,
+ 296,
+ 508,
+ 512,
+ 544,
+ 576,
+ 1006,
+ 1492,
+ 1500,
+ 1536,
+ 2002,
+ 2048,
+ 4352,
+ 4464,
+ 8166,
+ 17914,
+ 32000,
+ 65535
+};
+
+
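+/*
+ * mtu_sizes[] above lists common link MTUs in ascending order;
+ * sctp_getnext_mtu() returns the first entry strictly larger than
+ * cur_mtu, which the path-MTU raise timer uses to probe upward.
+ */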
+static uint32_t
+sctp_getnext_mtu(struct sctp_inpcb *inp, uint32_t cur_mtu)
+{
+ /* select another MTU that is just bigger than this one */
+ int i;
+
+ for (i = 0; i < SCTP_NUMBER_OF_MTU_SIZES; i++) {
+ if (cur_mtu < mtu_sizes[i]) {
+ /* found the next larger MTU */
+ return (mtu_sizes[i]);
+ }
+ }
+ /* here return the highest allowable */
+ return (cur_mtu);
+}
+
+
+void
+sctp_pathmtu_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ uint32_t next_mtu;
+
+ /* restart the timer in any case */
+ next_mtu = sctp_getnext_mtu(inp, net->mtu);
+ if (next_mtu <= net->mtu) {
+ /* nothing to do */
+ return;
+ }
+ if (net->ro.ro_rt != NULL) {
+ /*
+ * only if we have a route and interface do we set anything.
+ * Note we always restart the timer though just in case it
+ * is updated (i.e. the ifp) or route/ifp is populated.
+ */
+ if (net->ro.ro_rt->rt_ifp != NULL) {
+ if (net->ro.ro_rt->rt_ifp->if_mtu > next_mtu) {
+ /* ok it will fit out the door */
+ net->mtu = next_mtu;
+ }
+ }
+ }
+ /* restart the timer */
+ sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
+}
+
+void
+sctp_autoclose_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct timeval tn, *tim_touse;
+ struct sctp_association *asoc;
+ int ticks_gone_by;
+
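+ /*
+ * Autoclose: measure how long the association has been idle (no
+ * send or receive). Once the idle time reaches
+ * sctp_autoclose_ticks, flush output and, if the queues are
+ * clean, initiate SHUTDOWN; otherwise re-arm the timer for the
+ * remaining time.
+ */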
+ SCTP_GETTIME_TIMEVAL(&tn);
+ if (stcb->asoc.sctp_autoclose_ticks &&
+ sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
+ /* Auto close is on */
+ asoc = &stcb->asoc;
+ /* pick the time to use */
+ if (asoc->time_last_rcvd.tv_sec >
+ asoc->time_last_sent.tv_sec) {
+ tim_touse = &asoc->time_last_rcvd;
+ } else {
+ tim_touse = &asoc->time_last_sent;
+ }
+ /* Has long enough transpired to autoclose? */
+ ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec);
+ if ((ticks_gone_by > 0) &&
+ (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) {
+ /*
+ * autoclose time has hit, call the output routine,
+ * which should do nothing just to be SURE we don't
+ * have hanging data. We can then safely check the
+ * queues and know that we are clear to send
+ * shutdown
+ */
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR);
+ /* Are we clean? */
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue)) {
+ /*
+ * there is nothing queued to send, so I'm
+ * done...
+ */
+ if (SCTP_GET_STATE(asoc) !=
+ SCTP_STATE_SHUTDOWN_SENT) {
+ /* only send SHUTDOWN 1st time thru */
+ sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
+ asoc->state = SCTP_STATE_SHUTDOWN_SENT;
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+ stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ }
+ }
+ } else {
+ /*
+ * No auto close at this time, reset t-o to check
+ * later
+ */
+ int tmp;
+
+ /* fool the timer startup to use the time left */
+ tmp = asoc->sctp_autoclose_ticks;
+ asoc->sctp_autoclose_ticks -= ticks_gone_by;
+ sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
+ net);
+ /* restore the real tick value */
+ asoc->sctp_autoclose_ticks = tmp;
+ }
+ }
+}
+
+
+void
+sctp_iterator_timer(struct sctp_iterator *it)
+{
+ int iteration_count = 0;
+
+ /*
+ * only one iterator can run at a time. This is the only way we can
+ * cleanly pull ep's from underneath all the running iterators when
+ * an ep is freed.
+ */
+ SCTP_ITERATOR_LOCK();
+ if (it->inp == NULL) {
+ /* iterator is complete */
+done_with_iterator:
+ SCTP_ITERATOR_UNLOCK();
+ SCTP_INP_INFO_WLOCK();
+ LIST_REMOVE(it, sctp_nxt_itr);
+ /* stopping the callout is not needed, in theory */
+ SCTP_INP_INFO_WUNLOCK();
+ callout_stop(&it->tmr.timer);
+ if (it->function_atend != NULL) {
+ (*it->function_atend) (it->pointer, it->val);
+ }
+ SCTP_FREE(it);
+ return;
+ }
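+ /*
+ * Resumable iteration: visit each endpoint whose flags/features
+ * match, run the per-endpoint function, then each association in
+ * the desired state. After SCTP_ITERATOR_MAX_AT_ONCE associations
+ * we record our position on the inp/stcb and re-arm the timer to
+ * continue later.
+ */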
+select_a_new_ep:
+ SCTP_INP_WLOCK(it->inp);
+ while (((it->pcb_flags) &&
+ ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
+ ((it->pcb_features) &&
+ ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
+ /* endpoint flags or features don't match, so keep looking */
+ if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
+ SCTP_INP_WUNLOCK(it->inp);
+ goto done_with_iterator;
+ }
+ SCTP_INP_WUNLOCK(it->inp);
+ it->inp = LIST_NEXT(it->inp, sctp_list);
+ if (it->inp == NULL) {
+ goto done_with_iterator;
+ }
+ SCTP_INP_WLOCK(it->inp);
+ }
+ if ((it->inp->inp_starting_point_for_iterator != NULL) &&
+ (it->inp->inp_starting_point_for_iterator != it)) {
+ printf("Iterator collision, waiting for one at %p\n",
+ it->inp);
+ SCTP_INP_WUNLOCK(it->inp);
+ goto start_timer_return;
+ }
+ /* mark the current iterator on the endpoint */
+ it->inp->inp_starting_point_for_iterator = it;
+ SCTP_INP_WUNLOCK(it->inp);
+ SCTP_INP_RLOCK(it->inp);
+ /* now go through each assoc which is in the desired state */
+ if (it->stcb == NULL) {
+ /* run the per instance function */
+ if (it->function_inp != NULL)
+ (*it->function_inp) (it->inp, it->pointer, it->val);
+
+ it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
+ }
+ SCTP_INP_RUNLOCK(it->inp);
+ if ((it->stcb) &&
+ (it->stcb->asoc.stcb_starting_point_for_iterator == it)) {
+ it->stcb->asoc.stcb_starting_point_for_iterator = NULL;
+ }
+ while (it->stcb) {
+ SCTP_TCB_LOCK(it->stcb);
+ if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
+ /* not in the right state... keep looking */
+ SCTP_TCB_UNLOCK(it->stcb);
+ goto next_assoc;
+ }
+ /* mark the current iterator on the assoc */
+ it->stcb->asoc.stcb_starting_point_for_iterator = it;
+ /* see if we have limited out the iterator loop */
+ iteration_count++;
+ if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
+ start_timer_return:
+ /* set a timer to continue this later */
+ SCTP_TCB_UNLOCK(it->stcb);
+ sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR,
+ (struct sctp_inpcb *)it, NULL, NULL);
+ SCTP_ITERATOR_UNLOCK();
+ return;
+ }
+ /* run function on this one */
+ (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
+
+ /*
+ * we lie here, it really needs to have its own type but
+ * first I must verify that this won't affect things :-0
+ */
+ if (it->no_chunk_output == 0)
+ sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3);
+
+ SCTP_TCB_UNLOCK(it->stcb);
+next_assoc:
+ it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
+ }
+ /* done with all assocs on this endpoint, move on to next endpoint */
+ SCTP_INP_WLOCK(it->inp);
+ it->inp->inp_starting_point_for_iterator = NULL;
+ SCTP_INP_WUNLOCK(it->inp);
+ if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
+ it->inp = NULL;
+ } else {
+ SCTP_INP_INFO_RLOCK();
+ it->inp = LIST_NEXT(it->inp, sctp_list);
+ SCTP_INP_INFO_RUNLOCK();
+ }
+ if (it->inp == NULL) {
+ goto done_with_iterator;
+ }
+ goto select_a_new_ep;
+}
diff --git a/sys/netinet/sctp_timer.h b/sys/netinet/sctp_timer.h
new file mode 100644
index 0000000..6ce4b89
--- /dev/null
+++ b/sys/netinet/sctp_timer.h
@@ -0,0 +1,99 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_timer.h,v 1.6 2005/03/06 16:04:18 itojun Exp $ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_timer_h__
+#define __sctp_timer_h__
+
+
+
+#if defined(_KERNEL)
+
+void
+sctp_early_fr_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net);
+
+struct sctp_nets *
+sctp_find_alternate_net(struct sctp_tcb *,
+ struct sctp_nets *, int high_ssthresh);
+
+int
+sctp_threshold_management(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *, uint16_t);
+
+int
+sctp_t3rxt_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+int
+sctp_t1init_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+int
+sctp_shutdown_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+int
+sctp_heartbeat_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *, int);
+
+int sctp_is_hb_timer_running(struct sctp_tcb *stcb);
+int sctp_is_sack_timer_running(struct sctp_tcb *stcb);
+
+int
+sctp_cookie_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+
+void
+sctp_pathmtu_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+
+int
+sctp_shutdownack_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+int
+sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net);
+
+int
+sctp_asconf_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+
+void
+sctp_autoclose_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *net);
+
+void sctp_audit_retranmission_queue(struct sctp_association *);
+
+void sctp_iterator_timer(struct sctp_iterator *it);
+
+
+#endif
+#endif
diff --git a/sys/netinet/sctp_uio.h b/sys/netinet/sctp_uio.h
new file mode 100644
index 0000000..abbc858
--- /dev/null
+++ b/sys/netinet/sctp_uio.h
@@ -0,0 +1,946 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_uio.h,v 1.11 2005/03/06 16:04:18 itojun Exp $ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_uio_h__
+#define __sctp_uio_h__
+
+
+
+
+#if ! defined(_KERNEL)
+#include <stdint.h>
+#endif
+#include <sys/types.h>
+#include <sys/socket.h>
+
+typedef uint32_t sctp_assoc_t;
+
+/* On/Off setup for subscription to events */
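+/* (each nonzero field enables delivery of the corresponding
+ * notification on the socket; the struct is normally passed via the
+ * SCTP_EVENTS socket option) */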
+struct sctp_event_subscribe {
+ uint8_t sctp_data_io_event;
+ uint8_t sctp_association_event;
+ uint8_t sctp_address_event;
+ uint8_t sctp_send_failure_event;
+ uint8_t sctp_peer_error_event;
+ uint8_t sctp_shutdown_event;
+ uint8_t sctp_partial_delivery_event;
+ uint8_t sctp_adaptation_layer_event;
+ uint8_t sctp_authentication_event;
+ uint8_t sctp_stream_reset_events;
+};
+
+/* ancillary data types */
+#define SCTP_INIT 0x0001
+#define SCTP_SNDRCV 0x0002
+#define SCTP_EXTRCV 0x0003
+/*
+ * ancillary data structures
+ */
+struct sctp_initmsg {
+ uint32_t sinit_num_ostreams;
+ uint32_t sinit_max_instreams;
+ uint16_t sinit_max_attempts;
+ uint16_t sinit_max_init_timeo;
+};
+
+/* We add 96 bytes to the size of sctp_sndrcvinfo.
+ * This makes the current structure 128 bytes long
+ * which is nicely 64 bit aligned but also has room
+ * for us to add more and keep ABI compatibility.
+ * For example, already we have the sctp_extrcvinfo
+ * when enabled which is 48 bytes.
+ */
+
+#define SCTP_ALIGN_RESV_PAD 96
+
+struct sctp_sndrcvinfo {
+ uint16_t sinfo_stream;
+ uint16_t sinfo_ssn;
+ uint16_t sinfo_flags;
+ uint32_t sinfo_ppid;
+ uint32_t sinfo_context;
+ uint32_t sinfo_timetolive;
+ uint32_t sinfo_tsn;
+ uint32_t sinfo_cumtsn;
+ sctp_assoc_t sinfo_assoc_id;
+ uint8_t __reserve_pad[SCTP_ALIGN_RESV_PAD];
+};
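+
+/*
+ * Usage sketch (userland, not part of this header): sctp_sndrcvinfo
+ * normally travels as SCTP_SNDRCV ancillary data on sendmsg() and
+ * recvmsg(), e.g.
+ *
+ *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+ *	cmsg->cmsg_level = IPPROTO_SCTP;
+ *	cmsg->cmsg_type = SCTP_SNDRCV;
+ *	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
+ *	memcpy(CMSG_DATA(cmsg), &sinfo, sizeof(sinfo));
+ */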
+
+struct sctp_extrcvinfo {
+ uint16_t sinfo_stream;
+ uint16_t sinfo_ssn;
+ uint16_t sinfo_flags;
+ uint32_t sinfo_ppid;
+ uint32_t sinfo_context;
+ uint32_t sinfo_timetolive;
+ uint32_t sinfo_tsn;
+ uint32_t sinfo_cumtsn;
+ sctp_assoc_t sinfo_assoc_id;
+ uint16_t next_flags;
+ uint16_t next_stream;
+ uint32_t next_asocid;
+ uint32_t next_length;
+ uint32_t next_ppid;
+};
+
+#define SCTP_NO_NEXT_MSG 0x0000
+#define SCTP_NEXT_MSG_AVAIL 0x0001
+#define SCTP_NEXT_MSG_ISCOMPLETE 0x0002
+#define SCTP_NEXT_MSG_IS_UNORDERED 0x0004
+
+struct sctp_snd_all_completes {
+ uint16_t sall_stream;
+ uint16_t sall_flags;
+ uint32_t sall_ppid;
+ uint32_t sall_context;
+ uint32_t sall_num_sent;
+ uint32_t sall_num_failed;
+};
+
+/* Flags that go into the sinfo->sinfo_flags field */
+#define SCTP_EOF 0x0100/* Start shutdown procedures */
+#define SCTP_ABORT 0x0200/* Send an ABORT to peer */
+#define SCTP_UNORDERED 0x0400/* Message is un-ordered */
+#define SCTP_ADDR_OVER 0x0800/* Override the primary-address */
+#define SCTP_SENDALL 0x1000/* Send this on all associations */
+#define SCTP_EOR 0x2000/* end of message signal */
+/* for the endpoint */
+
+/* The lower byte is an enumeration of PR-SCTP policies */
+#define SCTP_PR_SCTP_TTL 0x0001/* Time based PR-SCTP */
+#define SCTP_PR_SCTP_BUF 0x0002/* Buffer based PR-SCTP */
+#define SCTP_PR_SCTP_RTX 0x0003/* Number of retransmissions based PR-SCTP */
+
+#define PR_SCTP_POLICY(x) ((x) & 0xff)
+#define PR_SCTP_ENABLED(x) (PR_SCTP_POLICY(x) != 0)
+#define PR_SCTP_TTL_ENABLED(x) (PR_SCTP_POLICY(x) == SCTP_PR_SCTP_TTL)
+#define PR_SCTP_BUF_ENABLED(x) (PR_SCTP_POLICY(x) == SCTP_PR_SCTP_BUF)
+#define PR_SCTP_RTX_ENABLED(x) (PR_SCTP_POLICY(x) == SCTP_PR_SCTP_RTX)
+
+/* Stat's */
+struct sctp_pcbinfo {
+ uint32_t ep_count;
+ uint32_t asoc_count;
+ uint32_t laddr_count;
+ uint32_t raddr_count;
+ uint32_t chk_count;
+ uint32_t readq_count;
+ uint32_t free_chunks;
+ uint32_t stream_oque;
+};
+
+struct sctp_sockstat {
+ sctp_assoc_t ss_assoc_id;
+ uint32_t ss_total_sndbuf;
+ uint32_t ss_total_recv_buf;
+};
+
+/*
+ * notification event structures
+ */
+
+/*
+ * association change event
+ */
+struct sctp_assoc_change {
+ uint16_t sac_type;
+ uint16_t sac_flags;
+ uint32_t sac_length;
+ uint16_t sac_state;
+ uint16_t sac_error;
+ uint16_t sac_outbound_streams;
+ uint16_t sac_inbound_streams;
+ sctp_assoc_t sac_assoc_id;
+};
+
+/* sac_state values */
+#define SCTP_COMM_UP 0x0001
+#define SCTP_COMM_LOST 0x0002
+#define SCTP_RESTART 0x0003
+#define SCTP_SHUTDOWN_COMP 0x0004
+#define SCTP_CANT_STR_ASSOC 0x0005
+
+
+/*
+ * Address event
+ */
+struct sctp_paddr_change {
+ uint16_t spc_type;
+ uint16_t spc_flags;
+ uint32_t spc_length;
+ struct sockaddr_storage spc_aaddr;
+ uint32_t spc_state;
+ uint32_t spc_error;
+ sctp_assoc_t spc_assoc_id;
+};
+
+/* paddr state values */
+#define SCTP_ADDR_AVAILABLE 0x0001
+#define SCTP_ADDR_UNREACHABLE 0x0002
+#define SCTP_ADDR_REMOVED 0x0003
+#define SCTP_ADDR_ADDED 0x0004
+#define SCTP_ADDR_MADE_PRIM 0x0005
+#define SCTP_ADDR_CONFIRMED 0x0006
+
+/*
+ * CAUTION: these are user exposed SCTP addr reachability states must be
+ * compatible with SCTP_ADDR states in sctp_constants.h
+ */
+#ifdef SCTP_ACTIVE
+#undef SCTP_ACTIVE
+#endif
+#define SCTP_ACTIVE 0x0001 /* SCTP_ADDR_REACHABLE */
+
+#ifdef SCTP_INACTIVE
+#undef SCTP_INACTIVE
+#endif
+#define SCTP_INACTIVE 0x0002 /* SCTP_ADDR_NOT_REACHABLE */
+
+#ifdef SCTP_UNCONFIRMED
+#undef SCTP_UNCONFIRMED
+#endif
+#define SCTP_UNCONFIRMED 0x0200 /* SCTP_ADDR_UNCONFIRMED */
+
+#ifdef SCTP_NOHEARTBEAT
+#undef SCTP_NOHEARTBEAT
+#endif
+#define SCTP_NOHEARTBEAT 0x0040 /* SCTP_ADDR_NOHB */
+
+
+/* remote error events */
+struct sctp_remote_error {
+ uint16_t sre_type;
+ uint16_t sre_flags;
+ uint32_t sre_length;
+ uint16_t sre_error;
+ sctp_assoc_t sre_assoc_id;
+ uint8_t sre_data[4];
+};
+
+/* data send failure event */
+struct sctp_send_failed {
+ uint16_t ssf_type;
+ uint16_t ssf_flags;
+ uint32_t ssf_length;
+ uint32_t ssf_error;
+ struct sctp_sndrcvinfo ssf_info;
+ sctp_assoc_t ssf_assoc_id;
+ uint8_t ssf_data[4];
+};
+
+/* flag that indicates state of data */
+#define SCTP_DATA_UNSENT 0x0001 /* inqueue never on wire */
+#define SCTP_DATA_SENT 0x0002 /* on wire at failure */
+
+/* shutdown event */
+struct sctp_shutdown_event {
+ uint16_t sse_type;
+ uint16_t sse_flags;
+ uint32_t sse_length;
+ sctp_assoc_t sse_assoc_id;
+};
+
+/* Adaptation layer indication stuff */
+struct sctp_adaptation_event {
+ uint16_t sai_type;
+ uint16_t sai_flags;
+ uint32_t sai_length;
+ uint32_t sai_adaptation_ind;
+ sctp_assoc_t sai_assoc_id;
+};
+
+struct sctp_setadaptation {
+ uint32_t ssb_adaptation_ind;
+};
+
+/* old spelling kept for compatibility */
+struct sctp_adaption_event {
+ uint16_t sai_type;
+ uint16_t sai_flags;
+ uint32_t sai_length;
+ uint32_t sai_adaption_ind;
+ sctp_assoc_t sai_assoc_id;
+};
+
+struct sctp_setadaption {
+ uint32_t ssb_adaption_ind;
+};
+
+
+/*
+ * Partial Delivery API event
+ */
+struct sctp_pdapi_event {
+ uint16_t pdapi_type;
+ uint16_t pdapi_flags;
+ uint32_t pdapi_length;
+ uint32_t pdapi_indication;
+ sctp_assoc_t pdapi_assoc_id;
+};
+
+/* indication values */
+#define SCTP_PARTIAL_DELIVERY_ABORTED 0x0001
+
+
+/*
+ * authentication key event
+ */
+struct sctp_authkey_event {
+ uint16_t auth_type;
+ uint16_t auth_flags;
+ uint32_t auth_length;
+ uint16_t auth_keynumber;
+ uint16_t auth_altkeynumber;
+ uint32_t auth_indication;
+ sctp_assoc_t auth_assoc_id;
+};
+
+/* indication values */
+#define SCTP_AUTH_NEWKEY 0x0001
+
+
+/*
+ * stream reset event
+ */
+struct sctp_stream_reset_event {
+ uint16_t strreset_type;
+ uint16_t strreset_flags;
+ uint32_t strreset_length;
+ sctp_assoc_t strreset_assoc_id;
+ uint16_t strreset_list[0];
+};
+
+/* flags in strreset_flags field */
+#define SCTP_STRRESET_INBOUND_STR 0x0001
+#define SCTP_STRRESET_OUTBOUND_STR 0x0002
+#define SCTP_STRRESET_ALL_STREAMS 0x0004
+#define SCTP_STRRESET_STREAM_LIST 0x0008
+#define SCTP_STRRESET_FAILED 0x0010
+
+
+/* SCTP notification event */
+struct sctp_tlv {
+ uint16_t sn_type;
+ uint16_t sn_flags;
+ uint32_t sn_length;
+};
+
+union sctp_notification {
+ struct sctp_tlv sn_header;
+ struct sctp_assoc_change sn_assoc_change;
+ struct sctp_paddr_change sn_paddr_change;
+ struct sctp_remote_error sn_remote_error;
+ struct sctp_send_failed sn_send_failed;
+ struct sctp_shutdown_event sn_shutdown_event;
+ struct sctp_adaptation_event sn_adaptation_event;
+ /* compatibility: same as above */
+ struct sctp_adaption_event sn_adaption_event;
+ struct sctp_pdapi_event sn_pdapi_event;
+ struct sctp_authkey_event sn_auth_event;
+ struct sctp_stream_reset_event sn_strreset_event;
+};
+
+/* notification types */
+#define SCTP_ASSOC_CHANGE 0x0001
+#define SCTP_PEER_ADDR_CHANGE 0x0002
+#define SCTP_REMOTE_ERROR 0x0003
+#define SCTP_SEND_FAILED 0x0004
+#define SCTP_SHUTDOWN_EVENT 0x0005
+#define SCTP_ADAPTATION_INDICATION 0x0006
+/* same as above */
+#define SCTP_ADAPTION_INDICATION 0x0006
+#define SCTP_PARTIAL_DELIVERY_EVENT 0x0007
+#define SCTP_AUTHENTICATION_EVENT 0x0008
+#define SCTP_STREAM_RESET_EVENT 0x0009
+
+
+/*
+ * socket option structs
+ */
+
+struct sctp_paddrparams {
+ sctp_assoc_t spp_assoc_id;
+ struct sockaddr_storage spp_address;
+ uint32_t spp_hbinterval;
+ uint16_t spp_pathmaxrxt;
+ uint32_t spp_pathmtu;
+ uint32_t spp_sackdelay;
+ uint32_t spp_flags;
+ uint32_t spp_ipv6_flowlabel;
+ uint8_t spp_ipv4_tos;
+
+};
+
+#define SPP_HB_ENABLE 0x00000001
+#define SPP_HB_DISABLE 0x00000002
+#define SPP_HB_DEMAND 0x00000004
+#define SPP_PMTUD_ENABLE 0x00000008
+#define SPP_PMTUD_DISABLE 0x00000010
+#define SPP_SACKDELAY_ENABLE 0x00000020
+#define SPP_SACKDELAY_DISABLE 0x00000040
+#define SPP_HB_TIME_IS_ZERO 0x00000080
+#define SPP_IPV6_FLOWLABEL 0x00000100
+#define SPP_IPV4_TOS 0x00000200
+
+struct sctp_paddrinfo {
+ sctp_assoc_t spinfo_assoc_id;
+ struct sockaddr_storage spinfo_address;
+ int32_t spinfo_state;
+ uint32_t spinfo_cwnd;
+ uint32_t spinfo_srtt;
+ uint32_t spinfo_rto;
+ uint32_t spinfo_mtu;
+};
+
+struct sctp_rtoinfo {
+ sctp_assoc_t srto_assoc_id;
+ uint32_t srto_initial;
+ uint32_t srto_max;
+ uint32_t srto_min;
+};
+
+struct sctp_assocparams {
+ sctp_assoc_t sasoc_assoc_id;
+ uint16_t sasoc_asocmaxrxt;
+ uint16_t sasoc_number_peer_destinations;
+ uint32_t sasoc_peer_rwnd;
+ uint32_t sasoc_local_rwnd;
+ uint32_t sasoc_cookie_life;
+};
+
+struct sctp_setprim {
+ sctp_assoc_t ssp_assoc_id;
+ struct sockaddr_storage ssp_addr;
+};
+
+struct sctp_setpeerprim {
+ sctp_assoc_t sspp_assoc_id;
+ struct sockaddr_storage sspp_addr;
+};
+
+struct sctp_getaddresses {
+ sctp_assoc_t sget_assoc_id;
+ /* addr is filled in for N * sockaddr_storage */
+ struct sockaddr addr[1];
+};
+
+struct sctp_setstrm_timeout {
+ sctp_assoc_t ssto_assoc_id;
+ uint32_t ssto_timeout;
+ uint32_t ssto_streamid_start;
+ uint32_t ssto_streamid_end;
+};
+
+struct sctp_status {
+ sctp_assoc_t sstat_assoc_id;
+ int32_t sstat_state;
+ uint32_t sstat_rwnd;
+ uint16_t sstat_unackdata;
+ uint16_t sstat_penddata;
+ uint16_t sstat_instrms;
+ uint16_t sstat_outstrms;
+ uint32_t sstat_fragmentation_point;
+ struct sctp_paddrinfo sstat_primary;
+};
+
+/*
+ * AUTHENTICATION support
+ */
+/* SCTP_AUTH_CHUNK */
+struct sctp_authchunk {
+ uint8_t sauth_chunk;
+};
+
+/* SCTP_AUTH_KEY */
+struct sctp_authkey {
+ sctp_assoc_t sca_assoc_id;
+ uint16_t sca_keynumber;
+ uint8_t sca_key[0];
+};
+
+/* SCTP_HMAC_IDENT */
+struct sctp_hmacalgo {
+ uint16_t shmac_idents[0];
+};
+
+/* AUTH hmac_id */
+#define SCTP_AUTH_HMAC_ID_RSVD 0x0000
+#define SCTP_AUTH_HMAC_ID_SHA1 0x0001 /* default, mandatory */
+#define SCTP_AUTH_HMAC_ID_MD5 0x0002 /* deprecated */
+#define SCTP_AUTH_HMAC_ID_SHA256 0x0003
+#define SCTP_AUTH_HMAC_ID_SHA224 0x8001
+#define SCTP_AUTH_HMAC_ID_SHA384 0x8002
+#define SCTP_AUTH_HMAC_ID_SHA512 0x8003
+
+
+/* SCTP_AUTH_ACTIVE_KEY / SCTP_AUTH_DELETE_KEY */
+struct sctp_authkeyid {
+ sctp_assoc_t scact_assoc_id;
+ uint16_t scact_keynumber;
+};
+
+/* SCTP_PEER_AUTH_CHUNKS / SCTP_LOCAL_AUTH_CHUNKS */
+struct sctp_authchunks {
+ sctp_assoc_t gauth_assoc_id;
+ uint8_t gauth_chunks[0];
+};
+
+struct sctp_assoc_value {
+ sctp_assoc_t assoc_id;
+ uint32_t assoc_value;
+};
+
+#define MAX_ASOC_IDS_RET 255
+struct sctp_assoc_ids {
+ uint16_t asls_assoc_start; /* array of indexes, starts at 0 */
+ uint8_t asls_numb_present;
+ uint8_t asls_more_to_get;
+ sctp_assoc_t asls_assoc_id[MAX_ASOC_IDS_RET];
+};
+
+struct sctp_cwnd_args {
+ struct sctp_nets *net; /* network to */
+ uint32_t cwnd_new_value;/* cwnd in k */
+ uint32_t inflight; /* flightsize in k */
+ uint32_t pseudo_cumack;
+ int cwnd_augment; /* increment to it */
+ uint8_t meets_pseudo_cumack;
+ uint8_t need_new_pseudo_cumack;
+ uint8_t cnt_in_send;
+ uint8_t cnt_in_str;
+};
+
+struct sctp_blk_args {
+ uint32_t onsb; /* in 1k bytes */
+ uint32_t sndlen; /* len of send being attempted */
+ uint32_t peer_rwnd; /* rwnd of peer */
+ uint16_t send_sent_qcnt;/* chnk cnt */
+ uint16_t stream_qcnt; /* chnk cnt */
+ uint16_t chunks_on_oque;/* chunks out */
+ uint16_t flight_size; /* flight size in k */
+};
+
+/*
+ * Max number of streams we can reset in one request. Note this is dictated
+ * not by the define but by the size of an mbuf cluster, so don't change this
+ * define thinking you can specify more. You must do multiple resets if you
+ * want to reset more than SCTP_MAX_EXPLICT_STR_RESET streams.
+ */
+#define SCTP_MAX_EXPLICT_STR_RESET 1000
+
+#define SCTP_RESET_LOCAL_RECV 0x0001
+#define SCTP_RESET_LOCAL_SEND 0x0002
+#define SCTP_RESET_BOTH 0x0003
+#define SCTP_RESET_TSN 0x0004
+
+struct sctp_stream_reset {
+ sctp_assoc_t strrst_assoc_id;
+ uint16_t strrst_flags;
+ uint16_t strrst_num_streams; /* 0 == ALL */
+ uint16_t strrst_list[0];/* list if strrst_num_streams is not 0 */
+};
+
+
+struct sctp_get_nonce_values {
+ sctp_assoc_t gn_assoc_id;
+ uint32_t gn_peers_tag;
+ uint32_t gn_local_tag;
+};
+
+/* Debugging logs */
+struct sctp_str_log {
+ uint32_t n_tsn;
+ uint32_t e_tsn;
+ uint16_t n_sseq;
+ uint16_t e_sseq;
+};
+
+struct sctp_sb_log {
+ uint32_t stcb;
+ uint32_t so_sbcc;
+ uint32_t stcb_sbcc;
+ uint32_t incr;
+};
+
+struct sctp_fr_log {
+ uint32_t largest_tsn;
+ uint32_t largest_new_tsn;
+ uint32_t tsn;
+};
+
+struct sctp_fr_map {
+ uint32_t base;
+ uint32_t cum;
+ uint32_t high;
+};
+
+struct sctp_rwnd_log {
+ uint32_t rwnd;
+ uint32_t send_size;
+ uint32_t overhead;
+ uint32_t new_rwnd;
+};
+
+struct sctp_mbcnt_log {
+ uint32_t total_queue_size;
+ uint32_t size_change;
+ uint32_t total_queue_mb_size;
+ uint32_t mbcnt_change;
+};
+
+struct sctp_sack_log {
+ uint32_t cumack;
+ uint32_t oldcumack;
+ uint32_t tsn;
+ uint16_t numGaps;
+ uint16_t numDups;
+};
+
+struct sctp_lock_log {
+ uint32_t sock;
+ uint32_t inp;
+ uint8_t tcb_lock;
+ uint8_t inp_lock;
+ uint8_t info_lock;
+ uint8_t sock_lock;
+ uint8_t sockrcvbuf_lock;
+ uint8_t socksndbuf_lock;
+ uint8_t create_lock;
+ uint8_t resv;
+};
+
+struct sctp_rto_log {
+ uint32_t net;
+ uint32_t rtt;
+ uint32_t rttvar;
+ uint8_t direction;
+};
+
+struct sctp_nagle_log {
+ uint32_t stcb;
+ uint32_t total_flight;
+ uint32_t total_in_queue;
+ uint16_t count_in_queue;
+ uint16_t count_in_flight;
+};
+
+struct sctp_sbwake_log {
+ uint32_t stcb;
+ uint16_t send_q;
+ uint16_t sent_q;
+ uint16_t flight;
+ uint16_t wake_cnt;
+ uint8_t stream_qcnt; /* chnk cnt */
+ uint8_t chunks_on_oque; /* chunks out */
+ uint8_t sbflags;
+ uint8_t sctpflags;
+};
+
+struct sctp_misc_info {
+ uint32_t log1;
+ uint32_t log2;
+ uint32_t log3;
+ uint32_t log4;
+};
+
+struct sctp_log_closing {
+ uint32_t inp;
+ uint32_t stcb;
+ uint32_t sctp_flags;
+ uint16_t state;
+ int16_t loc;
+};
+
+struct sctp_mbuf_log {
+ struct mbuf *mp;
+ caddr_t ext;
+ caddr_t data;
+ uint16_t size;
+ uint8_t refcnt;
+ uint8_t mbuf_flags;
+};
+
+struct sctp_cwnd_log {
+ uint32_t time_event;
+ uint8_t from;
+ uint8_t event_type;
+ uint8_t resv[2];
+ union {
+ struct sctp_log_closing close;
+ struct sctp_blk_args blk;
+ struct sctp_cwnd_args cwnd;
+ struct sctp_str_log strlog;
+ struct sctp_fr_log fr;
+ struct sctp_fr_map map;
+ struct sctp_rwnd_log rwnd;
+ struct sctp_mbcnt_log mbcnt;
+ struct sctp_sack_log sack;
+ struct sctp_lock_log lock;
+ struct sctp_rto_log rto;
+ struct sctp_sb_log sb;
+ struct sctp_nagle_log nagle;
+ struct sctp_sbwake_log wake;
+ struct sctp_mbuf_log mb;
+ struct sctp_misc_info misc;
+ } x;
+};
+
+struct sctp_cwnd_log_req {
+ int num_in_log; /* Number in log */
+ int num_ret; /* Number returned */
+ int start_at; /* start at this one */
+ int end_at; /* end at this one */
+ struct sctp_cwnd_log log[0];
+};
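+
+/*
+ * Paging sketch: because a reply must fit in one mbuf cluster, large
+ * logs are drained in slices by advancing start_at. The option used to
+ * fetch the log is whatever sctp.h defines for the cwnd log; "sd" is a
+ * hypothetical SCTP socket.
+ *
+ *   uint8_t buf[sizeof(struct sctp_cwnd_log_req) +
+ *       32 * sizeof(struct sctp_cwnd_log)];
+ *   struct sctp_cwnd_log_req *req = (struct sctp_cwnd_log_req *)buf;
+ *   socklen_t len = sizeof(buf);
+ *
+ *   req->start_at = 0;
+ *   req->end_at = 31;
+ *   // after the getsockopt, req->num_ret entries of req->log[] are
+ *   // valid and req->num_in_log says how many exist in total
+ */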
+
+struct sctpstat {
+ /* MIB according to RFC 3873 */
+ u_long sctps_currestab; /* sctpStats 1 (Gauge32) */
+ u_long sctps_activeestab; /* sctpStats 2 (Counter32) */
+ u_long sctps_passiveestab; /* sctpStats 3 (Counter32) */
+ u_long sctps_aborted; /* sctpStats 4 (Counter32) */
+ u_long sctps_shutdown; /* sctpStats 5 (Counter32) */
+ u_long sctps_outoftheblue; /* sctpStats 6 (Counter32) */
+ u_long sctps_checksumerrors; /* sctpStats 7 (Counter32) */
+ u_long sctps_outcontrolchunks; /* sctpStats 8 (Counter64) */
+ u_long sctps_outorderchunks; /* sctpStats 9 (Counter64) */
+ u_long sctps_outunorderchunks; /* sctpStats 10 (Counter64) */
+ u_long sctps_incontrolchunks; /* sctpStats 11 (Counter64) */
+ u_long sctps_inorderchunks; /* sctpStats 12 (Counter64) */
+ u_long sctps_inunorderchunks; /* sctpStats 13 (Counter64) */
+ u_long sctps_fragusrmsgs; /* sctpStats 14 (Counter64) */
+ u_long sctps_reasmusrmsgs; /* sctpStats 15 (Counter64) */
+ u_long sctps_outpackets;/* sctpStats 16 (Counter64) */
+ u_long sctps_inpackets; /* sctpStats 17 (Counter64) */
+ u_long sctps_discontinuitytime; /* sctpStats 18 (TimeStamp) */
+ /* input statistics: */
+ u_long sctps_recvpackets; /* total input packets */
+ u_long sctps_recvdatagrams; /* total input datagrams */
+ u_long sctps_recvpktwithdata;
+ u_long sctps_recvsacks; /* total input SACK chunks */
+ u_long sctps_recvdata; /* total input DATA chunks */
+ u_long sctps_recvdupdata; /* total input duplicate DATA chunks */
+ u_long sctps_recvheartbeat; /* total input HB chunks */
+ u_long sctps_recvheartbeatack; /* total input HB-ACK chunks */
+ u_long sctps_recvecne; /* total input ECNE chunks */
+ u_long sctps_recvauth; /* total input AUTH chunks */
+ u_long sctps_recvauthmissing; /* total input chunks missing AUTH */
+ u_long sctps_recvivalhmacid; /* total number of invalid HMAC ids
+ * received */
+ u_long sctps_recvivalkeyid; /* total number of invalid secret ids
+ * received */
+ u_long sctps_recvauthfailed; /* total number of auth failed */
+ u_long sctps_recvexpress; /* total fast path receives all one
+ * chunk */
+ u_long sctps_recvexpressm; /* total fast path multi-part data */
+ /* output statistics: */
+ u_long sctps_sendpackets; /* total output packets */
+ u_long sctps_sendsacks; /* total output SACKs */
+ u_long sctps_senddata; /* total output DATA chunks */
+ u_long sctps_sendretransdata; /* total output retransmitted DATA
+ * chunks */
+ u_long sctps_sendfastretrans; /* total output fast retransmitted
+ * DATA chunks */
+ u_long sctps_sendmultfastretrans; /* U-del */
+ u_long sctps_sendheartbeat; /* total output HB chunks */
+ u_long sctps_sendecne; /* total output ECNE chunks */
+ u_long sctps_sendauth; /* total output AUTH chunks FIXME */
+ u_long sctps_senderrors;/* ip_output error counter */
+ /* PCKDROPREP statistics: */
+ u_long sctps_pdrpfmbox; /* */
+ u_long sctps_pdrpfehos; /* */
+ u_long sctps_pdrpmbda; /* */
+ u_long sctps_pdrpmbct; /* */
+ u_long sctps_pdrpbwrpt; /* */
+ u_long sctps_pdrpcrupt; /* */
+ u_long sctps_pdrpnedat; /* */
+ u_long sctps_pdrppdbrk; /* */
+ u_long sctps_pdrptsnnf; /* */
+ u_long sctps_pdrpdnfnd; /* */
+ u_long sctps_pdrpdiwnp; /* */
+ u_long sctps_pdrpdizrw; /* */
+ u_long sctps_pdrpbadd; /* */
+ u_long sctps_pdrpmark; /* */
+ /* timeouts */
+ u_long sctps_timoiterator; /* */
+ u_long sctps_timodata; /* */
+ u_long sctps_timowindowprobe; /* */
+ u_long sctps_timoinit; /* */
+ u_long sctps_timosack; /* */
+ u_long sctps_timoshutdown; /* */
+ u_long sctps_timoheartbeat; /* */
+ u_long sctps_timocookie;/* */
+ u_long sctps_timosecret;/* */
+ u_long sctps_timopathmtu; /* */
+ u_long sctps_timoshutdownack; /* */
+ u_long sctps_timoshutdownguard; /* */
+ u_long sctps_timostrmrst; /* */
+ u_long sctps_timoearlyfr; /* */
+ u_long sctps_timoasconf;/* */
+ u_long sctps_timoautoclose; /* */
+ u_long sctps_timoassockill; /* */
+ u_long sctps_timoinpkill; /* */
+ /* Early fast retransmission counters */
+ u_long sctps_earlyfrstart;
+ u_long sctps_earlyfrstop;
+ u_long sctps_earlyfrmrkretrans;
+ u_long sctps_earlyfrstpout;
+ u_long sctps_earlyfrstpidsck1;
+ u_long sctps_earlyfrstpidsck2;
+ u_long sctps_earlyfrstpidsck3;
+ u_long sctps_earlyfrstpidsck4;
+ u_long sctps_earlyfrstrid;
+ u_long sctps_earlyfrstrout;
+ u_long sctps_earlyfrstrtmr;
+ /* others */
+ u_long sctps_hdrops; /* packet shorter than header */
+ u_long sctps_badsum; /* checksum error */
+ u_long sctps_noport; /* no endpoint for port */
+ u_long sctps_badvtag; /* bad v-tag */
+ u_long sctps_badsid; /* bad SID */
+ u_long sctps_nomem; /* no memory */
+ u_long sctps_fastretransinrtt; /* number of multiple FR in a RTT
+ * window */
+ u_long sctps_markedretrans;
+ u_long sctps_naglesent; /* nagle allowed sending */
+ u_long sctps_naglequeued; /* nagle doesn't allow sending */
+ u_long sctps_maxburstqueued; /* max burst doesn't allow sending */
+ u_long sctps_ifnomemqueued; /* */
+ u_long sctps_windowprobed; /* total number of window probes sent */
+ u_long sctps_lowlevelerr;
+ u_long sctps_lowlevelerrusr;
+ u_long sctps_datadropchklmt;
+ u_long sctps_datadroprwnd;
+ u_long sctps_ecnereducedcwnd;
+ u_long sctps_vtagexpress; /* Used express lookup via vtag */
+ u_long sctps_vtagbogus; /* Collision in express lookup. */
+ u_long sctps_primary_randry; /* Number of times the sender ran dry
+ * of user data on primary */
+ u_long sctps_cmt_randry;/* Same for above */
+ u_long sctps_slowpath_sack; /* Sacks the slow way */
+ u_long sctps_wu_sacks_sent; /* Window Update only sacks sent */
+ u_long sctps_locks_in_rcv; /* How many so_rcv buf locks we did */
+ u_long sctps_locks_in_rcva; /* How many so_rcv buf locks we did */
+ u_long sctps_locks_in_rcvb; /* How many so_rcv buf locks we did */
+ u_long sctps_locks_in_rcvc; /* How many so_rcv buf locks we did */
+ u_long sctps_locks_in_rcvd; /* How many so_rcv buf locks we did */
+ u_long sctps_locks_in_rcve; /* How many so_rcv buf locks we did */
+ u_long sctps_locks_in_rcvf; /* How many so_rcv buf locks we did */
+};
+
+#define SCTP_STAT_INCR(_x) SCTP_STAT_INCR_BY(_x,1)
+#define SCTP_STAT_DECR(_x) SCTP_STAT_DECR_BY(_x,1)
+#define SCTP_STAT_INCR_BY(_x,_d) atomic_add_long(&sctpstat._x, _d)
+#define SCTP_STAT_DECR_BY(_x,_d) atomic_add_long(&sctpstat._x, -(_d))
+/* The following macros are for handling MIB values. */
+#define SCTP_STAT_INCR_COUNTER32(_x) SCTP_STAT_INCR(_x)
+#define SCTP_STAT_INCR_COUNTER64(_x) SCTP_STAT_INCR(_x)
+#define SCTP_STAT_INCR_GAUGE32(_x) SCTP_STAT_INCR(_x)
+#define SCTP_STAT_DECR_COUNTER32(_x) SCTP_STAT_DECR(_x)
+#define SCTP_STAT_DECR_COUNTER64(_x) SCTP_STAT_DECR(_x)
+#define SCTP_STAT_DECR_GAUGE32(_x) SCTP_STAT_DECR(_x)
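+
+/*
+ * Usage sketch: the macros expand to atomic adds on the global sctpstat,
+ * so counters can be bumped from any context without extra locking:
+ *
+ *   SCTP_STAT_INCR(sctps_recvsacks);          // one more SACK processed
+ *   SCTP_STAT_INCR_GAUGE32(sctps_currestab);  // association established
+ *   SCTP_STAT_DECR_GAUGE32(sctps_currestab);  // association torn down
+ */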
+
+/*
+ * Kernel defined for sctp_send
+ */
+#if defined(_KERNEL)
+int
+sctp_lower_sosend(struct socket *so,
+ struct sockaddr *addr,
+ struct uio *uio,
+ struct mbuf *top,
+ struct mbuf *control,
+ int flags,
+ int use_rcvinfo,
+ struct sctp_sndrcvinfo *srcv,
+ struct thread *p
+);
+
+int
+sctp_sorecvmsg(struct socket *so,
+ struct uio *uio,
+ struct mbuf **mp,
+ struct sockaddr *from,
+ int fromlen,
+ int *msg_flags,
+ struct sctp_sndrcvinfo *sinfo,
+ int filling_sinfo);
+
+
+#endif
+
+/*
+ * API system calls
+ */
+
+#if !(defined(_KERNEL))
+
+__BEGIN_DECLS
+int sctp_peeloff __P((int, sctp_assoc_t));
+int sctp_bindx __P((int, struct sockaddr *, int, int));
+int sctp_connectx __P((int, const struct sockaddr *, int));
+int sctp_getaddrlen __P((sa_family_t));
+int sctp_getpaddrs __P((int, sctp_assoc_t, struct sockaddr **));
+void sctp_freepaddrs __P((struct sockaddr *));
+int sctp_getladdrs __P((int, sctp_assoc_t, struct sockaddr **));
+void sctp_freeladdrs __P((struct sockaddr *));
+int sctp_opt_info __P((int, sctp_assoc_t, int, void *, socklen_t *));
+
+ssize_t sctp_sendmsg __P((int, const void *, size_t,
+    const struct sockaddr *,
+    socklen_t, uint32_t, uint32_t, uint16_t, uint32_t, uint32_t));
+
+ssize_t sctp_send __P((int sd, const void *msg, size_t len,
+    const struct sctp_sndrcvinfo *sinfo, int flags));
+
+ssize_t sctp_sendx __P((int sd, const void *msg, size_t len,
+    struct sockaddr *addrs, int addrcnt,
+    struct sctp_sndrcvinfo *sinfo, int flags));
+
+ssize_t sctp_sendmsgx __P((int sd, const void *, size_t,
+    struct sockaddr *, int,
+    uint32_t, uint32_t, uint16_t, uint32_t, uint32_t));
+
+sctp_assoc_t sctp_getassocid __P((int sd, struct sockaddr *sa));
+
+ssize_t sctp_recvmsg __P((int, void *, size_t, struct sockaddr *,
+    socklen_t *, struct sctp_sndrcvinfo *, int *));
+
+__END_DECLS
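+
+/*
+ * A minimal one-to-many sketch of the calls above (the peer address and
+ * payload are hypothetical): send one message on stream 0, then read a
+ * reply together with its sctp_sndrcvinfo.
+ *
+ *   int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
+ *   struct sockaddr_in to = { .sin_len = sizeof(to),
+ *       .sin_family = AF_INET, .sin_port = htons(9) };
+ *   struct sctp_sndrcvinfo sinfo;
+ *   char buf[1024];
+ *   socklen_t fromlen = 0;
+ *   int flags = 0;
+ *
+ *   inet_pton(AF_INET, "192.0.2.1", &to.sin_addr);
+ *   sctp_sendmsg(sd, "hello", 5, (struct sockaddr *)&to, sizeof(to),
+ *       0, 0, 0, 0, 0);       // ppid, flags, stream, ttl, context
+ *   sctp_recvmsg(sd, buf, sizeof(buf), NULL, &fromlen, &sinfo, &flags);
+ */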
+
+#endif /* !_KERNEL */
+#endif /* !__sctp_uio_h__ */
diff --git a/sys/netinet/sctp_usrreq.c b/sys/netinet/sctp_usrreq.c
new file mode 100644
index 0000000..15f29fc
--- /dev/null
+++ b/sys/netinet/sctp_usrreq.c
@@ -0,0 +1,4852 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+
+#include "opt_ipsec.h"
+#include "opt_inet6.h"
+#include "opt_inet.h"
+
+#include "opt_sctp.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/domain.h>
+#include <sys/proc.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/sysctl.h>
+#include <sys/syslog.h>
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/if_var.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/in_pcb.h>
+#include <netinet/in_var.h>
+#include <netinet/ip_var.h>
+#include <netinet6/ip6_var.h>
+#include <netinet6/in6_var.h>
+#include <netinet6/scope6_var.h>
+#include <netinet/ip_icmp.h>
+#include <netinet/icmp_var.h>
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_bsd_addr.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_auth.h>
+#ifdef IPSEC
+#include <netinet6/ipsec.h>
+#include <netkey/key.h>
+#endif /* IPSEC */
+
+
+
+
+#ifndef in6pcb
+#define in6pcb inpcb
+#endif
+#ifndef sotoin6pcb
+#define sotoin6pcb sotoinpcb
+#endif
+
+
+
+/*
+ * sysctl tunable variables
+ */
+int sctp_sendspace = (128 * 1024);
+int sctp_recvspace = 128 * (1024 +
+#ifdef INET6
+ sizeof(struct sockaddr_in6)
+#else
+ sizeof(struct sockaddr_in)
+#endif
+);
+int sctp_mbuf_threshold_count = SCTP_DEFAULT_MBUFS_IN_CHAIN;
+int sctp_auto_asconf = SCTP_DEFAULT_AUTO_ASCONF;
+int sctp_ecn_enable = 1;
+int sctp_ecn_nonce = 0;
+int sctp_strict_sacks = 0;
+int sctp_no_csum_on_loopback = 1;
+int sctp_strict_init = 1;
+int sctp_abort_if_one_2_one_hits_limit = 0;
+int sctp_strict_data_order = 0;
+
+int sctp_peer_chunk_oh = sizeof(struct mbuf);
+int sctp_max_burst_default = SCTP_DEF_MAX_BURST;
+int sctp_use_cwnd_based_maxburst = 1;
+int sctp_do_drain = 1;
+int sctp_warm_the_crc32_table = 0;
+
+unsigned int sctp_max_chunks_on_queue = SCTP_ASOC_MAX_CHUNKS_ON_QUEUE;
+unsigned int sctp_delayed_sack_time_default = SCTP_RECV_MSEC;
+unsigned int sctp_heartbeat_interval_default = SCTP_HB_DEFAULT_MSEC;
+unsigned int sctp_pmtu_raise_time_default = SCTP_DEF_PMTU_RAISE_SEC;
+unsigned int sctp_shutdown_guard_time_default = SCTP_DEF_MAX_SHUTDOWN_SEC;
+unsigned int sctp_secret_lifetime_default = SCTP_DEFAULT_SECRET_LIFE_SEC;
+unsigned int sctp_rto_max_default = SCTP_RTO_UPPER_BOUND;
+unsigned int sctp_rto_min_default = SCTP_RTO_LOWER_BOUND;
+unsigned int sctp_rto_initial_default = SCTP_RTO_INITIAL;
+unsigned int sctp_init_rto_max_default = SCTP_RTO_UPPER_BOUND;
+unsigned int sctp_valid_cookie_life_default = SCTP_DEFAULT_COOKIE_LIFE;
+unsigned int sctp_init_rtx_max_default = SCTP_DEF_MAX_INIT;
+unsigned int sctp_assoc_rtx_max_default = SCTP_DEF_MAX_SEND;
+unsigned int sctp_path_rtx_max_default = SCTP_DEF_MAX_PATH_RTX;
+unsigned int sctp_nr_outgoing_streams_default = SCTP_OSTREAM_INITIAL;
+unsigned int sctp_add_more_threshold = SCTP_DEFAULT_ADD_MORE;
+
+uint32_t sctp_asoc_free_resc_limit = SCTP_DEF_ASOC_RESC_LIMIT;
+uint32_t sctp_system_free_resc_limit = SCTP_DEF_SYSTEM_RESC_LIMIT;
+
+int sctp_min_split_point = SCTP_DEFAULT_SPLIT_POINT_MIN;
+int sctp_pcbtblsize = SCTP_PCBHASHSIZE;
+int sctp_hashtblsize = SCTP_TCBHASHSIZE;
+int sctp_chunkscale = SCTP_CHUNKQUEUE_SCALE;
+
+unsigned int sctp_cmt_on_off = 0;
+unsigned int sctp_cmt_sockopt_on_off = 0;
+unsigned int sctp_cmt_use_dac = 0;
+unsigned int sctp_cmt_sockopt_use_dac = 0;
+
+int sctp_L2_abc_variable = 1;
+unsigned int sctp_early_fr = 0;
+unsigned int sctp_early_fr_msec = SCTP_MINFR_MSEC_TIMER;
+unsigned int sctp_use_rttvar_cc = 0;
+int sctp_says_check_for_deadlock = 0;
+unsigned int sctp_asconf_auth_nochk = 0;
+unsigned int sctp_auth_disable = 0;
+unsigned int sctp_auth_random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
+unsigned int sctp_auth_hmac_id_default = SCTP_AUTH_HMAC_ID_SHA1;
+struct sctpstat sctpstat;
+
+#ifdef SCTP_DEBUG
+extern uint32_t sctp_debug_on;
+
+#endif /* SCTP_DEBUG */
+
+
+void
+sctp_init(void)
+{
+ /* Init the SCTP pcb in sctp_pcb.c */
+ u_long sb_max_adj;
+
+ sctp_pcb_init();
+
+ if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
+ sctp_max_chunks_on_queue = (nmbclusters / 8);
+ /*
+ * Allow a user to take no more than 1/2 the number of clusters or
+ * SB_MAX, whichever is smaller, for the send window.
+ */
+ sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
+ sctp_sendspace = min((min(SB_MAX, sb_max_adj)),
+ ((nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT));
+ /*
+ * Now for the recv window: should we take the same amount, or
+ * should I use 1/2 of SB_MAX instead in the SB_MAX min above? For
+ * now I will just copy.
+ */
+ sctp_recvspace = sctp_sendspace;
+
+
+}
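+
+/*
+ * Worked example of the clamp above, assuming the common values
+ * SB_MAX = 256 * 1024, MCLBYTES = 2048 and MSIZE = 256:
+ *
+ *   sb_max_adj = 262144 * 2048 / (256 + 2048) = 233016
+ *
+ * so the send window becomes min(262144, 233016) = 233016 bytes, unless
+ * (nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT is smaller still.
+ */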
+
+
+#ifdef INET6
+void
+ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip)
+{
+ bzero(ip6, sizeof(*ip6));
+
+ ip6->ip6_vfc = IPV6_VERSION;
+ ip6->ip6_plen = ip->ip_len;
+ ip6->ip6_nxt = ip->ip_p;
+ ip6->ip6_hlim = ip->ip_ttl;
+ ip6->ip6_src.s6_addr32[2] = ip6->ip6_dst.s6_addr32[2] =
+ IPV6_ADDR_INT32_SMP;
+ ip6->ip6_src.s6_addr32[3] = ip->ip_src.s_addr;
+ ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr;
+}
+
+#endif /* INET6 */
+
+
+static void
+sctp_pathmtu_adustment(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ uint16_t nxtsz)
+{
+ struct sctp_tmit_chunk *chk;
+
+ /* Adjust that too */
+ stcb->asoc.smallest_mtu = nxtsz;
+ /* now off to subtract IP_DF flag if needed */
+
+ TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
+ if ((chk->send_size + IP_HDR_SIZE) > nxtsz) {
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ }
+ }
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if ((chk->send_size + IP_HDR_SIZE) > nxtsz) {
+ /*
+ * For this guy we also mark for immediate resend
+ * since we sent too big a chunk
+ */
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ if (chk->sent != SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ }
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ chk->rec.data.doing_fast_retransmit = 0;
+
+ /* Clear any time so NO RTT is being done */
+ chk->do_rtt = 0;
+ if (stcb->asoc.total_flight >= chk->book_size)
+ stcb->asoc.total_flight -= chk->book_size;
+ else
+ stcb->asoc.total_flight = 0;
+ if (stcb->asoc.total_flight_count > 0)
+ stcb->asoc.total_flight_count--;
+ if (net->flight_size >= chk->book_size)
+ net->flight_size -= chk->book_size;
+ else
+ net->flight_size = 0;
+ }
+ }
+}
+
+static void
+sctp_notify_mbuf(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ struct ip *ip,
+ struct sctphdr *sh)
+{
+ struct icmp *icmph;
+ int totsz, tmr_stopped = 0;
+ uint16_t nxtsz;
+
+ /* protection */
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
+ (ip == NULL) || (sh == NULL)) {
+ if (stcb != NULL)
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ /* First job is to verify the vtag matches what I would send */
+ if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
+ sizeof(struct ip)));
+ if (icmph->icmp_type != ICMP_UNREACH) {
+ /* We only care about unreachable */
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
+ /* not an unreachable message due to frag. */
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ totsz = ip->ip_len;
+
+ nxtsz = ntohs(icmph->icmp_seq);
+ if (nxtsz == 0) {
+ /*
+ * old-style router that does not tell us what the next-size
+ * MTU is. Rats, we will have to guess (in an educated fashion,
+ * of course).
+ */
+ nxtsz = find_next_best_mtu(totsz);
+ }
+ /* Stop any PMTU timer */
+ if (callout_pending(&net->pmtu_timer.timer)) {
+ tmr_stopped = 1;
+ sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
+ }
+ /* Adjust destination size limit */
+ if (net->mtu > nxtsz) {
+ net->mtu = nxtsz;
+ }
+ /* now what about the ep? */
+ if (stcb->asoc.smallest_mtu > nxtsz) {
+ sctp_pathmtu_adustment(inp, stcb, net, nxtsz);
+ }
+ if (tmr_stopped)
+ sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
+
+ SCTP_TCB_UNLOCK(stcb);
+}
+
+
+void
+sctp_notify(struct sctp_inpcb *inp,
+ int errno,
+ struct sctphdr *sh,
+ struct sockaddr *to,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ /* protection */
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
+ (sh == NULL) || (to == NULL)) {
+ return;
+ }
+ /* First job is to verify the vtag matches what I would send */
+ if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
+ return;
+ }
+ /* FIX ME FIX ME PROTOPT i.e. no SCTP should ALWAYS be an ABORT */
+
+ if ((errno == EHOSTUNREACH) || /* Host is not reachable */
+ (errno == EHOSTDOWN) || /* Host is down */
+ (errno == ECONNREFUSED) || /* Host refused the connection, (not
+ * an abort?) */
+ (errno == ENOPROTOOPT) /* SCTP is not present on host */
+ ) {
+ /*
+ * Hmm, reachability problems we must examine closely. If it is
+ * not reachable, we may have lost a network. Or if there is NO
+ * protocol named SCTP at the other end, we consider it an OOTB
+ * abort.
+ */
+ if ((errno == EHOSTUNREACH) || (errno == EHOSTDOWN)) {
+ if (net->dest_state & SCTP_ADDR_REACHABLE) {
+ /* Ok that destination is NOT reachable */
+ net->dest_state &= ~SCTP_ADDR_REACHABLE;
+ net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
+ net->error_count = net->failure_threshold + 1;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
+ stcb, SCTP_FAILED_THRESHOLD,
+ (void *)net);
+ }
+ if (stcb)
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /*
+ * Here the peer is either playing tricks on us,
+ * including an address that belongs to someone who
+ * does not support SCTP, OR was a userland
+ * implementation that shut down and is now dead. In
+ * either case treat it like an OOTB abort with no
+ * TCB.
+ */
+ sctp_abort_notification(stcb, SCTP_PEER_FAULTY);
+ sctp_free_assoc(inp, stcb, 0);
+ /* no need to unlock here, since the TCB is gone */
+ }
+ } else {
+ /* Send all others to the app */
+ if (stcb)
+ SCTP_TCB_UNLOCK(stcb);
+
+
+ if (inp->sctp_socket) {
+#ifdef SCTP_LOCK_LOGGING
+ sctp_log_lock(inp, stcb, SCTP_LOG_LOCK_SOCK);
+#endif
+ SOCK_LOCK(inp->sctp_socket);
+ inp->sctp_socket->so_error = errno;
+ sctp_sowwakeup(inp, inp->sctp_socket);
+ SOCK_UNLOCK(inp->sctp_socket);
+ }
+ }
+}
+
+void
+sctp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
+{
+ struct ip *ip = vip;
+ struct sctphdr *sh;
+ int s;
+
+
+ if (sa->sa_family != AF_INET ||
+ ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
+ return;
+ }
+ if (PRC_IS_REDIRECT(cmd)) {
+ ip = 0;
+ } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
+ return;
+ }
+ if (ip) {
+ struct sctp_inpcb *inp = NULL;
+ struct sctp_tcb *stcb = NULL;
+ struct sctp_nets *net = NULL;
+ struct sockaddr_in to, from;
+
+ sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
+ bzero(&to, sizeof(to));
+ bzero(&from, sizeof(from));
+ from.sin_family = to.sin_family = AF_INET;
+ from.sin_len = to.sin_len = sizeof(to);
+ from.sin_port = sh->src_port;
+ from.sin_addr = ip->ip_src;
+ to.sin_port = sh->dest_port;
+ to.sin_addr = ip->ip_dst;
+
+ /*
+ * 'to' holds the dest of the packet that failed to be sent.
+ * 'from' holds our local endpoint address. Thus we reverse
+ * the to and the from in the lookup.
+ */
+ s = splnet();
+ stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
+ (struct sockaddr *)&to,
+ &inp, &net, 1);
+ if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
+ if (cmd != PRC_MSGSIZE) {
+ int cm;
+
+ if (cmd == PRC_HOSTDEAD) {
+ cm = EHOSTUNREACH;
+ } else {
+ cm = inetctlerrmap[cmd];
+ }
+ sctp_notify(inp, cm, sh,
+ (struct sockaddr *)&to, stcb,
+ net);
+ } else {
+ /* handle possible ICMP size messages */
+ sctp_notify_mbuf(inp, stcb, net, ip, sh);
+ }
+ } else {
+ if ((stcb == NULL) && (inp != NULL)) {
+ /* reduce ref-count */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ }
+ splx(s);
+ }
+ return;
+}
+
+static int
+sctp_getcred(SYSCTL_HANDLER_ARGS)
+{
+ struct sockaddr_in addrs[2];
+ struct sctp_inpcb *inp;
+ struct sctp_nets *net;
+ struct sctp_tcb *stcb;
+ int error, s;
+
+ error = suser(req->td);
+ if (error)
+ return (error);
+ error = SYSCTL_IN(req, addrs, sizeof(addrs));
+ if (error)
+ return (error);
+
+ s = splnet();
+ stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
+ sintosa(&addrs[1]),
+ &inp, &net, 1);
+ if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
+ if ((inp != NULL) && (stcb == NULL)) {
+ /* reduce ref-count */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ error = ENOENT;
+ goto out;
+ }
+ error = SYSCTL_OUT(req, inp->sctp_socket->so_cred, sizeof(struct ucred));
+ SCTP_TCB_UNLOCK(stcb);
+out:
+ splx(s);
+ return (error);
+}
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
+ 0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");
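+
+/*
+ * Userland sketch for the handler above: feed two sockaddr_in
+ * identifying the association (conventionally foreign first, then
+ * local, as with TCP's getcred) and read back the owning socket's
+ * ucred. The variable names below are hypothetical.
+ *
+ *   struct sockaddr_in addrs[2];    // [0] = foreign, [1] = local
+ *   struct ucred uc;
+ *   size_t len = sizeof(uc);
+ *
+ *   // fill in addrs[] with the association's addresses and ports
+ *   if (sysctlbyname("net.inet.sctp.getcred", &uc, &len,
+ *       addrs, sizeof(addrs)) == -1)
+ *           warn("getcred");
+ */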
+
+
+/*
+ * sysctl definitions
+ */
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, sendspace, CTLFLAG_RW,
+ &sctp_sendspace, 0, "Maximum outgoing SCTP buffer size");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, recvspace, CTLFLAG_RW,
+ &sctp_recvspace, 0, "Maximum incoming SCTP buffer size");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, auto_asconf, CTLFLAG_RW,
+ &sctp_auto_asconf, 0, "Enable SCTP Auto-ASCONF");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, ecn_enable, CTLFLAG_RW,
+ &sctp_ecn_enable, 0, "Enable SCTP ECN");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, ecn_nonce, CTLFLAG_RW,
+ &sctp_ecn_nonce, 0, "Enable SCTP ECN Nonce");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, strict_sacks, CTLFLAG_RW,
+ &sctp_strict_sacks, 0, "Enable SCTP Strict SACK checking");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, loopback_nocsum, CTLFLAG_RW,
+ &sctp_no_csum_on_loopback, 0,
+ "Enable NO Csum on packets sent on loopback");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, strict_init, CTLFLAG_RW,
+ &sctp_strict_init, 0,
+ "Enable strict INIT/INIT-ACK singleton enforcement");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, peer_chkoh, CTLFLAG_RW,
+ &sctp_peer_chunk_oh, 0,
+ "Amount to debit peers rwnd per chunk sent");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxburst, CTLFLAG_RW,
+ &sctp_max_burst_default, 0,
+ "Default max burst for sctp endpoints");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxchunks, CTLFLAG_RW,
+ &sctp_max_chunks_on_queue, 0,
+ "Default max chunks on queue per asoc");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, tcbhashsize, CTLFLAG_RW,
+ &sctp_hashtblsize, 0,
+ "Tuneable for Hash table sizes");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, min_split_point, CTLFLAG_RW,
+ &sctp_min_split_point, 0,
+ "Minimum size when splitting a chunk");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, pcbhashsize, CTLFLAG_RW,
+ &sctp_pcbtblsize, 0,
+ "Tuneable for PCB Hash table sizes");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, sys_resource, CTLFLAG_RW,
+ &sctp_system_free_resc_limit, 0,
+ "Max number of cached resources in the system");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, asoc_resource, CTLFLAG_RW,
+ &sctp_asoc_free_resc_limit, 0,
+ "Max number of cached resources in an asoc");
+
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, chunkscale, CTLFLAG_RW,
+ &sctp_chunkscale, 0,
+ "Tuneable for Scaling of number of chunks and messages");
+
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, delayed_sack_time, CTLFLAG_RW,
+ &sctp_delayed_sack_time_default, 0,
+ "Default delayed SACK timer in msec");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, heartbeat_interval, CTLFLAG_RW,
+ &sctp_heartbeat_interval_default, 0,
+ "Default heartbeat interval in msec");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, pmtu_raise_time, CTLFLAG_RW,
+ &sctp_pmtu_raise_time_default, 0,
+ "Default PMTU raise timer in sec");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, shutdown_guard_time, CTLFLAG_RW,
+ &sctp_shutdown_guard_time_default, 0,
+ "Default shutdown guard timer in sec");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, secret_lifetime, CTLFLAG_RW,
+ &sctp_secret_lifetime_default, 0,
+ "Default secret lifetime in sec");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_max, CTLFLAG_RW,
+ &sctp_rto_max_default, 0,
+ "Default maximum retransmission timeout in msec");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_min, CTLFLAG_RW,
+ &sctp_rto_min_default, 0,
+ "Default minimum retransmission timeout in msec");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_initial, CTLFLAG_RW,
+ &sctp_rto_initial_default, 0,
+ "Default initial retransmission timeout in msec");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, init_rto_max, CTLFLAG_RW,
+ &sctp_init_rto_max_default, 0,
+ "Default maximum retransmission timeout during association setup in msec");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, valid_cookie_life, CTLFLAG_RW,
+ &sctp_valid_cookie_life_default, 0,
+ "Default cookie lifetime in sec");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, init_rtx_max, CTLFLAG_RW,
+ &sctp_init_rtx_max_default, 0,
+ "Default maximum number of retransmission for INIT chunks");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, assoc_rtx_max, CTLFLAG_RW,
+ &sctp_assoc_rtx_max_default, 0,
+ "Default maximum number of retransmissions per association");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, path_rtx_max, CTLFLAG_RW,
+ &sctp_path_rtx_max_default, 0,
+ "Default maximum of retransmissions per path");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, add_more_on_output, CTLFLAG_RW,
+ &sctp_add_more_threshold, 0,
+ "When space wise is it worthwhile to try to add more to a socket send buffer");
+
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, nr_outgoing_streams, CTLFLAG_RW,
+ &sctp_nr_outgoing_streams_default, 0,
+ "Default number of outgoing streams");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, cmt_on_off, CTLFLAG_RW,
+ &sctp_cmt_on_off, 0,
+ "CMT ON/OFF flag");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, cwnd_maxburst, CTLFLAG_RW,
+ &sctp_use_cwnd_based_maxburst, 0,
+ "Use a CWND adjusting maxburst");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, early_fast_retran, CTLFLAG_RW,
+ &sctp_early_fr, 0,
+ "Early Fast Retransmit with Timer");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, use_rttvar_congctrl, CTLFLAG_RW,
+ &sctp_use_rttvar_cc, 0,
+ "Use congestion control via rtt variation");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, deadlock_detect, CTLFLAG_RW,
+ &sctp_says_check_for_deadlock, 0,
+ "SMP Deadlock detection on/off");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, early_fast_retran_msec, CTLFLAG_RW,
+ &sctp_early_fr_msec, 0,
+ "Early Fast Retransmit minimum timer value");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, asconf_auth_nochk, CTLFLAG_RW,
+ &sctp_asconf_auth_nochk, 0,
+ "Disable SCTP ASCONF AUTH requirement");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, auth_disable, CTLFLAG_RW,
+ &sctp_auth_disable, 0,
+ "Disable SCTP AUTH chunk requirement/function");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, auth_random_len, CTLFLAG_RW,
+ &sctp_auth_random_len, 0,
+ "Length of AUTH RANDOMs");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, auth_hmac_id, CTLFLAG_RW,
+ &sctp_auth_hmac_id_default, 0,
+ "Default HMAC Id for SCTP AUTHenthication");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, abc_l_var, CTLFLAG_RW,
+ &sctp_L2_abc_variable, 0,
+ "SCTP ABC max increase per SACK (L)");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, max_chained_mbufs, CTLFLAG_RW,
+ &sctp_mbuf_threshold_count, 0,
+ "Default max number of small mbufs on a chain");
+
+SYSCTL_UINT(_net_inet_sctp, OID_AUTO, cmt_use_dac, CTLFLAG_RW,
+ &sctp_cmt_use_dac, 0,
+ "CMT DAC ON/OFF flag");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, do_sctp_drain, CTLFLAG_RW,
+ &sctp_do_drain, 0,
+ "Should SCTP respond to the drain calls");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, warm_crc_table, CTLFLAG_RW,
+ &sctp_warm_the_crc32_table, 0,
+ "Should the CRC32c tables be warmed before checksum?");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, abort_at_limit, CTLFLAG_RW,
+ &sctp_abort_if_one_2_one_hits_limit, 0,
+ "When one-2-one hits qlimit abort");
+
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, strict_data_order, CTLFLAG_RW,
+ &sctp_strict_data_order, 0,
+ "Enforce strict data ordering, abort if control inside data");
+
+SYSCTL_STRUCT(_net_inet_sctp, OID_AUTO, stats, CTLFLAG_RW,
+ &sctpstat, sctpstat,
+ "SCTP statistics (struct sctps_stat, netinet/sctp.h");
+#ifdef SCTP_DEBUG
+SYSCTL_INT(_net_inet_sctp, OID_AUTO, debug, CTLFLAG_RW,
+ &sctp_debug_on, 0, "Configure debug output");
+#endif /* SCTP_DEBUG */
+
+static void
+sctp_abort(struct socket *so)
+{
+ struct sctp_inpcb *inp;
+ int s;
+ uint32_t flags;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == 0)
+ return;
+
+ s = splnet();
+sctp_must_try_again:
+ flags = inp->sctp_flags;
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 17);
+#endif
+ if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+ (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 16);
+#endif
+ sctp_inpcb_free(inp, 1, 0);
+ SOCK_LOCK(so);
+ so->so_snd.sb_cc = 0;
+ so->so_snd.sb_mb = NULL;
+ so->so_snd.sb_mbcnt = 0;
+
+ /*
+ * same for the rcv ones, they are only here for the
+ * accounting/select.
+ */
+ so->so_rcv.sb_cc = 0;
+ so->so_rcv.sb_mb = NULL;
+ so->so_rcv.sb_mbcnt = 0;
+ /*
+ * Now null out the reference, we are completely detached.
+ */
+ so->so_pcb = NULL;
+ SOCK_UNLOCK(so);
+
+ } else {
+ flags = inp->sctp_flags;
+ if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
+ goto sctp_must_try_again;
+ }
+ }
+ splx(s);
+ return;
+}
+
+static int
+sctp_attach(struct socket *so, int proto, struct thread *p)
+{
+ struct sctp_inpcb *inp;
+ struct inpcb *ip_inp;
+ int s, error;
+
+#ifdef IPSEC
+ uint32_t flags;
+
+#endif
+ s = splnet();
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp != 0) {
+ splx(s);
+ return EINVAL;
+ }
+ error = soreserve(so, sctp_sendspace, sctp_recvspace);
+ if (error) {
+ splx(s);
+ return error;
+ }
+ error = sctp_inpcb_alloc(so);
+ if (error) {
+ splx(s);
+ return error;
+ }
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ SCTP_INP_WLOCK(inp);
+
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! */
+ ip_inp = &inp->ip_inp.inp;
+ ip_inp->inp_vflag |= INP_IPV4;
+ ip_inp->inp_ip_ttl = ip_defttl;
+
+#ifdef IPSEC
+ error = ipsec_init_pcbpolicy(so, &ip_inp->inp_sp);
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 17);
+#endif
+ if (error != 0) {
+ flags = inp->sctp_flags;
+ if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+ (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 15);
+#endif
+ sctp_inpcb_free(inp, 1, 0);
+ }
+ return error;
+ }
+#endif /* IPSEC */
+ SCTP_INP_WUNLOCK(inp);
+ splx(s);
+ return 0;
+}
+
+static int
+sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
+{
+ struct sctp_inpcb *inp;
+ int s, error;
+
+#ifdef INET6
+ if (addr && addr->sa_family != AF_INET)
+ /* must be a v4 address! */
+ return EINVAL;
+#endif /* INET6 */
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == 0)
+ return EINVAL;
+
+ s = splnet();
+ error = sctp_inpcb_bind(so, addr, p);
+ splx(s);
+ return error;
+}
+
+static void
+sctp_close(struct socket *so)
+{
+ struct sctp_inpcb *inp;
+ uint32_t flags;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == 0)
+ return;
+
+ /*
+ * Inform all the lower layer assoc that we are done.
+ */
+sctp_must_try_again:
+ flags = inp->sctp_flags;
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 17);
+#endif
+ if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+ (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
+ if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
+ (so->so_rcv.sb_cc > 0)) {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 13);
+#endif
+ sctp_inpcb_free(inp, 1, 1);
+ } else {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 14);
+#endif
+ sctp_inpcb_free(inp, 0, 1);
+ }
+ /*
+ * The socket is now detached, no matter what the state of
+ * the SCTP association.
+ */
+ SOCK_LOCK(so);
+ so->so_snd.sb_cc = 0;
+ so->so_snd.sb_mb = NULL;
+ so->so_snd.sb_mbcnt = 0;
+
+ /*
+ * same for the rcv ones, they are only here for the
+ * accounting/select.
+ */
+ so->so_rcv.sb_cc = 0;
+ so->so_rcv.sb_mb = NULL;
+ so->so_rcv.sb_mbcnt = 0;
+ /*
+ * Now null out the reference, we are completely detached.
+ */
+ so->so_pcb = NULL;
+ SOCK_UNLOCK(so);
+ } else {
+ flags = inp->sctp_flags;
+ if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
+ goto sctp_must_try_again;
+ }
+ }
+ return;
+}
+
+
+int
+sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+ struct mbuf *control, struct thread *p);
+
+
+int
+sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+ struct mbuf *control, struct thread *p)
+{
+ struct sctp_inpcb *inp;
+ int error;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == 0) {
+ if (control) {
+ sctp_m_freem(control);
+ control = NULL;
+ }
+ sctp_m_freem(m);
+ return EINVAL;
+ }
+ /* Got to have a to-address if we are NOT a connected socket */
+ if ((addr == NULL) &&
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))
+ ) {
+ goto connected_type;
+ } else if (addr == NULL) {
+ error = EDESTADDRREQ;
+ sctp_m_freem(m);
+ if (control) {
+ sctp_m_freem(control);
+ control = NULL;
+ }
+ return (error);
+ }
+#ifdef INET6
+ if (addr->sa_family != AF_INET) {
+ /* must be a v4 address! */
+ sctp_m_freem(m);
+ if (control) {
+ sctp_m_freem(control);
+ control = NULL;
+ }
+ error = EDESTADDRREQ;
+ return (error);
+ }
+#endif /* INET6 */
+connected_type:
+ /* now what about control */
+ if (control) {
+ if (inp->control) {
+ printf("huh? control set?\n");
+ sctp_m_freem(inp->control);
+ inp->control = NULL;
+ }
+ inp->control = control;
+ }
+ /* add it in possibly */
+ if ((inp->pkt) && (inp->pkt->m_flags & M_PKTHDR)) {
+ struct mbuf *x;
+ int c_len;
+
+ c_len = 0;
+ /* How big is it */
+ for (x = m; x; x = x->m_next) {
+ c_len += x->m_len;
+ }
+ inp->pkt->m_pkthdr.len += c_len;
+ }
+ /* Place the data */
+ if (inp->pkt) {
+ inp->pkt_last->m_next = m;
+ inp->pkt_last = m;
+ } else {
+ inp->pkt_last = inp->pkt = m;
+ }
+ /* FreeBSD uses a flag passed */
+ if ((flags & PRUS_MORETOCOME) == 0) {
+ /*
+ * note with the current version this code will only be used
+ * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for
+ * re-defining sosend to use the sctp_sosend. One can
+ * optionally switch back to this code (by changing back the
+ * definitions) but this is not advisable. This code is used
+ * by FreeBSD when sending a file with sendfile() though.
+ */
+ int ret;
+
+ ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
+ inp->pkt = NULL;
+ inp->control = NULL;
+ return (ret);
+ } else {
+ return (0);
+ }
+}
+
+static int
+sctp_disconnect(struct socket *so)
+{
+ struct sctp_inpcb *inp;
+ int s;
+
+ s = splnet();
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ splx(s);
+ return (ENOTCONN);
+ }
+ SCTP_INP_RLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+ if (LIST_EMPTY(&inp->sctp_asoc_list)) {
+ /* No connection */
+ splx(s);
+ SCTP_INP_RUNLOCK(inp);
+ return (0);
+ } else {
+ struct sctp_association *asoc;
+ struct sctp_tcb *stcb;
+
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ splx(s);
+ SCTP_INP_RUNLOCK(inp);
+ return (EINVAL);
+ }
+ SCTP_TCB_LOCK(stcb);
+ asoc = &stcb->asoc;
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ /* We are about to be freed, out of here */
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ return (0);
+ }
+ if (((so->so_options & SO_LINGER) &&
+ (so->so_linger == 0)) ||
+ (so->so_rcv.sb_cc > 0)) {
+ if (SCTP_GET_STATE(asoc) !=
+ SCTP_STATE_COOKIE_WAIT) {
+ /* Left with Data unread */
+ struct mbuf *err;
+
+ err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
+ if (err) {
+ /*
+ * Fill in the user
+ * initiated abort
+ */
+ struct sctp_paramhdr *ph;
+
+ ph = mtod(err, struct sctp_paramhdr *);
+ err->m_len = sizeof(struct sctp_paramhdr);
+ ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(err->m_len);
+ }
+ sctp_send_abort_tcb(stcb, err);
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ sctp_free_assoc(inp, stcb, 0);
+ /* No unlock needed, the TCB/assoc is gone */
+ splx(s);
+ return (0);
+ }
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->stream_queue_cnt == 0)) {
+ /* there is nothing queued to send, so done */
+ if (asoc->locked_on_sending) {
+ goto abort_anyway;
+ }
+ if ((SCTP_GET_STATE(asoc) !=
+ SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(asoc) !=
+ SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ /* only send SHUTDOWN 1st time thru */
+ sctp_stop_timers_for_shutdown(stcb);
+ sctp_send_shutdown(stcb,
+ stcb->asoc.primary_destination);
+ sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3);
+ asoc->state = SCTP_STATE_SHUTDOWN_SENT;
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+ stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ }
+ } else {
+ /*
+ * we still got (or just got) data to send,
+ * so set SHUTDOWN_PENDING
+ */
+ /*
+ * XXX sockets draft says that SCTP_EOF
+ * should be sent with no data. currently,
+ * we will allow user data to be sent first
+ * and move to SHUTDOWN-PENDING
+ */
+ asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ if (asoc->locked_on_sending) {
+ /* Locked to send out the data */
+ struct sctp_stream_queue_pending *sp;
+
+ sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
+ if (sp == NULL) {
+ printf("Error, sp is NULL, locked on sending is %x strm:%d\n",
+ (u_int)asoc->locked_on_sending,
+ asoc->locked_on_sending->stream_no);
+ } else {
+ if ((sp->length == 0) && (sp->msg_is_complete == 0))
+ asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+ }
+ }
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+ struct mbuf *op_err;
+
+ abort_anyway:
+ op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (op_err) {
+ /*
+ * Fill in the user
+ * initiated abort
+ */
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ op_err->m_len =
+ (sizeof(struct sctp_paramhdr) + sizeof(uint32_t));
+ ph = mtod(op_err,
+ struct sctp_paramhdr *);
+ ph->param_type = htons(
+ SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(op_err->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x30000007);
+ }
+ sctp_send_abort_tcb(stcb, op_err);
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ sctp_free_assoc(inp, stcb, 0);
+ splx(s);
+ return (0);
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ splx(s);
+ return (0);
+ }
+ /* not reached */
+ } else {
+ /* UDP model does not support this */
+ SCTP_INP_RUNLOCK(inp);
+ splx(s);
+ return EOPNOTSUPP;
+ }
+}
+
+int
+sctp_shutdown(struct socket *so)
+{
+ struct sctp_inpcb *inp;
+ int s;
+
+ s = splnet();
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == 0) {
+ splx(s);
+ return EINVAL;
+ }
+ SCTP_INP_RLOCK(inp);
+ /* For UDP model this is an invalid call */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
+ /* Restore the flags that the soshutdown took away. */
+ so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
+ /* This proc will wakeup for read and do nothing (I hope) */
+ splx(s);
+ SCTP_INP_RUNLOCK(inp);
+ return (EOPNOTSUPP);
+ }
+ /*
+ * Ok, if we reach here it's the TCP model and it is either a SHUT_WR
+ * or SHUT_RDWR. This means we put the shutdown flag against it.
+ */
+ {
+ struct sctp_tcb *stcb;
+ struct sctp_association *asoc;
+
+ socantsendmore(so);
+
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ /*
+ * Ok we hit the case that the shutdown call was
+ * made after an abort or something. Nothing to do
+ * now.
+ */
+ SCTP_INP_RUNLOCK(inp);
+ splx(s);
+ return (0);
+ }
+ SCTP_TCB_LOCK(stcb);
+ asoc = &stcb->asoc;
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if (asoc->locked_on_sending) {
+ goto abort_anyway;
+ }
+ /* there is nothing queued to send, so I'm done... */
+ if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
+ /* only send SHUTDOWN the first time through */
+ sctp_stop_timers_for_shutdown(stcb);
+ sctp_send_shutdown(stcb,
+ stcb->asoc.primary_destination);
+ sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3);
+ asoc->state = SCTP_STATE_SHUTDOWN_SENT;
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+ stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ }
+ } else {
+ /*
+ * we still got (or just got) data to send, so set
+ * SHUTDOWN_PENDING
+ */
+ asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+
+ if (asoc->locked_on_sending) {
+ /* Locked to send out the data */
+ struct sctp_stream_queue_pending *sp;
+
+ sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
+ if (sp == NULL) {
+ printf("Error, sp is NULL, locked on sending is %x strm:%d\n",
+ (u_int)asoc->locked_on_sending,
+ asoc->locked_on_sending->stream_no);
+ } else {
+ if ((sp->length == 0) && (sp->msg_is_complete == 0)) {
+ asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+ }
+ }
+ }
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+ struct mbuf *op_err;
+
+ abort_anyway:
+ op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (op_err) {
+ /* Fill in the user initiated abort */
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ op_err->m_len =
+ sizeof(struct sctp_paramhdr) + sizeof(uint32_t);
+ ph = mtod(op_err,
+ struct sctp_paramhdr *);
+ ph->param_type = htons(
+ SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(op_err->m_len);
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(0x30000008);
+ }
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_RESPONSE_TO_USER_REQ,
+ op_err);
+ goto skip_unlock;
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+skip_unlock:
+ SCTP_INP_RUNLOCK(inp);
+ splx(s);
+ return 0;
+}
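+
+/*
+ * Usage sketch: on a one-to-one (TCP model) SCTP socket a plain
+ * shutdown(2) from userland ends up here; with the queues empty a
+ * SHUTDOWN chunk is sent at once, otherwise the association is marked
+ * SHUTDOWN_PENDING until the remaining data drains.
+ *
+ *   shutdown(sd, SHUT_WR);  // graceful close of the send direction
+ */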
+
+/*
+ * copies a "user" presentable address and removes embedded scope, etc.
+ * returns 0 on success, 1 on error
+ */
+static uint32_t
+sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa)
+{
+ struct sockaddr_in6 lsa6;
+
+ sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa,
+ &lsa6);
+ memcpy(ss, sa, sa->sa_len);
+ return (0);
+}
+
+
+
+static int
+sctp_fill_up_addresses(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ int limit,
+ struct sockaddr_storage *sas)
+{
+ struct ifnet *ifn;
+ struct ifaddr *ifa;
+ int loopback_scope, ipv4_local_scope, local_scope, site_scope, actual;
+ int ipv4_addr_legal, ipv6_addr_legal;
+
+ actual = 0;
+ if (limit <= 0)
+ return (actual);
+
+ if (stcb) {
+ /* Turn on all the appropriate scope */
+ loopback_scope = stcb->asoc.loopback_scope;
+ ipv4_local_scope = stcb->asoc.ipv4_local_scope;
+ local_scope = stcb->asoc.local_scope;
+ site_scope = stcb->asoc.site_scope;
+ } else {
+ /* Turn on ALL scope, since we look at the EP */
+ loopback_scope = ipv4_local_scope = local_scope =
+ site_scope = 1;
+ }
+ ipv4_addr_legal = ipv6_addr_legal = 0;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ipv6_addr_legal = 1;
+ if (
+ (((struct in6pcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY)
+ == 0) {
+ ipv4_addr_legal = 1;
+ }
+ } else {
+ ipv4_addr_legal = 1;
+ }
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ TAILQ_FOREACH(ifn, &ifnet, if_list) {
+ if ((loopback_scope == 0) &&
+ (ifn->if_type == IFT_LOOP)) {
+ /* Skip loopback if loopback_scope not set */
+ continue;
+ }
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ if (stcb) {
+ /*
+ * For the BOUND-ALL case, the list
+ * associated with a TCB is Always
+ * considered a reverse list.. i.e.
+ * it lists addresses that are NOT
+ * part of the association. If this
+ * is one of those we must skip it.
+ */
+ if (sctp_is_addr_restricted(stcb,
+ ifa->ifa_addr)) {
+ continue;
+ }
+ }
+ if ((ifa->ifa_addr->sa_family == AF_INET) &&
+ (ipv4_addr_legal)) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)ifa->ifa_addr;
+ if (sin->sin_addr.s_addr == 0) {
+ /*
+ * we skip unspecified
+ * addresses
+ */
+ continue;
+ }
+ if ((ipv4_local_scope == 0) &&
+ (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
+ continue;
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) {
+ in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas);
+ ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
+ sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6));
+ actual += sizeof(struct sockaddr_in6);
+ } else {
+ memcpy(sas, sin, sizeof(*sin));
+ ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
+ sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin));
+ actual += sizeof(*sin);
+ }
+ if (actual >= limit) {
+ return (actual);
+ }
+ } else if ((ifa->ifa_addr->sa_family == AF_INET6) &&
+ (ipv6_addr_legal)) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ /*
+ * we skip unspecified
+ * addresses
+ */
+ continue;
+ }
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ if (local_scope == 0)
+ continue;
+ if (sin6->sin6_scope_id == 0) {
+ if (sa6_recoverscope(sin6) != 0)
+ /*
+ * bad link
+ * local
+ * address
+ */
+ continue;
+ }
+ }
+ if ((site_scope == 0) &&
+ (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
+ continue;
+ }
+ memcpy(sas, sin6, sizeof(*sin6));
+ ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
+ sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6));
+ actual += sizeof(*sin6);
+ if (actual >= limit) {
+ return (actual);
+ }
+ }
+ }
+ }
+ } else {
+ struct sctp_laddr *laddr;
+
+ /*
+ * If we have a TCB and we do NOT support ASCONF (it's
+ * turned off or otherwise) then the list is always the true
+ * list of addresses (the else case below). Otherwise the
+ * list on the association is a list of addresses that are
+ * NOT part of the association.
+ */
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF)) {
+ /* The list is a NEGATIVE list */
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (stcb) {
+ if (sctp_is_addr_restricted(stcb, laddr->ifa->ifa_addr)) {
+ continue;
+ }
+ }
+ if (sctp_fill_user_address(sas, laddr->ifa->ifa_addr))
+ continue;
+
+ ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
+ sas = (struct sockaddr_storage *)((caddr_t)sas +
+ laddr->ifa->ifa_addr->sa_len);
+ actual += laddr->ifa->ifa_addr->sa_len;
+ if (actual >= limit) {
+ return (actual);
+ }
+ }
+ } else {
+ /* The list is a positive list if present */
+ if (stcb) {
+ /* Must use the specific association list */
+ LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
+ sctp_nxt_addr) {
+ if (sctp_fill_user_address(sas,
+ laddr->ifa->ifa_addr))
+ continue;
+ ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
+ sas = (struct sockaddr_storage *)((caddr_t)sas +
+ laddr->ifa->ifa_addr->sa_len);
+ actual += laddr->ifa->ifa_addr->sa_len;
+ if (actual >= limit) {
+ return (actual);
+ }
+ }
+ } else {
+ /*
+ * No endpoint so use the endpoints
+ * individual list
+ */
+ LIST_FOREACH(laddr, &inp->sctp_addr_list,
+ sctp_nxt_addr) {
+ if (sctp_fill_user_address(sas,
+ laddr->ifa->ifa_addr))
+ continue;
+ ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
+ sas = (struct sockaddr_storage *)((caddr_t)sas +
+ laddr->ifa->ifa_addr->sa_len);
+ actual += laddr->ifa->ifa_addr->sa_len;
+ if (actual >= limit) {
+ return (actual);
+ }
+ }
+ }
+ }
+ }
+ return (actual);
+}
+
+static int
+sctp_count_max_addresses(struct sctp_inpcb *inp)
+{
+ int cnt = 0;
+
+ /*
+ * In both the sub-set bound and bound_all cases we return the MAXIMUM
+ * number of addresses that you COULD get. In reality the sub-set
+ * bound may have an exclusion list for a given TCB OR in the
+ * bound-all case a TCB may NOT include the loopback or other
+ * addresses as well.
+ */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ struct ifnet *ifn;
+ struct ifaddr *ifa;
+
+ TAILQ_FOREACH(ifn, &ifnet, if_list) {
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ /* Count them if they are the right type */
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)
+ cnt += sizeof(struct sockaddr_in6);
+ else
+ cnt += sizeof(struct sockaddr_in);
+
+ } else if (ifa->ifa_addr->sa_family == AF_INET6)
+ cnt += sizeof(struct sockaddr_in6);
+ }
+ }
+ } else {
+ struct sctp_laddr *laddr;
+
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa->ifa_addr->sa_family == AF_INET) {
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)
+ cnt += sizeof(struct sockaddr_in6);
+ else
+ cnt += sizeof(struct sockaddr_in);
+
+ } else if (laddr->ifa->ifa_addr->sa_family == AF_INET6)
+ cnt += sizeof(struct sockaddr_in6);
+ }
+ }
+ return (cnt);
+}
+
+static int
+sctp_do_connect_x(struct socket *so,
+ struct sctp_inpcb *inp,
+ struct mbuf *m,
+ struct thread *p,
+ int delay
+)
+{
+ int s = splnet();
+
+ int error = 0;
+ int creat_lock_on = 0;
+ struct sctp_tcb *stcb = NULL;
+ struct sockaddr *sa;
+ int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr, i, incr, at;
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_PCB1) {
+ printf("Connectx called\n");
+ }
+#endif /* SCTP_DEBUG */
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
+ /* We are already connected AND the TCP model */
+ splx(s);
+ return (EADDRINUSE);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
+ splx(s);
+ return (EINVAL);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ SCTP_INP_RUNLOCK(inp);
+ }
+ if (stcb) {
+ splx(s);
+ return (EALREADY);
+
+ }
+ SCTP_INP_INCR_REF(inp);
+ SCTP_ASOC_CREATE_LOCK(inp);
+ creat_lock_on = 1;
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+ error = EFAULT;
+ goto out_now;
+ }
+ totaddrp = mtod(m, int *);
+ totaddr = *totaddrp;
+ sa = (struct sockaddr *)(totaddrp + 1);
+ at = incr = 0;
+ /* account and validate addresses */
+ for (i = 0; i < totaddr; i++) {
+ if (sa->sa_family == AF_INET) {
+ num_v4++;
+ incr = sizeof(struct sockaddr_in);
+ } else if (sa->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)sa;
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ /* Must be non-mapped for connectx */
+ error = EINVAL;
+ goto out_now;
+ }
+ num_v6++;
+ incr = sizeof(struct sockaddr_in6);
+ } else {
+ totaddr = i;
+ break;
+ }
+ stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
+ if (stcb != NULL) {
+ /* Already have or am bringing up an association */
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ creat_lock_on = 0;
+ SCTP_TCB_UNLOCK(stcb);
+ error = EALREADY;
+ goto out_now;
+ }
+ if ((at + incr) > m->m_len) {
+ totaddr = i;
+ break;
+ }
+ at += incr;
+ sa = (struct sockaddr *)((caddr_t)sa + incr);
+ }
+ sa = (struct sockaddr *)(totaddrp + 1);
+#ifdef INET6
+ if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
+ (num_v6 > 0)) {
+ error = EINVAL;
+ goto out_now;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ (num_v4 > 0)) {
+ struct in6pcb *inp6;
+
+ inp6 = (struct in6pcb *)inp;
+ if (
+ (inp6->inp_flags & IN6P_IPV6_V6ONLY)
+ ) {
+ /*
+ * if IPV6_V6ONLY flag, ignore connections destined
+ * to a v4 addr or v4-mapped addr
+ */
+ error = EINVAL;
+ goto out_now;
+ }
+ }
+#endif /* INET6 */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
+ SCTP_PCB_FLAGS_UNBOUND) {
+ /* Bind an ephemeral port */
+ SCTP_INP_WUNLOCK(inp);
+ error = sctp_inpcb_bind(so, NULL, p);
+ if (error) {
+ goto out_now;
+ }
+ } else {
+ SCTP_INP_WUNLOCK(inp);
+ }
+
+ /* We are GOOD to go */
+ stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0);
+ if (stcb == NULL) {
+ /* Gak! no memory */
+ error = ENOMEM;
+ goto out_now;
+ }
+ /* move to second address */
+ if (sa->sa_family == AF_INET)
+ sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
+ else
+ sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
+
+ for (i = 1; i < totaddr; i++) {
+ if (sa->sa_family == AF_INET) {
+ incr = sizeof(struct sockaddr_in);
+ if (sctp_add_remote_addr(stcb, sa, 0, 8)) {
+ /* assoc gone no un-lock */
+ sctp_free_assoc(inp, stcb, 0);
+ error = ENOBUFS;
+ goto out_now;
+ }
+ } else if (sa->sa_family == AF_INET6) {
+ incr = sizeof(struct sockaddr_in6);
+ if (sctp_add_remote_addr(stcb, sa, 0, 8)) {
+ /* assoc gone no un-lock */
+ sctp_free_assoc(inp, stcb, 0);
+ error = ENOBUFS;
+ goto out_now;
+ }
+ }
+ sa = (struct sockaddr *)((caddr_t)sa + incr);
+ }
+ stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
+
+ /* initialize authentication parameters for the assoc */
+ sctp_initialize_auth_params(inp, stcb);
+
+ if (delay) {
+ /* doing delayed connection */
+ stcb->asoc.delayed_connection = 1;
+ sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
+ } else {
+ SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+ sctp_send_initiate(inp, stcb);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+ /* Set the connected flag so we can queue data */
+ soisconnecting(so);
+ }
+out_now:
+ if (creat_lock_on)
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ splx(s);
+	return (error);
+}
+
+
+
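+/*
+ * sctp_optsget() handles getsockopt() on an SCTP socket.  The caller
+ * passes the request in *mp; the result is written back into the same
+ * mbuf and m_len is set to the number of bytes being returned.
+ */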
+static int
+sctp_optsget(struct socket *so,
+ int opt,
+ struct mbuf **mp,
+ struct thread *p
+)
+{
+ struct sctp_inpcb *inp;
+ struct mbuf *m;
+ int error, optval = 0;
+ struct sctp_tcb *stcb = NULL;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL)
+		return (EINVAL);
+ error = 0;
+
+ if (mp == NULL) {
+ return (EINVAL);
+ }
+ m = *mp;
+ if (m == NULL) {
+		/* Got to have an mbuf */
+ return (EINVAL);
+ }
+ switch (opt) {
+ case SCTP_NODELAY:
+ case SCTP_AUTOCLOSE:
+ case SCTP_EXPLICIT_EOR:
+ case SCTP_AUTO_ASCONF:
+ case SCTP_DISABLE_FRAGMENTS:
+ case SCTP_I_WANT_MAPPED_V4_ADDR:
+ case SCTP_USE_EXT_RCVINFO:
+ SCTP_INP_RLOCK(inp);
+ switch (opt) {
+ case SCTP_DISABLE_FRAGMENTS:
+ optval = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT);
+ break;
+ case SCTP_I_WANT_MAPPED_V4_ADDR:
+ optval = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4);
+ break;
+ case SCTP_AUTO_ASCONF:
+ optval = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
+ break;
+ case SCTP_EXPLICIT_EOR:
+ optval = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
+ break;
+ case SCTP_NODELAY:
+ optval = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY);
+ break;
+ case SCTP_USE_EXT_RCVINFO:
+ optval = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO);
+ break;
+ case SCTP_AUTOCLOSE:
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))
+ optval = TICKS_TO_SEC(inp->sctp_ep.auto_close_time);
+ else
+ optval = 0;
+ break;
+
+ default:
+ error = ENOPROTOOPT;
+		} /* end switch (opt) */
+ if (opt != SCTP_AUTOCLOSE) {
+ /* make it an "on/off" value */
+ optval = (optval != 0);
+ }
+ if ((size_t)m->m_len < sizeof(int)) {
+ error = EINVAL;
+ }
+ SCTP_INP_RUNLOCK(inp);
+ if (error == 0) {
+ /* return the option value */
+ *mtod(m, int *)= optval;
+ m->m_len = sizeof(optval);
+ }
+ break;
+ case SCTP_PARTIAL_DELIVERY_POINT:
+ {
+ if ((size_t)m->m_len < sizeof(unsigned int)) {
+ error = EINVAL;
+ break;
+ }
+ *mtod(m, unsigned int *)= inp->partial_delivery_point;
+ m->m_len = sizeof(unsigned int);
+ }
+ break;
+ case SCTP_FRAGMENT_INTERLEAVE:
+ {
+ if ((size_t)m->m_len < sizeof(unsigned int)) {
+ error = EINVAL;
+ break;
+ }
+ *mtod(m, unsigned int *)= sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+ m->m_len = sizeof(unsigned int);
+ }
+ break;
+ case SCTP_CMT_ON_OFF:
+ {
+ if ((size_t)m->m_len < sizeof(unsigned int)) {
+ error = EINVAL;
+ break;
+ }
+ *mtod(m, unsigned int *)= sctp_cmt_sockopt_on_off;
+ m->m_len = sizeof(unsigned int);
+ }
+ break;
+	case SCTP_CMT_USE_DAC:
+		{
+			if ((size_t)m->m_len < sizeof(unsigned int)) {
+				error = EINVAL;
+				break;
+			}
+			*mtod(m, unsigned int *) = sctp_cmt_sockopt_use_dac;
+			m->m_len = sizeof(unsigned int);
+		}
+		break;
+ case SCTP_GET_ADDR_LEN:
+ {
+ struct sctp_assoc_value *av;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_assoc_value)) {
+ error = EINVAL;
+ break;
+ }
+ av = mtod(m, struct sctp_assoc_value *);
+ error = EINVAL;
+#ifdef AF_INET
+ if (av->assoc_value == AF_INET) {
+ av->assoc_value = sizeof(struct sockaddr_in);
+ error = 0;
+ }
+#endif
+#ifdef AF_INET6
+ if (av->assoc_value == AF_INET6) {
+ av->assoc_value = sizeof(struct sockaddr_in6);
+ error = 0;
+ }
+#endif
+ }
+ break;
+ case SCTP_GET_ASOC_ID_LIST:
+ {
+ struct sctp_assoc_ids *ids;
+ int cnt, at;
+ uint16_t orig;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_assoc_ids)) {
+ error = EINVAL;
+ break;
+ }
+ ids = mtod(m, struct sctp_assoc_ids *);
+ cnt = 0;
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ none_out_now:
+ ids->asls_numb_present = 0;
+ ids->asls_more_to_get = 0;
+ SCTP_INP_RUNLOCK(inp);
+ break;
+ }
+ orig = ids->asls_assoc_start;
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ while (orig) {
+ stcb = LIST_NEXT(stcb, sctp_tcblist);
+ orig--;
+ cnt--;
+ if (stcb == NULL)
+ goto none_out_now;
+ }
+ if (stcb == NULL)
+ goto none_out_now;
+
+ at = 0;
+ ids->asls_numb_present = 0;
+ ids->asls_more_to_get = 1;
+ while (at < MAX_ASOC_IDS_RET) {
+ ids->asls_assoc_id[at] = sctp_get_associd(stcb);
+ at++;
+ ids->asls_numb_present++;
+ stcb = LIST_NEXT(stcb, sctp_tcblist);
+ if (stcb == NULL) {
+ ids->asls_more_to_get = 0;
+ break;
+ }
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ break;
+ case SCTP_CONTEXT:
+ {
+
+ struct sctp_assoc_value *av;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_assoc_value)) {
+ error = EINVAL;
+ break;
+ }
+ av = mtod(m, struct sctp_assoc_value *);
+ if (av->assoc_id) {
+ stcb = sctp_findassociation_ep_asocid(inp, av->assoc_id, 1);
+ if (stcb == NULL) {
+ error = ENOTCONN;
+ } else {
+ av->assoc_value = stcb->asoc.context;
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ } else {
+ av->assoc_value = inp->sctp_context;
+ }
+ }
+ break;
+ case SCTP_GET_NONCE_VALUES:
+ {
+ struct sctp_get_nonce_values *gnv;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_get_nonce_values)) {
+ error = EINVAL;
+ break;
+ }
+ gnv = mtod(m, struct sctp_get_nonce_values *);
+ stcb = sctp_findassociation_ep_asocid(inp, gnv->gn_assoc_id, 1);
+ if (stcb == NULL) {
+ error = ENOTCONN;
+ } else {
+ gnv->gn_peers_tag = stcb->asoc.peer_vtag;
+ gnv->gn_local_tag = stcb->asoc.my_vtag;
+ SCTP_TCB_UNLOCK(stcb);
+ }
+
+ }
+ break;
+ case SCTP_DELAYED_ACK_TIME:
+ {
+ struct sctp_assoc_value *tm;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_assoc_value)) {
+ error = EINVAL;
+ break;
+ }
+ tm = mtod(m, struct sctp_assoc_value *);
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ tm->assoc_value = stcb->asoc.delayed_ack;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ tm->assoc_value = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ stcb = sctp_findassociation_ep_asocid(inp, tm->assoc_id, 1);
+ if (stcb == NULL) {
+ error = ENOTCONN;
+ tm->assoc_value = 0;
+ } else {
+ stcb->asoc.delayed_ack = tm->assoc_value;
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ }
+ }
+ break;
+
+ case SCTP_GET_SNDBUF_USE:
+ if ((size_t)m->m_len < sizeof(struct sctp_sockstat)) {
+ error = EINVAL;
+ } else {
+ struct sctp_sockstat *ss;
+ struct sctp_tcb *stcb;
+ struct sctp_association *asoc;
+
+ ss = mtod(m, struct sctp_sockstat *);
+ stcb = sctp_findassociation_ep_asocid(inp, ss->ss_assoc_id, 1);
+ if (stcb == NULL) {
+ error = ENOTCONN;
+ } else {
+ asoc = &stcb->asoc;
+ ss->ss_total_sndbuf = (uint32_t) asoc->total_output_queue_size;
+ ss->ss_total_recv_buf = (uint32_t) (asoc->size_on_reasm_queue +
+ asoc->size_on_all_streams);
+ SCTP_TCB_UNLOCK(stcb);
+ error = 0;
+ m->m_len = sizeof(struct sctp_sockstat);
+ }
+ }
+ break;
+ case SCTP_MAXBURST:
+ {
+ uint8_t *burst;
+
+ burst = mtod(m, uint8_t *);
+ SCTP_INP_RLOCK(inp);
+ *burst = inp->sctp_ep.max_burst;
+ SCTP_INP_RUNLOCK(inp);
+ m->m_len = sizeof(uint8_t);
+ }
+ break;
+
+ case SCTP_MAXSEG:
+ {
+ uint32_t *segsize;
+ sctp_assoc_t *assoc_id;
+ int ovh;
+
+ if ((size_t)m->m_len < sizeof(uint32_t)) {
+ error = EINVAL;
+ break;
+ }
+ if ((size_t)m->m_len < sizeof(sctp_assoc_t)) {
+ error = EINVAL;
+ break;
+ }
+ assoc_id = mtod(m, sctp_assoc_t *);
+ segsize = mtod(m, uint32_t *);
+ m->m_len = sizeof(uint32_t);
+
+ if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ struct sctp_tcb *stcb;
+
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ *segsize = sctp_get_frag_point(stcb, &stcb->asoc);
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_RUNLOCK(inp);
+ goto skipit;
+ }
+ } else {
+ stcb = sctp_findassociation_ep_asocid(inp, *assoc_id, 1);
+ if (stcb) {
+ *segsize = sctp_get_frag_point(stcb, &stcb->asoc);
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ skipit:
+ /*
+ * default is to get the max, if I can't
+ * calculate from an existing association.
+ */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ovh = SCTP_MED_OVERHEAD;
+ } else {
+ ovh = SCTP_MED_V4_OVERHEAD;
+ }
+ *segsize = inp->sctp_frag_point - ovh;
+ }
+ }
+ break;
+
+ case SCTP_SET_DEBUG_LEVEL:
+#ifdef SCTP_DEBUG
+ {
+ uint32_t *level;
+
+ if ((size_t)m->m_len < sizeof(uint32_t)) {
+ error = EINVAL;
+ break;
+ }
+ level = mtod(m, uint32_t *);
+ error = 0;
+ *level = sctp_debug_on;
+ m->m_len = sizeof(uint32_t);
+			printf("Returning DEBUG LEVEL %x\n",
+			    (uint32_t) sctp_debug_on);
+ }
+#else /* SCTP_DEBUG */
+ error = EOPNOTSUPP;
+#endif
+ break;
+ case SCTP_GET_STAT_LOG:
+#ifdef SCTP_STAT_LOGGING
+ error = sctp_fill_stat_log(m);
+#else /* SCTP_STAT_LOGGING */
+ error = EOPNOTSUPP;
+#endif
+ break;
+ case SCTP_EVENTS:
+ {
+ struct sctp_event_subscribe *events;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_event_subscribe)) {
+ error = EINVAL;
+ break;
+ }
+ events = mtod(m, struct sctp_event_subscribe *);
+ memset(events, 0, sizeof(*events));
+ SCTP_INP_RLOCK(inp);
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT))
+ events->sctp_data_io_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT))
+ events->sctp_association_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT))
+ events->sctp_address_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
+ events->sctp_send_failure_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR))
+ events->sctp_peer_error_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
+ events->sctp_shutdown_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT))
+ events->sctp_partial_delivery_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT))
+ events->sctp_adaptation_layer_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT))
+ events->sctp_authentication_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
+ events->sctp_stream_reset_events = 1;
+ SCTP_INP_RUNLOCK(inp);
+ m->m_len = sizeof(struct sctp_event_subscribe);
+
+ }
+ break;
+
+ case SCTP_ADAPTATION_LAYER:
+ if ((size_t)m->m_len < sizeof(int)) {
+ error = EINVAL;
+ break;
+ }
+ SCTP_INP_RLOCK(inp);
+ *mtod(m, int *)= inp->sctp_ep.adaptation_layer_indicator;
+ SCTP_INP_RUNLOCK(inp);
+ m->m_len = sizeof(int);
+ break;
+ case SCTP_SET_INITIAL_DBG_SEQ:
+ if ((size_t)m->m_len < sizeof(int)) {
+ error = EINVAL;
+ break;
+ }
+ SCTP_INP_RLOCK(inp);
+ *mtod(m, int *)= inp->sctp_ep.initial_sequence_debug;
+ SCTP_INP_RUNLOCK(inp);
+ m->m_len = sizeof(int);
+ break;
+ case SCTP_GET_LOCAL_ADDR_SIZE:
+ if ((size_t)m->m_len < sizeof(int)) {
+ error = EINVAL;
+ break;
+ }
+ SCTP_INP_RLOCK(inp);
+ *mtod(m, int *)= sctp_count_max_addresses(inp);
+ SCTP_INP_RUNLOCK(inp);
+ m->m_len = sizeof(int);
+ break;
+ case SCTP_GET_REMOTE_ADDR_SIZE:
+ {
+ sctp_assoc_t *assoc_id;
+ uint32_t *val, sz;
+ struct sctp_nets *net;
+
+ if ((size_t)m->m_len < sizeof(sctp_assoc_t)) {
+ error = EINVAL;
+ break;
+ }
+ stcb = NULL;
+ val = mtod(m, uint32_t *);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ }
+ if (stcb == NULL) {
+ assoc_id = mtod(m, sctp_assoc_t *);
+ stcb = sctp_findassociation_ep_asocid(inp, *assoc_id, 1);
+ }
+ if (stcb == NULL) {
+ error = EINVAL;
+ break;
+ }
+ *val = 0;
+ sz = 0;
+ /* Count the sizes */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) ||
+ (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
+ sz += sizeof(struct sockaddr_in6);
+ } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
+ sz += sizeof(struct sockaddr_in);
+ } else {
+ /* huh */
+ break;
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ *val = sz;
+ m->m_len = sizeof(uint32_t);
+ }
+ break;
+ case SCTP_GET_PEER_ADDRESSES:
+		/*
+		 * Get the address information; an array is passed in and
+		 * we pack the addresses into it.
+		 */
+ {
+ int cpsz, left;
+ struct sockaddr_storage *sas;
+ struct sctp_nets *net;
+ struct sctp_getaddresses *saddr;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
+ error = EINVAL;
+ break;
+ }
+ left = m->m_len - sizeof(struct sctp_getaddresses);
+ saddr = mtod(m, struct sctp_getaddresses *);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ } else
+ stcb = sctp_findassociation_ep_asocid(inp,
+ saddr->sget_assoc_id, 1);
+ if (stcb == NULL) {
+ error = ENOENT;
+ break;
+ }
+ m->m_len = sizeof(struct sctp_getaddresses);
+ sas = (struct sockaddr_storage *)&saddr->addr[0];
+
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) ||
+ (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
+ cpsz = sizeof(struct sockaddr_in6);
+ } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
+ cpsz = sizeof(struct sockaddr_in);
+ } else {
+ /* huh */
+ break;
+ }
+ if (left < cpsz) {
+ /* not enough room. */
+ break;
+ }
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
+ (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) {
+ /* Must map the address */
+ in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr,
+ (struct sockaddr_in6 *)sas);
+ } else {
+ memcpy(sas, &net->ro._l_addr, cpsz);
+ }
+ ((struct sockaddr_in *)sas)->sin_port = stcb->rport;
+
+ sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz);
+ left -= cpsz;
+ m->m_len += cpsz;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ break;
+ case SCTP_GET_LOCAL_ADDRESSES:
+ {
+ int limit, actual;
+ struct sockaddr_storage *sas;
+ struct sctp_getaddresses *saddr;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
+ error = EINVAL;
+ break;
+ }
+ saddr = mtod(m, struct sctp_getaddresses *);
+
+ if (saddr->sget_assoc_id) {
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ } else
+ stcb = sctp_findassociation_ep_asocid(inp, saddr->sget_assoc_id, 1);
+
+ } else {
+ stcb = NULL;
+ }
+			/*
+			 * Ensure that the TCP model does not need an assoc
+			 * id once connected.
+			 */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) &&
+ (stcb == NULL)) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ }
+ sas = (struct sockaddr_storage *)&saddr->addr[0];
+ limit = m->m_len - sizeof(sctp_assoc_t);
+ actual = sctp_fill_up_addresses(inp, stcb, limit, sas);
+ if (stcb)
+ SCTP_TCB_UNLOCK(stcb);
+ m->m_len = sizeof(struct sockaddr_storage) + actual;
+ }
+ break;
+ case SCTP_PEER_ADDR_PARAMS:
+ {
+ struct sctp_paddrparams *paddrp;
+ struct sctp_nets *net;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_paddrparams)) {
+ error = EINVAL;
+ break;
+ }
+ paddrp = mtod(m, struct sctp_paddrparams *);
+
+ net = NULL;
+ if (paddrp->spp_assoc_id) {
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
+ }
+					SCTP_INP_RUNLOCK(inp);
+ } else {
+ stcb = sctp_findassociation_ep_asocid(inp, paddrp->spp_assoc_id, 1);
+ }
+ if (stcb == NULL) {
+ error = ENOENT;
+ break;
+ }
+ }
+ if ((stcb == NULL) &&
+ ((((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET) ||
+ (((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET6))) {
+ /* Lookup via address */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp,
+ (struct sockaddr *)&paddrp->spp_address,
+ &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+
+ if (stcb == NULL) {
+ error = ENOENT;
+ break;
+ }
+ }
+ if (stcb) {
+				/* Applies to the specific association */
+ paddrp->spp_flags = 0;
+ if (net) {
+ paddrp->spp_pathmaxrxt = net->failure_threshold;
+ paddrp->spp_pathmtu = net->mtu;
+ /* get flags for HB */
+ if (net->dest_state & SCTP_ADDR_NOHB)
+ paddrp->spp_flags |= SPP_HB_DISABLE;
+ else
+ paddrp->spp_flags |= SPP_HB_ENABLE;
+ /* get flags for PMTU */
+ if (callout_pending(&net->pmtu_timer.timer)) {
+ paddrp->spp_flags |= SPP_PMTUD_ENABLE;
+ } else {
+ paddrp->spp_flags |= SPP_PMTUD_DISABLE;
+ }
+#ifdef AF_INET
+ if (net->ro._l_addr.sin.sin_family == AF_INET) {
+ paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc;
+ paddrp->spp_flags |= SPP_IPV4_TOS;
+ }
+#endif
+#ifdef AF_INET6
+ if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
+ paddrp->spp_ipv6_flowlabel = net->tos_flowlabel;
+ paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
+ }
+#endif
+ } else {
+ /*
+ * No destination so return default
+ * value
+ */
+ paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure;
+ paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc);
+#ifdef AF_INET
+ paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc;
+ paddrp->spp_flags |= SPP_IPV4_TOS;
+#endif
+#ifdef AF_INET6
+ paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel;
+ paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
+#endif
+ /* default settings should be these */
+ if (sctp_is_hb_timer_running(stcb)) {
+ paddrp->spp_flags |= SPP_HB_ENABLE;
+ }
+ }
+ paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay;
+ paddrp->spp_sackdelay = stcb->asoc.delayed_ack;
+				/*
+				 * Currently we don't support disabling the
+				 * sack delay (SPP_SACKDELAY_DISABLE).
+				 */
+ paddrp->spp_flags |= SPP_SACKDELAY_ENABLE;
+ paddrp->spp_assoc_id = sctp_get_associd(stcb);
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /* Use endpoint defaults */
+ SCTP_INP_RLOCK(inp);
+ paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure;
+ paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
+ paddrp->spp_sackdelay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
+ paddrp->spp_assoc_id = (sctp_assoc_t) 0;
+ /* get inp's default */
+#ifdef AF_INET
+ paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos;
+ paddrp->spp_flags |= SPP_IPV4_TOS;
+#endif
+#ifdef AF_INET6
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
+ paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
+ }
+#endif
+ /* can't return this */
+ paddrp->spp_pathmaxrxt = 0;
+ paddrp->spp_pathmtu = 0;
+ /* default behavior, no stcb */
+ paddrp->spp_flags = SPP_HB_ENABLE | SPP_SACKDELAY_ENABLE | SPP_PMTUD_ENABLE;
+
+ SCTP_INP_RUNLOCK(inp);
+ }
+ m->m_len = sizeof(struct sctp_paddrparams);
+ }
+ break;
+ case SCTP_GET_PEER_ADDR_INFO:
+ {
+ struct sctp_paddrinfo *paddri;
+ struct sctp_nets *net;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_paddrinfo)) {
+ error = EINVAL;
+ break;
+ }
+ paddri = mtod(m, struct sctp_paddrinfo *);
+ net = NULL;
+ if ((((struct sockaddr *)&paddri->spinfo_address)->sa_family == AF_INET) ||
+ (((struct sockaddr *)&paddri->spinfo_address)->sa_family == AF_INET6)) {
+ /* Lookup via address */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ net = sctp_findnet(stcb,
+ (struct sockaddr *)&paddri->spinfo_address);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp,
+ (struct sockaddr *)&paddri->spinfo_address,
+ &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+
+ } else {
+ stcb = NULL;
+ }
+ if ((stcb == NULL) || (net == NULL)) {
+ if (stcb) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ error = ENOENT;
+ break;
+ }
+ m->m_len = sizeof(struct sctp_paddrinfo);
+ paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK | SCTP_ADDR_NOHB);
+ paddri->spinfo_cwnd = net->cwnd;
+ paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
+ paddri->spinfo_rto = net->RTO;
+ paddri->spinfo_assoc_id = sctp_get_associd(stcb);
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ break;
+ case SCTP_PCB_STATUS:
+ {
+ struct sctp_pcbinfo *spcb;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_pcbinfo)) {
+ error = EINVAL;
+ break;
+ }
+ spcb = mtod(m, struct sctp_pcbinfo *);
+ sctp_fill_pcbinfo(spcb);
+ m->m_len = sizeof(struct sctp_pcbinfo);
+ }
+ break;
+ case SCTP_STATUS:
+ {
+ struct sctp_nets *net;
+ struct sctp_status *sstat;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_status)) {
+ error = EINVAL;
+ break;
+ }
+ sstat = mtod(m, struct sctp_status *);
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ } else
+ stcb = sctp_findassociation_ep_asocid(inp, sstat->sstat_assoc_id, 1);
+
+ if (stcb == NULL) {
+ error = EINVAL;
+ break;
+ }
+ /*
+			/*
+			 * I think passing the state is fine since
+			 * sctp_constants.h will be available to userland.
+			 */
+ sstat->sstat_state = stcb->asoc.state;
+ sstat->sstat_rwnd = stcb->asoc.peers_rwnd;
+ sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt;
+ /*
+ * We can't include chunks that have been passed to
+ * the socket layer. Only things in queue.
+ */
+ sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue +
+ stcb->asoc.cnt_on_all_streams);
+
+
+ sstat->sstat_instrms = stcb->asoc.streamincnt;
+ sstat->sstat_outstrms = stcb->asoc.streamoutcnt;
+ sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc);
+ memcpy(&sstat->sstat_primary.spinfo_address,
+ &stcb->asoc.primary_destination->ro._l_addr,
+ ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len);
+ net = stcb->asoc.primary_destination;
+ ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport;
+ /*
+ * Again the user can get info from sctp_constants.h
+ * for what the state of the network is.
+ */
+ sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK;
+ sstat->sstat_primary.spinfo_cwnd = net->cwnd;
+ sstat->sstat_primary.spinfo_srtt = net->lastsa;
+ sstat->sstat_primary.spinfo_rto = net->RTO;
+ sstat->sstat_primary.spinfo_mtu = net->mtu;
+ sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb);
+ SCTP_TCB_UNLOCK(stcb);
+ m->m_len = sizeof(*sstat);
+ }
+ break;
+ case SCTP_RTOINFO:
+ {
+ struct sctp_rtoinfo *srto;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_rtoinfo)) {
+ error = EINVAL;
+ break;
+ }
+ srto = mtod(m, struct sctp_rtoinfo *);
+ if (srto->srto_assoc_id == 0) {
+ /* Endpoint only please */
+ SCTP_INP_RLOCK(inp);
+ srto->srto_initial = inp->sctp_ep.initial_rto;
+ srto->srto_max = inp->sctp_ep.sctp_maxrto;
+ srto->srto_min = inp->sctp_ep.sctp_minrto;
+ SCTP_INP_RUNLOCK(inp);
+ break;
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ } else
+ stcb = sctp_findassociation_ep_asocid(inp, srto->srto_assoc_id, 1);
+
+ if (stcb == NULL) {
+ error = EINVAL;
+ break;
+ }
+ srto->srto_initial = stcb->asoc.initial_rto;
+ srto->srto_max = stcb->asoc.maxrto;
+ srto->srto_min = stcb->asoc.minrto;
+ SCTP_TCB_UNLOCK(stcb);
+ m->m_len = sizeof(*srto);
+ }
+ break;
+ case SCTP_ASSOCINFO:
+ {
+ struct sctp_assocparams *sasoc;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_assocparams)) {
+ error = EINVAL;
+ break;
+ }
+ sasoc = mtod(m, struct sctp_assocparams *);
+ stcb = NULL;
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+
+ SCTP_TCB_LOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ } else if (sasoc->sasoc_assoc_id) {
+ stcb = sctp_findassociation_ep_asocid(inp,
+ sasoc->sasoc_assoc_id, 1);
+ if (stcb == NULL) {
+ error = ENOENT;
+ break;
+ }
+ } else {
+ stcb = NULL;
+ }
+ if (stcb) {
+ sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times;
+ sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
+ sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd;
+ sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd;
+ sasoc->sasoc_cookie_life = stcb->asoc.cookie_life;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_RLOCK(inp);
+ sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times;
+ sasoc->sasoc_number_peer_destinations = 0;
+ sasoc->sasoc_peer_rwnd = 0;
+ sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv);
+ sasoc->sasoc_cookie_life = inp->sctp_ep.def_cookie_life;
+ SCTP_INP_RUNLOCK(inp);
+ }
+ m->m_len = sizeof(*sasoc);
+ }
+ break;
+ case SCTP_DEFAULT_SEND_PARAM:
+ {
+ struct sctp_sndrcvinfo *s_info;
+
+ if (m->m_len != sizeof(struct sctp_sndrcvinfo)) {
+ error = EINVAL;
+ break;
+ }
+ s_info = mtod(m, struct sctp_sndrcvinfo *);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ } else
+ stcb = sctp_findassociation_ep_asocid(inp, s_info->sinfo_assoc_id, 1);
+
+ if (stcb == NULL) {
+ error = ENOENT;
+ break;
+ }
+ /* Copy it out */
+ *s_info = stcb->asoc.def_send;
+ SCTP_TCB_UNLOCK(stcb);
+ m->m_len = sizeof(*s_info);
+ }
+ break;
+ case SCTP_INITMSG:
+ {
+ struct sctp_initmsg *sinit;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_initmsg)) {
+ error = EINVAL;
+ break;
+ }
+ sinit = mtod(m, struct sctp_initmsg *);
+ SCTP_INP_RLOCK(inp);
+ sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count;
+ sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome;
+ sinit->sinit_max_attempts = inp->sctp_ep.max_init_times;
+ sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max;
+ SCTP_INP_RUNLOCK(inp);
+ m->m_len = sizeof(*sinit);
+ }
+ break;
+ case SCTP_PRIMARY_ADDR:
+ /* we allow a "get" operation on this */
+ {
+ struct sctp_setprim *ssp;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_setprim)) {
+ error = EINVAL;
+ break;
+ }
+ ssp = mtod(m, struct sctp_setprim *);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ stcb = sctp_findassociation_ep_asocid(inp, ssp->ssp_assoc_id, 1);
+ if (stcb == NULL) {
+					/*
+					 * One last shot, try it by the
+					 * address passed in.
+					 */
+ struct sctp_nets *net;
+
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp,
+ (struct sockaddr *)&ssp->ssp_addr,
+ &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+ if (stcb == NULL) {
+ error = EINVAL;
+ break;
+ }
+ }
+ /* simply copy out the sockaddr_storage... */
+ memcpy(&ssp->ssp_addr,
+ &stcb->asoc.primary_destination->ro._l_addr,
+ ((struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr)->sa_len);
+ SCTP_TCB_UNLOCK(stcb);
+ m->m_len = sizeof(*ssp);
+ }
+ break;
+
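+		/*
+		 * SCTP_HMAC_IDENT returns a variable-sized reply: a struct
+		 * sctp_hmacalgo header followed by one shmac_idents[] slot
+		 * per locally supported HMAC algorithm.
+		 */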
+ case SCTP_HMAC_IDENT:
+ {
+ struct sctp_hmacalgo *shmac;
+ sctp_hmaclist_t *hmaclist;
+ uint32_t size;
+ int i;
+
+ if ((size_t)(m->m_len) < sizeof(*shmac)) {
+ error = EINVAL;
+ break;
+ }
+ shmac = mtod(m, struct sctp_hmacalgo *);
+ SCTP_INP_RLOCK(inp);
+ hmaclist = inp->sctp_ep.local_hmacs;
+			if (hmaclist == NULL) {
+				/* no HMACs to return */
+				m->m_len = sizeof(*shmac);
+				SCTP_INP_RUNLOCK(inp);
+				break;
+			}
+ /* is there room for all of the hmac ids? */
+ size = sizeof(*shmac) + (hmaclist->num_algo *
+ sizeof(shmac->shmac_idents[0]));
+ if ((size_t)(m->m_len) < size) {
+ error = EINVAL;
+ SCTP_INP_RUNLOCK(inp);
+ break;
+ }
+ /* copy in the list */
+ for (i = 0; i < hmaclist->num_algo; i++)
+ shmac->shmac_idents[i] = hmaclist->hmac[i];
+ SCTP_INP_RUNLOCK(inp);
+ m->m_len = size;
+ break;
+ }
+ case SCTP_AUTH_ACTIVE_KEY:
+ {
+ struct sctp_authkeyid *scact;
+
+ if ((size_t)(m->m_len) < sizeof(*scact)) {
+ error = EINVAL;
+ break;
+ }
+ scact = mtod(m, struct sctp_authkeyid *);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ /*
+ * if one-to-one, get from the connected
+ * assoc; else endpoint
+ */
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ } else if (scact->scact_assoc_id) {
+ stcb = sctp_findassociation_ep_asocid(inp, scact->scact_assoc_id, 1);
+ if (stcb == NULL) {
+ error = ENOENT;
+ break;
+ }
+ }
+ if (stcb != NULL) {
+ /* get the active key on the assoc */
+ scact->scact_keynumber = stcb->asoc.authinfo.assoc_keyid;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /* get the endpoint active key */
+ SCTP_INP_RLOCK(inp);
+ scact->scact_keynumber = inp->sctp_ep.default_keyid;
+ SCTP_INP_RUNLOCK(inp);
+ }
+ m->m_len = sizeof(*scact);
+ break;
+ }
+ case SCTP_LOCAL_AUTH_CHUNKS:
+ {
+ struct sctp_authchunks *sac;
+ sctp_auth_chklist_t *chklist = NULL;
+ int size = 0;
+
+ if ((size_t)(m->m_len) < sizeof(*sac)) {
+ error = EINVAL;
+ break;
+ }
+ sac = mtod(m, struct sctp_authchunks *);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ /*
+ * if one-to-one, get from the connected
+ * assoc; else endpoint
+ */
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb != NULL)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ } else if (sac->gauth_assoc_id) {
+ stcb = sctp_findassociation_ep_asocid(inp, sac->gauth_assoc_id, 1);
+ if (stcb == NULL) {
+ error = ENOENT;
+ break;
+ }
+ }
+ if (stcb != NULL) {
+ /* get off the assoc */
+ chklist = stcb->asoc.local_auth_chunks;
+ if (chklist == NULL) {
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ /* is there enough space? */
+ size = sctp_auth_get_chklist_size(chklist);
+ if ((size_t)m->m_len < (sizeof(struct sctp_authchunks) + size)) {
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ /* copy in the chunks */
+ sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /* get off the endpoint */
+ SCTP_INP_RLOCK(inp);
+ chklist = inp->sctp_ep.local_auth_chunks;
+ if (chklist == NULL) {
+ error = EINVAL;
+ SCTP_INP_RUNLOCK(inp);
+ break;
+ }
+ /* is there enough space? */
+ size = sctp_auth_get_chklist_size(chklist);
+ if ((size_t)m->m_len < (sizeof(struct sctp_authchunks) + size)) {
+ error = EINVAL;
+ SCTP_INP_RUNLOCK(inp);
+ break;
+ }
+ /* copy in the chunks */
+ sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
+ SCTP_INP_RUNLOCK(inp);
+ }
+ m->m_len = sizeof(struct sctp_authchunks) + size;
+ break;
+ }
+ case SCTP_PEER_AUTH_CHUNKS:
+ {
+ struct sctp_authchunks *sac;
+ sctp_auth_chklist_t *chklist = NULL;
+ int size = 0;
+
+ if ((size_t)(m->m_len) < sizeof(*sac)) {
+ error = EINVAL;
+ break;
+ }
+ sac = mtod(m, struct sctp_authchunks *);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ /*
+ * if one-to-one, get from the connected
+ * assoc, else endpoint
+ */
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb != NULL)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ } else if (sac->gauth_assoc_id) {
+ stcb = sctp_findassociation_ep_asocid(inp, sac->gauth_assoc_id, 1);
+ }
+ if (stcb == NULL) {
+ error = ENOENT;
+ break;
+ }
+ /* get off the assoc */
+ chklist = stcb->asoc.peer_auth_chunks;
+ if (chklist == NULL) {
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ /* is there enough space? */
+ size = sctp_auth_get_chklist_size(chklist);
+ if ((size_t)m->m_len < (sizeof(struct sctp_authchunks) + size)) {
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ /* copy in the chunks */
+ sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
+ SCTP_TCB_UNLOCK(stcb);
+ m->m_len = sizeof(struct sctp_authchunks) + size;
+ break;
+ }
+
+
+ default:
+ error = ENOPROTOOPT;
+ m->m_len = 0;
+ break;
+	} /* end switch (opt) */
+ return (error);
+}
+
+
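+/*
+ * sctp_optsset() handles setsockopt() on an SCTP socket.  Options that
+ * take an association id apply to that association when one is given (or
+ * to the single association of a connected one-to-one socket); otherwise
+ * they set the endpoint default.
+ */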
+static int
+sctp_optsset(struct socket *so,
+ int opt,
+ struct mbuf **mp,
+ struct thread *p
+)
+{
+ int error, *mopt, set_opt, s;
+ struct mbuf *m;
+ struct sctp_tcb *stcb = NULL;
+ struct sctp_inpcb *inp;
+
+ if (mp == NULL) {
+ return (EINVAL);
+ }
+ m = *mp;
+ if (m == NULL)
+ return (EINVAL);
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL)
+		return (EINVAL);
+
+ error = 0;
+ switch (opt) {
+ case SCTP_NODELAY:
+ case SCTP_AUTOCLOSE:
+ case SCTP_AUTO_ASCONF:
+ case SCTP_EXPLICIT_EOR:
+ case SCTP_DISABLE_FRAGMENTS:
+ case SCTP_USE_EXT_RCVINFO:
+ case SCTP_I_WANT_MAPPED_V4_ADDR:
+ /* copy in the option value */
+ if ((size_t)m->m_len < sizeof(int)) {
+ error = EINVAL;
+ break;
+ }
+ mopt = mtod(m, int *);
+ set_opt = 0;
+ switch (opt) {
+ case SCTP_DISABLE_FRAGMENTS:
+ set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT;
+ break;
+ case SCTP_AUTO_ASCONF:
+ set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF;
+ break;
+ case SCTP_EXPLICIT_EOR:
+ set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR;
+ break;
+ case SCTP_USE_EXT_RCVINFO:
+ set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO;
+ break;
+ case SCTP_I_WANT_MAPPED_V4_ADDR:
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
+ } else {
+ return (EINVAL);
+ }
+ break;
+ case SCTP_NODELAY:
+ set_opt = SCTP_PCB_FLAGS_NODELAY;
+ break;
+ case SCTP_AUTOCLOSE:
+ set_opt = SCTP_PCB_FLAGS_AUTOCLOSE;
+			/*
+			 * The value is in ticks.  Note this does not affect
+			 * old associations, only new ones.
+			 */
+ inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt);
+ break;
+ }
+ SCTP_INP_WLOCK(inp);
+ if (*mopt != 0) {
+ sctp_feature_on(inp, set_opt);
+ } else {
+ sctp_feature_off(inp, set_opt);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ break;
+ case SCTP_PARTIAL_DELIVERY_POINT:
+ {
+ if ((size_t)m->m_len < sizeof(unsigned int)) {
+ error = EINVAL;
+ break;
+ }
+ inp->partial_delivery_point = *mtod(m, unsigned int *);
+ m->m_len = sizeof(unsigned int);
+ }
+ break;
+ case SCTP_FRAGMENT_INTERLEAVE:
+ /* not yet until we re-write sctp_recvmsg() */
+ {
+ int on_off;
+
+ if ((size_t)m->m_len < sizeof(int)) {
+ error = EINVAL;
+ break;
+ }
+			on_off = *mtod(m, int *);
+ if (on_off) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+ }
+ }
+ break;
+ case SCTP_CMT_ON_OFF:
+ {
+ struct sctp_assoc_value *av;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_assoc_value)) {
+ error = EINVAL;
+ break;
+ }
+ av = mtod(m, struct sctp_assoc_value *);
+ stcb = sctp_findassociation_ep_asocid(inp, av->assoc_id, 1);
+ if (stcb == NULL) {
+ error = ENOTCONN;
+ } else {
+ if (sctp_cmt_on_off) {
+ stcb->asoc.sctp_cmt_on_off = (uint8_t) av->assoc_value;
+ } else {
+ if ((stcb->asoc.sctp_cmt_on_off) && (av->assoc_value == 0)) {
+ stcb->asoc.sctp_cmt_on_off = 0;
+ } else {
+ error = EACCES;
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ }
+ break;
+ case SCTP_CMT_USE_DAC:
+ {
+ if ((size_t)m->m_len < sizeof(unsigned int)) {
+ error = EINVAL;
+ break;
+ }
+ sctp_cmt_sockopt_use_dac = *mtod(m, unsigned int *);
+ if (sctp_cmt_sockopt_use_dac != 0)
+ sctp_cmt_sockopt_use_dac = 1;
+ }
+ break;
+ case SCTP_CLR_STAT_LOG:
+#ifdef SCTP_STAT_LOGGING
+ sctp_clr_stat_log();
+#else
+ error = EOPNOTSUPP;
+#endif
+ break;
+ case SCTP_CONTEXT:
+ {
+
+ struct sctp_assoc_value *av;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_assoc_value)) {
+ error = EINVAL;
+ break;
+ }
+ av = mtod(m, struct sctp_assoc_value *);
+ if (av->assoc_id) {
+ stcb = sctp_findassociation_ep_asocid(inp, av->assoc_id, 1);
+ if (stcb == NULL) {
+ error = ENOTCONN;
+ } else {
+ stcb->asoc.context = av->assoc_value;
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ } else {
+ inp->sctp_context = av->assoc_value;
+ }
+ }
+ break;
+ case SCTP_DELAYED_ACK_TIME:
+ {
+ struct sctp_assoc_value *tm;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_assoc_value)) {
+ error = EINVAL;
+ break;
+ }
+ tm = mtod(m, struct sctp_assoc_value *);
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_WLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ stcb->asoc.delayed_ack = tm->assoc_value;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(tm->assoc_value);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ } else {
+ if (tm->assoc_id) {
+ stcb = sctp_findassociation_ep_asocid(inp, tm->assoc_id, 1);
+ if (stcb == NULL) {
+ error = ENOTCONN;
+ } else {
+ stcb->asoc.delayed_ack = tm->assoc_value;
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ } else {
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(tm->assoc_value);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ }
+ }
+ break;
+
+ case SCTP_AUTH_CHUNK:
+ {
+ struct sctp_authchunk *sauth;
+
+ if ((size_t)m->m_len < sizeof(*sauth)) {
+ error = EINVAL;
+ break;
+ }
+ sauth = mtod(m, struct sctp_authchunk *);
+ if (sctp_auth_add_chunk(sauth->sauth_chunk,
+ inp->sctp_ep.local_auth_chunks))
+ error = EINVAL;
+ break;
+ }
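+	/*
+	 * SCTP_AUTH_KEY carries a struct sctp_authkey header followed by
+	 * the raw key bytes; the key length is everything in the mbuf
+	 * past the header (m_len - sizeof(*sca)).
+	 */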
+ case SCTP_AUTH_KEY:
+ {
+ struct sctp_authkey *sca;
+ struct sctp_keyhead *shared_keys;
+ sctp_sharedkey_t *shared_key;
+ sctp_key_t *key = NULL;
+ int size;
+
+ size = m->m_len - sizeof(*sca);
+ if (size < 0) {
+ error = EINVAL;
+ break;
+ }
+ sca = mtod(m, struct sctp_authkey *);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ /*
+ * if one-to-one, set it on the connected
+ * assoc; else endpoint
+ */
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ } else if (sca->sca_assoc_id) {
+ stcb = sctp_findassociation_ep_asocid(inp, sca->sca_assoc_id, 1);
+ if (stcb == NULL) {
+ error = ENOENT;
+ break;
+ }
+ }
+ if (stcb != NULL) {
+ /* set it on the assoc */
+ shared_keys = &stcb->asoc.shared_keys;
+ /* clear the cached keys for this key id */
+ sctp_clear_cachedkeys(stcb, sca->sca_keynumber);
+ /*
+ * create the new shared key and
+ * insert/replace it
+ */
+ if (size > 0) {
+ key = sctp_set_key(sca->sca_key, (uint32_t) size);
+ if (key == NULL) {
+ error = ENOMEM;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ }
+ shared_key = sctp_alloc_sharedkey();
+ if (shared_key == NULL) {
+ sctp_free_key(key);
+ error = ENOMEM;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ shared_key->key = key;
+ shared_key->keyid = sca->sca_keynumber;
+ sctp_insert_sharedkey(shared_keys, shared_key);
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+			/* set it on the endpoint */
+ SCTP_INP_WLOCK(inp);
+ shared_keys = &inp->sctp_ep.shared_keys;
+ /*
+ * clear the cached keys on all assocs for
+ * this key id
+ */
+ sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber);
+ /*
+ * create the new shared key and
+ * insert/replace it
+ */
+ if (size > 0) {
+ key = sctp_set_key(sca->sca_key, (uint32_t) size);
+ if (key == NULL) {
+ error = ENOMEM;
+ SCTP_INP_WUNLOCK(inp);
+ break;
+ }
+ }
+ shared_key = sctp_alloc_sharedkey();
+ if (shared_key == NULL) {
+ sctp_free_key(key);
+ error = ENOMEM;
+ SCTP_INP_WUNLOCK(inp);
+ break;
+ }
+ shared_key->key = key;
+ shared_key->keyid = sca->sca_keynumber;
+ sctp_insert_sharedkey(shared_keys, shared_key);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ break;
+ }
+ case SCTP_HMAC_IDENT:
+ {
+ struct sctp_hmacalgo *shmac;
+ sctp_hmaclist_t *hmaclist;
+ uint32_t hmacid;
+ int size, i;
+
+ size = m->m_len - sizeof(*shmac);
+ if (size < 0) {
+ error = EINVAL;
+ break;
+ }
+ shmac = mtod(m, struct sctp_hmacalgo *);
+ size = size / sizeof(shmac->shmac_idents[0]);
+ hmaclist = sctp_alloc_hmaclist(size);
+ if (hmaclist == NULL) {
+ error = ENOMEM;
+ break;
+ }
+ for (i = 0; i < size; i++) {
+ hmacid = shmac->shmac_idents[i];
+ if (sctp_auth_add_hmacid(hmaclist, (uint16_t) hmacid)) {
+					/* invalid HMACs were found */
+ error = EINVAL;
+ goto sctp_set_hmac_done;
+ }
+ }
+ /* set it on the endpoint */
+ SCTP_INP_WLOCK(inp);
+ if (inp->sctp_ep.local_hmacs)
+ sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
+ inp->sctp_ep.local_hmacs = hmaclist;
+ SCTP_INP_WUNLOCK(inp);
+ sctp_set_hmac_done:
+ break;
+ }
+ case SCTP_AUTH_ACTIVE_KEY:
+ {
+ struct sctp_authkeyid *scact;
+
+ if ((size_t)m->m_len < sizeof(*scact)) {
+ error = EINVAL;
+ break;
+ }
+ scact = mtod(m, struct sctp_authkeyid *);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ /*
+ * if one-to-one, set it on the connected
+ * assoc; else endpoint
+ */
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ } else if (scact->scact_assoc_id) {
+ stcb = sctp_findassociation_ep_asocid(inp, scact->scact_assoc_id, 1);
+ if (stcb == NULL) {
+ error = ENOENT;
+ break;
+ }
+ }
+ /* set the active key on the right place */
+ if (stcb != NULL) {
+ /* set the active key on the assoc */
+ if (sctp_auth_setactivekey(stcb, scact->scact_keynumber))
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /* set the active key on the endpoint */
+ SCTP_INP_WLOCK(inp);
+ if (sctp_auth_setactivekey_ep(inp, scact->scact_keynumber))
+ error = EINVAL;
+ SCTP_INP_WUNLOCK(inp);
+ }
+ break;
+ }
+ case SCTP_AUTH_DELETE_KEY:
+ {
+ struct sctp_authkeyid *scdel;
+
+ if ((size_t)m->m_len < sizeof(*scdel)) {
+ error = EINVAL;
+ break;
+ }
+ scdel = mtod(m, struct sctp_authkeyid *);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ /*
+ * if one-to-one, delete from the connected
+ * assoc; else endpoint
+ */
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ } else if (scdel->scact_assoc_id) {
+ stcb = sctp_findassociation_ep_asocid(inp, scdel->scact_assoc_id, 1);
+ if (stcb == NULL) {
+ error = ENOENT;
+ break;
+ }
+ }
+ /* delete the key from the right place */
+ if (stcb != NULL) {
+ if (sctp_delete_sharedkey(stcb, scdel->scact_keynumber))
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_WLOCK(inp);
+ if (sctp_delete_sharedkey_ep(inp, scdel->scact_keynumber))
+ error = EINVAL;
+ SCTP_INP_WUNLOCK(inp);
+ }
+ break;
+ }
+
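+	/*
+	 * SCTP_RESET_STREAMS: strrst_flags selects what is reset --
+	 * SCTP_RESET_LOCAL_RECV (inbound), SCTP_RESET_LOCAL_SEND
+	 * (outbound), SCTP_RESET_BOTH, or SCTP_RESET_TSN.  The request is
+	 * only sent if the peer advertised stream-reset support and no
+	 * other reset is outstanding.
+	 */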
+ case SCTP_RESET_STREAMS:
+ {
+ struct sctp_stream_reset *strrst;
+ uint8_t send_in = 0, send_tsn = 0, send_out = 0;
+ int i;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_stream_reset)) {
+ error = EINVAL;
+ break;
+ }
+ strrst = mtod(m, struct sctp_stream_reset *);
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ } else
+ stcb = sctp_findassociation_ep_asocid(inp, strrst->strrst_assoc_id, 1);
+ if (stcb == NULL) {
+ error = ENOENT;
+ break;
+ }
+ if (stcb->asoc.peer_supports_strreset == 0) {
+				/*
+				 * The peer does not support it; return
+				 * "protocol not supported" since that is
+				 * true for this feature and this peer,
+				 * not for the socket request in general.
+				 */
+ error = EPROTONOSUPPORT;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ if (stcb->asoc.stream_reset_outstanding) {
+ error = EALREADY;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) {
+ send_in = 1;
+ } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) {
+ send_out = 1;
+ } else if (strrst->strrst_flags == SCTP_RESET_BOTH) {
+ send_in = 1;
+ send_out = 1;
+ } else if (strrst->strrst_flags == SCTP_RESET_TSN) {
+ send_tsn = 1;
+ } else {
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ for (i = 0; i < strrst->strrst_num_streams; i++) {
+				if ((send_in) &&
+				    (strrst->strrst_list[i] > stcb->asoc.streamincnt)) {
+ error = EINVAL;
+ goto get_out;
+ }
+ if ((send_out) &&
+ (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) {
+ error = EINVAL;
+ goto get_out;
+ }
+ }
+ if (error) {
+ get_out:
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams,
+ strrst->strrst_list,
+ send_out, (stcb->asoc.str_reset_seq_in - 3),
+ send_in, send_tsn);
+
+ s = splnet();
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ);
+ SCTP_TCB_UNLOCK(stcb);
+ splx(s);
+
+ }
+ break;
+ case SCTP_CONNECT_X:
+ if ((size_t)m->m_len < (sizeof(int) + sizeof(struct sockaddr_in))) {
+ error = EINVAL;
+ break;
+ }
+ error = sctp_do_connect_x(so, inp, m, p, 0);
+ break;
+
+ case SCTP_CONNECT_X_DELAYED:
+ if ((size_t)m->m_len < (sizeof(int) + sizeof(struct sockaddr_in))) {
+ error = EINVAL;
+ break;
+ }
+ error = sctp_do_connect_x(so, inp, m, p, 1);
+ break;
+
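+	/*
+	 * SCTP_CONNECT_X_COMPLETE finishes a delayed connect: it stops
+	 * the INIT timer armed by SCTP_CONNECT_X_DELAYED and sends the
+	 * INIT now.  If the association was not set up delayed (or has
+	 * already started), EALREADY is returned.
+	 */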
+ case SCTP_CONNECT_X_COMPLETE:
+ {
+ struct sockaddr *sa;
+ struct sctp_nets *net;
+
+ if ((size_t)m->m_len < sizeof(struct sockaddr_in)) {
+ error = EINVAL;
+ break;
+ }
+ sa = mtod(m, struct sockaddr *);
+ /* find tcb */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ net = sctp_findnet(stcb, sa);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+
+ if (stcb == NULL) {
+ error = ENOENT;
+ break;
+ }
+ if (stcb->asoc.delayed_connection == 1) {
+ stcb->asoc.delayed_connection = 0;
+ SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+ sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
+ sctp_send_initiate(inp, stcb);
+ } else {
+ /*
+ * already expired or did not use delayed
+ * connectx
+ */
+ error = EALREADY;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ break;
+ case SCTP_MAXBURST:
+ {
+ uint8_t *burst;
+
+ SCTP_INP_WLOCK(inp);
+ burst = mtod(m, uint8_t *);
+ if (*burst) {
+ inp->sctp_ep.max_burst = *burst;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ break;
+ case SCTP_MAXSEG:
+ {
+ uint32_t *segsize;
+ int ovh;
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ovh = SCTP_MED_OVERHEAD;
+ } else {
+ ovh = SCTP_MED_V4_OVERHEAD;
+ }
+ segsize = mtod(m, uint32_t *);
+ if (*segsize < 1) {
+ error = EINVAL;
+ break;
+ }
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_frag_point = (*segsize + ovh);
+ if (inp->sctp_frag_point < MHLEN) {
+ inp->sctp_frag_point = MHLEN;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ break;
+ case SCTP_SET_DEBUG_LEVEL:
+#ifdef SCTP_DEBUG
+ {
+ uint32_t *level;
+
+ if ((size_t)m->m_len < sizeof(uint32_t)) {
+ error = EINVAL;
+ break;
+ }
+ level = mtod(m, uint32_t *);
+ error = 0;
+ sctp_debug_on = (*level & (SCTP_DEBUG_ALL |
+ SCTP_DEBUG_NOISY));
+ printf("SETTING DEBUG LEVEL to %x\n",
+ (uint32_t) sctp_debug_on);
+
+ }
+#else
+ error = EOPNOTSUPP;
+#endif /* SCTP_DEBUG */
+ break;
+ case SCTP_EVENTS:
+ {
+ struct sctp_event_subscribe *events;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_event_subscribe)) {
+ error = EINVAL;
+ break;
+ }
+ SCTP_INP_WLOCK(inp);
+ events = mtod(m, struct sctp_event_subscribe *);
+ if (events->sctp_data_io_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
+ }
+
+ if (events->sctp_association_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
+ }
+
+ if (events->sctp_address_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
+ }
+
+ if (events->sctp_send_failure_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
+ }
+
+ if (events->sctp_peer_error_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR);
+ }
+
+ if (events->sctp_shutdown_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
+ }
+
+ if (events->sctp_partial_delivery_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
+ }
+
+ if (events->sctp_adaptation_layer_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
+ }
+
+ if (events->sctp_authentication_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT);
+ }
+
+ if (events->sctp_stream_reset_events) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ break;
+
+ case SCTP_ADAPTATION_LAYER:
+ {
+ struct sctp_setadaptation *adap_bits;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_setadaptation)) {
+ error = EINVAL;
+ break;
+ }
+ SCTP_INP_WLOCK(inp);
+ adap_bits = mtod(m, struct sctp_setadaptation *);
+ inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind;
+ SCTP_INP_WUNLOCK(inp);
+ }
+ break;
+ case SCTP_SET_INITIAL_DBG_SEQ:
+ {
+ uint32_t *vvv;
+
+ if ((size_t)m->m_len < sizeof(uint32_t)) {
+ error = EINVAL;
+ break;
+ }
+ SCTP_INP_WLOCK(inp);
+ vvv = mtod(m, uint32_t *);
+ inp->sctp_ep.initial_sequence_debug = *vvv;
+ SCTP_INP_WUNLOCK(inp);
+ }
+ break;
+ case SCTP_DEFAULT_SEND_PARAM:
+ {
+ struct sctp_sndrcvinfo *s_info;
+
+ if (m->m_len != sizeof(struct sctp_sndrcvinfo)) {
+ error = EINVAL;
+ break;
+ }
+ s_info = mtod(m, struct sctp_sndrcvinfo *);
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ if (s_info->sinfo_assoc_id) {
+ stcb = sctp_findassociation_ep_asocid(inp, s_info->sinfo_assoc_id, 1);
+ } else {
+ stcb = NULL;
+ }
+ }
+			if ((s_info->sinfo_assoc_id == 0) &&
+			    (stcb == NULL)) {
+				/* set the endpoint default and we are done */
+				inp->def_send = *s_info;
+				break;
+			} else if (stcb == NULL) {
+				error = ENOENT;
+				break;
+			}
+ /* Validate things */
+ if (s_info->sinfo_stream > stcb->asoc.streamoutcnt) {
+ SCTP_TCB_UNLOCK(stcb);
+ error = EINVAL;
+ break;
+ }
+ /* Copy it in */
+ stcb->asoc.def_send = *s_info;
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ break;
+ case SCTP_PEER_ADDR_PARAMS:
+		/* Applies to the specific association */
+ {
+ struct sctp_paddrparams *paddrp;
+ struct sctp_nets *net;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_paddrparams)) {
+ error = EINVAL;
+ break;
+ }
+ paddrp = mtod(m, struct sctp_paddrparams *);
+ net = NULL;
+ if (paddrp->spp_assoc_id) {
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ stcb = sctp_findassociation_ep_asocid(inp, paddrp->spp_assoc_id, 1);
+ }
+ if (stcb == NULL) {
+ error = ENOENT;
+ break;
+ }
+ }
+ if ((stcb == NULL) &&
+ ((((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET) ||
+ (((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET6))) {
+ /* Lookup via address */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ net = sctp_findnet(stcb,
+ (struct sockaddr *)&paddrp->spp_address);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp,
+ (struct sockaddr *)&paddrp->spp_address,
+ &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+ }
+ if (stcb) {
+ /************************TCB SPECIFIC SET ******************/
+ /* sack delay first */
+ if (paddrp->spp_flags & SPP_SACKDELAY_ENABLE) {
+				/*
+				 * We do NOT support turning it off (yet),
+				 * only setting the delay.
+				 */
+ if (paddrp->spp_sackdelay >= SCTP_CLOCK_GRANULARITY)
+ stcb->asoc.delayed_ack = paddrp->spp_sackdelay;
+ else
+ stcb->asoc.delayed_ack = SCTP_CLOCK_GRANULARITY;
+
+ } else if (paddrp->spp_flags & SPP_SACKDELAY_DISABLE) {
+ stcb->asoc.delayed_ack = 0;
+ }
+			/*
+			 * Do we change the HB timer?  We run only one.
+			 */
+ if (paddrp->spp_hbinterval)
+ stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval;
+ else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
+ stcb->asoc.heart_beat_delay = 0;
+
+ /* network sets ? */
+ if (net) {
+ /************************NET SPECIFIC SET ******************/
+ if (paddrp->spp_flags & SPP_HB_DEMAND) {
+ /* on demand HB */
+ sctp_send_hb(stcb, 1, net);
+ }
+ if (paddrp->spp_flags & SPP_HB_DISABLE) {
+ net->dest_state |= SCTP_ADDR_NOHB;
+ }
+ if (paddrp->spp_flags & SPP_HB_ENABLE) {
+ net->dest_state &= ~SCTP_ADDR_NOHB;
+ }
+ if (paddrp->spp_flags & SPP_PMTUD_DISABLE) {
+ if (callout_pending(&net->pmtu_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
+ }
+ if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) {
+ net->mtu = paddrp->spp_pathmtu;
+ if (net->mtu < stcb->asoc.smallest_mtu)
+ sctp_pathmtu_adustment(inp, stcb, net, net->mtu);
+ }
+ }
+				if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
+					/* (re)start only if not already running */
+					if (!callout_pending(&net->pmtu_timer.timer)) {
+						sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
+					}
+				}
+ if (paddrp->spp_pathmaxrxt)
+ net->failure_threshold = paddrp->spp_pathmaxrxt;
+#ifdef AF_INET
+ if (paddrp->spp_flags & SPP_IPV4_TOS) {
+ if (net->ro._l_addr.sin.sin_family == AF_INET) {
+ net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc;
+ }
+ }
+#endif
+#ifdef AF_INET6
+ if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) {
+ if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
+ net->tos_flowlabel = paddrp->spp_ipv6_flowlabel;
+ }
+ }
+#endif
+ } else {
+ /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/
+ if (paddrp->spp_pathmaxrxt)
+ stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt;
+
+ if (paddrp->spp_flags & SPP_HB_ENABLE) {
+ /* Turn back on the timer */
+ stcb->asoc.hb_is_disabled = 0;
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
+ }
+ if (paddrp->spp_flags & SPP_HB_DISABLE) {
+ int cnt_of_unconf = 0;
+ struct sctp_nets *lnet;
+
+ stcb->asoc.hb_is_disabled = 1;
+ TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+ if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
+ cnt_of_unconf++;
+ }
+ }
+ /*
+ * stop the timer ONLY if we
+ * have no unconfirmed
+ * addresses
+ */
+ if (cnt_of_unconf == 0) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
+ }
+ }
+#ifdef AF_INET
+ if (paddrp->spp_flags & SPP_IPV4_TOS)
+ stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc;
+#endif
+#ifdef AF_INET6
+ if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL)
+ stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel;
+#endif
+
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /************************NO TCB, SET TO default stuff ******************/
+ SCTP_INP_WLOCK(inp);
+ /*
+ * For the TOS/FLOWLABEL stuff you set it
+ * with the options on the socket
+ */
+ if (paddrp->spp_pathmaxrxt) {
+ inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt;
+ }
+ if (paddrp->spp_flags & SPP_HB_ENABLE) {
+ inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval);
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
+ } else if (paddrp->spp_flags & SPP_HB_DISABLE) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
+ }
+ if (paddrp->spp_flags & SPP_SACKDELAY_ENABLE) {
+ if (paddrp->spp_sackdelay > SCTP_CLOCK_GRANULARITY)
+ inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(paddrp->spp_sackdelay);
+ else
+ inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(SCTP_CLOCK_GRANULARITY);
+
+ } else if (paddrp->spp_flags & SPP_SACKDELAY_DISABLE) {
+ inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = 0;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ }
+ break;
+ case SCTP_RTOINFO:
+ {
+ struct sctp_rtoinfo *srto;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_rtoinfo)) {
+ error = EINVAL;
+ break;
+ }
+ srto = mtod(m, struct sctp_rtoinfo *);
+ if (srto->srto_assoc_id == 0) {
+ SCTP_INP_WLOCK(inp);
+ /*
+ * A null asoc id means it's the default
+ * for the endpoint.
+ */
+ if (srto->srto_initial > 10)
+ inp->sctp_ep.initial_rto = srto->srto_initial;
+ if (srto->srto_max > 10)
+ inp->sctp_ep.sctp_maxrto = srto->srto_max;
+ if (srto->srto_min > 10)
+ inp->sctp_ep.sctp_minrto = srto->srto_min;
+ SCTP_INP_WUNLOCK(inp);
+ break;
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ } else
+ stcb = sctp_findassociation_ep_asocid(inp, srto->srto_assoc_id, 1);
+ if (stcb == NULL) {
+ error = EINVAL;
+ break;
+ }
+ /* Set in ms we hope :-) */
+ if (srto->srto_initial > 10)
+ stcb->asoc.initial_rto = srto->srto_initial;
+ if (srto->srto_max > 10)
+ stcb->asoc.maxrto = srto->srto_max;
+ if (srto->srto_min > 10)
+ stcb->asoc.minrto = srto->srto_min;
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ break;
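+ /*
+ * Hypothetical user-space sketch: with srto_assoc_id == 0
+ * the values (in ms) become the endpoint defaults set above.
+ *
+ * struct sctp_rtoinfo rto = { 0 };
+ * rto.srto_initial = 3000;
+ * rto.srto_min = 1000;
+ * rto.srto_max = 60000;
+ * setsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto));
+ */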
+ case SCTP_ASSOCINFO:
+ {
+ struct sctp_assocparams *sasoc;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_assocparams)) {
+ error = EINVAL;
+ break;
+ }
+ sasoc = mtod(m, struct sctp_assocparams *);
+ if (sasoc->sasoc_assoc_id) {
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ } else
+ stcb = sctp_findassociation_ep_asocid(inp,
+ sasoc->sasoc_assoc_id, 1);
+ if (stcb == NULL) {
+ error = ENOENT;
+ break;
+ }
+ } else {
+ stcb = NULL;
+ }
+ if (stcb) {
+ if (sasoc->sasoc_asocmaxrxt)
+ stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt;
+ sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
+ sasoc->sasoc_peer_rwnd = 0;
+ sasoc->sasoc_local_rwnd = 0;
+ if (sasoc->sasoc_cookie_life)
+ stcb->asoc.cookie_life = sasoc->sasoc_cookie_life;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_WLOCK(inp);
+ if (sasoc->sasoc_asocmaxrxt)
+ inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt;
+ sasoc->sasoc_number_peer_destinations = 0;
+ sasoc->sasoc_peer_rwnd = 0;
+ sasoc->sasoc_local_rwnd = 0;
+ if (sasoc->sasoc_cookie_life)
+ inp->sctp_ep.def_cookie_life = sasoc->sasoc_cookie_life;
+ SCTP_INP_WUNLOCK(inp);
+ }
+ }
+ break;
+ case SCTP_INITMSG:
+ {
+ struct sctp_initmsg *sinit;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_initmsg)) {
+ error = EINVAL;
+ break;
+ }
+ sinit = mtod(m, struct sctp_initmsg *);
+ SCTP_INP_WLOCK(inp);
+ if (sinit->sinit_num_ostreams)
+ inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams;
+
+ if (sinit->sinit_max_instreams)
+ inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams;
+
+ if (sinit->sinit_max_attempts)
+ inp->sctp_ep.max_init_times = sinit->sinit_max_attempts;
+
+ if (sinit->sinit_max_init_timeo > 10)
+ /*
+ * We must be at least 100 ms (we set it
+ * in ticks)
+ */
+ inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo;
+ SCTP_INP_WUNLOCK(inp);
+ }
+ break;
+ case SCTP_PRIMARY_ADDR:
+ {
+ struct sctp_setprim *spa;
+ struct sctp_nets *net, *lnet;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_setprim)) {
+ error = EINVAL;
+ break;
+ }
+ spa = mtod(m, struct sctp_setprim *);
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ } else {
+ SCTP_INP_RUNLOCK(inp);
+ error = EINVAL;
+ break;
+ }
+ SCTP_INP_RUNLOCK(inp);
+ } else
+ stcb = sctp_findassociation_ep_asocid(inp, spa->ssp_assoc_id, 1);
+ if (stcb == NULL) {
+ /* One last shot */
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp,
+ (struct sockaddr *)&spa->ssp_addr,
+ &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ error = EINVAL;
+ break;
+ }
+ } else {
+ /*
+ * find the net, associd or connected lookup
+ * type
+ */
+ net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr);
+ if (net == NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ error = EINVAL;
+ break;
+ }
+ }
+ if ((net != stcb->asoc.primary_destination) &&
+ (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
+ /* Ok we need to set it */
+ lnet = stcb->asoc.primary_destination;
+ if (sctp_set_primary_addr(stcb,
+ (struct sockaddr *)NULL,
+ net) == 0) {
+ if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
+ net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH;
+ }
+ net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY;
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ break;
+
+ case SCTP_SET_PEER_PRIMARY_ADDR:
+ {
+ struct sctp_setpeerprim *sspp;
+
+ if ((size_t)m->m_len < sizeof(struct sctp_setpeerprim)) {
+ error = EINVAL;
+ break;
+ }
+ sspp = mtod(m, struct sctp_setpeerprim *);
+
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ } else
+ stcb = sctp_findassociation_ep_asocid(inp, sspp->sspp_assoc_id, 1);
+ if (stcb == NULL) {
+ error = EINVAL;
+ break;
+ }
+ if (sctp_set_primary_ip_address_sa(stcb, (struct sockaddr *)&sspp->sspp_addr) != 0) {
+ error = EINVAL;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ break;
+ case SCTP_BINDX_ADD_ADDR:
+ {
+ struct sctp_getaddresses *addrs;
+ struct sockaddr *addr_touse;
+ struct sockaddr_in sin;
+
+ /* see if we're bound all already! */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ error = EINVAL;
+ break;
+ }
+ if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
+ error = EINVAL;
+ break;
+ }
+ addrs = mtod(m, struct sctp_getaddresses *);
+ addr_touse = addrs->addr;
+ if (addrs->addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)addr_touse;
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ in6_sin6_2_sin(&sin, sin6);
+ addr_touse = (struct sockaddr *)&sin;
+ }
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
+ if (p == NULL) {
+ /* Can't get proc for Net/Open BSD */
+ error = EINVAL;
+ break;
+ }
+ error = sctp_inpcb_bind(so, addr_touse, p);
+ break;
+ }
+ /*
+ * No locks required here since bind and mgmt_ep_sa
+ * all do their own locking. If we do something for
+ * the FIX: below we may need to lock in that case.
+ */
+ if (addrs->sget_assoc_id == 0) {
+ /* add the address */
+ struct sctp_inpcb *lep;
+
+ ((struct sockaddr_in *)addr_touse)->sin_port = inp->sctp_lport;
+ lep = sctp_pcb_findep(addr_touse, 1, 0);
+ if (lep != NULL) {
+ /*
+ * We must decrement the refcount
+ * since we have the ep already and
+ * are binding. No remove going on
+ * here.
+ */
+ SCTP_INP_DECR_REF(lep);
+ }
+ if (lep == inp) {
+ /* already bound to it.. ok */
+ break;
+ } else if (lep == NULL) {
+ ((struct sockaddr_in *)addr_touse)->sin_port = 0;
+ error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
+ SCTP_ADD_IP_ADDRESS);
+ } else {
+ error = EADDRNOTAVAIL;
+ }
+ if (error)
+ break;
+
+ } else {
+ /*
+ * FIX: decide whether we allow assoc based
+ * bindx
+ */
+ }
+ }
+ break;
+ case SCTP_BINDX_REM_ADDR:
+ {
+ struct sctp_getaddresses *addrs;
+ struct sockaddr *addr_touse;
+ struct sockaddr_in sin;
+
+ /* see if we're bound all already! */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ error = EINVAL;
+ break;
+ }
+ if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
+ error = EINVAL;
+ break;
+ }
+ addrs = mtod(m, struct sctp_getaddresses *);
+ addr_touse = addrs->addr;
+ if (addrs->addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)addr_touse;
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ in6_sin6_2_sin(&sin, sin6);
+ addr_touse = (struct sockaddr *)&sin;
+ }
+ }
+ /*
+ * No lock required mgmt_ep_sa does its own locking.
+ * If the FIX: below is ever changed we may need to
+ * lock before calling association level binding.
+ */
+ if (addrs->sget_assoc_id == 0) {
+ /* delete the address */
+ sctp_addr_mgmt_ep_sa(inp, addr_touse,
+ SCTP_DEL_IP_ADDRESS);
+ } else {
+ /*
+ * FIX: decide whether we allow assoc based
+ * bindx
+ */
+ }
+ }
+ break;
+ default:
+ error = ENOPROTOOPT;
+ break;
+ } /* end switch (opt) */
+ return (error);
+}
+
+
+
+extern int sctp_chatty_mbuf;
+
+int
+sctp_ctloutput(struct socket *so, struct sockopt *sopt)
+{
+ struct mbuf *m = NULL;
+ struct sctp_inpcb *inp;
+ int s, error;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ s = splnet();
+ if (inp == 0) {
+ splx(s);
+ /* I made this the same as TCP since we are not set up? */
+ return (ECONNRESET);
+ }
+ if (sopt->sopt_level != IPPROTO_SCTP) {
+ /* wrong proto level... send back up to IP */
+#ifdef INET6
+ if (INP_CHECK_SOCKAF(so, AF_INET6))
+ error = ip6_ctloutput(so, sopt);
+ else
+#endif /* INET6 */
+ error = ip_ctloutput(so, sopt);
+ splx(s);
+ return (error);
+ }
+ if (sopt->sopt_valsize) {
+ if (sopt->sopt_valsize < MLEN) {
+ m = sctp_get_mbuf_for_msg(1, 0, M_WAIT, 1, MT_DATA);
+ } else {
+ m = sctp_get_mbuf_for_msg(sopt->sopt_valsize, 0, M_WAIT, 1, MT_DATA);
+ }
+ if (m == NULL) {
+ splx(s);
+ return (ENOBUFS);
+ }
+ if (sopt->sopt_valsize > M_TRAILINGSPACE(m)) {
+ /* Limit to actual size gotten */
+ sopt->sopt_valsize = M_TRAILINGSPACE(m);
+ }
+ error = sooptcopyin(sopt, mtod(m, caddr_t), sopt->sopt_valsize,
+ sopt->sopt_valsize);
+ if (error) {
+ (void)sctp_m_free(m);
+ goto out;
+ }
+ m->m_len = sopt->sopt_valsize;
+ }
+ if (sopt->sopt_dir == SOPT_SET) {
+ error = sctp_optsset(so, sopt->sopt_name, &m, sopt->sopt_td);
+ } else if (sopt->sopt_dir == SOPT_GET) {
+ error = sctp_optsget(so, sopt->sopt_name, &m, sopt->sopt_td);
+ } else {
+ error = EINVAL;
+ }
+ if ((error == 0) && (m != NULL)) {
+ error = sooptcopyout(sopt, mtod(m, caddr_t), m->m_len);
+ sctp_m_freem(m);
+ } else if (m != NULL) {
+ sctp_m_freem(m);
+ }
+out:
+ splx(s);
+ return (error);
+}
+
+
+static int
+sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
+{
+ int s = splnet();
+
+ int error = 0;
+ int create_lock_on = 0;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb = NULL;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == 0) {
+ splx(s);
+ /* I made this the same as TCP since we are not set up? */
+ return (ECONNRESET);
+ }
+ SCTP_ASOC_CREATE_LOCK(inp);
+ create_lock_on = 1;
+
+ SCTP_INP_INCR_REF(inp);
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+ /* Should I really unlock ? */
+ error = EFAULT;
+ goto out_now;
+ }
+#ifdef INET6
+ if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
+ (addr->sa_family == AF_INET6)) {
+ error = EINVAL;
+ goto out_now;
+ }
+#endif /* INET6 */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
+ SCTP_PCB_FLAGS_UNBOUND) {
+ /* Bind an ephemeral port */
+ error = sctp_inpcb_bind(so, NULL, p);
+ if (error) {
+ goto out_now;
+ }
+ }
+ /* Now do we connect? */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
+ error = EINVAL;
+ goto out_now;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
+ /* We are already connected AND the TCP model */
+ error = EADDRINUSE;
+ goto out_now;
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ /*
+ * Raise the count a second time, since on success
+ * f-a-ep_addr will decrement it.
+ */
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+ if (stcb != NULL) {
+ /* Already have, or are bringing up, an association */
+ error = EALREADY;
+ goto out_now;
+ }
+ /* We are GOOD to go */
+ stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0);
+ if (stcb == NULL) {
+ /* Gak! no memory */
+ goto out_now;
+ }
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+ /* Set the connected flag so we can queue data */
+ soisconnecting(so);
+ }
+ stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
+ SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+
+ /* initialize authentication parameters for the assoc */
+ sctp_initialize_auth_params(inp, stcb);
+
+ sctp_send_initiate(inp, stcb);
+out_now:
+ if (create_lock_on)
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+
+ if (stcb)
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_DECR_REF(inp);
+ splx(s);
+ return error;
+}
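+
+/*
+ * Hypothetical one-to-one-style usage sketch: sctp_connect() above
+ * allocates the TCB, moves the new association to COOKIE_WAIT and
+ * sends the INIT.
+ *
+ * int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
+ * connect(fd, (struct sockaddr *)&sin, sizeof(sin));
+ */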
+
+int
+sctp_listen(struct socket *so, int backlog, struct thread *p)
+{
+ /*
+ * Note this module depends on the protocol processing being called
+ * AFTER any socket level flags and backlog are applied to the
+ * socket. The traditional way that the socket flags are applied is
+ * AFTER protocol processing. We have made a change to the
+ * sys/kern/uipc_socket.c module to reverse this but this MUST be in
+ * place if the socket API for SCTP is to work properly.
+ */
+ int s = splnet();
+
+ int error = 0;
+ struct sctp_inpcb *inp;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == 0) {
+ splx(s);
+ /* I made this the same as TCP since we are not set up? */
+ return (ECONNRESET);
+ }
+ SCTP_INP_RLOCK(inp);
+#ifdef SCTP_LOCK_LOGGING
+ sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK);
+#endif
+ SOCK_LOCK(so);
+ error = solisten_proto_check(so);
+ if (error) {
+ SOCK_UNLOCK(so);
+ SCTP_INP_RUNLOCK(inp);
+ splx(s);
+ return (error);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
+ /* We are already connected AND the TCP model */
+ splx(s);
+ SCTP_INP_RUNLOCK(inp);
+ SOCK_UNLOCK(so);
+ return (EADDRINUSE);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
+ /* We must do a bind. */
+ SCTP_INP_RUNLOCK(inp);
+ if ((error = sctp_inpcb_bind(so, NULL, p))) {
+ /* bind error, probably perm */
+ SOCK_UNLOCK(so);
+ splx(s);
+ return (error);
+ }
+ } else {
+ SCTP_INP_RUNLOCK(inp);
+ }
+ /* It appears for 7.0 and on, we must always call this. */
+ solisten_proto(so, backlog);
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
+ /* remove the ACCEPTCONN flag for one-to-many sockets */
+ so->so_options &= ~SO_ACCEPTCONN;
+ }
+ if (backlog == 0) {
+ /* turning off listen */
+ so->so_options &= ~SO_ACCEPTCONN;
+ }
+ SOCK_UNLOCK(so);
+ splx(s);
+ return (error);
+}
+
+static int sctp_defered_wakeup_cnt = 0;
+
+int
+sctp_accept(struct socket *so, struct sockaddr **addr)
+{
+ int s = splnet();
+
+ struct sctp_tcb *stcb;
+ struct sctp_inpcb *inp;
+ union sctp_sockstore store;
+
+ int error;
+
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+
+ if (inp == 0) {
+ splx(s);
+ return (ECONNRESET);
+ }
+ SCTP_INP_RLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
+ SCTP_INP_RUNLOCK(inp);
+ splx(s);
+ return (ENOTSUP);
+ }
+ if (so->so_state & SS_ISDISCONNECTED) {
+ splx(s);
+ SCTP_INP_RUNLOCK(inp);
+ return (ECONNABORTED);
+ }
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ splx(s);
+ SCTP_INP_RUNLOCK(inp);
+ return (ECONNRESET);
+ }
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ store = stcb->asoc.primary_destination->ro._l_addr;
+ SCTP_TCB_UNLOCK(stcb);
+ if (store.sa.sa_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(*sin);
+ sin->sin_port = ((struct sockaddr_in *)&store)->sin_port;
+ sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr;
+ *addr = (struct sockaddr *)sin;
+ } else {
+ struct sockaddr_in6 *sin6;
+
+ SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(*sin6);
+ sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port;
+
+ sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr;
+ if ((error = sa6_recoverscope(sin6)) != 0) {
+ SCTP_FREE_SONAME(sin6);
+ splx(s);
+ return (error);
+ }
+ *addr = (struct sockaddr *)sin6;
+ }
+ /* Wake any delayed sleep action */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) {
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
+ SOCKBUF_LOCK(&inp->sctp_socket->so_snd);
+ if (sowriteable(inp->sctp_socket)) {
+ sowwakeup_locked(inp->sctp_socket);
+ } else {
+ SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd);
+ }
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) {
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
+ SOCKBUF_LOCK(&inp->sctp_socket->so_rcv);
+ if (soreadable(inp->sctp_socket)) {
+ sctp_defered_wakeup_cnt++;
+ sorwakeup_locked(inp->sctp_socket);
+ } else {
+ SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv);
+ }
+ }
+ }
+ splx(s);
+ return (0);
+}
+
+int
+sctp_ingetaddr(struct socket *so, struct sockaddr **addr)
+{
+ struct sockaddr_in *sin;
+
+ int s;
+ struct sctp_inpcb *inp;
+
+ /*
+ * Do the malloc first in case it blocks.
+ */
+ SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(*sin);
+ s = splnet();
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (!inp) {
+ splx(s);
+ SCTP_FREE_SONAME(sin);
+ return ECONNRESET;
+ }
+ SCTP_INP_RLOCK(inp);
+ sin->sin_port = inp->sctp_lport;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ struct sctp_tcb *stcb;
+ struct sockaddr_in *sin_a;
+ struct sctp_nets *net;
+ int fnd;
+
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ goto notConn;
+ }
+ fnd = 0;
+ sin_a = NULL;
+ SCTP_TCB_LOCK(stcb);
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ sin_a = (struct sockaddr_in *)&net->ro._l_addr;
+ if (sin_a->sin_family == AF_INET) {
+ fnd = 1;
+ break;
+ }
+ }
+ if ((!fnd) || (sin_a == NULL)) {
+ /* punt */
+ SCTP_TCB_UNLOCK(stcb);
+ goto notConn;
+ }
+ sin->sin_addr = sctp_ipv4_source_address_selection(inp,
+ stcb, (struct route *)&net->ro, net, 0);
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /* For the bound all case you get back 0 */
+ notConn:
+ sin->sin_addr.s_addr = 0;
+ }
+
+ } else {
+ /* Take the first IPv4 address in the list */
+ struct sctp_laddr *laddr;
+ int fnd = 0;
+
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa->ifa_addr->sa_family == AF_INET) {
+ struct sockaddr_in *sin_a;
+
+ sin_a = (struct sockaddr_in *)laddr->ifa->ifa_addr;
+ sin->sin_addr = sin_a->sin_addr;
+ fnd = 1;
+ break;
+ }
+ }
+ if (!fnd) {
+ splx(s);
+ SCTP_FREE_SONAME(sin);
+ SCTP_INP_RUNLOCK(inp);
+ return ENOENT;
+ }
+ }
+ SCTP_INP_RUNLOCK(inp);
+ splx(s);
+ (*addr) = (struct sockaddr *)sin;
+ return (0);
+}
+
+int
+sctp_peeraddr(struct socket *so, struct sockaddr **addr)
+{
+ struct sockaddr_in *sin;
+
+ int s, fnd;
+ struct sockaddr_in *sin_a;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+ struct sctp_nets *net;
+
+
+ /* Validate the PCB first; the malloc below may block. */
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if ((inp == NULL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
+ /* UDP type and listeners will drop out here */
+ return (ENOTCONN);
+ }
+ s = splnet();
+
+ SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(*sin);
+
+ /* We must recapture in case we blocked */
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (!inp) {
+ splx(s);
+ SCTP_FREE_SONAME(sin);
+ return ECONNRESET;
+ }
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ if (stcb == NULL) {
+ splx(s);
+ SCTP_FREE_SONAME(sin);
+ return ECONNRESET;
+ }
+ fnd = 0;
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ sin_a = (struct sockaddr_in *)&net->ro._l_addr;
+ if (sin_a->sin_family == AF_INET) {
+ fnd = 1;
+ sin->sin_port = stcb->rport;
+ sin->sin_addr = sin_a->sin_addr;
+ break;
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ if (!fnd) {
+ /* No IPv4 address */
+ splx(s);
+ SCTP_FREE_SONAME(sin);
+ return ENOENT;
+ }
+ splx(s);
+ (*addr) = (struct sockaddr *)sin;
+ return (0);
+}
+
+struct pr_usrreqs sctp_usrreqs = {
+ .pru_abort = sctp_abort,
+ .pru_accept = sctp_accept,
+ .pru_attach = sctp_attach,
+ .pru_bind = sctp_bind,
+ .pru_connect = sctp_connect,
+ .pru_control = in_control,
+ .pru_close = sctp_close,
+ .pru_detach = sctp_close,
+ .pru_sopoll = sopoll_generic,
+ .pru_disconnect = sctp_disconnect,
+ .pru_listen = sctp_listen,
+ .pru_peeraddr = sctp_peeraddr,
+ .pru_send = sctp_sendm,
+ .pru_shutdown = sctp_shutdown,
+ .pru_sockaddr = sctp_ingetaddr,
+ .pru_sosend = sctp_sosend,
+ .pru_soreceive = sctp_soreceive
+};
diff --git a/sys/netinet/sctp_var.h b/sys/netinet/sctp_var.h
new file mode 100644
index 0000000..4461074
--- /dev/null
+++ b/sys/netinet/sctp_var.h
@@ -0,0 +1,476 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_var.h,v 1.24 2005/03/06 16:04:19 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef _NETINET_SCTP_VAR_H_
+#define _NETINET_SCTP_VAR_H_
+
+#include <sys/socketvar.h>
+#include <netinet/sctp_uio.h>
+
+/* SCTP Kernel structures */
+
+/*
+ * Names for SCTP sysctl objects
+ */
+#define SCTPCTL_MAXDGRAM 1 /* max datagram size */
+#define SCTPCTL_RECVSPACE 2 /* default receive buffer space */
+#define SCTPCTL_AUTOASCONF 3 /* auto asconf enable/disable flag */
+#define SCTPCTL_ECN_ENABLE 4 /* Is ecn allowed */
+#define SCTPCTL_ECN_NONCE 5 /* Is ecn nonce allowed */
+#define SCTPCTL_STRICT_SACK 6 /* strictly require sack'd TSN's to be
+ * smaller than sndnxt. */
+#define SCTPCTL_NOCSUM_LO 7 /* Require that the Loopback NOT have
+ * the crc32 checksum on packets
+ * routed over it. */
+#define SCTPCTL_STRICT_INIT 8
+#define SCTPCTL_PEER_CHK_OH 9
+#define SCTPCTL_MAXBURST 10
+#define SCTPCTL_MAXCHUNKONQ 11
+#define SCTPCTL_DELAYED_SACK 12
+#define SCTPCTL_HB_INTERVAL 13
+#define SCTPCTL_PMTU_RAISE 14
+#define SCTPCTL_SHUTDOWN_GUARD 15
+#define SCTPCTL_SECRET_LIFETIME 16
+#define SCTPCTL_RTO_MAX 17
+#define SCTPCTL_RTO_MIN 18
+#define SCTPCTL_RTO_INITIAL 19
+#define SCTPCTL_INIT_RTO_MAX 20
+#define SCTPCTL_COOKIE_LIFE 21
+#define SCTPCTL_INIT_RTX_MAX 22
+#define SCTPCTL_ASSOC_RTX_MAX 23
+#define SCTPCTL_PATH_RTX_MAX 24
+#define SCTPCTL_NR_OUTGOING_STREAMS 25
+#define SCTPCTL_CMT_ON_OFF 26
+#define SCTPCTL_CWND_MAXBURST 27
+#define SCTPCTL_EARLY_FR 28
+#define SCTPCTL_RTTVAR_CC 29
+#define SCTPCTL_DEADLOCK_DET 30
+#define SCTPCTL_EARLY_FR_MSEC 31
+#define SCTPCTL_ASCONF_AUTH_NOCHK 32
+#define SCTPCTL_AUTH_DISABLE 33
+#define SCTPCTL_AUTH_RANDOM_LEN 34
+#define SCTPCTL_AUTH_HMAC_ID 35
+#define SCTPCTL_ABC_L_VAR 36
+#define SCTPCTL_MAX_MBUF_CHAIN 37
+#define SCTPCTL_CMT_USE_DAC 38
+#define SCTPCTL_DO_DRAIN 39
+#define SCTPCTL_WARM_CRC32 40
+#define SCTPCTL_QLIMIT_ABORT 41
+#define SCTPCTL_STRICT_ORDER 42
+#define SCTPCTL_TCBHASHSIZE 43
+#define SCTPCTL_PCBHASHSIZE 44
+#define SCTPCTL_CHUNKSCALE 45
+#define SCTPCTL_MINSPLIT 46
+#define SCTPCTL_ADD_MORE 47
+#define SCTPCTL_SYS_RESC 48
+#define SCTPCTL_ASOC_RESC 49
+#ifdef SCTP_DEBUG
+#define SCTPCTL_DEBUG 50
+#define SCTPCTL_MAXID 50
+#else
+#define SCTPCTL_MAXID 49
+#endif
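+
+/*
+ * These index the net.inet.sctp sysctl tree (declared below); a
+ * hypothetical shell example:
+ *
+ * sysctl net.inet.sctp.ecn_enable
+ * sysctl net.inet.sctp.rto_initial=3000
+ */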
+
+#ifdef SCTP_DEBUG
+#define SCTPCTL_NAMES { \
+ { 0, 0 }, \
+ { "sendspace", CTLTYPE_INT }, \
+ { "recvspace", CTLTYPE_INT }, \
+ { "autoasconf", CTLTYPE_INT }, \
+ { "ecn_enable", CTLTYPE_INT }, \
+ { "ecn_nonce", CTLTYPE_INT }, \
+ { "strict_sack", CTLTYPE_INT }, \
+ { "looback_nocsum", CTLTYPE_INT }, \
+ { "strict_init", CTLTYPE_INT }, \
+ { "peer_chkoh", CTLTYPE_INT }, \
+ { "maxburst", CTLTYPE_INT }, \
+ { "maxchunks", CTLTYPE_INT }, \
+ { "delayed_sack_time", CTLTYPE_INT }, \
+ { "heartbeat_interval", CTLTYPE_INT }, \
+ { "pmtu_raise_time", CTLTYPE_INT }, \
+ { "shutdown_guard_time", CTLTYPE_INT }, \
+ { "secret_lifetime", CTLTYPE_INT }, \
+ { "rto_max", CTLTYPE_INT }, \
+ { "rto_min", CTLTYPE_INT }, \
+ { "rto_initial", CTLTYPE_INT }, \
+ { "init_rto_max", CTLTYPE_INT }, \
+ { "valid_cookie_life", CTLTYPE_INT }, \
+ { "init_rtx_max", CTLTYPE_INT }, \
+ { "assoc_rtx_max", CTLTYPE_INT }, \
+ { "path_rtx_max", CTLTYPE_INT }, \
+ { "nr_outgoing_streams", CTLTYPE_INT }, \
+ { "cmt_on_off", CTLTYPE_INT }, \
+ { "cwnd_maxburst", CTLTYPE_INT }, \
+ { "early_fast_retran", CTLTYPE_INT }, \
+ { "use_rttvar_congctrl", CTLTYPE_INT }, \
+ { "deadlock_detect", CTLTYPE_INT }, \
+ { "early_fast_retran_msec", CTLTYPE_INT }, \
+ { "asconf_auth_nochk", CTLTYPE_INT }, \
+ { "auth_disable", CTLTYPE_INT }, \
+ { "auth_random_len", CTLTYPE_INT }, \
+ { "auth_hmac_id", CTLTYPE_INT }, \
+ { "abc_l_var", CTLTYPE_INT }, \
+ { "max_mbuf_chain", CTLTYPE_INT }, \
+ { "cmt_use_dac", CTLTYPE_INT }, \
+ { "do_sctp_drain", CTLTYPE_INT }, \
+ { "warm_crc_table", CTLTYPE_INT }, \
+ { "abort_at_limit", CTLTYPE_INT }, \
+ { "strict_data_order", CTLTYPE_INT }, \
+ { "tcbhashsize", CTLTYPE_INT }, \
+ { "pcbhashsize", CTLTYPE_INT }, \
+ { "chunkscale", CTLTYPE_INT }, \
+ { "min_split_point", CTLTYPE_INT }, \
+ { "add_more_on_output", CTLTYPE_INT }, \
+ { "sys_resource", CTLTYPE_INT }, \
+ { "asoc_resource", CTLTYPE_INT }, \
+ { "debug", CTLTYPE_INT }, \
+}
+#else
+#define SCTPCTL_NAMES { \
+ { 0, 0 }, \
+ { "sendspace", CTLTYPE_INT }, \
+ { "recvspace", CTLTYPE_INT }, \
+ { "autoasconf", CTLTYPE_INT }, \
+ { "ecn_enable", CTLTYPE_INT }, \
+ { "ecn_nonce", CTLTYPE_INT }, \
+ { "strict_sack", CTLTYPE_INT }, \
+ { "looback_nocsum", CTLTYPE_INT }, \
+ { "strict_init", CTLTYPE_INT }, \
+ { "peer_chkoh", CTLTYPE_INT }, \
+ { "maxburst", CTLTYPE_INT }, \
+ { "maxchunks", CTLTYPE_INT }, \
+ { "delayed_sack_time", CTLTYPE_INT }, \
+ { "heartbeat_interval", CTLTYPE_INT }, \
+ { "pmtu_raise_time", CTLTYPE_INT }, \
+ { "shutdown_guard_time", CTLTYPE_INT }, \
+ { "secret_lifetime", CTLTYPE_INT }, \
+ { "rto_max", CTLTYPE_INT }, \
+ { "rto_min", CTLTYPE_INT }, \
+ { "rto_initial", CTLTYPE_INT }, \
+ { "init_rto_max", CTLTYPE_INT }, \
+ { "valid_cookie_life", CTLTYPE_INT }, \
+ { "init_rtx_max", CTLTYPE_INT }, \
+ { "assoc_rtx_max", CTLTYPE_INT }, \
+ { "path_rtx_max", CTLTYPE_INT }, \
+ { "nr_outgoing_streams", CTLTYPE_INT }, \
+ { "cmt_on_off", CTLTYPE_INT }, \
+ { "cwnd_maxburst", CTLTYPE_INT }, \
+ { "early_fast_retran", CTLTYPE_INT }, \
+ { "use_rttvar_congctrl", CTLTYPE_INT }, \
+ { "deadlock_detect", CTLTYPE_INT }, \
+ { "early_fast_retran_msec", CTLTYPE_INT }, \
+ { "asconf_auth_nochk", CTLTYPE_INT }, \
+ { "auth_disable", CTLTYPE_INT }, \
+ { "auth_random_len", CTLTYPE_INT }, \
+ { "auth_hmac_id", CTLTYPE_INT }, \
+ { "abc_l_var", CTLTYPE_INT }, \
+ { "max_mbuf_chain", CTLTYPE_INT }, \
+ { "cmt_use_dac", CTLTYPE_INT }, \
+ { "do_sctp_drain", CTLTYPE_INT }, \
+ { "warm_crc_table", CTLTYPE_INT }, \
+ { "abort_at_limit", CTLTYPE_INT }, \
+ { "strict_data_order", CTLTYPE_INT }, \
+ { "tcbhashsize", CTLTYPE_INT }, \
+ { "pcbhashsize", CTLTYPE_INT }, \
+ { "chunkscale", CTLTYPE_INT }, \
+ { "min_split_point", CTLTYPE_INT }, \
+ { "add_more_on_output", CTLTYPE_INT }, \
+ { "sys_resource", CTLTYPE_INT }, \
+ { "asoc_resource", CTLTYPE_INT }, \
+}
+#endif
+
+
+
+
+#if defined(_KERNEL)
+
+#ifdef SYSCTL_DECL
+SYSCTL_DECL(_net_inet_sctp);
+#endif
+extern struct pr_usrreqs sctp_usrreqs;
+
+
+#define sctp_feature_on(inp, feature) (inp->sctp_features |= feature)
+#define sctp_feature_off(inp, feature) (inp->sctp_features &= ~feature)
+#define sctp_is_feature_on(inp, feature) (inp->sctp_features & feature)
+#define sctp_is_feature_off(inp, feature) ((inp->sctp_features & feature) == 0)
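+
+/*
+ * Usage sketch: the feature macros operate on the endpoint, e.g.
+ * sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT) as done in
+ * the SPP_HB_DISABLE handling in sctp_usrreq.c.
+ */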
+
+#define sctp_sbspace(asoc, sb) ((long) (((sb)->sb_hiwat > (asoc)->sb_cc) ? ((sb)->sb_hiwat - (asoc)->sb_cc) : 0))
+
+#define sctp_sbspace_failedmsgs(sb) ((long) (((sb)->sb_hiwat > (sb)->sb_cc) ? ((sb)->sb_hiwat - (sb)->sb_cc) : 0))
+
+#define sctp_sbspace_sub(a,b) ((a > b) ? (a - b) : 0)
+
+extern uint32_t sctp_asoc_free_resc_limit;
+extern uint32_t sctp_system_free_resc_limit;
+
+/*
+ * I tried to cache the readq entries at one point, but it did not
+ * add any performance, because caching meant we had to lock the
+ * STCB on read. Once you have to take an extra lock anyway, it
+ * really does not matter whether that lock lives in the ZONE code
+ * or in ours. The same problem would occur with an mbuf cache, so
+ * it is not really worth doing, at least right now :-D
+ */
+
+#define sctp_free_a_readq(_stcb, _readq) { \
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, (_readq)); \
+ SCTP_DECR_READQ_COUNT(); \
+}
+
+#define sctp_alloc_a_readq(_stcb, _readq) { \
+ (_readq) = (struct sctp_queued_to_read *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_readq); \
+ if ((_readq)) { \
+ SCTP_INCR_READQ_COUNT(); \
+ } \
+}
+
+
+
+#define sctp_free_a_strmoq(_stcb, _strmoq) { \
+ if (((_stcb)->asoc.free_strmoq_cnt > sctp_asoc_free_resc_limit) || \
+ (sctppcbinfo.ipi_free_strmoq > sctp_system_free_resc_limit)) { \
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_strmoq, (_strmoq)); \
+ SCTP_DECR_STRMOQ_COUNT(); \
+ } else { \
+ TAILQ_INSERT_TAIL(&(_stcb)->asoc.free_strmoq, (_strmoq), next); \
+ (_stcb)->asoc.free_strmoq_cnt++; \
+ atomic_add_int(&sctppcbinfo.ipi_free_strmoq, 1); \
+ } \
+}
+
+#define sctp_alloc_a_strmoq(_stcb, _strmoq) { \
+ if(TAILQ_EMPTY(&(_stcb)->asoc.free_strmoq)) { \
+ (_strmoq) = (struct sctp_stream_queue_pending *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_strmoq); \
+ if ((_strmoq)) { \
+ SCTP_INCR_STRMOQ_COUNT(); \
+ } \
+ } else { \
+ (_strmoq) = TAILQ_FIRST(&(_stcb)->asoc.free_strmoq); \
+ TAILQ_REMOVE(&(_stcb)->asoc.free_strmoq, (_strmoq), next); \
+ atomic_subtract_int(&sctppcbinfo.ipi_free_strmoq, 1); \
+ (_stcb)->asoc.free_strmoq_cnt--; \
+ } \
+}
+
+
+#define sctp_free_a_chunk(_stcb, _chk) { \
+ if (((_stcb)->asoc.free_chunk_cnt > sctp_asoc_free_resc_limit) || \
+ (sctppcbinfo.ipi_free_chunks > sctp_system_free_resc_limit)) { \
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, (_chk)); \
+ SCTP_DECR_CHK_COUNT(); \
+ } else { \
+ TAILQ_INSERT_TAIL(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \
+ (_stcb)->asoc.free_chunk_cnt++; \
+ atomic_add_int(&sctppcbinfo.ipi_free_chunks, 1); \
+ } \
+}
+
+#define sctp_alloc_a_chunk(_stcb, _chk) { \
+ if(TAILQ_EMPTY(&(_stcb)->asoc.free_chunks)) { \
+ (_chk) = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk); \
+ if ((_chk)) { \
+ SCTP_INCR_CHK_COUNT(); \
+ } \
+ } else { \
+ (_chk) = TAILQ_FIRST(&(_stcb)->asoc.free_chunks); \
+ TAILQ_REMOVE(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \
+ atomic_subtract_int(&sctppcbinfo.ipi_free_chunks, 1); \
+ (_stcb)->asoc.free_chunk_cnt--; \
+ } \
+}
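+
+/*
+ * sctp_free_a_chunk() parks freed chunks on a per-association free
+ * list until the per-asoc (sctp_asoc_free_resc_limit) or system-wide
+ * (sctp_system_free_resc_limit) limits are reached, after which they
+ * go back to the zone; sctp_alloc_a_chunk() prefers that list and
+ * only hits the zone allocator when it is empty.
+ */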
+
+
+
+
+
+#define sctp_free_remote_addr(__net) { \
+ if ((__net)) { \
+ if (atomic_fetchadd_int(&(__net)->ref_count, -1) == 1) { \
+ callout_stop(&(__net)->rxt_timer.timer); \
+ callout_stop(&(__net)->pmtu_timer.timer); \
+ callout_stop(&(__net)->fr_timer.timer); \
+ (__net)->dest_state = SCTP_ADDR_NOT_REACHABLE; \
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_net, (__net)); \
+ SCTP_DECR_RADDR_COUNT(); \
+ } \
+ } \
+}
+
+
+#define sctp_sbfree(ctl, stcb, sb, m) { \
+ uint32_t val; \
+ val = atomic_fetchadd_int(&(sb)->sb_cc,-((m)->m_len)); \
+ if(val < (m)->m_len) { \
+ panic("sb_cc goes negative"); \
+ } \
+ val = atomic_fetchadd_int(&(sb)->sb_mbcnt,-(MSIZE)); \
+ if(val < MSIZE) { \
+ panic("sb_mbcnt goes negative"); \
+ } \
+ if ((m)->m_flags & M_EXT) { \
+ val = atomic_fetchadd_int(&(sb)->sb_mbcnt,-((m)->m_ext.ext_size)); \
+ if(val < (m)->m_ext.ext_size) { \
+ panic("sb_mbcnt goes negative2"); \
+ } \
+ } \
+ if (((ctl)->do_not_ref_stcb == 0) && stcb) {\
+ val = atomic_fetchadd_int(&(stcb)->asoc.sb_cc,-((m)->m_len)); \
+ if(val < (m)->m_len) {\
+ panic("stcb->sb_cc goes negative"); \
+ } \
+ val = atomic_fetchadd_int(&(stcb)->asoc.sb_mbcnt,-(MSIZE)); \
+ if(val < MSIZE) { \
+ panic("asoc->mbcnt goes negative"); \
+ } \
+ if ((m)->m_flags & M_EXT) { \
+ val = atomic_fetchadd_int(&(stcb)->asoc.sb_mbcnt,-((m)->m_ext.ext_size)); \
+ if(val < (m)->m_ext.ext_size) { \
+ panic("assoc stcb->mbcnt would go negative"); \
+ } \
+ } \
+ } \
+ if ((m)->m_type != MT_DATA && (m)->m_type != MT_HEADER && \
+ (m)->m_type != MT_OOBDATA) \
+ atomic_subtract_int(&(sb)->sb_ctl,(m)->m_len); \
+}
+
+
+#define sctp_sballoc(stcb, sb, m) { \
+ atomic_add_int(&(sb)->sb_cc,(m)->m_len); \
+ atomic_add_int(&(sb)->sb_mbcnt, MSIZE); \
+ if ((m)->m_flags & M_EXT) \
+ atomic_add_int(&(sb)->sb_mbcnt,(m)->m_ext.ext_size); \
+ if(stcb) { \
+ atomic_add_int(&(stcb)->asoc.sb_cc,(m)->m_len); \
+ atomic_add_int(&(stcb)->asoc.sb_mbcnt, MSIZE); \
+ if ((m)->m_flags & M_EXT) \
+ atomic_add_int(&(stcb)->asoc.sb_mbcnt,(m)->m_ext.ext_size); \
+ } \
+ if ((m)->m_type != MT_DATA && (m)->m_type != MT_HEADER && \
+ (m)->m_type != MT_OOBDATA) \
+ atomic_add_int(&(sb)->sb_ctl,(m)->m_len); \
+}
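+
+/*
+ * sctp_sballoc()/sctp_sbfree() mirror the stock sballoc()/sbfree()
+ * socket-buffer accounting, but use atomics and also charge/credit
+ * the per-association counters (asoc.sb_cc, asoc.sb_mbcnt) when a
+ * TCB is supplied.
+ */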
+
+
+#define sctp_ucount_incr(val) { \
+ val++; \
+}
+
+#define sctp_ucount_decr(val) { \
+ if (val > 0) { \
+ val--; \
+ } else { \
+ val = 0; \
+ } \
+}
+
+#define sctp_mbuf_crush(data) do { \
+ struct mbuf *_m; \
+ _m = (data); \
+ while(_m && (_m->m_len == 0)) { \
+ (data) = _m->m_next; \
+ _m->m_next = NULL; \
+ sctp_m_free(_m); \
+ _m = (data); \
+ } \
+} while (0)
+
+
+extern int sctp_sendspace;
+extern int sctp_recvspace;
+extern int sctp_ecn_enable;
+extern int sctp_ecn_nonce;
+extern int sctp_use_cwnd_based_maxburst;
+extern unsigned int sctp_cmt_on_off;
+extern unsigned int sctp_cmt_use_dac;
+extern unsigned int sctp_cmt_sockopt_on_off;
+struct sctp_nets;
+struct sctp_inpcb;
+struct sctp_tcb;
+struct sctphdr;
+
+
+void sctp_ctlinput __P((int, struct sockaddr *, void *));
+int sctp_ctloutput __P((struct socket *, struct sockopt *));
+void sctp_input __P((struct mbuf *, int));
+
+void sctp_drain __P((void));
+void sctp_init __P((void));
+
+int sctp_shutdown __P((struct socket *));
+void sctp_notify __P((struct sctp_inpcb *, int, struct sctphdr *,
+    struct sockaddr *, struct sctp_tcb *, struct sctp_nets *));
+
+#if defined(INET6)
+void ip_2_ip6_hdr __P((struct ip6_hdr *, struct ip *));
+#endif
+
+int sctp_bindx(struct socket *, int, struct sockaddr_storage *,
+    int, int, struct proc *);
+
+/* can't use sctp_assoc_t here */
+int sctp_peeloff(struct socket *, struct socket *, int, caddr_t, int *);
+
+sctp_assoc_t sctp_getassocid(struct sockaddr *);
+
+int sctp_ingetaddr(struct socket *, struct sockaddr **);
+
+int sctp_peeraddr(struct socket *, struct sockaddr **);
+
+int sctp_listen(struct socket *, int, struct thread *);
+
+int sctp_accept(struct socket *, struct sockaddr **);
+
+
+
+
+#endif /* _KERNEL */
+
+#endif /* !_NETINET_SCTP_VAR_H_ */
diff --git a/sys/netinet/sctputil.c b/sys/netinet/sctputil.c
new file mode 100644
index 0000000..e8fb2d5
--- /dev/null
+++ b/sys/netinet/sctputil.c
@@ -0,0 +1,5390 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+
+#include "opt_ipsec.h"
+#include "opt_compat.h"
+#include "opt_inet6.h"
+#include "opt_inet.h"
+#include "opt_sctp.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/fcntl.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/domain.h>
+#include <sys/file.h> /* for struct knote */
+#include <sys/kernel.h>
+#include <sys/event.h>
+#include <sys/poll.h>
+
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/resourcevar.h>
+#include <sys/signalvar.h>
+#include <sys/sysctl.h>
+#include <sys/uio.h>
+#include <sys/jail.h>
+
+#include <sys/callout.h>
+
+#include <net/radix.h>
+#include <net/route.h>
+
+#ifdef INET6
+#include <sys/domain.h>
+#endif
+
+#include <sys/limits.h>
+#include <sys/mac.h>
+#include <sys/mutex.h>
+
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/route.h>
+
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#include <netinet/in_pcb.h>
+#include <netinet/in_var.h>
+#include <netinet/ip_var.h>
+
+#ifdef INET6
+#include <netinet/ip6.h>
+#include <netinet6/ip6_var.h>
+
+#include <netinet6/in6_pcb.h>
+
+#include <netinet6/scope6_var.h>
+#endif /* INET6 */
+
+#ifdef IPSEC
+#include <netinet6/ipsec.h>
+#include <netkey/key.h>
+#endif /* IPSEC */
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_var.h>
+#ifdef INET6
+#include <netinet6/sctp6_var.h>
+#endif
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_crc32.h>
+#include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
+#include <netinet/sctp_auth.h>
+#include <netinet/sctp_asconf.h>
+
+extern int sctp_warm_the_crc32_table;
+
+#define NUMBER_OF_MTU_SIZES 18
+
+#ifdef SCTP_DEBUG
+extern uint32_t sctp_debug_on;
+
+#endif
+
+
+#ifdef SCTP_STAT_LOGGING
+int global_sctp_cwnd_log_at = 0;
+int global_sctp_cwnd_log_rolled = 0;
+struct sctp_cwnd_log sctp_clog[SCTP_STAT_LOG_SIZE];
+
+static uint32_t
+sctp_get_time_of_event(void)
+{
+ struct timeval now;
+ uint32_t timeval;
+
+ SCTP_GETPTIME_TIMEVAL(&now);
+ timeval = (now.tv_sec % 0x00000fff);
+ timeval <<= 20;
+ timeval |= now.tv_usec & 0xfffff;
+ return (timeval);
+}
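+
+/*
+ * sctp_get_time_of_event() packs tv_sec modulo 4095 into the top 12
+ * bits and the 20-bit microsecond count below them: a compact
+ * timestamp that wraps roughly every 68 minutes.
+ */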
+
+
+void
+sctp_clr_stat_log(void)
+{
+ global_sctp_cwnd_log_at = 0;
+ global_sctp_cwnd_log_rolled = 0;
+}
+
+
+void
+sctp_sblog(struct sockbuf *sb,
+ struct sctp_tcb *stcb, int from, int incr)
+{
+ int sctp_cwnd_log_at;
+
+ SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
+ sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
+ sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
+ sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SB;
+ sctp_clog[sctp_cwnd_log_at].x.sb.stcb = (uint32_t) stcb;
+ sctp_clog[sctp_cwnd_log_at].x.sb.so_sbcc = sb->sb_cc;
+ if (stcb)
+ sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = stcb->asoc.sb_cc;
+ else
+ sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = 0;
+ sctp_clog[sctp_cwnd_log_at].x.sb.incr = incr;
+}
+
+void
+sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
+{
+ int sctp_cwnd_log_at;
+
+ SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
+ sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
+ sctp_clog[sctp_cwnd_log_at].from = 0;
+ sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CLOSE;
+ sctp_clog[sctp_cwnd_log_at].x.close.inp = (uint32_t) inp;
+ sctp_clog[sctp_cwnd_log_at].x.close.sctp_flags = inp->sctp_flags;
+ if (stcb) {
+ sctp_clog[sctp_cwnd_log_at].x.close.stcb = (uint32_t) stcb;
+ sctp_clog[sctp_cwnd_log_at].x.close.state = (uint16_t) stcb->asoc.state;
+ } else {
+ sctp_clog[sctp_cwnd_log_at].x.close.stcb = 0;
+ sctp_clog[sctp_cwnd_log_at].x.close.state = 0;
+ }
+ sctp_clog[sctp_cwnd_log_at].x.close.loc = loc;
+}
+
+
+void
+rto_logging(struct sctp_nets *net, int from)
+{
+ int sctp_cwnd_log_at;
+
+ SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
+ sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
+ sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
+ sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RTT;
+ sctp_clog[sctp_cwnd_log_at].x.rto.net = (uint32_t) net;
+ sctp_clog[sctp_cwnd_log_at].x.rto.rtt = net->prev_rtt;
+ sctp_clog[sctp_cwnd_log_at].x.rto.rttvar = net->rtt_variance;
+ sctp_clog[sctp_cwnd_log_at].x.rto.direction = net->rto_variance_dir;
+}
+
+void
+sctp_log_strm_del_alt(uint32_t tsn, uint16_t sseq, int from)
+{
+ int sctp_cwnd_log_at;
+
+ SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
+ sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
+ sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
+ sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_STRM;
+ sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = tsn;
+ sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = sseq;
+ sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
+ sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
+}
+
+void
+sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
+{
+ int sctp_cwnd_log_at;
+
+ SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
+ sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
+ sctp_clog[sctp_cwnd_log_at].from = (uint8_t) action;
+ sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_NAGLE;
+ sctp_clog[sctp_cwnd_log_at].x.nagle.stcb = (uint32_t) stcb;
+ sctp_clog[sctp_cwnd_log_at].x.nagle.total_flight = stcb->asoc.total_flight;
+ sctp_clog[sctp_cwnd_log_at].x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
+ sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
+ sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_flight = stcb->asoc.total_flight_count;
+}
+
+
+void
+sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
+{
+ int sctp_cwnd_log_at;
+
+ SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
+ sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
+ sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
+ sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SACK;
+ sctp_clog[sctp_cwnd_log_at].x.sack.cumack = cumack;
+ sctp_clog[sctp_cwnd_log_at].x.sack.oldcumack = old_cumack;
+ sctp_clog[sctp_cwnd_log_at].x.sack.tsn = tsn;
+ sctp_clog[sctp_cwnd_log_at].x.sack.numGaps = gaps;
+ sctp_clog[sctp_cwnd_log_at].x.sack.numDups = dups;
+}
+
+void
+sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
+{
+ int sctp_cwnd_log_at;
+
+ SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
+ sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
+ sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
+ sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MAP;
+ sctp_clog[sctp_cwnd_log_at].x.map.base = map;
+ sctp_clog[sctp_cwnd_log_at].x.map.cum = cum;
+ sctp_clog[sctp_cwnd_log_at].x.map.high = high;
+}
+
+void
+sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
+ int from)
+{
+ int sctp_cwnd_log_at;
+
+ SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
+ sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
+ sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
+ sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_FR;
+ sctp_clog[sctp_cwnd_log_at].x.fr.largest_tsn = biggest_tsn;
+ sctp_clog[sctp_cwnd_log_at].x.fr.largest_new_tsn = biggest_new_tsn;
+ sctp_clog[sctp_cwnd_log_at].x.fr.tsn = tsn;
+}
+
+
+void
+sctp_log_mb(struct mbuf *m, int from)
+{
+ int sctp_cwnd_log_at;
+
+ SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
+ sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
+ sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
+ sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MBUF;
+ sctp_clog[sctp_cwnd_log_at].x.mb.mp = m;
+ sctp_clog[sctp_cwnd_log_at].x.mb.mbuf_flags = (uint8_t) (m->m_flags);
+ sctp_clog[sctp_cwnd_log_at].x.mb.size = (uint16_t) (m->m_len);
+ sctp_clog[sctp_cwnd_log_at].x.mb.data = m->m_data;
+ if (m->m_flags & M_EXT) {
+ sctp_clog[sctp_cwnd_log_at].x.mb.ext = m->m_ext.ext_buf;
+ sctp_clog[sctp_cwnd_log_at].x.mb.refcnt = (uint8_t) (*m->m_ext.ref_cnt);
+ } else {
+ sctp_clog[sctp_cwnd_log_at].x.mb.ext = 0;
+ sctp_clog[sctp_cwnd_log_at].x.mb.refcnt = 0;
+ }
+}
+
+
+void
+sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
+ int from)
+{
+ int sctp_cwnd_log_at;
+
+ if (control == NULL) {
+ printf("Gak log of NULL?\n");
+ return;
+ }
+ SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
+ sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
+ sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
+ sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_STRM;
+ sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = control->sinfo_tsn;
+ sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = control->sinfo_ssn;
+ if (poschk != NULL) {
+ sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = poschk->sinfo_tsn;
+ sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = poschk->sinfo_ssn;
+ } else {
+ sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
+ sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
+ }
+}
+
+void
+sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
+{
+ int sctp_cwnd_log_at;
+
+ SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
+ sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
+ sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
+ sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CWND;
+ sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
+ if (stcb->asoc.send_queue_cnt > 255)
+ sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = 255;
+ else
+ sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
+ if (stcb->asoc.stream_queue_cnt > 255)
+ sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = 255;
+ else
+ sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
+
+ if (net) {
+ sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = net->cwnd;
+ sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
+ sctp_clog[sctp_cwnd_log_at].x.cwnd.pseudo_cumack = net->pseudo_cumack;
+ sctp_clog[sctp_cwnd_log_at].x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
+ sctp_clog[sctp_cwnd_log_at].x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
+ }
+ if (SCTP_CWNDLOG_PRESEND == from) {
+ sctp_clog[sctp_cwnd_log_at].x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
+ }
+ sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = augment;
+}
+
+void
+sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
+{
+ int sctp_cwnd_log_at;
+
+ SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
+ sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
+ sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
+ sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_LOCK_EVENT;
+ sctp_clog[sctp_cwnd_log_at].x.lock.sock = (uint32_t) inp->sctp_socket;
+ sctp_clog[sctp_cwnd_log_at].x.lock.inp = (uint32_t) inp;
+ if (stcb) {
+ sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
+ } else {
+ sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
+ }
+ if (inp) {
+ sctp_clog[sctp_cwnd_log_at].x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
+ sctp_clog[sctp_cwnd_log_at].x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
+ } else {
+ sctp_clog[sctp_cwnd_log_at].x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
+ sctp_clog[sctp_cwnd_log_at].x.lock.create_lock = SCTP_LOCK_UNKNOWN;
+ }
+ sctp_clog[sctp_cwnd_log_at].x.lock.info_lock = mtx_owned(&sctppcbinfo.ipi_ep_mtx);
+ if (inp->sctp_socket) {
+ sctp_clog[sctp_cwnd_log_at].x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
+ sctp_clog[sctp_cwnd_log_at].x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
+ sctp_clog[sctp_cwnd_log_at].x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
+ } else {
+ sctp_clog[sctp_cwnd_log_at].x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
+ sctp_clog[sctp_cwnd_log_at].x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
+ sctp_clog[sctp_cwnd_log_at].x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
+ }
+}
+
+void
+sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
+{
+ int sctp_cwnd_log_at;
+
+ SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
+ sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
+ sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
+ sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MAXBURST;
+ sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
+ sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = error;
+ sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
+ sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = burst;
+ if (stcb->asoc.send_queue_cnt > 255)
+ sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = 255;
+ else
+ sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
+ if (stcb->asoc.stream_queue_cnt > 255)
+ sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = 255;
+ else
+ sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
+}
+
+void
+sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
+{
+ int sctp_cwnd_log_at;
+
+ SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
+ sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
+ sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
+ sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RWND;
+ sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
+ sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = snd_size;
+ sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
+ sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = 0;
+}
+
+void
+sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
+{
+ int sctp_cwnd_log_at;
+
+ SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
+ sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
+ sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
+ sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RWND;
+ sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
+ sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = flight_size;
+ sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
+ sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = a_rwndval;
+}
+
+void
+sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
+{
+ int sctp_cwnd_log_at;
+
+ SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
+ sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
+ sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
+ sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MBCNT;
+ sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_size = total_oq;
+ sctp_clog[sctp_cwnd_log_at].x.mbcnt.size_change = book;
+ sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_mb_size = total_mbcnt_q;
+ sctp_clog[sctp_cwnd_log_at].x.mbcnt.mbcnt_change = mbcnt;
+}
+
+void
+sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
+{
+ int sctp_cwnd_log_at;
+
+ SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
+ sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
+ sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
+ sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_MISC_EVENT;
+ sctp_clog[sctp_cwnd_log_at].x.misc.log1 = a;
+ sctp_clog[sctp_cwnd_log_at].x.misc.log2 = b;
+ sctp_clog[sctp_cwnd_log_at].x.misc.log3 = c;
+ sctp_clog[sctp_cwnd_log_at].x.misc.log4 = d;
+}
+
+void
+sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
+{
+ int sctp_cwnd_log_at;
+
+ SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
+ sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
+ sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
+ sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_WAKE;
+ sctp_clog[sctp_cwnd_log_at].x.wake.stcb = (uint32_t) stcb;
+ sctp_clog[sctp_cwnd_log_at].x.wake.wake_cnt = wake_cnt;
+ sctp_clog[sctp_cwnd_log_at].x.wake.flight = stcb->asoc.total_flight_count;
+ sctp_clog[sctp_cwnd_log_at].x.wake.send_q = stcb->asoc.send_queue_cnt;
+ sctp_clog[sctp_cwnd_log_at].x.wake.sent_q = stcb->asoc.sent_queue_cnt;
+
+ if (stcb->asoc.stream_queue_cnt < 0xff)
+ sctp_clog[sctp_cwnd_log_at].x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
+ else
+ sctp_clog[sctp_cwnd_log_at].x.wake.stream_qcnt = 0xff;
+
+ if (stcb->asoc.chunks_on_out_queue < 0xff)
+ sctp_clog[sctp_cwnd_log_at].x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
+ else
+ sctp_clog[sctp_cwnd_log_at].x.wake.chunks_on_oque = 0xff;
+
+ sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags = 0;
+ /* set in the deferred mode stuff */
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
+ sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 1;
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
+ sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 2;
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
+ sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 4;
+ /* what about the sb */
+ if (stcb->sctp_socket) {
+ struct socket *so = stcb->sctp_socket;
+
+ sctp_clog[sctp_cwnd_log_at].x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
+ } else {
+ sctp_clog[sctp_cwnd_log_at].x.wake.sbflags = 0xff;
+ }
+}
+
+void
+sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
+{
+ int sctp_cwnd_log_at;
+
+ SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
+ sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
+ sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
+ sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_BLOCK;
+ sctp_clog[sctp_cwnd_log_at].x.blk.onsb = asoc->total_output_queue_size;
+ sctp_clog[sctp_cwnd_log_at].x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
+ sctp_clog[sctp_cwnd_log_at].x.blk.peer_rwnd = asoc->peers_rwnd;
+ sctp_clog[sctp_cwnd_log_at].x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
+ sctp_clog[sctp_cwnd_log_at].x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
+ sctp_clog[sctp_cwnd_log_at].x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
+ sctp_clog[sctp_cwnd_log_at].x.blk.sndlen = sendlen;
+}
+
+int
+sctp_fill_stat_log(struct mbuf *m)
+{
+ int sctp_cwnd_log_at;
+ struct sctp_cwnd_log_req *req;
+ size_t size_limit;
+ int num, i, at, cnt_out = 0;
+
+ if (m == NULL)
+ return (EINVAL);
+
+ if ((size_t)m->m_len < sizeof(struct sctp_cwnd_log_req) +
+ sizeof(struct sctp_cwnd_log)) {
+ return (EINVAL);
+ }
+ size_limit = (m->m_len - sizeof(struct sctp_cwnd_log_req));
+ sctp_cwnd_log_at = global_sctp_cwnd_log_at;
+ req = mtod(m, struct sctp_cwnd_log_req *);
+ num = size_limit / sizeof(struct sctp_cwnd_log);
+ if (global_sctp_cwnd_log_rolled) {
+ req->num_in_log = SCTP_STAT_LOG_SIZE;
+ } else {
+ req->num_in_log = sctp_cwnd_log_at;
+ /*
+ * if the log has not rolled, we don't let you have old
+ * data.
+ */
+ if (req->end_at > sctp_cwnd_log_at) {
+ req->end_at = sctp_cwnd_log_at;
+ }
+ }
+ if ((num < SCTP_STAT_LOG_SIZE) &&
+ ((global_sctp_cwnd_log_rolled) || (sctp_cwnd_log_at > num))) {
+ /* we can't return all of it */
+ if (((req->start_at == 0) && (req->end_at == 0)) ||
+ (req->start_at >= SCTP_STAT_LOG_SIZE) ||
+ (req->end_at >= SCTP_STAT_LOG_SIZE)) {
+ /* No user request, or the request is bogus. */
+ req->num_ret = num;
+ req->end_at = sctp_cwnd_log_at - 1;
+ if ((sctp_cwnd_log_at - num) < 0) {
+ int cc;
+
+ cc = num - sctp_cwnd_log_at;
+ req->start_at = SCTP_STAT_LOG_SIZE - cc;
+ } else {
+ req->start_at = sctp_cwnd_log_at - num;
+ }
+ } else {
+ /* a user request */
+ int cc;
+
+ if (req->start_at > req->end_at) {
+ cc = (SCTP_STAT_LOG_SIZE - req->start_at) +
+ (req->end_at + 1);
+ } else {
+
+ cc = (req->end_at - req->start_at) + 1;
+ }
+ if (cc < num) {
+ num = cc;
+ }
+ req->num_ret = num;
+ }
+ } else {
+ /* We can return all of it */
+ req->start_at = 0;
+ req->end_at = sctp_cwnd_log_at - 1;
+ req->num_ret = sctp_cwnd_log_at;
+ }
+#ifdef INVARIANTS
+ if (req->num_ret > num) {
+ panic("Bad statlog get?");
+ }
+#endif
+ for (i = 0, at = req->start_at; i < req->num_ret; i++) {
+ req->log[i] = sctp_clog[at];
+ cnt_out++;
+ at++;
+ if (at >= SCTP_STAT_LOG_SIZE)
+ at = 0;
+ }
+ m->m_len = (cnt_out * sizeof(struct sctp_cwnd_log)) + sizeof(struct sctp_cwnd_log_req);
+ return (0);
+}
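+
+/*
+ * Example (illustrative sketch, hypothetical values): the copy-out loop
+ * above walks the fixed-size ring from req->start_at and wraps at
+ * SCTP_STAT_LOG_SIZE.  The same wrap-around walk over an 8-entry ring,
+ * pulling 4 entries starting at slot 6:
+ *
+ *	int ring[8], out[4], i, at;
+ *
+ *	for (i = 0, at = 6; i < 4; i++) {
+ *		out[i] = ring[at];
+ *		if (++at >= 8)
+ *			at = 0;		// slots come out as 6, 7, 0, 1
+ *	}
+ */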
+
+#endif
+
+#ifdef SCTP_AUDITING_ENABLED
+uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
+static int sctp_audit_indx = 0;
+
+static void
+sctp_print_audit_report(void)
+{
+ int i;
+ int cnt;
+
+ cnt = 0;
+ for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
+ if ((sctp_audit_data[i][0] == 0xe0) &&
+ (sctp_audit_data[i][1] == 0x01)) {
+ cnt = 0;
+ printf("\n");
+ } else if (sctp_audit_data[i][0] == 0xf0) {
+ cnt = 0;
+ printf("\n");
+ } else if ((sctp_audit_data[i][0] == 0xc0) &&
+ (sctp_audit_data[i][1] == 0x01)) {
+ printf("\n");
+ cnt = 0;
+ }
+ printf("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
+ (uint32_t) sctp_audit_data[i][1]);
+ cnt++;
+ if ((cnt % 14) == 0)
+ printf("\n");
+ }
+ for (i = 0; i < sctp_audit_indx; i++) {
+ if ((sctp_audit_data[i][0] == 0xe0) &&
+ (sctp_audit_data[i][1] == 0x01)) {
+ cnt = 0;
+ printf("\n");
+ } else if (sctp_audit_data[i][0] == 0xf0) {
+ cnt = 0;
+ printf("\n");
+ } else if ((sctp_audit_data[i][0] == 0xc0) &&
+ (sctp_audit_data[i][1] == 0x01)) {
+ printf("\n");
+ cnt = 0;
+ }
+ printf("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
+ (uint32_t) sctp_audit_data[i][1]);
+ cnt++;
+ if ((cnt % 14) == 0)
+ printf("\n");
+ }
+ printf("\n");
+}
+
+void
+sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ int resend_cnt, tot_out, rep, tot_book_cnt;
+ struct sctp_nets *lnet;
+ struct sctp_tmit_chunk *chk;
+
+ sctp_audit_data[sctp_audit_indx][0] = 0xAA;
+ sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ if (inp == NULL) {
+ sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+ sctp_audit_data[sctp_audit_indx][1] = 0x01;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ return;
+ }
+ if (stcb == NULL) {
+ sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+ sctp_audit_data[sctp_audit_indx][1] = 0x02;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ return;
+ }
+ sctp_audit_data[sctp_audit_indx][0] = 0xA1;
+ sctp_audit_data[sctp_audit_indx][1] =
+ (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ rep = 0;
+ tot_book_cnt = 0;
+ resend_cnt = tot_out = 0;
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if (chk->sent == SCTP_DATAGRAM_RESEND) {
+ resend_cnt++;
+ } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
+ tot_out += chk->book_size;
+ tot_book_cnt++;
+ }
+ }
+ if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
+ sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+ sctp_audit_data[sctp_audit_indx][1] = 0xA1;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ printf("resend_cnt:%d asoc-tot:%d\n",
+ resend_cnt, stcb->asoc.sent_queue_retran_cnt);
+ rep = 1;
+ stcb->asoc.sent_queue_retran_cnt = resend_cnt;
+ sctp_audit_data[sctp_audit_indx][0] = 0xA2;
+ sctp_audit_data[sctp_audit_indx][1] =
+ (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ }
+ if (tot_out != stcb->asoc.total_flight) {
+ sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+ sctp_audit_data[sctp_audit_indx][1] = 0xA2;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ rep = 1;
+ printf("tot_flt:%d asoc_tot:%d\n", tot_out,
+ (int)stcb->asoc.total_flight);
+ stcb->asoc.total_flight = tot_out;
+ }
+ if (tot_book_cnt != stcb->asoc.total_flight_count) {
+ sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+ sctp_audit_data[sctp_audit_indx][1] = 0xA5;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ rep = 1;
+		printf("tot_flt_book:%d\n", tot_book_cnt);
+
+ stcb->asoc.total_flight_count = tot_book_cnt;
+ }
+ tot_out = 0;
+ TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+ tot_out += lnet->flight_size;
+ }
+ if (tot_out != stcb->asoc.total_flight) {
+ sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+ sctp_audit_data[sctp_audit_indx][1] = 0xA3;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ rep = 1;
+ printf("real flight:%d net total was %d\n",
+ stcb->asoc.total_flight, tot_out);
+ /* now corrective action */
+ TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+
+ tot_out = 0;
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if ((chk->whoTo == lnet) &&
+ (chk->sent < SCTP_DATAGRAM_RESEND)) {
+ tot_out += chk->book_size;
+ }
+ }
+ if (lnet->flight_size != tot_out) {
+ printf("net:%x flight was %d corrected to %d\n",
+ (uint32_t) lnet, lnet->flight_size, tot_out);
+ lnet->flight_size = tot_out;
+ }
+ }
+ }
+ if (rep) {
+ sctp_print_audit_report();
+ }
+}
+
+void
+sctp_audit_log(uint8_t ev, uint8_t fd)
+{
+ int s;
+
+ s = splnet();
+ sctp_audit_data[sctp_audit_indx][0] = ev;
+ sctp_audit_data[sctp_audit_indx][1] = fd;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ splx(s);
+}
+
+#endif
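+
+/*
+ * Example (illustrative sketch, hypothetical ring): sctp_print_audit_report()
+ * above emits the ring in chronological order by making two passes -- from
+ * the write index to the end of the array (the oldest records), then from
+ * the start of the array up to the write index (the newest).  In miniature:
+ *
+ *	uint8_t ring[16][2];
+ *	int idx, i;		// idx == next slot to overwrite == oldest
+ *
+ *	for (i = idx; i < 16; i++)	// oldest ... end of array
+ *		printf("%2.2x%2.2x ", ring[i][0], ring[i][1]);
+ *	for (i = 0; i < idx; i++)	// start of array ... newest
+ *		printf("%2.2x%2.2x ", ring[i][0], ring[i][1]);
+ */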
+
+/*
+ * A list of sizes based on typical MTUs, used only when the next hop's
+ * MTU is not known.
+ */
+static int sctp_mtu_sizes[] = {
+ 68,
+ 296,
+ 508,
+ 512,
+ 544,
+ 576,
+ 1006,
+ 1492,
+ 1500,
+ 1536,
+ 2002,
+ 2048,
+ 4352,
+ 4464,
+ 8166,
+ 17914,
+ 32000,
+ 65535
+};
+
+void
+sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
+{
+ struct sctp_association *asoc;
+ struct sctp_nets *net;
+
+ asoc = &stcb->asoc;
+
+ callout_stop(&asoc->hb_timer.timer);
+ callout_stop(&asoc->dack_timer.timer);
+ callout_stop(&asoc->strreset_timer.timer);
+ callout_stop(&asoc->asconf_timer.timer);
+ callout_stop(&asoc->autoclose_timer.timer);
+ callout_stop(&asoc->delayed_event_timer.timer);
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ callout_stop(&net->fr_timer.timer);
+ callout_stop(&net->pmtu_timer.timer);
+ }
+}
+
+int
+find_next_best_mtu(int totsz)
+{
+	int i, prefer;
+
+	/*
+	 * If we are in here we must find the next best fit based on the
+	 * size of the datagram that failed to be sent.
+	 */
+	prefer = 0;
+	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
+		if (totsz < sctp_mtu_sizes[i]) {
+			prefer = i - 1;
+			if (prefer < 0)
+				prefer = 0;
+			break;
+		}
+	}
+	return (sctp_mtu_sizes[prefer]);
+}
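+
+/*
+ * Example (illustrative, hypothetical size): if a 1400-byte datagram
+ * fails, the scan above stops at the first table entry larger than 1400
+ * (1492) and returns the entry below it:
+ *
+ *	mtu = find_next_best_mtu(1400);		// returns 1006
+ */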
+
+void
+sctp_fill_random_store(struct sctp_pcb *m)
+{
+ /*
+	 * Here we use MD5/SHA-1 to hash our good random numbers together
+	 * with our counter. The result becomes the next batch of random
+	 * numbers and we set up to hand these out. Note that we do no
+	 * locking to protect this; that is ok, since if competing callers
+	 * get in here we just get more gobbledygook in the random store,
+	 * which is what we want. There is a danger that two callers will
+	 * use the same random numbers, but that's ok too since that is
+	 * random as well :->
+ */
+ m->store_at = 0;
+ sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
+ sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
+ sizeof(m->random_counter), (uint8_t *) m->random_store);
+ m->random_counter++;
+}
+
+uint32_t
+sctp_select_initial_TSN(struct sctp_pcb *m)
+{
+ /*
+	 * A true implementation should use a random selection process to
+	 * get the initial TSN, using RFC 1750 as a guideline.
+ */
+ u_long x, *xp;
+ uint8_t *p;
+
+ if (m->initial_sequence_debug != 0) {
+ uint32_t ret;
+
+ ret = m->initial_sequence_debug;
+ m->initial_sequence_debug++;
+ return (ret);
+ }
+ if ((m->store_at + sizeof(u_long)) > SCTP_SIGNATURE_SIZE) {
+ /* Refill the random store */
+ sctp_fill_random_store(m);
+ }
+ p = &m->random_store[(int)m->store_at];
+ xp = (u_long *)p;
+ x = *xp;
+ m->store_at += sizeof(u_long);
+ return (x);
+}
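+
+/*
+ * Example (illustrative sketch; refill() is a hypothetical stand-in for
+ * sctp_fill_random_store()): together the two routines above implement a
+ * hashed-counter PRF -- hash the random seed with a counter into a block,
+ * hand the block out a word at a time, and rehash with the counter
+ * incremented once the block is exhausted:
+ *
+ *	if (store_at + sizeof(u_long) > SCTP_SIGNATURE_SIZE)
+ *		refill();	// store = HMAC(seed, counter++); store_at = 0
+ *	x = *(u_long *)&store[store_at];
+ *	store_at += sizeof(u_long);
+ */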
+
+uint32_t
+sctp_select_a_tag(struct sctp_inpcb *m)
+{
+ u_long x, not_done;
+ struct timeval now;
+
+ SCTP_GETTIME_TIMEVAL(&now);
+ not_done = 1;
+ while (not_done) {
+ x = sctp_select_initial_TSN(&m->sctp_ep);
+ if (x == 0) {
+ /* we never use 0 */
+ continue;
+ }
+ if (sctp_is_vtag_good(m, x, &now)) {
+ not_done = 0;
+ }
+ }
+ return (x);
+}
+
+
+int
+sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc,
+ int for_a_init, uint32_t override_tag)
+{
+ /*
+ * Anything set to zero is taken care of by the allocation routine's
+ * bzero
+ */
+
+ /*
+	 * Up front, select what scoping to apply to the addresses I tell
+	 * my peer. Not sure what to do with these right now; we will need
+	 * to come up with a way to set them. We may need to pass them
+	 * through from the caller in the sctp_aloc_assoc() function.
+ */
+ int i;
+
+ /* init all variables to a known value. */
+ asoc->state = SCTP_STATE_INUSE;
+ asoc->max_burst = m->sctp_ep.max_burst;
+ asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
+ asoc->cookie_life = m->sctp_ep.def_cookie_life;
+ asoc->sctp_cmt_on_off = (uint8_t) sctp_cmt_on_off;
+#ifdef AF_INET
+ asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
+#else
+ asoc->default_tos = 0;
+#endif
+
+#ifdef AF_INET6
+ asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
+#else
+ asoc->default_flowlabel = 0;
+#endif
+	if (override_tag) {
+		struct timeval now;
+
+		SCTP_GETTIME_TIMEVAL(&now);
+		if (sctp_is_vtag_good(m, override_tag, &now)) {
+ asoc->my_vtag = override_tag;
+ } else {
+ return (ENOMEM);
+ }
+
+ } else {
+ asoc->my_vtag = sctp_select_a_tag(m);
+ }
+ if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
+ asoc->hb_is_disabled = 1;
+ else
+ asoc->hb_is_disabled = 0;
+
+ asoc->refcnt = 0;
+ asoc->assoc_up_sent = 0;
+ asoc->assoc_id = asoc->my_vtag;
+ asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
+ sctp_select_initial_TSN(&m->sctp_ep);
+	/* we are optimistic here */
+ asoc->peer_supports_pktdrop = 1;
+
+ asoc->sent_queue_retran_cnt = 0;
+
+ /* for CMT */
+ asoc->last_net_data_came_from = NULL;
+
+ /* This will need to be adjusted */
+ asoc->last_cwr_tsn = asoc->init_seq_number - 1;
+ asoc->last_acked_seq = asoc->init_seq_number - 1;
+ asoc->advanced_peer_ack_point = asoc->last_acked_seq;
+ asoc->asconf_seq_in = asoc->last_acked_seq;
+
+ /* here we are different, we hold the next one we expect */
+ asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
+
+ asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
+ asoc->initial_rto = m->sctp_ep.initial_rto;
+
+ asoc->max_init_times = m->sctp_ep.max_init_times;
+ asoc->max_send_times = m->sctp_ep.max_send_times;
+ asoc->def_net_failure = m->sctp_ep.def_net_failure;
+ asoc->free_chunk_cnt = 0;
+
+ asoc->iam_blocking = 0;
+ /* ECN Nonce initialization */
+ asoc->context = m->sctp_context;
+ asoc->def_send = m->def_send;
+ asoc->ecn_nonce_allowed = 0;
+ asoc->receiver_nonce_sum = 1;
+ asoc->nonce_sum_expect_base = 1;
+ asoc->nonce_sum_check = 1;
+ asoc->nonce_resync_tsn = 0;
+ asoc->nonce_wait_for_ecne = 0;
+ asoc->nonce_wait_tsn = 0;
+ asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
+ asoc->pr_sctp_cnt = 0;
+ asoc->total_output_queue_size = 0;
+
+ if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ struct in6pcb *inp6;
+
+
+		/* It's a V6 socket */
+ inp6 = (struct in6pcb *)m;
+ asoc->ipv6_addr_legal = 1;
+ /* Now look at the binding flag to see if V4 will be legal */
+		if ((inp6->inp_flags & IN6P_IPV6_V6ONLY) == 0) {
+ asoc->ipv4_addr_legal = 1;
+ } else {
+ /* V4 addresses are NOT legal on the association */
+ asoc->ipv4_addr_legal = 0;
+ }
+ } else {
+		/* It's a V4 socket, no V6 */
+ asoc->ipv4_addr_legal = 1;
+ asoc->ipv6_addr_legal = 0;
+ }
+
+
+ asoc->my_rwnd = max(m->sctp_socket->so_rcv.sb_hiwat, SCTP_MINIMAL_RWND);
+ asoc->peers_rwnd = m->sctp_socket->so_rcv.sb_hiwat;
+
+ asoc->smallest_mtu = m->sctp_frag_point;
+ asoc->minrto = m->sctp_ep.sctp_minrto;
+ asoc->maxrto = m->sctp_ep.sctp_maxrto;
+
+ asoc->locked_on_sending = NULL;
+ asoc->stream_locked_on = 0;
+ asoc->ecn_echo_cnt_onq = 0;
+ asoc->stream_locked = 0;
+
+ LIST_INIT(&asoc->sctp_local_addr_list);
+ TAILQ_INIT(&asoc->nets);
+ TAILQ_INIT(&asoc->pending_reply_queue);
+ asoc->last_asconf_ack_sent = NULL;
+ /* Setup to fill the hb random cache at first HB */
+ asoc->hb_random_idx = 4;
+
+ asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
+
+ /*
+ * Now the stream parameters, here we allocate space for all streams
+ * that we request by default.
+ */
+ asoc->streamoutcnt = asoc->pre_open_streams =
+ m->sctp_ep.pre_open_stream_count;
+ SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
+ asoc->streamoutcnt * sizeof(struct sctp_stream_out),
+ "StreamsOut");
+ if (asoc->strmout == NULL) {
+ /* big trouble no memory */
+ return (ENOMEM);
+ }
+ for (i = 0; i < asoc->streamoutcnt; i++) {
+ /*
+		 * The inbound side must be set to 0xffff. Also NOTE that
+		 * when we get the INIT-ACK back (for the INIT sender) we
+		 * MUST reduce the count (streamoutcnt), but first check
+		 * whether we sent on any of the upper streams that were
+		 * dropped (if some were). Those that were dropped must be
+		 * reported to the upper layer as failed to send.
+ */
+ asoc->strmout[i].next_sequence_sent = 0x0;
+ TAILQ_INIT(&asoc->strmout[i].outqueue);
+ asoc->strmout[i].stream_no = i;
+ asoc->strmout[i].last_msg_incomplete = 0;
+ asoc->strmout[i].next_spoke.tqe_next = 0;
+ asoc->strmout[i].next_spoke.tqe_prev = 0;
+ }
+ /* Now the mapping array */
+ asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
+ SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
+ "MappingArray");
+ if (asoc->mapping_array == NULL) {
+ SCTP_FREE(asoc->strmout);
+ return (ENOMEM);
+ }
+ memset(asoc->mapping_array, 0, asoc->mapping_array_size);
+ /* Now the init of the other outqueues */
+ TAILQ_INIT(&asoc->free_chunks);
+ TAILQ_INIT(&asoc->free_strmoq);
+ TAILQ_INIT(&asoc->out_wheel);
+ TAILQ_INIT(&asoc->control_send_queue);
+ TAILQ_INIT(&asoc->send_queue);
+ TAILQ_INIT(&asoc->sent_queue);
+ TAILQ_INIT(&asoc->reasmqueue);
+ TAILQ_INIT(&asoc->resetHead);
+ asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
+ TAILQ_INIT(&asoc->asconf_queue);
+ /* authentication fields */
+ asoc->authinfo.random = NULL;
+ asoc->authinfo.assoc_key = NULL;
+ asoc->authinfo.assoc_keyid = 0;
+ asoc->authinfo.recv_key = NULL;
+ asoc->authinfo.recv_keyid = 0;
+ LIST_INIT(&asoc->shared_keys);
+
+ return (0);
+}
+
+int
+sctp_expand_mapping_array(struct sctp_association *asoc)
+{
+ /* mapping array needs to grow */
+ uint8_t *new_array;
+ uint16_t new_size;
+
+ new_size = asoc->mapping_array_size + SCTP_MAPPING_ARRAY_INCR;
+ SCTP_MALLOC(new_array, uint8_t *, new_size, "MappingArray");
+ if (new_array == NULL) {
+ /* can't get more, forget it */
+ printf("No memory for expansion of SCTP mapping array %d\n",
+ new_size);
+ return (-1);
+ }
+ memset(new_array, 0, new_size);
+ memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
+ SCTP_FREE(asoc->mapping_array);
+ asoc->mapping_array = new_array;
+ asoc->mapping_array_size = new_size;
+ return (0);
+}
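+
+/*
+ * Example (illustrative sketch, hypothetical names): the grow pattern
+ * above -- allocate the larger array, zero it, copy the old contents,
+ * then free and swap -- never leaves the association without a valid
+ * mapping array, even when the allocation fails:
+ *
+ *	new = alloc(new_size);
+ *	if (new == NULL)
+ *		return (-1);	// old array is still intact and in use
+ *	memset(new, 0, new_size);
+ *	memcpy(new, old, old_size);
+ *	free(old);
+ *	old = new; old_size = new_size;
+ */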
+
+extern unsigned int sctp_early_fr_msec;
+
+static void
+sctp_handle_addr_wq(void)
+{
+ /* deal with the ADDR wq from the rtsock calls */
+ struct sctp_laddr *wi;
+
+ SCTP_IPI_ADDR_LOCK();
+ wi = LIST_FIRST(&sctppcbinfo.addr_wq);
+ if (wi == NULL) {
+ SCTP_IPI_ADDR_UNLOCK();
+ return;
+ }
+ LIST_REMOVE(wi, sctp_nxt_addr);
+ if (!LIST_EMPTY(&sctppcbinfo.addr_wq)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
+ (struct sctp_inpcb *)NULL,
+ (struct sctp_tcb *)NULL,
+ (struct sctp_nets *)NULL);
+ }
+ SCTP_IPI_ADDR_UNLOCK();
+ if (wi->action == RTM_ADD) {
+ sctp_add_ip_address(wi->ifa);
+ } else if (wi->action == RTM_DELETE) {
+ sctp_delete_ip_address(wi->ifa);
+ }
+ IFAFREE(wi->ifa);
+ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, wi);
+ SCTP_DECR_LADDR_COUNT();
+}
+
+void
+sctp_timeout_handler(void *t)
+{
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+ struct sctp_nets *net;
+ struct sctp_timer *tmr;
+ int s, did_output;
+ struct sctp_iterator *it = NULL;
+
+
+ s = splnet();
+ tmr = (struct sctp_timer *)t;
+ inp = (struct sctp_inpcb *)tmr->ep;
+ stcb = (struct sctp_tcb *)tmr->tcb;
+ net = (struct sctp_nets *)tmr->net;
+ did_output = 1;
+
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xF0, (uint8_t) tmr->type);
+ sctp_auditing(3, inp, stcb, net);
+#endif
+
+ /* sanity checks... */
+ if (tmr->self != (void *)tmr) {
+ /*
+ * printf("Stale SCTP timer fired (%p), ignoring...\n",
+ * tmr);
+ */
+ splx(s);
+ return;
+ }
+ if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
+ /*
+ * printf("SCTP timer fired with invalid type: 0x%x\n",
+ * tmr->type);
+ */
+ splx(s);
+ return;
+ }
+ if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
+ splx(s);
+ return;
+ }
+ /* if this is an iterator timeout, get the struct and clear inp */
+ if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
+ it = (struct sctp_iterator *)inp;
+ inp = NULL;
+ }
+ if (inp) {
+ SCTP_INP_INCR_REF(inp);
+ if ((inp->sctp_socket == 0) &&
+ ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
+ (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
+ (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
+ (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
+ (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
+ ) {
+ splx(s);
+ SCTP_INP_DECR_REF(inp);
+ return;
+ }
+ }
+ if (stcb) {
+ if (stcb->asoc.state == 0) {
+ splx(s);
+ if (inp) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ return;
+ }
+ }
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
+ printf("Timer type %d goes off\n", tmr->type);
+ }
+#endif /* SCTP_DEBUG */
+ if (!callout_active(&tmr->timer)) {
+ splx(s);
+ if (inp) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ return;
+ }
+ if (stcb) {
+ atomic_add_16(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_add_16(&stcb->asoc.refcnt, -1);
+ }
+ /* mark as being serviced now */
+ callout_deactivate(&tmr->timer);
+
+ /* call the handler for the appropriate timer type */
+ switch (tmr->type) {
+ case SCTP_TIMER_TYPE_ADDR_WQ:
+ sctp_handle_addr_wq();
+ break;
+ case SCTP_TIMER_TYPE_ITERATOR:
+ SCTP_STAT_INCR(sctps_timoiterator);
+ sctp_iterator_timer(it);
+ break;
+ case SCTP_TIMER_TYPE_SEND:
+ SCTP_STAT_INCR(sctps_timodata);
+ stcb->asoc.num_send_timers_up--;
+ if (stcb->asoc.num_send_timers_up < 0) {
+ stcb->asoc.num_send_timers_up = 0;
+ }
+ if (sctp_t3rxt_timer(inp, stcb, net)) {
+			/* no need to unlock the tcb, it's gone */
+
+ goto out_decr;
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, net);
+#endif
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3);
+ if ((stcb->asoc.num_send_timers_up == 0) &&
+ (stcb->asoc.sent_queue_cnt > 0)
+ ) {
+ struct sctp_tmit_chunk *chk;
+
+			/*
+			 * Safeguard. If there are chunks on the sent queue
+			 * but no timers running, something is wrong... so
+			 * we start a timer on the first chunk on the sent
+			 * queue, on whatever net it was sent to.
+			 */
+ chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
+ chk->whoTo);
+ }
+ break;
+ case SCTP_TIMER_TYPE_INIT:
+ SCTP_STAT_INCR(sctps_timoinit);
+ if (sctp_t1init_timer(inp, stcb, net)) {
+			/* no need to unlock the tcb, it's gone */
+ goto out_decr;
+ }
+ /* We do output but not here */
+ did_output = 0;
+ break;
+ case SCTP_TIMER_TYPE_RECV:
+ SCTP_STAT_INCR(sctps_timosack);
+ sctp_send_sack(stcb);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, net);
+#endif
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR);
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWN:
+ if (sctp_shutdown_timer(inp, stcb, net)) {
+			/* no need to unlock the tcb, it's gone */
+ goto out_decr;
+ }
+ SCTP_STAT_INCR(sctps_timoshutdown);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, net);
+#endif
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR);
+ break;
+ case SCTP_TIMER_TYPE_HEARTBEAT:
+ {
+ struct sctp_nets *net;
+ int cnt_of_unconf = 0;
+
+ SCTP_STAT_INCR(sctps_timoheartbeat);
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
+ (net->dest_state & SCTP_ADDR_REACHABLE)) {
+ cnt_of_unconf++;
+ }
+ }
+ if (cnt_of_unconf == 0) {
+ if (sctp_heartbeat_timer(inp, stcb, net, cnt_of_unconf)) {
+				/* no need to unlock the tcb, it's gone */
+ goto out_decr;
+ }
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, net);
+#endif
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
+ stcb, net);
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR);
+ }
+ break;
+ case SCTP_TIMER_TYPE_COOKIE:
+ if (sctp_cookie_timer(inp, stcb, net)) {
+			/* no need to unlock the tcb, it's gone */
+ goto out_decr;
+ }
+ SCTP_STAT_INCR(sctps_timocookie);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, net);
+#endif
+ /*
+		 * We treat the T3 and Cookie timers pretty much the same
+		 * with respect to the "from" location in chunk_output.
+ */
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3);
+ break;
+ case SCTP_TIMER_TYPE_NEWCOOKIE:
+ {
+ struct timeval tv;
+ int i, secret;
+
+ SCTP_STAT_INCR(sctps_timosecret);
+ SCTP_GETTIME_TIMEVAL(&tv);
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_ep.time_of_secret_change = tv.tv_sec;
+ inp->sctp_ep.last_secret_number =
+ inp->sctp_ep.current_secret_number;
+ inp->sctp_ep.current_secret_number++;
+ if (inp->sctp_ep.current_secret_number >=
+ SCTP_HOW_MANY_SECRETS) {
+ inp->sctp_ep.current_secret_number = 0;
+ }
+ secret = (int)inp->sctp_ep.current_secret_number;
+ for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
+ inp->sctp_ep.secret_key[secret][i] =
+ sctp_select_initial_TSN(&inp->sctp_ep);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
+ }
+ did_output = 0;
+ break;
+ case SCTP_TIMER_TYPE_PATHMTURAISE:
+ SCTP_STAT_INCR(sctps_timopathmtu);
+ sctp_pathmtu_timer(inp, stcb, net);
+ did_output = 0;
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWNACK:
+ if (sctp_shutdownack_timer(inp, stcb, net)) {
+			/* no need to unlock the tcb, it's gone */
+ goto out_decr;
+ }
+ SCTP_STAT_INCR(sctps_timoshutdownack);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, net);
+#endif
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR);
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
+ SCTP_STAT_INCR(sctps_timoshutdownguard);
+ sctp_abort_an_association(inp, stcb,
+ SCTP_SHUTDOWN_GUARD_EXPIRES, NULL);
+		/* no need to unlock the tcb, it's gone */
+ goto out_decr;
+ break;
+
+ case SCTP_TIMER_TYPE_STRRESET:
+ if (sctp_strreset_timer(inp, stcb, net)) {
+			/* no need to unlock the tcb, it's gone */
+ goto out_decr;
+ }
+ SCTP_STAT_INCR(sctps_timostrmrst);
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR);
+ break;
+ case SCTP_TIMER_TYPE_EARLYFR:
+ /* Need to do FR of things for net */
+ SCTP_STAT_INCR(sctps_timoearlyfr);
+ sctp_early_fr_timer(inp, stcb, net);
+ break;
+ case SCTP_TIMER_TYPE_ASCONF:
+ if (sctp_asconf_timer(inp, stcb, net)) {
+			/* no need to unlock the tcb, it's gone */
+ goto out_decr;
+ }
+ SCTP_STAT_INCR(sctps_timoasconf);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, net);
+#endif
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR);
+ break;
+
+ case SCTP_TIMER_TYPE_AUTOCLOSE:
+ SCTP_STAT_INCR(sctps_timoautoclose);
+ sctp_autoclose_timer(inp, stcb, net);
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR);
+ did_output = 0;
+ break;
+ case SCTP_TIMER_TYPE_ASOCKILL:
+ SCTP_STAT_INCR(sctps_timoassockill);
+ /* Can we free it yet? */
+ SCTP_INP_DECR_REF(inp);
+ sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL);
+ sctp_free_assoc(inp, stcb, 0);
+ /*
+		 * free asoc; it always unlocks (or destroys) so prevent a
+		 * duplicate unlock or an unlock of a freed mtx :-0
+ */
+ stcb = NULL;
+ goto out_no_decr;
+ break;
+ case SCTP_TIMER_TYPE_INPKILL:
+ SCTP_STAT_INCR(sctps_timoinpkill);
+ /*
+ * special case, take away our increment since WE are the
+ * killer
+ */
+ SCTP_INP_DECR_REF(inp);
+ sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL);
+ sctp_inpcb_free(inp, 1, 0);
+ goto out_no_decr;
+ break;
+ default:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
+ printf("sctp_timeout_handler:unknown timer %d\n",
+ tmr->type);
+ }
+#endif /* SCTP_DEBUG */
+ break;
+ };
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xF1, (uint8_t) tmr->type);
+ if (inp)
+ sctp_auditing(5, inp, stcb, net);
+#endif
+ if ((did_output) && stcb) {
+ /*
+ * Now we need to clean up the control chunk chain if an
+		 * ECNE is on it. It must be marked as UNSENT again so the
+		 * next call will continue to send it until such time that
+		 * we get a CWR to remove it. It is, however, unlikely that
+		 * we will find an ECN echo on the chain.
+ */
+ sctp_fix_ecn_echo(&stcb->asoc);
+ }
+ if (stcb) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+out_decr:
+ if (inp) {
+ SCTP_INP_DECR_REF(inp);
+ }
+out_no_decr:
+
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
+ printf("Timer now complete (type %d)\n", tmr->type);
+ }
+#endif /* SCTP_DEBUG */
+ splx(s);
+}
+
+int
+sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ int to_ticks;
+ struct sctp_timer *tmr;
+
+
+ if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
+ (inp == NULL))
+ return (EFAULT);
+
+ to_ticks = 0;
+
+ tmr = NULL;
+ if (stcb) {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ }
+ switch (t_type) {
+ case SCTP_TIMER_TYPE_ADDR_WQ:
+ /* Only 1 tick away :-) */
+ tmr = &sctppcbinfo.addr_wq_timer;
+ to_ticks = 1;
+ break;
+ case SCTP_TIMER_TYPE_ITERATOR:
+ {
+ struct sctp_iterator *it;
+
+ it = (struct sctp_iterator *)inp;
+ tmr = &it->tmr;
+ to_ticks = SCTP_ITERATOR_TICKS;
+ }
+ break;
+ case SCTP_TIMER_TYPE_SEND:
+ /* Here we use the RTO timer */
+ {
+ int rto_val;
+
+ if ((stcb == NULL) || (net == NULL)) {
+ return (EFAULT);
+ }
+ tmr = &net->rxt_timer;
+ if (net->RTO == 0) {
+ rto_val = stcb->asoc.initial_rto;
+ } else {
+ rto_val = net->RTO;
+ }
+ to_ticks = MSEC_TO_TICKS(rto_val);
+ }
+ break;
+ case SCTP_TIMER_TYPE_INIT:
+ /*
+		 * Here we use the INIT timer default, usually about 1
+ * minute.
+ */
+ if ((stcb == NULL) || (net == NULL)) {
+ return (EFAULT);
+ }
+ tmr = &net->rxt_timer;
+ if (net->RTO == 0) {
+ to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = MSEC_TO_TICKS(net->RTO);
+ }
+ break;
+ case SCTP_TIMER_TYPE_RECV:
+ /*
+		 * Here we use the Delayed-Ack timer value from the inp,
+		 * usually about 200 ms.
+ */
+ if (stcb == NULL) {
+ return (EFAULT);
+ }
+ tmr = &stcb->asoc.dack_timer;
+ to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWN:
+ /* Here we use the RTO of the destination. */
+ if ((stcb == NULL) || (net == NULL)) {
+ return (EFAULT);
+ }
+ if (net->RTO == 0) {
+ to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = MSEC_TO_TICKS(net->RTO);
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_HEARTBEAT:
+ /*
+		 * The net is used here so that we can add in the RTO, even
+		 * though we use a different timer. We also add the HB
+		 * delay PLUS a random jitter.
+ */
+ if (stcb == NULL) {
+ return (EFAULT);
+ } {
+ uint32_t rndval;
+ uint8_t this_random;
+ int cnt_of_unconf = 0;
+ struct sctp_nets *lnet;
+
+ TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+ if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
+ (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
+ cnt_of_unconf++;
+ }
+ }
+ if (cnt_of_unconf) {
+ lnet = NULL;
+ sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
+ }
+ if (stcb->asoc.hb_random_idx > 3) {
+ rndval = sctp_select_initial_TSN(&inp->sctp_ep);
+ memcpy(stcb->asoc.hb_random_values, &rndval,
+ sizeof(stcb->asoc.hb_random_values));
+ this_random = stcb->asoc.hb_random_values[0];
+ stcb->asoc.hb_random_idx = 0;
+ stcb->asoc.hb_ect_randombit = 0;
+ } else {
+ this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
+ stcb->asoc.hb_random_idx++;
+ stcb->asoc.hb_ect_randombit = 0;
+ }
+ /*
+			 * this_random will be 0 - 255 ms; RTO is in ms.
+ */
+ if ((stcb->asoc.hb_is_disabled) &&
+ (cnt_of_unconf == 0)) {
+ return (0);
+ }
+ if (net) {
+ struct sctp_nets *lnet;
+ int delay;
+
+ delay = stcb->asoc.heart_beat_delay;
+ TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+ if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
+ ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
+ (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
+ delay = 0;
+ }
+ }
+ if (net->RTO == 0) {
+ /* Never been checked */
+ to_ticks = this_random + stcb->asoc.initial_rto + delay;
+ } else {
+ /* set rto_val to the ms */
+ to_ticks = delay + net->RTO + this_random;
+ }
+ } else {
+ if (cnt_of_unconf) {
+ to_ticks = this_random + stcb->asoc.initial_rto;
+ } else {
+ to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
+ }
+ }
+ /*
+			 * Now convert to_ticks, currently in ms, into real
+			 * clock ticks.
+ */
+ to_ticks = MSEC_TO_TICKS(to_ticks);
+ tmr = &stcb->asoc.hb_timer;
+ }
+ break;
+ case SCTP_TIMER_TYPE_COOKIE:
+ /*
+ * Here we can use the RTO timer from the network since one
+		 * RTT was complete. If a retransmission happened then we
+		 * will be using the initial RTO value.
+ */
+ if ((stcb == NULL) || (net == NULL)) {
+ return (EFAULT);
+ }
+ if (net->RTO == 0) {
+ to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = MSEC_TO_TICKS(net->RTO);
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_NEWCOOKIE:
+ /*
+		 * Nothing needed but the endpoint here; usually about 60
+		 * minutes.
+ */
+ tmr = &inp->sctp_ep.signature_change;
+ to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
+ break;
+ case SCTP_TIMER_TYPE_ASOCKILL:
+ if (stcb == NULL) {
+ return (EFAULT);
+ }
+ tmr = &stcb->asoc.strreset_timer;
+ to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
+ break;
+ case SCTP_TIMER_TYPE_INPKILL:
+ /*
+		 * The inp is set up to die. We re-use the signature_change
+ * timer since that has stopped and we are in the GONE
+ * state.
+ */
+ tmr = &inp->sctp_ep.signature_change;
+ to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
+ break;
+ case SCTP_TIMER_TYPE_PATHMTURAISE:
+ /*
+		 * Here we use the value found in the EP for PMTU, usually
+ * about 10 minutes.
+ */
+ if (stcb == NULL) {
+ return (EFAULT);
+ }
+ if (net == NULL) {
+ return (EFAULT);
+ }
+ to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
+ tmr = &net->pmtu_timer;
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWNACK:
+ /* Here we use the RTO of the destination */
+ if ((stcb == NULL) || (net == NULL)) {
+ return (EFAULT);
+ }
+ if (net->RTO == 0) {
+ to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = MSEC_TO_TICKS(net->RTO);
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
+ /*
+		 * Here we use the endpoint's shutdown guard timer, usually
+ * about 3 minutes.
+ */
+ if (stcb == NULL) {
+ return (EFAULT);
+ }
+ to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
+ tmr = &stcb->asoc.shut_guard_timer;
+ break;
+ case SCTP_TIMER_TYPE_STRRESET:
+ /*
+ * Here the timer comes from the inp but its value is from
+ * the RTO.
+ */
+ if ((stcb == NULL) || (net == NULL)) {
+ return (EFAULT);
+ }
+ if (net->RTO == 0) {
+ to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = MSEC_TO_TICKS(net->RTO);
+ }
+ tmr = &stcb->asoc.strreset_timer;
+ break;
+
+ case SCTP_TIMER_TYPE_EARLYFR:
+ {
+ unsigned int msec;
+
+ if ((stcb == NULL) || (net == NULL)) {
+ return (EFAULT);
+ }
+ if (net->flight_size > net->cwnd) {
+ /* no need to start */
+ return (0);
+ }
+ SCTP_STAT_INCR(sctps_earlyfrstart);
+ if (net->lastsa == 0) {
+ /* Hmm no rtt estimate yet? */
+ msec = stcb->asoc.initial_rto >> 2;
+ } else {
+ msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
+ }
+ if (msec < sctp_early_fr_msec) {
+ msec = sctp_early_fr_msec;
+ if (msec < SCTP_MINFR_MSEC_FLOOR) {
+ msec = SCTP_MINFR_MSEC_FLOOR;
+ }
+ }
+ to_ticks = MSEC_TO_TICKS(msec);
+ tmr = &net->fr_timer;
+ }
+ break;
+ case SCTP_TIMER_TYPE_ASCONF:
+ /*
+ * Here the timer comes from the inp but its value is from
+ * the RTO.
+ */
+ if ((stcb == NULL) || (net == NULL)) {
+ return (EFAULT);
+ }
+ if (net->RTO == 0) {
+ to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = MSEC_TO_TICKS(net->RTO);
+ }
+ tmr = &stcb->asoc.asconf_timer;
+ break;
+ case SCTP_TIMER_TYPE_AUTOCLOSE:
+ if (stcb == NULL) {
+ return (EFAULT);
+ }
+ if (stcb->asoc.sctp_autoclose_ticks == 0) {
+ /*
+ * Really an error since stcb is NOT set to
+ * autoclose
+ */
+ return (0);
+ }
+ to_ticks = stcb->asoc.sctp_autoclose_ticks;
+ tmr = &stcb->asoc.autoclose_timer;
+ break;
+ default:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
+ printf("sctp_timer_start:Unknown timer type %d\n",
+ t_type);
+ }
+#endif /* SCTP_DEBUG */
+ return (EFAULT);
+ break;
+ };
+ if ((to_ticks <= 0) || (tmr == NULL)) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
+ printf("sctp_timer_start:%d:software error to_ticks:%d tmr:%p not set ??\n",
+ t_type, to_ticks, tmr);
+ }
+#endif /* SCTP_DEBUG */
+ return (EFAULT);
+ }
+ if (callout_pending(&tmr->timer)) {
+ /*
+		 * We do NOT allow the timer to already be running; if it
+		 * is, we leave the current one up unchanged.
+ */
+ return (EALREADY);
+ }
+ /* At this point we can proceed */
+ if (t_type == SCTP_TIMER_TYPE_SEND) {
+ stcb->asoc.num_send_timers_up++;
+ }
+ tmr->type = t_type;
+ tmr->ep = (void *)inp;
+ tmr->tcb = (void *)stcb;
+ tmr->net = (void *)net;
+ tmr->self = (void *)tmr;
+ tmr->ticks = ticks;
+ callout_reset(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
+ return (0);
+}
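+
+/*
+ * Example (illustrative, hypothetical values): for the heartbeat case
+ * above the timeout is accumulated in milliseconds and only converted
+ * to ticks at the end.  With RTO = 300 ms, a 30 s heartbeat delay and
+ * this_random = 100:
+ *
+ *	to_ticks = delay + net->RTO + this_random;	// 30400 ms
+ *	to_ticks = MSEC_TO_TICKS(to_ticks);	// 30400 ticks at hz = 1000
+ */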
+
+int
+sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_timer *tmr;
+
+ if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
+ (inp == NULL))
+ return (EFAULT);
+
+ tmr = NULL;
+ if (stcb) {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ }
+ switch (t_type) {
+ case SCTP_TIMER_TYPE_ADDR_WQ:
+ tmr = &sctppcbinfo.addr_wq_timer;
+ break;
+ case SCTP_TIMER_TYPE_EARLYFR:
+ if ((stcb == NULL) || (net == NULL)) {
+ return (EFAULT);
+ }
+ tmr = &net->fr_timer;
+ SCTP_STAT_INCR(sctps_earlyfrstop);
+ break;
+ case SCTP_TIMER_TYPE_ITERATOR:
+ {
+ struct sctp_iterator *it;
+
+ it = (struct sctp_iterator *)inp;
+ tmr = &it->tmr;
+ }
+ break;
+ case SCTP_TIMER_TYPE_SEND:
+ if ((stcb == NULL) || (net == NULL)) {
+ return (EFAULT);
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_INIT:
+ if ((stcb == NULL) || (net == NULL)) {
+ return (EFAULT);
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_RECV:
+ if (stcb == NULL) {
+ return (EFAULT);
+ }
+ tmr = &stcb->asoc.dack_timer;
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWN:
+ if ((stcb == NULL) || (net == NULL)) {
+ return (EFAULT);
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_HEARTBEAT:
+ if (stcb == NULL) {
+ return (EFAULT);
+ }
+ tmr = &stcb->asoc.hb_timer;
+ break;
+ case SCTP_TIMER_TYPE_COOKIE:
+ if ((stcb == NULL) || (net == NULL)) {
+ return (EFAULT);
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_NEWCOOKIE:
+ /* nothing needed but the endpoint here */
+ tmr = &inp->sctp_ep.signature_change;
+ /*
+ * We re-use the newcookie timer for the INP kill timer. We
+		 * must ensure that we do not kill it by accident.
+ */
+ break;
+ case SCTP_TIMER_TYPE_ASOCKILL:
+ /*
+ * Stop the asoc kill timer.
+ */
+ if (stcb == NULL) {
+ return (EFAULT);
+ }
+ tmr = &stcb->asoc.strreset_timer;
+ break;
+
+ case SCTP_TIMER_TYPE_INPKILL:
+ /*
+		 * The inp is set up to die. We re-use the signature_change
+ * timer since that has stopped and we are in the GONE
+ * state.
+ */
+ tmr = &inp->sctp_ep.signature_change;
+ break;
+ case SCTP_TIMER_TYPE_PATHMTURAISE:
+ if ((stcb == NULL) || (net == NULL)) {
+ return (EFAULT);
+ }
+ tmr = &net->pmtu_timer;
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWNACK:
+ if ((stcb == NULL) || (net == NULL)) {
+ return (EFAULT);
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
+ if (stcb == NULL) {
+ return (EFAULT);
+ }
+ tmr = &stcb->asoc.shut_guard_timer;
+ break;
+ case SCTP_TIMER_TYPE_STRRESET:
+ if (stcb == NULL) {
+ return (EFAULT);
+ }
+ tmr = &stcb->asoc.strreset_timer;
+ break;
+ case SCTP_TIMER_TYPE_ASCONF:
+ if (stcb == NULL) {
+ return (EFAULT);
+ }
+ tmr = &stcb->asoc.asconf_timer;
+ break;
+ case SCTP_TIMER_TYPE_AUTOCLOSE:
+ if (stcb == NULL) {
+ return (EFAULT);
+ }
+ tmr = &stcb->asoc.autoclose_timer;
+ break;
+ default:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
+ printf("sctp_timer_stop:Unknown timer type %d\n",
+ t_type);
+ }
+#endif /* SCTP_DEBUG */
+ break;
+ };
+ if (tmr == NULL) {
+ return (EFAULT);
+ }
+ if ((tmr->type != t_type) && tmr->type) {
+ /*
+		 * Ok, we have a timer that is under joint use -- the Cookie
+		 * timer, perhaps, with the SEND timer. We therefore are NOT
+		 * running the timer that the caller wants stopped, so just
+		 * return.
+ */
+ return (0);
+ }
+ if (t_type == SCTP_TIMER_TYPE_SEND) {
+ stcb->asoc.num_send_timers_up--;
+ if (stcb->asoc.num_send_timers_up < 0) {
+ stcb->asoc.num_send_timers_up = 0;
+ }
+ }
+ tmr->self = NULL;
+ callout_stop(&tmr->timer);
+ return (0);
+}
+
+#ifdef SCTP_USE_ADLER32
+static uint32_t
+update_adler32(uint32_t adler, uint8_t * buf, int32_t len)
+{
+ uint32_t s1 = adler & 0xffff;
+ uint32_t s2 = (adler >> 16) & 0xffff;
+ int n;
+
+ for (n = 0; n < len; n++, buf++) {
+ /* s1 = (s1 + buf[n]) % BASE */
+ /* first we add */
+ s1 = (s1 + *buf);
+ /*
+		 * Now, if we need to, we do a mod by subtracting. It seems
+		 * a bit faster, since we will only ever do one subtract at
+		 * MOST, as buf[n] is at most 255.
+ */
+ if (s1 >= SCTP_ADLER32_BASE) {
+ s1 -= SCTP_ADLER32_BASE;
+ }
+ /* s2 = (s2 + s1) % BASE */
+ /* first we add */
+ s2 = (s2 + s1);
+ /*
+		 * Again, it is more efficient (it seems) to subtract, since
+		 * the most s2 will ever be is (BASE-1 + BASE-1) in the
+		 * worst case. That is (2 * BASE) - 2, which still needs
+		 * only one subtract. On Intel it is much better to do it
+		 * this way and avoid the divide. Have not -pg'd on
+ * sparc.
+ */
+ if (s2 >= SCTP_ADLER32_BASE) {
+ s2 -= SCTP_ADLER32_BASE;
+ }
+ }
+ /* Return the adler32 of the bytes buf[0..len-1] */
+ return ((s2 << 16) + s1);
+}
+
+#endif
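+
+/*
+ * Example (illustrative, hypothetical values): the conditional
+ * subtraction above is equivalent to the textbook "% 65521" because
+ * each step can overshoot the base at most once (s1 grows by at most
+ * 255 per byte).  With s1 = 65300 and *buf = 255:
+ *
+ *	s1 = 65300 + 255;	// 65555
+ *	if (s1 >= 65521)
+ *		s1 -= 65521;	// 34, the same as 65555 % 65521
+ */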
+
+
+uint32_t
+sctp_calculate_len(struct mbuf *m)
+{
+ uint32_t tlen = 0;
+ struct mbuf *at;
+
+ at = m;
+ while (at) {
+ tlen += at->m_len;
+ at = at->m_next;
+ }
+ return (tlen);
+}
+
+#if defined(SCTP_WITH_NO_CSUM)
+
+uint32_t
+sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
+{
+ /*
+	 * Given an mbuf chain with a packet header offset by 'offset',
+	 * pointing at a sctphdr (with csum set to 0), go through the chain
+	 * of m_next's and calculate the SCTP checksum. This is currently
+	 * Adler32 but will change to CRC32c soon. As a side bonus it also
+	 * calculates the total length of the mbuf chain. Note: if offset is
+	 * greater than the total mbuf length, checksum=1, pktlen=0 is
+	 * returned (i.e. no real error code).
+ */
+ if (pktlen == NULL)
+ return (0);
+ *pktlen = sctp_calculate_len(m);
+ return (0);
+}
+
+#elif defined(SCTP_USE_INCHKSUM)
+
+#include <machine/in_cksum.h>
+
+uint32_t
+sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
+{
+ /*
+	 * Given an mbuf chain with a packet header offset by 'offset',
+	 * pointing at a sctphdr (with csum set to 0), go through the chain
+	 * of m_next's and calculate the SCTP checksum. This is currently
+	 * Adler32 but will change to CRC32c soon. As a side bonus it also
+	 * calculates the total length of the mbuf chain. Note: if offset is
+	 * greater than the total mbuf length, checksum=1, pktlen=0 is
+	 * returned (i.e. no real error code).
+ */
+ int32_t tlen = 0;
+ struct mbuf *at;
+	uint32_t the_sum;
+
+ at = m;
+ while (at) {
+ tlen += at->m_len;
+ at = at->m_next;
+ }
+ the_sum = (uint32_t) (in_cksum_skip(m, tlen, offset));
+ if (pktlen != NULL)
+ *pktlen = (tlen - offset);
+	return (the_sum);
+}
+
+#else
+
+uint32_t
+sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
+{
+ /*
+	 * Given an mbuf chain with a packet header offset by 'offset',
+	 * pointing at a sctphdr (with csum set to 0), go through the chain
+	 * of m_next's and calculate the SCTP checksum. This is currently
+	 * Adler32 but will change to CRC32c soon. As a side bonus it also
+	 * calculates the total length of the mbuf chain. Note: if offset is
+	 * greater than the total mbuf length, checksum=1, pktlen=0 is
+	 * returned (i.e. no real error code).
+ */
+ int32_t tlen = 0;
+
+#ifdef SCTP_USE_ADLER32
+ uint32_t base = 1L;
+
+#else
+ uint32_t base = 0xffffffff;
+
+#endif /* SCTP_USE_ADLER32 */
+ struct mbuf *at;
+
+ at = m;
+ /* find the correct mbuf and offset into mbuf */
+ while ((at != NULL) && (offset > (uint32_t) at->m_len)) {
+ offset -= at->m_len; /* update remaining offset left */
+ at = at->m_next;
+ }
+ while (at != NULL) {
+ if ((at->m_len - offset) > 0) {
+#ifdef SCTP_USE_ADLER32
+ base = update_adler32(base,
+ (unsigned char *)(at->m_data + offset),
+ (unsigned int)(at->m_len - offset));
+#else
+ if ((at->m_len - offset) < 4) {
+ /* Use old method if less than 4 bytes */
+ base = old_update_crc32(base,
+ (unsigned char *)(at->m_data + offset),
+ (unsigned int)(at->m_len - offset));
+ } else {
+ base = update_crc32(base,
+ (unsigned char *)(at->m_data + offset),
+ (unsigned int)(at->m_len - offset));
+ }
+#endif /* SCTP_USE_ADLER32 */
+ tlen += at->m_len - offset;
+ /* we only offset once into the first mbuf */
+ }
+ if (offset) {
+ if (offset < at->m_len)
+ offset = 0;
+ else
+ offset -= at->m_len;
+ }
+ at = at->m_next;
+ }
+ if (pktlen != NULL) {
+ *pktlen = tlen;
+ }
+#ifdef SCTP_USE_ADLER32
+ /* Adler32 */
+ base = htonl(base);
+#else
+ /* CRC-32c */
+ base = sctp_csum_finalize(base);
+#endif
+ return (base);
+}
+
+
+#endif
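+
+/*
+ * Example (illustrative sketch over hypothetical types): the walk above
+ * first burns 'offset' down to the right mbuf, then sums the remainder
+ * of that mbuf and every later one in full.  The same shape over a
+ * plain buffer list:
+ *
+ *	struct buf { struct buf *next; uint8_t *p; int len; };
+ *
+ *	while (b != NULL && off >= b->len) {	// skip whole buffers
+ *		off -= b->len;
+ *		b = b->next;
+ *	}
+ *	for (; b != NULL; b = b->next) {
+ *		sum = update(sum, b->p + off, b->len - off);
+ *		off = 0;	// only the first buffer is entered mid-way
+ *	}
+ */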
+
+void
+sctp_mtu_size_reset(struct sctp_inpcb *inp,
+ struct sctp_association *asoc, u_long mtu)
+{
+ /*
+	 * Reset the P-MTU size on this association. This involves changing
+	 * the asoc MTU and going through ANY chunk whose size plus overhead
+	 * is larger than mtu, to allow the DF flag to be cleared.
+ */
+ struct sctp_tmit_chunk *chk;
+ unsigned int eff_mtu, ovh;
+
+ asoc->smallest_mtu = mtu;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ovh = SCTP_MIN_OVERHEAD;
+ } else {
+ ovh = SCTP_MIN_V4_OVERHEAD;
+ }
+ eff_mtu = mtu - ovh;
+ TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
+
+ if (chk->send_size > eff_mtu) {
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ }
+ }
+ TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+ if (chk->send_size > eff_mtu) {
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ }
+ }
+}
+
+
+/*
+ * Given an association and the starting time of the current RTT period,
+ * return the RTO in milliseconds. 'net' should point to the current network.
+ */
+uint32_t
+sctp_calculate_rto(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_nets *net,
+ struct timeval *old)
+{
+ /*
+	 * Given an association and the starting time of the current RTT
+	 * period (passed in 'old'), return the RTO in milliseconds.
+ */
+ int calc_time = 0;
+ int o_calctime;
+ unsigned int new_rto = 0;
+ int first_measure = 0;
+ struct timeval now;
+
+ /************************/
+ /* 1. calculate new RTT */
+ /************************/
+ /* get the current time */
+ SCTP_GETTIME_TIMEVAL(&now);
+ /* compute the RTT value */
+ if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
+ calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
+ if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
+ calc_time += (((u_long)now.tv_usec -
+ (u_long)old->tv_usec) / 1000);
+ } else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
+ /* Borrow 1,000ms from current calculation */
+ calc_time -= 1000;
+ /* Add in the slop over */
+ calc_time += ((int)now.tv_usec / 1000);
+ /* Add in the pre-second ms's */
+ calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
+ }
+ } else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
+ if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
+ calc_time = ((u_long)now.tv_usec -
+ (u_long)old->tv_usec) / 1000;
+ } else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
+ /* impossible .. garbage in nothing out */
+ return (((net->lastsa >> 2) + net->lastsv) >> 1);
+ } else {
+ /* impossible .. garbage in nothing out */
+ return (((net->lastsa >> 2) + net->lastsv) >> 1);
+ }
+ } else {
+ /* Clock wrapped? */
+ return (((net->lastsa >> 2) + net->lastsv) >> 1);
+ }
+ /***************************/
+ /* 2. update RTTVAR & SRTT */
+ /***************************/
+#if 0
+ /* if (net->lastsv || net->lastsa) { */
+ /* per Section 5.3.1 C3 in SCTP */
+ /* net->lastsv = (int) *//* RTTVAR */
+ /*
+ * (((double)(1.0 - 0.25) * (double)net->lastsv) + (double)(0.25 *
+ * (double)abs(net->lastsa - calc_time))); net->lastsa = (int)
+*//* SRTT */
+ /*
+ * (((double)(1.0 - 0.125) * (double)net->lastsa) + (double)(0.125 *
+ * (double)calc_time)); } else {
+*//* the first RTT calculation, per C2 Section 5.3.1 */
+ /* net->lastsa = calc_time; *//* SRTT */
+ /* net->lastsv = calc_time / 2; *//* RTTVAR */
+ /* } */
+	/* if RTTVAR goes to 0 you set to clock granularity */
+ /*
+ * if (net->lastsv == 0) { net->lastsv = SCTP_CLOCK_GRANULARITY; }
+ * new_rto = net->lastsa + 4 * net->lastsv;
+ */
+#endif
+ o_calctime = calc_time;
+ /* this is Van Jacobson's integer version */
+ if (net->RTO) {
+ calc_time -= (net->lastsa >> 3);
+ if ((int)net->prev_rtt > o_calctime) {
+ net->rtt_variance = net->prev_rtt - o_calctime;
+ /* decreasing */
+ net->rto_variance_dir = 0;
+ } else {
+ /* increasing */
+ net->rtt_variance = o_calctime - net->prev_rtt;
+ net->rto_variance_dir = 1;
+ }
+#ifdef SCTP_RTTVAR_LOGGING
+ rto_logging(net, SCTP_LOG_RTTVAR);
+#endif
+ net->prev_rtt = o_calctime;
+ net->lastsa += calc_time;
+ if (calc_time < 0) {
+ calc_time = -calc_time;
+ }
+ calc_time -= (net->lastsv >> 2);
+ net->lastsv += calc_time;
+ if (net->lastsv == 0) {
+ net->lastsv = SCTP_CLOCK_GRANULARITY;
+ }
+ } else {
+		/* First RTO measurement */
+ net->lastsa = calc_time;
+ net->lastsv = calc_time >> 1;
+ first_measure = 1;
+ net->rto_variance_dir = 1;
+ net->prev_rtt = o_calctime;
+ net->rtt_variance = 0;
+#ifdef SCTP_RTTVAR_LOGGING
+ rto_logging(net, SCTP_LOG_INITIAL_RTT);
+#endif
+ }
+ new_rto = ((net->lastsa >> 2) + net->lastsv) >> 1;
+ if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
+ (stcb->asoc.sat_network_lockout == 0)) {
+ stcb->asoc.sat_network = 1;
+ } else if ((!first_measure) && stcb->asoc.sat_network) {
+ stcb->asoc.sat_network = 0;
+ stcb->asoc.sat_network_lockout = 1;
+ }
+ /* bound it, per C6/C7 in Section 5.3.1 */
+ if (new_rto < stcb->asoc.minrto) {
+ new_rto = stcb->asoc.minrto;
+ }
+ if (new_rto > stcb->asoc.maxrto) {
+ new_rto = stcb->asoc.maxrto;
+ }
+	/* we now return the smoothed RTO */
+ return ((uint32_t) new_rto);
+}
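+
+/*
+ * Example (illustrative, not a verbatim restatement): the integer update
+ * above is Van Jacobson's scheme with SRTT kept scaled by 8 in lastsa
+ * and RTTVAR scaled by 4 in lastsv:
+ *
+ *	err     = rtt - (lastsa >> 3);		// rtt - SRTT
+ *	lastsa += err;				// SRTT += err / 8
+ *	lastsv += abs(err) - (lastsv >> 2);	// RTTVAR += (|err| - RTTVAR) / 4
+ *	rto     = ((lastsa >> 2) + lastsv) >> 1; // unscales to SRTT + 2 * RTTVAR
+ */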
+
+
+/*
+ * Return a pointer to a contiguous piece of data from the given mbuf chain
+ * starting at 'off' for 'len' bytes. If the desired piece spans more than
+ * one mbuf, a copy is made into 'in_ptr'; the caller must ensure the buffer
+ * is >= 'len' bytes. Returns NULL if there aren't 'len' bytes in the chain.
+ */
+__inline caddr_t
+sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
+{
+ uint32_t count;
+ uint8_t *ptr;
+
+ ptr = in_ptr;
+ if ((off < 0) || (len <= 0))
+ return (NULL);
+
+ /* find the desired start location */
+ while ((m != NULL) && (off > 0)) {
+ if (off < m->m_len)
+ break;
+ off -= m->m_len;
+ m = m->m_next;
+ }
+ if (m == NULL)
+ return (NULL);
+
+ /* is the current mbuf large enough (eg. contiguous)? */
+ if ((m->m_len - off) >= len) {
+ return (mtod(m, caddr_t)+off);
+ } else {
+ /* else, it spans more than one mbuf, so save a temp copy... */
+ while ((m != NULL) && (len > 0)) {
+ count = min(m->m_len - off, len);
+ bcopy(mtod(m, caddr_t)+off, ptr, count);
+ len -= count;
+ ptr += count;
+ off = 0;
+ m = m->m_next;
+ }
+ if ((m == NULL) && (len > 0))
+ return (NULL);
+ else
+ return ((caddr_t)in_ptr);
+ }
+}
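+
+/*
+ * Example (illustrative, hypothetical use): pull a chunk header that may
+ * straddle mbufs into a stack buffer; the returned pointer is into the
+ * chain when the header was already contiguous, otherwise into buf:
+ *
+ *	struct sctp_chunkhdr buf, *ch;
+ *
+ *	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
+ *	    sizeof(buf), (uint8_t *)&buf);
+ *	if (ch == NULL)
+ *		return;		// fewer than sizeof(buf) bytes past offset
+ */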
+
+
+struct sctp_paramhdr *
+sctp_get_next_param(struct mbuf *m,
+ int offset,
+ struct sctp_paramhdr *pull,
+ int pull_limit)
+{
+ /* This just provides a typed signature to Peter's Pull routine */
+ return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
+ (uint8_t *) pull));
+}
+
+
+int
+sctp_add_pad_tombuf(struct mbuf *m, int padlen)
+{
+ /*
+	 * Add padlen bytes of zero-filled padding to the end of the mbuf.
+	 * If padlen is > 3 this routine will fail.
+ */
+ uint8_t *dp;
+ int i;
+
+ if (padlen > 3) {
+ return (ENOBUFS);
+ }
+ if (M_TRAILINGSPACE(m)) {
+ /*
+ * The easy way. We hope the majority of the time we hit
+ * here :)
+ */
+ dp = (uint8_t *) (mtod(m, caddr_t)+m->m_len);
+ m->m_len += padlen;
+ } else {
+ /* Hard way we must grow the mbuf */
+ struct mbuf *tmp;
+
+ tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
+ if (tmp == NULL) {
+ /* Out of space GAK! we are in big trouble. */
+ return (ENOSPC);
+ }
+ /* setup and insert in middle */
+ tmp->m_next = m->m_next;
+ tmp->m_len = padlen;
+ m->m_next = tmp;
+ dp = mtod(tmp, uint8_t *);
+ }
+ /* zero out the pad */
+ for (i = 0; i < padlen; i++) {
+ *dp = 0;
+ dp++;
+ }
+ return (0);
+}
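+
+/*
+ * Example (illustrative sketch): SCTP chunks are padded to 4-byte
+ * boundaries, so a caller derives padlen (always 0-3, satisfying the
+ * check above) from the chunk length:
+ *
+ *	padlen = (4 - (len & 3)) & 3;	// len 5 -> 3, len 8 -> 0
+ *	if (padlen)
+ *		error = sctp_add_pad_tombuf(m, padlen);
+ */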
+
+int
+sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
+{
+ /* find the last mbuf in chain and pad it */
+ struct mbuf *m_at;
+
+ m_at = m;
+ if (last_mbuf) {
+ return (sctp_add_pad_tombuf(last_mbuf, padval));
+ } else {
+ while (m_at) {
+ if (m_at->m_next == NULL) {
+ return (sctp_add_pad_tombuf(m_at, padval));
+ }
+ m_at = m_at->m_next;
+ }
+ }
+ return (EFAULT);
+}
+
+int sctp_asoc_change_wake = 0;
+
+static void
+sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
+ uint32_t error, void *data)
+{
+ struct mbuf *m_notify;
+ struct sctp_assoc_change *sac;
+ struct sctp_queued_to_read *control;
+ int locked = 0;
+
+ /*
+	 * First, if we are going down, dump everything we can to the
+ * socket rcv queue.
+ */
+
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
+ ) {
+ /* If the socket is gone we are out of here */
+ return;
+ }
+ if ((event == SCTP_COMM_LOST) || (event == SCTP_SHUTDOWN_COMP)) {
+ if (stcb->asoc.control_pdapi) {
+ /*
+			 * We were in the middle of a PD-API; verify it's
+			 * there.
+ */
+ SCTP_INP_READ_LOCK(stcb->sctp_ep);
+ locked = 1;
+ TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
+ if (control == stcb->asoc.control_pdapi) {
+ /* Yep its here, notify them */
+ if (event == SCTP_COMM_LOST) {
+ /*
+ * Abort/broken we had a
+ * real PD-API aborted
+ */
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
+ /*
+					 * Hmm.. we don't want
+					 * a notify if
+					 * held_length is
+					 * set; they may be
+					 * stuck. Clear and
+					 * wake.
+ */
+ if (control->held_length) {
+ control->held_length = 0;
+ control->end_added = 1;
+ }
+ } else {
+ sctp_notify_partial_delivery_indication(stcb, event, 1);
+
+ }
+ } else {
+ /* implicit EOR on EOF */
+ control->held_length = 0;
+ control->end_added = 1;
+ }
+ SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+ locked = 0;
+ /* wake him up */
+ control->do_not_ref_stcb = 1;
+ stcb->asoc.control_pdapi = NULL;
+ sorwakeup(stcb->sctp_socket);
+ break;
+ }
+ }
+ if (locked)
+ SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+
+ }
+ }
+ /*
+ * For TCP model AND UDP connected sockets we will send an error up
+ * when an ABORT comes in.
+ */
+ if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
+ (event == SCTP_COMM_LOST)) {
+ if (TAILQ_EMPTY(&stcb->sctp_ep->read_queue)) {
+ stcb->sctp_socket->so_error = ECONNRESET;
+ }
+ /* Wake ANY sleepers */
+ sorwakeup(stcb->sctp_socket);
+ sowwakeup(stcb->sctp_socket);
+ sctp_asoc_change_wake++;
+ }
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
+ /* event not enabled */
+ return;
+ }
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 1, M_DONTWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ m_notify->m_len = 0;
+
+ sac = mtod(m_notify, struct sctp_assoc_change *);
+ sac->sac_type = SCTP_ASSOC_CHANGE;
+ sac->sac_flags = 0;
+ sac->sac_length = sizeof(struct sctp_assoc_change);
+ sac->sac_state = event;
+ sac->sac_error = error;
+ /* XXX verify these stream counts */
+ sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
+ sac->sac_inbound_streams = stcb->asoc.streamincnt;
+ sac->sac_assoc_id = sctp_get_associd(stcb);
+ m_notify->m_flags |= M_EOR | M_NOTIFICATION;
+ m_notify->m_pkthdr.len = sizeof(struct sctp_assoc_change);
+ m_notify->m_pkthdr.rcvif = 0;
+ m_notify->m_len = sizeof(struct sctp_assoc_change);
+ m_notify->m_next = NULL;
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = m_notify->m_len;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1);
+ if (event == SCTP_COMM_LOST) {
+ /* Wake up any sleeper */
+ sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
+ }
+}
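+
+/*
+ * Example (illustrative user-space sketch; handle_state() is a
+ * hypothetical consumer): the notification built above is read off the
+ * socket as a message flagged MSG_NOTIFICATION, per the SCTP sockets
+ * API draft:
+ *
+ *	struct sctp_event_subscribe ev = { .sctp_association_event = 1 };
+ *
+ *	setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
+ *	n = recvmsg(fd, &msg, 0);
+ *	if (n > 0 && (msg.msg_flags & MSG_NOTIFICATION)) {
+ *		struct sctp_assoc_change *sac = msg.msg_iov[0].iov_base;
+ *
+ *		if (sac->sac_type == SCTP_ASSOC_CHANGE)
+ *			handle_state(sac->sac_state);	// e.g. SCTP_COMM_UP
+ *	}
+ */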
+
+static void
+sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
+ struct sockaddr *sa, uint32_t error)
+{
+ struct mbuf *m_notify;
+ struct sctp_paddr_change *spc;
+ struct sctp_queued_to_read *control;
+
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT))
+ /* event not enabled */
+ return;
+
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 1, M_DONTWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ return;
+ m_notify->m_len = 0;
+ spc = mtod(m_notify, struct sctp_paddr_change *);
+ spc->spc_type = SCTP_PEER_ADDR_CHANGE;
+ spc->spc_flags = 0;
+ spc->spc_length = sizeof(struct sctp_paddr_change);
+ if (sa->sa_family == AF_INET) {
+ memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
+ } else {
+ memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
+ }
+ spc->spc_state = state;
+ spc->spc_error = error;
+ spc->spc_assoc_id = sctp_get_associd(stcb);
+
+ m_notify->m_flags |= M_EOR | M_NOTIFICATION;
+ m_notify->m_pkthdr.len = sizeof(struct sctp_paddr_change);
+ m_notify->m_pkthdr.rcvif = 0;
+ m_notify->m_len = sizeof(struct sctp_paddr_change);
+ m_notify->m_next = NULL;
+
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = m_notify->m_len;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1);
+}
+
+
+static void
+sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
+ struct sctp_tmit_chunk *chk)
+{
+ struct mbuf *m_notify;
+ struct sctp_send_failed *ssf;
+ struct sctp_queued_to_read *control;
+ int length;
+
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
+ /* event not enabled */
+ return;
+
+ length = sizeof(struct sctp_send_failed) + chk->send_size;
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 1, M_DONTWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ m_notify->m_len = 0;
+ ssf = mtod(m_notify, struct sctp_send_failed *);
+ ssf->ssf_type = SCTP_SEND_FAILED;
+ if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
+ ssf->ssf_flags = SCTP_DATA_UNSENT;
+ else
+ ssf->ssf_flags = SCTP_DATA_SENT;
+ ssf->ssf_length = length;
+ ssf->ssf_error = error;
+ /* not exactly what the user sent in, but should be close :) */
+ ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
+ ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
+ ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
+ ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
+ ssf->ssf_info.sinfo_context = chk->rec.data.context;
+ ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
+ ssf->ssf_assoc_id = sctp_get_associd(stcb);
+ m_notify->m_next = chk->data;
+ m_notify->m_flags |= M_NOTIFICATION;
+ m_notify->m_pkthdr.len = length;
+ m_notify->m_pkthdr.rcvif = 0;
+ m_notify->m_len = sizeof(struct sctp_send_failed);
+
+ /* Steal off the mbuf */
+ chk->data = NULL;
+ /*
+ * For this case, we check the actual socket buffer, since the assoc
+ * is going away we don't want to overfill the socket buffer for a
+ * non-reader
+ */
+ if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
+ sctp_m_freem(m_notify);
+ return;
+ }
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1);
+}
+
+
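+/*
+ * Same as sctp_notify_send_failed(), but for data still sitting on the
+ * stream queues as a sctp_stream_queue_pending that never became a chunk.
+ */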
+static void
+sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
+ struct sctp_stream_queue_pending *sp)
+{
+ struct mbuf *m_notify;
+ struct sctp_send_failed *ssf;
+ struct sctp_queued_to_read *control;
+ int length;
+
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
+ /* event not enabled */
+ return;
+
+ length = sizeof(struct sctp_send_failed) + sp->length;
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 1, M_DONTWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ m_notify->m_len = 0;
+ ssf = mtod(m_notify, struct sctp_send_failed *);
+ ssf->ssf_type = SCTP_SEND_FAILED;
+ if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
+ ssf->ssf_flags = SCTP_DATA_UNSENT;
+ else
+ ssf->ssf_flags = SCTP_DATA_SENT;
+ ssf->ssf_length = length;
+ ssf->ssf_error = error;
+ /* not exactly what the user sent in, but should be close :) */
+ ssf->ssf_info.sinfo_stream = sp->stream;
+ ssf->ssf_info.sinfo_ssn = sp->strseq;
+ ssf->ssf_info.sinfo_flags = sp->sinfo_flags;
+ ssf->ssf_info.sinfo_ppid = sp->ppid;
+ ssf->ssf_info.sinfo_context = sp->context;
+ ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
+ ssf->ssf_assoc_id = sctp_get_associd(stcb);
+ m_notify->m_next = sp->data;
+ m_notify->m_flags |= M_NOTIFICATION;
+ m_notify->m_pkthdr.len = length;
+ m_notify->m_pkthdr.rcvif = 0;
+ m_notify->m_len = sizeof(struct sctp_send_failed);
+
+ /* Steal off the mbuf */
+ sp->data = NULL;
+ /*
+ * For this case, we check the actual socket buffer, since the assoc
+ * is going away we don't want to overfill the socket buffer for a
+ * non-reader
+ */
+ if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
+ sctp_m_freem(m_notify);
+ return;
+ }
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1);
+}
+
+
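+/*
+ * Queue an SCTP_ADAPTATION_INDICATION event; here 'error' carries the
+ * peer's adaptation layer indication.
+ */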
+static void
+sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
+ uint32_t error)
+{
+ struct mbuf *m_notify;
+ struct sctp_adaptation_event *sai;
+ struct sctp_queued_to_read *control;
+
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT))
+ /* event not enabled */
+ return;
+
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 1, M_DONTWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ m_notify->m_len = 0;
+ sai = mtod(m_notify, struct sctp_adaptation_event *);
+ sai->sai_type = SCTP_ADAPTATION_INDICATION;
+ sai->sai_flags = 0;
+ sai->sai_length = sizeof(struct sctp_adaptation_event);
+ sai->sai_adaptation_ind = error;
+ sai->sai_assoc_id = sctp_get_associd(stcb);
+
+ m_notify->m_flags |= M_EOR | M_NOTIFICATION;
+ m_notify->m_pkthdr.len = sizeof(struct sctp_adaptation_event);
+ m_notify->m_pkthdr.rcvif = 0;
+ m_notify->m_len = sizeof(struct sctp_adaptation_event);
+ m_notify->m_next = NULL;
+
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = m_notify->m_len;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1);
+}
+
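+/*
+ * Queue an SCTP_PARTIAL_DELIVERY_EVENT. If a partial delivery is in
+ * progress (asoc.control_pdapi), the notification is substituted into
+ * that read-queue entry so the reader sees the abort of the partial
+ * delivery in order.
+ */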
+void
+sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb,
+ uint32_t error, int no_lock)
+{
+ struct mbuf *m_notify;
+ struct sctp_pdapi_event *pdapi;
+ struct sctp_queued_to_read *control;
+
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT))
+ /* event not enabled */
+ return;
+
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 1, M_DONTWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ m_notify->m_len = 0;
+ pdapi = mtod(m_notify, struct sctp_pdapi_event *);
+ pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
+ pdapi->pdapi_flags = 0;
+ pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
+ pdapi->pdapi_indication = error;
+ pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
+
+ m_notify->m_flags |= M_EOR | M_NOTIFICATION;
+ m_notify->m_pkthdr.len = sizeof(struct sctp_pdapi_event);
+ m_notify->m_pkthdr.rcvif = 0;
+ m_notify->m_len = sizeof(struct sctp_pdapi_event);
+ m_notify->m_next = NULL;
+
+ if (stcb->asoc.control_pdapi != NULL) {
+ /* we will do some substitution */
+ control = stcb->asoc.control_pdapi;
+ if (no_lock == 0)
+ SCTP_INP_READ_LOCK(stcb->sctp_ep);
+
+ if (control->data == NULL) {
+ control->data = control->tail_mbuf = m_notify;
+ control->held_length = 0;
+ control->length = m_notify->m_len;
+ control->end_added = 1;
+ sctp_sballoc(stcb,
+ &stcb->sctp_socket->so_rcv,
+ m_notify);
+ } else if (control->end_added == 0) {
+ struct mbuf *m = NULL;
+
+ m = control->data;
+ while (m) {
+ sctp_sbfree(control, stcb,
+ &stcb->sctp_socket->so_rcv, m);
+ m = sctp_m_free(m);
+ }
+ control->data = NULL;
+ control->length = m_notify->m_len;
+ control->data = control->tail_mbuf = m_notify;
+ control->held_length = 0;
+ control->end_added = 1;
+ sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m_notify);
+ } else {
+ /* Hmm .. should not happen */
+ control->end_added = 1;
+ stcb->asoc.control_pdapi = NULL;
+ /* drop the read lock before jumping to the append path */
+ if (no_lock == 0)
+ SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+ goto add_to_end;
+ }
+ if (no_lock == 0)
+ SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+ } else {
+ /* append to socket */
+add_to_end:
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = m_notify->m_len;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1);
+ }
+}
+
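+/*
+ * Queue an SCTP_SHUTDOWN_EVENT; for TCP-model sockets also mark the
+ * socket so no further sends are possible.
+ */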
+static void
+sctp_notify_shutdown_event(struct sctp_tcb *stcb)
+{
+ struct mbuf *m_notify;
+ struct sctp_shutdown_event *sse;
+ struct sctp_queued_to_read *control;
+
+ /*
+ * For TCP model AND UDP connected sockets we will send an error up
+ * when a SHUTDOWN completes
+ */
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ /* mark socket as unable to send more and wakeup! */
+ socantsendmore(stcb->sctp_socket);
+ }
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
+ /* event not enabled */
+ return;
+
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 1, M_DONTWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ m_notify->m_len = 0;
+ sse = mtod(m_notify, struct sctp_shutdown_event *);
+ sse->sse_type = SCTP_SHUTDOWN_EVENT;
+ sse->sse_flags = 0;
+ sse->sse_length = sizeof(struct sctp_shutdown_event);
+ sse->sse_assoc_id = sctp_get_associd(stcb);
+
+ m_notify->m_flags |= M_EOR | M_NOTIFICATION;
+ m_notify->m_pkthdr.len = sizeof(struct sctp_shutdown_event);
+ m_notify->m_pkthdr.rcvif = 0;
+ m_notify->m_len = sizeof(struct sctp_shutdown_event);
+ m_notify->m_next = NULL;
+
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = m_notify->m_len;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1);
+}
+
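+/*
+ * Queue an SCTP_STREAM_RESET_EVENT carrying the list of affected streams,
+ * or the all-streams flag when number_entries is 0.
+ */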
+static void
+sctp_notify_stream_reset(struct sctp_tcb *stcb,
+ int number_entries, uint16_t * list, int flag)
+{
+ struct mbuf *m_notify;
+ struct sctp_queued_to_read *control;
+ struct sctp_stream_reset_event *strreset;
+ int len;
+
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
+ /* event not enabled */
+ return;
+
+ m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ m_notify->m_len = 0;
+ len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
+ if (len > M_TRAILINGSPACE(m_notify)) {
+ /* never enough room */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ strreset = mtod(m_notify, struct sctp_stream_reset_event *);
+ strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
+ if (number_entries == 0) {
+ strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
+ } else {
+ strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
+ }
+ strreset->strreset_length = len;
+ strreset->strreset_assoc_id = sctp_get_associd(stcb);
+ if (number_entries) {
+ int i;
+
+ for (i = 0; i < number_entries; i++) {
+ strreset->strreset_list[i] = ntohs(list[i]);
+ }
+ }
+ m_notify->m_flags |= M_EOR | M_NOTIFICATION;
+ m_notify->m_pkthdr.len = len;
+ m_notify->m_pkthdr.rcvif = 0;
+ m_notify->m_len = len;
+ m_notify->m_next = NULL;
+ if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < m_notify->m_len) {
+ /* no space */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = m_notify->m_len;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1);
+}
+
+
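+/*
+ * Central notification dispatcher: map an internal SCTP_NOTIFY_* code to
+ * the matching socket API event and queue it to the user, making sure a
+ * COMM_UP change is delivered first where one is still outstanding.
+ */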
+void
+sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
+ uint32_t error, void *data)
+{
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
+ /*
+ * No notifications up when we are in a no-socket state;
+ * can't send notifications up to a closed socket.
+ */
+ return;
+ }
+ if (stcb && (stcb->asoc.assoc_up_sent == 0) && (notification != SCTP_NOTIFY_ASSOC_UP)) {
+ if ((notification != SCTP_NOTIFY_ASSOC_DOWN) &&
+ (notification != SCTP_NOTIFY_ASSOC_ABORTED) &&
+ (notification != SCTP_NOTIFY_SPECIAL_SP_FAIL) &&
+ (notification != SCTP_NOTIFY_DG_FAIL) &&
+ (notification != SCTP_NOTIFY_PEER_SHUTDOWN)) {
+ sctp_notify_assoc_change(SCTP_COMM_UP, stcb, 0, NULL);
+ stcb->asoc.assoc_up_sent = 1;
+ }
+ }
+ switch (notification) {
+ case SCTP_NOTIFY_ASSOC_UP:
+ if (stcb->asoc.assoc_up_sent == 0) {
+ sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL);
+ stcb->asoc.assoc_up_sent = 1;
+ }
+ break;
+ case SCTP_NOTIFY_ASSOC_DOWN:
+ sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL);
+ break;
+ case SCTP_NOTIFY_INTERFACE_DOWN:
+ {
+ struct sctp_nets *net;
+
+ net = (struct sctp_nets *)data;
+ sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
+ (struct sockaddr *)&net->ro._l_addr, error);
+ break;
+ }
+ case SCTP_NOTIFY_INTERFACE_UP:
+ {
+ struct sctp_nets *net;
+
+ net = (struct sctp_nets *)data;
+ sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
+ (struct sockaddr *)&net->ro._l_addr, error);
+ break;
+ }
+ case SCTP_NOTIFY_INTERFACE_CONFIRMED:
+ {
+ struct sctp_nets *net;
+
+ net = (struct sctp_nets *)data;
+ sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
+ (struct sockaddr *)&net->ro._l_addr, error);
+ break;
+ }
+ case SCTP_NOTIFY_SPECIAL_SP_FAIL:
+ sctp_notify_send_failed2(stcb, error,
+ (struct sctp_stream_queue_pending *)data);
+ break;
+ case SCTP_NOTIFY_DG_FAIL:
+ sctp_notify_send_failed(stcb, error,
+ (struct sctp_tmit_chunk *)data);
+ break;
+ case SCTP_NOTIFY_ADAPTATION_INDICATION:
+ /* Here the error is the adaptation indication */
+ sctp_notify_adaptation_layer(stcb, error);
+ break;
+ case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
+ sctp_notify_partial_delivery_indication(stcb, error, 0);
+ break;
+ case SCTP_NOTIFY_STRDATA_ERR:
+ break;
+ case SCTP_NOTIFY_ASSOC_ABORTED:
+ sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL);
+ break;
+ case SCTP_NOTIFY_PEER_OPENED_STREAM:
+ break;
+ case SCTP_NOTIFY_STREAM_OPENED_OK:
+ break;
+ case SCTP_NOTIFY_ASSOC_RESTART:
+ sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data);
+ break;
+ case SCTP_NOTIFY_HB_RESP:
+ break;
+ case SCTP_NOTIFY_STR_RESET_SEND:
+ sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
+ break;
+ case SCTP_NOTIFY_STR_RESET_RECV:
+ sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
+ break;
+ case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
+ sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_INBOUND_STR));
+ break;
+
+ case SCTP_NOTIFY_STR_RESET_FAILED_IN:
+ sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
+ break;
+
+ case SCTP_NOTIFY_ASCONF_ADD_IP:
+ sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
+ error);
+ break;
+ case SCTP_NOTIFY_ASCONF_DELETE_IP:
+ sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
+ error);
+ break;
+ case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
+ sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
+ error);
+ break;
+ case SCTP_NOTIFY_ASCONF_SUCCESS:
+ break;
+ case SCTP_NOTIFY_ASCONF_FAILED:
+ break;
+ case SCTP_NOTIFY_PEER_SHUTDOWN:
+ sctp_notify_shutdown_event(stcb);
+ break;
+ case SCTP_NOTIFY_AUTH_NEW_KEY:
+ sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
+ (uint32_t) data);
+ break;
+#if 0
+ case SCTP_NOTIFY_AUTH_KEY_CONFLICT:
+ sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT,
+ error, (uint32_t) data);
+ break;
+#endif /* not yet? remove? */
+
+
+ default:
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_UTIL1) {
+ printf("NOTIFY: unknown notification %xh (%u)\n",
+ notification, notification);
+ }
+#endif /* SCTP_DEBUG */
+ break;
+ } /* end switch */
+}
+
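+/*
+ * Flush every pending message off the stream, send and sent queues,
+ * reporting each one to the user as a failed send.
+ */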
+void
+sctp_report_all_outbound(struct sctp_tcb *stcb)
+{
+ struct sctp_association *asoc;
+ struct sctp_stream_out *outs;
+ struct sctp_tmit_chunk *chk;
+ struct sctp_stream_queue_pending *sp;
+
+ asoc = &stcb->asoc;
+
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
+ return;
+ }
+ /* now go through all the gunk, freeing chunks */
+
+ TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
+ /* now clean up any chunks here */
+ stcb->asoc.locked_on_sending = NULL;
+ sp = TAILQ_FIRST(&outs->outqueue);
+ while (sp) {
+ stcb->asoc.stream_queue_cnt--;
+ TAILQ_REMOVE(&outs->outqueue, sp, next);
+ sctp_free_spbufspace(stcb, asoc, sp);
+ sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
+ SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp);
+ if (sp->data) {
+ sctp_m_freem(sp->data);
+ sp->data = NULL;
+ }
+ if (sp->net)
+ sctp_free_remote_addr(sp->net);
+ sp->net = NULL;
+ /* Free the chunk */
+ sctp_free_a_strmoq(stcb, sp);
+ sp = TAILQ_FIRST(&outs->outqueue);
+ }
+ }
+
+ /* pending send queue SHOULD be empty */
+ if (!TAILQ_EMPTY(&asoc->send_queue)) {
+ chk = TAILQ_FIRST(&asoc->send_queue);
+ while (chk) {
+ TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
+ if (chk->data) {
+ /*
+ * trim off the sctp chunk header(it should
+ * be there)
+ */
+ if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
+ m_adj(chk->data, sizeof(struct sctp_data_chunk));
+ sctp_mbuf_crush(chk->data);
+ }
+ }
+ sctp_free_bufspace(stcb, asoc, chk, 1);
+ sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ if (chk->whoTo)
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = NULL;
+ sctp_free_a_chunk(stcb, chk);
+ chk = TAILQ_FIRST(&asoc->send_queue);
+ }
+ }
+ /* sent queue SHOULD be empty */
+ if (!TAILQ_EMPTY(&asoc->sent_queue)) {
+ chk = TAILQ_FIRST(&asoc->sent_queue);
+ while (chk) {
+ TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
+ if (chk->data) {
+ /*
+ * trim off the sctp chunk header(it should
+ * be there)
+ */
+ if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
+ m_adj(chk->data, sizeof(struct sctp_data_chunk));
+ sctp_mbuf_crush(chk->data);
+ }
+ }
+ sctp_free_bufspace(stcb, asoc, chk, 1);
+ sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
+ SCTP_NOTIFY_DATAGRAM_SENT, chk);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ if (chk->whoTo)
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = NULL;
+ sctp_free_a_chunk(stcb, chk);
+ chk = TAILQ_FIRST(&asoc->sent_queue);
+ }
+ }
+}
+
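+/*
+ * The association is being aborted: report all outbound data as failed
+ * and push an SCTP_COMM_LOST association change event to the user.
+ */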
+void
+sctp_abort_notification(struct sctp_tcb *stcb, int error)
+{
+
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
+ return;
+ }
+ /* Tell them we lost the asoc */
+ sctp_report_all_outbound(stcb);
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
+ }
+ sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL);
+}
+
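+/*
+ * Abort an association in response to an incoming packet: notify the
+ * ULP, send an ABORT back using the peer's vtag, and free the tcb.
+ */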
+void
+sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err)
+{
+ uint32_t vtag;
+
+ vtag = 0;
+ if (stcb != NULL) {
+ /* We have a TCB to abort, send notification too */
+ vtag = stcb->asoc.peer_vtag;
+ sctp_abort_notification(stcb, 0);
+ }
+ sctp_send_abort(m, iphlen, sh, vtag, op_err);
+ if (stcb != NULL) {
+ /* Ok, now lets free it */
+ sctp_free_assoc(inp, stcb, 0);
+ } else {
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
+ sctp_inpcb_free(inp, 1, 0);
+ }
+ }
+ }
+}
+
+void
+sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ int error, struct mbuf *op_err)
+{
+ uint32_t vtag;
+
+ if (stcb == NULL) {
+ /* Got to have a TCB */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
+ sctp_inpcb_free(inp, 1, 0);
+ }
+ }
+ return;
+ }
+ vtag = stcb->asoc.peer_vtag;
+ /* notify the ulp */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
+ sctp_abort_notification(stcb, error);
+ /* notify the peer */
+ sctp_send_abort_tcb(stcb, op_err);
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ /* now free the asoc */
+ sctp_free_assoc(inp, stcb, 0);
+}
+
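+/*
+ * Handle an "out of the blue" packet (one with no matching association),
+ * roughly as described in RFC 2960 section 8.4: ignore ABORTs and a few
+ * other chunk types, answer a SHUTDOWN-ACK with a SHUTDOWN-COMPLETE, and
+ * otherwise respond with an ABORT.
+ */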
+void
+sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
+ struct sctp_inpcb *inp, struct mbuf *op_err)
+{
+ struct sctp_chunkhdr *ch, chunk_buf;
+ unsigned int chk_length;
+
+ SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
+ /* Generate a TO address for future reference */
+ if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+ if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
+ sctp_inpcb_free(inp, 1, 0);
+ }
+ }
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
+ sizeof(*ch), (uint8_t *) & chunk_buf);
+ while (ch != NULL) {
+ chk_length = ntohs(ch->chunk_length);
+ if (chk_length < sizeof(*ch)) {
+ /* break to abort land */
+ break;
+ }
+ switch (ch->chunk_type) {
+ case SCTP_PACKET_DROPPED:
+ /* we don't respond to pkt-dropped */
+ return;
+ case SCTP_ABORT_ASSOCIATION:
+ /* we don't respond with an ABORT to an ABORT */
+ return;
+ case SCTP_SHUTDOWN_COMPLETE:
+ /*
+ * we ignore it since we are not waiting for it and
+ * peer is gone
+ */
+ return;
+ case SCTP_SHUTDOWN_ACK:
+ sctp_send_shutdown_complete2(m, iphlen, sh);
+ return;
+ default:
+ break;
+ }
+ offset += SCTP_SIZE32(chk_length);
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
+ sizeof(*ch), (uint8_t *) & chunk_buf);
+ }
+ sctp_send_abort(m, iphlen, sh, 0, op_err);
+}
+
+/*
+ * check the inbound datagram to make sure there is not an abort inside it,
+ * if there is return 1, else return 0.
+ */
+int
+sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
+{
+ struct sctp_chunkhdr *ch;
+ struct sctp_init_chunk *init_chk, chunk_buf;
+ int offset;
+ unsigned int chk_length;
+
+ offset = iphlen + sizeof(struct sctphdr);
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
+ (uint8_t *) & chunk_buf);
+ while (ch != NULL) {
+ chk_length = ntohs(ch->chunk_length);
+ if (chk_length < sizeof(*ch)) {
+ /* packet is probably corrupt */
+ break;
+ }
+ /* we seem to be ok, is it an abort? */
+ if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
+ /* yep, tell them */
+ return (1);
+ }
+ if (ch->chunk_type == SCTP_INITIATION) {
+ /* need to update the Vtag */
+ init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
+ offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
+ if (init_chk != NULL) {
+ *vtagfill = ntohl(init_chk->init.initiate_tag);
+ }
+ }
+ /* Nope, move to the next chunk */
+ offset += SCTP_SIZE32(chk_length);
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
+ sizeof(*ch), (uint8_t *) & chunk_buf);
+ }
+ return (0);
+}
+
+/*
+ * currently (2/02), ifa_addr embeds scope_id's and doesn't have
+ * sin6_scope_id set (i.e. it's 0), so this function compares
+ * link-local scopes
+ */
+uint32_t
+sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
+{
+ struct sockaddr_in6 a, b;
+
+ /* save copies */
+ a = *addr1;
+ b = *addr2;
+
+ if (a.sin6_scope_id == 0)
+ if (sa6_recoverscope(&a)) {
+ /* can't get scope, so can't match */
+ return (0);
+ }
+ if (b.sin6_scope_id == 0)
+ if (sa6_recoverscope(&b)) {
+ /* can't get scope, so can't match */
+ return (0);
+ }
+ if (a.sin6_scope_id != b.sin6_scope_id)
+ return (0);
+
+ return (1);
+}
+
+/*
+ * returns a sockaddr_in6 with embedded scope recovered and removed
+ */
+struct sockaddr_in6 *
+sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
+{
+
+ /* check and strip embedded scope junk */
+ if (addr->sin6_family == AF_INET6) {
+ if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
+ if (addr->sin6_scope_id == 0) {
+ *store = *addr;
+ if (!sa6_recoverscope(store)) {
+ /* use the recovered scope */
+ addr = store;
+ }
+ /* else, return the original "to" addr */
+ }
+ }
+ }
+ return (addr);
+}
+
+/*
+ * are the two addresses the same? currently a "scopeless" check returns: 1
+ * if same, 0 if not
+ */
+__inline int
+sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
+{
+
+ /* must be valid */
+ if (sa1 == NULL || sa2 == NULL)
+ return (0);
+
+ /* must be the same family */
+ if (sa1->sa_family != sa2->sa_family)
+ return (0);
+
+ if (sa1->sa_family == AF_INET6) {
+ /* IPv6 addresses */
+ struct sockaddr_in6 *sin6_1, *sin6_2;
+
+ sin6_1 = (struct sockaddr_in6 *)sa1;
+ sin6_2 = (struct sockaddr_in6 *)sa2;
+ return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr,
+ &sin6_2->sin6_addr));
+ } else if (sa1->sa_family == AF_INET) {
+ /* IPv4 addresses */
+ struct sockaddr_in *sin_1, *sin_2;
+
+ sin_1 = (struct sockaddr_in *)sa1;
+ sin_2 = (struct sockaddr_in *)sa2;
+ return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
+ } else {
+ /* we don't do these... */
+ return (0);
+ }
+}
+
+void
+sctp_print_address(struct sockaddr *sa)
+{
+
+ if (sa->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)sa;
+ printf("IPv6 address: %s:%d scope:%u\n",
+ ip6_sprintf(&sin6->sin6_addr), ntohs(sin6->sin6_port),
+ sin6->sin6_scope_id);
+ } else if (sa->sa_family == AF_INET) {
+ struct sockaddr_in *sin;
+ unsigned char *p;
+
+ sin = (struct sockaddr_in *)sa;
+ p = (unsigned char *)&sin->sin_addr;
+ printf("IPv4 address: %u.%u.%u.%u:%d\n",
+ p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
+ } else {
+ printf("?\n");
+ }
+}
+
+void
+sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
+{
+ if (iph->ip_v == IPVERSION) {
+ struct sockaddr_in lsa, fsa;
+
+ bzero(&lsa, sizeof(lsa));
+ lsa.sin_len = sizeof(lsa);
+ lsa.sin_family = AF_INET;
+ lsa.sin_addr = iph->ip_src;
+ lsa.sin_port = sh->src_port;
+ bzero(&fsa, sizeof(fsa));
+ fsa.sin_len = sizeof(fsa);
+ fsa.sin_family = AF_INET;
+ fsa.sin_addr = iph->ip_dst;
+ fsa.sin_port = sh->dest_port;
+ printf("src: ");
+ sctp_print_address((struct sockaddr *)&lsa);
+ printf("dest: ");
+ sctp_print_address((struct sockaddr *)&fsa);
+ } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
+ struct ip6_hdr *ip6;
+ struct sockaddr_in6 lsa6, fsa6;
+
+ ip6 = (struct ip6_hdr *)iph;
+ bzero(&lsa6, sizeof(lsa6));
+ lsa6.sin6_len = sizeof(lsa6);
+ lsa6.sin6_family = AF_INET6;
+ lsa6.sin6_addr = ip6->ip6_src;
+ lsa6.sin6_port = sh->src_port;
+ bzero(&fsa6, sizeof(fsa6));
+ fsa6.sin6_len = sizeof(fsa6);
+ fsa6.sin6_family = AF_INET6;
+ fsa6.sin6_addr = ip6->ip6_dst;
+ fsa6.sin6_port = sh->dest_port;
+ printf("src: ");
+ sctp_print_address((struct sockaddr *)&lsa6);
+ printf("dest: ");
+ sctp_print_address((struct sockaddr *)&fsa6);
+ }
+}
+
+#if defined(HAVE_SCTP_SO_LASTRECORD)
+
+/* cloned from uipc_socket.c */
+
+#define SCTP_SBLINKRECORD(sb, m0) do { \
+ if ((sb)->sb_lastrecord != NULL) \
+ (sb)->sb_lastrecord->m_nextpkt = (m0); \
+ else \
+ (sb)->sb_mb = (m0); \
+ (sb)->sb_lastrecord = (m0); \
+} while (/*CONSTCOND*/0)
+#endif
+
+void
+sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
+ struct sctp_inpcb *new_inp,
+ struct sctp_tcb *stcb)
+{
+ /*
+ * go through our old INP and pull off any control structures that
+ * belong to stcb and move them to the new inp.
+ */
+ struct socket *old_so, *new_so;
+ struct sctp_queued_to_read *control, *nctl;
+ struct sctp_readhead tmp_queue;
+ struct mbuf *m;
+ int error;
+
+ old_so = old_inp->sctp_socket;
+ new_so = new_inp->sctp_socket;
+ TAILQ_INIT(&tmp_queue);
+
+ SOCKBUF_LOCK(&(old_so->so_rcv));
+
+ error = sblock(&old_so->so_rcv, 0);
+
+ SOCKBUF_UNLOCK(&(old_so->so_rcv));
+ if (error) {
+ /*
+ * Gak, can't get sblock, we have a problem. data will be
+ * left stranded.. and we don't dare look at it since the
+ * other thread may be reading something. Oh well, it's a
+ * screwed up app that does a peeloff OR an accept while
+ * reading from the main socket... actually it's only the
+ * peeloff() case, since I think read will fail on a
+ * listening socket..
+ */
+ return;
+ }
+ /* lock the socket buffers */
+ SCTP_INP_READ_LOCK(old_inp);
+ control = TAILQ_FIRST(&old_inp->read_queue);
+ /* Pull off all for our target stcb */
+ while (control) {
+ nctl = TAILQ_NEXT(control, next);
+ if (control->stcb == stcb) {
+ /* remove it we want it */
+ TAILQ_REMOVE(&old_inp->read_queue, control, next);
+ TAILQ_INSERT_TAIL(&tmp_queue, control, next);
+ m = control->data;
+ while (m) {
+#ifdef SCTP_SB_LOGGING
+ sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, m->m_len);
+#endif
+ sctp_sbfree(control, stcb, &old_so->so_rcv, m);
+#ifdef SCTP_SB_LOGGING
+ sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
+#endif
+ m = m->m_next;
+ }
+ }
+ control = nctl;
+ }
+ SCTP_INP_READ_UNLOCK(old_inp);
+
+ /* Remove the sb-lock on the old socket */
+ SOCKBUF_LOCK(&(old_so->so_rcv));
+
+ sbunlock(&old_so->so_rcv);
+ SOCKBUF_UNLOCK(&(old_so->so_rcv));
+
+ /* Now we move them over to the new socket buffer */
+ control = TAILQ_FIRST(&tmp_queue);
+ SCTP_INP_READ_LOCK(new_inp);
+ while (control) {
+ nctl = TAILQ_NEXT(control, next);
+ TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
+ m = control->data;
+ while (m) {
+#ifdef SCTP_SB_LOGGING
+ sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, m->m_len);
+#endif
+ sctp_sballoc(stcb, &new_so->so_rcv, m);
+#ifdef SCTP_SB_LOGGING
+ sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
+#endif
+ m = m->m_next;
+ }
+ control = nctl;
+ }
+ SCTP_INP_READ_UNLOCK(new_inp);
+}
+
+
+void
+sctp_add_to_readq(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_queued_to_read *control,
+ struct sockbuf *sb,
+ int end)
+{
+ /*
+ * Here we must place the control on the end of the socket read
+ * queue AND increment sb_cc so that select will work properly on
+ * read.
+ */
+ struct mbuf *m, *prev = NULL;
+
+ SCTP_INP_READ_LOCK(inp);
+ m = control->data;
+ control->held_length = 0;
+ control->length = 0;
+ while (m) {
+ if (m->m_len == 0) {
+ /* Skip mbufs with NO length */
+ if (prev == NULL) {
+ /* First one */
+ control->data = sctp_m_free(m);
+ m = control->data;
+ } else {
+ prev->m_next = sctp_m_free(m);
+ m = prev->m_next;
+ }
+ if (m == NULL) {
+ control->tail_mbuf = prev;
+ }
+ continue;
+ }
+ prev = m;
+#ifdef SCTP_SB_LOGGING
+ sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, m->m_len);
+#endif
+ sctp_sballoc(stcb, sb, m);
+#ifdef SCTP_SB_LOGGING
+ sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
+#endif
+ atomic_add_int(&control->length, m->m_len);
+ m = m->m_next;
+ }
+ if (prev != NULL) {
+ control->tail_mbuf = prev;
+ if (end) {
+ prev->m_flags |= M_EOR;
+ }
+ } else {
+ return;
+ }
+ if (end) {
+ control->end_added = 1;
+ }
+ TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
+ SCTP_INP_READ_UNLOCK(inp);
+ if (inp && inp->sctp_socket) {
+ sctp_sorwakeup(inp, inp->sctp_socket);
+ }
+}
+
+
+int
+sctp_append_to_readq(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_queued_to_read *control,
+ struct mbuf *m,
+ int end,
+ int ctls_cumack,
+ struct sockbuf *sb)
+{
+ /*
+ * A partial delivery API event is underway. OR we are appending on
+ * the reassembly queue.
+ *
+ * If PDAPI this means we need to add m to the end of the data.
+ * Increase the length in the control AND increment the sb_cc.
+ * Otherwise sb is NULL and all we need to do is put it at the end
+ * of the mbuf chain.
+ */
+ int len = 0;
+ struct mbuf *mm, *tail = NULL, *prev = NULL;
+
+ if (inp) {
+ SCTP_INP_READ_LOCK(inp);
+ }
+ if (control == NULL) {
+get_out:
+ if (inp) {
+ SCTP_INP_READ_UNLOCK(inp);
+ }
+ return (-1);
+ }
+ if ((control->tail_mbuf) &&
+ (control->tail_mbuf->m_flags & M_EOR)) {
+ /* huh this one is complete? */
+ goto get_out;
+ }
+ mm = m;
+ if (mm == NULL) {
+ goto get_out;
+ }
+ while (mm) {
+ if (mm->m_len == 0) {
+ /* Skip mbufs with NO length */
+ if (prev == NULL) {
+ /* First one */
+ m = sctp_m_free(mm);
+ mm = m;
+ } else {
+ prev->m_next = sctp_m_free(mm);
+ mm = prev->m_next;
+ }
+ continue;
+ }
+ prev = mm;
+ len += mm->m_len;
+ if (sb) {
+#ifdef SCTP_SB_LOGGING
+ sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, mm->m_len);
+#endif
+ sctp_sballoc(stcb, sb, mm);
+#ifdef SCTP_SB_LOGGING
+ sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
+#endif
+ }
+ mm = mm->m_next;
+ }
+ if (prev) {
+ tail = prev;
+ } else {
+ /* Really there should always be a prev */
+ if (m == NULL) {
+ /* Huh nothing left? */
+#ifdef INVARIANTS
+ panic("Nothing left to add?");
+#else
+ goto get_out;
+#endif
+ }
+ tail = m;
+ }
+ if (end) {
+ /* message is complete */
+ tail->m_flags |= M_EOR;
+ if (control == stcb->asoc.control_pdapi) {
+ stcb->asoc.control_pdapi = NULL;
+ }
+ control->held_length = 0;
+ control->end_added = 1;
+ }
+ atomic_add_int(&control->length, len);
+ if (control->tail_mbuf) {
+ /* append */
+ control->tail_mbuf->m_next = m;
+ control->tail_mbuf = tail;
+ } else {
+ /* nothing there */
+#ifdef INVARIANTS
+ if (control->data != NULL) {
+ panic("This should NOT happen");
+ }
+#endif
+ control->data = m;
+ control->tail_mbuf = tail;
+ }
+ /*
+ * When we are appending in partial delivery, the cum-ack is used
+ * for the actual pd-api highest tsn on this mbuf. The true cum-ack
+ * is populated in the outbound sinfo structure from the true cumack
+ * if the association exists...
+ */
+ control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
+ if (inp) {
+ SCTP_INP_READ_UNLOCK(inp);
+ }
+ if (inp && inp->sctp_socket) {
+ sctp_sorwakeup(inp, inp->sctp_socket);
+ }
+ return (0);
+}
+
+
+
+/*************HOLD THIS COMMENT FOR PATCH FILE OF
+ *************ALTERNATE ROUTING CODE
+ */
+
+/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
+ *************ALTERNATE ROUTING CODE
+ */
+
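+/*
+ * Typically used to build the operational error cause that is attached
+ * to an outgoing ABORT or ERROR when a received parameter was bad.
+ */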
+struct mbuf *
+sctp_generate_invmanparam(int err)
+{
+ /* Return an mbuf with an invalid mandatory parameter */
+ struct mbuf *m;
+
+ m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
+ if (m) {
+ struct sctp_paramhdr *ph;
+
+ m->m_len = sizeof(struct sctp_paramhdr);
+ ph = mtod(m, struct sctp_paramhdr *);
+ ph->param_length = htons(sizeof(struct sctp_paramhdr));
+ ph->param_type = htons(err);
+ }
+ return (m);
+}
+
+#ifdef SCTP_MBCNT_LOGGING
+void
+sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_tmit_chunk *tp1, int chk_cnt)
+{
+ if (tp1->data == NULL) {
+ return;
+ }
+ asoc->chunks_on_out_queue -= chk_cnt;
+ sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
+ asoc->total_output_queue_size,
+ tp1->book_size,
+ 0,
+ tp1->mbcnt);
+ if (asoc->total_output_queue_size >= tp1->book_size) {
+ asoc->total_output_queue_size -= tp1->book_size;
+ } else {
+ asoc->total_output_queue_size = 0;
+ }
+
+ if (stcb->sctp_socket && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))) {
+ if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
+ stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
+ } else {
+ stcb->sctp_socket->so_snd.sb_cc = 0;
+ }
+ }
+}
+
+#endif
+
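+/*
+ * PR-SCTP: give up on tp1 (and, for a fragmented message, its sibling
+ * fragments), mark them for FORWARD-TSN skipping, and notify the user of
+ * the failed send. Returns the number of book-size bytes released.
+ */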
+int
+sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
+ int reason, struct sctpchunk_listhead *queue)
+{
+ int ret_sz = 0;
+ int notdone;
+ uint8_t foundeom = 0;
+
+ do {
+ ret_sz += tp1->book_size;
+ tp1->sent = SCTP_FORWARD_TSN_SKIP;
+ if (tp1->data) {
+ sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
+ sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1);
+ sctp_m_freem(tp1->data);
+ tp1->data = NULL;
+ sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
+ }
+ if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
+ stcb->asoc.sent_queue_cnt_removeable--;
+ }
+ if (queue == &stcb->asoc.send_queue) {
+ TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
+ /* on to the sent queue */
+ TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
+ sctp_next);
+ stcb->asoc.sent_queue_cnt++;
+ }
+ if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
+ SCTP_DATA_NOT_FRAG) {
+ /* not frag'ed, we are done */
+ notdone = 0;
+ foundeom = 1;
+ } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+ /* end of frag, we are done */
+ notdone = 0;
+ foundeom = 1;
+ } else {
+ /*
+ * Its a begin or middle piece, we must mark all of
+ * it
+ */
+ notdone = 1;
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ }
+ } while (tp1 && notdone);
+ if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
+ /*
+ * The multi-part message was scattered across the send and
+ * sent queue.
+ */
+ tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
+ /*
+ * recurse through the send_queue too, starting at the
+ * beginning.
+ */
+ if (tp1) {
+ ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
+ &stcb->asoc.send_queue);
+ } else {
+ printf("hmm, nothing on the send queue and no EOM?\n");
+ }
+ }
+ return (ret_sz);
+}
+
+/*
+ * checks to see if the given address, sa, is one that is currently
+ * known by the kernel.
+ * note: can't distinguish the same address on multiple interfaces and
+ * doesn't handle multiple addresses with different zone/scope id's.
+ * note: ifa_ifwithaddr() compares the entire sockaddr struct.
+ */
+struct ifaddr *
+sctp_find_ifa_by_addr(struct sockaddr *sa)
+{
+ struct ifnet *ifn;
+ struct ifaddr *ifa;
+
+ /* go through all our known interfaces */
+ TAILQ_FOREACH(ifn, &ifnet, if_list) {
+ /* go through each interface addresses */
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ /* correct family? */
+ if (ifa->ifa_addr->sa_family != sa->sa_family)
+ continue;
+
+#ifdef INET6
+ if (ifa->ifa_addr->sa_family == AF_INET6) {
+ /* IPv6 address */
+ struct sockaddr_in6 *sin1, *sin2, sin6_tmp;
+
+ sin1 = (struct sockaddr_in6 *)ifa->ifa_addr;
+ if (IN6_IS_SCOPE_LINKLOCAL(&sin1->sin6_addr)) {
+ /* create a copy and clear scope */
+ memcpy(&sin6_tmp, sin1,
+ sizeof(struct sockaddr_in6));
+ sin1 = &sin6_tmp;
+ in6_clearscope(&sin1->sin6_addr);
+ }
+ sin2 = (struct sockaddr_in6 *)sa;
+ if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
+ sizeof(struct in6_addr)) == 0) {
+ /* found it */
+ return (ifa);
+ }
+ } else
+#endif
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ /* IPv4 address */
+ struct sockaddr_in *sin1, *sin2;
+
+ sin1 = (struct sockaddr_in *)ifa->ifa_addr;
+ sin2 = (struct sockaddr_in *)sa;
+ if (sin1->sin_addr.s_addr ==
+ sin2->sin_addr.s_addr) {
+ /* found it */
+ return (ifa);
+ }
+ }
+ /* else, not AF_INET or AF_INET6, so skip */
+ } /* end foreach ifa */
+ } /* end foreach ifn */
+ /* not found! */
+ return (NULL);
+}
+
+
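+/*
+ * Called as the reader frees data: once at least rwnd_req bytes have
+ * been consumed, recompute the receive window and, if it opened up
+ * enough, send a window-update SACK immediately.
+ */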
+static void
+sctp_user_rcvd(struct sctp_tcb *stcb, int *freed_so_far, int hold_rlock,
+ uint32_t rwnd_req)
+{
+ /* User pulled some data, do we need a rwnd update? */
+ int r_unlocked = 0;
+ int tcb_incr_up = 0;
+ uint32_t dif, rwnd;
+ struct socket *so = NULL;
+
+ if (stcb == NULL)
+ return;
+
+ atomic_add_16(&stcb->asoc.refcnt, 1);
+ tcb_incr_up = 1;
+
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ /* Pre-check: if we are freeing, no rwnd update is needed */
+ goto no_lock;
+ }
+ SCTP_INP_INCR_REF(stcb->sctp_ep);
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+ goto out;
+ }
+ so = stcb->sctp_socket;
+ if (so == NULL) {
+ goto out;
+ }
+ atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
+ /* Have you freed enough to warrant a look? */
+#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
+ sctp_misc_ints(SCTP_ENTER_USER_RECV,
+ (stcb->asoc.my_rwnd - stcb->asoc.my_last_reported_rwnd),
+ *freed_so_far,
+ stcb->freed_by_sorcv_sincelast,
+ rwnd_req);
+#endif
+ *freed_so_far = 0;
+ /* Yep, it's worth a look and the lock overhead */
+
+ /* Figure out what the rwnd would be */
+ rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
+ if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
+ dif = rwnd - stcb->asoc.my_last_reported_rwnd;
+ } else {
+ dif = 0;
+ }
+ if (dif >= rwnd_req) {
+ if (hold_rlock) {
+ SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+ r_unlocked = 1;
+ }
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ /*
+ * One last check before we allow the guy possibly
+ * to get in. There is a race, where the guy has not
+ * reached the gate. In that case just bail out and
+ * skip the report.
+ */
+ goto out;
+ }
+ SCTP_TCB_LOCK(stcb);
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ /* No reports here */
+ SCTP_TCB_UNLOCK(stcb);
+ goto out;
+ }
+#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
+ sctp_misc_ints(SCTP_USER_RECV_SACKS,
+ stcb->asoc.my_rwnd,
+ stcb->asoc.my_last_reported_rwnd,
+ stcb->freed_by_sorcv_sincelast,
+ dif);
+#endif
+ SCTP_STAT_INCR(sctps_wu_sacks_sent);
+ sctp_send_sack(stcb);
+ sctp_chunk_output(stcb->sctp_ep, stcb,
+ SCTP_OUTPUT_FROM_USR_RCVD);
+ /* make sure no timer is running */
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL);
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /* Update how much we have pending */
+ stcb->freed_by_sorcv_sincelast = dif;
+#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
+ sctp_misc_ints(SCTP_USER_RECV_SACKS,
+ stcb->asoc.my_rwnd,
+ stcb->asoc.my_last_reported_rwnd,
+ stcb->freed_by_sorcv_sincelast,
+ 0);
+#endif
+ }
+out:
+ if (so && r_unlocked && hold_rlock) {
+ SCTP_STAT_INCR(sctps_locks_in_rcv);
+ SCTP_INP_READ_LOCK(stcb->sctp_ep);
+ }
+ SCTP_INP_DECR_REF(stcb->sctp_ep);
+no_lock:
+ if (tcb_incr_up) {
+ atomic_add_16(&stcb->asoc.refcnt, -1);
+ }
+ return;
+}
+
+int
+sctp_sorecvmsg(struct socket *so,
+ struct uio *uio,
+ struct mbuf **mp,
+ struct sockaddr *from,
+ int fromlen,
+ int *msg_flags,
+ struct sctp_sndrcvinfo *sinfo,
+ int filling_sinfo)
+{
+ /*
+ * MSG flags we will look at:
+ * MSG_DONTWAIT - non-blocking IO.
+ * MSG_PEEK - Look, don't touch :-D (only valid with OUT mbuf copy,
+ * i.e. mp=NULL, thus uio is the copy method to userland).
+ * MSG_WAITALL - ??
+ * On the way out we may send out any combination of:
+ * MSG_NOTIFICATION MSG_EOR
+ */
+ struct sctp_inpcb *inp = NULL;
+ int my_len = 0;
+ int cp_len = 0, error = 0;
+ struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
+ struct mbuf *m = NULL, *embuf = NULL;
+ struct sctp_tcb *stcb = NULL;
+ int wakeup_read_socket = 0;
+ int freecnt_applied = 0;
+ int out_flags = 0, in_flags = 0;
+ int block_allowed = 1;
+ int freed_so_far = 0;
+ int copied_so_far = 0;
+ int s, in_eeor_mode = 0;
+ int no_rcv_needed = 0;
+ uint32_t rwnd_req = 0;
+ int hold_sblock = 0;
+ int hold_rlock = 0;
+ int alen = 0, slen = 0;
+ int held_length = 0;
+
+ if (msg_flags) {
+ in_flags = *msg_flags;
+ } else {
+ in_flags = 0;
+ }
+ slen = uio->uio_resid;
+ /* Pull in and set up our int flags */
+ if (in_flags & MSG_OOB) {
+ /* Out-of-band data NOT supported */
+ return (EOPNOTSUPP);
+ }
+ if ((in_flags & MSG_PEEK) && (mp != NULL)) {
+ return (EINVAL);
+ }
+ if ((in_flags & (MSG_DONTWAIT
+ | MSG_NBIO
+ )) ||
+ (so->so_state & SS_NBIO)) {
+ block_allowed = 0;
+ }
+ /* setup the endpoint */
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ return (EFAULT);
+ }
+ s = splnet();
+ rwnd_req = (so->so_rcv.sb_hiwat >> SCTP_RWND_HIWAT_SHIFT);
+ /* Must be at least a MTU's worth */
+ if (rwnd_req < SCTP_MIN_RWND)
+ rwnd_req = SCTP_MIN_RWND;
+ in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
+#ifdef SCTP_RECV_RWND_LOGGING
+ sctp_misc_ints(SCTP_SORECV_ENTER,
+ rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
+#endif
+ SOCKBUF_LOCK(&so->so_rcv);
+ hold_sblock = 1;
+#ifdef SCTP_RECV_RWND_LOGGING
+ sctp_misc_ints(SCTP_SORECV_ENTERPL,
+ rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
+#endif
+
+
+ error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
+ if (error) {
+ goto release_unlocked;
+ }
+restart:
+ if (hold_sblock == 0) {
+ SOCKBUF_LOCK(&so->so_rcv);
+ hold_sblock = 1;
+ }
+ sbunlock(&so->so_rcv);
+
+restart_nosblocks:
+ if (hold_sblock == 0) {
+ SOCKBUF_LOCK(&so->so_rcv);
+ hold_sblock = 1;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+ goto out;
+ }
+ if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE) {
+ if (so->so_error) {
+ error = so->so_error;
+ } else {
+ error = ENOTCONN;
+ }
+ goto out;
+ }
+ if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
+ /* we need to wait for data */
+#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
+ sctp_misc_ints(SCTP_SORECV_BLOCKSA,
+ 0, 0, so->so_rcv.sb_cc, uio->uio_resid);
+#endif
+ if ((so->so_rcv.sb_cc == 0) &&
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
+ /*
+ * For the active open side, clear
+ * flags for re-use; the passive open
+ * side is blocked by connect.
+ */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
+ /*
+ * You were aborted, passive side
+ * always hits here
+ */
+ error = ECONNRESET;
+ /*
+ * You get this once if you are
+ * active open side
+ */
+ if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ /*
+ * Remove flag if on the
+ * active open side
+ */
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
+ }
+ }
+ so->so_state &= ~(SS_ISCONNECTING |
+ SS_ISDISCONNECTING |
+ SS_ISCONFIRMING |
+ SS_ISCONNECTED);
+ if (error == 0) {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
+ error = ENOTCONN;
+ } else {
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
+ }
+ }
+ goto out;
+ }
+ }
+ error = sbwait(&so->so_rcv);
+ if (error) {
+ goto out;
+ }
+ held_length = 0;
+ goto restart_nosblocks;
+ } else if (so->so_rcv.sb_cc == 0) {
+ error = EWOULDBLOCK;
+ goto out;
+ }
+ error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
+ /* we possibly have data we can read */
+ control = TAILQ_FIRST(&inp->read_queue);
+ if (control == NULL) {
+ /*
+ * This could be happening since the appender did the
+ * increment but as not yet did the tailq insert onto the
+ * read_queue
+ */
+ if (hold_rlock == 0) {
+ SCTP_INP_READ_LOCK(inp);
+ hold_rlock = 1;
+ }
+ control = TAILQ_FIRST(&inp->read_queue);
+ if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
+#ifdef INVARIANTS
+ panic("Huh, it's non zero and nothing on control?");
+#endif
+ so->so_rcv.sb_cc = 0;
+ }
+ SCTP_INP_READ_UNLOCK(inp);
+ hold_rlock = 0;
+ goto restart;
+ }
+ if ((control->length == 0) &&
+ (control->do_not_ref_stcb)) {
+ /*
+ * Clean up code for freeing assoc that left behind a
+ * pdapi.. maybe a peer in EEOR that just closed after
+ * sending and never indicated an EOR.
+ */
+ SCTP_STAT_INCR(sctps_locks_in_rcva);
+ if (hold_rlock == 0) {
+ hold_rlock = 1;
+ SCTP_INP_READ_LOCK(inp);
+ }
+ control->held_length = 0;
+ if (control->data) {
+ /* Hmm there is data here .. fix */
+ struct mbuf *m;
+ int cnt = 0;
+
+ m = control->data;
+ while (m) {
+ cnt += m->m_len;
+ if (m->m_next == NULL) {
+ control->tail_mbuf = m;
+ m->m_flags |= M_EOR;
+ control->end_added = 1;
+ }
+ m = m->m_next;
+ }
+ control->length = cnt;
+ } else {
+ /* remove it */
+ TAILQ_REMOVE(&inp->read_queue, control, next);
+ /* Add back any hidden data */
+ sctp_free_remote_addr(control->whoFrom);
+ sctp_free_a_readq(stcb, control);
+ }
+ if (hold_rlock) {
+ hold_rlock = 0;
+ SCTP_INP_READ_UNLOCK(inp);
+ }
+ goto restart;
+ }
+ if (control->length == 0) {
+ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
+ (filling_sinfo)) {
+ /* find a more suitable one than this */
+ ctl = TAILQ_NEXT(control, next);
+ while (ctl) {
+ if ((ctl->stcb != control->stcb) && (ctl->length)) {
+ /* found one */
+ control = ctl;
+ goto found_one;
+ }
+ ctl = TAILQ_NEXT(ctl, next);
+ }
+ }
+ /*
+ * if we reach here, no suitable replacement is available
+ * <or> fragment interleave is NOT on. So stuff the sb_cc
+ * into our held count, and it's time to sleep again.
+ */
+ held_length = so->so_rcv.sb_cc;
+ control->held_length = so->so_rcv.sb_cc;
+ goto restart;
+ }
+ /* Clear the held length since there is something to read */
+ control->held_length = 0;
+ if (hold_rlock) {
+ SCTP_INP_READ_UNLOCK(inp);
+ hold_rlock = 0;
+ }
+found_one:
+ /*
+ * If we reach here, control has some data for us to read off.
+ * Note that stcb COULD be NULL.
+ */
+ if (hold_sblock) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ hold_sblock = 0;
+ }
+ stcb = control->stcb;
+ if (stcb) {
+ if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
+ (control->do_not_ref_stcb == 0)) {
+ stcb = NULL;
+ } else if (control->do_not_ref_stcb == 0) {
+ /* you can't free it on me please */
+ /*
+ * The lock on the socket buffer protects us so the
+ * free code will stop. But since we used the
+ * socketbuf lock and the sender uses the tcb_lock
+ * to increment, we need to use the atomic add to
+ * the refcnt
+ */
+ atomic_add_16(&stcb->asoc.refcnt, 1);
+ freecnt_applied = 1;
+ /*
+ * Setup to remember how much we have not yet told
+ * the peer our rwnd has opened up. Note we grab the
+ * value from the tcb from last time. Note too that
+ * sack sending clears this when a sack is sent..
+ * which is fine. Once we hit the rwnd_req, we then
+ * will go to the sctp_user_rcvd() that will not
+ * lock until it KNOWs it MUST send a WUP-SACK.
+ *
+ */
+ freed_so_far = stcb->freed_by_sorcv_sincelast;
+ stcb->freed_by_sorcv_sincelast = 0;
+ }
+ }
+ /* First lets get off the sinfo and sockaddr info */
+ if ((sinfo) && filling_sinfo) {
+ memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
+ nxt = TAILQ_NEXT(control, next);
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
+ struct sctp_extrcvinfo *s_extra;
+
+ s_extra = (struct sctp_extrcvinfo *)sinfo;
+ if (nxt) {
+ s_extra->next_flags = SCTP_NEXT_MSG_AVAIL;
+ if (nxt->sinfo_flags & SCTP_UNORDERED) {
+ s_extra->next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
+ }
+ s_extra->next_asocid = nxt->sinfo_assoc_id;
+ s_extra->next_length = nxt->length;
+ s_extra->next_ppid = nxt->sinfo_ppid;
+ s_extra->next_stream = nxt->sinfo_stream;
+ if (nxt->tail_mbuf != NULL) {
+ if (nxt->tail_mbuf->m_flags & M_EOR) {
+ s_extra->next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
+ }
+ }
+ } else {
+ /*
+ * we explicitly 0 this, since the memcpy
+ * got some other things beyond the older
+ * sinfo_ that is on the control's structure
+ * :-D
+ */
+ s_extra->next_flags = SCTP_NO_NEXT_MSG;
+ s_extra->next_asocid = 0;
+ s_extra->next_length = 0;
+ s_extra->next_ppid = 0;
+ s_extra->next_stream = 0;
+ }
+ }
+ /*
+ * update off the real current cum-ack, if we have an stcb.
+ */
+ if (stcb)
+ sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
+ /*
+ * mask off the high bits, we keep the actual chunk bits in
+ * there.
+ */
+ sinfo->sinfo_flags &= 0x00ff;
+ }
+ if (fromlen && from) {
+ struct sockaddr *to;
+
+#ifdef AF_INET
+ cp_len = min(fromlen, control->whoFrom->ro._l_addr.sin.sin_len);
+ memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
+ ((struct sockaddr_in *)from)->sin_port = control->port_from;
+#else
+ /* No AF_INET use AF_INET6 */
+ cp_len = min(fromlen, control->whoFrom->ro._l_addr.sin6.sin6_len);
+ memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
+ ((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
+#endif
+
+ to = from;
+#if defined(AF_INET) && defined(AF_INET6)
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
+ (to->sa_family == AF_INET) &&
+ ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
+ struct sockaddr_in *sin;
+ struct sockaddr_in6 sin6;
+
+ sin = (struct sockaddr_in *)to;
+ bzero(&sin6, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_len = sizeof(struct sockaddr_in6);
+ /* build the IPv4-mapped form, ::ffff:a.b.c.d */
+ sin6.sin6_addr.s6_addr16[5] = 0xffff;
+ bcopy(&sin->sin_addr,
+ &sin6.sin6_addr.s6_addr32[3],
+ sizeof(sin6.sin6_addr.s6_addr32[3]));
+ sin6.sin6_port = sin->sin_port;
+ memcpy(from, (caddr_t)&sin6, sizeof(sin6));
+ }
+#endif
+#if defined(AF_INET6)
+ {
+ struct sockaddr_in6 lsa6, *to6;
+
+ to6 = (struct sockaddr_in6 *)to;
+ sctp_recover_scope_mac(to6, (&lsa6));
+
+ }
+#endif
+ }
+ /* now copy out what data we can */
+ if (mp == NULL) {
+ /* copy out each mbuf in the chain up to length */
+get_more_data:
+ m = control->data;
+ while (m) {
+ /* Move out all we can */
+ cp_len = (int)uio->uio_resid;
+ my_len = (int)m->m_len;
+ if (cp_len > my_len) {
+ /* not enough in this buf */
+ cp_len = my_len;
+ }
+ if (hold_rlock) {
+ SCTP_INP_READ_UNLOCK(inp);
+ hold_rlock = 0;
+ }
+ splx(s);
+ if (cp_len > 0)
+ error = uiomove(mtod(m, char *), cp_len, uio);
+ s = splnet();
+#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
+ sctp_misc_ints(SCTP_SORCV_DOESCPY,
+ so->so_rcv.sb_cc,
+ cp_len,
+ 0,
+ 0);
+#endif
+ /* re-read */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ goto release;
+ }
+ if (stcb &&
+ stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ no_rcv_needed = 1;
+ }
+ if (error) {
+ /* error we are out of here */
+ goto release;
+ }
+ if ((m->m_next == NULL) &&
+ (cp_len >= m->m_len) &&
+ ((control->end_added == 0) ||
+ (control->end_added && (TAILQ_NEXT(control, next) == NULL)))
+ ) {
+#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
+ sctp_misc_ints(SCTP_SORCV_DOESLCK,
+ so->so_rcv.sb_cc,
+ cp_len,
+ m->m_len,
+ control->length);
+#endif
+ SCTP_STAT_INCR(sctps_locks_in_rcvb);
+ SCTP_INP_READ_LOCK(inp);
+ hold_rlock = 1;
+ }
+ if (cp_len == m->m_len) {
+#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
+ sctp_misc_ints(SCTP_SORCV_DOESADJ,
+ so->so_rcv.sb_cc,
+ control->length,
+ cp_len,
+ 0);
+#endif
+ if (m->m_flags & M_EOR) {
+ out_flags |= MSG_EOR;
+ }
+ if (m->m_flags & M_NOTIFICATION) {
+ out_flags |= MSG_NOTIFICATION;
+ }
+ /* we ate up the mbuf */
+ if (in_flags & MSG_PEEK) {
+ /* just looking */
+ m = m->m_next;
+ copied_so_far += cp_len;
+ } else {
+ /* dispose of the mbuf */
+#ifdef SCTP_SB_LOGGING
+ sctp_sblog(&so->so_rcv,
+ control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, m->m_len);
+#endif
+ sctp_sbfree(control, stcb, &so->so_rcv, m);
+#ifdef SCTP_SB_LOGGING
+ sctp_sblog(&so->so_rcv,
+ control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
+#endif
+ embuf = m;
+ copied_so_far += cp_len;
+ freed_so_far += cp_len;
+ alen = atomic_fetchadd_int(&control->length, -(cp_len));
+ if (alen < cp_len) {
+ panic("Control length goes negative?");
+ }
+#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
+ sctp_misc_ints(SCTP_SORCV_PASSBF,
+ so->so_rcv.sb_cc,
+ control->length,
+ 0,
+ 0);
+#endif
+ control->data = sctp_m_free(m);
+ m = control->data;
+ /*
+ * been through it all; we must hold
+ * the sb lock, so it's ok to null
+ * the tail
+ */
+ if (control->data == NULL) {
+#ifdef INVARIANTS
+ if ((control->end_added == 0) ||
+ (TAILQ_NEXT(control, next) == NULL)) {
+ /*
+ * If the end is not
+ * added, OR the
+ * next is NOT null
+ * we MUST have the
+ * lock.
+ */
+ if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
+ panic("Hmm we don't own the lock?");
+ }
+ }
+#endif
+ control->tail_mbuf = NULL;
+#ifdef INVARIANTS
+ if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
+ panic("end_added, nothing left and no MSG_EOR");
+ }
+#endif
+ }
+#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
+ sctp_misc_ints(SCTP_SORCV_ADJD,
+ so->so_rcv.sb_cc,
+ control->length,
+ 0,
+ 0);
+#endif
+ }
+ } else {
+ /* Do we need to trim the mbuf? */
+ if (m->m_flags & M_NOTIFICATION) {
+ out_flags |= MSG_NOTIFICATION;
+ }
+ if ((in_flags & MSG_PEEK) == 0) {
+ if (out_flags & MSG_NOTIFICATION) {
+ /*
+ * remark this one with the
+ * notify flag, they read
+ * only part of the
+ * notification.
+ */
+ m->m_flags |= M_NOTIFICATION;
+ }
+ m->m_data += cp_len;
+ m->m_len -= cp_len;
+#ifdef SCTP_SB_LOGGING
+ sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
+#endif
+ atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
+ if (stcb) {
+ atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
+ }
+ copied_so_far += cp_len;
+ embuf = m;
+ freed_so_far += cp_len;
+#ifdef SCTP_SB_LOGGING
+ sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
+ SCTP_LOG_SBRESULT, 0);
+#endif
+ alen = atomic_fetchadd_int(&control->length, -(cp_len));
+ if (alen < cp_len) {
+ panic("Control length goes negative2?");
+ }
+ } else {
+ copied_so_far += cp_len;
+ }
+ }
+ if ((out_flags & MSG_EOR) ||
+ (uio->uio_resid == 0)
+ ) {
+ break;
+ }
+ if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
+ (control->do_not_ref_stcb == 0) &&
+ (freed_so_far >= rwnd_req)) {
+ sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
+ }
+#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
+ sctp_misc_ints(SCTP_SORCV_BOTWHILE,
+ so->so_rcv.sb_cc,
+ control->length,
+ 0,
+ 0);
+#endif
+
+ } /* end while(m) */
+ /*
+ * At this point we have looked at it all and we either have
+ * a MSG_EOR/or read all the user wants... <OR>
+ * control->length == 0.
+ */
+ if ((out_flags & MSG_EOR) &&
+ ((in_flags & MSG_PEEK) == 0)) {
+ /* we are done with this control */
+ if (control->length == 0) {
+ if (control->data) {
+#ifdef INVARIANTS
+ panic("control->data not null at read eor?");
+#else
+ printf("Strange, data left in the control buffer .. invariants would panic?\n");
+ sctp_m_freem(control->data);
+ control->data = NULL;
+#endif
+ }
+ done_with_control:
+#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
+ sctp_misc_ints(SCTP_SORCV_FREECTL,
+ so->so_rcv.sb_cc,
+ 0,
+ 0,
+ 0);
+#endif
+ if (TAILQ_NEXT(control, next) == NULL) {
+ /*
+ * If we don't have a next we need a
+ * lock; if there is a next, the
+ * interrupt side is filling ahead of
+ * us and we don't need a lock to
+ * remove this guy (which is the
+ * head of the queue).
+ */
+ if (hold_rlock == 0) {
+ SCTP_STAT_INCR(sctps_locks_in_rcvc);
+ SCTP_INP_READ_LOCK(inp);
+ hold_rlock = 1;
+ }
+ }
+ TAILQ_REMOVE(&inp->read_queue, control, next);
+ /* Add back any hidden data */
+ if (control->held_length) {
+ held_length = 0;
+ control->held_length = 0;
+ wakeup_read_socket = 1;
+ }
+ no_rcv_needed = control->do_not_ref_stcb;
+ sctp_free_remote_addr(control->whoFrom);
+ control->data = NULL;
+ sctp_free_a_readq(stcb, control);
+ control = NULL;
+ if ((freed_so_far >= rwnd_req) && (no_rcv_needed == 0))
+ sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
+
+ } else {
+ /*
+ * The user did not read all of this
+ * message, turn off the returned MSG_EOR
+ * since we are leaving more behind on the
+ * control to read.
+ */
+#ifdef INVARIANTS
+ if (control->end_added && (control->data == NULL) &&
+ (control->tail_mbuf == NULL)) {
+ panic("Gak, control->length is corrupt?");
+ }
+#endif
+ no_rcv_needed = control->do_not_ref_stcb;
+ out_flags &= ~MSG_EOR;
+ }
+ }
+ if (out_flags & MSG_EOR) {
+ goto release;
+ }
+ if ((uio->uio_resid == 0) ||
+ ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
+ ) {
+ goto release;
+ }
+		/*
+		 * If I hit here the receiver wants more and this message is
+		 * NOT done (pd-api). So two questions: can we block? If not,
+		 * we are done. Did the user NOT set MSG_WAITALL?
+		 */
+ if (block_allowed == 0) {
+ goto release;
+ }
+		/*
+		 * We need to wait for more data. A few things: - We don't
+		 * sbunlock() so we don't get someone else reading. - We
+		 * must be sure to account for the case where what is added
+		 * is NOT to our control when we wake up.
+		 */
+
+ /*
+ * Do we need to tell the transport a rwnd update might be
+ * needed before we go to sleep?
+ */
+ if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
+ ((freed_so_far >= rwnd_req) &&
+ (control->do_not_ref_stcb == 0) &&
+ (no_rcv_needed == 0))) {
+ sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
+ }
+wait_some_more:
+ if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE) {
+ goto release;
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
+ goto release;
+
+ if (hold_rlock == 1) {
+ SCTP_INP_READ_UNLOCK(inp);
+ hold_rlock = 0;
+ }
+ if (hold_sblock == 0) {
+ SOCKBUF_LOCK(&so->so_rcv);
+ hold_sblock = 1;
+ }
+#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
+ if (stcb)
+ sctp_misc_ints(SCTP_SORECV_BLOCKSB,
+ freed_so_far,
+ stcb->asoc.my_rwnd,
+ so->so_rcv.sb_cc,
+ uio->uio_resid);
+ else
+ sctp_misc_ints(SCTP_SORECV_BLOCKSB,
+ freed_so_far,
+ 0,
+ so->so_rcv.sb_cc,
+ uio->uio_resid);
+#endif
+ if (so->so_rcv.sb_cc <= control->held_length) {
+ error = sbwait(&so->so_rcv);
+ if (error) {
+ goto release;
+ }
+ control->held_length = 0;
+ }
+ if (hold_sblock) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ hold_sblock = 0;
+ }
+ if (control->length == 0) {
+ /* still nothing here */
+ if (control->end_added == 1) {
+			/* he aborted, or is done, i.e. did a shutdown */
+ out_flags |= MSG_EOR;
+ goto done_with_control;
+ }
+ if (so->so_rcv.sb_cc > held_length) {
+ SCTP_STAT_INCR(sctps_locks_in_rcvf);
+ control->held_length = so->so_rcv.sb_cc;
+ held_length = 0;
+ }
+ goto wait_some_more;
+ } else if (control->data == NULL) {
+ panic("Impossible data==NULL length !=0");
+ }
+ goto get_more_data;
+ } else {
+ /* copy out the mbuf chain */
+get_more_data2:
+		/*
+		 * If we have a uio we grab the size from it; if not, you
+		 * get it all.
+		 */
+ if (uio)
+ cp_len = uio->uio_resid;
+ else
+ cp_len = control->length;
+
+ if ((uint32_t) cp_len >= control->length) {
+ /* easy way */
+ if ((control->end_added == 0) ||
+ (TAILQ_NEXT(control, next) == NULL)) {
+ /* Need to get rlock */
+ if (hold_rlock == 0) {
+ SCTP_INP_READ_LOCK(inp);
+ hold_rlock = 1;
+ }
+ }
+ if (control->tail_mbuf->m_flags & M_EOR) {
+ out_flags |= MSG_EOR;
+ }
+ if (control->data->m_flags & M_NOTIFICATION) {
+ out_flags |= MSG_NOTIFICATION;
+ }
+ if (uio)
+ uio->uio_resid -= control->length;
+ *mp = control->data;
+ m = control->data;
+ while (m) {
+#ifdef SCTP_SB_LOGGING
+ sctp_sblog(&so->so_rcv,
+ control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, m->m_len);
+#endif
+ sctp_sbfree(control, stcb, &so->so_rcv, m);
+ freed_so_far += m->m_len;
+#ifdef SCTP_SB_LOGGING
+ sctp_sblog(&so->so_rcv,
+ control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
+#endif
+ m = m->m_next;
+ }
+ control->data = control->tail_mbuf = NULL;
+ control->length = 0;
+ if (out_flags & MSG_EOR) {
+ /* Done with this control */
+ goto done_with_control;
+ }
+			/* still more to do with this control */
+ /* do we really support msg_waitall here? */
+ if ((block_allowed == 0) ||
+ ((in_flags & MSG_WAITALL) == 0)) {
+ goto release;
+ }
+ wait_some_more2:
+ if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE)
+ goto release;
+ if (hold_rlock == 1) {
+ SCTP_INP_READ_UNLOCK(inp);
+ hold_rlock = 0;
+ }
+ if (hold_sblock == 0) {
+ SOCKBUF_LOCK(&so->so_rcv);
+ hold_sblock = 1;
+ }
+ if (so->so_rcv.sb_cc <= control->held_length) {
+ error = sbwait(&so->so_rcv);
+ if (error) {
+ goto release;
+ }
+ }
+ if (hold_sblock) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ hold_sblock = 0;
+ }
+ if (control->length == 0) {
+ /* still nothing here */
+ if (control->end_added == 1) {
+ /*
+ * he aborted, or is done i.e.
+ * shutdown
+ */
+ out_flags |= MSG_EOR;
+ goto done_with_control;
+ }
+ if (so->so_rcv.sb_cc > held_length) {
+ control->held_length = so->so_rcv.sb_cc;
+ /*
+ * We don't use held_length while
+ * getting a message
+ */
+ held_length = 0;
+ }
+ goto wait_some_more2;
+ }
+ goto get_more_data2;
+ } else {
+ /* hard way mbuf by mbuf */
+ m = control->data;
+ if (control->end_added == 0) {
+ /* need the rlock */
+ if (hold_rlock == 0) {
+ SCTP_INP_READ_LOCK(inp);
+ hold_rlock = 1;
+ }
+ }
+ if (m->m_flags & M_NOTIFICATION) {
+ out_flags |= MSG_NOTIFICATION;
+ }
+ while ((m) && (cp_len > 0)) {
+ if (cp_len >= m->m_len) {
+ *mp = m;
+ atomic_subtract_int(&control->length, m->m_len);
+ if (uio)
+ uio->uio_resid -= m->m_len;
+ cp_len -= m->m_len;
+ control->data = m->m_next;
+ m->m_next = NULL;
+#ifdef SCTP_SB_LOGGING
+ sctp_sblog(&so->so_rcv,
+ control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, m->m_len);
+#endif
+ sctp_sbfree(control, stcb, &so->so_rcv, m);
+ freed_so_far += m->m_len;
+#ifdef SCTP_SB_LOGGING
+ sctp_sblog(&so->so_rcv,
+ control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
+#endif
+ mp = &m->m_next;
+ m = control->data;
+ } else {
+					/*
+					 * got all he wants and it's part
+					 * of this mbuf only.
+					 */
+ if (uio)
+ uio->uio_resid -= m->m_len;
+ cp_len -= m->m_len;
+ if (hold_rlock) {
+ SCTP_INP_READ_UNLOCK(inp);
+ hold_rlock = 0;
+ }
+ if (hold_sblock) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ hold_sblock = 0;
+ }
+ splx(s);
+					*mp = sctp_m_copym(m, 0, cp_len,
+					    M_TRYWAIT);
+ s = splnet();
+#ifdef SCTP_LOCK_LOGGING
+ sctp_log_lock(inp, stcb, SCTP_LOG_LOCK_SOCKBUF_R);
+#endif
+ if (hold_sblock == 0) {
+ SOCKBUF_LOCK(&so->so_rcv);
+ hold_sblock = 1;
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
+ goto release;
+
+ if (stcb &&
+ stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ no_rcv_needed = 1;
+ }
+ m->m_data += cp_len;
+ m->m_len -= cp_len;
+#ifdef SCTP_SB_LOGGING
+ sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
+#endif
+ freed_so_far += cp_len;
+ atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
+ if (stcb) {
+ atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
+ if ((freed_so_far >= rwnd_req) &&
+ (control->do_not_ref_stcb == 0) &&
+ (no_rcv_needed == 0))
+ sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
+ }
+#ifdef SCTP_SB_LOGGING
+ sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
+ SCTP_LOG_SBRESULT, 0);
+#endif
+ if (out_flags & MSG_NOTIFICATION) {
+					/*
+					 * re-mark the first mbuf if
+					 * they took a partial read.
+					 */
+ control->data->m_flags |= M_NOTIFICATION;
+ }
+ goto release;
+ }
+ }
+ }
+ }
+release:
+ if (hold_rlock == 1) {
+ SCTP_INP_READ_UNLOCK(inp);
+ hold_rlock = 0;
+ }
+ if (hold_sblock == 0) {
+ SOCKBUF_LOCK(&so->so_rcv);
+ hold_sblock = 1;
+ }
+ sbunlock(&so->so_rcv);
+
+release_unlocked:
+ if (hold_sblock) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ hold_sblock = 0;
+ }
+ if ((stcb) && (in_flags & MSG_PEEK) == 0) {
+ if ((freed_so_far >= rwnd_req) &&
+ (control && (control->do_not_ref_stcb == 0)) &&
+ (no_rcv_needed == 0))
+ sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
+ }
+ if (msg_flags)
+ *msg_flags |= out_flags;
+out:
+ if (hold_rlock == 1) {
+ SCTP_INP_READ_UNLOCK(inp);
+ hold_rlock = 0;
+ }
+ if (hold_sblock) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ hold_sblock = 0;
+ }
+ if ((stcb) && freecnt_applied) {
+ /*
+ * The lock on the socket buffer protects us so the free
+ * code will stop. But since we used the socketbuf lock and
+ * the sender uses the tcb_lock to increment, we need to use
+ * the atomic add to the refcnt.
+ */
+ atomic_add_16(&stcb->asoc.refcnt, -1);
+ freecnt_applied = 0;
+ /* Save the value back for next time */
+ stcb->freed_by_sorcv_sincelast = freed_so_far;
+ }
+ splx(s);
+#ifdef SCTP_RECV_RWND_LOGGING
+ if (stcb) {
+ sctp_misc_ints(SCTP_SORECV_DONE,
+ freed_so_far,
+ ((uio) ? (slen - uio->uio_resid) : slen),
+ stcb->asoc.my_rwnd,
+ so->so_rcv.sb_cc);
+ } else {
+ sctp_misc_ints(SCTP_SORECV_DONE,
+ freed_so_far,
+ ((uio) ? (slen - uio->uio_resid) : slen),
+ 0,
+ so->so_rcv.sb_cc);
+ }
+#endif
+ if (wakeup_read_socket) {
+ sctp_sorwakeup(inp, so);
+ }
+ return (error);
+}
+
+
+#ifdef SCTP_MBUF_LOGGING
+struct mbuf *
+sctp_m_free(struct mbuf *m)
+{
+ if (m->m_flags & M_EXT) {
+ sctp_log_mb(m, SCTP_MBUF_IFREE);
+ }
+ return (m_free(m));
+}
+
+void
+sctp_m_freem(struct mbuf *mb)
+{
+ while (mb != NULL)
+ mb = sctp_m_free(mb);
+}
+
+#endif
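+
+/*
+ * Note: these wrappers exist only under SCTP_MBUF_LOGGING; in the normal
+ * build sctputil.h maps sctp_m_free()/sctp_m_freem() straight onto
+ * m_free(9)/m_freem(9), so the logging build records each external-storage
+ * mbuf free without any change to callers.
+ */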
+
+
+int
+sctp_soreceive(so, psa, uio, mp0, controlp, flagsp)
+ struct socket *so;
+ struct sockaddr **psa;
+ struct uio *uio;
+ struct mbuf **mp0;
+ struct mbuf **controlp;
+ int *flagsp;
+{
+ int error, fromlen;
+ uint8_t sockbuf[256];
+ struct sockaddr *from;
+ struct sctp_extrcvinfo sinfo;
+ int filling_sinfo = 1;
+ struct sctp_inpcb *inp;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ /* pickup the assoc we are reading from */
+ if (inp == NULL) {
+ return (EINVAL);
+ }
+ if ((sctp_is_feature_off(inp,
+ SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
+ (controlp == NULL)) {
+ /* user does not want the sndrcv ctl */
+ filling_sinfo = 0;
+ }
+ if (psa) {
+ from = (struct sockaddr *)sockbuf;
+ fromlen = sizeof(sockbuf);
+ from->sa_len = 0;
+ } else {
+ from = NULL;
+ fromlen = 0;
+ }
+
+ error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
+ (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
+	if ((controlp) && (filling_sinfo)) {
+		/* copy back the sinfo in a CMSG format */
+		*controlp = sctp_build_ctl_nchunk(inp,
+		    (struct sctp_sndrcvinfo *)&sinfo);
+	}
+ if (psa) {
+ /* copy back the address info */
+ if (from && from->sa_len) {
+ *psa = sodupsockaddr(from, M_NOWAIT);
+ } else {
+ *psa = NULL;
+ }
+ }
+ return (error);
+}
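+
+/*
+ * Usage sketch (illustrative only; sd, buf and the handlers are
+ * hypothetical): a plain recvmsg(2) on an SCTP socket ends up here via
+ * pr_usrreqs, and event notifications surface to userland as reads with
+ * MSG_NOTIFICATION set in msg_flags:
+ *
+ *	struct iovec iov = { buf, sizeof(buf) };
+ *	struct msghdr mh;
+ *
+ *	memset(&mh, 0, sizeof(mh));
+ *	mh.msg_iov = &iov;
+ *	mh.msg_iovlen = 1;
+ *	n = recvmsg(sd, &mh, 0);
+ *	if (n > 0 && (mh.msg_flags & MSG_NOTIFICATION))
+ *		... an event arrived, not user data ...
+ *	else if (n > 0)
+ *		... user data; MSG_EOR marks the end of a record ...
+ */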
diff --git a/sys/netinet/sctputil.h b/sys/netinet/sctputil.h
new file mode 100644
index 0000000..a281931
--- /dev/null
+++ b/sys/netinet/sctputil.h
@@ -0,0 +1,314 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/* $KAME: sctputil.h,v 1.15 2005/03/06 16:04:19 itojun Exp $ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#ifndef __sctputil_h__
+#define __sctputil_h__
+
+
+
+#if defined(_KERNEL)
+
+#ifdef SCTP_MBUF_LOGGING
+
+struct mbuf *sctp_m_free(struct mbuf *m);
+void sctp_m_freem(struct mbuf *m);
+
+#else
+#define sctp_m_free m_free
+#define sctp_m_freem m_freem
+#endif
+
+
+#define sctp_m_copym m_copym
+
+#define sctp_get_associd(stcb) ((sctp_assoc_t)stcb->asoc.assoc_id)
+
+
+/*
+ * Function prototypes
+ */
+struct ifaddr *sctp_find_ifa_by_addr(struct sockaddr *sa);
+
+uint32_t sctp_select_initial_TSN(struct sctp_pcb *);
+
+uint32_t sctp_select_a_tag(struct sctp_inpcb *);
+
+int sctp_init_asoc(struct sctp_inpcb *, struct sctp_association *, int, uint32_t);
+
+void sctp_fill_random_store(struct sctp_pcb *);
+
+int
+sctp_timer_start(int, struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+
+int
+sctp_timer_stop(int, struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+
+uint32_t sctp_calculate_sum(struct mbuf *, int32_t *, uint32_t);
+
+void
+sctp_mtu_size_reset(struct sctp_inpcb *, struct sctp_association *,
+ u_long);
+
+void
+sctp_add_to_readq(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_queued_to_read *control,
+ struct sockbuf *sb,
+ int end);
+
+int
+sctp_append_to_readq(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_queued_to_read *control,
+ struct mbuf *m,
+ int end,
+ int new_cumack,
+ struct sockbuf *sb);
+
+
+int find_next_best_mtu(int);
+
+void
+ sctp_timeout_handler(void *);
+
+uint32_t
+sctp_calculate_rto(struct sctp_tcb *, struct sctp_association *,
+ struct sctp_nets *, struct timeval *);
+
+uint32_t sctp_calculate_len(struct mbuf *);
+
+caddr_t sctp_m_getptr(struct mbuf *, int, int, uint8_t *);
+
+struct sctp_paramhdr *
+sctp_get_next_param(struct mbuf *, int,
+ struct sctp_paramhdr *, int);
+
+int sctp_add_pad_tombuf(struct mbuf *, int);
+
+int sctp_pad_lastmbuf(struct mbuf *, int, struct mbuf *);
+
+void sctp_ulp_notify(uint32_t, struct sctp_tcb *, uint32_t, void *);
+
+void
+sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
+ struct sctp_inpcb *new_inp,
+ struct sctp_tcb *stcb);
+
+
+void sctp_stop_timers_for_shutdown(struct sctp_tcb *);
+
+void sctp_report_all_outbound(struct sctp_tcb *);
+
+int sctp_expand_mapping_array(struct sctp_association *);
+
+void sctp_abort_notification(struct sctp_tcb *, int);
+
+/* We abort responding to an IP packet for some reason */
+void
+sctp_abort_association(struct sctp_inpcb *, struct sctp_tcb *,
+ struct mbuf *, int, struct sctphdr *, struct mbuf *);
+
+/* We choose to abort via user input */
+void
+sctp_abort_an_association(struct sctp_inpcb *, struct sctp_tcb *, int,
+ struct mbuf *);
+
+void
+sctp_handle_ootb(struct mbuf *, int, int, struct sctphdr *,
+ struct sctp_inpcb *, struct mbuf *);
+
+int sctp_is_there_an_abort_here(struct mbuf *, int, uint32_t *);
+uint32_t sctp_is_same_scope(struct sockaddr_in6 *, struct sockaddr_in6 *);
+struct sockaddr_in6 *
+sctp_recover_scope(struct sockaddr_in6 *,
+ struct sockaddr_in6 *);
+
+
+
+
+#define sctp_recover_scope_mac(addr, store) do { \
+ if ((addr->sin6_family == AF_INET6) && \
+ (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) && \
+ (addr->sin6_scope_id == 0)) { \
+ *store = *addr; \
+ if (!sa6_recoverscope(store)) { \
+ addr = store; \
+ } \
+ } \
+ } while (0)
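+
+/*
+ * Intended use, as a sketch (illustrative; addr must be a modifiable
+ * pointer lvalue and store a caller-provided struct sockaddr_in6, since
+ * the macro may repoint addr at store):
+ *
+ *	struct sockaddr_in6 *sin6;
+ *	struct sockaddr_in6 store;
+ *
+ *	sctp_recover_scope_mac(sin6, &store);
+ *	... sin6 now carries a recovered sin6_scope_id for a link-local
+ *	    address that arrived with a zero scope ...
+ */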
+
+
+
+int sctp_cmpaddr(struct sockaddr *, struct sockaddr *);
+
+void sctp_print_address(struct sockaddr *);
+void sctp_print_address_pkt(struct ip *, struct sctphdr *);
+
+void
+sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb,
+ uint32_t error, int no_lock);
+
+int
+sctp_release_pr_sctp_chunk(struct sctp_tcb *, struct sctp_tmit_chunk *,
+ int, struct sctpchunk_listhead *);
+
+struct mbuf *sctp_generate_invmanparam(int);
+
+
+#ifdef SCTP_MBCNT_LOGGING
+void
+sctp_free_bufspace(struct sctp_tcb *, struct sctp_association *,
+    struct sctp_tmit_chunk *, int);
+
+#else
+#define sctp_free_bufspace(stcb, asoc, tp1, chk_cnt) \
+do { \
+ if (tp1->data != NULL) { \
+ atomic_add_16(&((asoc)->chunks_on_out_queue), -chk_cnt); \
+ if ((asoc)->total_output_queue_size >= tp1->book_size) { \
+ atomic_add_int(&((asoc)->total_output_queue_size), -tp1->book_size); \
+ } else { \
+ (asoc)->total_output_queue_size = 0; \
+ } \
+ if (stcb->sctp_socket && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || \
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { \
+ if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { \
+ atomic_add_int(&((stcb)->sctp_socket->so_snd.sb_cc), -tp1->book_size); \
+ } else { \
+ stcb->sctp_socket->so_snd.sb_cc = 0; \
+ } \
+ } \
+ } \
+} while (0)
+
+#endif
+
+#define sctp_free_spbufspace(stcb, asoc, sp) \
+do { \
+ if (sp->data != NULL) { \
+ atomic_add_16(&(asoc)->chunks_on_out_queue, -1); \
+ if ((asoc)->total_output_queue_size >= sp->length) { \
+			atomic_add_int(&(asoc)->total_output_queue_size, -sp->length); \
+ } else { \
+ (asoc)->total_output_queue_size = 0; \
+ } \
+ if (stcb->sctp_socket && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || \
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { \
+ if (stcb->sctp_socket->so_snd.sb_cc >= sp->length) { \
+				atomic_add_int(&stcb->sctp_socket->so_snd.sb_cc, -sp->length); \
+ } else { \
+ stcb->sctp_socket->so_snd.sb_cc = 0; \
+ } \
+ } \
+ } \
+} while (0)
+
+#define sctp_snd_sb_alloc(stcb, sz) \
+do { \
+ atomic_add_int(&stcb->asoc.total_output_queue_size,sz); \
+ if ((stcb->sctp_socket != NULL) && \
+ ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || \
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { \
+ atomic_add_int(&stcb->sctp_socket->so_snd.sb_cc,sz); \
+ } \
+} while (0)
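+
+/*
+ * sctp_snd_sb_alloc() is the charge-side twin of the free macros above:
+ * every byte added to total_output_queue_size and so_snd.sb_cc here must
+ * eventually come back out through sctp_free_bufspace() or
+ * sctp_free_spbufspace(), or the send-buffer accounting drifts.
+ */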
+
+
+int
+sctp_soreceive(struct socket *so, struct sockaddr **psa,
+ struct uio *uio,
+ struct mbuf **mp0,
+ struct mbuf **controlp,
+ int *flagsp);
+
+
+#ifdef SCTP_STAT_LOGGING
+void
+ sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d);
+
+void
+sctp_wakeup_log(struct sctp_tcb *stcb,
+ uint32_t cumtsn,
+ uint32_t wake_cnt, int from);
+
+void sctp_log_strm_del_alt(uint32_t, uint16_t, int);
+
+void sctp_log_nagle_event(struct sctp_tcb *stcb, int action);
+
+
+void
+ sctp_log_mb(struct mbuf *m, int from);
+
+void
+sctp_sblog(struct sockbuf *sb,
+ struct sctp_tcb *stcb, int from, int incr);
+
+void
+sctp_log_strm_del(struct sctp_queued_to_read *control,
+ struct sctp_queued_to_read *poschk,
+ int from);
+void sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *, int, uint8_t);
+void rto_logging(struct sctp_nets *net, int from);
+
+void sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc);
+
+void sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from);
+void sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *, int, int, uint8_t);
+void sctp_log_block(uint8_t, struct socket *, struct sctp_association *, int);
+void sctp_log_rwnd(uint8_t, uint32_t, uint32_t, uint32_t);
+void sctp_log_mbcnt(uint8_t, uint32_t, uint32_t, uint32_t, uint32_t);
+void sctp_log_rwnd_set(uint8_t, uint32_t, uint32_t, uint32_t, uint32_t);
+int sctp_fill_stat_log(struct mbuf *);
+void sctp_log_fr(uint32_t, uint32_t, uint32_t, int);
+void sctp_log_sack(uint32_t, uint32_t, uint32_t, uint16_t, uint16_t, int);
+void sctp_log_map(uint32_t, uint32_t, uint32_t, int);
+
+void sctp_clr_stat_log(void);
+
+#endif
+
+#ifdef SCTP_AUDITING_ENABLED
+void
+sctp_auditing(int, struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+void sctp_audit_log(uint8_t, uint8_t);
+
+#endif
+
+
+#endif /* _KERNEL */
+#endif
diff --git a/sys/netinet6/in6_proto.c b/sys/netinet6/in6_proto.c
index 95f984c..6765102 100644
--- a/sys/netinet6/in6_proto.c
+++ b/sys/netinet6/in6_proto.c
@@ -66,6 +66,7 @@
#include "opt_ipsec.h"
#include "opt_ipstealth.h"
#include "opt_carp.h"
+#include "opt_sctp.h"
#include <sys/param.h>
#include <sys/socket.h>
@@ -127,6 +128,14 @@
#include <netinet/ip_carp.h>
#endif
+#ifdef SCTP
+#include <netinet/in_pcb.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_var.h>
+#include <netinet6/sctp6_var.h>
+#endif /* SCTP */
+
#ifdef FAST_IPSEC
#include <netipsec/ipsec6.h>
#define IPSEC
@@ -184,6 +193,42 @@ struct ip6protosw inet6sw[] = {
.pr_drain = tcp_drain,
.pr_usrreqs = &tcp6_usrreqs,
},
+#ifdef SCTP
+{
+ .pr_type = SOCK_DGRAM,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_SCTP,
+ .pr_flags = PR_WANTRCVD,
+ .pr_input = sctp6_input,
+ .pr_ctlinput = sctp6_ctlinput,
+ .pr_ctloutput = sctp_ctloutput,
+ .pr_drain = sctp_drain,
+ .pr_usrreqs = &sctp6_usrreqs
+},
+{
+ .pr_type = SOCK_SEQPACKET,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_SCTP,
+ .pr_flags = PR_WANTRCVD,
+ .pr_input = sctp6_input,
+ .pr_ctlinput = sctp6_ctlinput,
+ .pr_ctloutput = sctp_ctloutput,
+ .pr_drain = sctp_drain,
+ .pr_usrreqs = &sctp6_usrreqs
+},
+
+{
+ .pr_type = SOCK_STREAM,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_SCTP,
+ .pr_flags = PR_WANTRCVD,
+ .pr_input = sctp6_input,
+ .pr_ctlinput = sctp6_ctlinput,
+ .pr_ctloutput = sctp_ctloutput,
+ .pr_drain = sctp_drain,
+ .pr_usrreqs = &sctp6_usrreqs
+},
+#endif /* SCTP */
{
.pr_type = SOCK_RAW,
.pr_domain = &inet6domain,
@@ -416,6 +461,9 @@ SYSCTL_NODE(_net_inet6, IPPROTO_IPV6, ip6, CTLFLAG_RW, 0, "IP6");
SYSCTL_NODE(_net_inet6, IPPROTO_ICMPV6, icmp6, CTLFLAG_RW, 0, "ICMP6");
SYSCTL_NODE(_net_inet6, IPPROTO_UDP, udp6, CTLFLAG_RW, 0, "UDP6");
SYSCTL_NODE(_net_inet6, IPPROTO_TCP, tcp6, CTLFLAG_RW, 0, "TCP6");
+#ifdef SCTP
+SYSCTL_NODE(_net_inet6, IPPROTO_SCTP, sctp6, CTLFLAG_RW, 0, "SCTP6");
+#endif
#ifdef IPSEC
SYSCTL_NODE(_net_inet6, IPPROTO_ESP, ipsec6, CTLFLAG_RW, 0, "IPSEC6");
#endif /* IPSEC */
diff --git a/sys/netinet6/sctp6_usrreq.c b/sys/netinet6/sctp6_usrreq.c
new file mode 100644
index 0000000..2894a22
--- /dev/null
+++ b/sys/netinet6/sctp6_usrreq.c
@@ -0,0 +1,1370 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* $KAME: sctp6_usrreq.c,v 1.38 2005/08/24 08:08:56 suz Exp $ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_ipsec.h"
+#include "opt_sctp.h"
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/mbuf.h>
+#include <sys/domain.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/malloc.h>
+#include <sys/socketvar.h>
+#include <sys/sysctl.h>
+#include <sys/errno.h>
+#include <sys/stat.h>
+#include <sys/systm.h>
+#include <sys/syslog.h>
+#include <sys/proc.h>
+#include <net/if.h>
+#include <net/route.h>
+#include <net/if_types.h>
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#include <netinet/in_pcb.h>
+#include <netinet/in_var.h>
+#include <netinet/ip_var.h>
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_bsd_addr.h>
+#include <netinet/sctp_input.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet6/ip6_var.h>
+#include <netinet6/scope6_var.h>
+#include <netinet/ip6.h>
+#include <netinet6/in6_pcb.h>
+#include <netinet/icmp6.h>
+#include <netinet6/sctp6_var.h>
+#include <netinet6/ip6protosw.h>
+#include <netinet6/nd6.h>
+
+#ifdef IPSEC
+#include <netinet6/ipsec.h>
+#endif /* IPSEC */
+
+#if defined(NFAITH) && NFAITH > 0
+#include <net/if_faith.h>
+#endif
+
+
+
+extern struct protosw inetsw[];
+
+
+#ifndef in6pcb
+#define in6pcb inpcb
+#endif
+#ifndef sotoin6pcb
+#define sotoin6pcb sotoinpcb
+#endif
+
+
+#ifdef SCTP_DEBUG
+extern u_int32_t sctp_debug_on;
+
+#endif
+
+
+
+extern int sctp_no_csum_on_loopback;
+
+int
+sctp6_input(mp, offp, proto)
+	struct mbuf **mp;
+	int *offp;
+	int proto;
+{
+ struct mbuf *m = *mp;
+ struct ip6_hdr *ip6;
+ struct sctphdr *sh;
+ struct sctp_inpcb *in6p = NULL;
+ struct sctp_nets *net;
+ int refcount_up = 0;
+ u_int32_t check, calc_check;
+ struct inpcb *in6p_ip;
+ struct sctp_chunkhdr *ch;
+ int length, mlen, offset, iphlen;
+ u_int8_t ecn_bits;
+ struct sctp_tcb *stcb = NULL;
+ int off = *offp;
+ int s;
+
+ ip6 = mtod(m, struct ip6_hdr *);
+#ifndef PULLDOWN_TEST
+ /* If PULLDOWN_TEST off, must be in a single mbuf. */
+ IP6_EXTHDR_CHECK(m, off, (int)(sizeof(*sh) + sizeof(*ch)), IPPROTO_DONE);
+ sh = (struct sctphdr *)((caddr_t)ip6 + off);
+ ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
+#else
+	/* Ensure that (sctphdr + sctp_chunkhdr) are contiguous. */
+ IP6_EXTHDR_GET(sh, struct sctphdr *, m, off, sizeof(*sh) + sizeof(*ch));
+ if (sh == NULL) {
+ SCTP_STAT_INCR(sctps_hdrops);
+ return IPPROTO_DONE;
+ }
+ ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
+#endif
+
+ iphlen = off;
+ offset = iphlen + sizeof(*sh) + sizeof(*ch);
+
+#if defined(NFAITH) && NFAITH > 0
+
+ if (faithprefix_p != NULL && (*faithprefix_p) (&ip6->ip6_dst)) {
+ /* XXX send icmp6 host/port unreach? */
+ goto bad;
+ }
+#endif /* NFAITH defined and > 0 */
+ SCTP_STAT_INCR(sctps_recvpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
+ printf("V6 input gets a packet iphlen:%d pktlen:%d\n", iphlen, m->m_pkthdr.len);
+ }
+#endif
+ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
+		/* No multicast support in SCTP */
+ goto bad;
+ }
+ /* destination port of 0 is illegal, based on RFC2960. */
+ if (sh->dest_port == 0)
+ goto bad;
+ if ((sctp_no_csum_on_loopback == 0) ||
+ (m->m_pkthdr.rcvif == NULL) ||
+ (m->m_pkthdr.rcvif->if_type != IFT_LOOP)) {
+ /*
+ * we do NOT validate things from the loopback if the sysctl
+ * is set to 1.
+ */
+ check = sh->checksum; /* save incoming checksum */
+ if ((check == 0) && (sctp_no_csum_on_loopback)) {
+ /*
+ * special hook for where we got a local address
+ * somehow routed across a non IFT_LOOP type
+ * interface
+ */
+ if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &ip6->ip6_dst))
+ goto sctp_skip_csum;
+ }
+ sh->checksum = 0; /* prepare for calc */
+ calc_check = sctp_calculate_sum(m, &mlen, iphlen);
+ if (calc_check != check) {
+#ifdef SCTP_DEBUG
+ if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
+				printf("Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
+				    calc_check, check, (void *)m,
+				    mlen, iphlen);
+ }
+#endif
+ stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
+ sh, ch, &in6p, &net);
+ /* in6p's ref-count increased && stcb locked */
+ if ((in6p) && (stcb)) {
+ sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
+ sctp_chunk_output((struct sctp_inpcb *)in6p, stcb, 2);
+ } else if ((in6p != NULL) && (stcb == NULL)) {
+ refcount_up = 1;
+ }
+ SCTP_STAT_INCR(sctps_badsum);
+ SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
+ goto bad;
+ }
+ sh->checksum = calc_check;
+ } else {
+sctp_skip_csum:
+ mlen = m->m_pkthdr.len;
+ }
+ net = NULL;
+	/*
+	 * Locate pcb and tcb for the datagram; sctp_findassociation_addr()
+	 * wants IP/SCTP/first chunk header...
+	 */
+ stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
+ sh, ch, &in6p, &net);
+ /* in6p's ref-count increased */
+ if (in6p == NULL) {
+ struct sctp_init_chunk *init_chk, chunk_buf;
+
+ SCTP_STAT_INCR(sctps_noport);
+ if (ch->chunk_type == SCTP_INITIATION) {
+ /*
+ * we do a trick here to get the INIT tag, dig in
+ * and get the tag from the INIT and put it in the
+ * common header.
+ */
+ init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
+ iphlen + sizeof(*sh), sizeof(*init_chk),
+ (u_int8_t *) & chunk_buf);
+ sh->v_tag = init_chk->init.initiate_tag;
+ }
+ sctp_send_abort(m, iphlen, sh, 0, NULL);
+ goto bad;
+ } else if (stcb == NULL) {
+ refcount_up = 1;
+ }
+ in6p_ip = (struct inpcb *)in6p;
+#ifdef IPSEC
+ /*
+ * Check AH/ESP integrity.
+ */
+	if (in6p->sctp_socket && ipsec6_in_reject_so(m, in6p->sctp_socket)) {
+/* XXX */
+ ipsec6stat.in_polvio++;
+ goto bad;
+ }
+#endif /* IPSEC */
+
+
+ /*
+ * CONTROL chunk processing
+ */
+ length = ntohs(ip6->ip6_plen) + iphlen;
+ offset -= sizeof(*ch);
+ ecn_bits = ((ntohl(ip6->ip6_flow) >> 20) & 0x000000ff);
+ s = splnet();
+ (void)sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
+ in6p, stcb, net, ecn_bits);
+ /* inp's ref-count reduced && stcb unlocked */
+ splx(s);
+ /* XXX this stuff below gets moved to appropriate parts later... */
+ if (m)
+ m_freem(m);
+ if ((in6p) && refcount_up) {
+ /* reduce ref-count */
+ SCTP_INP_WLOCK(in6p);
+ SCTP_INP_DECR_REF(in6p);
+ SCTP_INP_WUNLOCK(in6p);
+ }
+ return IPPROTO_DONE;
+
+bad:
+ if (stcb)
+ SCTP_TCB_UNLOCK(stcb);
+
+ if ((in6p) && refcount_up) {
+ /* reduce ref-count */
+ SCTP_INP_WLOCK(in6p);
+ SCTP_INP_DECR_REF(in6p);
+ SCTP_INP_WUNLOCK(in6p);
+ }
+ if (m)
+ m_freem(m);
+ return IPPROTO_DONE;
+}
+
+
+static void
+sctp6_notify_mbuf(struct sctp_inpcb *inp,
+ struct icmp6_hdr *icmp6,
+ struct sctphdr *sh,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ u_int32_t nxtsz;
+
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
+ (icmp6 == NULL) || (sh == NULL)) {
+ goto out;
+ }
+ /* First do we even look at it? */
+ if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag))
+ goto out;
+
+ if (icmp6->icmp6_type != ICMP6_PACKET_TOO_BIG) {
+		/* not PACKET TOO BIG */
+ goto out;
+ }
+ /*
+ * ok we need to look closely. We could even get smarter and look at
+ * anyone that we sent to in case we get a different ICMP that tells
+ * us there is no way to reach a host, but for this impl, all we
+ * care about is MTU discovery.
+ */
+ nxtsz = ntohl(icmp6->icmp6_mtu);
+ /* Stop any PMTU timer */
+ sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, NULL);
+
+ /* Adjust destination size limit */
+ if (net->mtu > nxtsz) {
+ net->mtu = nxtsz;
+ }
+ /* now what about the ep? */
+ if (stcb->asoc.smallest_mtu > nxtsz) {
+ struct sctp_tmit_chunk *chk;
+
+ /* Adjust that too */
+ stcb->asoc.smallest_mtu = nxtsz;
+ /* now off to subtract IP_DF flag if needed */
+
+ TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
+ if ((u_int32_t) (chk->send_size + IP_HDR_SIZE) > nxtsz) {
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ }
+ }
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if ((u_int32_t) (chk->send_size + IP_HDR_SIZE) > nxtsz) {
+				/*
+				 * For this guy we also mark for immediate
+				 * resend since we sent too big a chunk
+				 */
+				chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+				if (chk->sent != SCTP_DATAGRAM_RESEND)
+					stcb->asoc.sent_queue_retran_cnt++;
+				chk->sent = SCTP_DATAGRAM_RESEND;
+				chk->rec.data.doing_fast_retransmit = 0;
+ /* Clear any time so NO RTT is being done */
+ chk->sent_rcv_time.tv_sec = 0;
+ chk->sent_rcv_time.tv_usec = 0;
+ stcb->asoc.total_flight -= chk->send_size;
+ net->flight_size -= chk->send_size;
+ }
+ }
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, NULL);
+out:
+ if (stcb)
+ SCTP_TCB_UNLOCK(stcb);
+}
+
+
+void
+sctp6_ctlinput(cmd, pktdst, d)
+ int cmd;
+ struct sockaddr *pktdst;
+ void *d;
+{
+ struct sctphdr sh;
+ struct ip6ctlparam *ip6cp = NULL;
+ int s, cm;
+
+ if (pktdst->sa_family != AF_INET6 ||
+ pktdst->sa_len != sizeof(struct sockaddr_in6))
+ return;
+
+ if ((unsigned)cmd >= PRC_NCMDS)
+ return;
+ if (PRC_IS_REDIRECT(cmd)) {
+ d = NULL;
+ } else if (inet6ctlerrmap[cmd] == 0) {
+ return;
+ }
+ /* if the parameter is from icmp6, decode it. */
+ if (d != NULL) {
+ ip6cp = (struct ip6ctlparam *)d;
+ } else {
+ ip6cp = (struct ip6ctlparam *)NULL;
+ }
+
+ if (ip6cp) {
+		/*
+		 * XXX: We assume that when ip6cp is non-NULL, its ip6c_m
+		 * and ip6c_off fields are valid.
+		 */
+ /* check if we can safely examine src and dst ports */
+ struct sctp_inpcb *inp = NULL;
+ struct sctp_tcb *stcb = NULL;
+ struct sctp_nets *net = NULL;
+ struct sockaddr_in6 final;
+
+ if (ip6cp->ip6c_m == NULL ||
+ (size_t)ip6cp->ip6c_m->m_pkthdr.len < (ip6cp->ip6c_off + sizeof(sh)))
+ return;
+
+ bzero(&sh, sizeof(sh));
+ bzero(&final, sizeof(final));
+ inp = NULL;
+ net = NULL;
+ m_copydata(ip6cp->ip6c_m, ip6cp->ip6c_off, sizeof(sh),
+ (caddr_t)&sh);
+ ip6cp->ip6c_src->sin6_port = sh.src_port;
+ final.sin6_len = sizeof(final);
+ final.sin6_family = AF_INET6;
+ final.sin6_addr = ((struct sockaddr_in6 *)pktdst)->sin6_addr;
+ final.sin6_port = sh.dest_port;
+ s = splnet();
+ stcb = sctp_findassociation_addr_sa((struct sockaddr *)ip6cp->ip6c_src,
+ (struct sockaddr *)&final,
+ &inp, &net, 1);
+ /* inp's ref-count increased && stcb locked */
+ if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
+ if (cmd == PRC_MSGSIZE) {
+ sctp6_notify_mbuf(inp,
+ ip6cp->ip6c_icmp6,
+ &sh,
+ stcb,
+ net);
+ /* inp's ref-count reduced && stcb unlocked */
+ } else {
+ if (cmd == PRC_HOSTDEAD) {
+ cm = EHOSTUNREACH;
+ } else {
+ cm = inet6ctlerrmap[cmd];
+ }
+ sctp_notify(inp, cm, &sh,
+ (struct sockaddr *)&final,
+ stcb, net);
+ /* inp's ref-count reduced && stcb unlocked */
+ }
+ } else {
+ if (PRC_IS_REDIRECT(cmd) && inp) {
+ in6_rtchange((struct in6pcb *)inp,
+ inet6ctlerrmap[cmd]);
+ }
+ if (inp) {
+ /* reduce inp's ref-count */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if (stcb)
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ splx(s);
+ }
+}
+
+/*
+ * this routine can probably be collapsed into the one in sctp_usrreq.c
+ * since they do the same thing and now we lookup with a sockaddr
+ */
+static int
+sctp6_getcred(SYSCTL_HANDLER_ARGS)
+{
+ struct sockaddr_in6 addrs[2];
+ struct sctp_inpcb *inp;
+ struct sctp_nets *net;
+ struct sctp_tcb *stcb;
+ int error, s;
+
+ error = suser(req->td);
+ if (error)
+ return (error);
+
+ if (req->newlen != sizeof(addrs))
+ return (EINVAL);
+ if (req->oldlen != sizeof(struct ucred))
+ return (EINVAL);
+ error = SYSCTL_IN(req, addrs, sizeof(addrs));
+ if (error)
+ return (error);
+ s = splnet();
+
+ stcb = sctp_findassociation_addr_sa(sin6tosa(&addrs[0]),
+ sin6tosa(&addrs[1]),
+ &inp, &net, 1);
+ if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
+ error = ENOENT;
+ if (inp) {
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ goto out;
+ }
+ error = SYSCTL_OUT(req, inp->sctp_socket->so_cred,
+ sizeof(struct ucred));
+
+ SCTP_TCB_UNLOCK(stcb);
+out:
+ splx(s);
+ return (error);
+}
+
+SYSCTL_PROC(_net_inet6_sctp6, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
+ 0, 0,
+ sctp6_getcred, "S,ucred", "Get the ucred of a SCTP6 connection");
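+
+/*
+ * Userland side of the node above, sketched (hypothetical fragment, error
+ * handling omitted; requires the privilege checked by suser() above):
+ *
+ *	struct sockaddr_in6 addrs[2];	the two endpoint addresses, as above
+ *	struct ucred uc;
+ *	size_t len = sizeof(uc);
+ *
+ *	... fill in addrs[0] and addrs[1] ...
+ *	sysctlbyname("net.inet6.sctp6.getcred", &uc, &len,
+ *	    addrs, sizeof(addrs));
+ */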
+
+
+/* This is the same as the sctp_abort() could be made common */
+static void
+sctp6_abort(struct socket *so)
+{
+ struct sctp_inpcb *inp;
+ int s;
+ uint32_t flags;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == 0)
+ return;
+ s = splnet();
+sctp_must_try_again:
+ flags = inp->sctp_flags;
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 17);
+#endif
+ if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+ (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 16);
+#endif
+ sctp_inpcb_free(inp, 1, 0);
+ SOCK_LOCK(so);
+ so->so_snd.sb_cc = 0;
+ so->so_snd.sb_mb = NULL;
+ so->so_snd.sb_mbcnt = 0;
+
+ /*
+ * same for the rcv ones, they are only here for the
+ * accounting/select.
+ */
+ so->so_rcv.sb_cc = 0;
+ so->so_rcv.sb_mb = NULL;
+ so->so_rcv.sb_mbcnt = 0;
+ /*
+ * Now null out the reference, we are completely detached.
+ */
+ so->so_pcb = NULL;
+ SOCK_UNLOCK(so);
+ } else {
+ flags = inp->sctp_flags;
+ if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
+ goto sctp_must_try_again;
+ }
+ }
+ splx(s);
+ return;
+}
+
+static int
+sctp6_attach(struct socket *so, int proto, struct thread *p)
+{
+ struct in6pcb *inp6;
+ int s, error;
+ struct sctp_inpcb *inp;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp != NULL)
+ return EINVAL;
+
+ if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
+ error = soreserve(so, sctp_sendspace, sctp_recvspace);
+ if (error)
+ return error;
+ }
+ s = splnet();
+ error = sctp_inpcb_alloc(so);
+ splx(s);
+ if (error)
+ return error;
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ inp->sctp_flags |= SCTP_PCB_FLAGS_BOUND_V6; /* I'm v6! */
+ inp6 = (struct in6pcb *)inp;
+
+ inp6->inp_vflag |= INP_IPV6;
+ inp6->in6p_hops = -1; /* use kernel default */
+ inp6->in6p_cksum = -1; /* just to be sure */
+#ifdef INET
+ /*
+ * XXX: ugly!! IPv4 TTL initialization is necessary for an IPv6
+ * socket as well, because the socket may be bound to an IPv6
+ * wildcard address, which may match an IPv4-mapped IPv6 address.
+ */
+ inp6->inp_ip_ttl = ip_defttl;
+#endif
+ /*
+ * Hmm what about the IPSEC stuff that is missing here but in
+ * sctp_attach()?
+ */
+ return 0;
+}
+
+static int
+sctp6_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
+{
+ struct sctp_inpcb *inp;
+ struct in6pcb *inp6;
+ int s, error;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == 0)
+ return EINVAL;
+
+ inp6 = (struct in6pcb *)inp;
+ inp6->inp_vflag &= ~INP_IPV4;
+ inp6->inp_vflag |= INP_IPV6;
+ if (addr != NULL &&
+ (inp6->inp_flags & IN6P_IPV6_V6ONLY)
+ == 0) {
+ if (addr->sa_family == AF_INET) {
+ /* binding v4 addr to v6 socket, so reset flags */
+ inp6->inp_vflag |= INP_IPV4;
+ inp6->inp_vflag &= ~INP_IPV6;
+ } else {
+ struct sockaddr_in6 *sin6_p;
+
+ sin6_p = (struct sockaddr_in6 *)addr;
+
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6_p->sin6_addr)) {
+ inp6->inp_vflag |= INP_IPV4;
+ } else if (IN6_IS_ADDR_V4MAPPED(&sin6_p->sin6_addr)) {
+ struct sockaddr_in sin;
+
+ in6_sin6_2_sin(&sin, sin6_p);
+ inp6->inp_vflag |= INP_IPV4;
+ inp6->inp_vflag &= ~INP_IPV6;
+ s = splnet();
+ error = sctp_inpcb_bind(so, (struct sockaddr *)&sin, p);
+ splx(s);
+ return error;
+ }
+ }
+ } else if (addr != NULL) {
+ /* IPV6_V6ONLY socket */
+ if (addr->sa_family == AF_INET) {
+ /* can't bind v4 addr to v6 only socket! */
+ return EINVAL;
+ } else {
+ struct sockaddr_in6 *sin6_p;
+
+ sin6_p = (struct sockaddr_in6 *)addr;
+
+ if (IN6_IS_ADDR_V4MAPPED(&sin6_p->sin6_addr))
+ /* can't bind v4-mapped addrs either! */
+ /* NOTE: we don't support SIIT */
+ return EINVAL;
+ }
+ }
+ s = splnet();
+ error = sctp_inpcb_bind(so, addr, p);
+ splx(s);
+ return error;
+}
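+
+/*
+ * The dual-stack rules above, seen from userland (hypothetical fragment):
+ * an AF_INET6 SCTP socket without IPV6_V6ONLY may be bound to an AF_INET
+ * or v4-mapped address; with the option set, both are rejected (EINVAL).
+ *
+ *	int on = 1;
+ *	int sd = socket(AF_INET6, SOCK_SEQPACKET, IPPROTO_SCTP);
+ *
+ *	setsockopt(sd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
+ *	... a later bind(2) with a v4 or v4-mapped sockaddr now fails ...
+ */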
+
+
+static void
+sctp6_close(struct socket *so)
+{
+ struct sctp_inpcb *inp;
+ uint32_t flags;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == 0)
+ return;
+
+ /*
+ * Inform all the lower layer assoc that we are done.
+ */
+sctp_must_try_again:
+ flags = inp->sctp_flags;
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 17);
+#endif
+ if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+ (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
+ if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
+ (so->so_rcv.sb_cc > 0)) {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 13);
+#endif
+ sctp_inpcb_free(inp, 1, 1);
+ } else {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 14);
+#endif
+ sctp_inpcb_free(inp, 0, 1);
+ }
+ /*
+ * The socket is now detached, no matter what the state of
+ * the SCTP association.
+ */
+ SOCK_LOCK(so);
+ so->so_snd.sb_cc = 0;
+ so->so_snd.sb_mb = NULL;
+ so->so_snd.sb_mbcnt = 0;
+
+ /*
+ * same for the rcv ones, they are only here for the
+ * accounting/select.
+ */
+ so->so_rcv.sb_cc = 0;
+ so->so_rcv.sb_mb = NULL;
+ so->so_rcv.sb_mbcnt = 0;
+ /*
+ * Now null out the reference, we are completely detached.
+ */
+ so->so_pcb = NULL;
+ SOCK_UNLOCK(so);
+ } else {
+ flags = inp->sctp_flags;
+ if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
+ goto sctp_must_try_again;
+ }
+ }
+ return;
+
+}
+
+
+static int
+sctp6_disconnect(struct socket *so)
+{
+ struct sctp_inpcb *inp;
+ int s;
+
+ s = splnet(); /* XXX */
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ splx(s);
+ return (ENOTCONN);
+ }
+ SCTP_INP_RLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+ if (LIST_EMPTY(&inp->sctp_asoc_list)) {
+ /* No connection */
+ splx(s);
+ SCTP_INP_RUNLOCK(inp);
+ return (ENOTCONN);
+ } else {
+ int some_on_streamwheel = 0;
+ struct sctp_association *asoc;
+ struct sctp_tcb *stcb;
+
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ splx(s);
+ SCTP_INP_RUNLOCK(inp);
+ return (EINVAL);
+ }
+ SCTP_TCB_LOCK(stcb);
+ asoc = &stcb->asoc;
+ if (((so->so_options & SO_LINGER) &&
+ (so->so_linger == 0)) ||
+ (so->so_rcv.sb_cc > 0)) {
+ if (SCTP_GET_STATE(asoc) !=
+ SCTP_STATE_COOKIE_WAIT) {
+ /* Left with Data unread */
+ struct mbuf *err;
+
+ err = NULL;
+ MGET(err, M_DONTWAIT, MT_DATA);
+ if (err) {
+ /*
+ * Fill in the user
+ * initiated abort
+ */
+ struct sctp_paramhdr *ph;
+
+ ph = mtod(err, struct sctp_paramhdr *);
+ err->m_len = sizeof(struct sctp_paramhdr);
+ ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(err->m_len);
+ }
+ sctp_send_abort_tcb(stcb, err);
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ sctp_free_assoc(inp, stcb, 0);
+ /* No unlock tcb assoc is gone */
+ splx(s);
+ return (0);
+ }
+ if (!TAILQ_EMPTY(&asoc->out_wheel)) {
+ /* Check to see if some data queued */
+ struct sctp_stream_out *outs;
+
+ TAILQ_FOREACH(outs, &asoc->out_wheel,
+ next_spoke) {
+ if (!TAILQ_EMPTY(&outs->outqueue)) {
+ some_on_streamwheel = 1;
+ break;
+ }
+ }
+ }
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (some_on_streamwheel == 0)) {
+ /* nothing queued to send, so I'm done... */
+ if ((SCTP_GET_STATE(asoc) !=
+ SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(asoc) !=
+ SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ /* only send SHUTDOWN the first time */
+ sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
+ sctp_chunk_output(stcb->sctp_ep, stcb, 1);
+ asoc->state = SCTP_STATE_SHUTDOWN_SENT;
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+ stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ }
+ } else {
+ /*
+ * we still got (or just got) data to send,
+ * so set SHUTDOWN_PENDING
+ */
+ /*
+ * XXX sockets draft says that MSG_EOF
+ * should be sent with no data. currently,
+ * we will allow user data to be sent first
+ * and move to SHUTDOWN-PENDING
+ */
+ asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ splx(s);
+ return (0);
+ }
+ } else {
+ /* UDP model does not support this */
+ SCTP_INP_RUNLOCK(inp);
+ splx(s);
+ return EOPNOTSUPP;
+ }
+}
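+
+/*
+ * Note on the abortive branch above: both here and in sctp6_close() it is
+ * what a userland teardown with SO_LINGER and a zero linger time reaches,
+ * sketched (hypothetical fragment; sd is an open SCTP socket):
+ *
+ *	struct linger l = { 1, 0 };
+ *
+ *	setsockopt(sd, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
+ *	close(sd);	... sends ABORT instead of SHUTDOWN ...
+ */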
+
+int
+sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+ struct mbuf *control, struct thread *p);
+
+
+
+static int
+sctp6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+ struct mbuf *control, struct thread *p)
+{
+ struct sctp_inpcb *inp;
+ struct inpcb *in_inp;
+ struct in6pcb *inp6;
+
+#ifdef INET
+ struct sockaddr_in6 *sin6;
+
+#endif /* INET */
+ /* No SPL needed since sctp_output does this */
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ if (control) {
+ m_freem(control);
+ control = NULL;
+ }
+ m_freem(m);
+ return EINVAL;
+ }
+ in_inp = (struct inpcb *)inp;
+ inp6 = (struct in6pcb *)inp;
+	/*
+	 * For the TCP model we may get a NULL addr; if we are a connected
+	 * socket that's ok.
+	 */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) &&
+ (addr == NULL)) {
+ goto connected_type;
+ }
+ if (addr == NULL) {
+ m_freem(m);
+ if (control) {
+ m_freem(control);
+ control = NULL;
+ }
+ return (EDESTADDRREQ);
+ }
+#ifdef INET
+ sin6 = (struct sockaddr_in6 *)addr;
+	if (inp6->inp_flags & IN6P_IPV6_V6ONLY) {
+ /*
+ * if IPV6_V6ONLY flag, we discard datagrams destined to a
+ * v4 addr or v4-mapped addr
+ */
+ if (addr->sa_family == AF_INET) {
+ return EINVAL;
+ }
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ return EINVAL;
+ }
+ }
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ if (!ip6_v6only) {
+ struct sockaddr_in sin;
+
+ /* convert v4-mapped into v4 addr and send */
+ in6_sin6_2_sin(&sin, sin6);
+ return sctp_sendm(so, flags, m, (struct sockaddr *)&sin,
+ control, p);
+ } else {
+ /* mapped addresses aren't enabled */
+ return EINVAL;
+ }
+ }
+#endif /* INET */
+connected_type:
+ /* now what about control */
+ if (control) {
+ if (inp->control) {
+ printf("huh? control set?\n");
+ m_freem(inp->control);
+ inp->control = NULL;
+ }
+ inp->control = control;
+ }
+ /* add it in possibly */
+ if ((inp->pkt) &&
+ (inp->pkt->m_flags & M_PKTHDR)) {
+ struct mbuf *x;
+ int c_len;
+
+ c_len = 0;
+ /* How big is it */
+ for (x = m; x; x = x->m_next) {
+ c_len += x->m_len;
+ }
+ inp->pkt->m_pkthdr.len += c_len;
+ }
+ /* Place the data */
+ if (inp->pkt) {
+ inp->pkt_last->m_next = m;
+ inp->pkt_last = m;
+ } else {
+ inp->pkt_last = inp->pkt = m;
+ }
+	/* FreeBSD and MacOSX use a flag passed in */
+	if ((flags & PRUS_MORETOCOME) == 0) {
+		/*
+		 * note that with the current version this code will only be
+		 * used by OpenBSD; NetBSD and FreeBSD have methods for
+		 * re-defining sosend() to use sctp_sosend(). One can
+		 * optionally switch back to this code (by changing back the
+		 * definitions) but this is not advisable.
+		 */
+ int ret;
+
+ ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
+ inp->pkt = NULL;
+ inp->control = NULL;
+ return (ret);
+ } else {
+ return (0);
+ }
+}
+
+static int
+sctp6_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
+{
+ int s = splnet();
+
+ int error = 0;
+ struct sctp_inpcb *inp;
+ struct in6pcb *inp6;
+ struct sctp_tcb *stcb;
+
+#ifdef INET
+ struct sockaddr_in6 *sin6;
+ struct sockaddr_storage ss;
+
+#endif /* INET */
+
+ inp6 = (struct in6pcb *)so->so_pcb;
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == 0) {
+ splx(s);
+ return (ECONNRESET); /* I made the same as TCP since we are
+ * not setup? */
+ }
+ SCTP_ASOC_CREATE_LOCK(inp);
+ SCTP_INP_RLOCK(inp);
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
+ SCTP_PCB_FLAGS_UNBOUND) {
+		/* Bind an ephemeral port */
+ SCTP_INP_RUNLOCK(inp);
+ error = sctp6_bind(so, NULL, p);
+ if (error) {
+ splx(s);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+
+ return (error);
+ }
+ SCTP_INP_RLOCK(inp);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
+ /* We are already connected AND the TCP model */
+ splx(s);
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ return (EADDRINUSE);
+ }
+#ifdef INET
+ sin6 = (struct sockaddr_in6 *)addr;
+	if (inp6->inp_flags & IN6P_IPV6_V6ONLY) {
+ /*
+ * if IPV6_V6ONLY flag, ignore connections destined to a v4
+ * addr or v4-mapped addr
+ */
+ if (addr->sa_family == AF_INET) {
+ splx(s);
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ return EINVAL;
+ }
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ splx(s);
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ return EINVAL;
+ }
+ }
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ if (!ip6_v6only) {
+ /* convert v4-mapped into v4 addr */
+ in6_sin6_2_sin((struct sockaddr_in *)&ss, sin6);
+ addr = (struct sockaddr *)&ss;
+ } else {
+ /* mapped addresses aren't enabled */
+ splx(s);
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ return EINVAL;
+ }
+ } else
+#endif /* INET */
+ addr = addr; /* for true v6 address case */
+
+ /* Now do we connect? */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_INCR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ }
+
+ if (stcb != NULL) {
+		/* Already have or are bringing up an association */
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_TCB_UNLOCK(stcb);
+ splx(s);
+ return (EALREADY);
+ }
+ /* We are GOOD to go */
+ stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ if (stcb == NULL) {
+ /* Gak! no memory */
+ splx(s);
+ return (error);
+ }
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+ /* Set the connected flag so we can queue data */
+ soisconnecting(so);
+ }
+ stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
+ SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+
+ /* initialize authentication parameters for the assoc */
+ sctp_initialize_auth_params(inp, stcb);
+
+ sctp_send_initiate(inp, stcb);
+ SCTP_TCB_UNLOCK(stcb);
+ splx(s);
+ return error;
+}
+
+static int
+sctp6_getaddr(struct socket *so, struct sockaddr **addr)
+{
+	struct sockaddr_in6 *sin6;
+	struct sctp_inpcb *inp;
+	int error;
+
+ /*
+ * Do the malloc first in case it blocks.
+ */
+ SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(*sin6);
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_FREE_SONAME(sin6);
+ return ECONNRESET;
+ }
+ SCTP_INP_RLOCK(inp);
+ sin6->sin6_port = inp->sctp_lport;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /* For the bound all case you get back 0 */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ struct sctp_tcb *stcb;
+ struct sockaddr_in6 *sin_a6;
+ struct sctp_nets *net;
+ int fnd;
+
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ goto notConn6;
+ }
+ fnd = 0;
+ sin_a6 = NULL;
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ sin_a6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ if (sin_a6->sin6_family == AF_INET6) {
+ fnd = 1;
+ break;
+ }
+ }
+ if ((!fnd) || (sin_a6 == NULL)) {
+ /* punt */
+ goto notConn6;
+ }
+ sin6->sin6_addr = sctp_ipv6_source_address_selection(
+ inp, stcb, (struct route *)&net->ro, net, 0);
+
+ } else {
+ /* For the bound all case you get back 0 */
+ notConn6:
+ memset(&sin6->sin6_addr, 0, sizeof(sin6->sin6_addr));
+ }
+ } else {
+ /* Take the first IPv6 address in the list */
+ struct sctp_laddr *laddr;
+ int fnd = 0;
+
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa->ifa_addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin_a;
+
+ sin_a = (struct sockaddr_in6 *)laddr->ifa->ifa_addr;
+ sin6->sin6_addr = sin_a->sin6_addr;
+ fnd = 1;
+ break;
+ }
+ }
+ if (!fnd) {
+ SCTP_FREE_SONAME(sin6);
+ SCTP_INP_RUNLOCK(inp);
+ return ENOENT;
+ }
+ }
+ SCTP_INP_RUNLOCK(inp);
+ /* Scoping things for v6 */
+ if ((error = sa6_recoverscope(sin6)) != 0)
+ return (error);
+ (*addr) = (struct sockaddr *)sin6;
+ return (0);
+}
+
+static int
+sctp6_peeraddr(struct socket *so, struct sockaddr **addr)
+{
+	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)*addr;
+	int fnd;
+	struct sockaddr_in6 *sin_a6;
+	struct sctp_inpcb *inp;
+	struct sctp_tcb *stcb;
+	struct sctp_nets *net;
+	int error;
+
+ /*
+ * Do the malloc first in case it blocks.
+ */
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
+ /* UDP type and listeners will drop out here */
+ return (ENOTCONN);
+ }
+ SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(*sin6);
+
+	/* We must recapture in case we blocked */
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_FREE_SONAME(sin6);
+ return ECONNRESET;
+ }
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb)
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ if (stcb == NULL) {
+ SCTP_FREE_SONAME(sin6);
+ return ECONNRESET;
+ }
+ fnd = 0;
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ sin_a6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ if (sin_a6->sin6_family == AF_INET6) {
+ fnd = 1;
+ sin6->sin6_port = stcb->rport;
+ sin6->sin6_addr = sin_a6->sin6_addr;
+ break;
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ if (!fnd) {
+		/* No IPv6 address */
+ SCTP_FREE_SONAME(sin6);
+ return ENOENT;
+ }
+ if ((error = sa6_recoverscope(sin6)) != 0)
+ return (error);
+ *addr = (struct sockaddr *)sin6;
+ return (0);
+}
+
+static int
+sctp6_in6getaddr(struct socket *so, struct sockaddr **nam)
+{
+ struct sockaddr *addr;
+
+ struct in6pcb *inp6 = sotoin6pcb(so);
+ int error, s;
+
+ if (inp6 == NULL)
+ return EINVAL;
+
+ s = splnet();
+ /* allow v6 addresses precedence */
+ error = sctp6_getaddr(so, nam);
+ if (error) {
+ /* try v4 next if v6 failed */
+ error = sctp_ingetaddr(so, nam);
+ if (error) {
+ splx(s);
+ return (error);
+ }
+ addr = *nam;
+ /* if I'm V6ONLY, convert it to v4-mapped */
+		if (inp6->inp_flags & IN6P_IPV6_V6ONLY) {
+ struct sockaddr_in6 sin6;
+
+ in6_sin_2_v4mapsin6((struct sockaddr_in *)addr, &sin6);
+ memcpy(addr, &sin6, sizeof(struct sockaddr_in6));
+ }
+ }
+ splx(s);
+ return (error);
+}
+
+
+static int
+sctp6_getpeeraddr(struct socket *so, struct sockaddr **nam)
+{
+ struct sockaddr *addr = *nam;
+
+ struct in6pcb *inp6 = sotoin6pcb(so);
+ int error, s;
+
+ if (inp6 == NULL)
+ return EINVAL;
+
+ s = splnet();
+ /* allow v6 addresses precedence */
+ error = sctp6_peeraddr(so, nam);
+ if (error) {
+ /* try v4 next if v6 failed */
+ error = sctp_peeraddr(so, nam);
+ if (error) {
+ splx(s);
+ return (error);
+ }
+ /* if I'm V6ONLY, convert it to v4-mapped */
+		if (inp6->inp_flags & IN6P_IPV6_V6ONLY) {
+ struct sockaddr_in6 sin6;
+
+ in6_sin_2_v4mapsin6((struct sockaddr_in *)addr, &sin6);
+ memcpy(addr, &sin6, sizeof(struct sockaddr_in6));
+ }
+ }
+ splx(s);
+ return error;
+}
+
+struct pr_usrreqs sctp6_usrreqs = {
+ .pru_abort = sctp6_abort,
+ .pru_accept = sctp_accept,
+ .pru_attach = sctp6_attach,
+ .pru_bind = sctp6_bind,
+ .pru_connect = sctp6_connect,
+ .pru_control = in6_control,
+ .pru_close = sctp6_close,
+ .pru_detach = sctp6_close,
+ .pru_sopoll = sopoll_generic,
+ .pru_disconnect = sctp6_disconnect,
+ .pru_listen = sctp_listen,
+ .pru_peeraddr = sctp6_getpeeraddr,
+ .pru_send = sctp6_send,
+ .pru_shutdown = sctp_shutdown,
+ .pru_sockaddr = sctp6_in6getaddr,
+ .pru_sosend = sctp_sosend,
+ .pru_soreceive = sctp_soreceive
+};
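+
+/*
+ * With this switch table and the inet6sw[] entries added in in6_proto.c,
+ * each of the three registered socket types resolves to the handlers
+ * above; illustrative userland entry points:
+ *
+ *	socket(AF_INET6, SOCK_SEQPACKET, IPPROTO_SCTP);	one-to-many style
+ *	socket(AF_INET6, SOCK_STREAM, IPPROTO_SCTP);	one-to-one style
+ *	socket(AF_INET6, SOCK_DGRAM, IPPROTO_SCTP);	also registered above
+ */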
diff --git a/sys/netinet6/sctp6_var.h b/sys/netinet6/sctp6_var.h
new file mode 100644
index 0000000..27c849b
--- /dev/null
+++ b/sys/netinet6/sctp6_var.h
@@ -0,0 +1,52 @@
+/*-
+ * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* $KAME: sctp6_var.h,v 1.7 2004/08/17 04:06:22 itojun Exp $ */
+/* $FreeBSD$ */
+#ifndef _NETINET6_SCTP6_VAR_H_
+#define _NETINET6_SCTP6_VAR_H_
+
+#if defined(_KERNEL)
+
+SYSCTL_DECL(_net_inet6_sctp6);
+extern struct pr_usrreqs sctp6_usrreqs;
+int sctp6_ctloutput(struct socket *, struct sockopt *);
+int sctp6_input(struct mbuf **, int *, int);
+int sctp6_output(struct sctp_inpcb *, struct mbuf *, struct sockaddr *,
+    struct mbuf *, struct proc *);
+void sctp6_ctlinput(int, struct sockaddr *, void *);
+
+#endif /* _KERNEL */
+#endif
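
The prototypes above are consumed by the IPv6 protocol switch. The authoritative hookup lives in this commit's in6_proto.c hunk (not shown in this excerpt); the following is only a rough, kernel-context sketch of what such an entry looks like, with field names from struct ip6protosw and illustrative values (the real entry also sets pr_domain to &inet6domain):

#include <sys/param.h>
#include <sys/socket.h>
#include <sys/protosw.h>
#include <netinet/in.h>
#include <netinet6/ip6protosw.h>
#include <netinet6/sctp6_var.h>

/* Hedged sketch only -- see in6_proto.c in this commit for the real entry. */
struct ip6protosw sctp6_seqpacket_protosw = {
	.pr_type =	SOCK_SEQPACKET,
	.pr_protocol =	IPPROTO_SCTP,
	.pr_flags =	PR_WANTRCVD,
	.pr_input =	sctp6_input,
	.pr_ctlinput =	sctp6_ctlinput,
	.pr_ctloutput =	sctp6_ctloutput,
	.pr_usrreqs =	&sctp6_usrreqs,
};
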
diff --git a/sys/sys/mbuf.h b/sys/sys/mbuf.h
index e9cc353..fe0042b 100644
--- a/sys/sys/mbuf.h
+++ b/sys/sys/mbuf.h
@@ -169,6 +169,7 @@ struct mbuf {
#define M_PROTO3 0x0040 /* protocol-specific */
#define M_PROTO4 0x0080 /* protocol-specific */
#define M_PROTO5 0x0100 /* protocol-specific */
+#define M_NOTIFICATION 0x2000 /* SCTP notification */
#define M_SKIP_FIREWALL 0x4000 /* skip firewall processing */
#define M_FREELIST 0x8000 /* mbuf is on the free list */
diff --git a/sys/sys/socket.h b/sys/sys/socket.h
index ff78332..8d0bab2 100644
--- a/sys/sys/socket.h
+++ b/sys/sys/socket.h
@@ -394,6 +394,7 @@ struct msghdr {
#define MSG_TRUNC 0x10 /* data discarded before delivery */
#define MSG_CTRUNC 0x20 /* control data lost before delivery */
#define MSG_WAITALL 0x40 /* wait for full request or error */
+#define MSG_NOTIFICATION 0x2000 /* SCTP notification */
#if __BSD_VISIBLE
#define MSG_DONTWAIT 0x80 /* this message should be nonblocking */
#define MSG_EOF 0x100 /* data completes connection */
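
Note that MSG_NOTIFICATION here and M_NOTIFICATION in the mbuf.h hunk above share the value 0x2000, so the per-mbuf marking can surface in msg_flags unchanged. An illustrative userland sketch, not part of the commit, of how an application tells SCTP event notifications apart from peer data (notification delivery must first be enabled with the SCTP_EVENTS socket option; union sctp_notification comes from this commit's sctp_uio.h):

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>
#include <stdio.h>
#include <string.h>

static void
read_one(int sd)
{
	char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg;
	ssize_t n;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	n = recvmsg(sd, &msg, 0);
	if (n <= 0)
		return;
	if (msg.msg_flags & MSG_NOTIFICATION) {
		/* An SCTP event, not peer data. */
		union sctp_notification *sn;

		sn = (union sctp_notification *)buf;
		printf("notification, type %u\n", sn->sn_header.sn_type);
	} else
		printf("%zd bytes of user data\n", n);
}
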
diff --git a/sys/sys/syscall.h b/sys/sys/syscall.h
index 701758c..1be8e9f 100644
--- a/sys/sys/syscall.h
+++ b/sys/sys/syscall.h
@@ -390,4 +390,8 @@
#define SYS_thr_set_name 464
#define SYS_aio_fsync 465
#define SYS_rtprio_thread 466
-#define SYS_MAXSYSCALL 471
+#define SYS_sctp_peeloff 471
+#define SYS_sctp_generic_sendmsg 472
+#define SYS_sctp_generic_sendmsg_iov 473
+#define SYS_sctp_generic_recvmsg 474
+#define SYS_MAXSYSCALL 475
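
Of the four new syscalls, sctp_peeloff is the one with a direct socket-API counterpart: it detaches a single association from a one-to-many (SOCK_SEQPACKET) SCTP socket into its own one-to-one descriptor. A hedged usage sketch of the like-named library wrapper; the assoc_id would normally come from an SCTP_ASSOC_CHANGE notification and is a placeholder here:

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>

/* Returns a one-to-one descriptor for the association, or -1. */
int
peel(int one_to_many_sd, sctp_assoc_t assoc_id)
{
	int sd;

	sd = sctp_peeloff(one_to_many_sd, assoc_id);
	if (sd == -1)
		return (-1);
	/* sd now behaves like a connected one-to-one SCTP socket. */
	return (sd);
}
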
diff --git a/sys/sys/syscall.mk b/sys/sys/syscall.mk
index 20660a6..acd6498 100644
--- a/sys/sys/syscall.mk
+++ b/sys/sys/syscall.mk
@@ -331,4 +331,8 @@ MIASM = \
abort2.o \
thr_set_name.o \
aio_fsync.o \
- rtprio_thread.o
+ rtprio_thread.o \
+ sctp_peeloff.o \
+ sctp_generic_sendmsg.o \
+ sctp_generic_sendmsg_iov.o \
+ sctp_generic_recvmsg.o
diff --git a/sys/sys/sysproto.h b/sys/sys/sysproto.h
index fad9251..0e1bbbb 100644
--- a/sys/sys/sysproto.h
+++ b/sys/sys/sysproto.h
@@ -1451,6 +1451,37 @@ struct rtprio_thread_args {
char lwpid_l_[PADL_(lwpid_t)]; lwpid_t lwpid; char lwpid_r_[PADR_(lwpid_t)];
char rtp_l_[PADL_(struct rtprio *)]; struct rtprio * rtp; char rtp_r_[PADR_(struct rtprio *)];
};
+struct sctp_peeloff_args {
+ char sd_l_[PADL_(int)]; int sd; char sd_r_[PADR_(int)];
+ char name_l_[PADL_(uint32_t)]; uint32_t name; char name_r_[PADR_(uint32_t)];
+};
+struct sctp_generic_sendmsg_args {
+ char sd_l_[PADL_(int)]; int sd; char sd_r_[PADR_(int)];
+ char msg_l_[PADL_(caddr_t)]; caddr_t msg; char msg_r_[PADR_(caddr_t)];
+ char mlen_l_[PADL_(int)]; int mlen; char mlen_r_[PADR_(int)];
+ char to_l_[PADL_(caddr_t)]; caddr_t to; char to_r_[PADR_(caddr_t)];
+ char tolen_l_[PADL_(__socklen_t)]; __socklen_t tolen; char tolen_r_[PADR_(__socklen_t)];
+ char sinfo_l_[PADL_(struct sctp_sndrcvinfo *)]; struct sctp_sndrcvinfo * sinfo; char sinfo_r_[PADR_(struct sctp_sndrcvinfo *)];
+ char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)];
+};
+struct sctp_generic_sendmsg_iov_args {
+ char sd_l_[PADL_(int)]; int sd; char sd_r_[PADR_(int)];
+ char iov_l_[PADL_(struct iovec *)]; struct iovec * iov; char iov_r_[PADR_(struct iovec *)];
+ char iovlen_l_[PADL_(int)]; int iovlen; char iovlen_r_[PADR_(int)];
+ char to_l_[PADL_(caddr_t)]; caddr_t to; char to_r_[PADR_(caddr_t)];
+ char tolen_l_[PADL_(__socklen_t)]; __socklen_t tolen; char tolen_r_[PADR_(__socklen_t)];
+ char sinfo_l_[PADL_(struct sctp_sndrcvinfo *)]; struct sctp_sndrcvinfo * sinfo; char sinfo_r_[PADR_(struct sctp_sndrcvinfo *)];
+ char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)];
+};
+struct sctp_generic_recvmsg_args {
+ char sd_l_[PADL_(int)]; int sd; char sd_r_[PADR_(int)];
+ char iov_l_[PADL_(struct iovec *)]; struct iovec * iov; char iov_r_[PADR_(struct iovec *)];
+ char iovlen_l_[PADL_(int)]; int iovlen; char iovlen_r_[PADR_(int)];
+ char from_l_[PADL_(struct sockaddr *)]; struct sockaddr * from; char from_r_[PADR_(struct sockaddr *)];
+ char fromlenaddr_l_[PADL_(__socklen_t *)]; __socklen_t * fromlenaddr; char fromlenaddr_r_[PADR_(__socklen_t *)];
+ char sinfo_l_[PADL_(struct sctp_sndrcvinfo *)]; struct sctp_sndrcvinfo * sinfo; char sinfo_r_[PADR_(struct sctp_sndrcvinfo *)];
+ char msg_flags_l_[PADL_(int *)]; int * msg_flags; char msg_flags_r_[PADR_(int *)];
+};
int nosys(struct thread *, struct nosys_args *);
void sys_exit(struct thread *, struct sys_exit_args *);
int fork(struct thread *, struct fork_args *);
@@ -1779,6 +1810,10 @@ int abort2(struct thread *, struct abort2_args *);
int thr_set_name(struct thread *, struct thr_set_name_args *);
int aio_fsync(struct thread *, struct aio_fsync_args *);
int rtprio_thread(struct thread *, struct rtprio_thread_args *);
+int sctp_peeloff(struct thread *, struct sctp_peeloff_args *);
+int sctp_generic_sendmsg(struct thread *, struct sctp_generic_sendmsg_args *);
+int sctp_generic_sendmsg_iov(struct thread *, struct sctp_generic_sendmsg_iov_args *);
+int sctp_generic_recvmsg(struct thread *, struct sctp_generic_recvmsg_args *);
#ifdef COMPAT_43
@@ -2326,6 +2361,10 @@ int freebsd4_sigreturn(struct thread *, struct freebsd4_sigreturn_args *);
#define SYS_AUE_thr_set_name AUE_NULL
#define SYS_AUE_aio_fsync AUE_NULL
#define SYS_AUE_rtprio_thread AUE_RTPRIO
+#define SYS_AUE_sctp_peeloff AUE_NULL
+#define SYS_AUE_sctp_generic_sendmsg AUE_NULL
+#define SYS_AUE_sctp_generic_sendmsg_iov AUE_NULL
+#define SYS_AUE_sctp_generic_recvmsg AUE_NULL
#undef PAD_
#undef PADL_
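
The sctp_generic_sendmsg* and sctp_generic_recvmsg declarations above are the kernel side of the sctp_sendmsg(3)/sctp_recvmsg(3) wrappers that the commit message defers to libsctp.a. A hedged sketch of send-side usage; the parameter names follow struct sctp_sndrcvinfo from this commit's sctp_uio.h, and sctp_sendmsg() is the library wrapper expected to funnel into sctp_generic_sendmsg(2):

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>

/*
 * Send one message on a given SCTP stream.  The ppid, flags,
 * time-to-live, and context arguments are left at zero for brevity.
 */
ssize_t
send_on_stream(int sd, const void *data, size_t len,
    const struct sockaddr *to, socklen_t tolen, uint16_t stream)
{
	return (sctp_sendmsg(sd, data, len, to, tolen,
	    0 /* ppid */, 0 /* flags */, stream, 0 /* ttl */,
	    0 /* context */));
}
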