author    rrs <rrs@FreeBSD.org>  2008-12-06 13:19:54 +0000
committer rrs <rrs@FreeBSD.org>  2008-12-06 13:19:54 +0000
commit    0f2b9dafa358fb3b64efed4842cd31650ceea672 (patch)
tree      773b646ae7f5af08e1421071f2aa493698537042
parent    f4594595d3de298d45caf389d9cee0d893cedfc0 (diff)
Code from the hack-session known as the IETF (and a
bit of debugging afterwards):

- Fix protection code for notification generation.
- Decouple associd from vtag.
- Allow vtags to have less stringent uniqueness requirements:
  o don't pre-hash them when you issue one in a cookie.
  o allow duplicates and use addresses and ports to discriminate
    amongst the duplicates during lookup.
- Add support for the NAT draft draft-ietf-behave-sctpnat-00. This is
  still experimental and needs more extensive testing with the
  Jason Butt ipfw changes.
- Support for the SENDER_DRY event to get DTLS in OpenSSL working,
  with a set of patches from Michael Tuexen (hopefully heading to
  OpenSSL soon).
- Update the support of SCTP-AUTH by Peter Lei.
- Use macros for refcounting.
- Fix MTU for UDP encapsulation.
- Fix reporting back of unsent data.
- Update assoc send counter handling to be consistent with the
  endpoint sent counter.
- Fix a bug in PR-SCTP.
- Fix so we only send another FWD-TSN when a SACK arrives if and only
  if the adv-peer-ack point progressed. However, we still make sure a
  timer is running if we do have an adv_peer_ack point.
- Fix a PR-SCTP bug where chunks were retransmitted if they were sent
  unreliably but not abandoned yet.

With the help of: Michael Tuexen and Peter Lei :-)
MFC after: 4 weeks
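Several of the items above (key refcounting in particular) rest on an
atomic decrement-and-test idiom wrapped by the new
SCTP_DECREMENT_AND_CHECK_REFCOUNT macro used in the sctp_auth.c hunk
below. A minimal sketch of that idiom, assuming FreeBSD's atomic(9)
primitives; the in-tree macro in sctp_os_bsd.h may differ in detail:

#include <sys/types.h>
#include <machine/atomic.h>

/*
 * Sketch only: atomically drop one reference and report whether the
 * caller held the last one.  atomic_fetchadd_int(9) returns the value
 * the counter had *before* the add, so a prior value of 1 means the
 * count just reached zero and the object may be freed.
 */
static __inline int
refcount_release_sketch(volatile u_int *refcount)
{
	return (atomic_fetchadd_int(refcount, -1) == 1);
}

In the hunks below, sctp_free_sharedkey() frees the key only when this
test fires; otherwise some in-flight chunk still holds a reference.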
-rw-r--r--sys/netinet/sctp.h25
-rw-r--r--sys/netinet/sctp_asconf.c198
-rw-r--r--sys/netinet/sctp_asconf.h4
-rw-r--r--sys/netinet/sctp_auth.c490
-rw-r--r--sys/netinet/sctp_auth.h81
-rw-r--r--sys/netinet/sctp_constants.h101
-rw-r--r--sys/netinet/sctp_header.h41
-rw-r--r--sys/netinet/sctp_indata.c2796
-rw-r--r--sys/netinet/sctp_indata.h10
-rw-r--r--sys/netinet/sctp_input.c378
-rw-r--r--sys/netinet/sctp_os_bsd.h21
-rw-r--r--sys/netinet/sctp_output.c2614
-rw-r--r--sys/netinet/sctp_output.h5
-rw-r--r--sys/netinet/sctp_pcb.c421
-rw-r--r--sys/netinet/sctp_pcb.h23
-rw-r--r--sys/netinet/sctp_structs.h23
-rw-r--r--sys/netinet/sctp_sysctl.c16
-rw-r--r--sys/netinet/sctp_sysctl.h16
-rw-r--r--sys/netinet/sctp_timer.c18
-rw-r--r--sys/netinet/sctp_uio.h32
-rw-r--r--sys/netinet/sctp_usrreq.c139
-rw-r--r--sys/netinet/sctp_var.h71
-rw-r--r--sys/netinet/sctputil.c272
-rw-r--r--sys/netinet/sctputil.h4
-rw-r--r--sys/netinet6/sctp6_usrreq.c3
25 files changed, 6917 insertions, 885 deletions
diff --git a/sys/netinet/sctp.h b/sys/netinet/sctp.h
index 6dfe5d4..afbf5ea 100644
--- a/sys/netinet/sctp.h
+++ b/sys/netinet/sctp.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -111,6 +111,7 @@ struct sctp_paramhdr {
/* explict EOR signalling */
#define SCTP_EXPLICIT_EOR 0x0000001b
#define SCTP_REUSE_PORT 0x0000001c /* rw */
+#define SCTP_AUTH_DEACTIVATE_KEY 0x0000001d
/*
* read-only options
@@ -154,6 +155,8 @@ struct sctp_paramhdr {
/* CMT ON/OFF socket option */
#define SCTP_CMT_ON_OFF 0x00001200
#define SCTP_CMT_USE_DAC 0x00001201
+/* EY - NR_SACK on/off socket option */
+#define SCTP_NR_SACK_ON_OFF 0x00001300
/* JRS - Pluggable Congestion Control Socket option */
#define SCTP_PLUGGABLE_CC 0x00001202
@@ -293,11 +296,15 @@ struct sctp_paramhdr {
#define SCTP_CAUSE_PROTOCOL_VIOLATION 0x000d
/* Error causes from RFC5061 */
-#define SCTP_CAUSE_DELETING_LAST_ADDR 0xa0
-#define SCTP_CAUSE_RESOURCE_SHORTAGE 0xa1
-#define SCTP_CAUSE_DELETING_SRC_ADDR 0xa2
-#define SCTP_CAUSE_ILLEGAL_ASCONF_ACK 0xa3
-#define SCTP_CAUSE_REQUEST_REFUSED 0xa4
+#define SCTP_CAUSE_DELETING_LAST_ADDR 0x00a0
+#define SCTP_CAUSE_RESOURCE_SHORTAGE 0x00a1
+#define SCTP_CAUSE_DELETING_SRC_ADDR 0x00a2
+#define SCTP_CAUSE_ILLEGAL_ASCONF_ACK 0x00a3
+#define SCTP_CAUSE_REQUEST_REFUSED 0x00a4
+
+/* Error causes from nat-draft */
+#define SCTP_CAUSE_NAT_COLLIDING_STATE 0x00b0
+#define SCTP_CAUSE_NAT_MISSING_STATE 0x00b1
/* Error causes from RFC4895 */
#define SCTP_CAUSE_UNSUPPORTED_HMACID 0x0105
@@ -364,6 +371,8 @@ struct sctp_error_unrecognized_chunk {
#define SCTP_SHUTDOWN_COMPLETE 0x0e
/* RFC4895 */
#define SCTP_AUTHENTICATION 0x0f
+/* EY nr_sack chunk id*/
+#define SCTP_NR_SELECTIVE_ACK 0x10
/************0x40 series ***********/
/************0x80 series ***********/
/* RFC5061 */
@@ -406,6 +415,9 @@ struct sctp_error_unrecognized_chunk {
/* ECN Nonce: SACK Chunk Specific Flags */
#define SCTP_SACK_NONCE_SUM 0x01
+/* EY nr_sack all bit - All bit is the 2nd LSB of nr_sack chunk flags*/
+/* if the All bit is set in an nr-sack chunk, then all gap acks in it are also nr gap acks */
+#define SCTP_NR_SACK_ALL_BIT 0x02
/* CMT DAC algorithm SACK flag */
#define SCTP_SACK_CMT_DAC 0x80
@@ -467,6 +479,7 @@ struct sctp_error_unrecognized_chunk {
#define SCTP_PCB_FLAGS_NEEDS_MAPPED_V4 0x00800000
#define SCTP_PCB_FLAGS_MULTIPLE_ASCONFS 0x01000000
#define SCTP_PCB_FLAGS_PORTREUSE 0x02000000
+#define SCTP_PCB_FLAGS_DRYEVNT 0x04000000
/*-
* mobility_features parameters (by micchie).Note
* these features are applied against the
diff --git a/sys/netinet/sctp_asconf.c b/sys/netinet/sctp_asconf.c
index fd11e90..c38c344 100644
--- a/sys/netinet/sctp_asconf.c
+++ b/sys/netinet/sctp_asconf.c
@@ -761,6 +761,9 @@ sctp_handle_asconf(struct mbuf *m, unsigned int offset,
m_result = sctp_process_asconf_set_primary(m, aph,
stcb, error);
break;
+ case SCTP_NAT_VTAGS:
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: sees a NAT VTAG state parameter\n");
+ break;
case SCTP_SUCCESS_REPORT:
/* not valid in an ASCONF chunk */
break;
@@ -1349,6 +1352,7 @@ sctp_asconf_queue_mgmt(struct sctp_tcb *stcb, struct sctp_ifa *ifa,
SCTPDBG(SCTP_DEBUG_ASCONF1, "asconf_queue_mgmt: failed to get memory!\n");
return (-1);
}
+ aa->special_del = 0;
/* fill in asconf address parameter fields */
/* top level elements are "networked" during send */
aa->ap.aph.ph.param_type = type;
@@ -1555,6 +1559,7 @@ sctp_asconf_queue_sa_delete(struct sctp_tcb *stcb, struct sockaddr *sa)
"sctp_asconf_queue_sa_delete: failed to get memory!\n");
return (-1);
}
+ aa->special_del = 0;
/* fill in asconf address parameter fields */
/* top level elements are "networked" during send */
aa->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS;
@@ -2691,6 +2696,7 @@ sctp_compose_asconf(struct sctp_tcb *stcb, int *retlen, int addr_locked)
* case)
*/
if (lookup_used == 0 &&
+ (aa->special_del == 0) &&
aa->ap.aph.ph.param_type == SCTP_DEL_IP_ADDRESS) {
struct sctp_ipv6addr_param *lookup;
uint16_t p_size, addr_size;
@@ -3234,3 +3240,195 @@ sctp_addr_mgmt_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa,
}
return (0);
}
+
+void
+sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_asconf_addr *aa;
+ struct sctp_ifa *sctp_ifap;
+ struct sctp_asconf_tag_param *vtag;
+ struct sockaddr_in *to;
+
+#ifdef INET6
+ struct sockaddr_in6 *to6;
+
+#endif
+ if (net == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: Missing net\n");
+ return;
+ }
+ if (stcb == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: Missing stcb\n");
+ return;
+ }
+ /*
+ * Need to have in the asconf: - vtagparam(my_vtag/peer_vtag) -
+ * add(0.0.0.0) - del(0.0.0.0) - Any global addresses add(addr)
+ */
+ SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
+ SCTP_M_ASC_ADDR);
+ if (aa == NULL) {
+ /* didn't get memory */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "sctp_asconf_send_nat_state_update: failed to get memory!\n");
+ return;
+ }
+ aa->special_del = 0;
+ /* fill in asconf address parameter fields */
+ /* top level elements are "networked" during send */
+ aa->ifa = NULL;
+ aa->sent = 0; /* clear sent flag */
+ vtag = (struct sctp_asconf_tag_param *)&aa->ap.aph;
+ vtag->aph.ph.param_type = SCTP_NAT_VTAGS;
+ vtag->aph.ph.param_length = sizeof(struct sctp_asconf_tag_param);
+ vtag->local_vtag = htonl(stcb->asoc.my_vtag);
+ vtag->remote_vtag = htonl(stcb->asoc.peer_vtag);
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+
+ SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
+ SCTP_M_ASC_ADDR);
+ if (aa == NULL) {
+ /* didn't get memory */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "sctp_asconf_send_nat_state_update: failed to get memory!\n");
+ return;
+ }
+ memset(aa, 0, sizeof(struct sctp_asconf_addr));
+ /* fill in asconf address parameter fields */
+ /* ADD(0.0.0.0) */
+ if (net->ro._l_addr.sa.sa_family == AF_INET) {
+ aa->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS;
+ aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addrv4_param);
+ aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
+ aa->ap.addrp.ph.param_length = sizeof(struct sctp_ipv4addr_param);
+ /* No need to add an address, we are using 0.0.0.0 */
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+ }
+#ifdef INET6
+ else if (net->ro._l_addr.sa.sa_family == AF_INET6) {
+ aa->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS;
+ aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addr_param);
+ aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
+ aa->ap.addrp.ph.param_length = sizeof(struct sctp_ipv6addr_param);
+ /* No need to add an address, we are using 0.0.0.0 */
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+ }
+#endif /* INET6 */
+ SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
+ SCTP_M_ASC_ADDR);
+ if (aa == NULL) {
+ /* didn't get memory */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "sctp_asconf_send_nat_state_update: failed to get memory!\n");
+ return;
+ }
+ memset(aa, 0, sizeof(struct sctp_asconf_addr));
+ /* fill in asconf address parameter fields */
+	/* DEL(0.0.0.0) */
+	if (net->ro._l_addr.sa.sa_family == AF_INET) {
+		aa->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS;
+ aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addrv4_param);
+ aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
+ aa->ap.addrp.ph.param_length = sizeof(struct sctp_ipv4addr_param);
+ /* No need to add an address, we are using 0.0.0.0 */
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+ }
+#ifdef INET6
+ else if (net->ro._l_addr.sa.sa_family == AF_INET6) {
+ aa->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS;
+ aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addr_param);
+ aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
+ aa->ap.addrp.ph.param_length = sizeof(struct sctp_ipv6addr_param);
+ /* No need to add an address, we are using 0.0.0.0 */
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+ }
+#endif /* INET6 */
+ /* Now we must hunt the addresses and add all global addresses */
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ struct sctp_vrf *vrf = NULL;
+ struct sctp_ifn *sctp_ifnp;
+ uint32_t vrf_id;
+
+ vrf_id = stcb->sctp_ep->def_vrf_id;
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+ goto skip_rest;
+ }
+ SCTP_IPI_ADDR_RLOCK();
+ LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
+ LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
+ if (sctp_ifap->address.sa.sa_family == AF_INET) {
+ to = &sctp_ifap->address.sin;
+
+ if (IN4_ISPRIVATE_ADDRESS(&to->sin_addr)) {
+ continue;
+ }
+ if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
+ continue;
+ }
+ }
+#ifdef INET6
+ else if (sctp_ifap->address.sa.sa_family == AF_INET6) {
+ to6 = &sctp_ifap->address.sin6;
+ if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
+ continue;
+ }
+ if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
+ continue;
+ }
+ }
+#endif
+ sctp_asconf_queue_mgmt(stcb, sctp_ifap, SCTP_ADD_IP_ADDRESS);
+ }
+ }
+ SCTP_IPI_ADDR_RUNLOCK();
+ } else {
+ struct sctp_laddr *laddr;
+
+ LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ continue;
+ }
+ if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
+ /*
+ * Address being deleted by the system, dont
+				 * Address being deleted by the system, don't
+ */
+ continue;
+ if (laddr->action == SCTP_DEL_IP_ADDRESS) {
+ /*
+ * Address being deleted on this ep don't
+ * list.
+ */
+ continue;
+ }
+ sctp_ifap = laddr->ifa;
+ if (sctp_ifap->address.sa.sa_family == AF_INET) {
+ to = &sctp_ifap->address.sin;
+
+ if (IN4_ISPRIVATE_ADDRESS(&to->sin_addr)) {
+ continue;
+ }
+ if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
+ continue;
+ }
+ }
+#ifdef INET6
+ else if (sctp_ifap->address.sa.sa_family == AF_INET6) {
+ to6 = &sctp_ifap->address.sin6;
+ if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
+ continue;
+ }
+ if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
+ continue;
+ }
+ }
+#endif
+ sctp_asconf_queue_mgmt(stcb, sctp_ifap, SCTP_ADD_IP_ADDRESS);
+ }
+ }
+skip_rest:
+ /* Now we must send the asconf into the queue */
+ sctp_send_asconf(stcb, net, 0);
+}
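The function just added composes exactly the sequence its header
comment promises — the vtag pair, ADD(0.0.0.0), DEL(0.0.0.0), then any
global addresses — and hands the result to sctp_send_asconf(). A
hypothetical caller-side sketch (the handler name is illustrative, not
the in-tree dispatch) of reacting to the SCTP_CAUSE_NAT_MISSING_STATE
error cause defined in the sctp.h hunk above:

/*
 * Hypothetical sketch: a peer reporting missing NAT state is answered
 * by re-installing our state via the ASCONF composed above.
 */
static void
nat_missing_state_sketch(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* queues vtags + ADD/DEL wildcard + global addrs, then sends */
	sctp_asconf_send_nat_state_update(stcb, net);
}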
diff --git a/sys/netinet/sctp_asconf.h b/sys/netinet/sctp_asconf.h
index 1a3f7ff..9622871 100644
--- a/sys/netinet/sctp_asconf.h
+++ b/sys/netinet/sctp_asconf.h
@@ -86,6 +86,10 @@ extern void
extern void
sctp_net_immediate_retrans(struct sctp_tcb *, struct sctp_nets *);
+extern void
+sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb,
+ struct sctp_nets *net);
+
extern int
sctp_is_addr_pending(struct sctp_tcb *, struct sctp_ifa *);
diff --git a/sys/netinet/sctp_auth.c b/sys/netinet/sctp_auth.c
index 40467b2..71715cc 100644
--- a/sys/netinet/sctp_auth.c
+++ b/sys/netinet/sctp_auth.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -359,9 +359,11 @@ sctp_set_key(uint8_t * key, uint32_t keylen)
return (new_key);
}
-/*
+/*-
* given two keys of variable size, compute which key is "larger/smaller"
- * returns: 1 if key1 > key2 -1 if key1 < key2 0 if key1 = key2
+ * returns: 1 if key1 > key2
+ * -1 if key1 < key2
+ * 0 if key1 = key2
*/
static int
sctp_compare_key(sctp_key_t * key1, sctp_key_t * key2)
@@ -531,13 +533,18 @@ sctp_alloc_sharedkey(void)
}
new_key->keyid = 0;
new_key->key = NULL;
+ new_key->refcount = 1;
+ new_key->deactivated = 0;
return (new_key);
}
void
sctp_free_sharedkey(sctp_sharedkey_t * skey)
{
- if (skey != NULL) {
+ if (skey == NULL)
+ return;
+
+ if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&skey->refcount)) {
if (skey->key != NULL)
sctp_free_key(skey->key);
SCTP_FREE(skey, SCTP_M_AUTH_KY);
@@ -556,40 +563,93 @@ sctp_find_sharedkey(struct sctp_keyhead *shared_keys, uint16_t key_id)
return (NULL);
}
-void
+int
sctp_insert_sharedkey(struct sctp_keyhead *shared_keys,
sctp_sharedkey_t * new_skey)
{
sctp_sharedkey_t *skey;
if ((shared_keys == NULL) || (new_skey == NULL))
- return;
+ return (EINVAL);
/* insert into an empty list? */
if (SCTP_LIST_EMPTY(shared_keys)) {
LIST_INSERT_HEAD(shared_keys, new_skey, next);
- return;
+ return (0);
}
/* insert into the existing list, ordered by key id */
LIST_FOREACH(skey, shared_keys, next) {
if (new_skey->keyid < skey->keyid) {
/* insert it before here */
LIST_INSERT_BEFORE(skey, new_skey, next);
- return;
+ return (0);
} else if (new_skey->keyid == skey->keyid) {
/* replace the existing key */
+ /* verify this key *can* be replaced */
+ if ((skey->deactivated) && (skey->refcount > 1)) {
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "can't replace shared key id %u\n",
+ new_skey->keyid);
+ return (EBUSY);
+ }
SCTPDBG(SCTP_DEBUG_AUTH1,
"replacing shared key id %u\n",
new_skey->keyid);
LIST_INSERT_BEFORE(skey, new_skey, next);
LIST_REMOVE(skey, next);
sctp_free_sharedkey(skey);
- return;
+ return (0);
}
if (LIST_NEXT(skey, next) == NULL) {
/* belongs at the end of the list */
LIST_INSERT_AFTER(skey, new_skey, next);
- return;
+ return (0);
+ }
+ }
+ /* shouldn't reach here */
+ return (0);
+}
+
+void
+sctp_auth_key_acquire(struct sctp_tcb *stcb, uint16_t key_id)
+{
+ sctp_sharedkey_t *skey;
+
+ /* find the shared key */
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, key_id);
+
+ /* bump the ref count */
+ if (skey) {
+ atomic_add_int(&skey->refcount, 1);
+ SCTPDBG(SCTP_DEBUG_AUTH2,
+ "%s: stcb %p key %u refcount acquire to %d\n",
+ __FUNCTION__, stcb, key_id, skey->refcount);
+ }
+}
+
+void
+sctp_auth_key_release(struct sctp_tcb *stcb, uint16_t key_id)
+{
+ sctp_sharedkey_t *skey;
+
+ /* find the shared key */
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, key_id);
+
+ /* decrement the ref count */
+ if (skey) {
+ sctp_free_sharedkey(skey);
+ SCTPDBG(SCTP_DEBUG_AUTH2,
+ "%s: stcb %p key %u refcount release to %d\n",
+ __FUNCTION__, stcb, key_id, skey->refcount);
+
+ /* see if a notification should be generated */
+ if ((skey->refcount <= 1) && (skey->deactivated)) {
+ /* notify ULP that key is no longer used */
+ sctp_ulp_notify(SCTP_NOTIFY_AUTH_FREE_KEY, stcb,
+ key_id, 0, SCTP_SO_NOT_LOCKED);
+ SCTPDBG(SCTP_DEBUG_AUTH2,
+ "%s: stcb %p key %u no longer used, %d\n",
+ __FUNCTION__, stcb, key_id, skey->refcount);
}
}
}
@@ -623,7 +683,7 @@ sctp_copy_skeylist(const struct sctp_keyhead *src, struct sctp_keyhead *dest)
LIST_FOREACH(skey, src, next) {
new_skey = sctp_copy_sharedkey(skey);
if (new_skey != NULL) {
- sctp_insert_sharedkey(dest, new_skey);
+ (void)sctp_insert_sharedkey(dest, new_skey);
count++;
}
}
@@ -727,9 +787,9 @@ sctp_default_supported_hmaclist(void)
return (new_list);
}
-/*
- * HMAC algos are listed in priority/preference order find the best HMAC id
- * to use for the peer based on local support
+/*-
+ * HMAC algos are listed in priority/preference order
+ * find the best HMAC id to use for the peer based on local support
*/
uint16_t
sctp_negotiate_hmacid(sctp_hmaclist_t * peer, sctp_hmaclist_t * local)
@@ -760,9 +820,9 @@ sctp_negotiate_hmacid(sctp_hmaclist_t * peer, sctp_hmaclist_t * local)
return (SCTP_AUTH_HMAC_ID_RSVD);
}
-/*
- * serialize the HMAC algo list and return space used caller must guarantee
- * ptr has appropriate space
+/*-
+ * serialize the HMAC algo list and return space used
+ * caller must guarantee ptr has appropriate space
*/
int
sctp_serialize_hmaclist(sctp_hmaclist_t * list, uint8_t * ptr)
@@ -994,7 +1054,7 @@ sctp_hmac_final(uint16_t hmac_algo, sctp_hash_context_t * ctx,
} /* end switch */
}
-/*
+/*-
* Keyed-Hashing for Message Authentication: FIPS 198 (RFC 2104)
*
* Compute the HMAC digest using the desired hash key, text, and HMAC
@@ -1142,9 +1202,10 @@ sctp_hmac_m(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
return (digestlen);
}
-/*
+/*-
* verify the HMAC digest using the desired hash key, text, and HMAC
- * algorithm. Returns -1 on error, 0 on success.
+ * algorithm.
+ * Returns -1 on error, 0 on success.
*/
int
sctp_verify_hmac(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
@@ -1263,10 +1324,10 @@ sctp_auth_is_supported_hmac(sctp_hmaclist_t * list, uint16_t id)
}
-/*
- * clear any cached key(s) if they match the given key id on an association
- * the cached key(s) will be recomputed and re-cached at next use. ASSUMES
- * TCB_LOCK is already held
+/*-
+ * clear any cached key(s) if they match the given key id on an association.
+ * the cached key(s) will be recomputed and re-cached at next use.
+ * ASSUMES TCB_LOCK is already held
*/
void
sctp_clear_cachedkeys(struct sctp_tcb *stcb, uint16_t keyid)
@@ -1284,9 +1345,10 @@ sctp_clear_cachedkeys(struct sctp_tcb *stcb, uint16_t keyid)
}
}
-/*
+/*-
* clear any cached key(s) if they match the given key id for all assocs on
- * an association ASSUMES INP_WLOCK is already held
+ * an endpoint.
+ * ASSUMES INP_WLOCK is already held
*/
void
sctp_clear_cachedkeys_ep(struct sctp_inpcb *inp, uint16_t keyid)
@@ -1304,8 +1366,9 @@ sctp_clear_cachedkeys_ep(struct sctp_inpcb *inp, uint16_t keyid)
}
}
-/*
- * delete a shared key from an association ASSUMES TCB_LOCK is already held
+/*-
+ * delete a shared key from an association
+ * ASSUMES TCB_LOCK is already held
*/
int
sctp_delete_sharedkey(struct sctp_tcb *stcb, uint16_t keyid)
@@ -1316,7 +1379,7 @@ sctp_delete_sharedkey(struct sctp_tcb *stcb, uint16_t keyid)
return (-1);
/* is the keyid the assoc active sending key */
- if (keyid == stcb->asoc.authinfo.assoc_keyid)
+ if (keyid == stcb->asoc.authinfo.active_keyid)
return (-1);
/* does the key exist? */
@@ -1324,6 +1387,10 @@ sctp_delete_sharedkey(struct sctp_tcb *stcb, uint16_t keyid)
if (skey == NULL)
return (-1);
+ /* are there other refcount holders on the key? */
+ if (skey->refcount > 1)
+ return (-1);
+
/* remove it */
LIST_REMOVE(skey, next);
sctp_free_sharedkey(skey); /* frees skey->key as well */
@@ -1333,35 +1400,29 @@ sctp_delete_sharedkey(struct sctp_tcb *stcb, uint16_t keyid)
return (0);
}
-/*
- * deletes a shared key from the endpoint ASSUMES INP_WLOCK is already held
+/*-
+ * deletes a shared key from the endpoint
+ * ASSUMES INP_WLOCK is already held
*/
int
sctp_delete_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid)
{
sctp_sharedkey_t *skey;
- struct sctp_tcb *stcb;
if (inp == NULL)
return (-1);
- /* is the keyid the active sending key on the endpoint or any assoc */
+ /* is the keyid the active sending key on the endpoint */
if (keyid == inp->sctp_ep.default_keyid)
return (-1);
- LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
- SCTP_TCB_LOCK(stcb);
- if (keyid == stcb->asoc.authinfo.assoc_keyid) {
- SCTP_TCB_UNLOCK(stcb);
- return (-1);
- }
- SCTP_TCB_UNLOCK(stcb);
- }
/* does the key exist? */
skey = sctp_find_sharedkey(&inp->sctp_ep.shared_keys, keyid);
if (skey == NULL)
return (-1);
+ /* endpoint keys are not refcounted */
+
/* remove it */
LIST_REMOVE(skey, next);
sctp_free_sharedkey(skey); /* frees skey->key as well */
@@ -1371,60 +1432,36 @@ sctp_delete_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid)
return (0);
}
-/*
- * set the active key on an association ASSUME TCB_LOCK is already held
+/*-
+ * set the active key on an association
+ * ASSUMES TCB_LOCK is already held
*/
int
sctp_auth_setactivekey(struct sctp_tcb *stcb, uint16_t keyid)
{
sctp_sharedkey_t *skey = NULL;
- sctp_key_t *key = NULL;
- int using_ep_key = 0;
/* find the key on the assoc */
skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
if (skey == NULL) {
- /* if not on the assoc, find the key on the endpoint */
- atomic_add_int(&stcb->asoc.refcnt, 1);
- SCTP_TCB_UNLOCK(stcb);
- SCTP_INP_RLOCK(stcb->sctp_ep);
- SCTP_TCB_LOCK(stcb);
- atomic_add_int(&stcb->asoc.refcnt, -1);
- skey = sctp_find_sharedkey(&stcb->sctp_ep->sctp_ep.shared_keys,
- keyid);
- using_ep_key = 1;
- }
- if (skey == NULL) {
/* that key doesn't exist */
- if (using_ep_key) {
- SCTP_INP_RUNLOCK(stcb->sctp_ep);
- }
return (-1);
}
- /* get the shared key text */
- key = skey->key;
-
- /* free any existing cached key */
- if (stcb->asoc.authinfo.assoc_key != NULL)
- sctp_free_key(stcb->asoc.authinfo.assoc_key);
- /* compute a new assoc key and cache it */
- stcb->asoc.authinfo.assoc_key =
- sctp_compute_hashkey(stcb->asoc.authinfo.random,
- stcb->asoc.authinfo.peer_random, key);
- stcb->asoc.authinfo.assoc_keyid = keyid;
-#ifdef SCTP_DEBUG
- if (SCTP_AUTH_DEBUG)
- sctp_print_key(stcb->asoc.authinfo.assoc_key, "Assoc Key");
-#endif
-
- if (using_ep_key) {
- SCTP_INP_RUNLOCK(stcb->sctp_ep);
+ if ((skey->deactivated) && (skey->refcount > 1)) {
+ /* can't reactivate a deactivated key with other refcounts */
+ return (-1);
}
+ /* set the (new) active key */
+ stcb->asoc.authinfo.active_keyid = keyid;
+ /* reset the deactivated flag */
+ skey->deactivated = 0;
+
return (0);
}
-/*
- * set the active key on an endpoint ASSUMES INP_WLOCK is already held
+/*-
+ * set the active key on an endpoint
+ * ASSUMES INP_WLOCK is already held
*/
int
sctp_auth_setactivekey_ep(struct sctp_inpcb *inp, uint16_t keyid)
@@ -1441,6 +1478,69 @@ sctp_auth_setactivekey_ep(struct sctp_inpcb *inp, uint16_t keyid)
return (0);
}
+/*-
+ * deactivates a shared key from the association
+ * ASSUMES INP_WLOCK is already held
+ */
+int
+sctp_deact_sharedkey(struct sctp_tcb *stcb, uint16_t keyid)
+{
+ sctp_sharedkey_t *skey;
+
+ if (stcb == NULL)
+ return (-1);
+
+ /* is the keyid the assoc active sending key */
+ if (keyid == stcb->asoc.authinfo.active_keyid)
+ return (-1);
+
+ /* does the key exist? */
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
+ if (skey == NULL)
+ return (-1);
+
+ /* are there other refcount holders on the key? */
+ if (skey->refcount == 1) {
+ /* no other users, send a notification for this key */
+ sctp_ulp_notify(SCTP_NOTIFY_AUTH_FREE_KEY, stcb, keyid, 0,
+ SCTP_SO_LOCKED);
+ }
+ /* mark the key as deactivated */
+ skey->deactivated = 1;
+
+ return (0);
+}
+
+/*-
+ * deactivates a shared key from the endpoint
+ * ASSUMES INP_WLOCK is already held
+ */
+int
+sctp_deact_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid)
+{
+ sctp_sharedkey_t *skey;
+
+ if (inp == NULL)
+ return (-1);
+
+ /* is the keyid the active sending key on the endpoint */
+ if (keyid == inp->sctp_ep.default_keyid)
+ return (-1);
+
+ /* does the key exist? */
+ skey = sctp_find_sharedkey(&inp->sctp_ep.shared_keys, keyid);
+ if (skey == NULL)
+ return (-1);
+
+ /* endpoint keys are not refcounted */
+
+ /* remove it */
+ LIST_REMOVE(skey, next);
+ sctp_free_sharedkey(skey); /* frees skey->key as well */
+
+ return (0);
+}
+
/*
* get local authentication parameters from cookie (from INIT-ACK)
*/
@@ -1581,9 +1681,13 @@ sctp_auth_get_cookie_params(struct sctp_tcb *stcb, struct mbuf *m,
/* negotiate what HMAC to use for the peer */
stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
stcb->asoc.local_hmacs);
+
/* copy defaults from the endpoint */
/* FIX ME: put in cookie? */
- stcb->asoc.authinfo.assoc_keyid = stcb->sctp_ep->sctp_ep.default_keyid;
+ stcb->asoc.authinfo.active_keyid = stcb->sctp_ep->sctp_ep.default_keyid;
+ /* copy out the shared key list (by reference) from the endpoint */
+ (void)sctp_copy_skeylist(&stcb->sctp_ep->sctp_ep.shared_keys,
+ &stcb->asoc.shared_keys);
}
/*
@@ -1591,7 +1695,7 @@ sctp_auth_get_cookie_params(struct sctp_tcb *stcb, struct mbuf *m,
*/
void
sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset,
- struct sctp_auth_chunk *auth, struct sctp_tcb *stcb)
+ struct sctp_auth_chunk *auth, struct sctp_tcb *stcb, uint16_t keyid)
{
uint32_t digestlen;
sctp_sharedkey_t *skey;
@@ -1603,15 +1707,15 @@ sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset,
/* zero the digest + chunk padding */
digestlen = sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
bzero(auth->hmac, SCTP_SIZE32(digestlen));
- /* is an assoc key cached? */
- if (stcb->asoc.authinfo.assoc_key == NULL) {
- skey = sctp_find_sharedkey(&stcb->asoc.shared_keys,
- stcb->asoc.authinfo.assoc_keyid);
- if (skey == NULL) {
- /* not in the assoc list, so check the endpoint list */
- skey = sctp_find_sharedkey(&stcb->sctp_ep->sctp_ep.shared_keys,
- stcb->asoc.authinfo.assoc_keyid);
+
+ /* is the desired key cached? */
+ if ((keyid != stcb->asoc.authinfo.assoc_keyid) ||
+ (stcb->asoc.authinfo.assoc_key == NULL)) {
+ if (stcb->asoc.authinfo.assoc_key != NULL) {
+ /* free the old cached key */
+ sctp_free_key(stcb->asoc.authinfo.assoc_key);
}
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
/* the only way skey is NULL is if null key id 0 is used */
if (skey != NULL)
key = skey->key;
@@ -1621,6 +1725,7 @@ sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset,
stcb->asoc.authinfo.assoc_key =
sctp_compute_hashkey(stcb->asoc.authinfo.random,
stcb->asoc.authinfo.peer_random, key);
+ stcb->asoc.authinfo.assoc_keyid = keyid;
SCTPDBG(SCTP_DEBUG_AUTH1, "caching key id %u\n",
stcb->asoc.authinfo.assoc_keyid);
#ifdef SCTP_DEBUG
@@ -1630,11 +1735,10 @@ sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset,
#endif
}
/* set in the active key id */
- auth->shared_key_id = htons(stcb->asoc.authinfo.assoc_keyid);
+ auth->shared_key_id = htons(keyid);
/* compute and fill in the digest */
- (void)sctp_compute_hmac_m(stcb->asoc.peer_hmac_id,
- stcb->asoc.authinfo.assoc_key,
+ (void)sctp_compute_hmac_m(stcb->asoc.peer_hmac_id, stcb->asoc.authinfo.assoc_key,
m, auth_offset, auth->hmac);
}
@@ -1671,9 +1775,11 @@ sctp_bzero_m(struct mbuf *m, uint32_t m_offset, uint32_t size)
}
}
-/*
- * process the incoming Authentication chunk return codes: -1 on any
- * authentication error 0 on authentication verification
+/*-
+ * process the incoming Authentication chunk
+ * return codes:
+ * -1 on any authentication error
+ * 0 on authentication verification
*/
int
sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *auth,
@@ -1736,12 +1842,8 @@ sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *auth,
if ((stcb->asoc.authinfo.recv_key == NULL) ||
(stcb->asoc.authinfo.recv_keyid != shared_key_id)) {
/* find the shared key on the assoc first */
- skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, shared_key_id);
- if (skey == NULL) {
- /* if not on the assoc, find it on the endpoint */
- skey = sctp_find_sharedkey(&stcb->sctp_ep->sctp_ep.shared_keys,
- shared_key_id);
- }
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys,
+ shared_key_id);
/* if the shared key isn't found, discard the chunk */
if (skey == NULL) {
SCTP_STAT_INCR(sctps_recvivalkeyid);
@@ -1758,7 +1860,8 @@ sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *auth,
* *)stcb->asoc.authinfo.recv_keyid);
*/
sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY,
- shared_key_id, stcb->asoc.authinfo.recv_keyid);
+ shared_key_id, stcb->asoc.authinfo.recv_keyid,
+ SCTP_SO_NOT_LOCKED);
/* compute a new recv assoc key and cache it */
if (stcb->asoc.authinfo.recv_key != NULL)
sctp_free_key(stcb->asoc.authinfo.recv_key);
@@ -1801,7 +1904,11 @@ sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *auth,
*/
void
sctp_notify_authentication(struct sctp_tcb *stcb, uint32_t indication,
- uint16_t keyid, uint16_t alt_keyid)
+ uint16_t keyid, uint16_t alt_keyid, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
{
struct mbuf *m_notify;
struct sctp_authkey_event *auth;
@@ -1851,11 +1958,11 @@ sctp_notify_authentication(struct sctp_tcb *stcb, uint32_t indication,
/* not that we need this */
control->tail_mbuf = m_notify;
sctp_add_to_readq(stcb->sctp_ep, stcb, control,
- &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
+ &stcb->sctp_socket->so_rcv, 1, so_locked);
}
-/*
+/*-
* validates the AUTHentication related parameters in an INIT/INIT-ACK
* Note: currently only used for INIT as INIT-ACK is handled inline
* with sctp_load_addresses_from_init()
@@ -2027,7 +2134,11 @@ sctp_initialize_auth_params(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
}
}
/* copy defaults from the endpoint */
- stcb->asoc.authinfo.assoc_keyid = inp->sctp_ep.default_keyid;
+ stcb->asoc.authinfo.active_keyid = inp->sctp_ep.default_keyid;
+
+ /* copy out the shared key list (by reference) from the endpoint */
+ (void)sctp_copy_skeylist(&inp->sctp_ep.shared_keys,
+ &stcb->asoc.shared_keys);
/* now set the concatenated key (random + chunks + hmacs) */
#ifdef SCTP_AUTH_DRAFT_04
@@ -2135,11 +2246,13 @@ sctp_test_hmac_sha1(void)
uint32_t digestlen = 20;
int failed = 0;
- /*
- * test_case = 1 key =
- * 0x0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b key_len = 20
- * data = "Hi There" data_len = 8 digest =
- * 0xb617318655057264e28bc0b6fb378c8ef146be00
+ /*-
+ * test_case = 1
+ * key = 0x0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b
+ * key_len = 20
+ * data = "Hi There"
+ * data_len = 8
+ * digest = 0xb617318655057264e28bc0b6fb378c8ef146be00
*/
keylen = 20;
memset(key, 0x0b, keylen);
@@ -2150,10 +2263,13 @@ sctp_test_hmac_sha1(void)
text, textlen, digest, digestlen) < 0)
failed++;
- /*
- * test_case = 2 key = "Jefe" key_len = 4 data =
- * "what do ya want for nothing?" data_len = 28 digest =
- * 0xeffcdf6ae5eb2fa2d27416d5f184df9c259a7c79
+ /*-
+ * test_case = 2
+ * key = "Jefe"
+ * key_len = 4
+ * data = "what do ya want for nothing?"
+ * data_len = 28
+ * digest = 0xeffcdf6ae5eb2fa2d27416d5f184df9c259a7c79
*/
keylen = 4;
strcpy(key, "Jefe");
@@ -2164,11 +2280,13 @@ sctp_test_hmac_sha1(void)
text, textlen, digest, digestlen) < 0)
failed++;
- /*
- * test_case = 3 key =
- * 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa key_len = 20
- * data = 0xdd repeated 50 times data_len = 50 digest
- * = 0x125d7342b9ac11cd91a39af48aa17b4f63f175d3
+ /*-
+ * test_case = 3
+ * key = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * key_len = 20
+ * data = 0xdd repeated 50 times
+ * data_len = 50
+ * digest = 0x125d7342b9ac11cd91a39af48aa17b4f63f175d3
*/
keylen = 20;
memset(key, 0xaa, keylen);
@@ -2179,11 +2297,13 @@ sctp_test_hmac_sha1(void)
text, textlen, digest, digestlen) < 0)
failed++;
- /*
- * test_case = 4 key =
- * 0x0102030405060708090a0b0c0d0e0f10111213141516171819 key_len = 25
- * data = 0xcd repeated 50 times data_len = 50 digest
- * = 0x4c9007f4026250c6bc8414f9bf50c86c2d7235da
+ /*-
+ * test_case = 4
+ * key = 0x0102030405060708090a0b0c0d0e0f10111213141516171819
+ * key_len = 25
+ * data = 0xcd repeated 50 times
+ * data_len = 50
+ * digest = 0x4c9007f4026250c6bc8414f9bf50c86c2d7235da
*/
keylen = 25;
memcpy(key, "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19", keylen);
@@ -2194,12 +2314,14 @@ sctp_test_hmac_sha1(void)
text, textlen, digest, digestlen) < 0)
failed++;
- /*
- * test_case = 5 key =
- * 0x0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c key_len = 20
- * data = "Test With Truncation" data_len = 20 digest
- * = 0x4c1a03424b55e07fe7f27be1d58bb9324a9a5a04 digest-96 =
- * 0x4c1a03424b55e07fe7f27be1
+ /*-
+ * test_case = 5
+ * key = 0x0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c
+ * key_len = 20
+ * data = "Test With Truncation"
+ * data_len = 20
+ * digest = 0x4c1a03424b55e07fe7f27be1d58bb9324a9a5a04
+ * digest-96 = 0x4c1a03424b55e07fe7f27be1
*/
keylen = 20;
memset(key, 0x0c, keylen);
@@ -2210,11 +2332,13 @@ sctp_test_hmac_sha1(void)
text, textlen, digest, digestlen) < 0)
failed++;
- /*
- * test_case = 6 key = 0xaa repeated 80 times key_len
- * = 80 data = "Test Using Larger Than Block-Size Key -
- * Hash Key First" data_len = 54 digest =
- * 0xaa4ae5e15272d00e95705637ce8a3b55ed402112
+ /*-
+ * test_case = 6
+ * key = 0xaa repeated 80 times
+ * key_len = 80
+ * data = "Test Using Larger Than Block-Size Key - Hash Key First"
+ * data_len = 54
+ * digest = 0xaa4ae5e15272d00e95705637ce8a3b55ed402112
*/
keylen = 80;
memset(key, 0xaa, keylen);
@@ -2225,11 +2349,13 @@ sctp_test_hmac_sha1(void)
text, textlen, digest, digestlen) < 0)
failed++;
- /*
- * test_case = 7 key = 0xaa repeated 80 times key_len
- * = 80 data = "Test Using Larger Than Block-Size Key and
- * Larger Than One Block-Size Data" data_len = 73 digest =
- * 0xe8e99d0f45237d786d6bbaa7965c7808bbff1a91
+ /*-
+ * test_case = 7
+ * key = 0xaa repeated 80 times
+ * key_len = 80
+ * data = "Test Using Larger Than Block-Size Key and Larger Than One Block-Size Data"
+ * data_len = 73
+ * digest = 0xe8e99d0f45237d786d6bbaa7965c7808bbff1a91
*/
keylen = 80;
memset(key, 0xaa, keylen);
@@ -2261,10 +2387,13 @@ sctp_test_hmac_md5(void)
uint32_t digestlen = 16;
int failed = 0;
- /*
- * test_case = 1 key = 0x0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b
- * key_len = 16 data = "Hi There" data_len = 8 digest =
- * 0x9294727a3638bb1c13f48ef8158bfc9d
+ /*-
+ * test_case = 1
+ * key = 0x0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b
+ * key_len = 16
+ * data = "Hi There"
+ * data_len = 8
+ * digest = 0x9294727a3638bb1c13f48ef8158bfc9d
*/
keylen = 16;
memset(key, 0x0b, keylen);
@@ -2275,10 +2404,13 @@ sctp_test_hmac_md5(void)
text, textlen, digest, digestlen) < 0)
failed++;
- /*
- * test_case = 2 key = "Jefe" key_len = 4 data =
- * "what do ya want for nothing?" data_len = 28 digest =
- * 0x750c783e6ab0b503eaa86e310a5db738
+ /*-
+ * test_case = 2
+ * key = "Jefe"
+ * key_len = 4
+ * data = "what do ya want for nothing?"
+ * data_len = 28
+ * digest = 0x750c783e6ab0b503eaa86e310a5db738
*/
keylen = 4;
strcpy(key, "Jefe");
@@ -2289,10 +2421,13 @@ sctp_test_hmac_md5(void)
text, textlen, digest, digestlen) < 0)
failed++;
- /*
- * test_case = 3 key = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
- * key_len = 16 data = 0xdd repeated 50 times data_len = 50
- * digest = 0x56be34521d144c88dbb8c733f0e8b3f6
+ /*-
+ * test_case = 3
+ * key = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * key_len = 16
+ * data = 0xdd repeated 50 times
+ * data_len = 50
+ * digest = 0x56be34521d144c88dbb8c733f0e8b3f6
*/
keylen = 16;
memset(key, 0xaa, keylen);
@@ -2303,11 +2438,13 @@ sctp_test_hmac_md5(void)
text, textlen, digest, digestlen) < 0)
failed++;
- /*
- * test_case = 4 key =
- * 0x0102030405060708090a0b0c0d0e0f10111213141516171819 key_len = 25
- * data = 0xcd repeated 50 times data_len = 50 digest
- * = 0x697eaf0aca3a3aea3a75164746ffaa79
+ /*-
+ * test_case = 4
+ * key = 0x0102030405060708090a0b0c0d0e0f10111213141516171819
+ * key_len = 25
+ * data = 0xcd repeated 50 times
+ * data_len = 50
+ * digest = 0x697eaf0aca3a3aea3a75164746ffaa79
*/
keylen = 25;
memcpy(key, "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19", keylen);
@@ -2318,11 +2455,14 @@ sctp_test_hmac_md5(void)
text, textlen, digest, digestlen) < 0)
failed++;
- /*
- * test_case = 5 key = 0x0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c
- * key_len = 16 data = "Test With Truncation" data_len = 20
- * digest = 0x56461ef2342edc00f9bab995690efd4c digest-96
- * 0x56461ef2342edc00f9bab995
+ /*-
+ * test_case = 5
+ * key = 0x0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c
+ * key_len = 16
+ * data = "Test With Truncation"
+ * data_len = 20
+ * digest = 0x56461ef2342edc00f9bab995690efd4c
+ * digest-96 = 0x56461ef2342edc00f9bab995
*/
keylen = 16;
memset(key, 0x0c, keylen);
@@ -2333,11 +2473,13 @@ sctp_test_hmac_md5(void)
text, textlen, digest, digestlen) < 0)
failed++;
- /*
- * test_case = 6 key = 0xaa repeated 80 times key_len
- * = 80 data = "Test Using Larger Than Block-Size Key -
- * Hash Key First" data_len = 54 digest =
- * 0x6b1ab7fe4bd7bf8f0b62e6ce61b9d0cd
+ /*-
+ * test_case = 6
+ * key = 0xaa repeated 80 times
+ * key_len = 80
+ * data = "Test Using Larger Than Block-Size Key - Hash Key First"
+ * data_len = 54
+ * digest = 0x6b1ab7fe4bd7bf8f0b62e6ce61b9d0cd
*/
keylen = 80;
memset(key, 0xaa, keylen);
@@ -2348,11 +2490,13 @@ sctp_test_hmac_md5(void)
text, textlen, digest, digestlen) < 0)
failed++;
- /*
- * test_case = 7 key = 0xaa repeated 80 times key_len
- * = 80 data = "Test Using Larger Than Block-Size Key and
- * Larger Than One Block-Size Data" data_len = 73 digest =
- * 0x6f630fad67cda0ee1fb1f562db3aa53e
+ /*-
+ * test_case = 7
+ * key = 0xaa repeated 80 times
+ * key_len = 80
+ * data = "Test Using Larger Than Block-Size Key and Larger Than One Block-Size Data"
+ * data_len = 73
+ * digest = 0x6f630fad67cda0ee1fb1f562db3aa53e
*/
keylen = 80;
memset(key, 0xaa, keylen);
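Taken together, the acquire/release pair and the deactivation routines
added in this file imply a per-chunk key lifecycle. A rough sketch
under that reading (function names here are illustrative, not in-tree
callers):

/*
 * Sketch of the intended key lifecycle: a chunk takes a hold on its
 * send key id while in flight and drops it once acked or abandoned.
 * A deactivated key lingers until its last reference goes away, at
 * which point sctp_auth_key_release() raises SCTP_NOTIFY_AUTH_FREE_KEY.
 */
static void
chunk_queued_sketch(struct sctp_tcb *stcb, uint16_t keyid)
{
	sctp_auth_key_acquire(stcb, keyid);	/* hold while in flight */
}

static void
chunk_done_sketch(struct sctp_tcb *stcb, uint16_t keyid)
{
	sctp_auth_key_release(stcb, keyid);	/* may fire FREE_KEY */
}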
diff --git a/sys/netinet/sctp_auth.h b/sys/netinet/sctp_auth.h
index be7e6a0..8602c97 100644
--- a/sys/netinet/sctp_auth.h
+++ b/sys/netinet/sctp_auth.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -68,7 +68,9 @@ typedef struct sctp_key {
typedef struct sctp_shared_key {
LIST_ENTRY(sctp_shared_key) next;
sctp_key_t *key; /* key text */
+ uint32_t refcount; /* reference count */
uint16_t keyid; /* shared key ID */
+ uint8_t deactivated; /* key is deactivated */
} sctp_sharedkey_t;
LIST_HEAD(sctp_keyhead, sctp_shared_key);
@@ -91,10 +93,11 @@ typedef struct sctp_authinfo {
sctp_key_t *random; /* local random key (concatenated) */
uint32_t random_len; /* local random number length for param */
sctp_key_t *peer_random;/* peer's random key (concatenated) */
+ sctp_key_t *assoc_key; /* cached concatenated send key */
+ sctp_key_t *recv_key; /* cached concatenated recv key */
+ uint16_t active_keyid; /* active send keyid */
uint16_t assoc_keyid; /* current send keyid (cached) */
uint16_t recv_keyid; /* last recv keyid (cached) */
- sctp_key_t *assoc_key; /* cached send key */
- sctp_key_t *recv_key; /* cached recv key */
} sctp_authinfo_t;
@@ -117,10 +120,13 @@ extern int sctp_auth_add_chunk(uint8_t chunk, sctp_auth_chklist_t * list);
extern int sctp_auth_delete_chunk(uint8_t chunk, sctp_auth_chklist_t * list);
extern size_t sctp_auth_get_chklist_size(const sctp_auth_chklist_t * list);
extern void sctp_auth_set_default_chunks(sctp_auth_chklist_t * list);
-extern int
- sctp_serialize_auth_chunks(const sctp_auth_chklist_t * list, uint8_t * ptr);
-extern int sctp_pack_auth_chunks(const sctp_auth_chklist_t * list, uint8_t * ptr);
-extern int
+extern int
+sctp_serialize_auth_chunks(const sctp_auth_chklist_t * list,
+ uint8_t * ptr);
+extern int
+sctp_pack_auth_chunks(const sctp_auth_chklist_t * list,
+ uint8_t * ptr);
+extern int
sctp_unpack_auth_chunks(const uint8_t * ptr, uint8_t num_chunks,
sctp_auth_chklist_t * list);
@@ -139,14 +145,20 @@ sctp_compute_hashkey(sctp_key_t * key1, sctp_key_t * key2,
extern sctp_sharedkey_t *sctp_alloc_sharedkey(void);
extern void sctp_free_sharedkey(sctp_sharedkey_t * skey);
extern sctp_sharedkey_t *
- sctp_find_sharedkey(struct sctp_keyhead *shared_keys, uint16_t key_id);
-extern void
+sctp_find_sharedkey(struct sctp_keyhead *shared_keys,
+ uint16_t key_id);
+extern int
sctp_insert_sharedkey(struct sctp_keyhead *shared_keys,
sctp_sharedkey_t * new_skey);
-extern int
+extern int
sctp_copy_skeylist(const struct sctp_keyhead *src,
struct sctp_keyhead *dest);
+/* ref counts on shared keys, by key id */
+extern void sctp_auth_key_acquire(struct sctp_tcb *stcb, uint16_t keyid);
+extern void sctp_auth_key_release(struct sctp_tcb *stcb, uint16_t keyid);
+
+
/* hmac list handling */
extern sctp_hmaclist_t *sctp_alloc_hmaclist(uint8_t num_hmacs);
extern void sctp_free_hmaclist(sctp_hmaclist_t * list);
@@ -167,25 +179,24 @@ extern void sctp_free_authinfo(sctp_authinfo_t * authinfo);
/* keyed-HMAC functions */
extern uint32_t sctp_get_auth_chunk_len(uint16_t hmac_algo);
extern uint32_t sctp_get_hmac_digest_len(uint16_t hmac_algo);
-extern uint32_t
+extern uint32_t
sctp_hmac(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
uint8_t * text, uint32_t textlen, uint8_t * digest);
-extern int
+extern int
sctp_verify_hmac(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
- uint8_t * text, uint32_t textlen, uint8_t * digest,
- uint32_t digestlen);
-extern uint32_t
+ uint8_t * text, uint32_t textlen, uint8_t * digest, uint32_t digestlen);
+extern uint32_t
sctp_compute_hmac(uint16_t hmac_algo, sctp_key_t * key,
uint8_t * text, uint32_t textlen, uint8_t * digest);
extern int sctp_auth_is_supported_hmac(sctp_hmaclist_t * list, uint16_t id);
/* mbuf versions */
-extern uint32_t
+extern uint32_t
sctp_hmac_m(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
struct mbuf *m, uint32_t m_offset, uint8_t * digest, uint32_t trailer);
-extern uint32_t
-sctp_compute_hmac_m(uint16_t hmac_algo, sctp_key_t * key, struct mbuf *m,
- uint32_t m_offset, uint8_t * digest);
+extern uint32_t
+sctp_compute_hmac_m(uint16_t hmac_algo, sctp_key_t * key,
+ struct mbuf *m, uint32_t m_offset, uint8_t * digest);
/*
* authentication routines
@@ -196,31 +207,31 @@ extern int sctp_delete_sharedkey(struct sctp_tcb *stcb, uint16_t keyid);
extern int sctp_delete_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid);
extern int sctp_auth_setactivekey(struct sctp_tcb *stcb, uint16_t keyid);
extern int sctp_auth_setactivekey_ep(struct sctp_inpcb *inp, uint16_t keyid);
+extern int sctp_deact_sharedkey(struct sctp_tcb *stcb, uint16_t keyid);
+extern int sctp_deact_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid);
-extern void
+extern void
sctp_auth_get_cookie_params(struct sctp_tcb *stcb, struct mbuf *m,
uint32_t offset, uint32_t length);
-extern void
+extern void
sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset,
- struct sctp_auth_chunk *auth,
- struct sctp_tcb *stcb);
+ struct sctp_auth_chunk *auth, struct sctp_tcb *stcb, uint16_t key_id);
extern struct mbuf *
sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
- struct sctp_auth_chunk **auth_ret,
- uint32_t * offset, struct sctp_tcb *stcb,
- uint8_t chunk);
-extern int
+ struct sctp_auth_chunk **auth_ret, uint32_t * offset,
+ struct sctp_tcb *stcb, uint8_t chunk);
+extern int
sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *ch,
struct mbuf *m, uint32_t offset);
-extern void
+extern void
sctp_notify_authentication(struct sctp_tcb *stcb,
- uint32_t indication, uint16_t keyid,
- uint16_t alt_keyid);
-extern int
- sctp_validate_init_auth_params(struct mbuf *m, int offset, int limit);
-extern void
- sctp_initialize_auth_params(struct sctp_inpcb *inp, struct sctp_tcb *stcb);
-
+ uint32_t indication, uint16_t keyid, uint16_t alt_keyid, int so_locked);
+extern int
+sctp_validate_init_auth_params(struct mbuf *m, int offset,
+ int limit);
+extern void
+sctp_initialize_auth_params(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb);
/* test functions */
extern void sctp_test_hmac_sha1(void);
diff --git a/sys/netinet/sctp_constants.h b/sys/netinet/sctp_constants.h
index 2305a1e..cc48d2f 100644
--- a/sys/netinet/sctp_constants.h
+++ b/sys/netinet/sctp_constants.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -313,7 +313,6 @@ __FBSDID("$FreeBSD$");
/* Minimum number of bytes read by user before we
* condsider doing a rwnd update
*/
-#define SCTP_MIN_READ_BEFORE_CONSIDERING 3000
/*
* default HMAC for cookies, etc... use one of the AUTH HMAC id's
@@ -382,6 +381,14 @@ __FBSDID("$FreeBSD$");
* hit this value) */
#define SCTP_DATAGRAM_RESEND 4
#define SCTP_DATAGRAM_ACKED 10010
+/* EY
+ * If a tsn is nr-gapped, it's first tagged as NR_MARKED and then NR_ACKED.
+ * When yet another nr-sack is received, if a particular TSN's sent tag
+ * is observed to be NR_ACKED after gap-ack info is processed, this implies
+ * that particular TSN is reneged.
+ */
+#define SCTP_DATAGRAM_NR_ACKED 10020
+#define SCTP_DATAGRAM_NR_MARKED 20005
#define SCTP_DATAGRAM_MARKED 20010
#define SCTP_FORWARD_TSN_SKIP 30010
@@ -465,6 +472,9 @@ __FBSDID("$FreeBSD$");
#define SCTP_SET_PRIM_ADDR 0xc004
#define SCTP_SUCCESS_REPORT 0xc005
#define SCTP_ULP_ADAPTATION 0xc006
+/* behave-nat-draft */
+#define SCTP_HAS_NAT_SUPPORT 0xc007
+#define SCTP_NAT_VTAGS 0xc008
/* Notification error codes */
#define SCTP_NOTIFY_DATAGRAM_UNSENT 0x0001
@@ -553,7 +563,13 @@ __FBSDID("$FreeBSD$");
#define SCTP_INITIAL_MAPPING_ARRAY 16
/* how much we grow the mapping array each call */
#define SCTP_MAPPING_ARRAY_INCR 32
-
+/* EY 05/13/08 - nr_sack version of the previous 3 constants */
+/* Maximum the nr mapping array will grow to (TSN mapping array) */
+#define SCTP_NR_MAPPING_ARRAY 512
+/* size of the initial malloc on the nr mapping array */
+#define SCTP_INITIAL_NR_MAPPING_ARRAY 16
+/* how much we grow the nr mapping array each call */
+#define SCTP_NR_MAPPING_ARRAY_INCR 32
/*
* Here we define the timer types used by the implementation as arguments in
* the set/get timer type calls.
@@ -607,7 +623,6 @@ __FBSDID("$FreeBSD$");
* Number of ticks before the soxwakeup() event that is delayed is sent AFTER
* the accept() call
*/
-#define SCTP_EVENTWAKEUP_WAIT_TICKS 3000
/*
* Of course we really don't collect stale cookies, being folks of decerning
@@ -616,7 +631,6 @@ __FBSDID("$FreeBSD$");
* up...this is a implemenation dependent treatment. In ours we do not ask
* for a extension of time, but just retry this many times...
*/
-#define SCTP_MAX_STALE_COOKIES_I_COLLECT 10
/* max number of TSN's dup'd that I will hold */
#define SCTP_MAX_DUP_TSNS 20
@@ -625,11 +639,8 @@ __FBSDID("$FreeBSD$");
* Here we define the types used when setting the retry amounts.
*/
/* constants for type of set */
-#define SCTP_MAXATTEMPT_INIT 2
-#define SCTP_MAXATTEMPT_SEND 3
/* Maximum TSN's we will summarize in a drop report */
-#define SCTP_MAX_DROP_REPORT 16
/* How many drop re-attempts we make on INIT/COOKIE-ECHO */
#define SCTP_RETRY_DROPPED_THRESH 4
@@ -638,7 +649,6 @@ __FBSDID("$FreeBSD$");
* And the max we will keep a history of in the tcb which MUST be lower than
* 256.
*/
-#define SCTP_MAX_DROP_SAVE_REPORT 16
/*
* Here we define the default timers and the default number of attemts we
@@ -711,14 +721,7 @@ __FBSDID("$FreeBSD$");
#define SCTP_DEF_MAX_PATH_RTX 5
#define SCTP_DEF_PMTU_RAISE_SEC 600 /* 10 min between raise attempts */
-#define SCTP_DEF_PMTU_MIN 600
-
-
-#define SCTP_MSEC_IN_A_SEC 1000
-#define SCTP_USEC_IN_A_SEC 1000000
-#define SCTP_NSEC_IN_A_SEC 1000000000
-#define SCTP_MAX_OUTSTANDING_DG 10000
/* How many streams I request initally by default */
#define SCTP_OSTREAM_INITIAL 10
@@ -727,9 +730,7 @@ __FBSDID("$FreeBSD$");
* How many smallest_mtu's need to increase before a window update sack is
* sent (should be a power of 2).
*/
-#define SCTP_SEG_TO_RWND_UPD 32
/* Send window update (incr * this > hiwat). Should be a power of 2 */
-#define SCTP_SCALE_OF_RWND_TO_UPD 4
#define SCTP_MINIMAL_RWND (4096) /* minimal rwnd */
#define SCTP_ADDRMAX 24
@@ -786,15 +787,6 @@ __FBSDID("$FreeBSD$");
/* amount peer is obligated to have in rwnd or I will abort */
#define SCTP_MIN_RWND 1500
-#define SCTP_WINDOW_MIN 1500 /* smallest rwnd can be */
-#define SCTP_WINDOW_MAX 1048576 /* biggest I can grow rwnd to My playing
- * around suggests a value greater than 64k
- * does not do much, I guess via the kernel
- * limitations on the stream/socket. */
-
-/* I can handle a 1meg re-assembly */
-#define SCTP_DEFAULT_MAXMSGREASM 1048576
-
#define SCTP_DEFAULT_MAXSEGMENT 65535
#define SCTP_CHUNK_BUFFER_SIZE 512
@@ -813,33 +805,35 @@ __FBSDID("$FreeBSD$");
/*
* SCTP upper layer notifications
*/
-#define SCTP_NOTIFY_ASSOC_UP 1
-#define SCTP_NOTIFY_ASSOC_DOWN 2
-#define SCTP_NOTIFY_INTERFACE_DOWN 3
-#define SCTP_NOTIFY_INTERFACE_UP 4
-#define SCTP_NOTIFY_DG_FAIL 5
-#define SCTP_NOTIFY_STRDATA_ERR 6
-#define SCTP_NOTIFY_ASSOC_ABORTED 7
-#define SCTP_NOTIFY_PEER_OPENED_STREAM 8
-#define SCTP_NOTIFY_STREAM_OPENED_OK 9
-#define SCTP_NOTIFY_ASSOC_RESTART 10
-#define SCTP_NOTIFY_HB_RESP 11
-#define SCTP_NOTIFY_ASCONF_SUCCESS 12
-#define SCTP_NOTIFY_ASCONF_FAILED 13
-#define SCTP_NOTIFY_PEER_SHUTDOWN 14
-#define SCTP_NOTIFY_ASCONF_ADD_IP 15
-#define SCTP_NOTIFY_ASCONF_DELETE_IP 16
-#define SCTP_NOTIFY_ASCONF_SET_PRIMARY 17
+#define SCTP_NOTIFY_ASSOC_UP 1
+#define SCTP_NOTIFY_ASSOC_DOWN 2
+#define SCTP_NOTIFY_INTERFACE_DOWN 3
+#define SCTP_NOTIFY_INTERFACE_UP 4
+#define SCTP_NOTIFY_DG_FAIL 5
+#define SCTP_NOTIFY_STRDATA_ERR 6
+#define SCTP_NOTIFY_ASSOC_ABORTED 7
+#define SCTP_NOTIFY_PEER_OPENED_STREAM 8
+#define SCTP_NOTIFY_STREAM_OPENED_OK 9
+#define SCTP_NOTIFY_ASSOC_RESTART 10
+#define SCTP_NOTIFY_HB_RESP 11
+#define SCTP_NOTIFY_ASCONF_SUCCESS 12
+#define SCTP_NOTIFY_ASCONF_FAILED 13
+#define SCTP_NOTIFY_PEER_SHUTDOWN 14
+#define SCTP_NOTIFY_ASCONF_ADD_IP 15
+#define SCTP_NOTIFY_ASCONF_DELETE_IP 16
+#define SCTP_NOTIFY_ASCONF_SET_PRIMARY 17
#define SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION 18
-#define SCTP_NOTIFY_INTERFACE_CONFIRMED 20
-#define SCTP_NOTIFY_STR_RESET_RECV 21
-#define SCTP_NOTIFY_STR_RESET_SEND 22
-#define SCTP_NOTIFY_STR_RESET_FAILED_OUT 23
-#define SCTP_NOTIFY_STR_RESET_FAILED_IN 24
-#define SCTP_NOTIFY_AUTH_NEW_KEY 25
-#define SCTP_NOTIFY_AUTH_KEY_CONFLICT 26
-#define SCTP_NOTIFY_SPECIAL_SP_FAIL 27
-#define SCTP_NOTIFY_MAX 27
+#define SCTP_NOTIFY_INTERFACE_CONFIRMED 20
+#define SCTP_NOTIFY_STR_RESET_RECV 21
+#define SCTP_NOTIFY_STR_RESET_SEND 22
+#define SCTP_NOTIFY_STR_RESET_FAILED_OUT 23
+#define SCTP_NOTIFY_STR_RESET_FAILED_IN 24
+#define SCTP_NOTIFY_AUTH_NEW_KEY 25
+#define SCTP_NOTIFY_AUTH_FREE_KEY 26
+#define SCTP_NOTIFY_SPECIAL_SP_FAIL 27
+#define SCTP_NOTIFY_NO_PEER_AUTH 28
+#define SCTP_NOTIFY_SENDER_DRY 29
+#define SCTP_NOTIFY_MAX 29
/* This is the value for messages that are NOT completely
* copied down where we will start to split the message.
@@ -970,7 +964,6 @@ __FBSDID("$FreeBSD$");
#endif /* !IPPROTO_SCTP */
#define SCTP_MAX_DATA_BUNDLING 256
-#define SCTP_MAX_CONTROL_BUNDLING 20
/* modular comparison */
/* True if a > b (mod = M) */
diff --git a/sys/netinet/sctp_header.h b/sys/netinet/sctp_header.h
index c10cb7a..ccfee63 100644
--- a/sys/netinet/sctp_header.h
+++ b/sys/netinet/sctp_header.h
@@ -121,6 +121,14 @@ struct sctp_asconf_addr_param { /* an ASCONF address parameter */
struct sctp_ipv6addr_param addrp; /* max storage size */
} SCTP_PACKED;
+
+struct sctp_asconf_tag_param { /* an ASCONF NAT-Vtag parameter */
+ struct sctp_asconf_paramhdr aph; /* asconf "parameter" */
+ uint32_t local_vtag;
+ uint32_t remote_vtag;
+} SCTP_PACKED;
+
+
struct sctp_asconf_addrv4_param { /* an ASCONF address (v4) parameter */
struct sctp_asconf_paramhdr aph; /* asconf "parameter" */
struct sctp_ipv4addr_param addrp; /* max storage size */
@@ -206,6 +214,15 @@ struct sctp_state_cookie { /* this is our definition... */
*/
} SCTP_PACKED;
+
+/* Used for NAT state error cause */
+struct sctp_missing_nat_state {
+ uint16_t cause;
+ uint16_t length;
+ uint8_t data[0];
+} SCTP_PACKED;
+
+
struct sctp_inv_mandatory_param {
uint16_t cause;
uint16_t length;
@@ -268,6 +285,30 @@ struct sctp_sack_chunk {
} SCTP_PACKED;
+/* EY Following 3 structs define NR Selective Ack (NR_SACK) chunk */
+struct sctp_nr_gap_ack_block {
+ uint16_t start; /* NR Gap Ack block start */
+ uint16_t end; /* NR Gap Ack block end */
+} SCTP_PACKED;
+
+struct sctp_nr_sack {
+ uint32_t cum_tsn_ack; /* cumulative TSN Ack */
+ uint32_t a_rwnd; /* updated a_rwnd of sender */
+ uint16_t num_gap_ack_blks; /* number of Gap Ack blocks */
+ uint16_t num_nr_gap_ack_blks; /* number of NR Gap Ack blocks */
+ uint16_t num_dup_tsns; /* number of duplicate TSNs */
+ uint16_t reserved; /* not currently used */
+ /* struct sctp_gap_ack_block's follow */
+ /* struct sctp_nr_gap_ack_block's follow */
+ /* uint32_t duplicate_tsn's follow */
+} SCTP_PACKED;
+
+struct sctp_nr_sack_chunk {
+ struct sctp_chunkhdr ch;
+ struct sctp_nr_sack nr_sack;
+} SCTP_PACKED;
+
+
/* Heartbeat Request (HEARTBEAT) */
struct sctp_heartbeat {
struct sctp_heartbeat_info_param hb_info;
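The new NR-SACK chunk is laid out like a SACK with a second block list
appended, as the trailing comments in struct sctp_nr_sack indicate. A
minimal parsing sketch using only the structs declared above (offset
arithmetic only, no bounds checking shown):

static void
walk_nr_sack_sketch(struct sctp_nr_sack_chunk *ch)
{
	uint16_t num_gap = ntohs(ch->nr_sack.num_gap_ack_blks);
	uint16_t num_nr = ntohs(ch->nr_sack.num_nr_gap_ack_blks);
	struct sctp_gap_ack_block *gap;
	struct sctp_nr_gap_ack_block *nr_gap;
	uint32_t *dup_tsn;

	/* regular gap ack blocks start right after the fixed header */
	gap = (struct sctp_gap_ack_block *)(ch + 1);
	/* nr gap ack blocks follow the regular ones ... */
	nr_gap = (struct sctp_nr_gap_ack_block *)(gap + num_gap);
	/* ... and duplicate TSNs close out the chunk */
	dup_tsn = (uint32_t *)(nr_gap + num_nr);
	(void)dup_tsn;
}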
diff --git a/sys/netinet/sctp_indata.c b/sys/netinet/sctp_indata.c
index 20d153e..a1a8d7a 100644
--- a/sys/netinet/sctp_indata.c
+++ b/sys/netinet/sctp_indata.c
@@ -300,6 +300,10 @@ sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
uint16_t stream_no;
int end = 0;
int cntDel;
+
+ /* EY if any out-of-order delivered, then tag it nr on nr_map */
+ uint32_t nr_tsn, nr_gap;
+
struct sctp_queued_to_read *control, *ctl, *ctlat;
if (stcb == NULL)
@@ -407,6 +411,42 @@ abandon:
}
/* pull it we did it */
TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
+ /*
+ * EY this is the chunk that should be tagged nr gapped
+ * calculate the gap and such then tag this TSN nr
+ * chk->rec.data.TSN_seq
+ */
+ /*
+ * EY!-TODO- this tsn should be tagged nr only if it is
+ * out-of-order, the if statement should be modified
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+
+ nr_tsn = chk->rec.data.TSN_seq;
+ if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
+ nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
+ } else {
+ nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
+ }
+ if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
+ (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
+ /*
+ * EY The 1st should never happen, as in
+ * process_a_data_chunk method this check
+ * should be done
+ */
+ /*
+ * EY The 2nd should never happen, because
+ * nr_mapping_array is always expanded when
+ * mapping_array is expanded
+ */
+ } else {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
+ if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ asoc->highest_tsn_inside_nr_map = nr_tsn;
+ }
+ }
if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
asoc->fragmented_delivery_inprogress = 0;
if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
@@ -453,9 +493,67 @@ abandon:
asoc->size_on_all_streams -= ctl->length;
sctp_ucount_decr(asoc->cnt_on_all_streams);
strm->last_sequence_delivered++;
+ /*
+ * EY will be used to
+ * calculate nr-gap
+ */
+ nr_tsn = ctl->sinfo_tsn;
sctp_add_to_readq(stcb->sctp_ep, stcb,
ctl,
&stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
+ /*
+ * EY -now something is
+ * delivered, calculate
+ * nr_gap and tag this tsn
+ * NR
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+
+ if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
+ nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
+ } else {
+ nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
+ }
+ if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
+ (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
+					/*
+					 * EY: neither condition should ever
+					 * hold: the first is already checked
+					 * in sctp_process_a_data_chunk(), and
+					 * nr_mapping_array is always expanded
+					 * together with mapping_array.
+					 */
+ } else {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
+ if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ asoc->highest_tsn_inside_nr_map = nr_tsn;
+ }
+ }
ctl = ctlat;
} else {
break;
@@ -504,6 +602,9 @@ sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
uint16_t nxt_todel;
struct mbuf *oper;
+ /* EY- will be used to calculate nr-gap for a tsn */
+ uint32_t nr_tsn, nr_gap;
+
queue_needed = 1;
asoc->size_on_all_streams += control->length;
sctp_ucount_incr(asoc->cnt_on_all_streams);
@@ -560,13 +661,48 @@ protocol_error:
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
}
+	/* EY: it won't be queued if it can be delivered directly */
queue_needed = 0;
asoc->size_on_all_streams -= control->length;
sctp_ucount_decr(asoc->cnt_on_all_streams);
strm->last_sequence_delivered++;
+ /* EY will be used to calculate nr-gap */
+ nr_tsn = control->sinfo_tsn;
sctp_add_to_readq(stcb->sctp_ep, stcb,
control,
&stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
+
+ /*
+	 * EY: this chunk should be tagged NR-gapped; compute the gap and
+	 * mark this TSN (nr_tsn) as NR
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+
+ if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
+ nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
+ } else {
+ nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
+ }
+ if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
+ (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
+			/*
+			 * EY: neither condition should ever hold: the first
+			 * is already checked in sctp_process_a_data_chunk(),
+			 * and nr_mapping_array is always expanded together
+			 * with mapping_array.
+			 */
+ } else {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
+ if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ asoc->highest_tsn_inside_nr_map = nr_tsn;
+ }
+ }
control = TAILQ_FIRST(&strm->inqueue);
while (control != NULL) {
/* all delivered */
@@ -587,9 +723,47 @@ protocol_error:
sctp_log_strm_del(control, NULL,
SCTP_STR_LOG_FROM_IMMED_DEL);
}
+ /* EY will be used to calculate nr-gap */
+ nr_tsn = control->sinfo_tsn;
sctp_add_to_readq(stcb->sctp_ep, stcb,
control,
&stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
+ /*
+			 * EY: this chunk should be tagged NR-gapped;
+			 * compute the gap and mark this TSN (nr_tsn) as NR
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+
+ if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
+ nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
+ } else {
+ nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
+ }
+ if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
+ (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
+					/*
+					 * EY: neither condition should ever
+					 * hold: the first is already checked
+					 * in sctp_process_a_data_chunk(), and
+					 * nr_mapping_array is always expanded
+					 * together with mapping_array.
+					 */
+ } else {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
+ if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ asoc->highest_tsn_inside_nr_map = nr_tsn;
+ }
+ }
control = at;
continue;
}
@@ -1392,6 +1566,9 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
/* struct sctp_tmit_chunk *chk; */
struct sctp_tmit_chunk *chk;
uint32_t tsn, gap;
+
+ /* EY - for nr_sack */
+ uint32_t nr_gap;
struct mbuf *dmbuf;
int indx, the_len;
int need_reasm_check = 0;
@@ -1447,6 +1624,9 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
return (0);
}
}
+ /* EY - for nr_sack */
+ nr_gap = gap;
+
if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
*high_tsn = tsn;
}
@@ -1563,9 +1743,17 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
SCTP_STAT_INCR(sctps_badsid);
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
+ /* EY set this tsn present in nr_sack's nr_mapping_array */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+ }
if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
/* we have a new high score */
asoc->highest_tsn_inside_map = tsn;
+ /* EY nr_sack version of the above */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
+ asoc->highest_tsn_inside_nr_map = tsn;
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
@@ -1716,6 +1904,48 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
goto failed_express_del;
}
sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
+
+ /*
+	 * EY: here I should check whether this delivered TSN is
+	 * out-of-order; if so, update the nr_map
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+		/*
+		 * EY: check that the mapping_array and nr_mapping_array are
+		 * consistent
+		 */
+		if (asoc->mapping_array_base_tsn != asoc->nr_mapping_array_base_tsn) {
+			/* EY debugging block */
+			/*
+			 * printf("EY-IN sctp_process_a_data_chunk(5): Something is wrong, "
+			 *     "the map base tsn\nEY-and nr_map base tsn should be equal.");
+			 */
+			/*
+			 * printf("\nEY-Calculating an nr_gap!!\n"
+			 *     "mapping_array_size = %d nr_mapping_array_size = %d\n"
+			 *     "EY-mapping_array_base = %d nr_mapping_array_base = %d\n"
+			 *     "EY-highest_tsn_inside_map = %d highest_tsn_inside_nr_map = %d\n"
+			 *     "EY-TSN = %d nr_gap = %d",
+			 *     asoc->mapping_array_size, asoc->nr_mapping_array_size,
+			 *     asoc->mapping_array_base_tsn, asoc->nr_mapping_array_base_tsn,
+			 *     asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map,
+			 *     tsn, nr_gap);
+			 */
+		}
+		/* EY: not 100% sure about the locking here */
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
+ if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
+ asoc->highest_tsn_inside_nr_map = tsn;
+ }
if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
/* for ordered, bump what we delivered */
asoc->strmin[strmno].last_sequence_delivered++;
@@ -1761,6 +1991,38 @@ failed_express_del:
SCTP_PRINTF("Append fails end:%d\n", end);
goto failed_pdapi_express_del;
}
+ /*
+	 * EY: it was appended to the read queue in the previous if block;
+	 * here I should check whether this delivered TSN is out-of-order
+	 * and, if so, update the nr_map
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+		/* EY debugging block */
+		/*
+		 * printf("\nEY-Calculating an nr_gap!!\n"
+		 *     "EY-mapping_array_size = %d nr_mapping_array_size = %d\n"
+		 *     "EY-mapping_array_base = %d nr_mapping_array_base = %d\n"
+		 *     "EY-highest_tsn_inside_map = %d highest_tsn_inside_nr_map = %d\n"
+		 *     "EY-TSN = %d nr_gap = %d",
+		 *     asoc->mapping_array_size, asoc->nr_mapping_array_size,
+		 *     asoc->mapping_array_base_tsn, asoc->nr_mapping_array_base_tsn,
+		 *     asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map,
+		 *     tsn, nr_gap);
+		 */
+		/* EY: not 100% sure about the locking here */
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
+ if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
+ asoc->highest_tsn_inside_nr_map = tsn;
+ }
SCTP_STAT_INCR(sctps_recvexpressm);
control->sinfo_tsn = tsn;
asoc->tsn_last_delivered = tsn;
@@ -1978,6 +2240,55 @@ failed_pdapi_express_del:
sctp_add_to_readq(stcb->sctp_ep, stcb,
control,
&stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
+
+ /*
+	 * EY: it was added to the read queue in the previous if block;
+	 * here I should check whether this delivered TSN is out-of-order
+	 * and, if so, update the nr_map
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+		/*
+		 * EY: check that the mapping_array and nr_mapping_array are
+		 * consistent
+		 */
+		if (asoc->mapping_array_base_tsn != asoc->nr_mapping_array_base_tsn) {
+			/*
+			 * printf("EY-IN sctp_process_a_data_chunk(6): Something is wrong, "
+			 *     "the map base tsn\nEY-and nr_map base tsn should be equal.");
+			 */
+			/*
+			 * EY: not 100% sure about the locking; I think we
+			 * don't need the assert below
+			 */
+			/* SCTP_TCB_LOCK_ASSERT(stcb); */
+			/*
+			 * printf("\nEY-Calculating an nr_gap!!\n"
+			 *     "EY-mapping_array_size = %d nr_mapping_array_size = %d\n"
+			 *     "EY-mapping_array_base = %d nr_mapping_array_base = %d\n"
+			 *     "EY-highest_tsn_inside_map = %d highest_tsn_inside_nr_map = %d\n"
+			 *     "EY-TSN = %d nr_gap = %d",
+			 *     asoc->mapping_array_size, asoc->nr_mapping_array_size,
+			 *     asoc->mapping_array_base_tsn, asoc->nr_mapping_array_base_tsn,
+			 *     asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map,
+			 *     tsn, nr_gap);
+			 */
+		}
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
+ if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
+ asoc->highest_tsn_inside_nr_map = tsn;
+ }
} else {
/*
* Special check for when streams are resetting. We
@@ -2185,9 +2496,20 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
int at;
int last_all_ones = 0;
int slide_from, slide_end, lgap, distance;
+
+ /* EY nr_mapping array variables */
+ int nr_at;
+ int nr_last_all_ones = 0;
+ int nr_slide_from, nr_slide_end, nr_lgap, nr_distance;
+
uint32_t old_cumack, old_base, old_highest;
unsigned char aux_array[64];
+	/*
+	 * EY: don't think this is required, but I am imitating the code for
+	 * the map just to be safe.
+	 */
+ unsigned char nr_aux_array[64];
asoc = &stcb->asoc;
at = 0;
@@ -2200,7 +2522,29 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
asoc->mapping_array_size);
else
memcpy(aux_array, asoc->mapping_array, 64);
+ /* EY do the same for nr_mapping_array */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+		if (asoc->nr_mapping_array_size != asoc->mapping_array_size) {
+			/*
+			 * printf("\nEY-IN sack_check method: \nEY-"
+			 *     "The sizes of map and nr_map are inconsistent");
+			 */
+		}
+		if (asoc->nr_mapping_array_base_tsn != asoc->mapping_array_base_tsn) {
+			/*
+			 * printf("\nEY-IN sack_check method VERY CRUCIAL error: \nEY-"
+			 *     "The base tsns of map and nr_map are inconsistent");
+			 */
+		}
+		/* EY: just imitating the mapping_array code above */
+		if (asoc->nr_mapping_array_size < 64)
+			memcpy(nr_aux_array, asoc->nr_mapping_array,
+			    asoc->nr_mapping_array_size);
+		else
+			memcpy(nr_aux_array, asoc->nr_mapping_array, 64);
+ }
/*
* We could probably improve this a small bit by calculating the
* offset of the current cum-ack as the starting point.
@@ -2235,6 +2579,7 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
+ asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
#endif
}
if ((asoc->cumulative_tsn == asoc->highest_tsn_inside_map) && (at >= 8)) {
@@ -2251,6 +2596,17 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
memset(asoc->mapping_array, 0, clr);
/* base becomes one ahead of the cum-ack */
asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
+
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+
+ if (clr > asoc->nr_mapping_array_size)
+ clr = asoc->nr_mapping_array_size;
+
+ memset(asoc->nr_mapping_array, 0, clr);
+ /* base becomes one ahead of the cum-ack */
+ asoc->nr_mapping_array_base_tsn = asoc->cumulative_tsn + 1;
+ asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
+ }
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(old_base, old_cumack, old_highest,
SCTP_MAP_PREPARE_SLIDE);
@@ -2330,6 +2686,103 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
}
}
/*
+	 * EY: if doing nr_sacks, slide the nr_mapping_array accordingly as well
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+
+ nr_at = 0;
+ for (nr_slide_from = 0; nr_slide_from < stcb->asoc.nr_mapping_array_size; nr_slide_from++) {
+
+ if (asoc->nr_mapping_array[nr_slide_from] == 0xff) {
+ nr_at += 8;
+ nr_last_all_ones = 1;
+ } else {
+ /* there is a 0 bit */
+ nr_at += sctp_map_lookup_tab[asoc->nr_mapping_array[nr_slide_from]];
+ nr_last_all_ones = 0;
+ break;
+ }
+ }
+
+ nr_at++;
+
+ if (compare_with_wrap(asoc->cumulative_tsn,
+		    asoc->highest_tsn_inside_nr_map, MAX_TSN) && (nr_at >= 8)) {
+ /* The complete array was completed by a single FR */
+			/* highest becomes the cum-ack */
+ int clr;
+
+ clr = (nr_at >> 3) + 1;
+
+ if (clr > asoc->nr_mapping_array_size)
+ clr = asoc->nr_mapping_array_size;
+
+ memset(asoc->nr_mapping_array, 0, clr);
+ /* base becomes one ahead of the cum-ack */
+ asoc->nr_mapping_array_base_tsn = asoc->cumulative_tsn + 1;
+ asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
+
+ } else if (nr_at >= 8) {
+ /* we can slide the mapping array down */
+			/* Calculate the new byte position we can move down */
+
+ /*
+ * now calculate the ceiling of the move using our
+ * highest TSN value
+ */
+ if (asoc->highest_tsn_inside_nr_map >= asoc->nr_mapping_array_base_tsn) {
+ nr_lgap = asoc->highest_tsn_inside_nr_map -
+ asoc->nr_mapping_array_base_tsn;
+ } else {
+ nr_lgap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) +
+ asoc->highest_tsn_inside_nr_map + 1;
+ }
+ nr_slide_end = nr_lgap >> 3;
+ if (nr_slide_end < nr_slide_from) {
+#ifdef INVARIANTS
+ panic("impossible slide");
+#else
+ printf("impossible slide?\n");
+ return;
+#endif
+ }
+ if (nr_slide_end > asoc->nr_mapping_array_size) {
+#ifdef INVARIANTS
+ panic("would overrun buffer");
+#else
+ printf("Gak, would have overrun map end:%d nr_slide_end:%d\n",
+ asoc->nr_mapping_array_size, nr_slide_end);
+ nr_slide_end = asoc->nr_mapping_array_size;
+#endif
+ }
+ nr_distance = (nr_slide_end - nr_slide_from) + 1;
+
+ if (nr_distance + nr_slide_from > asoc->nr_mapping_array_size ||
+ nr_distance < 0) {
+ /*
+ * Here we do NOT slide forward the array so
+ * that hopefully when more data comes in to
+ * fill it up we will be able to slide it
+ * forward. Really I don't think this should
+ * happen :-0
+ */
+ ;
+ } else {
+ int ii;
+
+ for (ii = 0; ii < nr_distance; ii++) {
+ asoc->nr_mapping_array[ii] =
+ asoc->nr_mapping_array[nr_slide_from + ii];
+ }
+ for (ii = nr_distance; ii <= nr_slide_end; ii++) {
+ asoc->nr_mapping_array[ii] = 0;
+ }
+ asoc->nr_mapping_array_base_tsn += (nr_slide_from << 3);
+ }
+ }
+ }
+ /*
* Now we need to see if we need to queue a sack or just start the
* timer (if allowed).
*/
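The NR slide above mirrors what the function already does for mapping_array: once the leading bytes of the bitmap are all ones (every TSN present), they can be dropped and the base TSN advanced by eight per byte. A minimal model of that shift, with hypothetical names and none of the kernel's INVARIANTS checks:

	#include <stdint.h>
	#include <string.h>

	/*
	 * Minimal model of the bitmap slide: drop 'slide_from' fully-acked
	 * leading bytes, zero the vacated tail, and advance the base TSN by
	 * 8 bits per byte dropped.
	 */
	static void
	slide_map(uint8_t *map, size_t map_size, uint32_t *base_tsn, size_t slide_from)
	{
		memmove(map, map + slide_from, map_size - slide_from);
		memset(map + (map_size - slide_from), 0, slide_from);
		*base_tsn += (uint32_t)(slide_from << 3);
	}

The kernel version additionally refuses impossible slides and clamps slide_end to the array size, as the hunk above shows.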
@@ -2345,7 +2798,14 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
}
sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
- sctp_send_sack(stcb);
+ /*
+	 * EY: if nr_sacks are in use, send an nr-sack; otherwise send a sack
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
+ sctp_send_nr_sack(stcb);
+ else
+ sctp_send_sack(stcb);
} else {
int is_a_gap;
@@ -2398,7 +2858,14 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
* duplicates.
*/
(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
- sctp_send_sack(stcb);
+ /*
+					 * EY: if nr_sacks are in use, send an
+					 * nr-sack; otherwise send a sack
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
+ sctp_send_nr_sack(stcb);
+ else
+ sctp_send_sack(stcb);
}
} else {
if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
@@ -2608,6 +3075,7 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
case SCTP_INITIATION:
case SCTP_INITIATION_ACK:
case SCTP_SELECTIVE_ACK:
+ case SCTP_NR_SELECTIVE_ACK: /* EY */
case SCTP_HEARTBEAT_REQUEST:
case SCTP_HEARTBEAT_ACK:
case SCTP_ABORT_ASSOCIATION:
@@ -2738,7 +3206,14 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
}
- sctp_send_sack(stcb);
+ /*
+			 * EY: if nr_sacks are in use, send an nr-sack;
+			 * otherwise send a sack
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
+ sctp_send_nr_sack(stcb);
+ else
+ sctp_send_sack(stcb);
} else {
if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
sctp_timer_start(SCTP_TIMER_TYPE_RECV,
@@ -2794,7 +3269,7 @@ sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct
for (i = 0; i < num_seg; i++) {
frag_strt = ntohs(frag->start);
frag_end = ntohs(frag->end);
- /* some sanity checks on the fargment offsets */
+ /* some sanity checks on the fragment offsets */
if (frag_strt > frag_end) {
/* this one is malformed, skip */
frag++;
@@ -2829,7 +3304,7 @@ sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct
* reset the queue this will cause extra
* hunting but hey, they chose the
* performance hit when they failed to order
- * there gaps..
+ * their gaps
*/
tp1 = TAILQ_FIRST(&asoc->sent_queue);
}
@@ -3136,6 +3611,7 @@ sctp_check_for_revoked(struct sctp_tcb *stcb,
}
}
+
static void
sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved)
@@ -3650,51 +4126,6 @@ sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
/* advance PeerAckPoint goes forward */
asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
a_adv = tp1;
- /*
- * we don't want to de-queue it here. Just wait for
- * the next peer SACK to come with a new cumTSN and
- * then the chunk will be droped in the normal
- * fashion.
- */
- if (tp1->data) {
- sctp_free_bufspace(stcb, asoc, tp1, 1);
- /*
- * Maybe there should be another
- * notification type
- */
- sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
- (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
- tp1, SCTP_SO_NOT_LOCKED);
- sctp_m_freem(tp1->data);
- tp1->data = NULL;
- if (stcb->sctp_socket) {
-#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
- struct socket *so;
-
- so = SCTP_INP_SO(stcb->sctp_ep);
- atomic_add_int(&stcb->asoc.refcnt, 1);
- SCTP_TCB_UNLOCK(stcb);
- SCTP_SOCKET_LOCK(so, 1);
- SCTP_TCB_LOCK(stcb);
- atomic_subtract_int(&stcb->asoc.refcnt, 1);
- if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
- /*
- * assoc was freed while we
- * were unlocked
- */
- SCTP_SOCKET_UNLOCK(so, 1);
- return (NULL);
- }
-#endif
- sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
-#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
- SCTP_SOCKET_UNLOCK(so, 1);
-#endif
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
- sctp_wakeup_log(stcb, tp1->rec.data.TSN_seq, 1, SCTP_WAKESND_FROM_FWDTSN);
- }
- }
- }
} else {
/*
* If it is still in RESEND we can advance no
@@ -4268,6 +4699,38 @@ again:
stcb->sctp_ep, stcb, asoc->primary_destination);
}
}
+	/* PR-SCTP issues need to be addressed too */
+ if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
+ struct sctp_tmit_chunk *lchk;
+ uint32_t old_adv_peer_ack_point;
+
+ old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
+ lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
+ /* C3. See if we need to send a Fwd-TSN */
+ if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
+ MAX_TSN)) {
+ /*
+ * ISSUE with ECN, see FWD-TSN processing for notes
+ * on issues that will occur when the ECN NONCE
+ * stuff is put into SCTP for cross checking.
+ */
+ if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
+ MAX_TSN)) {
+ send_forward_tsn(stcb, asoc);
+ /*
+ * ECN Nonce: Disable Nonce Sum check when
+ * FWD TSN is sent and store resync tsn
+ */
+ asoc->nonce_sum_check = 0;
+ asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
+ }
+ }
+ if (lchk) {
+ /* Assure a timer is up */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, lchk->whoTo);
+ }
+ }
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
rwnd,
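Note what moved here: the old C2/C3 block (removed further down) sent a FWD-TSN whenever the advanced peer-ack point was beyond the cum-ack, while the relocated block additionally requires that sctp_try_advance_peer_ack_point() actually moved the point, matching the commit message. Condensed into a standalone predicate (tsn_gt() models compare_with_wrap(); the names are hypothetical):

	#include <stdbool.h>
	#include <stdint.h>

	/* Serial-number "greater than" on 32-bit TSNs, modeling compare_with_wrap(). */
	static bool
	tsn_gt(uint32_t a, uint32_t b)
	{
		return (a != b && (uint32_t)(a - b) < 0x80000000U);
	}

	/* A FWD-TSN is warranted only if the advanced peer-ack point is beyond
	 * the cum-ack AND has progressed since the previous attempt. */
	static bool
	should_send_fwd_tsn(uint32_t adv, uint32_t old_adv, uint32_t cumack)
	{
		return (tsn_gt(adv, cumack) && tsn_gt(adv, old_adv));
	}

The timer restart stays outside that predicate on purpose: it is armed whenever sctp_try_advance_peer_ack_point() returned a chunk, whether or not anything was sent.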
@@ -4991,34 +5454,6 @@ done_with_it:
if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
asoc->advanced_peer_ack_point = cum_ack;
}
- /* C2. try to further move advancedPeerAckPoint ahead */
- if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
- struct sctp_tmit_chunk *lchk;
-
- lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
- /* C3. See if we need to send a Fwd-TSN */
- if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
- MAX_TSN)) {
- /*
- * ISSUE with ECN, see FWD-TSN processing for notes
- * on issues that will occur when the ECN NONCE
- * stuff is put into SCTP for cross checking.
- */
- send_forward_tsn(stcb, asoc);
-
- /*
- * ECN Nonce: Disable Nonce Sum check when FWD TSN
- * is sent and store resync tsn
- */
- asoc->nonce_sum_check = 0;
- asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
- if (lchk) {
- /* Assure a timer is up */
- sctp_timer_start(SCTP_TIMER_TYPE_SEND,
- stcb->sctp_ep, stcb, lchk->whoTo);
- }
- }
- }
/* JRS - Use the congestion control given in the CC module */
asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
@@ -5180,6 +5615,38 @@ again:
done_once = 1;
goto again;
}
+ /* C2. try to further move advancedPeerAckPoint ahead */
+ if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
+ struct sctp_tmit_chunk *lchk;
+ uint32_t old_adv_peer_ack_point;
+
+ old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
+ lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
+ /* C3. See if we need to send a Fwd-TSN */
+ if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
+ MAX_TSN)) {
+ /*
+ * ISSUE with ECN, see FWD-TSN processing for notes
+ * on issues that will occur when the ECN NONCE
+ * stuff is put into SCTP for cross checking.
+ */
+ if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
+ MAX_TSN)) {
+ send_forward_tsn(stcb, asoc);
+ /*
+ * ECN Nonce: Disable Nonce Sum check when
+ * FWD TSN is sent and store resync tsn
+ */
+ asoc->nonce_sum_check = 0;
+ asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
+ }
+ }
+ if (lchk) {
+ /* Assure a timer is up */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, lchk->whoTo);
+ }
+ }
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
a_rwnd,
@@ -5212,6 +5679,9 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
struct sctp_association *asoc;
int tt;
+ /* EY -used to calculate nr_gap information */
+ uint32_t nr_tsn, nr_gap;
+
asoc = &stcb->asoc;
tt = strmin->last_sequence_delivered;
/*
@@ -5230,9 +5700,85 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
sctp_ucount_decr(asoc->cnt_on_all_streams);
/* deliver it to at least the delivery-q */
if (stcb->sctp_socket) {
+ /* EY need the tsn info for calculating nr */
+ nr_tsn = ctl->sinfo_tsn;
sctp_add_to_readq(stcb->sctp_ep, stcb,
ctl,
&stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
+ /*
+				 * EY: this chunk should be tagged NR-gapped;
+				 * compute the gap and mark this TSN (nr_tsn)
+				 * as NR
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+
+ if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
+ nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
+ } else {
+ nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
+ }
+ if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
+ (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
+ /*
+ * EY These should never
+ * happen- explained before
+ */
+ } else {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
+ if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ asoc->highest_tsn_inside_nr_map = nr_tsn;
+ }
+
+				if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap)) {
+					/*
+					 * printf("In sctp_kick_prsctp_reorder_queue(7): "
+					 *     "Something wrong, the TSN to be tagged\n"
+					 *     "as NR is not even in the mapping_array, "
+					 *     "or map and nr_map are inconsistent");
+					 */
+					/*
+					 * EY: not 100% sure about the locking;
+					 * don't think it's required
+					 */
+					/* SCTP_TCB_LOCK_ASSERT(stcb); */
+					/*
+					 * printf("\nCalculating an nr_gap!!\n"
+					 *     "mapping_array_size = %d nr_mapping_array_size = %d\n"
+					 *     "mapping_array_base = %d nr_mapping_array_base = %d\n"
+					 *     "highest_tsn_inside_map = %d highest_tsn_inside_nr_map = %d\n"
+					 *     "TSN = %d nr_gap = %d",
+					 *     asoc->mapping_array_size, asoc->nr_mapping_array_size,
+					 *     asoc->mapping_array_base_tsn, asoc->nr_mapping_array_base_tsn,
+					 *     asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map,
+					 *     tsn, nr_gap);
+					 */
+				}
+ }
}
} else {
/* no more delivery now. */
@@ -5257,9 +5803,86 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
/* deliver it to at least the delivery-q */
strmin->last_sequence_delivered = ctl->sinfo_ssn;
if (stcb->sctp_socket) {
+ /* EY */
+ nr_tsn = ctl->sinfo_tsn;
sctp_add_to_readq(stcb->sctp_ep, stcb,
ctl,
&stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
+ /*
+				 * EY: this chunk should be tagged NR-gapped;
+				 * compute the gap and mark this TSN (nr_tsn)
+				 * as NR
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+
+ if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
+ nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
+ } else {
+ nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
+ }
+ if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
+ (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
+ /*
+ * EY These should never
+ * happen, explained before
+ */
+ } else {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
+ if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ asoc->highest_tsn_inside_nr_map = nr_tsn;
+ }
+
+				if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap)) {
+					/*
+					 * printf("In sctp_kick_prsctp_reorder_queue(8): "
+					 *     "Something wrong, the TSN to be tagged\n"
+					 *     "as NR is not even in the mapping_array, "
+					 *     "or map and nr_map are inconsistent");
+					 */
+					/*
+					 * EY: not 100% sure about the locking;
+					 * don't think it's required
+					 */
+					/* SCTP_TCB_LOCK_ASSERT(stcb); */
+					/*
+					 * printf("\nCalculating an nr_gap!!\n"
+					 *     "mapping_array_size = %d nr_mapping_array_size = %d\n"
+					 *     "mapping_array_base = %d nr_mapping_array_base = %d\n"
+					 *     "highest_tsn_inside_map = %d highest_tsn_inside_nr_map = %d\n"
+					 *     "TSN = %d nr_gap = %d",
+					 *     asoc->mapping_array_size, asoc->nr_mapping_array_size,
+					 *     asoc->mapping_array_base_tsn, asoc->nr_mapping_array_base_tsn,
+					 *     asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map,
+					 *     tsn, nr_gap);
+					 */
+				}
+ }
}
tt = strmin->last_sequence_delivered + 1;
} else {
@@ -5324,6 +5947,12 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
MAX_TSN)) {
asoc->highest_tsn_inside_map = new_cum_tsn;
+ /* EY nr_mapping_array version of the above */
+ /*
+ * if(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
+ * asoc->peer_supports_nr_sack)
+ */
+ asoc->highest_tsn_inside_nr_map = new_cum_tsn;
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
@@ -5382,7 +6011,19 @@ slide_out:
cumack_set_flag = 1;
asoc->mapping_array_base_tsn = new_cum_tsn + 1;
asoc->cumulative_tsn = asoc->highest_tsn_inside_map = new_cum_tsn;
-
+ /* EY - nr_sack: nr_mapping_array version of the above */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+ memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
+ asoc->nr_mapping_array_base_tsn = new_cum_tsn + 1;
+ asoc->highest_tsn_inside_nr_map = new_cum_tsn;
+ if (asoc->nr_mapping_array_size != asoc->mapping_array_size) {
+			/*
+			 * printf("IN sctp_handle_forward_tsn: Something is wrong, "
+			 *     "the sizes of map and nr_map should be equal!");
+			 */
+ }
+ }
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
@@ -5569,3 +6210,2000 @@ slide_out:
sctp_deliver_reasm_check(stcb, &stcb->asoc);
}
}
+
+/* EY: fully identical to sctp_express_handle_sack; duplicated only for the naming convention */
+void
+sctp_express_handle_nr_sack(struct sctp_tcb *stcb, uint32_t cumack,
+ uint32_t rwnd, int nonce_sum_flag, int *abort_now)
+{
+ struct sctp_nets *net;
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *tp1, *tp2;
+ uint32_t old_rwnd;
+ int win_probe_recovery = 0;
+ int win_probe_recovered = 0;
+ int j, done_once = 0;
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
+ sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
+ rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
+ stcb->asoc.cumack_log_at++;
+ if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
+ stcb->asoc.cumack_log_at = 0;
+ }
+#endif
+ asoc = &stcb->asoc;
+ old_rwnd = asoc->peers_rwnd;
+ if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
+ /* old ack */
+ return;
+ } else if (asoc->last_acked_seq == cumack) {
+ /* Window update sack */
+ asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
+ (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ if (asoc->peers_rwnd > old_rwnd) {
+ goto again;
+ }
+ return;
+ }
+ /* First setup for CC stuff */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->prev_cwnd = net->cwnd;
+ net->net_ack = 0;
+ net->net_ack2 = 0;
+
+ /*
+ * CMT: Reset CUC and Fast recovery algo variables before
+ * SACK processing
+ */
+ net->new_pseudo_cumack = 0;
+ net->will_exit_fast_recovery = 0;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
+ uint32_t send_s;
+
+ if (!TAILQ_EMPTY(&asoc->sent_queue)) {
+ tp1 = TAILQ_LAST(&asoc->sent_queue,
+ sctpchunk_listhead);
+ send_s = tp1->rec.data.TSN_seq + 1;
+ } else {
+ send_s = asoc->sending_seq;
+ }
+ if ((cumack == send_s) ||
+ compare_with_wrap(cumack, send_s, MAX_TSN)) {
+#ifndef INVARIANTS
+ struct mbuf *oper;
+
+#endif
+#ifdef INVARIANTS
+ panic("Impossible sack 1");
+#else
+ *abort_now = 1;
+ /* XXX */
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+ return;
+#endif
+ }
+ }
+ asoc->this_sack_highest_gap = cumack;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INDATA,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
+ /* process the new consecutive TSN first */
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ while (tp1) {
+ tp2 = TAILQ_NEXT(tp1, sctp_next);
+ if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
+ MAX_TSN) ||
+ cumack == tp1->rec.data.TSN_seq) {
+ if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
+ printf("Warning, an unsent is now acked?\n");
+ }
+ /*
+ * ECN Nonce: Add the nonce to the sender's
+ * nonce sum
+ */
+ asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
+ if (tp1->sent < SCTP_DATAGRAM_ACKED) {
+ /*
+ * If it is less than ACKED, it is
+ * now no-longer in flight. Higher
+ * values may occur during marking
+ */
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uintptr_t) tp1->whoTo,
+ tp1->rec.data.TSN_seq);
+ }
+ sctp_flight_size_decrease(tp1);
+ /* sa_ignore NO_NULL_CHK */
+ sctp_total_flight_decrease(stcb, tp1);
+ }
+ tp1->whoTo->net_ack += tp1->send_size;
+ if (tp1->snd_count < 2) {
+					/* True non-retransmitted chunk */
+ tp1->whoTo->net_ack2 +=
+ tp1->send_size;
+
+ /* update RTO too? */
+ if (tp1->do_rtt) {
+ tp1->whoTo->RTO =
+					/* sa_ignore NO_NULL_CHK */
+ sctp_calculate_rto(stcb,
+ asoc, tp1->whoTo,
+ &tp1->sent_rcv_time,
+ sctp_align_safe_nocopy);
+ tp1->do_rtt = 0;
+ }
+ }
+ /*
+ * CMT: CUCv2 algorithm. From the
+ * cumack'd TSNs, for each TSN being
+ * acked for the first time, set the
+ * following variables for the
+ * corresp destination.
+ * new_pseudo_cumack will trigger a
+ * cwnd update.
+ * find_(rtx_)pseudo_cumack will
+ * trigger search for the next
+ * expected (rtx-)pseudo-cumack.
+ */
+ tp1->whoTo->new_pseudo_cumack = 1;
+ tp1->whoTo->find_pseudo_cumack = 1;
+ tp1->whoTo->find_rtx_pseudo_cumack = 1;
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ /* sa_ignore NO_NULL_CHK */
+ sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
+ }
+ }
+ if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_decr(asoc->sent_queue_retran_cnt);
+ }
+ if (tp1->rec.data.chunk_was_revoked) {
+ /* deflate the cwnd */
+ tp1->whoTo->cwnd -= tp1->book_size;
+ tp1->rec.data.chunk_was_revoked = 0;
+ }
+ tp1->sent = SCTP_DATAGRAM_ACKED;
+ TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
+ if (tp1->data) {
+ /* sa_ignore NO_NULL_CHK */
+ sctp_free_bufspace(stcb, asoc, tp1, 1);
+ sctp_m_freem(tp1->data);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(asoc->last_acked_seq,
+ cumack,
+ tp1->rec.data.TSN_seq,
+ 0,
+ 0,
+ SCTP_LOG_FREE_SENT);
+ }
+ tp1->data = NULL;
+ asoc->sent_queue_cnt--;
+ sctp_free_a_chunk(stcb, tp1);
+ tp1 = tp2;
+ } else {
+ break;
+ }
+ }
+
+ }
+ /* sa_ignore NO_NULL_CHK */
+ if (stcb->sctp_socket) {
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+
+ SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
+ /* sa_ignore NO_NULL_CHK */
+ sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
+ }
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ /* assoc was freed while we were unlocked */
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+#endif
+ sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
+ sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
+ }
+ }
+
+ /* JRS - Use the congestion control given in the CC module */
+ if (asoc->last_acked_seq != cumack)
+ asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
+
+ asoc->last_acked_seq = cumack;
+
+ if (TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* nothing left in-flight */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->flight_size = 0;
+ net->partial_bytes_acked = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ }
+ /* Fix up the a-p-a-p for future PR-SCTP sends */
+ if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
+ asoc->advanced_peer_ack_point = cumack;
+ }
+ /* ECN Nonce updates */
+ if (asoc->ecn_nonce_allowed) {
+ if (asoc->nonce_sum_check) {
+ if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
+ if (asoc->nonce_wait_for_ecne == 0) {
+ struct sctp_tmit_chunk *lchk;
+
+ lchk = TAILQ_FIRST(&asoc->send_queue);
+ asoc->nonce_wait_for_ecne = 1;
+ if (lchk) {
+ asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
+ } else {
+ asoc->nonce_wait_tsn = asoc->sending_seq;
+ }
+ } else {
+ if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
+ (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
+ /*
+ * Misbehaving peer. We need
+ * to react to this guy
+ */
+ asoc->ecn_allowed = 0;
+ asoc->ecn_nonce_allowed = 0;
+ }
+ }
+ }
+ } else {
+ /* See if Resynchronization Possible */
+ if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
+ asoc->nonce_sum_check = 1;
+ /*
+				 * Now we must calculate what the base is. We
+				 * do this based on two things: we know the
+				 * totals for all the segments gap-acked in
+				 * the SACK (none), and we also know the
+				 * SACK's nonce sum, in nonce_sum_flag. So we
+				 * can build a truth table to back-calculate
+				 * the new value of asoc->nonce_sum_expect_base:
+				 *
+				 * SACK-flag  Seg-Sums  Base
+				 *     0         0       0
+				 *     1         0       1
+				 *     0         1       1
+				 *     1         1       0
+ */
+ asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
+ }
+ }
+ }
+ /* RWND update */
+ asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
+ (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ if (asoc->peers_rwnd > old_rwnd) {
+ win_probe_recovery = 1;
+ }
+ /* Now assure a timer where data is queued at */
+again:
+ j = 0;
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (win_probe_recovery && (net->window_probe)) {
+ net->window_probe = 0;
+ win_probe_recovered = 1;
+ /*
+ * Find first chunk that was used with window probe
+ * and clear the sent
+ */
+ /* sa_ignore FREED_MEMORY */
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if (tp1->window_probe) {
+ /* move back to data send queue */
+ sctp_window_probe_recovery(stcb, asoc, net, tp1);
+ break;
+ }
+ }
+ }
+ if (net->flight_size) {
+ int to_ticks;
+
+ if (net->RTO == 0) {
+ to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = MSEC_TO_TICKS(net->RTO);
+ }
+ j++;
+ (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
+ sctp_timeout_handler, &net->rxt_timer);
+ } else {
+ if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
+ if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
+ }
+ }
+ }
+ }
+ if ((j == 0) &&
+ (!TAILQ_EMPTY(&asoc->sent_queue)) &&
+ (asoc->sent_queue_retran_cnt == 0) &&
+ (win_probe_recovered == 0) &&
+ (done_once == 0)) {
+ /* huh, this should not happen */
+ sctp_fs_audit(asoc);
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->flight_size = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ asoc->sent_queue_retran_cnt = 0;
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ sctp_flight_size_increase(tp1);
+ sctp_total_flight_increase(stcb, tp1);
+ } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ asoc->sent_queue_retran_cnt++;
+ }
+ }
+ done_once = 1;
+ goto again;
+ }
+ /**********************************/
+ /* Now what about shutdown issues */
+ /**********************************/
+ if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* nothing left on sendqueue.. consider done */
+ /* clean up */
+ if ((asoc->stream_queue_cnt == 1) &&
+ ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
+ (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
+ (asoc->locked_on_sending)
+ ) {
+ struct sctp_stream_queue_pending *sp;
+
+ /*
+ * I may be in a state where we got all across.. but
+ * cannot write more due to a shutdown... we abort
+ * since the user did not indicate EOR in this case.
+ * The sp will be cleaned during free of the asoc.
+ */
+ sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
+ sctp_streamhead);
+ if ((sp) && (sp->length == 0)) {
+ /* Let cleanup code purge it */
+ if (sp->msg_is_complete) {
+ asoc->stream_queue_cnt--;
+ } else {
+ asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+ asoc->locked_on_sending = NULL;
+ asoc->stream_queue_cnt--;
+ }
+ }
+ }
+ if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
+ /* Need to abort here */
+ struct mbuf *oper;
+
+ abort_out_now:
+ *abort_now = 1;
+ /* XXX */
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
+ } else {
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_stop_timers_for_shutdown(stcb);
+ sctp_send_shutdown(stcb,
+ stcb->asoc.primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ }
+ } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
+ goto abort_out_now;
+ }
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_send_shutdown_ack(stcb,
+ stcb->asoc.primary_destination);
+
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
+ rwnd,
+ stcb->asoc.peers_rwnd,
+ stcb->asoc.total_flight,
+ stcb->asoc.total_output_queue_size);
+ }
+}
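The ECN-nonce resynchronization above reduces its truth table to an XOR: with no gap-acked segments contributing (seg-sums of zero on this express path), the new expect-base is just the SACK's nonce-sum flag, which is exactly what (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM computes. A tiny standalone check of the table (the 0x01 flag value is an assumption here):

	#include <assert.h>
	#include <stdint.h>

	#define SCTP_SACK_NONCE_SUM 0x01	/* assumption: the 1-bit nonce-sum flag */

	int
	main(void)
	{
		uint32_t flag, segs;

		/* Truth table rows (flag, seg-sums, base): (0,0,0) (1,0,1)
		 * (0,1,1) (1,1,0), i.e. base = seg_sums XOR flag. */
		for (flag = 0; flag <= 1; flag++)
			for (segs = 0; segs <= 1; segs++)
				assert(((segs ^ flag) & SCTP_SACK_NONCE_SUM) ==
				    ((segs + flag) % 2));
		return (0);
	}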
+
+/* EY! nr_sack version of sctp_handle_segments; nr-gapped TSNs get removed from the RtxQ in this method */
+static void
+sctp_handle_nr_sack_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_nr_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
+ uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
+ uint32_t num_seg, uint32_t num_nr_seg, int *ecn_seg_sums)
+{
+ /************************************************/
+ /* process fragments and update sendqueue */
+ /************************************************/
+ struct sctp_nr_sack *nr_sack;
+ struct sctp_gap_ack_block *frag, block;
+ struct sctp_nr_gap_ack_block *nr_frag, nr_block;
+ struct sctp_tmit_chunk *tp1;
+ uint32_t i, j, all_bit;
+ int wake_him = 0;
+ uint32_t theTSN;
+ int num_frs = 0;
+
+ uint16_t frag_strt, frag_end, primary_flag_set;
+ uint16_t nr_frag_strt, nr_frag_end;
+
+ uint32_t last_frag_high;
+ uint32_t last_nr_frag_high;
+
+ all_bit = ch->ch.chunk_flags & SCTP_NR_SACK_ALL_BIT;
+
+ /*
+ * @@@ JRI : TODO: This flag is not used anywhere .. remove?
+ */
+ if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
+ primary_flag_set = 1;
+ } else {
+ primary_flag_set = 0;
+ }
+ nr_sack = &ch->nr_sack;
+
+ /*
+	 * EY: nr_gaps are processed similarly, by coming back to this
+	 * position if the all bit is set
+ */
+ frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
+ *offset += sizeof(block);
+ if (frag == NULL) {
+ return;
+ }
+ tp1 = NULL;
+ last_frag_high = 0;
+ for (i = 0; i < num_seg; i++) {
+ frag_strt = ntohs(frag->start);
+ frag_end = ntohs(frag->end);
+		/* some sanity checks on the fragment offsets */
+ if (frag_strt > frag_end) {
+ /* this one is malformed, skip */
+ frag++;
+ continue;
+ }
+ if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
+ MAX_TSN))
+ *biggest_tsn_acked = frag_end + last_tsn;
+
+ /* mark acked dgs and find out the highestTSN being acked */
+ if (tp1 == NULL) {
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+
+ /* save the locations of the last frags */
+ last_frag_high = frag_end + last_tsn;
+ } else {
+ /*
+			 * now let's see if we need to reset the queue due to
+			 * an out-of-order SACK fragment
+ */
+ if (compare_with_wrap(frag_strt + last_tsn,
+ last_frag_high, MAX_TSN)) {
+ /*
+ * if the new frag starts after the last TSN
+ * frag covered, we are ok and this one is
+ * beyond the last one
+ */
+ ;
+ } else {
+ /*
+				 * ok, they have reset us, so we need to reset
+				 * the queue; this will cause extra hunting,
+				 * but hey, they chose the performance hit when
+				 * they failed to order their gaps
+ */
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ }
+ last_frag_high = frag_end + last_tsn;
+ }
+ for (j = frag_strt; j <= frag_end; j++) {
+ theTSN = j + last_tsn;
+ while (tp1) {
+ if (tp1->rec.data.doing_fast_retransmit)
+ num_frs++;
+
+ /*
+ * CMT: CUCv2 algorithm. For each TSN being
+ * processed from the sent queue, track the
+ * next expected pseudo-cumack, or
+ * rtx_pseudo_cumack, if required. Separate
+ * cumack trackers for first transmissions,
+ * and retransmissions.
+ */
+ if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
+ (tp1->snd_count == 1)) {
+ tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
+ tp1->whoTo->find_pseudo_cumack = 0;
+ }
+ if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
+ (tp1->snd_count > 1)) {
+ tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
+ tp1->whoTo->find_rtx_pseudo_cumack = 0;
+ }
+ if (tp1->rec.data.TSN_seq == theTSN) {
+ if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
+ /*
+ * must be held until
+ * cum-ack passes
+ */
+ /*
+ * ECN Nonce: Add the nonce
+ * value to the sender's
+ * nonce sum
+ */
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ /*-
+ * If it is less than RESEND, it is
+ * now no-longer in flight.
+ * Higher values may already be set
+ * via previous Gap Ack Blocks...
+ * i.e. ACKED or RESEND.
+ */
+ if (compare_with_wrap(tp1->rec.data.TSN_seq,
+ *biggest_newly_acked_tsn, MAX_TSN)) {
+ *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
+ }
+ /*
+							 * CMT: SFR algo (and HTNA) - set
+							 * saw_newack to 1 for the dest being
+							 * newly acked; update
+							 * this_sack_highest_newack if
+							 * appropriate.
+ */
+ if (tp1->rec.data.chunk_was_revoked == 0)
+ tp1->whoTo->saw_newack = 1;
+
+ if (compare_with_wrap(tp1->rec.data.TSN_seq,
+ tp1->whoTo->this_sack_highest_newack,
+ MAX_TSN)) {
+ tp1->whoTo->this_sack_highest_newack =
+ tp1->rec.data.TSN_seq;
+ }
+ /*
+							 * CMT DAC algo: also update
+							 * this_sack_lowest_newack
+ */
+ if (*this_sack_lowest_newack == 0) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(*this_sack_lowest_newack,
+ last_tsn,
+ tp1->rec.data.TSN_seq,
+ 0,
+ 0,
+ SCTP_LOG_TSN_ACKED);
+ }
+ *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
+ }
+ /*
+							 * CMT: CUCv2 algorithm. If the
+							 * (rtx-)pseudo-cumack for the
+							 * corresponding dest is being acked,
+							 * then we have a new
+							 * (rtx-)pseudo-cumack. Set
+							 * new_(rtx_)pseudo_cumack to TRUE so
+							 * that the cwnd for this dest can be
+							 * updated. Also trigger a search for
+							 * the next expected
+							 * (rtx-)pseudo-cumack. Separate
+							 * pseudo_cumack trackers for first
+							 * transmissions and retransmissions.
+ */
+ if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
+ if (tp1->rec.data.chunk_was_revoked == 0) {
+ tp1->whoTo->new_pseudo_cumack = 1;
+ }
+ tp1->whoTo->find_pseudo_cumack = 1;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
+ }
+ if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
+ if (tp1->rec.data.chunk_was_revoked == 0) {
+ tp1->whoTo->new_pseudo_cumack = 1;
+ }
+ tp1->whoTo->find_rtx_pseudo_cumack = 1;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(*biggest_newly_acked_tsn,
+ last_tsn,
+ tp1->rec.data.TSN_seq,
+ frag_strt,
+ frag_end,
+ SCTP_LOG_TSN_ACKED);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uintptr_t) tp1->whoTo,
+ tp1->rec.data.TSN_seq);
+ }
+ sctp_flight_size_decrease(tp1);
+ sctp_total_flight_decrease(stcb, tp1);
+
+ tp1->whoTo->net_ack += tp1->send_size;
+ if (tp1->snd_count < 2) {
+						/* True non-retransmitted chunk */
+						tp1->whoTo->net_ack2 += tp1->send_size;
+
+						/* update RTO too? */
+ if (tp1->do_rtt) {
+ tp1->whoTo->RTO =
+ sctp_calculate_rto(stcb,
+ asoc,
+ tp1->whoTo,
+ &tp1->sent_rcv_time,
+ sctp_align_safe_nocopy);
+ tp1->do_rtt = 0;
+ }
+ }
+ }
+ if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
+ (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
+ (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
+ if (compare_with_wrap(tp1->rec.data.TSN_seq,
+ asoc->this_sack_highest_gap,
+ MAX_TSN)) {
+ asoc->this_sack_highest_gap =
+ tp1->rec.data.TSN_seq;
+ }
+ if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_decr(asoc->sent_queue_retran_cnt);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xB2,
+ (asoc->sent_queue_retran_cnt & 0x000000ff));
+#endif
+ }
+ }
+ /*
+ * All chunks NOT UNSENT
+ * fall through here and are
+ * marked
+ */
+ tp1->sent = SCTP_DATAGRAM_MARKED;
+ if (tp1->rec.data.chunk_was_revoked) {
+ /* deflate the cwnd */
+ tp1->whoTo->cwnd -= tp1->book_size;
+ tp1->rec.data.chunk_was_revoked = 0;
+ }
+ /*
+					 * EY: if the all bit is set, this
+					 * TSN is nr-marked
+ */
+ if (all_bit) {
+ tp1->sent = SCTP_DATAGRAM_NR_MARKED;
+						/* TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); */
+						if (tp1->data) {
+							/* sa_ignore NO_NULL_CHK */
+							sctp_free_bufspace(stcb, asoc, tp1, 1);
+							sctp_m_freem(tp1->data);
+						}
+						tp1->data = NULL;
+						/* asoc->sent_queue_cnt--; */
+						/* sctp_free_a_chunk(stcb, tp1); */
+ wake_him++;
+ }
+ }
+ break;
+ } /* if (tp1->TSN_seq == theTSN) */
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
+ MAX_TSN))
+ break;
+
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ } /* end while (tp1) */
+ } /* end for (j = fragStart */
+ frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
+ *offset += sizeof(block);
+ if (frag == NULL) {
+ break;
+ }
+ }
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ if (num_frs)
+ sctp_log_fr(*biggest_tsn_acked,
+ *biggest_newly_acked_tsn,
+ last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
+ }
+ /*
+	 * EY: if the all bit is not set, separate loops are needed to
+	 * identify the nr TSNs
+ */
+ if (!all_bit) {
+
+ nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_nr_gap_ack_block), (uint8_t *) & nr_block);
+ *offset += sizeof(nr_block);
+
+ if (nr_frag == NULL) {
+ return;
+ }
+ tp1 = NULL;
+ last_nr_frag_high = 0;
+
+ for (i = 0; i < num_nr_seg; i++) {
+
+ nr_frag_strt = ntohs(nr_frag->start);
+ nr_frag_end = ntohs(nr_frag->end);
+
+			/* some sanity checks on the nr fragment offsets */
+ if (nr_frag_strt > nr_frag_end) {
+ /* this one is malformed, skip */
+ nr_frag++;
+ continue;
+ }
+ /*
+ * mark acked dgs and find out the highestTSN being
+ * acked
+ */
+ if (tp1 == NULL) {
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+
+ /* save the locations of the last frags */
+ last_nr_frag_high = nr_frag_end + last_tsn;
+ } else {
+ /*
+				 * now let's see if we need to reset the
+				 * queue due to an out-of-order SACK fragment
+ */
+ if (compare_with_wrap(nr_frag_strt + last_tsn,
+ last_nr_frag_high, MAX_TSN)) {
+ /*
+ * if the new frag starts after the
+ * last TSN frag covered, we are ok
+ * and this one is beyond the last
+ * one
+ */
+ ;
+ } else {
+ /*
+					 * ok, they have reset us, so we need
+					 * to reset the queue; this will cause
+					 * extra hunting, but hey, they chose
+					 * the performance hit when they failed
+					 * to order their gaps
+ */
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ }
+ last_nr_frag_high = nr_frag_end + last_tsn;
+ }
+
+			/* walk the whole block, including its final TSN */
+			for (j = nr_frag_strt + last_tsn; !compare_with_wrap(j, nr_frag_end + last_tsn, MAX_TSN); j++) {
+ while (tp1) {
+ if (tp1->rec.data.TSN_seq == j) {
+ if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
+ tp1->sent = SCTP_DATAGRAM_NR_MARKED;
+						/* TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); */
+						if (tp1->data) {
+							/* sa_ignore NO_NULL_CHK */
+							sctp_free_bufspace(stcb, asoc, tp1, 1);
+							sctp_m_freem(tp1->data);
+						}
+						tp1->data = NULL;
+						/* asoc->sent_queue_cnt--; */
+						/* sctp_free_a_chunk(stcb, tp1); */
+ wake_him++;
+ }
+ break;
+ } /* if (tp1->TSN_seq == j) */
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
+ MAX_TSN))
+ break;
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ } /* end while (tp1) */
+
+ } /* end for (j = nrFragStart */
+
+ nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_nr_gap_ack_block), (uint8_t *) & nr_block);
+ *offset += sizeof(nr_block);
+ if (nr_frag == NULL) {
+ break;
+ }
+		} /* end for (i = 0; i < num_nr_seg; i++) */
+	} /* end of if (!all_bit) */
+ /*
+ * EY- wake up the socket if things have been removed from the sent
+ * queue
+ */
+ if ((wake_him) && (stcb->sctp_socket)) {
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+ SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
+ /*
+ * if (SCTP_BASE_SYSCTL(sctp_logging_level) &
+ * SCTP_WAKE_LOGGING_ENABLE) { sctp_wakeup_log(stcb,
+ * cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);}
+ */
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ /* assoc was freed while we were unlocked */
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+#endif
+ sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+	}
+	/*
+	 * else { if (SCTP_BASE_SYSCTL(sctp_logging_level) &
+	 *	SCTP_WAKE_LOGGING_ENABLE) { sctp_wakeup_log(stcb, cum_ack,
+	 *	wake_him, SCTP_NOWAKE_FROM_SACK); } }
+	 */
+}
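sctp_handle_nr_sack_segments() walks the chunk strictly sequentially via sctp_m_getptr(), bumping *offset one block at a time: num_seg Gap Ack blocks first, then (unless the all bit collapses the two lists into one) num_nr_seg NR Gap Ack blocks. Since both block types are four bytes, the relative position of the NR list follows directly (hypothetical helper, offsets relative to the first Gap Ack block):

	#include <stddef.h>
	#include <stdint.h>

	/* Offset of the idx-th NR Gap Ack block: the NR list simply follows
	 * the regular Gap Ack block list. */
	static size_t
	nr_gap_block_offset(uint16_t num_gap_blks, unsigned int idx)
	{
		return ((size_t)num_gap_blks * 4 + (size_t)idx * 4);
	}

When the all bit is set the second walk is skipped entirely: the first loop has already nr-marked every covered chunk.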
+
+/* EY - nr_sack */
+/* Identifies the non-renegable TSNs that have been revoked */
+static void
+sctp_check_for_nr_revoked(struct sctp_tcb *stcb,
+ struct sctp_association *asoc, uint32_t cumack,
+ u_long biggest_tsn_acked)
+{
+ struct sctp_tmit_chunk *tp1;
+
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ while (tp1) {
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
+ MAX_TSN)) {
+ /*
+ * ok this guy is either ACK or MARKED. If it is
+ * ACKED it has been previously acked but not this
+ * time i.e. revoked. If it is MARKED it was ACK'ed
+ * again.
+ */
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
+ MAX_TSN))
+ break;
+
+
+ if (tp1->sent == SCTP_DATAGRAM_NR_ACKED) {
+ /*
+ * EY! a non-renegable TSN is revoked, need
+ * to abort the association
+ */
+ /*
+ * EY TODO: put in the code to abort the
+ * assoc.
+ */
+ return;
+ } else if (tp1->sent == SCTP_DATAGRAM_NR_MARKED) {
+ /* it has been re-acked in this SACK */
+ tp1->sent = SCTP_DATAGRAM_NR_ACKED;
+ }
+ }
+ if (tp1->sent == SCTP_DATAGRAM_UNSENT)
+ break;
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ }
+}
+
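+/*
+ * Note: compare_with_wrap(a, b, MAX_TSN) is (roughly) RFC 1982 serial
+ * number arithmetic: it answers "is a newer than b" modulo 2^32, so
+ * e.g. TSN 0x00000001 compares as greater than TSN 0xfffffffe.
+ */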
+/* EY! nr_sack version of sctp_handle_sack, nr_gap_ack processing should be added to this method */
+void
+sctp_handle_nr_sack(struct mbuf *m, int offset,
+ struct sctp_nr_sack_chunk *ch, struct sctp_tcb *stcb,
+ struct sctp_nets *net_from, int *abort_now, int nr_sack_len, uint32_t rwnd)
+{
+ struct sctp_association *asoc;
+
+ /* EY sack */
+ struct sctp_nr_sack *nr_sack;
+ struct sctp_tmit_chunk *tp1, *tp2;
+ uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
+ this_sack_lowest_newack;
+ uint32_t sav_cum_ack;
+
+ /* EY num_seg */
+ uint16_t num_seg, num_nr_seg, num_dup;
+ uint16_t wake_him = 0;
+ unsigned int nr_sack_length;
+ uint32_t send_s = 0;
+ long j;
+ int accum_moved = 0;
+ int will_exit_fast_recovery = 0;
+ uint32_t a_rwnd, old_rwnd;
+ int win_probe_recovery = 0;
+ int win_probe_recovered = 0;
+ struct sctp_nets *net = NULL;
+ int nonce_sum_flag, ecn_seg_sums = 0, all_bit;
+ int done_once;
+ uint8_t reneged_all = 0;
+ uint8_t cmt_dac_flag;
+
+ /*
+ * we take any chance we can to service our queues since we cannot
+ * get awoken when the socket is read from :<
+ */
+ /*
+ * Now perform the actual SACK handling: 1) Verify that it is not an
+ * old sack, if so discard. 2) If there is nothing left in the send
+ * queue (cum-ack is equal to last acked) then you have a duplicate
+	 * too, update any rwnd change and verify no timers are running,
+	 * then return. 3) Process any new consecutive data i.e. cum-ack
+ * moved process these first and note that it moved. 4) Process any
+ * sack blocks. 5) Drop any acked from the queue. 6) Check for any
+ * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
+ * sync up flightsizes and things, stop all timers and also check
+ * for shutdown_pending state. If so then go ahead and send off the
+ * shutdown. If in shutdown recv, send off the shutdown-ack and
+ * start that timer, Ret. 9) Strike any non-acked things and do FR
+ * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
+ * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
+ * if in shutdown_recv state.
+ */
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ nr_sack = &ch->nr_sack;
+ /* CMT DAC algo */
+ this_sack_lowest_newack = 0;
+ j = 0;
+ nr_sack_length = (unsigned int)nr_sack_len;
+ /* ECN Nonce */
+ SCTP_STAT_INCR(sctps_slowpath_sack);
+ nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
+ cum_ack = last_tsn = ntohl(nr_sack->cum_tsn_ack);
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
+ stcb->asoc.cumack_log_at++;
+ if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
+ stcb->asoc.cumack_log_at = 0;
+ }
+#endif
+ all_bit = ch->ch.chunk_flags & SCTP_NR_SACK_ALL_BIT;
+ num_seg = ntohs(nr_sack->num_gap_ack_blks);
+ num_nr_seg = ntohs(nr_sack->num_nr_gap_ack_blks);
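+	/*
+	 * With the ALL bit set the chunk carries only NR gap-ack blocks,
+	 * which then double as the ordinary gap-ack blocks as well.
+	 */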
+ if (all_bit)
+ num_seg = num_nr_seg;
+ a_rwnd = rwnd;
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
+ sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
+ rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
+ }
+ /* CMT DAC algo */
+ cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
+ num_dup = ntohs(nr_sack->num_dup_tsns);
+
+ old_rwnd = stcb->asoc.peers_rwnd;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INDATA,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ asoc = &stcb->asoc;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(asoc->last_acked_seq,
+ cum_ack,
+ 0,
+ num_seg,
+ num_dup,
+ SCTP_LOG_NEW_SACK);
+ }
+ if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
+ int off_to_dup, iii;
+ uint32_t *dupdata, dblock;
+
+ /* EY! gotta be careful here */
+ if (all_bit) {
+ off_to_dup = (num_nr_seg * sizeof(struct sctp_nr_gap_ack_block)) +
+ sizeof(struct sctp_nr_sack_chunk);
+ } else {
+ off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) +
+ (num_nr_seg * sizeof(struct sctp_nr_gap_ack_block)) + sizeof(struct sctp_nr_sack_chunk);
+ }
+ if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= nr_sack_length) {
+ dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
+ sizeof(uint32_t), (uint8_t *) & dblock);
+ off_to_dup += sizeof(uint32_t);
+ if (dupdata) {
+ for (iii = 0; iii < num_dup; iii++) {
+ sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
+ dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
+ sizeof(uint32_t), (uint8_t *) & dblock);
+ if (dupdata == NULL)
+ break;
+ off_to_dup += sizeof(uint32_t);
+ }
+ }
+ } else {
+ SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d nr_sack_len:%d num gaps:%d num nr_gaps:%d\n",
+ off_to_dup, num_dup, nr_sack_length, num_seg, num_nr_seg);
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
+ /* reality check */
+ if (!TAILQ_EMPTY(&asoc->sent_queue)) {
+ tp1 = TAILQ_LAST(&asoc->sent_queue,
+ sctpchunk_listhead);
+ send_s = tp1->rec.data.TSN_seq + 1;
+ } else {
+ send_s = asoc->sending_seq;
+ }
+ if (cum_ack == send_s ||
+ compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
+#ifndef INVARIANTS
+ struct mbuf *oper;
+
+#endif
+#ifdef INVARIANTS
+ hopeless_peer:
+ panic("Impossible sack 1");
+#else
+
+
+ /*
+ * no way, we have not even sent this TSN out yet.
+ * Peer is hopelessly messed up with us.
+ */
+ hopeless_peer:
+ *abort_now = 1;
+ /* XXX */
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+ return;
+#endif
+ }
+ }
+ /**********************/
+ /* 1) check the range */
+ /**********************/
+ if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
+ /* acking something behind */
+ return;
+ }
+ sav_cum_ack = asoc->last_acked_seq;
+
+ /* update the Rwnd of the peer */
+ if (TAILQ_EMPTY(&asoc->sent_queue) &&
+ TAILQ_EMPTY(&asoc->send_queue) &&
+ (asoc->stream_queue_cnt == 0)
+ ) {
+ /* nothing left on send/sent and strmq */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+ sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
+ asoc->peers_rwnd, 0, 0, a_rwnd);
+ }
+ asoc->peers_rwnd = a_rwnd;
+ if (asoc->sent_queue_retran_cnt) {
+ asoc->sent_queue_retran_cnt = 0;
+ }
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ /* stop any timers */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
+ if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
+ if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
+ }
+ }
+ net->partial_bytes_acked = 0;
+ net->flight_size = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ return;
+ }
+ /*
+	 * We init net_ack and net_ack2 to 0. These are used to track 2
+	 * things. The total byte count acked is tracked in net_ack AND
+	 * net_ack2 is used to track the total bytes acked that are un-
+	 * ambiguous and were never retransmitted. We track these on a per
+	 * destination address basis.
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->prev_cwnd = net->cwnd;
+ net->net_ack = 0;
+ net->net_ack2 = 0;
+
+ /*
+ * CMT: Reset CUC and Fast recovery algo variables before
+ * SACK processing
+ */
+ net->new_pseudo_cumack = 0;
+ net->will_exit_fast_recovery = 0;
+ }
+ /* process the new consecutive TSN first */
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ while (tp1) {
+ if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
+ MAX_TSN) ||
+ last_tsn == tp1->rec.data.TSN_seq) {
+ if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
+ /*
+ * ECN Nonce: Add the nonce to the sender's
+ * nonce sum
+ */
+ asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
+ accum_moved = 1;
+ if (tp1->sent < SCTP_DATAGRAM_ACKED) {
+ /*
+ * If it is less than ACKED, it is
+ * now no-longer in flight. Higher
+ * values may occur during marking
+ */
+ if ((tp1->whoTo->dest_state &
+ SCTP_ADDR_UNCONFIRMED) &&
+ (tp1->snd_count < 2)) {
+ /*
+ * If there was no retran
+ * and the address is
+ * un-confirmed and we sent
+ * there and are now
+					 * sacked, it's confirmed;
+					 * mark it so.
+ */
+ tp1->whoTo->dest_state &=
+ ~SCTP_ADDR_UNCONFIRMED;
+ }
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uintptr_t) tp1->whoTo,
+ tp1->rec.data.TSN_seq);
+ }
+ sctp_flight_size_decrease(tp1);
+ sctp_total_flight_decrease(stcb, tp1);
+ }
+ tp1->whoTo->net_ack += tp1->send_size;
+
+ /* CMT SFR and DAC algos */
+ this_sack_lowest_newack = tp1->rec.data.TSN_seq;
+ tp1->whoTo->saw_newack = 1;
+
+ if (tp1->snd_count < 2) {
+ /*
+						 * True non-retransmitted
+ * chunk
+ */
+ tp1->whoTo->net_ack2 +=
+ tp1->send_size;
+
+ /* update RTO too? */
+ if (tp1->do_rtt) {
+ tp1->whoTo->RTO =
+ sctp_calculate_rto(stcb,
+ asoc, tp1->whoTo,
+ &tp1->sent_rcv_time,
+ sctp_align_safe_nocopy);
+ tp1->do_rtt = 0;
+ }
+ }
+ /*
+ * CMT: CUCv2 algorithm. From the
+ * cumack'd TSNs, for each TSN being
+ * acked for the first time, set the
+ * following variables for the
+ * corresp destination.
+ * new_pseudo_cumack will trigger a
+ * cwnd update.
+ * find_(rtx_)pseudo_cumack will
+ * trigger search for the next
+ * expected (rtx-)pseudo-cumack.
+ */
+ tp1->whoTo->new_pseudo_cumack = 1;
+ tp1->whoTo->find_pseudo_cumack = 1;
+ tp1->whoTo->find_rtx_pseudo_cumack = 1;
+
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(asoc->last_acked_seq,
+ cum_ack,
+ tp1->rec.data.TSN_seq,
+ 0,
+ 0,
+ SCTP_LOG_TSN_ACKED);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
+ }
+ }
+ if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_decr(asoc->sent_queue_retran_cnt);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xB3,
+ (asoc->sent_queue_retran_cnt & 0x000000ff));
+#endif
+ }
+ if (tp1->rec.data.chunk_was_revoked) {
+ /* deflate the cwnd */
+ tp1->whoTo->cwnd -= tp1->book_size;
+ tp1->rec.data.chunk_was_revoked = 0;
+ }
+ tp1->sent = SCTP_DATAGRAM_ACKED;
+ }
+ } else {
+ break;
+ }
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ }
+ biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
+ /* always set this up to cum-ack */
+ asoc->this_sack_highest_gap = last_tsn;
+
+ /* Move offset up to point to gaps/dups */
+ offset += sizeof(struct sctp_nr_sack_chunk);
+ if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_nr_sack_chunk)) > nr_sack_length) {
+
+ /* skip corrupt segments */
+ goto skip_segments;
+ }
+ if (num_seg > 0) {
+
+ /*
+ * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
+ * to be greater than the cumack. Also reset saw_newack to 0
+ * for all dests.
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->saw_newack = 0;
+ net->this_sack_highest_newack = last_tsn;
+ }
+
+ /*
+		 * this_sack_highest_gap will increase while handling NEW
+		 * segments; this_sack_highest_newack will increase while
+ * handling NEWLY ACKED chunks. this_sack_lowest_newack is
+ * used for CMT DAC algo. saw_newack will also change.
+ */
+
+ sctp_handle_nr_sack_segments(m, &offset, stcb, asoc, ch, last_tsn,
+ &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
+ num_seg, num_nr_seg, &ecn_seg_sums);
+
+
+ if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
+ /*
+ * validate the biggest_tsn_acked in the gap acks if
+ * strict adherence is wanted.
+ */
+ if ((biggest_tsn_acked == send_s) ||
+ (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
+ /*
+ * peer is either confused or we are under
+ * attack. We must abort.
+ */
+ goto hopeless_peer;
+ }
+ }
+ }
+skip_segments:
+ /*******************************************/
+ /* cancel ALL T3-send timer if accum moved */
+ /*******************************************/
+ if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (net->new_pseudo_cumack)
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
+
+ }
+ } else {
+ if (accum_moved) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
+ }
+ }
+ }
+ /********************************************/
+ /* drop the acked chunks from the sendqueue */
+ /********************************************/
+ asoc->last_acked_seq = cum_ack;
+
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ if (tp1 == NULL)
+ goto done_with_it;
+ do {
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
+ MAX_TSN)) {
+ break;
+ }
+ if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
+ /* no more sent on list */
+			printf("Warning, tp1->sent == %d and it's now acked?\n",
+ tp1->sent);
+ }
+ tp2 = TAILQ_NEXT(tp1, sctp_next);
+ TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
+ if (tp1->pr_sctp_on) {
+ if (asoc->pr_sctp_cnt != 0)
+ asoc->pr_sctp_cnt--;
+ }
+ if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
+ (asoc->total_flight > 0)) {
+#ifdef INVARIANTS
+			panic("Warning flight size is positive and should be 0");
+#else
+			SCTP_PRINTF("Warning flight size is %d but should be 0\n",
+			    asoc->total_flight);
+#endif
+ asoc->total_flight = 0;
+ }
+ if (tp1->data) {
+ /* sa_ignore NO_NULL_CHK */
+ sctp_free_bufspace(stcb, asoc, tp1, 1);
+ sctp_m_freem(tp1->data);
+ if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
+ asoc->sent_queue_cnt_removeable--;
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(asoc->last_acked_seq,
+ cum_ack,
+ tp1->rec.data.TSN_seq,
+ 0,
+ 0,
+ SCTP_LOG_FREE_SENT);
+ }
+ tp1->data = NULL;
+ asoc->sent_queue_cnt--;
+ sctp_free_a_chunk(stcb, tp1);
+ wake_him++;
+ tp1 = tp2;
+ } while (tp1 != NULL);
+
+done_with_it:
+ /* sa_ignore NO_NULL_CHK */
+ if ((wake_him) && (stcb->sctp_socket)) {
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+ SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
+ sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
+ }
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ /* assoc was freed while we were unlocked */
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+#endif
+ sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
+ sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
+ }
+ }
+
+ if (asoc->fast_retran_loss_recovery && accum_moved) {
+ if (compare_with_wrap(asoc->last_acked_seq,
+ asoc->fast_recovery_tsn, MAX_TSN) ||
+ asoc->last_acked_seq == asoc->fast_recovery_tsn) {
+ /* Setup so we will exit RFC2582 fast recovery */
+ will_exit_fast_recovery = 1;
+ }
+ }
+ /*
+ * Check for revoked fragments:
+ *
+	 * If the previous sack had no frags then we can't have any revoked.
+	 * If the previous sack had frags, then: if we now have frags (aka
+	 * num_seg > 0) call sctp_check_for_revoked() to tell if the peer
+	 * revoked some of them; else the peer revoked all ACKED fragments,
+	 * since we had some before and now we have NONE.
+ */
+
+ if (num_seg)
+ sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
+
+ else if (asoc->saw_sack_with_frags) {
+ int cnt_revoked = 0;
+
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ if (tp1 != NULL) {
+ /* Peer revoked all dg's marked or acked */
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ /*
+				 * EY- maybe check only if it is nr_acked;
+				 * nr_marked may not be possible
+ */
+ if ((tp1->sent == SCTP_DATAGRAM_NR_ACKED) ||
+ (tp1->sent == SCTP_DATAGRAM_NR_MARKED)) {
+ /*
+ * EY! - TODO: Something previously
+ * nr_gapped is reneged, abort the
+ * association
+ */
+ return;
+ }
+ if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
+ (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
+ tp1->sent = SCTP_DATAGRAM_SENT;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uintptr_t) tp1->whoTo,
+ tp1->rec.data.TSN_seq);
+ }
+ sctp_flight_size_increase(tp1);
+ sctp_total_flight_increase(stcb, tp1);
+ tp1->rec.data.chunk_was_revoked = 1;
+ /*
+ * To ensure that this increase in
+ * flightsize, which is artificial,
+ * does not throttle the sender, we
+ * also increase the cwnd
+ * artificially.
+ */
+ tp1->whoTo->cwnd += tp1->book_size;
+ cnt_revoked++;
+ }
+ }
+ if (cnt_revoked) {
+ reneged_all = 1;
+ }
+ }
+ asoc->saw_sack_with_frags = 0;
+ }
+ if (num_seg)
+ asoc->saw_sack_with_frags = 1;
+ else
+ asoc->saw_sack_with_frags = 0;
+
+	/* EY! - not sure whether there should be an IF here */
+ if (num_nr_seg)
+ sctp_check_for_nr_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
+ else if (asoc->saw_sack_with_nr_frags) {
+ /*
+ * EY!- TODO: all previously nr_gapped chunks have been
+		 * reneged; abort the association
+ */
+ asoc->saw_sack_with_nr_frags = 0;
+ }
+ if (num_nr_seg)
+ asoc->saw_sack_with_nr_frags = 1;
+ else
+ asoc->saw_sack_with_nr_frags = 0;
+ /* JRS - Use the congestion control given in the CC module */
+ asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
+
+ if (TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* nothing left in-flight */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ /* stop all timers */
+ if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
+ if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
+ }
+ }
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
+ net->flight_size = 0;
+ net->partial_bytes_acked = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ }
+ /**********************************/
+ /* Now what about shutdown issues */
+ /**********************************/
+ if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* nothing left on sendqueue.. consider done */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+ sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
+ asoc->peers_rwnd, 0, 0, a_rwnd);
+ }
+ asoc->peers_rwnd = a_rwnd;
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ /* clean up */
+ if ((asoc->stream_queue_cnt == 1) &&
+ ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
+ (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
+ (asoc->locked_on_sending)
+ ) {
+ struct sctp_stream_queue_pending *sp;
+
+ /*
+ * I may be in a state where we got all across.. but
+ * cannot write more due to a shutdown... we abort
+ * since the user did not indicate EOR in this case.
+ */
+ sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
+ sctp_streamhead);
+ if ((sp) && (sp->length == 0)) {
+ asoc->locked_on_sending = NULL;
+ if (sp->msg_is_complete) {
+ asoc->stream_queue_cnt--;
+ } else {
+ asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+ asoc->stream_queue_cnt--;
+ }
+ }
+ }
+ if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
+ /* Need to abort here */
+ struct mbuf *oper;
+
+ abort_out_now:
+ *abort_now = 1;
+ /* XXX */
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
+ return;
+ } else {
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_stop_timers_for_shutdown(stcb);
+ sctp_send_shutdown(stcb,
+ stcb->asoc.primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ }
+ return;
+ } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
+ goto abort_out_now;
+ }
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_send_shutdown_ack(stcb,
+ stcb->asoc.primary_destination);
+
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ return;
+ }
+ }
+ /*
+ * Now here we are going to recycle net_ack for a different use...
+ * HEADS UP.
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->net_ack = 0;
+ }
+
+ /*
+ * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
+ * to be done. Setting this_sack_lowest_newack to the cum_ack will
+ * automatically ensure that.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
+ this_sack_lowest_newack = cum_ack;
+ }
+ if (num_seg > 0) {
+ sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
+ biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
+ }
+ /* JRS - Use the congestion control given in the CC module */
+ asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
+
+ /******************************************************************
+ * Here we do the stuff with ECN Nonce checking.
+ * We basically check to see if the nonce sum flag was incorrect
+ * or if resynchronization needs to be done. Also if we catch a
+ * misbehaving receiver we give him the kick.
+ ******************************************************************/
+
+ if (asoc->ecn_nonce_allowed) {
+ if (asoc->nonce_sum_check) {
+ if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
+ if (asoc->nonce_wait_for_ecne == 0) {
+ struct sctp_tmit_chunk *lchk;
+
+ lchk = TAILQ_FIRST(&asoc->send_queue);
+ asoc->nonce_wait_for_ecne = 1;
+ if (lchk) {
+ asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
+ } else {
+ asoc->nonce_wait_tsn = asoc->sending_seq;
+ }
+ } else {
+ if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
+ (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
+ /*
+ * Misbehaving peer. We need
+ * to react to this guy
+ */
+ asoc->ecn_allowed = 0;
+ asoc->ecn_nonce_allowed = 0;
+ }
+ }
+ }
+ } else {
+ /* See if Resynchronization Possible */
+ if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
+ asoc->nonce_sum_check = 1;
+ /*
+ * now we must calculate what the base is.
+ * We do this based on two things, we know
+ * the total's for all the segments
+ * gap-acked in the SACK, its stored in
+ * ecn_seg_sums. We also know the SACK's
+ * nonce sum, its in nonce_sum_flag. So we
+ * can build a truth table to back-calculate
+ * the new value of
+ * asoc->nonce_sum_expect_base:
+ *
+				 * SACK-flag-Value   Seg-Sums   Base
+				 *        0             0        0
+				 *        1             0        1
+				 *        0             1        1
+				 *        1             1        0
+ */
+ asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
+ }
+ }
+ }
+ /* Now are we exiting loss recovery ? */
+ if (will_exit_fast_recovery) {
+ /* Ok, we must exit fast recovery */
+ asoc->fast_retran_loss_recovery = 0;
+ }
+ if ((asoc->sat_t3_loss_recovery) &&
+ ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
+ MAX_TSN) ||
+ (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
+ /* end satellite t3 loss recovery */
+ asoc->sat_t3_loss_recovery = 0;
+ }
+ /*
+ * CMT Fast recovery
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (net->will_exit_fast_recovery) {
+ /* Ok, we must exit fast recovery */
+ net->fast_retran_loss_recovery = 0;
+ }
+ }
+
+ /* Adjust and set the new rwnd value */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+ sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
+ asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
+ }
+ asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
+ (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ if (asoc->peers_rwnd > old_rwnd) {
+ win_probe_recovery = 1;
+ }
+ /*
+ * Now we must setup so we have a timer up for anyone with
+ * outstanding data.
+ */
+ done_once = 0;
+again:
+ j = 0;
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (win_probe_recovery && (net->window_probe)) {
+ net->window_probe = 0;
+ win_probe_recovered = 1;
+ /*-
+ * Find first chunk that was used with
+ * window probe and clear the event. Put
+			 * it back into the send queue as if it
+			 * has not been sent.
+ */
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if (tp1->window_probe) {
+ sctp_window_probe_recovery(stcb, asoc, net, tp1);
+ break;
+ }
+ }
+ }
+ if (net->flight_size) {
+ j++;
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net);
+ } else {
+ if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
+ if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
+ }
+ }
+ }
+ }
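+	/*
+	 * Sanity pass: nothing counted in flight, yet the sent queue is
+	 * non-empty with nothing marked for retransmit. Audit, rebuild the
+	 * flight size from the sent queue and run the timer pass once more.
+	 */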
+ if ((j == 0) &&
+ (!TAILQ_EMPTY(&asoc->sent_queue)) &&
+ (asoc->sent_queue_retran_cnt == 0) &&
+ (win_probe_recovered == 0) &&
+ (done_once == 0)) {
+ /* huh, this should not happen */
+ sctp_fs_audit(asoc);
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->flight_size = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ asoc->sent_queue_retran_cnt = 0;
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ sctp_flight_size_increase(tp1);
+ sctp_total_flight_increase(stcb, tp1);
+ } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ asoc->sent_queue_retran_cnt++;
+ }
+ }
+ done_once = 1;
+ goto again;
+ }
+ /*********************************************/
+ /* Here we perform PR-SCTP procedures */
+ /* (section 4.2) */
+ /*********************************************/
+ /* C1. update advancedPeerAckPoint */
+ if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
+ asoc->advanced_peer_ack_point = cum_ack;
+ }
+ /* C2. try to further move advancedPeerAckPoint ahead */
+ if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
+ struct sctp_tmit_chunk *lchk;
+ uint32_t old_adv_peer_ack_point;
+
+ old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
+ lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
+ /* C3. See if we need to send a Fwd-TSN */
+ if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
+ MAX_TSN)) {
+ /*
+ * ISSUE with ECN, see FWD-TSN processing for notes
+ * on issues that will occur when the ECN NONCE
+ * stuff is put into SCTP for cross checking.
+ */
+ if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
+ MAX_TSN)) {
+ send_forward_tsn(stcb, asoc);
+ /*
+ * ECN Nonce: Disable Nonce Sum check when
+ * FWD TSN is sent and store resync tsn
+ */
+ asoc->nonce_sum_check = 0;
+ asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
+ }
+ }
+ if (lchk) {
+ /* Assure a timer is up */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, lchk->whoTo);
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
+ a_rwnd,
+ stcb->asoc.peers_rwnd,
+ stcb->asoc.total_flight,
+ stcb->asoc.total_output_queue_size);
+ }
+}
diff --git a/sys/netinet/sctp_indata.h b/sys/netinet/sctp_indata.h
index 81c1d72..76fa946 100644
--- a/sys/netinet/sctp_indata.h
+++ b/sys/netinet/sctp_indata.h
@@ -99,6 +99,16 @@ void
sctp_handle_sack(struct mbuf *m, int offset, struct sctp_sack_chunk *, struct sctp_tcb *,
struct sctp_nets *, int *, int, uint32_t);
+/* EY does essentially the same as sctp_express_handle_sack, but for nr_sack chunks */
+void
+sctp_express_handle_nr_sack(struct sctp_tcb *stcb, uint32_t cumack,
+ uint32_t rwnd, int nonce_sum_flag, int *abort_now);
+
+/* EY nr_sack version of sctp_handle_sack */
+void
+sctp_handle_nr_sack(struct mbuf *m, int offset, struct sctp_nr_sack_chunk *, struct sctp_tcb *,
+ struct sctp_nets *, int *, int, uint32_t);
+
/* draft-ietf-tsvwg-usctp */
void
sctp_handle_forward_tsn(struct sctp_tcb *,
diff --git a/sys/netinet/sctp_input.c b/sys/netinet/sctp_input.c
index e7d7e2c..c686fac 100644
--- a/sys/netinet/sctp_input.c
+++ b/sys/netinet/sctp_input.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -316,6 +316,8 @@ sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
asoc->streamoutcnt = asoc->pre_open_streams;
/* init tsn's */
asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
+ /* EY - nr_sack: initialize highest tsn in nr_mapping_array */
+ asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
@@ -323,6 +325,11 @@ sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;
asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
+ /*
+ * EY 05/13/08 - nr_sack: initialize nr_mapping array's base tsn
+ * like above
+ */
+ asoc->nr_mapping_array_base_tsn = ntohl(init->initial_tsn);
asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;
asoc->last_echo_tsn = asoc->asconf_seq_in;
asoc->advanced_peer_ack_point = asoc->last_acked_seq;
@@ -393,6 +400,7 @@ sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
struct mbuf *op_err;
int retval, abort_flag;
uint32_t initack_limit;
+ int nat_friendly = 0;
/* First verify that we have no illegal param's */
abort_flag = 0;
@@ -400,7 +408,7 @@ sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
op_err = sctp_arethere_unrecognized_parameters(m,
(offset + sizeof(struct sctp_init_chunk)),
- &abort_flag, (struct sctp_chunkhdr *)cp);
+ &abort_flag, (struct sctp_chunkhdr *)cp, &nat_friendly);
if (abort_flag) {
/* Send an abort and notify peer */
sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err, SCTP_SO_NOT_LOCKED);
@@ -408,6 +416,7 @@ sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
return (-1);
}
asoc = &stcb->asoc;
+ asoc->peer_supports_nat = (uint8_t) nat_friendly;
/* process the peer's parameters in the INIT-ACK */
retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
if (retval < 0) {
@@ -637,6 +646,69 @@ sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
}
}
+static int
+sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
+{
+ /*
+ * return 0 means we want you to proceed with the abort non-zero
+ * means no abort processing
+ */
+ struct sctpasochead *head;
+
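+	/*
+	 * A colliding-state abort before the assoc is up just means our
+	 * vtag clashed inside the NAT: pick a fresh vtag, re-hash the
+	 * assoc under it and try the INIT again.
+	 */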
+ if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
+ /* generate a new vtag and send init */
+ LIST_REMOVE(stcb, sctp_asocs);
+ stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
+ head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
+ /*
+ * put it in the bucket in the vtag hash of assoc's for the
+ * system
+ */
+ LIST_INSERT_HEAD(head, stcb, sctp_asocs);
+ sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+ return (1);
+ }
+ if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
+ /*
+ * treat like a case where the cookie expired i.e.: - dump
+ * current cookie. - generate a new vtag. - resend init.
+ */
+ /* generate a new vtag and send init */
+ LIST_REMOVE(stcb, sctp_asocs);
+ stcb->asoc.state &= ~SCTP_STATE_COOKIE_ECHOED;
+ stcb->asoc.state |= SCTP_STATE_COOKIE_WAIT;
+ sctp_stop_all_cookie_timers(stcb);
+ sctp_toss_old_cookies(stcb, &stcb->asoc);
+ stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
+ head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
+ /*
+ * put it in the bucket in the vtag hash of assoc's for the
+ * system
+ */
+ LIST_INSERT_HEAD(head, stcb, sctp_asocs);
+ sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+ return (1);
+ }
+ return (0);
+}
+
+static int
+sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ /*
+ * return 0 means we want you to proceed with the abort non-zero
+ * means no abort processing
+ */
+ if (stcb->asoc.peer_supports_auth == 0) {
+ SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
+ return (0);
+ }
+ sctp_asconf_send_nat_state_update(stcb, net);
+ return (1);
+}
+
+
static void
sctp_handle_abort(struct sctp_abort_chunk *cp,
struct sctp_tcb *stcb, struct sctp_nets *net)
@@ -645,11 +717,40 @@ sctp_handle_abort(struct sctp_abort_chunk *cp,
struct socket *so;
#endif
+ uint16_t len;
SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
if (stcb == NULL)
return;
+ len = ntohs(cp->ch.chunk_length);
+ if (len > sizeof(struct sctp_chunkhdr)) {
+ /*
+ * Need to check the cause codes for our two magic nat
+ * aborts which don't kill the assoc necessarily.
+ */
+ struct sctp_abort_chunk *cpnext;
+ struct sctp_missing_nat_state *natc;
+ uint16_t cause;
+
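+		/*
+		 * The first error cause sits right after the chunk header;
+		 * step over the header and peek at its cause code only.
+		 */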
+ cpnext = cp;
+ cpnext++;
+ natc = (struct sctp_missing_nat_state *)cpnext;
+ cause = ntohs(natc->cause);
+ if (cause == SCTP_CAUSE_NAT_COLLIDING_STATE) {
+ SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
+ cp->ch.chunk_flags);
+ if (sctp_handle_nat_colliding_state(stcb)) {
+ return;
+ }
+ } else if (cause == SCTP_CAUSE_NAT_MISSING_STATE) {
+ SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
+ cp->ch.chunk_flags);
+ if (sctp_handle_nat_missing_state(stcb, net)) {
+ return;
+ }
+ }
+ }
/* stop any receive timers */
sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
/* notify user of the abort and clean up... */
@@ -926,6 +1027,9 @@ sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
case SCTP_SUPPORTED_CHUNK_EXT:
break;
/* draft-ietf-tsvwg-addip-sctp */
+ case SCTP_HAS_NAT_SUPPORT:
+ stcb->asoc.peer_supports_nat = 0;
+ break;
case SCTP_ECN_NONCE_SUPPORTED:
stcb->asoc.peer_supports_ecn_nonce = 0;
stcb->asoc.ecn_nonce_allowed = 0;
@@ -990,6 +1094,20 @@ sctp_handle_error(struct sctp_chunkhdr *ch,
SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
error_type);
break;
+ case SCTP_CAUSE_NAT_COLLIDING_STATE:
+ SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
+ ch->chunk_flags);
+ if (sctp_handle_nat_colliding_state(stcb)) {
+ return (0);
+ }
+ break;
+ case SCTP_CAUSE_NAT_MISSING_STATE:
+ SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
+ ch->chunk_flags);
+ if (sctp_handle_nat_missing_state(stcb, net)) {
+ return (0);
+ }
+ break;
case SCTP_CAUSE_STALE_COOKIE:
/*
* We only act if we have echoed a cookie and are
@@ -1022,9 +1140,9 @@ sctp_handle_error(struct sctp_chunkhdr *ch,
return (-1);
}
/* blast back to INIT state */
+ sctp_toss_old_cookies(stcb, &stcb->asoc);
asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
asoc->state |= SCTP_STATE_COOKIE_WAIT;
-
sctp_stop_all_cookie_timers(stcb);
sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
}
@@ -1213,6 +1331,14 @@ sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
return (0);
}
+static struct sctp_tcb *
+sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
+ struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
+ struct sctp_inpcb *inp, struct sctp_nets **netp,
+ struct sockaddr *init_src, int *notification,
+ int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
+ uint32_t vrf_id, uint16_t port);
+
/*
* handle a state cookie for an existing association m: input packet mbuf
@@ -1223,19 +1349,23 @@ sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
static struct sctp_tcb *
sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
- struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
+ struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
- uint32_t vrf_id)
+ uint32_t vrf_id, int auth_skipped, uint32_t auth_offset, uint32_t auth_len, uint16_t port)
{
struct sctp_association *asoc;
struct sctp_init_chunk *init_cp, init_buf;
struct sctp_init_ack_chunk *initack_cp, initack_buf;
+ struct sctp_nets *net;
+ struct mbuf *op_err;
+ struct sctp_paramhdr *ph;
int chk_length;
int init_offset, initack_offset, i;
int retval;
int spec_flag = 0;
uint32_t how_indx;
+ net = *netp;
/* I know that the TCB is non-NULL from the caller */
asoc = &stcb->asoc;
for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
@@ -1247,9 +1377,6 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
}
if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
/* SHUTDOWN came in after sending INIT-ACK */
- struct mbuf *op_err;
- struct sctp_paramhdr *ph;
-
sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
0, M_DONTWAIT, 1, MT_DATA);
@@ -1457,9 +1584,50 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
asoc->cookie_how[how_indx] = 6;
return (NULL);
}
- if (ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag &&
- (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag ||
- init_cp->init.initiate_tag == 0)) {
+ /*
+	 * If the peer supports NAT and the tag conditions below hold while
+	 * we are established, send back an ABORT(colliding state).
+ */
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) &&
+ (asoc->peer_supports_nat) &&
+ ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
+ ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
+ (asoc->peer_vtag == 0)))) {
+ /*
+	 * Special case - Peers support NAT. We may have two INITs
+	 * that we gave out the same tag on since one was not
+	 * established.. i.e. we get an INIT from host-1 behind the
+	 * NAT and we respond tag-a, we get an INIT from host-2
+	 * behind the NAT and we give out tag-a again. Then we bring
+	 * up host-1's (or 2's) assoc, then comes the cookie from
+	 * host-2 (or 1). Now we have colliding state. We must send
+	 * an abort here with colliding state indication.
+ */
+ op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (op_err == NULL) {
+ /* FOOBAR */
+ return (NULL);
+ }
+ /* pre-reserve some space */
+#ifdef INET6
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
+#else
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
+#endif
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
+ /* Set the len */
+ SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
+ ph = mtod(op_err, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_NAT_COLLIDING_STATE);
+ ph->param_length = htons(sizeof(struct sctp_paramhdr));
+ sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
+ return (NULL);
+ }
+ if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
+ ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
+ (asoc->peer_vtag == 0))) {
/*
* case B in Section 5.2.4 Table 2: MXAA or MOAA my info
* should be ok, re-accept peer info
@@ -1612,6 +1780,17 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
cookie->tie_tag_peer_vtag != 0) {
struct sctpasochead *head;
+ if (asoc->peer_supports_nat) {
+ /*
+		 * This is a gross gross hack. Just call the
+ * cookie_new code since we are allowing a duplicate
+ * association. I hope this works...
+ */
+ return (sctp_process_cookie_new(m, iphlen, offset, sh, cookie, cookie_len,
+ inp, netp, init_src, notification,
+ auth_skipped, auth_offset, auth_len,
+ vrf_id, port));
+ }
/*
* case A in Section 5.2.4 Table 2: XXMM (peer restarted)
*/
@@ -1660,6 +1839,12 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
memset(asoc->mapping_array, 0,
asoc->mapping_array_size);
}
+ /* EY 05/13/08 - nr_sack version of the above if statement */
+ if (asoc->nr_mapping_array && SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)
+ && asoc->peer_supports_nr_sack) {
+ memset(asoc->nr_mapping_array, 0,
+ asoc->nr_mapping_array_size);
+ }
SCTP_TCB_UNLOCK(stcb);
SCTP_INP_INFO_WLOCK();
SCTP_INP_WLOCK(stcb->sctp_ep);
@@ -1689,14 +1874,6 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
*/
LIST_INSERT_HEAD(head, stcb, sctp_asocs);
- /* Is this the first restart? */
- if (stcb->asoc.in_restart_hash == 0) {
- /* Ok add it to assoc_id vtag hash */
- head = &SCTP_BASE_INFO(sctp_restarthash)[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
- SCTP_BASE_INFO(hashrestartmark))];
- LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash);
- stcb->asoc.in_restart_hash = 1;
- }
/* process the INIT info (peer's info) */
SCTP_TCB_SEND_UNLOCK(stcb);
SCTP_INP_WUNLOCK(stcb->sctp_ep);
@@ -1746,7 +1923,7 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
* cookie-echo chunk length: length of the cookie chunk to: where the init
* was from returns a new TCB
*/
-static struct sctp_tcb *
+struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
struct sctp_inpcb *inp, struct sctp_nets **netp,
@@ -1886,7 +2063,7 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
}
/* process the INIT-ACK info (my info) */
old_tag = asoc->my_vtag;
- asoc->assoc_id = asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
+ asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
@@ -2089,6 +2266,18 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
return (stcb);
}
+/*
+ * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e.
+ * we NEED to make sure we are not already using the vtag. If so we
+ * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG. No middle box bit!
+ head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
+ SCTP_BASE_INFO(hashasocmark))];
+ LIST_FOREACH(stcb, head, sctp_asocs) {
+ if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep)) {
+ -- SEND ABORT - TRY AGAIN --
+ }
+ }
+*/
/*
* handles a COOKIE-ECHO message stcb: modified to either a new or left as
@@ -2422,8 +2611,8 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
/* this is abnormal... cookie-echo on existing TCB */
had_a_existing_tcb = 1;
*stcb = sctp_process_cookie_existing(m, iphlen, offset, sh,
- cookie, cookie_len, *inp_p, *stcb, *netp, to,
- &notification, &sac_restart_id, vrf_id);
+ cookie, cookie_len, *inp_p, *stcb, netp, to,
+ &notification, &sac_restart_id, vrf_id, auth_skipped, auth_offset, auth_len, port);
}
if (*stcb == NULL) {
@@ -2544,8 +2733,6 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
inp->sctp_ep.local_auth_chunks =
sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
- (void)sctp_copy_skeylist(&(*inp_p)->sctp_ep.shared_keys,
- &inp->sctp_ep.shared_keys);
/*
* Now we must move it from one hash table to
@@ -3040,6 +3227,10 @@ process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
/* resend the sack */
sctp_send_sack(stcb);
break;
+ /* EY for nr_sacks */
+ case SCTP_NR_SELECTIVE_ACK:
+ sctp_send_nr_sack(stcb); /* EY resend the nr-sack */
+ break;
case SCTP_HEARTBEAT_REQUEST:
/* resend a demand HB */
if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
@@ -3296,6 +3487,17 @@ sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
+
+ /*
+ * EY 05/13/08 - nr_sack: to keep
+ * nr_mapping array be consistent
+ * with mapping_array
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack) {
+ stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
+ stcb->asoc.nr_mapping_array_base_tsn = stcb->asoc.mapping_array_base_tsn;
+ memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
+ }
stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
@@ -3402,6 +3604,15 @@ sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
+ /*
+ * EY 05/13/08 -nr_sack: to keep nr_mapping array consistent
+ * with mapping array
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack) {
+ stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
+ stcb->asoc.nr_mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
+ memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
+ }
atomic_add_int(&stcb->asoc.sending_seq, 1);
/* save off historical data for retrans */
stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
@@ -3940,7 +4151,7 @@ __attribute__((noinline))
if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
break;
stcb = sctp_findassociation_ep_asconf(m, iphlen,
- *offset, sh, &inp, netp);
+ *offset, sh, &inp, netp, vrf_id);
if (stcb != NULL)
break;
asconf_offset += SCTP_SIZE32(asconf_len);
@@ -4043,6 +4254,8 @@ __attribute__((noinline))
* process all control chunks...
*/
if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
+ /* EY */
+ (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
(ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
(SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
/* implied cookie-ack.. we must have lost the ack */
@@ -4328,6 +4541,11 @@ process_control_chunks:
sctp_handle_sack(m, *offset,
sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
}
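+			/*
+			 * With the send, sent and stream queues all empty
+			 * this SACK drained the sender, so raise the
+			 * SENDER_DRY event (used e.g. to drive DTLS over
+			 * SCTP).
+			 */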
+ if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
+ TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
+ (stcb->asoc.stream_queue_cnt == 0)) {
+ sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+ }
if (abort_now) {
/* ABORT signal from sack processing */
*offset = length;
@@ -4335,6 +4553,102 @@ process_control_chunks:
}
}
break;
+ /*
+ * EY - nr_sack: If the received chunk is an
+ * nr_sack chunk
+ */
+ case SCTP_NR_SELECTIVE_ACK:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK\n");
+ SCTP_STAT_INCR(sctps_recvsacks);
+ {
+ struct sctp_nr_sack_chunk *nr_sack;
+ int abort_now = 0;
+ uint32_t a_rwnd, cum_ack;
+ uint16_t num_seg, num_nr_seg;
+ int nonce_sum_flag, all_bit;
+
+ if ((stcb == NULL) || (chk_length < sizeof(struct sctp_nr_sack_chunk))) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on nr_sack chunk, too small\n");
+ ignore_nr_sack:
+ *offset = length;
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ }
+ /*
+				 * EY if nr_sacks have not been negotiated
+				 * but the peer sent an nr_sack, silently
+				 * discard the chunk
+ */
+ if (!(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)) {
+ goto unknown_chunk;
+ }
+ if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
+ /*-
+ * If we have sent a shutdown-ack, we will pay no
+ * attention to a sack sent in to us since
+ * we don't care anymore.
+ */
+ goto ignore_nr_sack;
+ }
+ nr_sack = (struct sctp_nr_sack_chunk *)ch;
+ nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
+ all_bit = ch->chunk_flags & SCTP_NR_SACK_ALL_BIT;
+
+ cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
+ num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
+ /*
+				 * EY - if the All bit is set, then there are as
+ * many gaps as nr_gaps
+ */
+ if (all_bit) {
+ num_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
+ }
+ num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
+ a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd);
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
+ cum_ack,
+ num_seg,
+ a_rwnd
+ );
+ stcb->asoc.seen_a_sack_this_pkt = 1;
+ if ((stcb->asoc.pr_sctp_cnt == 0) &&
+ (num_seg == 0) &&
+ ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
+ (cum_ack == stcb->asoc.last_acked_seq)) &&
+ (stcb->asoc.saw_sack_with_frags == 0) &&
+ (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
+ ) {
+ /*
+ * We have a SIMPLE sack having no
+ * prior segments and data on sent
+ * queue to be acked.. Use the
+ * faster path sack processing. We
+ * also allow window update sacks
+ * with no missing segments to go
+ * this way too.
+ */
+ sctp_express_handle_nr_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
+ &abort_now);
+ } else {
+ if (netp && *netp)
+ sctp_handle_nr_sack(m, *offset,
+ nr_sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
+ }
+ if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
+ TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
+ (stcb->asoc.stream_queue_cnt == 0)) {
+ sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+ }
+ if (abort_now) {
+ /* ABORT signal from sack processing */
+ *offset = length;
+ return (NULL);
+ }
+ }
+ break;
+
case SCTP_HEARTBEAT_REQUEST:
SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
if ((stcb) && netp && *netp) {
@@ -5242,6 +5556,18 @@ out_now:
return;
}
+#if 0
+static void
+sctp_print_mbuf_chain(struct mbuf *m)
+{
+ for (; m; m = SCTP_BUF_NEXT(m)) {
+ printf("%p: m_len = %ld\n", m, SCTP_BUF_LEN(m));
+ if (SCTP_BUF_IS_EXTENDED(m))
+ printf("%p: extend_size = %d\n", m, SCTP_BUF_EXTEND_SIZE(m));
+ }
+}
+
+#endif
void
sctp_input_with_port(i_pak, off, port)
diff --git a/sys/netinet/sctp_os_bsd.h b/sys/netinet/sctp_os_bsd.h
index 64a4794..ff9d534 100644
--- a/sys/netinet/sctp_os_bsd.h
+++ b/sys/netinet/sctp_os_bsd.h
@@ -500,3 +500,24 @@ sctp_get_mbuf_for_msg(unsigned int space_needed,
#define MD5_Final MD5Final
#endif
+
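+/*
+ * Refcount helpers: SCTP_DECREMENT_AND_CHECK_REFCOUNT is true when the
+ * caller dropped the last reference (the pre-decrement value was 1).
+ * SCTP_SAVE_ATOMIC_DECREMENT panics on underflow under INVARIANTS and
+ * clamps the counter to zero otherwise.
+ */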
+#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 1)
+#if defined(INVARIANTS)
+#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
+{ \
+ int32_t oldval; \
+ oldval = atomic_fetchadd_int(addr, -val); \
+ if (oldval < val) { \
+ panic("Counter goes negative"); \
+ } \
+}
+#else
+#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
+{ \
+ int32_t oldval; \
+ oldval = atomic_fetchadd_int(addr, -val); \
+ if (oldval < val) { \
+ *addr = 0; \
+ } \
+}
+#endif
diff --git a/sys/netinet/sctp_output.c b/sys/netinet/sctp_output.c
index 09b0145..9efff50 100644
--- a/sys/netinet/sctp_output.c
+++ b/sys/netinet/sctp_output.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -1858,6 +1858,1812 @@ struct sack_track sack_array[256] = {
}
};
+/* EY below are the nr_sack versions of the preceding two data structures, identical except for their names */
+#define SCTP_MAX_NR_GAPS_INARRAY 4
+struct nr_sack_track {
+ uint8_t right_edge; /* mergable on the right edge */
+ uint8_t left_edge; /* mergable on the left edge */
+ uint8_t num_entries;
+ uint8_t spare;
+ struct sctp_nr_gap_ack_block nr_gaps[SCTP_MAX_NR_GAPS_INARRAY];
+};
+
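+/*
+ * Each of the 256 entries describes the gap-ack blocks implied by one
+ * byte of the nr_mapping array: e.g. 0x05 (bits 0 and 2 set) yields the
+ * two blocks {0,0} and {2,2}; the edge flags record whether a block
+ * touches a byte boundary and can merge with a neighbouring byte.
+ */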
+struct nr_sack_track nr_sack_array[256] = {
+ {0, 0, 0, 0, /* 0x00 */
+ {{0, 0},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x01 */
+ {{0, 0},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x02 */
+ {{1, 1},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x03 */
+ {{0, 1},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x04 */
+ {{2, 2},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x05 */
+ {{0, 0},
+ {2, 2},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x06 */
+ {{1, 2},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x07 */
+ {{0, 2},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x08 */
+ {{3, 3},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x09 */
+ {{0, 0},
+ {3, 3},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x0a */
+ {{1, 1},
+ {3, 3},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x0b */
+ {{0, 1},
+ {3, 3},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x0c */
+ {{2, 3},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x0d */
+ {{0, 0},
+ {2, 3},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x0e */
+ {{1, 3},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x0f */
+ {{0, 3},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x10 */
+ {{4, 4},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x11 */
+ {{0, 0},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x12 */
+ {{1, 1},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x13 */
+ {{0, 1},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x14 */
+ {{2, 2},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x15 */
+ {{0, 0},
+ {2, 2},
+ {4, 4},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x16 */
+ {{1, 2},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x17 */
+ {{0, 2},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x18 */
+ {{3, 4},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x19 */
+ {{0, 0},
+ {3, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x1a */
+ {{1, 1},
+ {3, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x1b */
+ {{0, 1},
+ {3, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x1c */
+ {{2, 4},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x1d */
+ {{0, 0},
+ {2, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x1e */
+ {{1, 4},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x1f */
+ {{0, 4},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x20 */
+ {{5, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x21 */
+ {{0, 0},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x22 */
+ {{1, 1},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x23 */
+ {{0, 1},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x24 */
+ {{2, 2},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x25 */
+ {{0, 0},
+ {2, 2},
+ {5, 5},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x26 */
+ {{1, 2},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x27 */
+ {{0, 2},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x28 */
+ {{3, 3},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x29 */
+ {{0, 0},
+ {3, 3},
+ {5, 5},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x2a */
+ {{1, 1},
+ {3, 3},
+ {5, 5},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x2b */
+ {{0, 1},
+ {3, 3},
+ {5, 5},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x2c */
+ {{2, 3},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x2d */
+ {{0, 0},
+ {2, 3},
+ {5, 5},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x2e */
+ {{1, 3},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x2f */
+ {{0, 3},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x30 */
+ {{4, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x31 */
+ {{0, 0},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x32 */
+ {{1, 1},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x33 */
+ {{0, 1},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x34 */
+ {{2, 2},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x35 */
+ {{0, 0},
+ {2, 2},
+ {4, 5},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x36 */
+ {{1, 2},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x37 */
+ {{0, 2},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x38 */
+ {{3, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x39 */
+ {{0, 0},
+ {3, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x3a */
+ {{1, 1},
+ {3, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x3b */
+ {{0, 1},
+ {3, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x3c */
+ {{2, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x3d */
+ {{0, 0},
+ {2, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x3e */
+ {{1, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x3f */
+ {{0, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x40 */
+ {{6, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x41 */
+ {{0, 0},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x42 */
+ {{1, 1},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x43 */
+ {{0, 1},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x44 */
+ {{2, 2},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x45 */
+ {{0, 0},
+ {2, 2},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x46 */
+ {{1, 2},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x47 */
+ {{0, 2},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x48 */
+ {{3, 3},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x49 */
+ {{0, 0},
+ {3, 3},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x4a */
+ {{1, 1},
+ {3, 3},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x4b */
+ {{0, 1},
+ {3, 3},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x4c */
+ {{2, 3},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x4d */
+ {{0, 0},
+ {2, 3},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x4e */
+ {{1, 3},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x4f */
+ {{0, 3},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x50 */
+ {{4, 4},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x51 */
+ {{0, 0},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x52 */
+ {{1, 1},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x53 */
+ {{0, 1},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x54 */
+ {{2, 2},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 4, 0, /* 0x55 */
+ {{0, 0},
+ {2, 2},
+ {4, 4},
+ {6, 6}
+ }
+ },
+ {0, 0, 3, 0, /* 0x56 */
+ {{1, 2},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x57 */
+ {{0, 2},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x58 */
+ {{3, 4},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x59 */
+ {{0, 0},
+ {3, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x5a */
+ {{1, 1},
+ {3, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x5b */
+ {{0, 1},
+ {3, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x5c */
+ {{2, 4},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x5d */
+ {{0, 0},
+ {2, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x5e */
+ {{1, 4},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x5f */
+ {{0, 4},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x60 */
+ {{5, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x61 */
+ {{0, 0},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x62 */
+ {{1, 1},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x63 */
+ {{0, 1},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x64 */
+ {{2, 2},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x65 */
+ {{0, 0},
+ {2, 2},
+ {5, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x66 */
+ {{1, 2},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x67 */
+ {{0, 2},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x68 */
+ {{3, 3},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x69 */
+ {{0, 0},
+ {3, 3},
+ {5, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x6a */
+ {{1, 1},
+ {3, 3},
+ {5, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x6b */
+ {{0, 1},
+ {3, 3},
+ {5, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x6c */
+ {{2, 3},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x6d */
+ {{0, 0},
+ {2, 3},
+ {5, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x6e */
+ {{1, 3},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x6f */
+ {{0, 3},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x70 */
+ {{4, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x71 */
+ {{0, 0},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x72 */
+ {{1, 1},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x73 */
+ {{0, 1},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x74 */
+ {{2, 2},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x75 */
+ {{0, 0},
+ {2, 2},
+ {4, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x76 */
+ {{1, 2},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x77 */
+ {{0, 2},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x78 */
+ {{3, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x79 */
+ {{0, 0},
+ {3, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x7a */
+ {{1, 1},
+ {3, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x7b */
+ {{0, 1},
+ {3, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x7c */
+ {{2, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x7d */
+ {{0, 0},
+ {2, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x7e */
+ {{1, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x7f */
+ {{0, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0x80 */
+ {{7, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0x81 */
+ {{0, 0},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x82 */
+ {{1, 1},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0x83 */
+ {{0, 1},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x84 */
+ {{2, 2},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x85 */
+ {{0, 0},
+ {2, 2},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x86 */
+ {{1, 2},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0x87 */
+ {{0, 2},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x88 */
+ {{3, 3},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x89 */
+ {{0, 0},
+ {3, 3},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0x8a */
+ {{1, 1},
+ {3, 3},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x8b */
+ {{0, 1},
+ {3, 3},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x8c */
+ {{2, 3},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x8d */
+ {{0, 0},
+ {2, 3},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x8e */
+ {{1, 3},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0x8f */
+ {{0, 3},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x90 */
+ {{4, 4},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x91 */
+ {{0, 0},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0x92 */
+ {{1, 1},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x93 */
+ {{0, 1},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0x94 */
+ {{2, 2},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0x95 */
+ {{0, 0},
+ {2, 2},
+ {4, 4},
+ {7, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0x96 */
+ {{1, 2},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x97 */
+ {{0, 2},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x98 */
+ {{3, 4},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x99 */
+ {{0, 0},
+ {3, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0x9a */
+ {{1, 1},
+ {3, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x9b */
+ {{0, 1},
+ {3, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x9c */
+ {{2, 4},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x9d */
+ {{0, 0},
+ {2, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x9e */
+ {{1, 4},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0x9f */
+ {{0, 4},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xa0 */
+ {{5, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xa1 */
+ {{0, 0},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xa2 */
+ {{1, 1},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xa3 */
+ {{0, 1},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xa4 */
+ {{2, 2},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0xa5 */
+ {{0, 0},
+ {2, 2},
+ {5, 5},
+ {7, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0xa6 */
+ {{1, 2},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xa7 */
+ {{0, 2},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xa8 */
+ {{3, 3},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0xa9 */
+ {{0, 0},
+ {3, 3},
+ {5, 5},
+ {7, 7}
+ }
+ },
+ {0, 1, 4, 0, /* 0xaa */
+ {{1, 1},
+ {3, 3},
+ {5, 5},
+ {7, 7}
+ }
+ },
+ {1, 1, 4, 0, /* 0xab */
+ {{0, 1},
+ {3, 3},
+ {5, 5},
+ {7, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0xac */
+ {{2, 3},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0xad */
+ {{0, 0},
+ {2, 3},
+ {5, 5},
+ {7, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0xae */
+ {{1, 3},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xaf */
+ {{0, 3},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xb0 */
+ {{4, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xb1 */
+ {{0, 0},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xb2 */
+ {{1, 1},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xb3 */
+ {{0, 1},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xb4 */
+ {{2, 2},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0xb5 */
+ {{0, 0},
+ {2, 2},
+ {4, 5},
+ {7, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0xb6 */
+ {{1, 2},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xb7 */
+ {{0, 2},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xb8 */
+ {{3, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xb9 */
+ {{0, 0},
+ {3, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xba */
+ {{1, 1},
+ {3, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xbb */
+ {{0, 1},
+ {3, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xbc */
+ {{2, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xbd */
+ {{0, 0},
+ {2, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xbe */
+ {{1, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xbf */
+ {{0, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xc0 */
+ {{6, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xc1 */
+ {{0, 0},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xc2 */
+ {{1, 1},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xc3 */
+ {{0, 1},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xc4 */
+ {{2, 2},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xc5 */
+ {{0, 0},
+ {2, 2},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xc6 */
+ {{1, 2},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xc7 */
+ {{0, 2},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xc8 */
+ {{3, 3},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xc9 */
+ {{0, 0},
+ {3, 3},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xca */
+ {{1, 1},
+ {3, 3},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xcb */
+ {{0, 1},
+ {3, 3},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xcc */
+ {{2, 3},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xcd */
+ {{0, 0},
+ {2, 3},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xce */
+ {{1, 3},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xcf */
+ {{0, 3},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xd0 */
+ {{4, 4},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xd1 */
+ {{0, 0},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xd2 */
+ {{1, 1},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xd3 */
+ {{0, 1},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xd4 */
+ {{2, 2},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0xd5 */
+ {{0, 0},
+ {2, 2},
+ {4, 4},
+ {6, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0xd6 */
+ {{1, 2},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xd7 */
+ {{0, 2},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xd8 */
+ {{3, 4},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xd9 */
+ {{0, 0},
+ {3, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xda */
+ {{1, 1},
+ {3, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xdb */
+ {{0, 1},
+ {3, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xdc */
+ {{2, 4},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xdd */
+ {{0, 0},
+ {2, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xde */
+ {{1, 4},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xdf */
+ {{0, 4},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xe0 */
+ {{5, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xe1 */
+ {{0, 0},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xe2 */
+ {{1, 1},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xe3 */
+ {{0, 1},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xe4 */
+ {{2, 2},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xe5 */
+ {{0, 0},
+ {2, 2},
+ {5, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xe6 */
+ {{1, 2},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xe7 */
+ {{0, 2},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xe8 */
+ {{3, 3},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xe9 */
+ {{0, 0},
+ {3, 3},
+ {5, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xea */
+ {{1, 1},
+ {3, 3},
+ {5, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xeb */
+ {{0, 1},
+ {3, 3},
+ {5, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xec */
+ {{2, 3},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xed */
+ {{0, 0},
+ {2, 3},
+ {5, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xee */
+ {{1, 3},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xef */
+ {{0, 3},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xf0 */
+ {{4, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xf1 */
+ {{0, 0},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xf2 */
+ {{1, 1},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xf3 */
+ {{0, 1},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xf4 */
+ {{2, 2},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xf5 */
+ {{0, 0},
+ {2, 2},
+ {4, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xf6 */
+ {{1, 2},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xf7 */
+ {{0, 2},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xf8 */
+ {{3, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xf9 */
+ {{0, 0},
+ {3, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xfa */
+ {{1, 1},
+ {3, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xfb */
+ {{0, 1},
+ {3, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xfc */
+ {{2, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xfd */
+ {{0, 0},
+ {2, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xfe */
+ {{1, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 1, 0, /* 0xff */
+ {{0, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ }
+};
+
+
int
sctp_is_address_in_scope(struct sctp_ifa *ifa,
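The 256-entry table above is a lookup keyed by one byte of the association's (nr_)mapping array: each entry precomputes the runs of set bits in that byte, i.e. the gap-ack blocks it contributes, so building a SACK never has to scan bits at runtime. The right_edge/left_edge flags appear to record whether the byte's lowest or highest bit is set (compare the 0x01 and 0x80 entries), which is what lets runs merge across byte boundaries. A small user-space sketch of the same derivation, using a hypothetical helper that is not part of the patch; it reproduces the {1, 2} {4, 4} {6, 6} blocks stored for 0x56:

#include <stdint.h>
#include <stdio.h>

struct gap { uint8_t start, end; };

/*
 * Derive the gap blocks one mapping-array byte encodes: bit i set
 * means TSN (base + i) has arrived, and each run of set bits is one
 * block. Eight bits hold at most four runs (e.g. 0x55), which is why
 * the table reserves four slots per entry.
 */
static int
gaps_from_byte(uint8_t bits, struct gap out[4])
{
	int i = 0, n = 0;

	while (i < 8) {
		if (bits & (1 << i)) {
			out[n].start = (uint8_t)i;
			while (i < 8 && (bits & (1 << i)))
				i++;
			out[n].end = (uint8_t)(i - 1);
			n++;
		} else {
			i++;
		}
	}
	return (n);
}

int
main(void)
{
	struct gap g[4];
	int i, n;

	n = gaps_from_byte(0x56, g);
	for (i = 0; i < n; i++)
		printf("{%d, %d}\n", g[i].start, g[i].end);
	return (0);
}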
@@ -3402,6 +5208,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
struct mbuf *m,
uint32_t auth_offset,
struct sctp_auth_chunk *auth,
+ uint16_t auth_keyid,
int nofragment_flag,
int ecn_ok,
struct sctp_tmit_chunk *chk,
@@ -3454,7 +5261,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
/* fill in the HMAC digest for any AUTH chunk in the packet */
if ((auth != NULL) && (stcb != NULL)) {
- sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb);
+ sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
}
/* Calculate the csum and fill in the length of the packet */
sctphdr = mtod(m, struct sctphdr *);
@@ -3748,11 +5555,12 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
uint32_t mtu;
mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
- if (mtu &&
- (stcb->asoc.smallest_mtu > mtu)) {
+ if (net->port) {
+ mtu -= sizeof(struct udphdr);
+ }
+ if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
#ifdef SCTP_PRINT_FOR_B_AND_M
- SCTP_PRINTF("sctp_mtu_size_reset called after ip_output mtu-change:%d\n",
- mtu);
+ SCTP_PRINTF("sctp_mtu_size_reset called after ip_output mtu-change:%d\n", mtu);
#endif
sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
net->mtu = mtu;
@@ -4058,6 +5866,9 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
#endif
sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
net->mtu = mtu;
+ if (net->port) {
+ net->mtu -= sizeof(struct udphdr);
+ }
}
} else if (ifp) {
if (ND_IFINFO(ifp)->linkmtu &&
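Both MTU hunks above make the same adjustment for the NAT draft's UDP encapsulation: when a destination is reached through a UDP tunnel (net->port != 0), the usable SCTP MTU shrinks by the 8-byte UDP header. A minimal stand-alone sketch of the arithmetic (the port value below is only an example):

#include <stdio.h>

#define UDP_HDR_LEN 8	/* sizeof(struct udphdr) */

/*
 * Effective MTU toward a destination; udp_port is nonzero when the
 * peer is reached through UDP encapsulation, mirroring the
 * net->port test in the patch.
 */
static unsigned int
effective_mtu(unsigned int path_mtu, unsigned short udp_port)
{
	return (udp_port ? path_mtu - UDP_HDR_LEN : path_mtu);
}

int
main(void)
{
	printf("%u\n", effective_mtu(1500, 9899));	/* prints 1492 */
	return (0);
}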
@@ -4243,6 +6054,12 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
+ /*
+ * EY - if the initiator supports nr_sacks, report that to the
+ * responder in the INIT chunk
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off))
+ pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
p_len = sizeof(*pr_supported) + num_ext;
pr_supported->ph.param_length = htons(p_len);
bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
@@ -4256,6 +6073,15 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
SCTP_BUF_LEN(m) += sizeof(*ecn_nonce);
}
+ if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
+ /* Add NAT friendly parameter */
+ struct sctp_paramhdr *ph;
+
+ ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
+ ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
+ ph->param_length = htons(sizeof(struct sctp_paramhdr));
+ SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
+ }
/* add authentication parameters */
if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
struct sctp_auth_random *randp;
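The NAT-friendly parameter added above is a bare TLV: a 4-byte sctp_paramhdr whose type is SCTP_HAS_NAT_SUPPORT and whose length covers just the header. A rough user-space sketch of appending such a parameter to a flat buffer (the struct layout and the constant's value are assumptions for illustration; the kernel works on mbufs):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct paramhdr {			/* mirrors struct sctp_paramhdr */
	uint16_t param_type;
	uint16_t param_length;
};

#define HAS_NAT_SUPPORT 0xc007		/* assumed SCTP_HAS_NAT_SUPPORT */

/* Append the 4-byte NAT-support TLV at offset *len and advance it. */
static void
add_nat_param(uint8_t *buf, size_t *len)
{
	struct paramhdr ph;

	ph.param_type = htons(HAS_NAT_SUPPORT);
	ph.param_length = htons(sizeof(ph));	/* header only, no value */
	memcpy(buf + *len, &ph, sizeof(ph));
	*len += sizeof(ph);
}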
@@ -4359,7 +6185,7 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
ret = sctp_lowlevel_chunk_output(inp, stcb, net,
(struct sockaddr *)&net->ro._l_addr,
- m, 0, NULL, 0, 0, NULL, 0, net->port, so_locked, NULL);
+ m, 0, NULL, 0, 0, 0, NULL, 0, net->port, so_locked, NULL);
SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
@@ -4368,7 +6194,7 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
struct mbuf *
sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
- int param_offset, int *abort_processing, struct sctp_chunkhdr *cp)
+ int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
{
/*
* Given a mbuf containing an INIT or INIT-ACK with the param_offset
@@ -4481,10 +6307,14 @@ sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
}
at += padded_size;
break;
+ case SCTP_HAS_NAT_SUPPORT:
+ *nat_friendly = 1;
+ /* fall through */
case SCTP_ECN_NONCE_SUPPORTED:
case SCTP_PRSCTP_SUPPORTED:
+
if (padded_size != sizeof(struct sctp_paramhdr)) {
- SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecnnonce/prsctp %d\n", plen);
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecnnonce/prsctp/nat support %d\n", plen);
goto invalid_size;
}
at += padded_size;
@@ -4909,6 +6739,7 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
int abort_flag, padval;
int num_ext;
int p_len;
+ int nat_friendly = 0;
struct socket *so;
if (stcb)
@@ -4931,7 +6762,7 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
abort_flag = 0;
op_err = sctp_arethere_unrecognized_parameters(init_pkt,
(offset + sizeof(struct sctp_init_chunk)),
- &abort_flag, (struct sctp_chunkhdr *)init_chk);
+ &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
if (abort_flag) {
do_a_abort:
sctp_send_abort(init_pkt, iphlen, sh,
@@ -5261,7 +7092,15 @@ do_a_abort:
if (asoc) {
atomic_add_int(&asoc->refcnt, 1);
SCTP_TCB_UNLOCK(stcb);
- vtag = sctp_select_a_tag(inp, 1);
+ new_tag:
+ vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
+ if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
+ /*
+ * Got a duplicate vtag from a peer behind a
+ * NAT; make sure we don't use it.
+ */
+ goto new_tag;
+ }
initackm_out->msg.init.initiate_tag = htonl(vtag);
/* get a TSN to use too */
itsn = sctp_select_initial_TSN(&inp->sctp_ep);
@@ -5269,7 +7108,7 @@ do_a_abort:
SCTP_TCB_LOCK(stcb);
atomic_add_int(&asoc->refcnt, -1);
} else {
- vtag = sctp_select_a_tag(inp, 1);
+ vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
initackm_out->msg.init.initiate_tag = htonl(vtag);
/* get a TSN to use too */
initackm_out->msg.init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
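sctp_select_a_tag() now takes the local and remote ports, matching the commit's relaxed vtag uniqueness (duplicates are disambiguated by address and port at lookup time), and the new_tag loop re-draws whenever a NAT'd peer would collide with our own tag. The retry idea in isolation, as a sketch with arc4random standing in for the kernel's tag selection:

#include <stdint.h>
#include <stdlib.h>

/*
 * Keep drawing verification tags until we get one that is nonzero
 * (0 is reserved) and distinct from our own tag, so a peer behind a
 * NAT cannot end up sharing our vtag.
 */
static uint32_t
pick_peer_vtag(uint32_t my_vtag)
{
	uint32_t v;

	do {
		v = arc4random();
	} while (v == 0 || v == my_vtag);
	return (v);
}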
@@ -5337,11 +7176,17 @@ do_a_abort:
prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
prsctp->ph.param_length = htons(sizeof(*prsctp));
SCTP_BUF_LEN(m) += sizeof(*prsctp);
+ if (nat_friendly) {
+ /* Add NAT friendly parameter */
+ struct sctp_paramhdr *ph;
+ ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
+ ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
+ ph->param_length = htons(sizeof(struct sctp_paramhdr));
+ SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
+ }
/* And now tell the peer we do all the extensions */
- pr_supported = (struct sctp_supported_chunk_types_param *)
- ((caddr_t)prsctp + sizeof(*prsctp));
-
+ pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
num_ext = 0;
pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
@@ -5351,6 +7196,12 @@ do_a_abort:
pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
+ /*
+ * EY if the sysctl variable is set, tell the assoc. initiator that
+ * we do nr_sack
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off))
+ pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
p_len = sizeof(*pr_supported) + num_ext;
pr_supported->ph.param_length = htons(p_len);
bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
@@ -5523,7 +7374,7 @@ do_a_abort:
}
(void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
- NULL, 0, port, SCTP_SO_NOT_LOCKED, over_addr);
+ 0, NULL, 0, port, SCTP_SO_NOT_LOCKED, over_addr);
SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}
@@ -6522,6 +8373,7 @@ sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc)
chk; chk = nchk) {
nchk = TAILQ_NEXT(chk, sctp_next);
if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
(chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
(chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
(chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
@@ -6801,7 +8653,8 @@ re_look:
if (sp->sinfo_flags & SCTP_UNORDERED) {
rcv_flags |= SCTP_DATA_UNORDERED;
}
- if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) {
+ if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
+ ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
}
/* clear out the chunk before setting up */
@@ -6981,6 +8834,11 @@ dont_do_it:
chk->whoTo = net;
atomic_add_int(&chk->whoTo->ref_count, 1);
+ if (sp->holds_key_ref) {
+ chk->auth_keyid = sp->auth_keyid;
+ sctp_auth_key_acquire(stcb, chk->auth_keyid);
+ chk->holds_key_ref = 1;
+ }
chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
@@ -7319,6 +9177,8 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
int tsns_sent = 0;
uint32_t auth_offset = 0;
struct sctp_auth_chunk *auth = NULL;
+ uint16_t auth_keyid = 0;
+ int data_auth_reqd = 0;
/*
* JRS 5/14/07 - Add flag for whether a heartbeat is sent to the
@@ -7688,7 +9548,7 @@ again_one_more_time:
auth_offset += sizeof(struct sctphdr);
if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
(struct sockaddr *)&net->ro._l_addr,
- outchain, auth_offset, auth,
+ outchain, auth_offset, auth, stcb->asoc.authinfo.active_keyid,
no_fragmentflg, 0, NULL, asconf, net->port, so_locked, NULL))) {
if (error == ENOBUFS) {
asoc->ifp_had_enobuf = 1;
@@ -7837,6 +9697,7 @@ again_one_more_time:
chk->data = NULL;
/* Mark things to be removed, if needed */
if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
(chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
(chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
(chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
@@ -7864,6 +9725,18 @@ again_one_more_time:
inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
}
}
+ /*
+ * EY - nr-sack version of the above
+ * if statement
+ */
+ if ((SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) &&
+ (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) { /* EY !?! */
+ /* turn off the timer */
+ if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
+ inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
+ }
+ }
ctl_cnt++;
} else {
/*
@@ -7915,7 +9788,8 @@ again_one_more_time:
auth_offset += sizeof(struct sctphdr);
if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
(struct sockaddr *)&net->ro._l_addr,
- outchain, auth_offset, auth,
+ outchain,
+ auth_offset, auth, stcb->asoc.authinfo.active_keyid,
no_fragmentflg, 0, NULL, asconf, net->port, so_locked, NULL))) {
if (error == ENOBUFS) {
asoc->ifp_had_enobuf = 1;
@@ -7989,9 +9863,9 @@ again_one_more_time:
* bundled, this adjustment won't matter anyways since the
* packet will be going out...
*/
- if ((auth == NULL) &&
- sctp_auth_is_required_chunk(SCTP_DATA,
- stcb->asoc.peer_auth_chunks)) {
+ data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
+ stcb->asoc.peer_auth_chunks);
+ if (data_auth_reqd && (auth == NULL)) {
mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
}
/* now lets add any data within the MTU constraints */
@@ -8066,17 +9940,23 @@ again_one_more_time:
* requires it, save the offset into
* the chain for AUTH
*/
- if ((auth == NULL) &&
- (sctp_auth_is_required_chunk(SCTP_DATA,
- stcb->asoc.peer_auth_chunks))) {
-
- outchain = sctp_add_auth_chunk(outchain,
- &endoutchain,
- &auth,
- &auth_offset,
- stcb,
- SCTP_DATA);
- SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ if (data_auth_reqd) {
+ if (auth == NULL) {
+ outchain = sctp_add_auth_chunk(outchain,
+ &endoutchain,
+ &auth,
+ &auth_offset,
+ stcb,
+ SCTP_DATA);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ auth_keyid = chk->auth_keyid;
+ } else if (auth_keyid != chk->auth_keyid) {
+ /*
+ * different keyid,
+ * so done bundling
+ */
+ break;
+ }
}
outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
chk->send_size, chk->copy_by_ref);
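This hunk carries the core rule of the per-chunk key id change: a packet holds at most one AUTH chunk, computed with a single key, so the first DATA chunk fixes auth_keyid for the packet and any queued chunk signed under a different key ends the bundle. A compact model of that grouping rule (hypothetical, outside the kernel):

#include <stdint.h>
#include <stdio.h>

/*
 * Count the packets needed when chunks may share a packet only if
 * they carry the same auth key id, as the bundling loop enforces.
 */
static int
packets_needed(const uint16_t *keyid, int n)
{
	int i, pkts = 0, open = 0;
	uint16_t cur = 0;

	for (i = 0; i < n; i++) {
		if (!open) {
			cur = keyid[i];	/* first chunk fixes the key */
			open = 1;
			pkts++;
		} else if (keyid[i] != cur) {
			/* different keyid, so done bundling */
			cur = keyid[i];
			pkts++;
		}
	}
	return (pkts);
}

int
main(void)
{
	uint16_t ids[] = { 1, 1, 2, 2, 1 };

	printf("%d\n", packets_needed(ids, 5));	/* prints 3 */
	return (0);
}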
@@ -8205,6 +10085,12 @@ again_one_more_time:
shdr->v_tag = htonl(stcb->asoc.peer_vtag);
shdr->checksum = 0;
auth_offset += sizeof(struct sctphdr);
+ /*
+ * if data auth isn't needed, use the assoc active
+ * key
+ */
+ if (!data_auth_reqd)
+ auth_keyid = stcb->asoc.authinfo.active_keyid;
if ((error = sctp_lowlevel_chunk_output(inp,
stcb,
net,
@@ -8212,6 +10098,7 @@ again_one_more_time:
outchain,
auth_offset,
auth,
+ auth_keyid,
no_fragmentflg,
bundle_at,
data_list[0],
@@ -8848,6 +10735,8 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
struct sctp_auth_chunk *auth = NULL;
uint32_t auth_offset = 0;
+ uint16_t auth_keyid = 0;
+ int data_auth_reqd = 0;
uint32_t dmtu = 0;
SCTP_TCB_LOCK_ASSERT(stcb);
@@ -8899,6 +10788,7 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
&auth, &auth_offset,
stcb,
chk->rec.chunk_id.id);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}
m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
break;
@@ -8928,8 +10818,9 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
chk->snd_count++; /* update our count */
if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
- (struct sockaddr *)&chk->whoTo->ro._l_addr, m, auth_offset,
- auth, no_fragmentflg, 0, NULL, 0, chk->whoTo->port, so_locked, NULL))) {
+ (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
+ auth_offset, auth, stcb->asoc.authinfo.active_keyid,
+ no_fragmentflg, 0, NULL, 0, chk->whoTo->port, so_locked, NULL))) {
SCTP_STAT_INCR(sctps_lowlevelerr);
return (error);
}
@@ -8967,6 +10858,7 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
#ifdef SCTP_AUDITING_ENABLED
sctp_auditing(20, inp, stcb, NULL);
#endif
+ data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
if (chk->sent != SCTP_DATAGRAM_RESEND) {
/* No, not sent to this net or not ready for rtx */
@@ -9046,9 +10938,7 @@ one_chunk_around:
* until the AUTH chunk is actually added below in case
* there is no room for this chunk.
*/
- if ((auth == NULL) &&
- sctp_auth_is_required_chunk(SCTP_DATA,
- stcb->asoc.peer_auth_chunks)) {
+ if (data_auth_reqd && (auth == NULL)) {
dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
} else
dmtu = 0;
@@ -9056,12 +10946,20 @@ one_chunk_around:
if ((chk->send_size <= (mtu - dmtu)) ||
(chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
/* ok we will add this one */
- if ((auth == NULL) &&
- (sctp_auth_is_required_chunk(SCTP_DATA,
- stcb->asoc.peer_auth_chunks))) {
- m = sctp_add_auth_chunk(m, &endofchain,
- &auth, &auth_offset,
- stcb, SCTP_DATA);
+ if (data_auth_reqd) {
+ if (auth == NULL) {
+ m = sctp_add_auth_chunk(m,
+ &endofchain,
+ &auth,
+ &auth_offset,
+ stcb,
+ SCTP_DATA);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ auth_keyid = chk->auth_keyid;
+ } else if (chk->auth_keyid != auth_keyid) {
+ /* different keyid, so done bundling */
+ break;
+ }
}
m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
if (m == NULL) {
@@ -9099,21 +10997,28 @@ one_chunk_around:
fwd = TAILQ_NEXT(fwd, sctp_next);
continue;
}
- if ((auth == NULL) &&
- sctp_auth_is_required_chunk(SCTP_DATA,
- stcb->asoc.peer_auth_chunks)) {
+ if (data_auth_reqd && (auth == NULL)) {
dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
} else
dmtu = 0;
if (fwd->send_size <= (mtu - dmtu)) {
- if ((auth == NULL) &&
- (sctp_auth_is_required_chunk(SCTP_DATA,
- stcb->asoc.peer_auth_chunks))) {
- m = sctp_add_auth_chunk(m,
- &endofchain,
- &auth, &auth_offset,
- stcb,
- SCTP_DATA);
+ if (data_auth_reqd) {
+ if (auth == NULL) {
+ m = sctp_add_auth_chunk(m,
+ &endofchain,
+ &auth,
+ &auth_offset,
+ stcb,
+ SCTP_DATA);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ auth_keyid = fwd->auth_keyid;
+ } else if (fwd->auth_keyid != auth_keyid) {
+ /*
+ * different keyid,
+ * so done bundling
+ */
+ break;
+ }
}
m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
if (m == NULL) {
@@ -9165,10 +11070,17 @@ one_chunk_around:
shdr->v_tag = htonl(stcb->asoc.peer_vtag);
shdr->checksum = 0;
auth_offset += sizeof(struct sctphdr);
+ /*
+ * if doing DATA auth, use the data chunk(s) key id,
+ * otherwise use the assoc's active key id
+ */
+ if (!data_auth_reqd)
+ auth_keyid = stcb->asoc.authinfo.active_keyid;
/* Now lets send it, if there is anything to send :> */
if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
- (struct sockaddr *)&net->ro._l_addr, m, auth_offset,
- auth, no_fragmentflg, 0, NULL, 0, net->port, so_locked, NULL))) {
+ (struct sockaddr *)&net->ro._l_addr, m,
+ auth_offset, auth, auth_keyid,
+ no_fragmentflg, 0, NULL, 0, net->port, so_locked, NULL))) {
/* error, we could not output */
SCTP_STAT_INCR(sctps_lowlevelerr);
return (error);
@@ -9382,7 +11294,14 @@ sctp_chunk_output(struct sctp_inpcb *inp,
* running, if so piggy-back the sack.
*/
if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
- sctp_send_sack(stcb);
+ /*
+ * EY - if nr_sacks are used, send an nr-sack;
+ * otherwise send a sack
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
+ sctp_send_nr_sack(stcb);
+ else
+ sctp_send_sack(stcb);
(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
}
while (asoc->sent_queue_retran_cnt) {
@@ -10071,6 +11990,403 @@ sctp_send_sack(struct sctp_tcb *stcb)
return;
}
+/* EY - This function replaces sctp_send_sack when nr_sacks are negotiated */
+void
+sctp_send_nr_sack(struct sctp_tcb *stcb)
+{
+ /*-
+ * Queue up an NR-SACK in the control queue. We must first check to see
+ * if an NR-SACK is somehow on the control queue. If so, we will take
+ * and remove the old one.
+ */
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *chk, *a_chk;
+
+ struct sctp_nr_sack_chunk *nr_sack;
+
+ struct sctp_gap_ack_block *gap_descriptor;
+ struct sctp_nr_gap_ack_block *nr_gap_descriptor;
+
+ struct sack_track *selector;
+ struct nr_sack_track *nr_selector;
+
+ /* EY - do we need a separate nr_mergeable? No. */
+ int mergeable = 0;
+ int offset;
+ caddr_t limit;
+ uint32_t *dup;
+ int limit_reached = 0;
+ unsigned int i, jstart, siz, j;
+ unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
+ int num_dups = 0;
+ int space_req;
+ unsigned int reserved = 0;
+
+ a_chk = NULL;
+ asoc = &stcb->asoc;
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (asoc->last_data_chunk_from == NULL) {
+ /* Hmm we never received anything */
+ return;
+ }
+ sctp_set_rwnd(stcb, asoc);
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) {
+ /* Hmm, found an NR-SACK already on queue, remove it */
+ TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
+ asoc->ctrl_queue_cnt--;
+ a_chk = chk;
+ if (a_chk->data) {
+ sctp_m_freem(a_chk->data);
+ a_chk->data = NULL;
+ }
+ sctp_free_remote_addr(a_chk->whoTo);
+ a_chk->whoTo = NULL;
+ break;
+ }
+ }
+ if (a_chk == NULL) {
+ sctp_alloc_a_chunk(stcb, a_chk);
+ if (a_chk == NULL) {
+ /* No memory so we drop the idea, and set a timer */
+ if (stcb->asoc.delayed_ack) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
+ sctp_timer_start(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL);
+ } else {
+ stcb->asoc.send_sack = 1;
+ }
+ return;
+ }
+ a_chk->copy_by_ref = 0;
+ /* a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK; */
+ a_chk->rec.chunk_id.id = SCTP_NR_SELECTIVE_ACK;
+ a_chk->rec.chunk_id.can_take_data = 1;
+ }
+ /* Clear our pkt counts */
+ asoc->data_pkts_seen = 0;
+
+ a_chk->asoc = asoc;
+ a_chk->snd_count = 0;
+ a_chk->send_size = 0; /* fill in later */
+ a_chk->sent = SCTP_DATAGRAM_UNSENT;
+ a_chk->whoTo = NULL;
+
+ if ((asoc->numduptsns) ||
+ (asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)
+ ) {
+ /*-
+ * Ok, we have some duplicates or the destination for the
+ * sack is unreachable, lets see if we can select an
+ * alternate than asoc->last_data_chunk_from
+ */
+ if ((!(asoc->last_data_chunk_from->dest_state &
+ SCTP_ADDR_NOT_REACHABLE)) &&
+ (asoc->used_alt_onsack > asoc->numnets)) {
+ /* We used an alt last time, don't this time */
+ a_chk->whoTo = NULL;
+ } else {
+ asoc->used_alt_onsack++;
+ a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
+ }
+ if (a_chk->whoTo == NULL) {
+ /* Nope, no alternate */
+ a_chk->whoTo = asoc->last_data_chunk_from;
+ asoc->used_alt_onsack = 0;
+ }
+ } else {
+ /*
+ * No duplicates so we use the last place we received data
+ * from.
+ */
+ asoc->used_alt_onsack = 0;
+ a_chk->whoTo = asoc->last_data_chunk_from;
+ }
+ if (a_chk->whoTo) {
+ atomic_add_int(&a_chk->whoTo->ref_count, 1);
+ }
+ if (asoc->highest_tsn_inside_map == asoc->cumulative_tsn) {
+ /* no gaps */
+ space_req = sizeof(struct sctp_nr_sack_chunk);
+ } else {
+ /* gaps get a full MCLBYTES cluster so all the blocks fit */
+ space_req = MCLBYTES;
+ }
+ /* Ok now lets formulate a MBUF with our sack */
+ a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA);
+ if ((a_chk->data == NULL) ||
+ (a_chk->whoTo == NULL)) {
+ /* rats, no mbuf memory */
+ if (a_chk->data) {
+ /* was a problem with the destination */
+ sctp_m_freem(a_chk->data);
+ a_chk->data = NULL;
+ }
+ sctp_free_a_chunk(stcb, a_chk);
+ /* sa_ignore NO_NULL_CHK */
+ if (stcb->asoc.delayed_ack) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
+ sctp_timer_start(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL);
+ } else {
+ stcb->asoc.send_sack = 1;
+ }
+ return;
+ }
+ /* ok, lets go through and fill it in */
+ SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
+ space = M_TRAILINGSPACE(a_chk->data);
+ if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
+ space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
+ }
+ limit = mtod(a_chk->data, caddr_t);
+ limit += space;
+
+ nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
+ nr_sack->ch.chunk_type = SCTP_NR_SELECTIVE_ACK;
+ /* EYJ */
+ /* 0x01 is used by nonce for ecn */
+ if ((SCTP_BASE_SYSCTL(sctp_ecn_enable)) &&
+ (SCTP_BASE_SYSCTL(sctp_ecn_nonce)) &&
+ (asoc->peer_supports_ecn_nonce))
+ nr_sack->ch.chunk_flags = (asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM);
+ else
+ nr_sack->ch.chunk_flags = 0;
+
+ if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
+ /*-
+ * CMT DAC algorithm: If 2 (i.e., binary 10) packets have been
+ * received, then set high bit to 1, else 0. Reset
+ * pkts_rcvd.
+ */
+ /* EY - TODO: which chunk flag is used in here? -The LSB */
+ nr_sack->ch.chunk_flags |= (asoc->cmt_dac_pkts_rcvd << 6);
+ asoc->cmt_dac_pkts_rcvd = 0;
+ }
+ /*
+ * EY - this is a never-reneging receiver, so all gaps are
+ * nr-gaps; set the All bit
+ */
+ if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
+ nr_sack->ch.chunk_flags |= SCTP_NR_SACK_ALL_BIT;
+ }
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
+ stcb->asoc.cumack_log_atsnt++;
+ if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
+ stcb->asoc.cumack_log_atsnt = 0;
+ }
+#endif
+ nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
+ nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
+ asoc->my_last_reported_rwnd = asoc->my_rwnd;
+
+ /* reset the readers interpretation */
+ stcb->freed_by_sorcv_sincelast = 0;
+
+ gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
+ nr_gap_descriptor = (struct sctp_nr_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
+
+ siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
+ if (compare_with_wrap(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, MAX_TSN)) {
+ offset = 1;
+ /*-
+ * cum-ack behind the mapping array, so we start and use all
+ * entries.
+ */
+ jstart = 0;
+ } else {
+ offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
+ /*-
+ * we skip the first one when the cum-ack is at or above the
+ * mapping array base. Note this only works if
+ */
+ jstart = 1;
+ }
+ if (compare_with_wrap(asoc->highest_tsn_inside_map, asoc->cumulative_tsn, MAX_TSN)) {
+ /* we have a gap .. maybe */
+ for (i = 0; i < siz; i++) {
+ selector = &sack_array[asoc->mapping_array[i]];
+ if (mergeable && selector->right_edge) {
+ /*
+ * Backup, left and right edges were ok to
+ * merge.
+ */
+ num_gap_blocks--;
+ gap_descriptor--;
+ }
+ if (selector->num_entries == 0)
+ mergeable = 0;
+ else {
+ for (j = jstart; j < selector->num_entries; j++) {
+ if (mergeable && selector->right_edge) {
+ /*
+ * do a merge by NOT setting
+ * the left side
+ */
+ mergeable = 0;
+ } else {
+ /*
+ * no merge, set the left
+ * side
+ */
+ mergeable = 0;
+ gap_descriptor->start = htons((selector->gaps[j].start + offset));
+ }
+ gap_descriptor->end = htons((selector->gaps[j].end + offset));
+ num_gap_blocks++;
+ gap_descriptor++;
+ if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
+ /* no more room */
+ limit_reached = 1;
+ break;
+ }
+ }
+ if (selector->left_edge) {
+ mergeable = 1;
+ }
+ }
+ if (limit_reached) {
+ /* Reached the limit stop */
+ break;
+ }
+ jstart = 0;
+ offset += 8;
+ }
+ if (num_gap_blocks == 0) {
+ /*
+ * slide not yet happened, and somehow we got called
+ * to send a sack. Cumack needs to move up.
+ */
+ int abort_flag = 0;
+
+ asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
+ nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
+ sctp_sack_check(stcb, 0, 0, &abort_flag);
+ }
+ }
+ /* ---- filling the nr_gap_ack blocks ---- */
+
+ nr_gap_descriptor = (struct sctp_nr_gap_ack_block *)gap_descriptor;
+
+ /* EY - there will be gaps + nr_gaps if draining is possible */
+ if (SCTP_BASE_SYSCTL(sctp_do_drain)) {
+
+ mergeable = 0;
+
+ siz = (((asoc->highest_tsn_inside_nr_map - asoc->nr_mapping_array_base_tsn) + 1) + 7) / 8;
+ if (compare_with_wrap(asoc->nr_mapping_array_base_tsn, asoc->cumulative_tsn, MAX_TSN)) {
+ offset = 1;
+ /*-
+ * cum-ack behind the mapping array, so we start and use all
+ * entries.
+ */
+ jstart = 0;
+ } else {
+ offset = asoc->nr_mapping_array_base_tsn - asoc->cumulative_tsn;
+ /*-
+ * we skip the first one when the cum-ack is at or above the
+ * mapping array base. Note this only works if
+ */
+ jstart = 1;
+ }
+ if (compare_with_wrap(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn, MAX_TSN)) {
+ /* we have a gap .. maybe */
+ for (i = 0; i < siz; i++) {
+ nr_selector = &nr_sack_array[asoc->nr_mapping_array[i]];
+ if (mergeable && nr_selector->right_edge) {
+ /*
+ * Backup, left and right edges were
+ * ok to merge.
+ */
+ num_nr_gap_blocks--;
+ nr_gap_descriptor--;
+ }
+ if (nr_selector->num_entries == 0)
+ mergeable = 0;
+ else {
+ for (j = jstart; j < nr_selector->num_entries; j++) {
+ if (mergeable && nr_selector->right_edge) {
+ /*
+ * do a merge by NOT
+ * setting the left
+ * side
+ */
+ mergeable = 0;
+ } else {
+ /*
+ * no merge, set the
+ * left side
+ */
+ mergeable = 0;
+ nr_gap_descriptor->start = htons((nr_selector->nr_gaps[j].start + offset));
+ }
+ nr_gap_descriptor->end = htons((nr_selector->nr_gaps[j].end + offset));
+ num_nr_gap_blocks++;
+ nr_gap_descriptor++;
+ if (((caddr_t)nr_gap_descriptor + sizeof(struct sctp_nr_gap_ack_block)) > limit) {
+ /* no more room */
+ limit_reached = 1;
+ break;
+ }
+ }
+ if (nr_selector->left_edge) {
+ mergeable = 1;
+ }
+ }
+ if (limit_reached) {
+ /* Reached the limit stop */
+ break;
+ }
+ jstart = 0;
+ offset += 8;
+ }
+ }
+ }
+ /* ---- end of filling the nr_gap_ack blocks ---- */
+
+ /* now we must add any dups we are going to report. */
+ if ((limit_reached == 0) && (asoc->numduptsns)) {
+ dup = (uint32_t *) nr_gap_descriptor;
+ for (i = 0; i < asoc->numduptsns; i++) {
+ *dup = htonl(asoc->dup_tsns[i]);
+ dup++;
+ num_dups++;
+ if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
+ /* no more room */
+ break;
+ }
+ }
+ asoc->numduptsns = 0;
+ }
+ /*
+ * now that the chunk is prepared, queue it to the control chunk
+ * queue.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
+ num_nr_gap_blocks = num_gap_blocks;
+ num_gap_blocks = 0;
+ }
+ a_chk->send_size = (sizeof(struct sctp_nr_sack_chunk) +
+ (num_gap_blocks * sizeof(struct sctp_gap_ack_block)) +
+ (num_nr_gap_blocks * sizeof(struct sctp_nr_gap_ack_block)) +
+ (num_dups * sizeof(int32_t)));
+
+ SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
+ nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
+ nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
+ nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
+ nr_sack->nr_sack.reserved = htons(reserved);
+ nr_sack->ch.chunk_length = htons(a_chk->send_size);
+ TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
+ asoc->ctrl_queue_cnt++;
+ asoc->send_sack = 0;
+ SCTP_STAT_INCR(sctps_sendsacks);
+ return;
+}
void
sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
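sctp_send_nr_sack() ends by sizing the chunk from its block counts and, for a never-reneging receiver (sctp_do_drain == 0), reclassifying every gap block as an nr-gap block before the counters are written. A rough model of that final arithmetic; the 20-byte header size is an assumption read off the on-wire layout (chunk header, cum_tsn_ack, a_rwnd, four 16-bit counters), and blocks and duplicate TSNs are 4 bytes each:

#include <stddef.h>
#include <stdio.h>

#define NR_SACK_HDR 20	/* assumed sizeof(struct sctp_nr_sack_chunk) */
#define BLOCK_LEN    4	/* gap block, nr-gap block, or dup TSN */

static size_t
nr_sack_len(unsigned gaps, unsigned nr_gaps, unsigned dups, int do_drain)
{
	if (do_drain == 0) {
		/* never-reneging: report all gaps as nr-gaps */
		nr_gaps += gaps;
		gaps = 0;
	}
	return (NR_SACK_HDR + (gaps + nr_gaps + dups) * BLOCK_LEN);
}

int
main(void)
{
	printf("%zu\n", nr_sack_len(2, 0, 1, 0));	/* prints 32 */
	return (0);
}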
@@ -10095,6 +12411,7 @@ sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
stcb->asoc.peer_auth_chunks)) {
m_out = sctp_add_auth_chunk(m_out, &m_end, &auth, &auth_offset,
stcb, SCTP_ABORT_ASSOCIATION);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}
SCTP_TCB_LOCK_ASSERT(stcb);
m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
@@ -10148,7 +12465,7 @@ sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
(void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb,
stcb->asoc.primary_destination,
(struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr,
- m_out, auth_offset, auth, 1, 0, NULL, 0, stcb->asoc.primary_destination->port, so_locked, NULL);
+ m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, NULL, 0, stcb->asoc.primary_destination->port, so_locked, NULL);
SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}
@@ -10177,7 +12494,7 @@ sctp_send_shutdown_complete(struct sctp_tcb *stcb,
SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_msg);
(void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
(struct sockaddr *)&net->ro._l_addr,
- m_shutdown_comp, 0, NULL, 1, 0, NULL, 0, net->port, SCTP_SO_NOT_LOCKED, NULL);
+ m_shutdown_comp, 0, NULL, 0, 1, 0, NULL, 0, net->port, SCTP_SO_NOT_LOCKED, NULL);
SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
return;
}
@@ -11686,6 +14003,11 @@ sctp_copy_it_in(struct sctp_tcb *stcb,
*error = 0;
goto skip_copy;
}
+ sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
+ if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
+ sctp_auth_key_acquire(stcb, stcb->asoc.authinfo.active_keyid);
+ sp->holds_key_ref = 1;
+ }
*error = sctp_copy_one(sp, uio, resv_in_first);
skip_copy:
if (*error) {
@@ -11830,9 +14152,9 @@ sctp_lower_sosend(struct socket *so,
addr,
sndlen);
/*-
- * Pre-screen address, if one is given the sin-len
- * must be set correctly!
- */
+ * Pre-screen address, if one is given the sin-len
+ * must be set correctly!
+ */
if (addr) {
if ((addr->sa_family == AF_INET) &&
(addr->sa_len != sizeof(struct sockaddr_in))) {
@@ -12092,6 +14414,9 @@ sctp_lower_sosend(struct socket *so,
if (initm.sinit_max_init_timeo)
asoc->initial_init_rto_max = initm.sinit_max_init_timeo;
if (asoc->streamoutcnt < asoc->pre_open_streams) {
+ struct sctp_stream_out *tmp_str;
+ int had_lock = 0;
+
/* Default is NOT correct */
SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, defout:%d pre_open:%d\n",
asoc->streamoutcnt, asoc->pre_open_streams);
@@ -12099,46 +14424,42 @@ sctp_lower_sosend(struct socket *so,
* What happens if this
* fails? we panic ...
*/
- {
- struct sctp_stream_out *tmp_str;
- int had_lock = 0;
- if (hold_tcblock) {
- had_lock = 1;
- SCTP_TCB_UNLOCK(stcb);
- }
- SCTP_MALLOC(tmp_str,
- struct sctp_stream_out *,
- (asoc->pre_open_streams *
- sizeof(struct sctp_stream_out)),
- SCTP_M_STRMO);
- if (had_lock) {
- SCTP_TCB_LOCK(stcb);
- }
- if (tmp_str != NULL) {
- SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
- asoc->strmout = tmp_str;
- asoc->streamoutcnt = asoc->pre_open_streams;
- } else {
- asoc->pre_open_streams = asoc->streamoutcnt;
- }
+ if (hold_tcblock) {
+ had_lock = 1;
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_MALLOC(tmp_str,
+ struct sctp_stream_out *,
+ (asoc->pre_open_streams *
+ sizeof(struct sctp_stream_out)),
+ SCTP_M_STRMO);
+ if (had_lock) {
+ SCTP_TCB_LOCK(stcb);
+ }
+ if (tmp_str != NULL) {
+ SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
+ asoc->strmout = tmp_str;
+ asoc->streamoutcnt = asoc->pre_open_streams;
+ } else {
+ asoc->pre_open_streams = asoc->streamoutcnt;
}
for (i = 0; i < asoc->streamoutcnt; i++) {
/*-
- * inbound side must be set
- * to 0xffff, also NOTE when
- * we get the INIT-ACK back
- * (for INIT sender) we MUST
- * reduce the count
- * (streamoutcnt) but first
- * check if we sent to any
- * of the upper streams that
- * were dropped (if some
- * were). Those that were
- * dropped must be notified
- * to the upper layer as
- * failed to send.
- */
+ * inbound side must be set
+ * to 0xffff, also NOTE when
+ * we get the INIT-ACK back
+ * (for INIT sender) we MUST
+ * reduce the count
+ * (streamoutcnt) but first
+ * check if we sent to any
+ * of the upper streams that
+ * were dropped (if some
+ * were). Those that were
+ * dropped must be notified
+ * to the upper layer as
+ * failed to send.
+ */
asoc->strmout[i].next_sequence_sent = 0x0;
TAILQ_INIT(&asoc->strmout[i].outqueue);
asoc->strmout[i].stream_no = i;
@@ -12153,11 +14474,11 @@ sctp_lower_sosend(struct socket *so,
/* out with the INIT */
queue_only_for_init = 1;
/*-
- * we may want to dig in after this call and adjust the MTU
- * value. It defaulted to 1500 (constant) but the ro
- * structure may now have an update and thus we may need to
- * change it BEFORE we append the message.
- */
+ * we may want to dig in after this call and adjust the MTU
+ * value. It defaulted to 1500 (constant) but the ro
+ * structure may now have an update and thus we may need to
+ * change it BEFORE we append the message.
+ */
net = stcb->asoc.primary_destination;
asoc = &stcb->asoc;
}
@@ -12168,6 +14489,7 @@ sctp_lower_sosend(struct socket *so,
non_blocking = 1;
}
asoc = &stcb->asoc;
+ atomic_add_int(&stcb->total_sends, 1);
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
if (sndlen > asoc->smallest_mtu) {
@@ -12277,7 +14599,6 @@ sctp_lower_sosend(struct socket *so,
* sctp_chunk_output() can be called below.
*/
queue_only = 1;
-
} else if (asoc->ifp_had_enobuf) {
SCTP_STAT_INCR(sctps_ifnomemqueued);
if (net->flight_size > (net->mtu * 2))
@@ -12352,10 +14673,10 @@ sctp_lower_sosend(struct socket *so,
error = uiomove((caddr_t)ph, (int)tot_out, uio);
if (error) {
/*-
- * Here if we can't get his data we
- * still abort we just don't get to
- * send the users note :-0
- */
+ * Here if we can't get his data we
+ * still abort we just don't get to
+ * send the users note :-0
+ */
sctp_m_freem(mm);
mm = NULL;
}
@@ -12450,22 +14771,21 @@ sctp_lower_sosend(struct socket *so,
if (((max_len <= local_add_more) &&
(SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
(max_len == 0) ||
- ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { /* if */
+ ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
/* No room right now ! */
SOCKBUF_LOCK(&so->so_snd);
inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
- ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) /* while */ ) {
- SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%d <(inq:%d + %d) || (%d+%d > %d)\n",
- SCTP_SB_LIMIT_SND(so),
+ ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
+ (unsigned int)SCTP_SB_LIMIT_SND(so),
inqueue_bytes,
local_add_more,
stcb->asoc.stream_queue_cnt,
stcb->asoc.chunks_on_out_queue,
SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
- sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA,
- so, asoc, sndlen);
+ sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, so, asoc, sndlen);
}
be.error = 0;
stcb->block_entry = &be;
@@ -12503,7 +14823,6 @@ skip_preblock:
if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
goto out_unlocked;
}
- atomic_add_int(&stcb->total_sends, 1);
/*
* sndlen covers for mbuf case uio_resid covers for the non-mbuf
* case NOTE: uio will be null when top/mbuf is passed
@@ -12603,8 +14922,7 @@ skip_preblock:
if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
(max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
- (uio->uio_resid &&
- (uio->uio_resid <= (int)max_len))) {
+ (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
sndout = 0;
new_tail = NULL;
if (hold_tcblock) {
@@ -12650,8 +14968,7 @@ skip_preblock:
if ((uio->uio_resid == 0) &&
((user_marks_eor == 0) ||
(srcv->sinfo_flags & SCTP_EOF) ||
- (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))
- ) {
+ (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
sp->msg_is_complete = 1;
} else {
sp->msg_is_complete = 0;
@@ -12718,8 +15035,7 @@ skip_preblock:
if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
(stcb->asoc.total_flight > 0) &&
(stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
- (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
- ) {
+ (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
/*-
* Ok, Nagle is set on and we have data outstanding.
@@ -12764,8 +15080,7 @@ skip_preblock:
queue_only = 1;
}
}
- if ((queue_only == 0) && (nagle_applies == 0)
- ) {
+ if ((queue_only == 0) && (nagle_applies == 0)) {
/*-
* need to start chunk output
* before blocking.. note that if
@@ -12792,22 +15107,21 @@ skip_preblock:
}
SOCKBUF_LOCK(&so->so_snd);
/*-
- * This is a bit strange, but I think it will
- * work. The total_output_queue_size is locked and
- * protected by the TCB_LOCK, which we just released.
- * There is a race that can occur between releasing it
- * above, and me getting the socket lock, where sacks
- * come in but we have not put the SB_WAIT on the
- * so_snd buffer to get the wakeup. After the LOCK
- * is applied the sack_processing will also need to
- * LOCK the so->so_snd to do the actual sowwakeup(). So
- * once we have the socket buffer lock if we recheck the
- * size we KNOW we will get to sleep safely with the
- * wakeup flag in place.
- */
+ * This is a bit strange, but I think it will
+ * work. The total_output_queue_size is locked and
+ * protected by the TCB_LOCK, which we just released.
+ * There is a race that can occur between releasing it
+ * above, and me getting the socket lock, where sacks
+ * come in but we have not put the SB_WAIT on the
+ * so_snd buffer to get the wakeup. After the LOCK
+ * is applied the sack_processing will also need to
+ * LOCK the so->so_snd to do the actual sowwakeup(). So
+ * once we have the socket buffer lock if we recheck the
+ * size we KNOW we will get to sleep safely with the
+ * wakeup flag in place.
+ */
if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
- min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))
- ) {
+ min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
so, asoc, uio->uio_resid);
@@ -12879,8 +15193,7 @@ dataless_eof:
/* EOF thing ? */
if ((srcv->sinfo_flags & SCTP_EOF) &&
(got_all_of_the_send == 1) &&
- (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)
- ) {
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
int cnt;
SCTP_STAT_INCR(sctps_sends_with_eof);
@@ -12914,15 +15227,15 @@ dataless_eof:
}
} else {
/*-
- * we still got (or just got) data to send, so set
- * SHUTDOWN_PENDING
- */
+ * we still got (or just got) data to send, so set
+ * SHUTDOWN_PENDING
+ */
/*-
- * XXX sockets draft says that SCTP_EOF should be
- * sent with no data. currently, we will allow user
- * data to be sent first and move to
- * SHUTDOWN-PENDING
- */
+ * XXX sockets draft says that SCTP_EOF should be
+ * sent with no data. currently, we will allow user
+ * data to be sent first and move to
+ * SHUTDOWN-PENDING
+ */
if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
(SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
(SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
@@ -12999,8 +15312,7 @@ skip_out_eof:
if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
(stcb->asoc.total_flight > 0) &&
(stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
- (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
- ) {
+ (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
/*-
* Ok, Nagle is set on and we have data outstanding.
* Don't send anything and let SACKs drive out the
diff --git a/sys/netinet/sctp_output.h b/sys/netinet/sctp_output.h
index ddbe7096..898f554 100644
--- a/sys/netinet/sctp_output.h
+++ b/sys/netinet/sctp_output.h
@@ -88,7 +88,7 @@ sctp_send_initiate_ack(struct sctp_inpcb *, struct sctp_tcb *,
struct mbuf *
sctp_arethere_unrecognized_parameters(struct mbuf *, int, int *,
- struct sctp_chunkhdr *);
+ struct sctp_chunkhdr *, int *);
void sctp_queue_op_err(struct sctp_tcb *, struct mbuf *);
int
@@ -150,6 +150,9 @@ void send_forward_tsn(struct sctp_tcb *, struct sctp_association *);
void sctp_send_sack(struct sctp_tcb *);
+/* EY 05/07/08 if nr_sacks are used, the following function is called instead of sctp_send_sack */
+void sctp_send_nr_sack(struct sctp_tcb *);
+
int sctp_send_hb(struct sctp_tcb *, int, struct sctp_nets *);
void sctp_send_ecn_echo(struct sctp_tcb *, struct sctp_nets *, uint32_t);
diff --git a/sys/netinet/sctp_pcb.c b/sys/netinet/sctp_pcb.c
index 4ce0f12..d300382 100644
--- a/sys/netinet/sctp_pcb.c
+++ b/sys/netinet/sctp_pcb.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -226,10 +226,7 @@ sctp_find_vrf(uint32_t vrf_id)
void
sctp_free_vrf(struct sctp_vrf *vrf)
{
- int ret;
-
- ret = atomic_fetchadd_int(&vrf->refcount, -1);
- if (ret == 1) {
+ if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&vrf->refcount)) {
if (vrf->vrf_addr_hash) {
SCTP_HASH_FREE(vrf->vrf_addr_hash, vrf->vrf_addr_hashmark);
vrf->vrf_addr_hash = NULL;
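The new macro replaces the open-coded fetch-and-add pattern being removed in these hunks; a sketch of what it presumably expands to (the real definition lives in sctp_os_bsd.h, which this commit also touches):

    /* Sketch, assuming the usual FreeBSD atomic(9) semantics:
     * atomic_fetchadd_int() returns the counter value *before* the
     * decrement, so "== 1" means this caller just dropped the last
     * reference and must free the object. */
    #define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) \
            (atomic_fetchadd_int((addr), -1) == 1)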
@@ -244,10 +241,7 @@ sctp_free_vrf(struct sctp_vrf *vrf)
void
sctp_free_ifn(struct sctp_ifn *sctp_ifnp)
{
- int ret;
-
- ret = atomic_fetchadd_int(&sctp_ifnp->refcount, -1);
- if (ret == 1) {
+ if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&sctp_ifnp->refcount)) {
/* We zero'd the count */
if (sctp_ifnp->vrf) {
sctp_free_vrf(sctp_ifnp->vrf);
@@ -272,10 +266,7 @@ sctp_update_ifn_mtu(uint32_t ifn_index, uint32_t mtu)
void
sctp_free_ifa(struct sctp_ifa *sctp_ifap)
{
- int ret;
-
- ret = atomic_fetchadd_int(&sctp_ifap->refcount, -1);
- if (ret == 1) {
+ if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&sctp_ifap->refcount)) {
/* We zero'd the count */
if (sctp_ifap->ifn_p) {
sctp_free_ifn(sctp_ifap->ifn_p);
@@ -1295,7 +1286,7 @@ null_return:
*/
struct sctp_tcb *
-sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock)
+sctp_findasoc_ep_asocid_locked(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock)
{
/*
* Use the assoc_id to find an endpoint
@@ -1304,33 +1295,29 @@ sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int
struct sctp_tcb *stcb;
uint32_t id;
- if (asoc_id == 0 || inp == NULL) {
+ if (inp == NULL) {
+ SCTP_PRINTF("TSNH ep_associd\n");
+ return (NULL);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ SCTP_PRINTF("TSNH ep_associd0\n");
return (NULL);
}
- SCTP_INP_INFO_RLOCK();
id = (uint32_t) asoc_id;
- head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(id,
- SCTP_BASE_INFO(hashasocmark))];
+ head = &inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(id, inp->hashasocidmark)];
if (head == NULL) {
/* invalid id TSNH */
- SCTP_INP_INFO_RUNLOCK();
+ SCTP_PRINTF("TSNH ep_associd1\n");
return (NULL);
}
- LIST_FOREACH(stcb, head, sctp_asocs) {
- SCTP_INP_RLOCK(stcb->sctp_ep);
- if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
- SCTP_INP_RUNLOCK(stcb->sctp_ep);
- SCTP_INP_INFO_RUNLOCK();
- return (NULL);
- }
+ LIST_FOREACH(stcb, head, sctp_tcbasocidhash) {
if (stcb->asoc.assoc_id == id) {
- /* candidate */
if (inp != stcb->sctp_ep) {
/*
* some other guy has the same id active (id
* collision ??).
*/
- SCTP_INP_RUNLOCK(stcb->sctp_ep);
+ SCTP_PRINTF("TSNH ep_associd2\n");
continue;
}
if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
@@ -1339,58 +1326,25 @@ sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int
if (want_lock) {
SCTP_TCB_LOCK(stcb);
}
- SCTP_INP_RUNLOCK(stcb->sctp_ep);
- SCTP_INP_INFO_RUNLOCK();
- return (stcb);
- }
- SCTP_INP_RUNLOCK(stcb->sctp_ep);
- }
- /* Ok if we missed here, lets try the restart hash */
- head = &SCTP_BASE_INFO(sctp_restarthash)[SCTP_PCBHASH_ASOC(id, SCTP_BASE_INFO(hashrestartmark))];
- if (head == NULL) {
- /* invalid id TSNH */
- SCTP_INP_INFO_RUNLOCK();
- return (NULL);
- }
- LIST_FOREACH(stcb, head, sctp_tcbrestarhash) {
- SCTP_INP_RLOCK(stcb->sctp_ep);
- if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
- SCTP_INP_RUNLOCK(stcb->sctp_ep);
- continue;
- }
- if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
- continue;
- }
- if (want_lock) {
- SCTP_TCB_LOCK(stcb);
- }
- if (stcb->asoc.assoc_id == id) {
- /* candidate */
- SCTP_INP_RUNLOCK(stcb->sctp_ep);
- if (inp != stcb->sctp_ep) {
- /*
- * some other guy has the same id active (id
- * collision ??).
- */
- if (want_lock) {
- SCTP_TCB_UNLOCK(stcb);
- }
- continue;
- }
- SCTP_INP_INFO_RUNLOCK();
return (stcb);
- } else {
- SCTP_INP_RUNLOCK(stcb->sctp_ep);
- }
- if (want_lock) {
- SCTP_TCB_UNLOCK(stcb);
}
}
- SCTP_INP_INFO_RUNLOCK();
return (NULL);
}
+struct sctp_tcb *
+sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock)
+{
+ struct sctp_tcb *stcb;
+
+ SCTP_INP_RLOCK(inp);
+ stcb = sctp_findasoc_ep_asocid_locked(inp, asoc_id, want_lock);
+ SCTP_INP_RUNLOCK(inp);
+ return (stcb);
+}
+
+
static struct sctp_inpcb *
sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head,
uint16_t lport, uint32_t vrf_id)
@@ -1434,6 +1388,7 @@ sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head,
if (head == NULL)
return (NULL);
+
LIST_FOREACH(inp, head, sctp_hash) {
SCTP_INP_RLOCK(inp);
if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
@@ -1695,8 +1650,11 @@ sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock,
* If the TCP model exists it could be that the main listening
* endpoint is gone but there exists a connected socket for this guy
* yet. If so we can return the first one that we find. This may NOT
- * be the correct one but the sctp_findassociation_ep_addr has
- * further code to look at all TCP models.
+ * be the correct one, so the caller should be wary of the returned
+ * INP. Currently the only caller that sets this flag is in bindx,
+ * where we are verifying that a user CAN bind the address. He
+ * either has bound it already, or someone else has, or it's open to
+ * bind, so this is good enough.
*/
if (inp == NULL && find_tcp_pool) {
head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR(lport, SCTP_BASE_INFO(hashtcpmark))];
@@ -1844,11 +1802,67 @@ sctp_findassociation_special_addr(struct mbuf *m, int iphlen, int offset,
return (NULL);
}
+static int
+sctp_does_stcb_own_this_addr(struct sctp_tcb *stcb, struct sockaddr *to)
+{
+ struct sctp_nets *net;
+
+ /*
+ * Simple question: the ports match; does the tcb own the "to"
+ * address?
+ */
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)) {
+ /* of course */
+ return (1);
+ }
+ /* have to look at all bound addresses */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (net->ro._l_addr.sa.sa_family != to->sa_family) {
+ /* not the same family, can't be a match */
+ continue;
+ }
+ switch (to->sa_family) {
+ case AF_INET:
+ {
+ struct sockaddr_in *sin, *rsin;
+
+ sin = (struct sockaddr_in *)&net->ro._l_addr;
+ rsin = (struct sockaddr_in *)to;
+ if (sin->sin_addr.s_addr ==
+ rsin->sin_addr.s_addr) {
+ /* found it */
+ return (1);
+ }
+ break;
+ }
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6, *rsin6;
+
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ rsin6 = (struct sockaddr_in6 *)to;
+ if (SCTP6_ARE_ADDR_EQUAL(sin6,
+ rsin6)) {
+ /* found it */
+ return (1);
+ }
+ break;
+ }
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+ }
+ /* Nope, do not have the address ;-( */
+ return (0);
+}
static struct sctp_tcb *
-sctp_findassoc_by_vtag(struct sockaddr *from, uint32_t vtag,
+sctp_findassoc_by_vtag(struct sockaddr *from, struct sockaddr *to, uint32_t vtag,
struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint16_t rport,
- uint16_t lport, int skip_src_check)
+ uint16_t lport, int skip_src_check, uint32_t vrf_id, uint32_t remote_tag)
{
/*
* Use my vtag to hash. If we find it we then verify the source addr
@@ -1880,18 +1894,10 @@ sctp_findassoc_by_vtag(struct sockaddr *from, uint32_t vtag,
if (stcb->asoc.my_vtag == vtag) {
/* candidate */
if (stcb->rport != rport) {
- /*
- * we could remove this if vtags are unique
- * across the system.
- */
SCTP_TCB_UNLOCK(stcb);
continue;
}
if (stcb->sctp_ep->sctp_lport != lport) {
- /*
- * we could remove this if vtags are unique
- * across the system.
- */
SCTP_TCB_UNLOCK(stcb);
continue;
}
@@ -1899,8 +1905,33 @@ sctp_findassoc_by_vtag(struct sockaddr *from, uint32_t vtag,
SCTP_TCB_UNLOCK(stcb);
continue;
}
+ /* RRS:Need toaddr check here */
+ if (sctp_does_stcb_own_this_addr(stcb, to) == 0) {
+ /* Endpoint does not own this address */
+ SCTP_TCB_UNLOCK(stcb);
+ continue;
+ }
+ if (remote_tag) {
+ /*
+ * If we have both vtags, that's all we match
+ * on
+ */
+ if (stcb->asoc.peer_vtag == remote_tag) {
+ /*
+ * If both tags match we consider it
+ * conclusive and check NO
+ * source/destination addresses
+ */
+ goto conclusive;
+ }
+ }
if (skip_src_check) {
- *netp = NULL; /* unknown */
+ conclusive:
+ if (from) {
+ net = sctp_findnet(stcb, from);
+ } else {
+ *netp = NULL; /* unknown */
+ }
if (inp_p)
*inp_p = stcb->sctp_ep;
SCTP_INP_INFO_RUNLOCK();
@@ -1985,14 +2016,8 @@ sctp_findassociation_addr(struct mbuf *m, int iphlen, int offset,
/* Currently not supported. */
return (NULL);
}
- if (sh->v_tag) {
- /* we only go down this path if vtag is non-zero */
- retval = sctp_findassoc_by_vtag(from, ntohl(sh->v_tag),
- inp_p, netp, sh->src_port, sh->dest_port, 0);
- if (retval) {
- return (retval);
- }
- }
+
+
switch (iph->ip_v) {
case IPVERSION:
{
@@ -2032,6 +2057,14 @@ sctp_findassociation_addr(struct mbuf *m, int iphlen, int offset,
/* TSNH */
break;
}
+ if (sh->v_tag) {
+ /* we only go down this path if vtag is non-zero */
+ retval = sctp_findassoc_by_vtag(from, to, ntohl(sh->v_tag),
+ inp_p, netp, sh->src_port, sh->dest_port, 0, vrf_id, 0);
+ if (retval) {
+ return (retval);
+ }
+ }
find_tcp_pool = 0;
if ((ch->chunk_type != SCTP_INITIATION) &&
(ch->chunk_type != SCTP_INITIATION_ACK) &&
@@ -2084,7 +2117,7 @@ sctp_findassociation_addr(struct mbuf *m, int iphlen, int offset,
*/
struct sctp_tcb *
sctp_findassociation_ep_asconf(struct mbuf *m, int iphlen, int offset,
- struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp)
+ struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint32_t vrf_id)
{
struct sctp_tcb *stcb;
struct sockaddr_in *sin;
@@ -2094,6 +2127,7 @@ sctp_findassociation_ep_asconf(struct mbuf *m, int iphlen, int offset,
#endif
struct sockaddr_storage local_store, remote_store;
+ struct sockaddr *to;
struct ip *iph;
#ifdef INET6
@@ -2107,7 +2141,7 @@ sctp_findassociation_ep_asconf(struct mbuf *m, int iphlen, int offset,
memset(&local_store, 0, sizeof(local_store));
memset(&remote_store, 0, sizeof(remote_store));
-
+ to = (struct sockaddr *)&local_store;
/* First get the destination address setup too. */
iph = mtod(m, struct ip *);
switch (iph->ip_v) {
@@ -2202,8 +2236,8 @@ sctp_findassociation_ep_asconf(struct mbuf *m, int iphlen, int offset,
}
if (zero_address) {
- stcb = sctp_findassoc_by_vtag(NULL, ntohl(sh->v_tag), inp_p,
- netp, sh->src_port, sh->dest_port, 1);
+ stcb = sctp_findassoc_by_vtag(NULL, to, ntohl(sh->v_tag), inp_p,
+ netp, sh->src_port, sh->dest_port, 1, vrf_id, 0);
/*
* printf("findassociation_ep_asconf: zero lookup address
* finds stcb 0x%x\n", (uint32_t)stcb);
@@ -2211,7 +2245,7 @@ sctp_findassociation_ep_asconf(struct mbuf *m, int iphlen, int offset,
} else {
stcb = sctp_findassociation_ep_addr(inp_p,
(struct sockaddr *)&remote_store, netp,
- (struct sockaddr *)&local_store, NULL);
+ to, NULL);
}
return (stcb);
}
@@ -2256,10 +2290,16 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id)
/* setup socket pointers */
inp->sctp_socket = so;
inp->ip_inp.inp.inp_socket = so;
-
+ inp->sctp_associd_counter = 1;
inp->partial_delivery_point = SCTP_SB_LIMIT_RCV(so) >> SCTP_PARTIAL_DELIVERY_SHIFT;
inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
-
+ /* init the small hash table we use to track asocid <-> tcb */
+ inp->sctp_asocidhash = SCTP_HASH_INIT(SCTP_STACK_VTAG_HASH_SIZE, &inp->hashasocidmark);
+ if (inp->sctp_asocidhash == NULL) {
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return (ENOBUFS);
+ }
#ifdef IPSEC
{
struct inpcbpolicy *pcb_sp = NULL;
@@ -2597,8 +2637,9 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
sin = (struct sockaddr_in *)addr;
lport = sin->sin_port;
/*
- * For LOOPBACK the prison_local_ip4() call will transmute the ip address
- * to the proper value.
+ * For LOOPBACK the prison_local_ip4() call
+ * will transmute the ip address to the
+ * proper value.
*/
if (p && prison_local_ip4(p->td_ucred, &sin->sin_addr) != 0) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
@@ -2627,8 +2668,9 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
lport = sin6->sin6_port;
/*
- * For LOOPBACK the prison_local_ip6() call will transmute the ipv6 address
- * to the proper value.
+ * For LOOPBACK the prison_local_ip6() call
+ * will transmute the ipv6 address to the
+ * proper value.
*/
if (p && prison_local_ip6(p->td_ucred, &sin6->sin6_addr,
(SCTP_IPV6_V6ONLY(inp) != 0)) != 0) {
@@ -3373,6 +3415,10 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from)
(void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer);
inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NONE;
/* Clear the read queue */
+ if ((inp->sctp_asocidhash) != NULL) {
+ SCTP_HASH_FREE(inp->sctp_asocidhash, inp->hashasocidmark);
+ inp->sctp_asocidhash = NULL;
+ }
/* sa_ignore FREED_MEMORY */
while ((sq = TAILQ_FIRST(&inp->read_queue)) != NULL) {
/* Its only abandoned if it had data left */
@@ -3864,6 +3910,32 @@ sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
}
+static uint32_t
+sctp_aloc_a_assoc_id(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
+{
+ uint32_t id;
+ struct sctpasochead *head;
+ struct sctp_tcb *lstcb;
+
+ SCTP_INP_WLOCK(inp);
+try_again:
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ /* TSNH */
+ SCTP_INP_WUNLOCK(inp);
+ return (0);
+ }
+ id = inp->sctp_associd_counter;
+ inp->sctp_associd_counter++;
+ lstcb = sctp_findasoc_ep_asocid_locked(inp, (sctp_assoc_t) id, 0);
+ if (lstcb) {
+ goto try_again;
+ }
+ head = &inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(id, inp->hashasocidmark)];
+ LIST_INSERT_HEAD(head, stcb, sctp_tcbasocidhash);
+ stcb->asoc.in_asocid_hash = 1;
+ SCTP_INP_WUNLOCK(inp);
+ return id;
+}
+
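Association ids are now minted from a per-endpoint counter rather than reused from the vtag, so an id is only meaningful relative to its inp. A hedged sketch of the invariant the loop above establishes before publishing a new id:

    /* Sketch (assumption): once inserted, looking the id back up
     * through the locked finder must yield the same tcb; the
     * try_again loop exists precisely to skip ids that are still
     * live after the 32-bit counter wraps. */
    KASSERT(sctp_findasoc_ep_asocid_locked(inp, (sctp_assoc_t)id, 0) == stcb,
        ("asocid hash out of sync"));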
/*
* allocate an association and add it to the endpoint. The caller must be
* careful to add all additional addresses once they are known right away or
@@ -3983,8 +4055,11 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
bzero(stcb, sizeof(*stcb));
asoc = &stcb->asoc;
+
+ asoc->assoc_id = sctp_aloc_a_assoc_id(inp, stcb);
SCTP_TCB_LOCK_INIT(stcb);
SCTP_TCB_SEND_LOCK_INIT(stcb);
+ stcb->rport = rport;
/* setup back pointer's */
stcb->sctp_ep = inp;
stcb->sctp_socket = inp->sctp_socket;
@@ -3992,19 +4067,20 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
/* failed */
SCTP_TCB_LOCK_DESTROY(stcb);
SCTP_TCB_SEND_LOCK_DESTROY(stcb);
+ LIST_REMOVE(stcb, sctp_tcbasocidhash);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
SCTP_DECR_ASOC_COUNT();
*error = err;
return (NULL);
}
/* and the port */
- stcb->rport = rport;
SCTP_INP_INFO_WLOCK();
SCTP_INP_WLOCK(inp);
if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
/* inpcb freed while alloc going on */
SCTP_TCB_LOCK_DESTROY(stcb);
SCTP_TCB_SEND_LOCK_DESTROY(stcb);
+ LIST_REMOVE(stcb, sctp_tcbasocidhash);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
SCTP_INP_WUNLOCK(inp);
SCTP_INP_INFO_WUNLOCK();
@@ -4016,12 +4092,12 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
SCTP_TCB_LOCK(stcb);
/* now that my_vtag is set, add it to the hash */
- head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
- SCTP_BASE_INFO(hashasocmark))];
+ head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
/* put it in the bucket in the vtag hash of assoc's for the system */
LIST_INSERT_HEAD(head, stcb, sctp_asocs);
- sctp_delete_from_timewait(stcb->asoc.my_vtag);
-
+#ifdef MICHAELS_EXPERIMENT
+ sctp_delete_from_timewait(stcb->asoc.my_vtag, inp->sctp_lport, stcb->rport);
+#endif
SCTP_INP_INFO_WUNLOCK();
if ((err = sctp_add_remote_addr(stcb, firstaddr, SCTP_DO_SETSCOPE, SCTP_ALLOC_ASOC))) {
@@ -4037,6 +4113,7 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
SCTP_DECR_ASOC_COUNT();
SCTP_TCB_LOCK_DESTROY(stcb);
SCTP_TCB_SEND_LOCK_DESTROY(stcb);
+ LIST_REMOVE(stcb, sctp_tcbasocidhash);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
SCTP_INP_WUNLOCK(inp);
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS);
@@ -4159,7 +4236,7 @@ sctp_del_remote_addr(struct sctp_tcb *stcb, struct sockaddr *remaddr)
}
void
-sctp_delete_from_timewait(uint32_t tag)
+sctp_delete_from_timewait(uint32_t tag, uint16_t lport, uint16_t rport)
{
struct sctpvtaghead *chain;
struct sctp_tagblock *twait_block;
@@ -4170,9 +4247,13 @@ sctp_delete_from_timewait(uint32_t tag)
if (!SCTP_LIST_EMPTY(chain)) {
LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
- if (twait_block->vtag_block[i].v_tag == tag) {
+ if ((twait_block->vtag_block[i].v_tag == tag) &&
+ (twait_block->vtag_block[i].lport == lport) &&
+ (twait_block->vtag_block[i].rport == rport)) {
twait_block->vtag_block[i].tv_sec_at_expire = 0;
twait_block->vtag_block[i].v_tag = 0;
+ twait_block->vtag_block[i].lport = 0;
+ twait_block->vtag_block[i].rport = 0;
found = 1;
break;
}
@@ -4184,7 +4265,7 @@ sctp_delete_from_timewait(uint32_t tag)
}
int
-sctp_is_in_timewait(uint32_t tag)
+sctp_is_in_timewait(uint32_t tag, uint16_t lport, uint16_t rport)
{
struct sctpvtaghead *chain;
struct sctp_tagblock *twait_block;
@@ -4196,7 +4277,9 @@ sctp_is_in_timewait(uint32_t tag)
if (!SCTP_LIST_EMPTY(chain)) {
LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
- if (twait_block->vtag_block[i].v_tag == tag) {
+ if ((twait_block->vtag_block[i].v_tag == tag) &&
+ (twait_block->vtag_block[i].lport == lport) &&
+ (twait_block->vtag_block[i].rport == rport)) {
found = 1;
break;
}
@@ -4211,7 +4294,7 @@ sctp_is_in_timewait(uint32_t tag)
void
-sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time)
+sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time, uint16_t lport, uint16_t rport)
{
struct sctpvtaghead *chain;
struct sctp_tagblock *twait_block;
@@ -4230,16 +4313,22 @@ sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time)
twait_block->vtag_block[i].tv_sec_at_expire =
now.tv_sec + time;
twait_block->vtag_block[i].v_tag = tag;
+ twait_block->vtag_block[i].lport = lport;
+ twait_block->vtag_block[i].rport = rport;
set = 1;
} else if ((twait_block->vtag_block[i].v_tag) &&
((long)twait_block->vtag_block[i].tv_sec_at_expire < now.tv_sec)) {
/* Audit expires this guy */
twait_block->vtag_block[i].tv_sec_at_expire = 0;
twait_block->vtag_block[i].v_tag = 0;
+ twait_block->vtag_block[i].lport = 0;
+ twait_block->vtag_block[i].rport = 0;
if (set == 0) {
/* Reuse it for my new tag */
twait_block->vtag_block[i].tv_sec_at_expire = now.tv_sec + time;
twait_block->vtag_block[i].v_tag = tag;
+ twait_block->vtag_block[i].lport = lport;
+ twait_block->vtag_block[i].rport = rport;
set = 1;
}
}
@@ -4267,6 +4356,8 @@ sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time)
LIST_INSERT_HEAD(chain, twait_block, sctp_nxt_tagblock);
twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec + time;
twait_block->vtag_block[0].v_tag = tag;
+ twait_block->vtag_block[0].lport = lport;
+ twait_block->vtag_block[0].rport = rport;
}
}
@@ -4549,8 +4640,8 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
if (inp->sctp_tcbhash) {
LIST_REMOVE(stcb, sctp_tcbhash);
}
- if (stcb->asoc.in_restart_hash) {
- LIST_REMOVE(stcb, sctp_tcbrestarhash);
+ if (stcb->asoc.in_asocid_hash) {
+ LIST_REMOVE(stcb, sctp_tcbasocidhash);
}
/* Now lets remove it from the list of ALL associations in the EP */
LIST_REMOVE(stcb, sctp_tcblist);
@@ -4561,7 +4652,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
}
/* pull from vtag hash */
LIST_REMOVE(stcb, sctp_asocs);
- sctp_add_vtag_to_timewait(asoc->my_vtag, SCTP_TIME_WAIT);
+ sctp_add_vtag_to_timewait(asoc->my_vtag, SCTP_TIME_WAIT, inp->sctp_lport, stcb->rport);
/*
* Now restop the timers to be sure - this is paranoia at its finest!
@@ -4602,6 +4693,8 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
}
sctp_free_remote_addr(sp->net);
sctp_free_spbufspace(stcb, asoc, sp);
+ if (sp->holds_key_ref)
+ sctp_auth_key_release(stcb, sp->auth_keyid);
/* Free the zone stuff */
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_strmoq), sp);
SCTP_DECR_STRMOQ_COUNT();
@@ -4640,6 +4733,8 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
sctp_m_freem(chk->data);
chk->data = NULL;
}
+ if (chk->holds_key_ref)
+ sctp_auth_key_release(stcb, chk->auth_keyid);
ccnt++;
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
SCTP_DECR_CHK_COUNT();
@@ -4657,6 +4752,8 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
sctp_m_freem(chk->data);
chk->data = NULL;
}
+ if (chk->holds_key_ref)
+ sctp_auth_key_release(stcb, chk->auth_keyid);
ccnt++;
sctp_free_remote_addr(chk->whoTo);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
@@ -4680,6 +4777,8 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
sctp_m_freem(chk->data);
chk->data = NULL;
}
+ if (chk->holds_key_ref)
+ sctp_auth_key_release(stcb, chk->auth_keyid);
ccnt++;
sctp_free_remote_addr(chk->whoTo);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
@@ -4703,6 +4802,8 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
sctp_m_freem(chk->data);
chk->data = NULL;
}
+ if (chk->holds_key_ref)
+ sctp_auth_key_release(stcb, chk->auth_keyid);
ccnt++;
sctp_free_remote_addr(chk->whoTo);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
@@ -4727,6 +4828,8 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
sctp_m_freem(chk->data);
chk->data = NULL;
}
+ if (chk->holds_key_ref)
+ sctp_auth_key_release(stcb, chk->auth_keyid);
ccnt++;
sctp_free_remote_addr(chk->whoTo);
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
@@ -4749,6 +4852,8 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
sctp_m_freem(chk->data);
chk->data = NULL;
}
+ if (chk->holds_key_ref)
+ sctp_auth_key_release(stcb, chk->auth_keyid);
sctp_free_remote_addr(chk->whoTo);
ccnt++;
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
@@ -5310,17 +5415,12 @@ sctp_pcb_init()
&SCTP_BASE_INFO(hashtcpmark));
SCTP_BASE_INFO(hashtblsize) = SCTP_BASE_SYSCTL(sctp_hashtblsize);
- /* init the small hash table we use to track restarted asoc's */
- SCTP_BASE_INFO(sctp_restarthash) = SCTP_HASH_INIT(SCTP_STACK_VTAG_HASH_SIZE,
- &SCTP_BASE_INFO(hashrestartmark));
-
SCTP_BASE_INFO(sctp_vrfhash) = SCTP_HASH_INIT(SCTP_SIZE_OF_VRF_HASH,
&SCTP_BASE_INFO(hashvrfmark));
SCTP_BASE_INFO(vrf_ifn_hash) = SCTP_HASH_INIT(SCTP_VRF_IFN_HASH_SIZE,
&SCTP_BASE_INFO(vrf_ifn_hashmark));
-
/* init the zones */
/*
* FIX ME: Should check for NULL returns, but if it does fail we are
@@ -5508,8 +5608,6 @@ sctp_pcb_finish(void)
SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_ephash), SCTP_BASE_INFO(hashmark));
if (SCTP_BASE_INFO(sctp_tcpephash) != NULL)
SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_tcpephash), SCTP_BASE_INFO(hashtcpmark));
- if (SCTP_BASE_INFO(sctp_restarthash) != NULL)
- SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_restarthash), SCTP_BASE_INFO(hashrestartmark));
}
@@ -5900,6 +5998,8 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
if (lsa) {
(void)sctp_set_primary_addr(stcb, sa, NULL);
}
+ } else if (ptype == SCTP_HAS_NAT_SUPPORT) {
+ stcb->asoc.peer_supports_nat = 1;
} else if (ptype == SCTP_PRSCTP_SUPPORTED) {
/* Peer supports pr-sctp */
stcb->asoc.peer_supports_prsctp = 1;
@@ -6180,7 +6280,7 @@ sctp_set_primary_addr(struct sctp_tcb *stcb, struct sockaddr *sa,
}
int
-sctp_is_vtag_good(struct sctp_inpcb *inp, uint32_t tag, struct timeval *now, int save_in_twait)
+sctp_is_vtag_good(struct sctp_inpcb *inp, uint32_t tag, uint16_t lport, uint16_t rport, struct timeval *now, int save_in_twait)
{
/*
* This function serves two purposes. It will see if a TAG can be
@@ -6188,54 +6288,45 @@ sctp_is_vtag_good(struct sctp_inpcb *inp, uint32_t tag, struct timeval *now, int
* tag. A secondary function it will do is purge out old tags that
* can be removed.
*/
- struct sctpasochead *head;
struct sctpvtaghead *chain;
struct sctp_tagblock *twait_block;
+ struct sctpasochead *head;
struct sctp_tcb *stcb;
int i;
- SCTP_INP_INFO_WLOCK();
- chain = &SCTP_BASE_INFO(vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE))];
- /* First is the vtag in use ? */
-
+ SCTP_INP_INFO_RLOCK();
head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
SCTP_BASE_INFO(hashasocmark))];
if (head == NULL) {
- goto check_restart;
+ /* invalid vtag */
+ goto skip_vtag_check;
}
LIST_FOREACH(stcb, head, sctp_asocs) {
-
- if (stcb->asoc.my_vtag == tag) {
- /*
- * We should remove this if and return 0 always if
- * we want vtags unique across all endpoints. For
- * now within a endpoint is ok.
- */
- if (inp == stcb->sctp_ep) {
- /* bad tag, in use */
- SCTP_INP_INFO_WUNLOCK();
- return (0);
- }
+ /*
+ * We choose not to lock anything here. TCB's can't be
+ * removed since we have the read lock, so they can't be
+ * freed on us, same thing for the INP. I may be wrong with
+ * this assumption, but we will go with it for now :-)
+ */
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ continue;
}
- }
-check_restart:
- /* Now lets check the restart hash */
- head = &SCTP_BASE_INFO(sctp_restarthash)[SCTP_PCBHASH_ASOC(tag,
- SCTP_BASE_INFO(hashrestartmark))];
- if (head == NULL) {
- goto check_time_wait;
- }
- LIST_FOREACH(stcb, head, sctp_tcbrestarhash) {
- if (stcb->asoc.assoc_id == tag) {
+ if (stcb->asoc.my_vtag == tag) {
/* candidate */
- if (inp == stcb->sctp_ep) {
- /* bad tag, in use */
- SCTP_INP_INFO_WUNLOCK();
- return (0);
+ if (stcb->rport != rport) {
+ continue;
}
+ if (stcb->sctp_ep->sctp_lport != lport) {
+ continue;
+ }
+ /* It's a used tag set */
+ SCTP_INP_INFO_RUNLOCK();
+ return (0);
}
}
-check_time_wait:
+skip_vtag_check:
+
+ chain = &SCTP_BASE_INFO(vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE))];
/* Now what about timed wait ? */
if (!SCTP_LIST_EMPTY(chain)) {
/*
@@ -6252,8 +6343,9 @@ check_time_wait:
/* Audit expires this guy */
twait_block->vtag_block[i].tv_sec_at_expire = 0;
twait_block->vtag_block[i].v_tag = 0;
- } else if (twait_block->vtag_block[i].v_tag ==
- tag) {
+ } else if ((twait_block->vtag_block[i].v_tag == tag) &&
+ (twait_block->vtag_block[i].lport == lport) &&
+ (twait_block->vtag_block[i].rport == rport)) {
/* Bad tag, sorry :< */
- SCTP_INP_INFO_WUNLOCK();
+ SCTP_INP_INFO_RUNLOCK();
return (0);
@@ -6261,6 +6353,8 @@ check_time_wait:
}
}
}
+ SCTP_INP_INFO_RUNLOCK();
+#ifdef MICHAELS_EXPERIMENT
/*-
* Not found, ok to use the tag, add it to the time wait hash
* as well; this will prevent two successive cookies from getting
@@ -6269,9 +6363,12 @@ check_time_wait:
* add this tag to the assoc hash we need to purge it from
* the t-wait hash.
*/
+ SCTP_INP_INFO_WLOCK();
if (save_in_twait)
- sctp_add_vtag_to_timewait(tag, TICKS_TO_SEC(inp->sctp_ep.def_cookie_life));
+ sctp_add_vtag_to_timewait(tag, TICKS_TO_SEC(inp->sctp_ep.def_cookie_life), lport, rport);
SCTP_INP_INFO_WUNLOCK();
+#endif
+
return (1);
}
diff --git a/sys/netinet/sctp_pcb.h b/sys/netinet/sctp_pcb.h
index 19d1190..66c5f7b 100644
--- a/sys/netinet/sctp_pcb.h
+++ b/sys/netinet/sctp_pcb.h
@@ -133,6 +133,8 @@ struct sctp_block_entry {
struct sctp_timewait {
uint32_t tv_sec_at_expire; /* the seconds from boot to expire */
uint32_t v_tag; /* the vtag that can not be reused */
+ uint16_t lport; /* the local port used in vtag */
+ uint16_t rport; /* the remote port used in vtag */
};
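With the ports folded into the entry, the effective time-wait key becomes the triple (v_tag, lport, rport), so the same vtag can coexist on different port pairs; every comparison in sctp_delete_from_timewait(), sctp_is_in_timewait() and sctp_is_vtag_good() above amounts to this sketch (sctp_twait_match is a hypothetical helper, not in the patch):

    /* Sketch: a vtag in time-wait only blocks reuse for the same
     * port pair. */
    static int
    sctp_twait_match(struct sctp_timewait *tw, uint32_t tag,
        uint16_t lport, uint16_t rport)
    {
            return ((tw->v_tag == tag) &&
                (tw->lport == lport) &&
                (tw->rport == rport));
    }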
struct sctp_tagblock {
@@ -148,8 +150,6 @@ struct sctp_epinfo {
struct sctppcbhead *sctp_ephash;
u_long hashmark;
- struct sctpasochead *sctp_restarthash;
- u_long hashrestartmark;
/*-
* The TCP model represents a substantial overhead in that we get an
* additional hash table to keep explicit connections in. The
@@ -411,6 +411,10 @@ struct sctp_inpcb {
uint32_t total_recvs;
uint32_t last_abort_code;
uint32_t total_nospaces;
+ struct sctpasochead *sctp_asocidhash;
+ u_long hashasocidmark;
+ uint32_t sctp_associd_counter;
+
#ifdef SCTP_ASOCLOG_OF_TSNS
struct sctp_pcbtsn_rlog readlog[SCTP_READ_LOG_SIZE];
uint32_t readlog_index;
@@ -424,7 +428,7 @@ struct sctp_tcb {
* table */
LIST_ENTRY(sctp_tcb) sctp_tcblist; /* list of all of the
* TCB's */
- LIST_ENTRY(sctp_tcb) sctp_tcbrestarhash; /* next link in restart
+ LIST_ENTRY(sctp_tcb) sctp_tcbasocidhash; /* next link in asocid
* hash table */
LIST_ENTRY(sctp_tcb) sctp_asocs; /* vtag hash list */
struct sctp_block_entry *block_entry; /* pointer locked by socket
@@ -537,12 +541,15 @@ sctp_findassociation_ep_addr(struct sctp_inpcb **,
struct sctp_tcb *);
struct sctp_tcb *
+ sctp_findasoc_ep_asocid_locked(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock);
+
+struct sctp_tcb *
sctp_findassociation_ep_asocid(struct sctp_inpcb *,
sctp_assoc_t, int);
struct sctp_tcb *
sctp_findassociation_ep_asconf(struct mbuf *, int, int,
- struct sctphdr *, struct sctp_inpcb **, struct sctp_nets **);
+ struct sctphdr *, struct sctp_inpcb **, struct sctp_nets **, uint32_t vrf_id);
int sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id);
@@ -557,12 +564,12 @@ sctp_aloc_assoc(struct sctp_inpcb *, struct sockaddr *,
int sctp_free_assoc(struct sctp_inpcb *, struct sctp_tcb *, int, int);
-void sctp_delete_from_timewait(uint32_t);
+void sctp_delete_from_timewait(uint32_t, uint16_t, uint16_t);
-int sctp_is_in_timewait(uint32_t tag);
+int sctp_is_in_timewait(uint32_t tag, uint16_t lport, uint16_t rport);
void
- sctp_add_vtag_to_timewait(uint32_t, uint32_t);
+ sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time, uint16_t lport, uint16_t rport);
void sctp_add_local_addr_ep(struct sctp_inpcb *, struct sctp_ifa *, uint32_t);
@@ -593,7 +600,7 @@ int
sctp_set_primary_addr(struct sctp_tcb *, struct sockaddr *,
struct sctp_nets *);
-int sctp_is_vtag_good(struct sctp_inpcb *, uint32_t, struct timeval *, int);
+int sctp_is_vtag_good(struct sctp_inpcb *, uint32_t, uint16_t lport, uint16_t rport, struct timeval *, int);
/* void sctp_drain(void); */
diff --git a/sys/netinet/sctp_structs.h b/sys/netinet/sctp_structs.h
index 8beb3bf..35318ff 100644
--- a/sys/netinet/sctp_structs.h
+++ b/sys/netinet/sctp_structs.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -356,6 +356,8 @@ struct sctp_tmit_chunk {
uint16_t send_size;
uint16_t book_size;
uint16_t mbcnt;
+ uint16_t auth_keyid;
+ uint8_t holds_key_ref; /* flag if auth keyid refcount is held */
uint8_t pad_inplace;
uint8_t do_rtt;
uint8_t book_size_scale;
@@ -435,6 +437,8 @@ struct sctp_stream_queue_pending {
uint16_t stream;
uint16_t strseq;
uint16_t act_flags;
+ uint16_t auth_keyid;
+ uint8_t holds_key_ref;
uint8_t msg_is_complete;
uint8_t some_taken;
uint8_t addr_over;
@@ -472,6 +476,8 @@ struct sctp_asconf_addr {
struct sctp_asconf_addr_param ap;
struct sctp_ifa *ifa; /* save the ifa for add/del ip */
uint8_t sent; /* has this been sent yet? */
+ uint8_t special_del; /* not to be used in lookup */
+
};
struct sctp_scoping {
@@ -763,6 +769,12 @@ struct sctp_association {
*/
uint32_t highest_tsn_inside_map;
+ /* EY - new NR variables used for nr_sack based on mapping_array */
+ uint8_t *nr_mapping_array;
+ uint32_t nr_mapping_array_base_tsn;
+ uint32_t highest_tsn_inside_nr_map;
+ uint16_t nr_mapping_array_size;
+
uint32_t last_echo_tsn;
uint32_t last_cwr_tsn;
uint32_t fast_recovery_tsn;
@@ -992,6 +1004,8 @@ struct sctp_association {
/* flag to indicate if peer can do asconf */
uint8_t peer_supports_asconf;
+ /* EY - flag to indicate if peer can do nr_sack */
+ uint8_t peer_supports_nr_sack;
/* pr-sctp support flag */
uint8_t peer_supports_prsctp;
/* peer authentication support flag */
@@ -999,6 +1013,7 @@ struct sctp_association {
/* stream resets are supported by the peer */
uint8_t peer_supports_strreset;
+ uint8_t peer_supports_nat;
/*
* packet drop's are supported by the peer, we don't really care
* about this but we bookkeep it anyway.
@@ -1028,7 +1043,9 @@ struct sctp_association {
uint8_t delayed_connection;
uint8_t ifp_had_enobuf;
uint8_t saw_sack_with_frags;
- uint8_t in_restart_hash;
+ /* EY */
+ uint8_t saw_sack_with_nr_frags;
+ uint8_t in_asocid_hash;
uint8_t assoc_up_sent;
uint8_t adaptation_needed;
uint8_t adaptation_sent;
@@ -1037,6 +1054,8 @@ struct sctp_association {
uint8_t sctp_cmt_on_off;
uint8_t iam_blocking;
uint8_t cookie_how[8];
+ /* EY 05/05/08 - NR_SACK variable */
+ uint8_t sctp_nr_sack_on_off;
/* JRS 5/21/07 - CMT PF variable */
uint8_t sctp_cmt_pf;
/*
diff --git a/sys/netinet/sctp_sysctl.c b/sys/netinet/sctp_sysctl.c
index 30baae8..6703daf 100644
--- a/sys/netinet/sctp_sysctl.c
+++ b/sys/netinet/sctp_sysctl.c
@@ -81,6 +81,8 @@ sctp_init_sysctls()
SCTP_BASE_SYSCTL(sctp_add_more_threshold) = SCTPCTL_ADD_MORE_ON_OUTPUT_DEFAULT;
SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default) = SCTPCTL_OUTGOING_STREAMS_DEFAULT;
SCTP_BASE_SYSCTL(sctp_cmt_on_off) = SCTPCTL_CMT_ON_OFF_DEFAULT;
+ /* EY */
+ SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) = SCTPCTL_NR_SACK_ON_OFF_DEFAULT;
SCTP_BASE_SYSCTL(sctp_cmt_use_dac) = SCTPCTL_CMT_USE_DAC_DEFAULT;
SCTP_BASE_SYSCTL(sctp_cmt_pf) = SCTPCTL_CMT_PF_DEFAULT;
SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) = SCTPCTL_CWND_MAXBURST_DEFAULT;
@@ -109,6 +111,7 @@ sctp_init_sysctls()
SCTP_BASE_SYSCTL(sctp_udp_tunneling_for_client_enable) = SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_DEFAULT;
SCTP_BASE_SYSCTL(sctp_udp_tunneling_port) = SCTPCTL_UDP_TUNNELING_PORT_DEFAULT;
SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) = SCTPCTL_SACK_IMMEDIATELY_ENABLE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly) = SCTPCTL_NAT_FRIENDLY_DEFAULT;
#if defined(SCTP_DEBUG)
SCTP_BASE_SYSCTL(sctp_debug_on) = SCTPCTL_DEBUG_DEFAULT;
#endif
@@ -574,6 +577,8 @@ sysctl_sctp_check(SYSCTL_HANDLER_ARGS)
RANGECHK(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTPCTL_ADD_MORE_ON_OUTPUT_MIN, SCTPCTL_ADD_MORE_ON_OUTPUT_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default), SCTPCTL_OUTGOING_STREAMS_MIN, SCTPCTL_OUTGOING_STREAMS_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_cmt_on_off), SCTPCTL_CMT_ON_OFF_MIN, SCTPCTL_CMT_ON_OFF_MAX);
+ /* EY */
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off), SCTPCTL_NR_SACK_ON_OFF_MIN, SCTPCTL_NR_SACK_ON_OFF_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_cmt_use_dac), SCTPCTL_CMT_USE_DAC_MIN, SCTPCTL_CMT_USE_DAC_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_cmt_pf), SCTPCTL_CMT_PF_MIN, SCTPCTL_CMT_PF_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst), SCTPCTL_CWND_MAXBURST_MIN, SCTPCTL_CWND_MAXBURST_MAX);
@@ -601,6 +606,8 @@ sysctl_sctp_check(SYSCTL_HANDLER_ARGS)
#endif
RANGECHK(SCTP_BASE_SYSCTL(sctp_udp_tunneling_for_client_enable), SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_MIN, SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_enable_sack_immediately), SCTPCTL_SACK_IMMEDIATELY_ENABLE_MIN, SCTPCTL_SACK_IMMEDIATELY_ENABLE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly), SCTPCTL_NAT_FRIENDLY_MIN, SCTPCTL_NAT_FRIENDLY_MAX);
+
#ifdef SCTP_DEBUG
RANGECHK(SCTP_BASE_SYSCTL(sctp_debug_on), SCTPCTL_DEBUG_MIN, SCTPCTL_DEBUG_MAX);
#endif
@@ -767,6 +774,11 @@ SYSCTL_PROC(_net_inet_sctp, OID_AUTO, cmt_on_off, CTLTYPE_INT | CTLFLAG_RW,
&SCTP_BASE_SYSCTL(sctp_cmt_on_off), 0, sysctl_sctp_check, "IU",
SCTPCTL_CMT_ON_OFF_DESC);
+/* EY */
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, nr_sack_on_off, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_nr_sack_on_off), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_NR_SACK_ON_OFF_DESC);
+
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, cmt_use_dac, CTLTYPE_INT | CTLFLAG_RW,
&SCTP_BASE_SYSCTL(sctp_cmt_use_dac), 0, sysctl_sctp_check, "IU",
SCTPCTL_CMT_USE_DAC_DESC);
@@ -880,6 +892,10 @@ SYSCTL_PROC(_net_inet_sctp, OID_AUTO, enable_sack_immediately, CTLTYPE_INT | CTL
&SCTP_BASE_SYSCTL(sctp_enable_sack_immediately), 0, sysctl_sctp_check, "IU",
SCTPCTL_SACK_IMMEDIATELY_ENABLE_DESC);
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, nat_friendly_init, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_NAT_FRIENDLY_DESC);
+
#ifdef SCTP_DEBUG
SYSCTL_PROC(_net_inet_sctp, OID_AUTO, debug, CTLTYPE_INT | CTLFLAG_RW,
&SCTP_BASE_SYSCTL(sctp_debug_on), 0, sysctl_sctp_check, "IU",
diff --git a/sys/netinet/sctp_sysctl.h b/sys/netinet/sctp_sysctl.h
index 399bcb0..dd42d41 100644
--- a/sys/netinet/sctp_sysctl.h
+++ b/sys/netinet/sctp_sysctl.h
@@ -74,6 +74,8 @@ struct sctp_sysctl {
uint32_t sctp_nr_outgoing_streams_default;
uint32_t sctp_cmt_on_off;
uint32_t sctp_cmt_use_dac;
+/* EY 5/5/08 - nr_sack flag variable */
+ uint32_t sctp_nr_sack_on_off;
uint32_t sctp_cmt_pf;
uint32_t sctp_use_cwnd_based_maxburst;
uint32_t sctp_early_fr;
@@ -95,6 +97,7 @@ struct sctp_sysctl {
uint32_t sctp_default_frag_interleave;
uint32_t sctp_mobility_base;
uint32_t sctp_mobility_fasthandoff;
+ uint32_t sctp_inits_include_nat_friendly;
#if defined(SCTP_LOCAL_TRACE_BUF)
struct sctp_log sctp_log;
#endif
@@ -322,6 +325,12 @@ struct sctp_sysctl {
#define SCTPCTL_CMT_ON_OFF_MAX 1
#define SCTPCTL_CMT_ON_OFF_DEFAULT 0
+/* EY - nr_sack_on_off: NR_SACK on/off flag */
+#define SCTPCTL_NR_SACK_ON_OFF_DESC "NR_SACK on/off flag"
+#define SCTPCTL_NR_SACK_ON_OFF_MIN 0
+#define SCTPCTL_NR_SACK_ON_OFF_MAX 1
+#define SCTPCTL_NR_SACK_ON_OFF_DEFAULT 0
+
/* cmt_use_dac: CMT DAC on/off flag */
#define SCTPCTL_CMT_USE_DAC_DESC "CMT DAC on/off flag"
#define SCTPCTL_CMT_USE_DAC_MIN 0
@@ -466,6 +475,13 @@ struct sctp_sysctl {
#define SCTPCTL_SACK_IMMEDIATELY_ENABLE_MAX 1
#define SCTPCTL_SACK_IMMEDIATELY_ENABLE_DEFAULT SCTPCTL_SACK_IMMEDIATELY_ENABLE_MIN
+/* nat_friendly_init: Enable sending of the nat-friendly SCTP option on INITs */
+#define SCTPCTL_NAT_FRIENDLY_DESC "Enable sending of the nat-friendly SCTP option on INITs."
+#define SCTPCTL_NAT_FRIENDLY_MIN 0
+#define SCTPCTL_NAT_FRIENDLY_MAX 1
+#define SCTPCTL_NAT_FRIENDLY_DEFAULT SCTPCTL_NAT_FRIENDLY_MIN
+
+
#if defined(SCTP_DEBUG)
/* debug: Configure debug output */
#define SCTPCTL_DEBUG_DESC "Configure debug output"
diff --git a/sys/netinet/sctp_timer.c b/sys/netinet/sctp_timer.c
index 28661c2..1d2d5f2 100644
--- a/sys/netinet/sctp_timer.c
+++ b/sys/netinet/sctp_timer.c
@@ -49,7 +49,7 @@ __FBSDID("$FreeBSD$");
#include <netinet/sctp_input.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>
-
+#include <netinet/udp.h>
void
@@ -769,8 +769,8 @@ start_again:
(SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
&stcb->asoc.sent_queue, SCTP_SO_NOT_LOCKED);
}
+ continue;
}
- continue;
}
if (PR_SCTP_RTX_ENABLED(chk->flags)) {
/* Has it been retransmitted tv_sec times? */
@@ -781,8 +781,8 @@ start_again:
(SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
&stcb->asoc.sent_queue, SCTP_SO_NOT_LOCKED);
}
+ continue;
}
- continue;
}
if (chk->sent < SCTP_DATAGRAM_RESEND) {
sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
@@ -1088,7 +1088,11 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp,
* more, request a RTT update
*/
if (sctp_send_hb(stcb, 1, net) < 0)
- return 1;
+ /*
+ * Less than 0 means we lost
+ * the assoc
+ */
+ return (1);
}
}
}
@@ -1146,7 +1150,8 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp,
* manually.
*/
if (sctp_send_hb(stcb, 1, net) < 0)
- return 1;
+ /* A return of less than 0 means we lost the association */
+ return (1);
}
/*
* Special case for cookie-echo'ed case, we don't do output but must
@@ -1789,6 +1794,9 @@ sctp_pathmtu_timer(struct sctp_inpcb *inp,
}
if (net->ro._s_addr) {
mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr.sa, net->ro.ro_rt);
+ if (net->port) {
+ mtu -= sizeof(struct udphdr);
+ }
if (mtu > next_mtu) {
net->mtu = next_mtu;
}
diff --git a/sys/netinet/sctp_uio.h b/sys/netinet/sctp_uio.h
index 3c2a034..46e604f 100644
--- a/sys/netinet/sctp_uio.h
+++ b/sys/netinet/sctp_uio.h
@@ -56,6 +56,7 @@ struct sctp_event_subscribe {
uint8_t sctp_partial_delivery_event;
uint8_t sctp_adaptation_layer_event;
uint8_t sctp_authentication_event;
+ uint8_t sctp_sender_dry_event;
uint8_t sctp_stream_reset_events;
};
@@ -139,17 +140,18 @@ struct sctp_snd_all_completes {
};
/* Flags that go into the sinfo->sinfo_flags field */
-#define SCTP_EOF 0x0100/* Start shutdown procedures */
-#define SCTP_ABORT 0x0200/* Send an ABORT to peer */
-#define SCTP_UNORDERED 0x0400/* Message is un-ordered */
-#define SCTP_ADDR_OVER 0x0800/* Override the primary-address */
-#define SCTP_SENDALL 0x1000/* Send this on all associations */
-#define SCTP_EOR 0x2000/* end of message signal */
-#define SCTP_PR_POLICY_VALID 0x4000 /* pr sctp policy valid */
+#define SCTP_EOF 0x0100 /* Start shutdown procedures */
+#define SCTP_ABORT 0x0200 /* Send an ABORT to peer */
+#define SCTP_UNORDERED 0x0400 /* Message is un-ordered */
+#define SCTP_ADDR_OVER 0x0800 /* Override the primary-address */
+#define SCTP_SENDALL 0x1000 /* Send this on all associations */
+#define SCTP_EOR 0x2000 /* end of message signal */
+#define SCTP_SACK_IMMEDIATELY 0x4000 /* Set I-Bit */
#define INVALID_SINFO_FLAG(x) (((x) & 0xffffff00 \
& ~(SCTP_EOF | SCTP_ABORT | SCTP_UNORDERED |\
- SCTP_ADDR_OVER | SCTP_SENDALL | SCTP_EOR)) != 0)
+ SCTP_ADDR_OVER | SCTP_SENDALL | SCTP_EOR |\
+ SCTP_SACK_IMMEDIATELY)) != 0)
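A hedged user-space sketch of requesting the I-bit on a single send; sinfo rides to the kernel as the usual SCTP_SNDRCV cmsg:

    /* Hypothetical sketch: ask the peer to SACK this message right
     * away instead of running out its delayed-ack timer. */
    struct sctp_sndrcvinfo sinfo;

    memset(&sinfo, 0, sizeof(sinfo));
    sinfo.sinfo_flags = SCTP_SACK_IMMEDIATELY;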
/* for the endpoint */
/* The lower byte is an enumeration of PR-SCTP policies */
@@ -346,6 +348,16 @@ struct sctp_authkey_event {
/* indication values */
#define SCTP_AUTH_NEWKEY 0x0001
+#define SCTP_AUTH_NO_AUTH 0x0002
+#define SCTP_AUTH_FREE_KEY 0x0003
+
+
+struct sctp_sender_dry_event {
+ uint16_t sender_dry_type;
+ uint16_t sender_dry_flags;
+ uint32_t sender_dry_length;
+ sctp_assoc_t sender_dry_assoc_id;
+};
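On the receive side the new event shows up as a regular notification; a hedged sketch (assumes the usual MSG_NOTIFICATION handling and the sn_header TLV at the front of the union):

    /* Hypothetical sketch: spot the dry event in a notification. */
    union sctp_notification *snp = (union sctp_notification *)buf;

    if ((msg_flags & MSG_NOTIFICATION) &&
        (snp->sn_header.sn_type == SCTP_SENDER_DRY_EVENT)) {
            /* Every byte of queued user data has been sent and
             * acked; the application may now act on a fully
             * drained send queue. */
    }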
/*
@@ -386,6 +398,7 @@ union sctp_notification {
struct sctp_adaption_event sn_adaption_event;
struct sctp_pdapi_event sn_pdapi_event;
struct sctp_authkey_event sn_auth_event;
+ struct sctp_sender_dry_event sn_sender_dry_event;
struct sctp_stream_reset_event sn_strreset_event;
};
@@ -401,7 +414,7 @@ union sctp_notification {
#define SCTP_PARTIAL_DELIVERY_EVENT 0x0007
#define SCTP_AUTHENTICATION_EVENT 0x0008
#define SCTP_STREAM_RESET_EVENT 0x0009
-
+#define SCTP_SENDER_DRY_EVENT 0x000a
/*
* socket option structs
@@ -539,6 +552,7 @@ struct sctp_assoc_value {
};
struct sctp_assoc_ids {
+ uint32_t gaids_number_of_ids;
sctp_assoc_t gaids_assoc_id[0];
};
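A hedged user-space sketch of consuming the reshaped struct (SCTP_GET_ASSOC_ID_LIST is assumed to be the option backed by it; the handling hunk in sctp_usrreq.c below does not show the case label):

    /* Hypothetical sketch: fetch up to 32 association ids; the kernel
     * now reports the count explicitly instead of leaving the caller
     * to infer it from the returned option length. */
    static void
    print_assoc_ids(int fd)
    {
            char buf[sizeof(struct sctp_assoc_ids) + 32 * sizeof(sctp_assoc_t)];
            struct sctp_assoc_ids *ids = (struct sctp_assoc_ids *)buf;
            socklen_t len = sizeof(buf);
            uint32_t i;

            if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST, ids, &len) == 0)
                    for (i = 0; i < ids->gaids_number_of_ids; i++)
                            printf("assoc id %u\n", (unsigned)ids->gaids_assoc_id[i]);
    }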
diff --git a/sys/netinet/sctp_usrreq.c b/sys/netinet/sctp_usrreq.c
index 058415e..c642762 100644
--- a/sys/netinet/sctp_usrreq.c
+++ b/sys/netinet/sctp_usrreq.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -49,6 +49,7 @@ __FBSDID("$FreeBSD$");
#include <netinet/sctp_auth.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_cc_functions.h>
+#include <netinet/udp.h>
@@ -201,6 +202,9 @@ sctp_notify_mbuf(struct sctp_inpcb *inp,
/* Adjust destination size limit */
if (net->mtu > nxtsz) {
net->mtu = nxtsz;
+ if (net->port) {
+ net->mtu -= sizeof(struct udphdr);
+ }
}
/* now what about the ep? */
if (stcb->asoc.smallest_mtu > nxtsz) {
@@ -507,8 +511,10 @@ sctp_attach(struct socket *so, int proto, struct thread *p)
struct inpcb *ip_inp;
int error;
uint32_t vrf_id = SCTP_DEFAULT_VRFID;
+
#ifdef IPSEC
uint32_t flags;
+
#endif
inp = (struct sctp_inpcb *)so->so_pcb;
@@ -1704,6 +1710,29 @@ flags_out:
*optsize = sizeof(*av);
}
break;
+ /* EY - set socket option for nr_sacks */
+ case SCTP_NR_SACK_ON_OFF:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)) {
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+ if (stcb) {
+ av->assoc_value = stcb->asoc.sctp_nr_sack_on_off;
+ SCTP_TCB_UNLOCK(stcb);
+
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
+ error = ENOTCONN;
+ }
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
+ error = ENOPROTOOPT;
+ }
+ *optsize = sizeof(*av);
+ }
+ break;
/* JRS - Get socket option for pluggable congestion control */
case SCTP_PLUGGABLE_CC:
{
@@ -1767,7 +1796,7 @@ flags_out:
SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize);
at = 0;
- limit = *optsize / sizeof(sctp_assoc_t);
+ limit = (*optsize - sizeof(uint32_t)) / sizeof(sctp_assoc_t);
SCTP_INP_RLOCK(inp);
LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
if (at < limit) {
@@ -1779,7 +1808,8 @@ flags_out:
}
}
SCTP_INP_RUNLOCK(inp);
- *optsize = at * sizeof(sctp_assoc_t);
+ ids->gaids_number_of_ids = at;
+ *optsize = ((at * sizeof(sctp_assoc_t)) + sizeof(uint32_t));
}
break;
case SCTP_CONTEXT:
@@ -1961,6 +1991,9 @@ flags_out:
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT))
events->sctp_authentication_event = 1;
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT))
+ events->sctp_sender_dry_event = 1;
+
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
events->sctp_stream_reset_events = 1;
SCTP_INP_RUNLOCK(inp);
@@ -2532,7 +2565,7 @@ flags_out:
if (stcb) {
/* get the active key on the assoc */
- scact->scact_keynumber = stcb->asoc.authinfo.assoc_keyid;
+ scact->scact_keynumber = stcb->asoc.authinfo.active_keyid;
SCTP_TCB_UNLOCK(stcb);
} else {
/* get the endpoint active key */
@@ -2789,6 +2822,27 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
}
}
break;
+ /* EY nr_sack_on_off socket option */
+ case SCTP_NR_SACK_ON_OFF:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)) {
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+ if (stcb) {
+ stcb->asoc.sctp_nr_sack_on_off = (uint8_t) av->assoc_value;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
+ error = ENOTCONN;
+ }
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
+ error = ENOPROTOOPT;
+ }
+ }
+ break;
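A hedged sketch of toggling the new option from user space; note it only takes effect when the nr_sack_on_off sysctl is enabled, per the guard above:

    /* Hypothetical sketch: enable NR-SACK on one association. */
    static int
    enable_nr_sack(int fd, sctp_assoc_t assoc_id)
    {
            struct sctp_assoc_value av;

            memset(&av, 0, sizeof(av));
            av.assoc_id = assoc_id;
            av.assoc_value = 1;
            return (setsockopt(fd, IPPROTO_SCTP, SCTP_NR_SACK_ON_OFF,
                &av, sizeof(av)));
    }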
/* JRS - Set socket option for pluggable congestion control */
case SCTP_PLUGGABLE_CC:
{
@@ -3012,7 +3066,7 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
}
shared_key->key = key;
shared_key->keyid = sca->sca_keynumber;
- sctp_insert_sharedkey(shared_keys, shared_key);
+ error = sctp_insert_sharedkey(shared_keys, shared_key);
SCTP_TCB_UNLOCK(stcb);
} else {
/* set it on the endpoint */
@@ -3046,7 +3100,7 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
}
shared_key->key = key;
shared_key->keyid = sca->sca_keynumber;
- sctp_insert_sharedkey(shared_keys, shared_key);
+ error = sctp_insert_sharedkey(shared_keys, shared_key);
SCTP_INP_WUNLOCK(inp);
}
break;
@@ -3108,22 +3162,29 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
{
struct sctp_authkeyid *scact;
- SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, optsize);
+ SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid,
+ optsize);
SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
/* set the active key on the right place */
if (stcb) {
/* set the active key on the assoc */
- if (sctp_auth_setactivekey(stcb, scact->scact_keynumber)) {
- SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ if (sctp_auth_setactivekey(stcb,
+ scact->scact_keynumber)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
+ SCTP_FROM_SCTP_USRREQ,
+ EINVAL);
error = EINVAL;
}
SCTP_TCB_UNLOCK(stcb);
} else {
/* set the active key on the endpoint */
SCTP_INP_WLOCK(inp);
- if (sctp_auth_setactivekey_ep(inp, scact->scact_keynumber)) {
- SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ if (sctp_auth_setactivekey_ep(inp,
+ scact->scact_keynumber)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
+ SCTP_FROM_SCTP_USRREQ,
+ EINVAL);
error = EINVAL;
}
SCTP_INP_WUNLOCK(inp);
@@ -3134,20 +3195,58 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
{
struct sctp_authkeyid *scdel;
- SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, optsize);
+ SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid,
+ optsize);
SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id);
/* delete the key from the right place */
if (stcb) {
- if (sctp_delete_sharedkey(stcb, scdel->scact_keynumber)) {
- SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ if (sctp_delete_sharedkey(stcb,
+ scdel->scact_keynumber)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
+ SCTP_FROM_SCTP_USRREQ,
+ EINVAL);
error = EINVAL;
}
SCTP_TCB_UNLOCK(stcb);
} else {
SCTP_INP_WLOCK(inp);
- if (sctp_delete_sharedkey_ep(inp, scdel->scact_keynumber)) {
- SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ if (sctp_delete_sharedkey_ep(inp,
+ scdel->scact_keynumber)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
+ SCTP_FROM_SCTP_USRREQ,
+ EINVAL);
+ error = EINVAL;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ break;
+ }
+ case SCTP_AUTH_DEACTIVATE_KEY:
+ {
+ struct sctp_authkeyid *keyid;
+
+ SCTP_CHECK_AND_CAST(keyid, optval, struct sctp_authkeyid,
+ optsize);
+ SCTP_FIND_STCB(inp, stcb, keyid->scact_assoc_id);
+
+ /* deactivate the key from the right place */
+ if (stcb) {
+ if (sctp_deact_sharedkey(stcb,
+ keyid->scact_keynumber)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
+ SCTP_FROM_SCTP_USRREQ,
+ EINVAL);
+ error = EINVAL;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_WLOCK(inp);
+ if (sctp_deact_sharedkey_ep(inp,
+ keyid->scact_keynumber)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
+ SCTP_FROM_SCTP_USRREQ,
+ EINVAL);
error = EINVAL;
}
SCTP_INP_WUNLOCK(inp);
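Deactivation differs from deletion: the key presumably stays valid for chunks already in flight and is freed (raising SCTP_AUTH_FREE_KEY) once its last reference drains. A hypothetical caller:

    /* Hypothetical sketch: retire key 2 for new sends without yanking
     * it from under chunks already queued with it. */
    static int
    deactivate_auth_key(int fd, sctp_assoc_t assoc_id)
    {
            struct sctp_authkeyid akid;

            memset(&akid, 0, sizeof(akid));
            akid.scact_assoc_id = assoc_id;
            akid.scact_keynumber = 2;
            return (setsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_DEACTIVATE_KEY,
                &akid, sizeof(akid)));
    }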
@@ -3414,6 +3513,12 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT);
}
+ if (events->sctp_sender_dry_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_DRYEVNT);
+ }
+
if (events->sctp_stream_reset_events) {
sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
} else {
@@ -4123,6 +4228,7 @@ sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
#ifdef INET6
if (addr->sa_family == AF_INET6) {
struct sockaddr_in6 *sin6p;
+
if (addr->sa_len != sizeof(struct sockaddr_in6)) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
return (EINVAL);
@@ -4136,6 +4242,7 @@ sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
#endif
if (addr->sa_family == AF_INET) {
struct sockaddr_in *sinp;
+
if (addr->sa_len != sizeof(struct sockaddr_in)) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
return (EINVAL);
diff --git a/sys/netinet/sctp_var.h b/sys/netinet/sctp_var.h
index 9340c1c..934af00 100644
--- a/sys/netinet/sctp_var.h
+++ b/sys/netinet/sctp_var.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -86,6 +86,10 @@ extern struct pr_usrreqs sctp_usrreqs;
}
#define sctp_free_a_strmoq(_stcb, _strmoq) { \
+ if ((_strmoq)->holds_key_ref) { \
+ sctp_auth_key_release((_stcb), (_strmoq)->auth_keyid); \
+ (_strmoq)->holds_key_ref = 0; \
+ } \
SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_strmoq), (_strmoq)); \
SCTP_DECR_STRMOQ_COUNT(); \
}
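The release in the free macros pairs with an acquire taken when a chunk is bound to a key; a sketch of the assumed acquire side (sctp_auth_key_acquire() is inferred from the release call and is not shown in these hunks):

    /* Hypothetical sketch: pin the active send key to the chunk so
     * the key cannot be freed while this chunk still references it. */
    chk->auth_keyid = stcb->asoc.authinfo.active_keyid;
    sctp_auth_key_acquire(stcb, chk->auth_keyid);
    chk->holds_key_ref = 1;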
@@ -94,11 +98,15 @@ extern struct pr_usrreqs sctp_usrreqs;
(_strmoq) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_strmoq), struct sctp_stream_queue_pending); \
if ((_strmoq)) { \
SCTP_INCR_STRMOQ_COUNT(); \
+ (_strmoq)->holds_key_ref = 0; \
} \
}
-
#define sctp_free_a_chunk(_stcb, _chk) { \
+ if ((_chk)->holds_key_ref) {\
+ sctp_auth_key_release((_stcb), (_chk)->auth_keyid); \
+ (_chk)->holds_key_ref = 0; \
+ } \
if(_stcb) { \
SCTP_TCB_LOCK_ASSERT((_stcb)); \
if ((_chk)->whoTo) { \
@@ -126,21 +134,22 @@ extern struct pr_usrreqs sctp_usrreqs;
if ((_chk)) { \
SCTP_INCR_CHK_COUNT(); \
(_chk)->whoTo = NULL; \
+ (_chk)->holds_key_ref = 0; \
} \
} else { \
(_chk) = TAILQ_FIRST(&(_stcb)->asoc.free_chunks); \
TAILQ_REMOVE(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \
atomic_subtract_int(&SCTP_BASE_INFO(ipi_free_chunks), 1); \
+ (_chk)->holds_key_ref = 0; \
SCTP_STAT_INCR(sctps_cached_chk); \
(_stcb)->asoc.free_chunk_cnt--; \
} \
}
-
#define sctp_free_remote_addr(__net) { \
if ((__net)) { \
- if (atomic_fetchadd_int(&(__net)->ref_count, -1) == 1) { \
+ if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&(__net)->ref_count)) { \
(void)SCTP_OS_TIMER_STOP(&(__net)->rxt_timer.timer); \
(void)SCTP_OS_TIMER_STOP(&(__net)->pmtu_timer.timer); \
(void)SCTP_OS_TIMER_STOP(&(__net)->fr_timer.timer); \
@@ -160,64 +169,18 @@ extern struct pr_usrreqs sctp_usrreqs;
} \
}
-#ifdef INVARIANTS
-
-
#define sctp_sbfree(ctl, stcb, sb, m) { \
- uint32_t val; \
- val = atomic_fetchadd_int(&(sb)->sb_cc,-(SCTP_BUF_LEN((m)))); \
- if (val < SCTP_BUF_LEN((m))) { \
- panic("sb_cc goes negative"); \
- } \
- val = atomic_fetchadd_int(&(sb)->sb_mbcnt,-(MSIZE)); \
- if (val < MSIZE) { \
- panic("sb_mbcnt goes negative"); \
- } \
+ SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_cc, SCTP_BUF_LEN((m))); \
+ SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_mbcnt, MSIZE); \
if (((ctl)->do_not_ref_stcb == 0) && stcb) {\
- val = atomic_fetchadd_int(&(stcb)->asoc.sb_cc,-(SCTP_BUF_LEN((m)))); \
- if (val < SCTP_BUF_LEN((m))) {\
- panic("stcb->sb_cc goes negative"); \
- } \
- val = atomic_fetchadd_int(&(stcb)->asoc.my_rwnd_control_len,-(MSIZE)); \
- if (val < MSIZE) { \
- panic("asoc->mbcnt goes negative"); \
- } \
+ SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \
+ SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
} \
if (SCTP_BUF_TYPE(m) != MT_DATA && SCTP_BUF_TYPE(m) != MT_HEADER && \
SCTP_BUF_TYPE(m) != MT_OOBDATA) \
atomic_subtract_int(&(sb)->sb_ctl,SCTP_BUF_LEN((m))); \
}
-
-#else
-
-#define sctp_sbfree(ctl, stcb, sb, m) { \
- uint32_t val; \
- val = atomic_fetchadd_int(&(sb)->sb_cc,-(SCTP_BUF_LEN((m)))); \
- if (val < SCTP_BUF_LEN((m))) { \
- (sb)->sb_cc = 0;\
- } \
- val = atomic_fetchadd_int(&(sb)->sb_mbcnt,-(MSIZE)); \
- if (val < MSIZE) { \
- (sb)->sb_mbcnt = 0; \
- } \
- if (((ctl)->do_not_ref_stcb == 0) && stcb) {\
- val = atomic_fetchadd_int(&(stcb)->asoc.sb_cc,-(SCTP_BUF_LEN((m)))); \
- if (val < SCTP_BUF_LEN((m))) {\
- (stcb)->asoc.sb_cc = 0; \
- } \
- val = atomic_fetchadd_int(&(stcb)->asoc.my_rwnd_control_len,-(MSIZE)); \
- if (val < MSIZE) { \
- (stcb)->asoc.my_rwnd_control_len = 0; \
- } \
- } \
- if (SCTP_BUF_TYPE(m) != MT_DATA && SCTP_BUF_TYPE(m) != MT_HEADER && \
- SCTP_BUF_TYPE(m) != MT_OOBDATA) \
- atomic_subtract_int(&(sb)->sb_ctl,SCTP_BUF_LEN((m))); \
-}
-
-#endif
-
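The two removed variants fold into one macro; judging by the deleted code, SCTP_SAVE_ATOMIC_DECREMENT presumably panics on underflow under INVARIANTS and clamps to zero otherwise, roughly:

    #ifdef INVARIANTS
    #define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
    { \
            uint32_t oldval; \
            oldval = atomic_fetchadd_int((addr), -(val)); \
            if (oldval < (val)) \
                    panic("Counter goes negative"); \
    }
    #else
    #define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
    { \
            uint32_t oldval; \
            oldval = atomic_fetchadd_int((addr), -(val)); \
            if (oldval < (val)) \
                    *(addr) = 0; \
    }
    #endif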
#define sctp_sballoc(stcb, sb, m) { \
atomic_add_int(&(sb)->sb_cc,SCTP_BUF_LEN((m))); \
atomic_add_int(&(sb)->sb_mbcnt, MSIZE); \
diff --git a/sys/netinet/sctputil.c b/sys/netinet/sctputil.c
index 4a6de38..ce06853 100644
--- a/sys/netinet/sctputil.c
+++ b/sys/netinet/sctputil.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -848,7 +848,7 @@ retry:
}
uint32_t
-sctp_select_a_tag(struct sctp_inpcb *inp, int save_in_twait)
+sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
{
u_long x, not_done;
struct timeval now;
@@ -861,7 +861,7 @@ sctp_select_a_tag(struct sctp_inpcb *inp, int save_in_twait)
/* we never use 0 */
continue;
}
- if (sctp_is_vtag_good(inp, x, &now, save_in_twait)) {
+ if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
not_done = 0;
}
}
@@ -894,6 +894,8 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
asoc->cookie_life = m->sctp_ep.def_cookie_life;
asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
+ /* EY Init nr_sack variable */
+ asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
/* JRS 5/21/07 - Init CMT PF variables */
asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
asoc->sctp_frag_point = m->sctp_frag_point;
@@ -910,7 +912,8 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
#endif
asoc->sb_send_resv = 0;
if (override_tag) {
- if (sctp_is_in_timewait(override_tag)) {
+#ifdef MICHAELS_EXPERIMENT
+ if (sctp_is_in_timewait(override_tag, stcb->sctp_ep->sctp_lport, stcb->rport)) {
/*
* It must be in the time-wait hash; we put it there
* when we allocate one. If not, the peer is playing
@@ -924,13 +927,15 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
#endif
return (ENOMEM);
}
-
+#else
+ asoc->my_vtag = override_tag;
+#endif
} else {
- asoc->my_vtag = sctp_select_a_tag(m, 1);
+ asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
}
/* Get the nonce tags */
- asoc->my_vtag_nonce = sctp_select_a_tag(m, 0);
- asoc->peer_vtag_nonce = sctp_select_a_tag(m, 0);
+ asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
+ asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
asoc->vrf_id = vrf_id;
if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
@@ -951,13 +956,12 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
#endif
asoc->refcnt = 0;
asoc->assoc_up_sent = 0;
- asoc->assoc_id = asoc->my_vtag;
asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
sctp_select_initial_TSN(&m->sctp_ep);
asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
/* we are optimistic here */
asoc->peer_supports_pktdrop = 1;
-
+ asoc->peer_supports_nat = 0;
asoc->sent_queue_retran_cnt = 0;
/* for CMT */
@@ -1146,6 +1150,17 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
return (ENOMEM);
}
memset(asoc->mapping_array, 0, asoc->mapping_array_size);
+ /* EY - initialize the nr_mapping_array just like mapping array */
+ asoc->nr_mapping_array_size = SCTP_INITIAL_NR_MAPPING_ARRAY;
+ SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->nr_mapping_array_size,
+ SCTP_M_MAP);
+ /*
+ * if (asoc->nr_mapping_array == NULL) { SCTP_FREE(asoc->strmout,
+ * SCTP_M_STRMO); SCTP_LTRACE_ERR_RET(NULL, stcb, NULL,
+ * SCTP_FROM_SCTPUTIL, ENOMEM); return (ENOMEM); }
+ */
+ memset(asoc->nr_mapping_array, 0, asoc->nr_mapping_array_size);
+
/* Now the init of the other outqueues */
TAILQ_INIT(&asoc->free_chunks);
TAILQ_INIT(&asoc->out_wheel);
@@ -1159,6 +1174,7 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
TAILQ_INIT(&asoc->asconf_queue);
/* authentication fields */
asoc->authinfo.random = NULL;
+ asoc->authinfo.active_keyid = 0;
asoc->authinfo.assoc_key = NULL;
asoc->authinfo.assoc_keyid = 0;
asoc->authinfo.recv_key = NULL;
@@ -1204,6 +1220,30 @@ sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
return (0);
}
+/* EY - nr_sack version of the above method */
+int
+sctp_expand_nr_mapping_array(struct sctp_association *asoc, uint32_t needed)
+{
+ /* nr mapping array needs to grow */
+ uint8_t *new_array;
+ uint32_t new_size;
+
+ new_size = asoc->nr_mapping_array_size + ((needed + 7) / 8 + SCTP_NR_MAPPING_ARRAY_INCR);
+ SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
+ if (new_array == NULL) {
+ /* can't get more, forget it */
+ SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
+ new_size);
+ return (-1);
+ }
+ memset(new_array, 0, new_size);
+ memcpy(new_array, asoc->nr_mapping_array, asoc->nr_mapping_array_size);
+ SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
+ asoc->nr_mapping_array = new_array;
+ asoc->nr_mapping_array_size = new_size;
+ return (0);
+}
+
#if defined(SCTP_USE_THREAD_BASED_ITERATOR)
static void
sctp_iterator_work(struct sctp_iterator *it)
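The new_size arithmetic in sctp_expand_nr_mapping_array() above rounds
the needed bit count up to whole bytes and adds slack. A worked
example, with SCTP_NR_MAPPING_ARRAY_INCR assumed to be 32 (the real
constant lives in sctp_constants.h and is not shown in this diff):

#include <stdint.h>

/* Sketch: compute the grown bitmap size, one bit per TSN. */
static uint32_t
nr_map_new_size(uint32_t cur_size, uint32_t needed_bits)
{
	const uint32_t incr = 32;	/* assumed INCR value */

	/* (needed + 7) / 8 rounds bits up to whole bytes. */
	return (cur_size + ((needed_bits + 7) / 8 + incr));
}
/* e.g. 512-byte array, 530 bits needed: 512 + 67 + 32 = 611 bytes */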
@@ -1617,7 +1657,15 @@ sctp_timeout_handler(void *t)
stcb->asoc.timosack++;
if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
sctp_sack_check(stcb, 0, 0, &abort_flag);
- sctp_send_sack(stcb);
+
+ /*
+ * EY: if NR-SACKs are in use, send an NR-SACK; otherwise
+ * send a SACK.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
+ sctp_send_nr_sack(stcb);
+ else
+ sctp_send_sack(stcb);
}
#ifdef SCTP_AUDITING_ENABLED
sctp_auditing(4, inp, stcb, net);
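The two-branch SACK selection above reappears verbatim in
sctp_user_rcvd() later in this file. A hypothetical helper (not part
of this commit) that captures the predicate once:

/*
 * Sketch: send an NR-SACK when the sysctl enables it and the peer
 * negotiated NR-SACK support; otherwise fall back to a plain SACK.
 */
static void
sctp_send_sack_or_nr_sack(struct sctp_tcb *stcb)
{
	if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
	    stcb->asoc.peer_supports_nr_sack) {
		sctp_send_nr_sack(stcb);
	} else {
		sctp_send_sack(stcb);
	}
}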
@@ -2905,19 +2953,6 @@ sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
#endif
/*
- * First if we are are going down dump everything we can to the
- * socket rcv queue.
- */
-
- if ((stcb == NULL) ||
- (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
- (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
- (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
- ) {
- /* If the socket is gone we are out of here */
- return;
- }
- /*
* For TCP model AND UDP connected sockets we will send an error up
* when an ABORT comes in.
*/
@@ -3025,10 +3060,10 @@ sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
struct sctp_paddr_change *spc;
struct sctp_queued_to_read *control;
- if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)))
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
/* event not enabled */
return;
-
+ }
m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
if (m_notify == NULL)
return;
@@ -3099,15 +3134,15 @@ sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
#endif
)
{
- struct mbuf *m_notify, *tt;
+ struct mbuf *m_notify;
struct sctp_send_failed *ssf;
struct sctp_queued_to_read *control;
int length;
- if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
/* event not enabled */
return;
-
+ }
m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
if (m_notify == NULL)
/* no space left */
@@ -3133,20 +3168,18 @@ sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
ssf->ssf_assoc_id = sctp_get_associd(stcb);
- /* Take off the chunk header */
- m_adj(chk->data, sizeof(struct sctp_data_chunk));
-
- /* trim out any 0 len mbufs */
- while (SCTP_BUF_LEN(chk->data) == 0) {
- tt = chk->data;
- chk->data = SCTP_BUF_NEXT(tt);
- SCTP_BUF_NEXT(tt) = NULL;
- sctp_m_freem(tt);
- }
-
SCTP_BUF_NEXT(m_notify) = chk->data;
SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
-
+ if (chk->data) {
+ /*
+ * trim off the SCTP chunk header (it should be there)
+ */
+ if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
+ m_adj(chk->data, sizeof(struct sctp_data_chunk));
+ sctp_mbuf_crush(chk->data);
+ chk->send_size -= sizeof(struct sctp_data_chunk);
+ }
+ }
/* Steal off the mbuf */
chk->data = NULL;
/*
@@ -3187,10 +3220,10 @@ sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
struct sctp_queued_to_read *control;
int length;
- if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
/* event not enabled */
return;
-
+ }
length = sizeof(struct sctp_send_failed) + sp->length;
m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
if (m_notify == NULL)
@@ -3257,10 +3290,10 @@ sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
struct sctp_adaptation_event *sai;
struct sctp_queued_to_read *control;
- if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)))
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
/* event not enabled */
return;
-
+ }
m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
if (m_notify == NULL)
/* no space left */
@@ -3304,11 +3337,10 @@ sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
struct sctp_queued_to_read *control;
struct sockbuf *sb;
- if ((stcb == NULL) || (stcb->sctp_socket == NULL) ||
- sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT))
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
/* event not enabled */
return;
-
+ }
m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
if (m_notify == NULL)
/* no space left */
@@ -3378,9 +3410,6 @@ sctp_notify_shutdown_event(struct sctp_tcb *stcb)
* For TCP model AND UDP connected sockets we will send an error up
* when an SHUTDOWN completes
*/
- if (stcb == NULL) {
- return;
- }
if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
/* mark socket closed for read/write and wakeup! */
@@ -3404,10 +3433,10 @@ sctp_notify_shutdown_event(struct sctp_tcb *stcb)
SCTP_SOCKET_UNLOCK(so, 1);
#endif
}
- if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
/* event not enabled */
return;
-
+ }
m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
if (m_notify == NULL)
/* no space left */
@@ -3440,6 +3469,53 @@ sctp_notify_shutdown_event(struct sctp_tcb *stcb)
}
static void
+sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
+ int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
+{
+ struct mbuf *m_notify;
+ struct sctp_sender_dry_event *event;
+ struct sctp_queued_to_read *control;
+
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
+ /* event not enabled */
+ return;
+ }
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
+ if (m_notify == NULL) {
+ /* no space left */
+ return;
+ }
+ SCTP_BUF_LEN(m_notify) = 0;
+ event = mtod(m_notify, struct sctp_sender_dry_event *);
+ event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
+ event->sender_dry_flags = 0;
+ event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
+ event->sender_dry_assoc_id = sctp_get_associd(stcb);
+
+ SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
+ SCTP_BUF_NEXT(m_notify) = NULL;
+
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0, m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = SCTP_BUF_LEN(m_notify);
+ control->spec_flags = M_NOTIFICATION;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb, control,
+ &stcb->sctp_socket->so_rcv, 1, so_locked);
+}
+
+static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
int number_entries, uint16_t * list, int flag)
{
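Applications only receive the new sender-dry notification after
subscribing to it. A hedged userland sketch, assuming the SCTP_EVENTS
socket option and an sctp_sender_dry_event field in struct
sctp_event_subscribe (the field name is an assumption, inferred from
this commit's sctp_uio.h changes):

#include <sys/socket.h>
#include <netinet/sctp.h>
#include <string.h>

/* Sketch: opt in to SCTP_SENDER_DRY_EVENT delivery on a socket. */
static int
enable_sender_dry_event(int sd)
{
	struct sctp_event_subscribe events;

	memset(&events, 0, sizeof(events));
	events.sctp_sender_dry_event = 1;
	return (setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS,
	    &events, sizeof(events)));
}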
@@ -3448,13 +3524,10 @@ sctp_notify_stream_reset(struct sctp_tcb *stcb,
struct sctp_stream_reset_event *strreset;
int len;
- if (stcb == NULL) {
- return;
- }
- if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
/* event not enabled */
return;
-
+ }
m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
if (m_notify == NULL)
/* no space left */
@@ -3516,19 +3589,11 @@ sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
#endif
)
{
- if (stcb == NULL) {
- /* unlikely but */
- return;
- }
- if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ if ((stcb == NULL) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
- (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
- ) {
- /* No notifications up when we are in a no socket state */
- return;
- }
- if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
- /* Can't send up to a closed socket any notifications */
+ (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
+ /* If the socket is gone we are out of here */
return;
}
if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
@@ -3549,6 +3614,10 @@ sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
sctp_notify_adaptation_layer(stcb, error);
}
+ if (stcb->asoc.peer_supports_auth == 0) {
+ sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
+ NULL, so_locked);
+ }
break;
case SCTP_NOTIFY_ASSOC_DOWN:
sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
@@ -3613,6 +3682,10 @@ sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
break;
case SCTP_NOTIFY_ASSOC_RESTART:
sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
+ if (stcb->asoc.peer_supports_auth == 0) {
+ sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
+ NULL, so_locked);
+ }
break;
case SCTP_NOTIFY_HB_RESP:
break;
@@ -3651,16 +3724,22 @@ sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
break;
case SCTP_NOTIFY_AUTH_NEW_KEY:
sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
- (uint16_t) (uintptr_t) data);
+ (uint16_t) (uintptr_t) data,
+ so_locked);
break;
-#if 0
- case SCTP_NOTIFY_AUTH_KEY_CONFLICT:
- sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT,
- error, (uint16_t) (uintptr_t) data);
+ case SCTP_NOTIFY_AUTH_FREE_KEY:
+ sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
+ (uint16_t) (uintptr_t) data,
+ so_locked);
+ break;
+ case SCTP_NOTIFY_NO_PEER_AUTH:
+ sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
+ (uint16_t) (uintptr_t) data,
+ so_locked);
+ break;
+ case SCTP_NOTIFY_SENDER_DRY:
+ sctp_notify_sender_dry_event(stcb, so_locked);
break;
-#endif /* not yet? remove? */
-
-
default:
SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
__FUNCTION__, notification, notification);
@@ -3701,17 +3780,6 @@ sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
while (chk) {
TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
asoc->sent_queue_cnt--;
- if (chk->data) {
- /*
- * trim off the sctp chunk header(it should
- * be there)
- */
- if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
- m_adj(chk->data, sizeof(struct sctp_data_chunk));
- sctp_mbuf_crush(chk->data);
- chk->send_size -= sizeof(struct sctp_data_chunk);
- }
- }
sctp_free_bufspace(stcb, asoc, chk, 1);
sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
@@ -3730,17 +3798,6 @@ sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
while (chk) {
TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
asoc->send_queue_cnt--;
- if (chk->data) {
- /*
- * trim off the sctp chunk header(it should
- * be there)
- */
- if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
- m_adj(chk->data, sizeof(struct sctp_data_chunk));
- sctp_mbuf_crush(chk->data);
- chk->send_size -= sizeof(struct sctp_data_chunk);
- }
- }
sctp_free_bufspace(stcb, asoc, chk, 1);
sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
if (chk->data) {
@@ -4355,7 +4412,6 @@ sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
SCTP_INP_READ_UNLOCK(new_inp);
}
-
void
sctp_add_to_readq(struct sctp_inpcb *inp,
struct sctp_tcb *stcb,
@@ -4687,7 +4743,9 @@ sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
#endif
sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
- sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, SCTP_SO_NOT_LOCKED);
+ sctp_flight_size_decrease(tp1);
+ sctp_total_flight_decrease(stcb, tp1);
+ sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
sctp_m_freem(tp1->data);
tp1->data = NULL;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
@@ -4970,7 +5028,15 @@ sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
goto out;
}
SCTP_STAT_INCR(sctps_wu_sacks_sent);
- sctp_send_sack(stcb);
+ /*
+ * EY: if NR-SACKs are in use, send an NR-SACK; otherwise
+ * send a SACK.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
+ sctp_send_nr_sack(stcb);
+ else
+ sctp_send_sack(stcb);
+
sctp_chunk_output(stcb->sctp_ep, stcb,
SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
/* make sure no timer is running */
diff --git a/sys/netinet/sctputil.h b/sys/netinet/sctputil.h
index 5be1df7..cc940b3 100644
--- a/sys/netinet/sctputil.h
+++ b/sys/netinet/sctputil.h
@@ -77,7 +77,7 @@ struct sctp_ifa *
uint32_t sctp_select_initial_TSN(struct sctp_pcb *);
-uint32_t sctp_select_a_tag(struct sctp_inpcb *, int);
+uint32_t sctp_select_a_tag(struct sctp_inpcb *, uint16_t lport, uint16_t rport, int);
int sctp_init_asoc(struct sctp_inpcb *, struct sctp_tcb *, int, uint32_t, uint32_t);
@@ -168,6 +168,8 @@ sctp_report_all_outbound(struct sctp_tcb *, int, int
int sctp_expand_mapping_array(struct sctp_association *, uint32_t);
+/* EY nr_sack version of the above method, expands nr_mapping_array */
+int sctp_expand_nr_mapping_array(struct sctp_association *, uint32_t);
void
sctp_abort_notification(struct sctp_tcb *, int, int
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
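The widened prototype above means every caller now passes the local
and remote ports, letting the vtag uniqueness check discriminate
duplicates by address/port pair. A minimal call-site sketch (the
surrounding locals are illustrative):

/* Sketch: pick a vtag scoped to this endpoint's port pair. */
uint32_t vtag;

vtag = sctp_select_a_tag(inp,
    stcb->sctp_ep->sctp_lport,	/* local port */
    stcb->rport,		/* remote port */
    1);				/* record the tag in time-wait */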
diff --git a/sys/netinet6/sctp6_usrreq.c b/sys/netinet6/sctp6_usrreq.c
index d021dfc..c6fc3cf 100644
--- a/sys/netinet6/sctp6_usrreq.c
+++ b/sys/netinet6/sctp6_usrreq.c
@@ -286,6 +286,9 @@ sctp6_notify_mbuf(struct sctp_inpcb *inp, struct icmp6_hdr *icmp6,
/* Adjust destination size limit */
if (net->mtu > nxtsz) {
net->mtu = nxtsz;
+ if (net->port) {
+ net->mtu -= sizeof(struct udphdr);
+ }
}
/* now what about the ep? */
if (stcb->asoc.smallest_mtu > nxtsz) {
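The sctp6_notify_mbuf() change above applies the UDP-encapsulation MTU
rule to ICMPv6 packet-too-big handling: when a path is UDP encapsulated
(net->port != 0), the usable SCTP MTU is the reported next-hop MTU
minus the UDP header. A standalone sketch of the same logic:

#include <netinet/udp.h>

/* Sketch: clamp a destination MTU, reserving UDP overhead if used. */
static void
clamp_net_mtu(struct sctp_nets *net, uint32_t nxtsz)
{
	if (net->mtu > nxtsz) {
		net->mtu = nxtsz;
		if (net->port) {
			/* UDP encapsulated: leave room for the UDP header */
			net->mtu -= sizeof(struct udphdr);
		}
	}
}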