Diffstat (limited to 'sys/netinet/sctp_usrreq.c')
-rw-r--r--  sys/netinet/sctp_usrreq.c  |  144
1 file changed, 126 insertions(+), 18 deletions(-)
diff --git a/sys/netinet/sctp_usrreq.c b/sys/netinet/sctp_usrreq.c
index 6c34d47..2f24181 100644
--- a/sys/netinet/sctp_usrreq.c
+++ b/sys/netinet/sctp_usrreq.c
@@ -1390,7 +1390,8 @@ sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
return (EADDRINUSE);
}
- if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
+ (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
return (EINVAL);
}
@@ -1461,6 +1462,7 @@ sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
/* FIX ME: do we want to pass in a vrf on the connect call? */
vrf_id = inp->def_vrf_id;
+
/* We are GOOD to go */
stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0, vrf_id,
(struct thread *)p
@@ -1639,6 +1641,20 @@ flags_out:
#endif
break;
}
+ case SCTP_REUSE_PORT:
+ {
+ uint32_t *value;
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
+ /* Can't do this for a 1-m socket */
+ error = EINVAL;
+ break;
+ }
+ SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+ *value = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE);
+ *optsize = sizeof(uint32_t);
+ }
+ break;
case SCTP_PARTIAL_DELIVERY_POINT:
{
uint32_t *value;
@@ -2498,8 +2514,10 @@ flags_out:
break;
}
/* copy in the list */
- for (i = 0; i < hmaclist->num_algo; i++)
+ shmac->shmac_number_of_idents = hmaclist->num_algo;
+ for (i = 0; i < hmaclist->num_algo; i++) {
shmac->shmac_idents[i] = hmaclist->hmac[i];
+ }
SCTP_INP_RUNLOCK(inp);
*optsize = size;
break;
@@ -2696,6 +2714,25 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
}
SCTP_INP_WUNLOCK(inp);
break;
+ case SCTP_REUSE_PORT:
+ {
+ SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
+ /* Can't set it after we are bound */
+ error = EINVAL;
+ break;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
+ /* Can't do this for a 1-m socket */
+ error = EINVAL;
+ break;
+ }
+ if (optval)
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE);
+ else
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE);
+ }
+ break;
case SCTP_PARTIAL_DELIVERY_POINT:
{
uint32_t *value;
@@ -3017,20 +3054,26 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
{
struct sctp_hmacalgo *shmac;
sctp_hmaclist_t *hmaclist;
- uint32_t hmacid;
- size_t size, i, found;
+ uint16_t hmacid;
+ uint32_t i;
+
+ size_t found;
SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize);
- size = (optsize - sizeof(*shmac)) / sizeof(shmac->shmac_idents[0]);
- hmaclist = sctp_alloc_hmaclist(size);
+ if (optsize < sizeof(struct sctp_hmacalgo) + shmac->shmac_number_of_idents * sizeof(uint16_t)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ hmaclist = sctp_alloc_hmaclist(shmac->shmac_number_of_idents);
if (hmaclist == NULL) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
error = ENOMEM;
break;
}
- for (i = 0; i < size; i++) {
+ for (i = 0; i < shmac->shmac_number_of_idents; i++) {
hmacid = shmac->shmac_idents[i];
- if (sctp_auth_add_hmacid(hmaclist, (uint16_t) hmacid)) {
+ if (sctp_auth_add_hmacid(hmaclist, hmacid)) {
/* invalid HMACs were found */ ;
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
error = EINVAL;
@@ -4098,7 +4141,8 @@ sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
}
}
/* Now do we connect? */
- if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
+ (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
error = EINVAL;
goto out_now;
@@ -4117,7 +4161,7 @@ sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
} else {
/*
* We increment here since sctp_findassociation_ep_addr()
- * wil do a decrement if it finds the stcb as long as the
+ * will do a decrement if it finds the stcb as long as the
* locked tcb (last argument) is NOT a TCB.. aka NULL.
*/
SCTP_INP_INCR_REF(inp);
@@ -4183,6 +4227,64 @@ sctp_listen(struct socket *so, int backlog, struct thread *p)
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
return (ECONNRESET);
}
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) {
+ /* See if we have a listener */
+ struct sctp_inpcb *tinp;
+ union sctp_sockstore store, *sp;
+
+ sp = &store;
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+ /* not bound all */
+ struct sctp_laddr *laddr;
+
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ memcpy(&store, &laddr->ifa->address, sizeof(store));
+ sp->sin.sin_port = inp->sctp_lport;
+ tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id);
+ if (tinp && (tinp != inp) &&
+ ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) &&
+ ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+ (tinp->sctp_socket->so_qlimit)) {
+ /*
+ * we have a listener already and
+ * its not this inp.
+ */
+ SCTP_INP_DECR_REF(tinp);
+ return (EADDRINUSE);
+ } else if (tinp) {
+ SCTP_INP_DECR_REF(tinp);
+ }
+ }
+ } else {
+ /* Setup a local addr bound all */
+ memset(&store, 0, sizeof(store));
+ store.sin.sin_port = inp->sctp_lport;
+#ifdef INET6
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ store.sa.sa_family = AF_INET6;
+ store.sa.sa_len = sizeof(struct sockaddr_in6);
+ }
+#endif
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+ store.sa.sa_family = AF_INET;
+ store.sa.sa_len = sizeof(struct sockaddr_in);
+ }
+ tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id);
+ if (tinp && (tinp != inp) &&
+ ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) &&
+ ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+ (tinp->sctp_socket->so_qlimit)) {
+ /*
+ * we have a listener already and its not
+ * this inp.
+ */
+ SCTP_INP_DECR_REF(tinp);
+ return (EADDRINUSE);
+ } else if (tinp) {
+ SCTP_INP_DECR_REF(tinp);
+ }
+ }
+ }
SCTP_INP_RLOCK(inp);
#ifdef SCTP_LOCK_LOGGING
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) {
@@ -4196,30 +4298,36 @@ sctp_listen(struct socket *so, int backlog, struct thread *p)
SCTP_INP_RUNLOCK(inp);
return (error);
}
+ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ /*
+ * The unlucky case - We are in the tcp pool with this guy.
+ * - Someone else is in the main inp slot. - We must move
+ * this guy (the listener) to the main slot - We must then
+ * move the guy that was listener to the TCP Pool.
+ */
+ if (sctp_swap_inpcb_for_listen(inp)) {
+ goto in_use;
+ }
+ }
if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
(inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
/* We are already connected AND the TCP model */
+in_use:
SCTP_INP_RUNLOCK(inp);
SOCK_UNLOCK(so);
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
return (EADDRINUSE);
}
+ SCTP_INP_RUNLOCK(inp);
if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
/* We must do a bind. */
SOCK_UNLOCK(so);
- SCTP_INP_RUNLOCK(inp);
if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) {
/* bind error, probably perm */
return (error);
}
SOCK_LOCK(so);
- } else {
- if (backlog != 0) {
- inp->sctp_flags |= SCTP_PCB_FLAGS_LISTENING;
- } else {
- inp->sctp_flags &= ~SCTP_PCB_FLAGS_LISTENING;
- }
- SCTP_INP_RUNLOCK(inp);
}
/* It appears for 7.0 and on, we must always call this. */
solisten_proto(so, backlog);
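
Usage note: the two sketches below are illustrative only and are not part of the commit. The first assumes the SCTP_REUSE_PORT option added above is exposed to user space through <netinet/sctp.h>; as the new sctp_setopt() checks show, it must be enabled on a one-to-one (SOCK_STREAM) SCTP socket before bind(), and it fails with EINVAL on a one-to-many socket or after binding.

/*
 * Hypothetical sketch: enabling SCTP_REUSE_PORT so several one-to-one
 * sockets can share a port, matching the UNBOUND/UDPTYPE checks added
 * in sctp_setopt() above.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <string.h>
#include <err.h>

int
main(void)
{
	int fd;
	uint32_t on = 1;
	struct sockaddr_in sin;

	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
	if (fd < 0)
		err(1, "socket");
	/* Must precede bind(); rejected on a one-to-many socket. */
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_REUSE_PORT, &on, sizeof(on)) < 0)
		err(1, "setsockopt(SCTP_REUSE_PORT)");
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_len = sizeof(sin);
	sin.sin_port = htons(5001);
	if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		err(1, "bind");
	if (listen(fd, 1) < 0)
		err(1, "listen");
	return (0);
}

The second sketch shows the caller side of the SCTP_HMAC_IDENT change: struct sctp_hmacalgo now carries shmac_number_of_idents, and optsize must cover the structure header plus that many uint16_t identifiers, otherwise the new length check rejects the call with EINVAL. The HMAC identifier constants are assumed to come from <netinet/sctp.h>.

/* Hypothetical helper, not part of the commit. */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <stdlib.h>

static int
set_hmac_prefs(int fd)
{
	size_t len = sizeof(struct sctp_hmacalgo) + 2 * sizeof(uint16_t);
	struct sctp_hmacalgo *algo = calloc(1, len);
	int ret;

	if (algo == NULL)
		return (-1);
	algo->shmac_number_of_idents = 2;
	algo->shmac_idents[0] = SCTP_AUTH_HMAC_ID_SHA256;	/* preferred */
	algo->shmac_idents[1] = SCTP_AUTH_HMAC_ID_SHA1;	/* required fallback */
	ret = setsockopt(fd, IPPROTO_SCTP, SCTP_HMAC_IDENT, algo, len);
	free(algo);
	return (ret);
}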