author    tuexen <tuexen@FreeBSD.org>    2011-01-16 10:02:46 +0000
committer tuexen <tuexen@FreeBSD.org>    2011-01-16 10:02:46 +0000
commit    5ca779f63c301bfc79e2b5c70e9149eebd39f8a7 (patch)
tree      c04b991a6228d6c61a688473cd85fd07e7501dd3 /sys/netinet/sctp_cc_functions.c
parent    aa74efa4656fdb04bfdda91050ce8b0c4527ebcc (diff)
Add support for resource pooling to CMT.
An original version of the patch was developed by Martin Becke and Thomas Dreibholz.
MFC after: 3 months
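Background for the change (not part of the commit): with resource pooling enabled (sctp_cmt_on_off == 2), the congestion state of all paths of an association is treated as one pool. On a loss event a path's ssthresh is no longer simply halved; it is cut in proportion to that path's share of the summed ssthresh, then clamped as in the patch. The standalone sketch below reproduces that arithmetic with made-up values; struct path, rp_cut_ssthresh and the numbers are hypothetical, the real code operates on struct sctp_nets as shown in the diff further down.

    /* Sketch only: mirrors the RP ssthresh cut added by this commit. */
    #include <stdint.h>
    #include <stdio.h>

    struct path {
            uint32_t mtu;
            uint32_t cwnd;
            uint32_t ssthresh;
    };

    /*
     * Scale this path's ssthresh by its share of the association-wide
     * ssthresh sum (t_ssthresh), with the two clamps from the patch.
     */
    static void
    rp_cut_ssthresh(struct path *p, uint32_t t_ssthresh, uint32_t t_cwnd)
    {
            p->ssthresh = (uint32_t)(((uint64_t)4 * p->mtu * p->ssthresh) /
                t_ssthresh);
            if ((p->cwnd > t_cwnd / 2) &&
                (p->ssthresh < p->cwnd - t_cwnd / 2))
                    p->ssthresh = p->cwnd - t_cwnd / 2;
            if (p->ssthresh < p->mtu)
                    p->ssthresh = p->mtu;
    }

    int
    main(void)
    {
            struct path a = { 1500, 60000, 60000 };
            struct path b = { 1500, 20000, 20000 };
            /* Totals are computed once up front, as in the patch. */
            uint32_t t_ssthresh = a.ssthresh + b.ssthresh;  /* 80000 */
            uint32_t t_cwnd = a.cwnd + b.cwnd;              /* 80000 */

            rp_cut_ssthresh(&a, t_ssthresh, t_cwnd);
            rp_cut_ssthresh(&b, t_ssthresh, t_cwnd);
            /* Prints: a.ssthresh=20000 b.ssthresh=1500 */
            printf("a.ssthresh=%u b.ssthresh=%u\n", a.ssthresh, b.ssthresh);
            return (0);
    }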
Diffstat (limited to 'sys/netinet/sctp_cc_functions.c')
-rw-r--r--   sys/netinet/sctp_cc_functions.c | 163
1 file changed, 119 insertions(+), 44 deletions(-)
diff --git a/sys/netinet/sctp_cc_functions.c b/sys/netinet/sctp_cc_functions.c
index fc8811b..d1bfa00 100644
--- a/sys/netinet/sctp_cc_functions.c
+++ b/sys/netinet/sctp_cc_functions.c
@@ -66,6 +66,13 @@ sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
cwnd_in_mtu = assoc->max_burst;
net->cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
}
+ if (stcb->asoc.sctp_cmt_on_off == 2) {
+ /* In case of resource pooling initialize appropriately */
+ net->cwnd /= assoc->numnets;
+ if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
+ net->cwnd = net->mtu - sizeof(struct sctphdr);
+ }
+ }
net->ssthresh = assoc->peers_rwnd;
SDT_PROBE(sctp, cwnd, net, init,
@@ -82,7 +89,17 @@ sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
struct sctp_association *asoc)
{
struct sctp_nets *net;
-
+ uint32_t t_ssthresh, t_cwnd;
+
+ /* MT FIXME: Don't compute this over and over again */
+ t_ssthresh = 0;
+ t_cwnd = 0;
+ if (asoc->sctp_cmt_on_off == 2) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ t_ssthresh += net->ssthresh;
+ t_cwnd += net->cwnd;
+ }
+ }
/*-
* CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
* (net->fast_retran_loss_recovery == 0)))
@@ -101,9 +118,23 @@ sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
struct sctp_tmit_chunk *lchk;
int old_cwnd = net->cwnd;
- net->ssthresh = net->cwnd / 2;
- if (net->ssthresh < (net->mtu * 2)) {
- net->ssthresh = 2 * net->mtu;
+ if (asoc->sctp_cmt_on_off == 2) {
+ net->ssthresh = (uint32_t) (((uint64_t) 4 *
+ (uint64_t) net->mtu *
+ (uint64_t) net->ssthresh) /
+ (uint64_t) t_ssthresh);
+ if ((net->cwnd > t_cwnd / 2) &&
+ (net->ssthresh < net->cwnd - t_cwnd / 2)) {
+ net->ssthresh = net->cwnd - t_cwnd / 2;
+ }
+ if (net->ssthresh < net->mtu) {
+ net->ssthresh = net->mtu;
+ }
+ } else {
+ net->ssthresh = net->cwnd / 2;
+ if (net->ssthresh < (net->mtu * 2)) {
+ net->ssthresh = 2 * net->mtu;
+ }
}
net->cwnd = net->ssthresh;
SDT_PROBE(sctp, cwnd, net, fr,
@@ -167,7 +198,17 @@ sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
{
struct sctp_nets *net;
int old_cwnd;
-
+ uint32_t t_ssthresh, t_cwnd, incr;
+
+ /* MT FIXME: Don't compute this over and over again */
+ t_ssthresh = 0;
+ t_cwnd = 0;
+ if (stcb->asoc.sctp_cmt_on_off == 2) {
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ t_ssthresh += net->ssthresh;
+ t_cwnd += net->cwnd;
+ }
+ }
/******************************/
/* update cwnd and Early FR */
/******************************/
@@ -178,11 +219,8 @@ sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
* CMT fast recovery code. Need to debug.
*/
if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
- if (compare_with_wrap(asoc->last_acked_seq,
- net->fast_recovery_tsn, MAX_TSN) ||
- (asoc->last_acked_seq == net->fast_recovery_tsn) ||
- compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
- (net->pseudo_cumack == net->fast_recovery_tsn)) {
+ if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
+ SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
net->will_exit_fast_recovery = 1;
}
}
@@ -304,32 +342,39 @@ sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
if (net->cwnd <= net->ssthresh) {
/* We are in slow start */
if (net->flight_size + net->net_ack >= net->cwnd) {
- if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) {
- old_cwnd = net->cwnd;
- net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable));
- SDT_PROBE(sctp, cwnd, net, ack,
- stcb->asoc.my_vtag,
- ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
- net,
- old_cwnd, net->cwnd);
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
- sctp_log_cwnd(stcb, net, net->mtu,
- SCTP_CWND_LOG_FROM_SS);
+ old_cwnd = net->cwnd;
+ if (stcb->asoc.sctp_cmt_on_off == 2) {
+ uint32_t limit;
+
+ limit = (uint32_t) (((uint64_t) net->mtu *
+ (uint64_t) SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
+ (uint64_t) net->ssthresh) /
+ (uint64_t) t_ssthresh);
+ incr = (uint32_t) (((uint64_t) net->net_ack *
+ (uint64_t) net->ssthresh) /
+ (uint64_t) t_ssthresh);
+ if (incr > limit) {
+ incr = limit;
+ }
+ if (incr == 0) {
+ incr = 1;
}
} else {
- old_cwnd = net->cwnd;
- net->cwnd += net->net_ack;
- SDT_PROBE(sctp, cwnd, net, ack,
- stcb->asoc.my_vtag,
- ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
- net,
- old_cwnd, net->cwnd);
-
- if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
- sctp_log_cwnd(stcb, net, net->net_ack,
- SCTP_CWND_LOG_FROM_SS);
+ incr = net->net_ack;
+ if (incr > net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) {
+ incr = net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable);
}
}
+ net->cwnd += incr;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, incr,
+ SCTP_CWND_LOG_FROM_SS);
+ }
+ SDT_PROBE(sctp, cwnd, net, ack,
+ stcb->asoc.my_vtag,
+ ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
+ net,
+ old_cwnd, net->cwnd);
} else {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, net, net->net_ack,
@@ -338,6 +383,8 @@ sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
}
} else {
/* We are in congestion avoidance */
+ uint32_t incr;
+
/*
* Add to pba
*/
@@ -347,7 +394,17 @@ sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
(net->partial_bytes_acked >= net->cwnd)) {
net->partial_bytes_acked -= net->cwnd;
old_cwnd = net->cwnd;
- net->cwnd += net->mtu;
+ if (asoc->sctp_cmt_on_off == 2) {
+ incr = (uint32_t) (((uint64_t) net->mtu *
+ (uint64_t) net->ssthresh) /
+ (uint64_t) t_ssthresh);
+ if (incr == 0) {
+ incr = 1;
+ }
+ } else {
+ incr = net->mtu;
+ }
+ net->cwnd += incr;
SDT_PROBE(sctp, cwnd, net, ack,
stcb->asoc.my_vtag,
((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
@@ -394,8 +451,32 @@ void
sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net)
{
int old_cwnd = net->cwnd;
+ uint32_t t_ssthresh, t_cwnd;
- net->ssthresh = max(net->cwnd / 2, 4 * net->mtu);
+ /* MT FIXME: Don't compute this over and over again */
+ t_ssthresh = 0;
+ t_cwnd = 0;
+ if (stcb->asoc.sctp_cmt_on_off == 2) {
+ struct sctp_nets *lnet;
+
+ TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+ t_ssthresh += lnet->ssthresh;
+ t_cwnd += lnet->cwnd;
+ }
+ net->ssthresh = (uint32_t) (((uint64_t) 4 *
+ (uint64_t) net->mtu *
+ (uint64_t) net->ssthresh) /
+ (uint64_t) t_ssthresh);
+ if ((net->cwnd > t_cwnd / 2) &&
+ (net->ssthresh < net->cwnd - t_cwnd / 2)) {
+ net->ssthresh = net->cwnd - t_cwnd / 2;
+ }
+ if (net->ssthresh < net->mtu) {
+ net->ssthresh = net->mtu;
+ }
+ } else {
+ net->ssthresh = max(net->cwnd / 2, 4 * net->mtu);
+ }
net->cwnd = net->mtu;
net->partial_bytes_acked = 0;
SDT_PROBE(sctp, cwnd, net, to,
@@ -844,11 +925,8 @@ sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
* CMT fast recovery code. Need to debug.
*/
if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
- if (compare_with_wrap(asoc->last_acked_seq,
- net->fast_recovery_tsn, MAX_TSN) ||
- (asoc->last_acked_seq == net->fast_recovery_tsn) ||
- compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
- (net->pseudo_cumack == net->fast_recovery_tsn)) {
+ if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
+ SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
net->will_exit_fast_recovery = 1;
}
}
@@ -1329,11 +1407,8 @@ sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
* CMT fast recovery code. Need to debug.
*/
if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
- if (compare_with_wrap(asoc->last_acked_seq,
- net->fast_recovery_tsn, MAX_TSN) ||
- (asoc->last_acked_seq == net->fast_recovery_tsn) ||
- compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
- (net->pseudo_cumack == net->fast_recovery_tsn)) {
+ if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
+ SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
net->will_exit_fast_recovery = 1;
}
}
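As a worked example of the slow-start branch added in sctp_cwnd_update_after_sack() above (illustration only, not part of the commit): with resource pooling, each path grows its cwnd by its share of the newly acked bytes, capped at its share of L * MTU. The values below are invented, and sctp_L2_abc_variable is stood in for by a plain variable.

    /* Sketch only: the RP slow-start increment with hypothetical numbers. */
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            uint32_t mtu = 1500, abc_l_var = 2;   /* stands in for sctp_L2_abc_variable */
            uint32_t ssthresh = 60000;            /* this path */
            uint32_t t_ssthresh = 80000;          /* sum over all paths */
            uint32_t net_ack = 4500;              /* newly acked bytes on this path */
            uint32_t limit, incr;

            /* Cap the increase at this path's share of L * MTU. */
            limit = (uint32_t)(((uint64_t)mtu * abc_l_var * ssthresh) / t_ssthresh);
            /* Grow by this path's share of the newly acked bytes. */
            incr = (uint32_t)(((uint64_t)net_ack * ssthresh) / t_ssthresh);
            if (incr > limit)
                    incr = limit;
            if (incr == 0)
                    incr = 1;
            /* Prints: limit=2250 incr=2250 (3375 capped at the limit) */
            printf("limit=%u incr=%u\n", limit, incr);
            return (0);
    }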