-rw-r--r--  sys/netinet/tcp_input.c      52
-rw-r--r--  sys/netinet/tcp_output.c     53
-rw-r--r--  sys/netinet/tcp_reass.c      52
-rw-r--r--  sys/netinet/tcp_sack.c        2
-rw-r--r--  sys/netinet/tcp_subr.c       29
-rw-r--r--  sys/netinet/tcp_syncache.c    2
-rw-r--r--  sys/netinet/tcp_timer.c     474
-rw-r--r--  sys/netinet/tcp_timer.h      21
-rw-r--r--  sys/netinet/tcp_timewait.c   29
-rw-r--r--  sys/netinet/tcp_usrreq.c     18
-rw-r--r--  sys/netinet/tcp_var.h        10
11 files changed, 427 insertions, 315 deletions
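
The patch below collapses the five per-connection TCP callouts (retransmit, persist, keepalive, 2MSL and delayed ACK) into a single callout multiplexed through a new struct tcp_timer, and converts all callers from the callout(9) interface to the new tcp_timer_activate()/tcp_timer_active() functions. A rough caller-side conversion guide (illustrative only, not part of the patch):

	/* Before (per-timer callouts)                         After (consolidated interface)                   */
	callout_reset(tp->tt_rexmt, tp->t_rxtcur,
	    tcp_timer_rexmt, tp);                      ->     tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
	callout_stop(tp->tt_rexmt);                    ->     tcp_timer_activate(tp, TT_REXMT, 0);
	callout_active(tp->tt_persist)                 ->     tcp_timer_active(tp, TT_PERSIST)
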
diff --git a/sys/netinet/tcp_input.c b/sys/netinet/tcp_input.c
index b8fc45f..a07aa8b 100644
--- a/sys/netinet/tcp_input.c
+++ b/sys/netinet/tcp_input.c
@@ -210,7 +210,7 @@ do { \
* - this is a half-synchronized T/TCP connection.
*/
#define DELAY_ACK(tp) \
- ((!callout_active(tp->tt_delack) && \
+ ((!tcp_timer_active(tp, TT_DELACK) && \
(tp->t_flags & TF_RXWIN0SENT) == 0) && \
(tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
@@ -1043,7 +1043,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
*/
tp->t_rcvtime = ticks;
if (TCPS_HAVEESTABLISHED(tp->t_state))
- callout_reset(tp->tt_keep, tcp_keepidle, tcp_timer_keep, tp);
+ tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
/*
* Unscale the window into a 32-bit value.
@@ -1231,11 +1231,10 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
#endif
*/
if (tp->snd_una == tp->snd_max)
- callout_stop(tp->tt_rexmt);
- else if (!callout_active(tp->tt_persist))
- callout_reset(tp->tt_rexmt,
- tp->t_rxtcur,
- tcp_timer_rexmt, tp);
+ tcp_timer_activate(tp, TT_REXMT, 0);
+ else if (!tcp_timer_active(tp, TT_PERSIST))
+ tcp_timer_activate(tp, TT_REXMT,
+ tp->t_rxtcur);
sowwakeup(so);
if (so->so_snd.sb_cc)
@@ -1441,8 +1440,8 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
* ACKNOW will be turned on later.
*/
if (DELAY_ACK(tp) && tlen != 0)
- callout_reset(tp->tt_delack, tcp_delacktime,
- tcp_timer_delack, tp);
+ tcp_timer_activate(tp, TT_DELACK,
+ tcp_delacktime);
else
tp->t_flags |= TF_ACKNOW;
/*
@@ -1458,8 +1457,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
thflags &= ~TH_SYN;
} else {
tp->t_state = TCPS_ESTABLISHED;
- callout_reset(tp->tt_keep, tcp_keepidle,
- tcp_timer_keep, tp);
+ tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
}
} else {
/*
@@ -1473,7 +1471,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
* If there was no CC option, clear cached CC value.
*/
tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
- callout_stop(tp->tt_rexmt);
+ tcp_timer_activate(tp, TT_REXMT, 0);
tp->t_state = TCPS_SYN_RECEIVED;
}
@@ -1863,8 +1861,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
tp->t_flags &= ~TF_NEEDFIN;
} else {
tp->t_state = TCPS_ESTABLISHED;
- callout_reset(tp->tt_keep, tcp_keepidle,
- tcp_timer_keep, tp);
+ tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
}
/*
* If segment contains data or ACK, will call tcp_reass()
@@ -1928,7 +1925,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
* to keep a constant cwnd packets in the
* network.
*/
- if (!callout_active(tp->tt_rexmt) ||
+ if (!tcp_timer_active(tp, TT_REXMT) ||
th->th_ack != tp->snd_una)
tp->t_dupacks = 0;
else if (++tp->t_dupacks > tcprexmtthresh ||
@@ -1984,7 +1981,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
tp->snd_ssthresh = win * tp->t_maxseg;
ENTER_FASTRECOVERY(tp);
tp->snd_recover = tp->snd_max;
- callout_stop(tp->tt_rexmt);
+ tcp_timer_activate(tp, TT_REXMT, 0);
tp->t_rtttime = 0;
if (tp->sack_enable) {
tcpstat.tcps_sack_recovery_episode++;
@@ -2162,11 +2159,10 @@ process_ACK:
* timer, using current (possibly backed-off) value.
*/
if (th->th_ack == tp->snd_max) {
- callout_stop(tp->tt_rexmt);
+ tcp_timer_activate(tp, TT_REXMT, 0);
needoutput = 1;
- } else if (!callout_active(tp->tt_persist))
- callout_reset(tp->tt_rexmt, tp->t_rxtcur,
- tcp_timer_rexmt, tp);
+ } else if (!tcp_timer_active(tp, TT_PERSIST))
+ tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
/*
* If no data (only SYN) was ACK'd,
@@ -2245,8 +2241,7 @@ process_ACK:
soisdisconnected(so);
timeout = (tcp_fast_finwait2_recycle) ?
tcp_finwait2_timeout : tcp_maxidle;
- callout_reset(tp->tt_2msl, timeout,
- tcp_timer_2msl, tp);
+ tcp_timer_activate(tp, TT_2MSL, timeout);
}
tp->t_state = TCPS_FIN_WAIT_2;
}
@@ -2293,8 +2288,7 @@ process_ACK:
case TCPS_TIME_WAIT:
KASSERT(tp->t_state != TCPS_TIME_WAIT,
("%s: timewait", __func__));
- callout_reset(tp->tt_2msl, 2 * tcp_msl,
- tcp_timer_2msl, tp);
+ tcp_timer_activate(tp, TT_2MSL, 2 * tcp_msl);
goto dropafterack;
}
}
@@ -2511,8 +2505,7 @@ dodata: /* XXX */
case TCPS_TIME_WAIT:
KASSERT(tp->t_state != TCPS_TIME_WAIT,
("%s: timewait", __func__));
- callout_reset(tp->tt_2msl, 2 * tcp_msl,
- tcp_timer_2msl, tp);
+ tcp_timer_activate(tp, TT_2MSL, 2 * tcp_msl);
break;
}
}
@@ -2536,8 +2529,7 @@ check_delack:
INP_LOCK_ASSERT(tp->t_inpcb);
if (tp->t_flags & TF_DELACK) {
tp->t_flags &= ~TF_DELACK;
- callout_reset(tp->tt_delack, tcp_delacktime,
- tcp_timer_delack, tp);
+ tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
}
INP_UNLOCK(tp->t_inpcb);
return (0);
@@ -2619,11 +2611,11 @@ tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
#ifdef INET6
struct ip6_hdr *ip6;
#endif
-
/*
* Generate a RST, dropping incoming segment.
* Make ACK acceptable to originator of segment.
* Don't bother to respond if destination was broadcast/multicast.
+ * tp may be NULL.
*/
if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
goto drop;
@@ -3190,7 +3182,7 @@ tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
tcp_seq onxt = tp->snd_nxt;
u_long ocwnd = tp->snd_cwnd;
- callout_stop(tp->tt_rexmt);
+ tcp_timer_activate(tp, TT_REXMT, 0);
tp->t_rtttime = 0;
tp->snd_nxt = th->th_ack;
/*
diff --git a/sys/netinet/tcp_output.c b/sys/netinet/tcp_output.c
index 51a8ce3..1b05abb 100644
--- a/sys/netinet/tcp_output.c
+++ b/sys/netinet/tcp_output.c
@@ -298,7 +298,7 @@ after_sack_rexmit:
flags &= ~TH_FIN;
sendwin = 1;
} else {
- callout_stop(tp->tt_persist);
+ tcp_timer_activate(tp, TT_PERSIST, 0);
tp->t_rxtshift = 0;
}
}
@@ -384,10 +384,10 @@ after_sack_rexmit:
*/
len = 0;
if (sendwin == 0) {
- callout_stop(tp->tt_rexmt);
+ tcp_timer_activate(tp, TT_REXMT, 0);
tp->t_rxtshift = 0;
tp->snd_nxt = tp->snd_una;
- if (!callout_active(tp->tt_persist))
+ if (!tcp_timer_active(tp, TT_PERSIST))
tcp_setpersist(tp);
}
}
@@ -567,10 +567,9 @@ after_sack_rexmit:
* that the retransmission timer is set.
*/
if (tp->sack_enable && SEQ_GT(tp->snd_max, tp->snd_una) &&
- !callout_active(tp->tt_rexmt) &&
- !callout_active(tp->tt_persist)) {
- callout_reset(tp->tt_rexmt, tp->t_rxtcur,
- tcp_timer_rexmt, tp);
+ !tcp_timer_active(tp, TT_REXMT) &&
+ !tcp_timer_active(tp, TT_PERSIST)) {
+ tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
goto just_return;
}
/*
@@ -581,11 +580,11 @@ after_sack_rexmit:
* persisting to move a small or zero window
* (re)transmitting and thereby not persisting
*
- * callout_active(tp->tt_persist)
+ * tcp_timer_active(tp, TT_PERSIST)
* is true when we are in persist state.
* (tp->t_flags & TF_FORCEDATA)
* is set when we are called to send a persist packet.
- * callout_active(tp->tt_rexmt)
+ * tcp_timer_active(tp, TT_REXMT)
* is set when we are retransmitting
* The output side is idle when both timers are zero.
*
@@ -595,8 +594,8 @@ after_sack_rexmit:
* if window is nonzero, transmit what we can,
* otherwise force out a byte.
*/
- if (so->so_snd.sb_cc && !callout_active(tp->tt_rexmt) &&
- !callout_active(tp->tt_persist)) {
+ if (so->so_snd.sb_cc && !tcp_timer_active(tp, TT_REXMT) &&
+ !tcp_timer_active(tp, TT_PERSIST)) {
tp->t_rxtshift = 0;
tcp_setpersist(tp);
}
@@ -883,8 +882,8 @@ send:
* (retransmit and persist are mutually exclusive...)
*/
if (sack_rxmit == 0) {
- if (len || (flags & (TH_SYN|TH_FIN))
- || callout_active(tp->tt_persist))
+ if (len || (flags & (TH_SYN|TH_FIN)) ||
+ tcp_timer_active(tp, TT_PERSIST))
th->th_seq = htonl(tp->snd_nxt);
else
th->th_seq = htonl(tp->snd_max);
@@ -990,7 +989,7 @@ send:
* the retransmit. In persist state, just set snd_max.
*/
if ((tp->t_flags & TF_FORCEDATA) == 0 ||
- !callout_active(tp->tt_persist)) {
+ !tcp_timer_active(tp, TT_PERSIST)) {
tcp_seq startseq = tp->snd_nxt;
/*
@@ -1029,15 +1028,14 @@ send:
* of retransmit time.
*/
timer:
- if (!callout_active(tp->tt_rexmt) &&
+ if (!tcp_timer_active(tp, TT_REXMT) &&
((sack_rxmit && tp->snd_nxt != tp->snd_max) ||
(tp->snd_nxt != tp->snd_una))) {
- if (callout_active(tp->tt_persist)) {
- callout_stop(tp->tt_persist);
+ if (tcp_timer_active(tp, TT_PERSIST)) {
+ tcp_timer_activate(tp, TT_PERSIST, 0);
tp->t_rxtshift = 0;
}
- callout_reset(tp->tt_rexmt, tp->t_rxtcur,
- tcp_timer_rexmt, tp);
+ tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
}
} else {
/*
@@ -1138,7 +1136,7 @@ timer:
* away would be the really correct behavior instead.
*/
if (((tp->t_flags & TF_FORCEDATA) == 0 ||
- !callout_active(tp->tt_persist)) &&
+ !tcp_timer_active(tp, TT_PERSIST)) &&
((flags & TH_SYN) == 0) &&
(error != EPERM)) {
if (sack_rxmit) {
@@ -1156,10 +1154,9 @@ out:
tp->t_softerror = error;
return (error);
case ENOBUFS:
- if (!callout_active(tp->tt_rexmt) &&
- !callout_active(tp->tt_persist))
- callout_reset(tp->tt_rexmt, tp->t_rxtcur,
- tcp_timer_rexmt, tp);
+ if (!tcp_timer_active(tp, TT_REXMT) &&
+ !tcp_timer_active(tp, TT_PERSIST))
+ tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
tp->snd_cwnd = tp->t_maxseg;
return (0);
case EMSGSIZE:
@@ -1207,8 +1204,8 @@ out:
tp->rcv_adv = tp->rcv_nxt + recwin;
tp->last_ack_sent = tp->rcv_nxt;
tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
- if (callout_active(tp->tt_delack))
- callout_stop(tp->tt_delack);
+ if (tcp_timer_active(tp, TT_DELACK))
+ tcp_timer_activate(tp, TT_DELACK, 0);
#if 0
/*
* This completely breaks TCP if newreno is turned on. What happens
@@ -1230,14 +1227,14 @@ tcp_setpersist(struct tcpcb *tp)
int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;
int tt;
- if (callout_active(tp->tt_rexmt))
+ if (tcp_timer_active(tp, TT_REXMT))
panic("tcp_setpersist: retransmit pending");
/*
* Start/restart persistance timer.
*/
TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
TCPTV_PERSMIN, TCPTV_PERSMAX);
- callout_reset(tp->tt_persist, tt, tcp_timer_persist, tp);
+ tcp_timer_activate(tp, TT_PERSIST, tt);
if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
tp->t_rxtshift++;
}
diff --git a/sys/netinet/tcp_reass.c b/sys/netinet/tcp_reass.c
index b8fc45f..a07aa8b 100644
--- a/sys/netinet/tcp_reass.c
+++ b/sys/netinet/tcp_reass.c
@@ -210,7 +210,7 @@ do { \
* - this is a half-synchronized T/TCP connection.
*/
#define DELAY_ACK(tp) \
- ((!callout_active(tp->tt_delack) && \
+ ((!tcp_timer_active(tp, TT_DELACK) && \
(tp->t_flags & TF_RXWIN0SENT) == 0) && \
(tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
@@ -1043,7 +1043,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
*/
tp->t_rcvtime = ticks;
if (TCPS_HAVEESTABLISHED(tp->t_state))
- callout_reset(tp->tt_keep, tcp_keepidle, tcp_timer_keep, tp);
+ tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
/*
* Unscale the window into a 32-bit value.
@@ -1231,11 +1231,10 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
#endif
*/
if (tp->snd_una == tp->snd_max)
- callout_stop(tp->tt_rexmt);
- else if (!callout_active(tp->tt_persist))
- callout_reset(tp->tt_rexmt,
- tp->t_rxtcur,
- tcp_timer_rexmt, tp);
+ tcp_timer_activate(tp, TT_REXMT, 0);
+ else if (!tcp_timer_active(tp, TT_PERSIST))
+ tcp_timer_activate(tp, TT_REXMT,
+ tp->t_rxtcur);
sowwakeup(so);
if (so->so_snd.sb_cc)
@@ -1441,8 +1440,8 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
* ACKNOW will be turned on later.
*/
if (DELAY_ACK(tp) && tlen != 0)
- callout_reset(tp->tt_delack, tcp_delacktime,
- tcp_timer_delack, tp);
+ tcp_timer_activate(tp, TT_DELACK,
+ tcp_delacktime);
else
tp->t_flags |= TF_ACKNOW;
/*
@@ -1458,8 +1457,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
thflags &= ~TH_SYN;
} else {
tp->t_state = TCPS_ESTABLISHED;
- callout_reset(tp->tt_keep, tcp_keepidle,
- tcp_timer_keep, tp);
+ tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
}
} else {
/*
@@ -1473,7 +1471,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
* If there was no CC option, clear cached CC value.
*/
tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
- callout_stop(tp->tt_rexmt);
+ tcp_timer_activate(tp, TT_REXMT, 0);
tp->t_state = TCPS_SYN_RECEIVED;
}
@@ -1863,8 +1861,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
tp->t_flags &= ~TF_NEEDFIN;
} else {
tp->t_state = TCPS_ESTABLISHED;
- callout_reset(tp->tt_keep, tcp_keepidle,
- tcp_timer_keep, tp);
+ tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
}
/*
* If segment contains data or ACK, will call tcp_reass()
@@ -1928,7 +1925,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
* to keep a constant cwnd packets in the
* network.
*/
- if (!callout_active(tp->tt_rexmt) ||
+ if (!tcp_timer_active(tp, TT_REXMT) ||
th->th_ack != tp->snd_una)
tp->t_dupacks = 0;
else if (++tp->t_dupacks > tcprexmtthresh ||
@@ -1984,7 +1981,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
tp->snd_ssthresh = win * tp->t_maxseg;
ENTER_FASTRECOVERY(tp);
tp->snd_recover = tp->snd_max;
- callout_stop(tp->tt_rexmt);
+ tcp_timer_activate(tp, TT_REXMT, 0);
tp->t_rtttime = 0;
if (tp->sack_enable) {
tcpstat.tcps_sack_recovery_episode++;
@@ -2162,11 +2159,10 @@ process_ACK:
* timer, using current (possibly backed-off) value.
*/
if (th->th_ack == tp->snd_max) {
- callout_stop(tp->tt_rexmt);
+ tcp_timer_activate(tp, TT_REXMT, 0);
needoutput = 1;
- } else if (!callout_active(tp->tt_persist))
- callout_reset(tp->tt_rexmt, tp->t_rxtcur,
- tcp_timer_rexmt, tp);
+ } else if (!tcp_timer_active(tp, TT_PERSIST))
+ tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
/*
* If no data (only SYN) was ACK'd,
@@ -2245,8 +2241,7 @@ process_ACK:
soisdisconnected(so);
timeout = (tcp_fast_finwait2_recycle) ?
tcp_finwait2_timeout : tcp_maxidle;
- callout_reset(tp->tt_2msl, timeout,
- tcp_timer_2msl, tp);
+ tcp_timer_activate(tp, TT_2MSL, timeout);
}
tp->t_state = TCPS_FIN_WAIT_2;
}
@@ -2293,8 +2288,7 @@ process_ACK:
case TCPS_TIME_WAIT:
KASSERT(tp->t_state != TCPS_TIME_WAIT,
("%s: timewait", __func__));
- callout_reset(tp->tt_2msl, 2 * tcp_msl,
- tcp_timer_2msl, tp);
+ tcp_timer_activate(tp, TT_2MSL, 2 * tcp_msl);
goto dropafterack;
}
}
@@ -2511,8 +2505,7 @@ dodata: /* XXX */
case TCPS_TIME_WAIT:
KASSERT(tp->t_state != TCPS_TIME_WAIT,
("%s: timewait", __func__));
- callout_reset(tp->tt_2msl, 2 * tcp_msl,
- tcp_timer_2msl, tp);
+ tcp_timer_activate(tp, TT_2MSL, 2 * tcp_msl);
break;
}
}
@@ -2536,8 +2529,7 @@ check_delack:
INP_LOCK_ASSERT(tp->t_inpcb);
if (tp->t_flags & TF_DELACK) {
tp->t_flags &= ~TF_DELACK;
- callout_reset(tp->tt_delack, tcp_delacktime,
- tcp_timer_delack, tp);
+ tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
}
INP_UNLOCK(tp->t_inpcb);
return (0);
@@ -2619,11 +2611,11 @@ tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
#ifdef INET6
struct ip6_hdr *ip6;
#endif
-
/*
* Generate a RST, dropping incoming segment.
* Make ACK acceptable to originator of segment.
* Don't bother to respond if destination was broadcast/multicast.
+ * tp may be NULL.
*/
if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
goto drop;
@@ -3190,7 +3182,7 @@ tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
tcp_seq onxt = tp->snd_nxt;
u_long ocwnd = tp->snd_cwnd;
- callout_stop(tp->tt_rexmt);
+ tcp_timer_activate(tp, TT_REXMT, 0);
tp->t_rtttime = 0;
tp->snd_nxt = th->th_ack;
/*
diff --git a/sys/netinet/tcp_sack.c b/sys/netinet/tcp_sack.c
index 70ef9d5..56d2688 100644
--- a/sys/netinet/tcp_sack.c
+++ b/sys/netinet/tcp_sack.c
@@ -589,7 +589,7 @@ tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th)
int num_segs = 1;
INP_LOCK_ASSERT(tp->t_inpcb);
- callout_stop(tp->tt_rexmt);
+ tcp_timer_activate(tp, TT_REXMT, 0);
tp->t_rtttime = 0;
/* send one or 2 segments based on how much new data was acked */
if (((th->th_ack - tp->snd_una) / tp->t_maxseg) > 2)
diff --git a/sys/netinet/tcp_subr.c b/sys/netinet/tcp_subr.c
index c705121e..dc4a695 100644
--- a/sys/netinet/tcp_subr.c
+++ b/sys/netinet/tcp_subr.c
@@ -262,10 +262,9 @@ static void tcp_isn_tick(void *);
* separate because the tcpcb structure is exported to userland for sysctl
* parsing purposes, which do not know about callouts.
*/
-struct tcpcb_mem {
- struct tcpcb tcb;
- struct callout tcpcb_mem_rexmt, tcpcb_mem_persist, tcpcb_mem_keep;
- struct callout tcpcb_mem_2msl, tcpcb_mem_delack;
+struct tcpcb_mem {
+ struct tcpcb tcb;
+ struct tcp_timer tt;
};
static uma_zone_t tcpcb_zone;
@@ -490,7 +489,6 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th,
if (tp != NULL) {
inp = tp->t_inpcb;
KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
INP_LOCK_ASSERT(inp);
} else
inp = NULL;
@@ -645,6 +643,7 @@ tcp_newtcpcb(struct inpcb *inp)
if (tm == NULL)
return (NULL);
tp = &tm->tcb;
+ tp->t_timers = &tm->tt;
/* LIST_INIT(&tp->t_segq); */ /* XXX covered by M_ZERO */
tp->t_maxseg = tp->t_maxopd =
#ifdef INET6
@@ -653,11 +652,8 @@ tcp_newtcpcb(struct inpcb *inp)
tcp_mssdflt;
/* Set up our timeouts. */
- callout_init(tp->tt_rexmt = &tm->tcpcb_mem_rexmt, NET_CALLOUT_MPSAFE);
- callout_init(tp->tt_persist = &tm->tcpcb_mem_persist, NET_CALLOUT_MPSAFE);
- callout_init(tp->tt_keep = &tm->tcpcb_mem_keep, NET_CALLOUT_MPSAFE);
- callout_init(tp->tt_2msl = &tm->tcpcb_mem_2msl, NET_CALLOUT_MPSAFE);
- callout_init(tp->tt_delack = &tm->tcpcb_mem_delack, NET_CALLOUT_MPSAFE);
+ callout_init_mtx(&tp->t_timers->tt_timer, &inp->inp_mtx,
+ CALLOUT_RETURNUNLOCKED);
if (tcp_do_rfc1323)
tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
@@ -728,12 +724,15 @@ tcp_discardcb(struct tcpcb *tp)
/*
* Make sure that all of our timers are stopped before we
* delete the PCB.
+ *
+ * XXX: callout_stop() may race and a callout may already
+ * try to obtain the INP_LOCK. Only callout_drain() would
+ * stop this but it would cause a LOR thus we can't use it.
+ * The tcp_timer() function contains a lot of checks to
+ * handle this case rather gracefully.
*/
- callout_stop(tp->tt_rexmt);
- callout_stop(tp->tt_persist);
- callout_stop(tp->tt_keep);
- callout_stop(tp->tt_2msl);
- callout_stop(tp->tt_delack);
+ tp->t_timers->tt_active = 0;
+ callout_stop(&tp->t_timers->tt_timer);
/*
* If we got enough samples through the srtt filter,
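
The single callout is now initialized with callout_init_mtx() against the inpcb mutex and CALLOUT_RETURNUNLOCKED, so the callout framework acquires the INP_LOCK before invoking the handler and the handler must drop it itself before returning, exactly as tcp_timer() does. A minimal handler skeleton following that convention (illustrative only, not part of the patch):

	static void
	example_timer(void *xinp)
	{
		struct inpcb *inp = xinp;

		INP_LOCK_ASSERT(inp);	/* lock already held by the callout framework */
		/* ... timer work, possibly rearming the callout ... */
		INP_UNLOCK(inp);	/* CALLOUT_RETURNUNLOCKED: handler releases the lock */
	}
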
diff --git a/sys/netinet/tcp_syncache.c b/sys/netinet/tcp_syncache.c
index 2a11710..128057b 100644
--- a/sys/netinet/tcp_syncache.c
+++ b/sys/netinet/tcp_syncache.c
@@ -730,7 +730,7 @@ syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
*/
if (sc->sc_rxmits > 1)
tp->snd_cwnd = tp->t_maxseg;
- callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp);
+ tcp_timer_activate(tp, TT_KEEP, tcp_keepinit);
INP_UNLOCK(inp);
diff --git a/sys/netinet/tcp_timer.c b/sys/netinet/tcp_timer.c
index f9d6f97..c3fd03c 100644
--- a/sys/netinet/tcp_timer.c
+++ b/sys/netinet/tcp_timer.c
@@ -35,7 +35,9 @@
#include <sys/param.h>
#include <sys/kernel.h>
+#include <sys/ktr.h>
#include <sys/lock.h>
+#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
@@ -113,6 +115,13 @@ int tcp_maxpersistidle;
/* max idle time in persist */
int tcp_maxidle;
+static void tcp_timer(void *);
+static int tcp_timer_delack(struct tcpcb *, struct inpcb *);
+static int tcp_timer_2msl(struct tcpcb *, struct inpcb *);
+static int tcp_timer_keep(struct tcpcb *, struct inpcb *);
+static int tcp_timer_persist(struct tcpcb *, struct inpcb *);
+static int tcp_timer_rexmt(struct tcpcb *, struct inpcb *);
+
/*
* Tcp protocol timeout routine called every 500 ms.
* Updates timestamps used for TCP
@@ -140,82 +149,297 @@ static int tcp_timer_race;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, timer_race, CTLFLAG_RD, &tcp_timer_race,
0, "Count of t_inpcb races on tcp_discardcb");
-/*
- * TCP timer processing.
- */
void
-tcp_timer_delack(void *xtp)
+tcp_timer_activate(struct tcpcb *tp, int timer_type, u_int delta)
+{
+ struct inpcb *inp = tp->t_inpcb;
+ struct tcp_timer *tt = tp->t_timers;
+ int tick = ticks; /* Stable time base. */
+ int next = delta ? tick + delta : 0;
+
+ INP_LOCK_ASSERT(inp);
+
+ CTR6(KTR_NET, "%p %s inp %p active %x delta %i nextc %i",
+ tp, __func__, inp, tt->tt_active, delta, tt->tt_nextc);
+
+ /* Set new value for timer. */
+ switch(timer_type) {
+ case TT_DELACK:
+ CTR4(KTR_NET, "%p %s TT_DELACK old %i new %i",
+ tp, __func__, tt->tt_delack, next);
+ tt->tt_delack = next;
+ break;
+ case TT_REXMT:
+ CTR4(KTR_NET, "%p %s TT_REXMT old %i new %i",
+ tp, __func__, tt->tt_rexmt, next);
+ tt->tt_rexmt = next;
+ break;
+ case TT_PERSIST:
+ CTR4(KTR_NET, "%p %s TT_PERSIST old %i new %i",
+ tp, __func__, tt->tt_persist, next);
+ tt->tt_persist = next;
+ break;
+ case TT_KEEP:
+ CTR4(KTR_NET, "%p %s TT_KEEP old %i new %i",
+ tp, __func__, tt->tt_keep, next);
+ tt->tt_keep = next;
+ break;
+ case TT_2MSL:
+ CTR4(KTR_NET, "%p %s TT_2MSL old %i new %i",
+ tp, __func__, tt->tt_2msl, next);
+ tt->tt_2msl = next;
+ break;
+ case 0: /* Dummy for timer rescan. */
+ CTR3(KTR_NET, "%p %s timer rescan new %i", tp, __func__, next);
+ break;
+ }
+
+ /* If some other timer is active and is scheduled sooner, just return. */
+ if (tt->tt_active != timer_type && tt->tt_nextc < next &&
+ callout_active(&tt->tt_timer))
+ return;
+
+ /* Select next timer to schedule. */
+ tt->tt_nextc = INT_MAX;
+ tt->tt_active = 0;
+ if (tt->tt_delack && tt->tt_delack < tt->tt_nextc) {
+ tt->tt_nextc = tt->tt_delack;
+ tt->tt_active = TT_DELACK;
+ }
+ if (tt->tt_rexmt && tt->tt_rexmt < tt->tt_nextc) {
+ tt->tt_nextc = tt->tt_rexmt;
+ tt->tt_active = TT_REXMT;
+ }
+ if (tt->tt_persist && tt->tt_persist < tt->tt_nextc) {
+ tt->tt_nextc = tt->tt_persist;
+ tt->tt_active = TT_PERSIST;
+ }
+ if (tt->tt_keep && tt->tt_keep < tt->tt_nextc) {
+ tt->tt_nextc = tt->tt_keep;
+ tt->tt_active = TT_KEEP;
+ }
+ if (tt->tt_2msl && tt->tt_2msl < tt->tt_nextc) {
+ tt->tt_nextc = tt->tt_2msl;
+ tt->tt_active = TT_2MSL;
+ }
+
+ /* Rearm callout with new timer if we found one. */
+ if (tt->tt_active) {
+ CTR4(KTR_NET, "%p %s callout_reset active %x nextc in %i",
+ tp, __func__, tt->tt_active, tt->tt_nextc - tick);
+ callout_reset(&tt->tt_timer,
+ tt->tt_nextc - tick, tcp_timer, (void *)inp);
+ } else {
+ CTR2(KTR_NET, "%p %s callout_stop", tp, __func__);
+ callout_stop(&tt->tt_timer);
+ tt->tt_nextc = 0;
+ }
+
+ return;
+}
+
+int
+tcp_timer_active(struct tcpcb *tp, int timer_type)
+{
+
+ switch (timer_type) {
+ case TT_DELACK:
+ CTR3(KTR_NET, "%p %s TT_DELACK %i",
+ tp, __func__, tp->t_timers->tt_delack);
+ return (tp->t_timers->tt_delack ? 1 : 0);
+ break;
+ case TT_REXMT:
+ CTR3(KTR_NET, "%p %s TT_REXMT %i",
+ tp, __func__, tp->t_timers->tt_rexmt);
+ return (tp->t_timers->tt_rexmt ? 1 : 0);
+ break;
+ case TT_PERSIST:
+ CTR3(KTR_NET, "%p %s TT_PERSIST %i",
+ tp, __func__, tp->t_timers->tt_persist);
+ return (tp->t_timers->tt_persist ? 1 : 0);
+ break;
+ case TT_KEEP:
+ CTR3(KTR_NET, "%p %s TT_KEEP %i",
+ tp, __func__, tp->t_timers->tt_keep);
+ return (tp->t_timers->tt_keep ? 1 : 0);
+ break;
+ case TT_2MSL:
+ CTR3(KTR_NET, "%p %s TT_2MSL %i",
+ tp, __func__, tp->t_timers->tt_2msl);
+ return (tp->t_timers->tt_2msl ? 1 : 0);
+ break;
+ }
+ return (0);
+}
+
+static void
+tcp_timer(void *xinp)
{
- struct tcpcb *tp = xtp;
- struct inpcb *inp;
+ struct inpcb *inp = (struct inpcb *)xinp;
+ struct tcpcb *tp = intotcpcb(inp);
+ struct tcp_timer *tt;
+ int tick = ticks;
+ int down, timer;
+
+ /* INP lock was obtained by callout. */
+ INP_LOCK_ASSERT(inp);
- INP_INFO_RLOCK(&tcbinfo);
- inp = tp->t_inpcb;
/*
- * XXXRW: While this assert is in fact correct, bugs in the tcpcb
- * tear-down mean we need it as a work-around for races between
- * timers and tcp_discardcb().
- *
- * KASSERT(inp != NULL, ("tcp_timer_delack: inp == NULL"));
+ * We've got a couple of race conditions here:
+ * - The tcpcb was converted into a compressed TW pcb. All our
+ * timers have been stopped while this callout already tried
+ * to obtain the inpcb lock. TW pcbs have their own timers
+ * and we just return.
*/
- if (inp == NULL) {
- tcp_timer_race++;
- INP_INFO_RUNLOCK(&tcbinfo);
+ if (inp->inp_vflag & INP_TIMEWAIT)
+ return;
+ /*
+ * - The tcpcb was discarded. All our timers have been stopped
+ * while this callout already tried to obtain the inpcb lock
+ * and we just return.
+ */
+ if (tp == NULL)
return;
+
+ tt = tp->t_timers; /* Initialize. */
+ CTR6(KTR_NET, "%p %s inp %p active %x tick %i nextc %i",
+ tp, __func__, inp, tt->tt_active, tick, tt->tt_nextc);
+
+ /*
+ * - We may have been waiting on the lock while the tcpcb has
+ * been scheduled for destruction. In this case no active
+ * timers remain and we just return.
+ */
+ if (tt->tt_active == 0)
+ goto done;
+
+ /*
+ * - The timer was rescheduled while this callout was already
+ * waiting on the lock. This may happen when a packet just
+ * came in. Rescan and reschedule the timer in case we
+ * just turned it off.
+ */
+ if (tick < tt->tt_nextc)
+ goto rescan;
+
+ /*
+ * Mark as done. The active bit in struct callout is not
+ * automatically cleared. See callout(9) for more info.
+ * In tcp_discardcb() we depend on the correctly cleared
+ * active bit for faster processing.
+ */
+ callout_deactivate(&tt->tt_timer);
+
+ /* Check which timer has fired and remove this timer activation. */
+ timer = tt->tt_active;
+ tt->tt_active = 0;
+ tt->tt_nextc = 0;
+
+ switch (timer) {
+ case TT_DELACK:
+ CTR2(KTR_NET, "%p %s running TT_DELACK", tp, __func__);
+ tt->tt_delack = 0;
+ down = tcp_timer_delack(tp, inp); /* down == 0 */
+ break;
+ case TT_REXMT:
+ CTR2(KTR_NET, "%p %s running TT_REXMT", tp, __func__);
+ tt->tt_rexmt = 0;
+ down = tcp_timer_rexmt(tp, inp);
+ break;
+ case TT_PERSIST:
+ CTR2(KTR_NET, "%p %s running TT_PERSIST", tp, __func__);
+ tt->tt_persist = 0;
+ down = tcp_timer_persist(tp, inp);
+ break;
+ case TT_KEEP:
+ CTR2(KTR_NET, "%p %s running TT_KEEP", tp, __func__);
+ tt->tt_keep = 0;
+ down = tcp_timer_keep(tp, inp);
+ break;
+ case TT_2MSL:
+ CTR2(KTR_NET, "%p %s running TT_2MSL", tp, __func__);
+ tt->tt_2msl = 0;
+ down = tcp_timer_2msl(tp, inp);
+ break;
+ default:
+ CTR2(KTR_NET, "%p %s running nothing", tp, __func__);
+ down = 0;
}
+
+ CTR4(KTR_NET, "%p %s down %i active %x",
+ tp, __func__, down, tt->tt_active);
+ /* Do we still exist? */
+ if (down)
+ goto shutdown;
+
+rescan:
+ /* Rescan if no timer was reactivated above. */
+ if (tt->tt_active == 0)
+ tcp_timer_activate(tp, 0, 0);
+
+done:
+ INP_UNLOCK(inp); /* CALLOUT_RETURNUNLOCKED */
+ return;
+
+shutdown:
+ INP_UNLOCK(inp); /* Prevent LOR at expense of race. */
+ INP_INFO_WLOCK(&tcbinfo);
INP_LOCK(inp);
- INP_INFO_RUNLOCK(&tcbinfo);
- if ((inp->inp_vflag & INP_DROPPED) || callout_pending(tp->tt_delack)
- || !callout_active(tp->tt_delack)) {
- INP_UNLOCK(inp);
+
+ /* When tp is gone we've lost the race. */
+ if (inp->inp_ppcb == NULL) {
+ CTR3(KTR_NET, "%p %s inp %p lost shutdown race",
+ tp, __func__, inp);
+ tcp_timer_race++;
+ INP_UNLOCK(inp); /* CALLOUT_RETURNUNLOCKED */
+ INP_INFO_WUNLOCK(&tcbinfo);
return;
}
- callout_deactivate(tp->tt_delack);
+ KASSERT(tp == inp->inp_ppcb, ("%s: tp changed", __func__));
+
+ /* Shutdown the connection. */
+ switch (down) {
+ case 1:
+ tp = tcp_close(tp);
+ break;
+ case 2:
+ tp = tcp_drop(tp,
+ tp->t_softerror ? tp->t_softerror : ETIMEDOUT);
+ break;
+ }
+ CTR3(KTR_NET, "%p %s inp %p after shutdown", tp, __func__, inp);
+
+ if (tp)
+ INP_UNLOCK(inp); /* CALLOUT_RETURNUNLOCKED */
+
+ INP_INFO_WUNLOCK(&tcbinfo);
+ return;
+}
+
+
+/*
+ * TCP timer processing.
+ */
+static int
+tcp_timer_delack(struct tcpcb *tp, struct inpcb *inp)
+{
tp->t_flags |= TF_ACKNOW;
tcpstat.tcps_delack++;
(void) tcp_output(tp);
- INP_UNLOCK(inp);
+ return (0);
}
-void
-tcp_timer_2msl(void *xtp)
+static int
+tcp_timer_2msl(struct tcpcb *tp, struct inpcb *inp)
{
- struct tcpcb *tp = xtp;
- struct inpcb *inp;
#ifdef TCPDEBUG
int ostate;
ostate = tp->t_state;
#endif
/*
- * XXXRW: Does this actually happen?
- */
- INP_INFO_WLOCK(&tcbinfo);
- inp = tp->t_inpcb;
- /*
- * XXXRW: While this assert is in fact correct, bugs in the tcpcb
- * tear-down mean we need it as a work-around for races between
- * timers and tcp_discardcb().
- *
- * KASSERT(inp != NULL, ("tcp_timer_2msl: inp == NULL"));
- */
- if (inp == NULL) {
- tcp_timer_race++;
- INP_INFO_WUNLOCK(&tcbinfo);
- return;
- }
- INP_LOCK(inp);
- tcp_free_sackholes(tp);
- if ((inp->inp_vflag & INP_DROPPED) || callout_pending(tp->tt_2msl) ||
- !callout_active(tp->tt_2msl)) {
- INP_UNLOCK(tp->t_inpcb);
- INP_INFO_WUNLOCK(&tcbinfo);
- return;
- }
- callout_deactivate(tp->tt_2msl);
- /*
* 2 MSL timeout in shutdown went off. If we're closed but
* still waiting for peer to close and connection has been idle
* too long, or if 2MSL time is up from TIME_WAIT, delete connection
@@ -226,27 +450,24 @@ tcp_timer_2msl(void *xtp)
* Ignore fact that there were recent incoming segments.
*/
if (tcp_fast_finwait2_recycle && tp->t_state == TCPS_FIN_WAIT_2 &&
- tp->t_inpcb && tp->t_inpcb->inp_socket &&
+ tp->t_inpcb->inp_socket &&
(tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) {
tcpstat.tcps_finwait2_drops++;
- tp = tcp_close(tp);
+ return (1); /* tcp_close() */
} else {
if (tp->t_state != TCPS_TIME_WAIT &&
(ticks - tp->t_rcvtime) <= tcp_maxidle)
- callout_reset(tp->tt_2msl, tcp_keepintvl,
- tcp_timer_2msl, tp);
- else
- tp = tcp_close(tp);
- }
+ tcp_timer_activate(tp, TT_2MSL, tcp_keepintvl);
+ else
+ return (1); /* tcp_close() */
+ }
#ifdef TCPDEBUG
if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
PRU_SLOWTIMO);
#endif
- if (tp != NULL)
- INP_UNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ return (0);
}
/*
@@ -302,39 +523,15 @@ tcp_timer_2msl_tw(int reuse)
return (NULL);
}
-void
-tcp_timer_keep(void *xtp)
+static int
+tcp_timer_keep(struct tcpcb *tp, struct inpcb *inp)
{
- struct tcpcb *tp = xtp;
struct tcptemp *t_template;
- struct inpcb *inp;
#ifdef TCPDEBUG
int ostate;
ostate = tp->t_state;
#endif
- INP_INFO_WLOCK(&tcbinfo);
- inp = tp->t_inpcb;
- /*
- * XXXRW: While this assert is in fact correct, bugs in the tcpcb
- * tear-down mean we need it as a work-around for races between
- * timers and tcp_discardcb().
- *
- * KASSERT(inp != NULL, ("tcp_timer_keep: inp == NULL"));
- */
- if (inp == NULL) {
- tcp_timer_race++;
- INP_INFO_WUNLOCK(&tcbinfo);
- return;
- }
- INP_LOCK(inp);
- if ((inp->inp_vflag & INP_DROPPED) || callout_pending(tp->tt_keep)
- || !callout_active(tp->tt_keep)) {
- INP_UNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
- return;
- }
- callout_deactivate(tp->tt_keep);
/*
* Keep-alive timer went off; send something
* or drop connection if idle for too long.
@@ -366,65 +563,30 @@ tcp_timer_keep(void *xtp)
tp->rcv_nxt, tp->snd_una - 1, 0);
(void) m_free(dtom(t_template));
}
- callout_reset(tp->tt_keep, tcp_keepintvl, tcp_timer_keep, tp);
+ tcp_timer_activate(tp, TT_KEEP, tcp_keepintvl);
} else
- callout_reset(tp->tt_keep, tcp_keepidle, tcp_timer_keep, tp);
+ tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
#ifdef TCPDEBUG
if (inp->inp_socket->so_options & SO_DEBUG)
tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
PRU_SLOWTIMO);
#endif
- INP_UNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
- return;
+ return (0);
dropit:
tcpstat.tcps_keepdrops++;
- tp = tcp_drop(tp, ETIMEDOUT);
-
-#ifdef TCPDEBUG
- if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
- tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
- PRU_SLOWTIMO);
-#endif
- if (tp != NULL)
- INP_UNLOCK(tp->t_inpcb);
- INP_INFO_WUNLOCK(&tcbinfo);
+ return (2); /* tcp_drop() */
}
-void
-tcp_timer_persist(void *xtp)
+static int
+tcp_timer_persist(struct tcpcb *tp, struct inpcb *inp)
{
- struct tcpcb *tp = xtp;
- struct inpcb *inp;
#ifdef TCPDEBUG
int ostate;
ostate = tp->t_state;
#endif
- INP_INFO_WLOCK(&tcbinfo);
- inp = tp->t_inpcb;
- /*
- * XXXRW: While this assert is in fact correct, bugs in the tcpcb
- * tear-down mean we need it as a work-around for races between
- * timers and tcp_discardcb().
- *
- * KASSERT(inp != NULL, ("tcp_timer_persist: inp == NULL"));
- */
- if (inp == NULL) {
- tcp_timer_race++;
- INP_INFO_WUNLOCK(&tcbinfo);
- return;
- }
- INP_LOCK(inp);
- if ((inp->inp_vflag & INP_DROPPED) || callout_pending(tp->tt_persist)
- || !callout_active(tp->tt_persist)) {
- INP_UNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
- return;
- }
- callout_deactivate(tp->tt_persist);
/*
* Persistance timer into zero window.
* Force a byte to be output, if possible.
@@ -441,59 +603,29 @@ tcp_timer_persist(void *xtp)
((ticks - tp->t_rcvtime) >= tcp_maxpersistidle ||
(ticks - tp->t_rcvtime) >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
tcpstat.tcps_persistdrop++;
- tp = tcp_drop(tp, ETIMEDOUT);
- goto out;
+ return (2); /* tcp_drop() */
}
tcp_setpersist(tp);
tp->t_flags |= TF_FORCEDATA;
(void) tcp_output(tp);
tp->t_flags &= ~TF_FORCEDATA;
-out:
#ifdef TCPDEBUG
if (tp != NULL && tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
tcp_trace(TA_USER, ostate, tp, NULL, NULL, PRU_SLOWTIMO);
#endif
- if (tp != NULL)
- INP_UNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ return (0);
}
-void
-tcp_timer_rexmt(void * xtp)
+static int
+tcp_timer_rexmt(struct tcpcb *tp, struct inpcb *inp)
{
- struct tcpcb *tp = xtp;
int rexmt;
- int headlocked;
- struct inpcb *inp;
#ifdef TCPDEBUG
int ostate;
ostate = tp->t_state;
#endif
- INP_INFO_WLOCK(&tcbinfo);
- headlocked = 1;
- inp = tp->t_inpcb;
- /*
- * XXXRW: While this assert is in fact correct, bugs in the tcpcb
- * tear-down mean we need it as a work-around for races between
- * timers and tcp_discardcb().
- *
- * KASSERT(inp != NULL, ("tcp_timer_rexmt: inp == NULL"));
- */
- if (inp == NULL) {
- tcp_timer_race++;
- INP_INFO_WUNLOCK(&tcbinfo);
- return;
- }
- INP_LOCK(inp);
- if ((inp->inp_vflag & INP_DROPPED) || callout_pending(tp->tt_rexmt)
- || !callout_active(tp->tt_rexmt)) {
- INP_UNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
- return;
- }
- callout_deactivate(tp->tt_rexmt);
tcp_free_sackholes(tp);
/*
* Retransmission timer went off. Message has not
@@ -503,12 +635,8 @@ tcp_timer_rexmt(void * xtp)
if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
tp->t_rxtshift = TCP_MAXRXTSHIFT;
tcpstat.tcps_timeoutdrop++;
- tp = tcp_drop(tp, tp->t_softerror ?
- tp->t_softerror : ETIMEDOUT);
- goto out;
+ return (2); /* tcp_drop() */
}
- INP_INFO_WUNLOCK(&tcbinfo);
- headlocked = 0;
if (tp->t_rxtshift == 1) {
/*
* first retransmit; record ssthresh and cwnd so they can
@@ -523,9 +651,9 @@ tcp_timer_rexmt(void * xtp)
tp->snd_ssthresh_prev = tp->snd_ssthresh;
tp->snd_recover_prev = tp->snd_recover;
if (IN_FASTRECOVERY(tp))
- tp->t_flags |= TF_WASFRECOVERY;
+ tp->t_flags |= TF_WASFRECOVERY;
else
- tp->t_flags &= ~TF_WASFRECOVERY;
+ tp->t_flags &= ~TF_WASFRECOVERY;
tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
}
tcpstat.tcps_rexmttimeo++;
@@ -604,14 +732,10 @@ tcp_timer_rexmt(void * xtp)
EXIT_FASTRECOVERY(tp);
(void) tcp_output(tp);
-out:
#ifdef TCPDEBUG
if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
PRU_SLOWTIMO);
#endif
- if (tp != NULL)
- INP_UNLOCK(inp);
- if (headlocked)
- INP_INFO_WUNLOCK(&tcbinfo);
+ return (0);
}
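
Note how the per-timer workers above no longer close or drop the connection themselves; they return a disposition code that tcp_timer() acts on after the switch, which is what confines the tcbinfo write lock to the shutdown path. A summary of that convention (illustrative only, not part of the patch):

	/*
	 * Return values of the tcp_timer_* workers as dispatched by tcp_timer():
	 *   0 - connection stays up; tcp_timer() rescans and rearms the callout
	 *   1 - connection is shut down via tcp_close()   (2MSL / FIN_WAIT_2 expiry)
	 *   2 - connection is dropped via tcp_drop(tp,
	 *       tp->t_softerror ? tp->t_softerror : ETIMEDOUT)
	 *       (keepalive, persist and retransmit give-ups)
	 */
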
diff --git a/sys/netinet/tcp_timer.h b/sys/netinet/tcp_timer.h
index 8d8742de..1c801ad 100644
--- a/sys/netinet/tcp_timer.h
+++ b/sys/netinet/tcp_timer.h
@@ -128,6 +128,22 @@ static const char *tcptimers[] =
{ "REXMT", "PERSIST", "KEEP", "2MSL" };
#endif
+struct tcp_timer {
+ struct callout tt_timer;
+ int tt_nextc; /* next callout time in ticks */
+ int tt_active; /* engaged callouts */
+#define TT_DELACK 0x01
+#define TT_REXMT 0x02
+#define TT_PERSIST 0x04
+#define TT_KEEP 0x08
+#define TT_2MSL 0x10
+ int tt_delack;
+ int tt_rexmt;
+ int tt_persist;
+ int tt_keep;
+ int tt_2msl;
+};
+
/*
* Force a time value to be in a certain range.
*/
@@ -158,15 +174,10 @@ extern int tcp_finwait2_timeout;
extern int tcp_fast_finwait2_recycle;
void tcp_timer_init(void);
-void tcp_timer_2msl(void *xtp);
struct tcptw *
tcp_timer_2msl_tw(int _reuse); /* XXX temporary */
void tcp_timer_2msl_reset(struct tcptw *_tw, int rearm);
void tcp_timer_2msl_stop(struct tcptw *_tw);
-void tcp_timer_keep(void *xtp);
-void tcp_timer_persist(void *xtp);
-void tcp_timer_rexmt(void *xtp);
-void tcp_timer_delack(void *xtp);
#endif /* _KERNEL */
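
The deadlines in struct tcp_timer are absolute tick values, with 0 meaning "not armed", and tt_active records which logical timer the single callout is currently scheduled for. Caller usage therefore looks like this (illustrative only, not part of the patch):

	tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);	/* arm: deadline = ticks + t_rxtcur */
	tcp_timer_activate(tp, TT_REXMT, 0);			/* disarm the retransmit timer */
	tcp_timer_activate(tp, 0, 0);				/* timer_type 0: just rescan and rearm the callout */
	if (tcp_timer_active(tp, TT_PERSIST))			/* nonzero deadline == armed */
		/* ... */;
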
diff --git a/sys/netinet/tcp_timewait.c b/sys/netinet/tcp_timewait.c
index c705121e..dc4a695 100644
--- a/sys/netinet/tcp_timewait.c
+++ b/sys/netinet/tcp_timewait.c
@@ -262,10 +262,9 @@ static void tcp_isn_tick(void *);
* separate because the tcpcb structure is exported to userland for sysctl
* parsing purposes, which do not know about callouts.
*/
-struct tcpcb_mem {
- struct tcpcb tcb;
- struct callout tcpcb_mem_rexmt, tcpcb_mem_persist, tcpcb_mem_keep;
- struct callout tcpcb_mem_2msl, tcpcb_mem_delack;
+struct tcpcb_mem {
+ struct tcpcb tcb;
+ struct tcp_timer tt;
};
static uma_zone_t tcpcb_zone;
@@ -490,7 +489,6 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th,
if (tp != NULL) {
inp = tp->t_inpcb;
KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
INP_LOCK_ASSERT(inp);
} else
inp = NULL;
@@ -645,6 +643,7 @@ tcp_newtcpcb(struct inpcb *inp)
if (tm == NULL)
return (NULL);
tp = &tm->tcb;
+ tp->t_timers = &tm->tt;
/* LIST_INIT(&tp->t_segq); */ /* XXX covered by M_ZERO */
tp->t_maxseg = tp->t_maxopd =
#ifdef INET6
@@ -653,11 +652,8 @@ tcp_newtcpcb(struct inpcb *inp)
tcp_mssdflt;
/* Set up our timeouts. */
- callout_init(tp->tt_rexmt = &tm->tcpcb_mem_rexmt, NET_CALLOUT_MPSAFE);
- callout_init(tp->tt_persist = &tm->tcpcb_mem_persist, NET_CALLOUT_MPSAFE);
- callout_init(tp->tt_keep = &tm->tcpcb_mem_keep, NET_CALLOUT_MPSAFE);
- callout_init(tp->tt_2msl = &tm->tcpcb_mem_2msl, NET_CALLOUT_MPSAFE);
- callout_init(tp->tt_delack = &tm->tcpcb_mem_delack, NET_CALLOUT_MPSAFE);
+ callout_init_mtx(&tp->t_timers->tt_timer, &inp->inp_mtx,
+ CALLOUT_RETURNUNLOCKED);
if (tcp_do_rfc1323)
tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
@@ -728,12 +724,15 @@ tcp_discardcb(struct tcpcb *tp)
/*
* Make sure that all of our timers are stopped before we
* delete the PCB.
+ *
+ * XXX: callout_stop() may race and a callout may already
+ * try to obtain the INP_LOCK. Only callout_drain() would
+ * stop this but it would cause a LOR thus we can't use it.
+ * The tcp_timer() function contains a lot of checks to
+ * handle this case rather gracefully.
*/
- callout_stop(tp->tt_rexmt);
- callout_stop(tp->tt_persist);
- callout_stop(tp->tt_keep);
- callout_stop(tp->tt_2msl);
- callout_stop(tp->tt_delack);
+ tp->t_timers->tt_active = 0;
+ callout_stop(&tp->t_timers->tt_timer);
/*
* If we got enough samples through the srtt filter,
diff --git a/sys/netinet/tcp_usrreq.c b/sys/netinet/tcp_usrreq.c
index 51e659b..cada87c 100644
--- a/sys/netinet/tcp_usrreq.c
+++ b/sys/netinet/tcp_usrreq.c
@@ -1146,7 +1146,7 @@ tcp_connect(struct tcpcb *tp, struct sockaddr *nam, struct thread *td)
soisconnecting(so);
tcpstat.tcps_connattempt++;
tp->t_state = TCPS_SYN_SENT;
- callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp);
+ tcp_timer_activate(tp, TT_KEEP, tcp_keepinit);
tp->iss = tcp_new_isn(tp);
tp->t_bw_rtseq = tp->iss;
tcp_sendseqinit(tp);
@@ -1209,7 +1209,7 @@ tcp6_connect(struct tcpcb *tp, struct sockaddr *nam, struct thread *td)
soisconnecting(so);
tcpstat.tcps_connattempt++;
tp->t_state = TCPS_SYN_SENT;
- callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp);
+ tcp_timer_activate(tp, TT_KEEP, tcp_keepinit);
tp->iss = tcp_new_isn(tp);
tp->t_bw_rtseq = tp->iss;
tcp_sendseqinit(tp);
@@ -1573,8 +1573,7 @@ tcp_usrclosed(struct tcpcb *tp)
timeout = (tcp_fast_finwait2_recycle) ?
tcp_finwait2_timeout : tcp_maxidle;
- callout_reset(tp->tt_2msl, timeout,
- tcp_timer_2msl, tp);
+ tcp_timer_activate(tp, TT_2MSL, timeout);
}
}
}
@@ -1774,12 +1773,15 @@ db_print_tcpcb(struct tcpcb *tp, const char *name, int indent)
LIST_FIRST(&tp->t_segq), tp->t_segqlen, tp->t_dupacks);
db_print_indent(indent);
- db_printf("tt_rexmt: %p tt_persist: %p tt_keep: %p\n",
- tp->tt_rexmt, tp->tt_persist, tp->tt_keep);
+ db_printf("t_inpcb: %p t_timers: %p tt_active: %x\n",
+ tp->t_inpcb, tp->t_timers, tp->t_timers->tt_active);
db_print_indent(indent);
- db_printf("tt_2msl: %p tt_delack: %p t_inpcb: %p\n", tp->tt_2msl,
- tp->tt_delack, tp->t_inpcb);
+ db_printf("tt_delack: %i tt_rexmt: %i tt_keep: %i "
+ "tt_persist: %i tt_2msl: %i\n",
+ tp->t_timers->tt_delack, tp->t_timers->tt_rexmt,
+ tp->t_timers->tt_keep, tp->t_timers->tt_persist,
+ tp->t_timers->tt_2msl);
db_print_indent(indent);
db_printf("t_state: %d (", tp->t_state);
diff --git a/sys/netinet/tcp_var.h b/sys/netinet/tcp_var.h
index 0770412..5752da9 100644
--- a/sys/netinet/tcp_var.h
+++ b/sys/netinet/tcp_var.h
@@ -84,11 +84,7 @@ struct tcpcb {
int t_segqlen; /* segment reassembly queue length */
int t_dupacks; /* consecutive dup acks recd */
- struct callout *tt_rexmt; /* retransmit timer */
- struct callout *tt_persist; /* retransmit persistence */
- struct callout *tt_keep; /* keepalive */
- struct callout *tt_2msl; /* 2*msl TIME_WAIT timer */
- struct callout *tt_delack; /* delayed ACK timer */
+ struct tcp_timer *t_timers; /* all TCP timers in one struct */
struct inpcb *t_inpcb; /* back pointer to internet pcb */
int t_state; /* state of this connection */
@@ -538,8 +534,8 @@ void tcp_slowtimo(void);
struct tcptemp *
tcpip_maketemplate(struct inpcb *);
void tcpip_fillheaders(struct inpcb *, void *, void *);
-struct tcpcb *
- tcp_timers(struct tcpcb *, int);
+void tcp_timer_activate(struct tcpcb *, int, u_int);
+int tcp_timer_active(struct tcpcb *, int);
void tcp_trace(int, int, struct tcpcb *, void *, struct tcphdr *, int);
void tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq);
void syncache_init(void);