author     jlemon <jlemon@FreeBSD.org>    1999-08-30 21:17:07 +0000
committer  jlemon <jlemon@FreeBSD.org>    1999-08-30 21:17:07 +0000
commit     628be0515eace470ecc5e0055dbc9cee71e2adb9 (patch)
tree       541ccb366454973a67b93f62b3164fc1a98eced0 /sys/kern
parent     6aee941745b6eecfd2d3e23b412969b4ddf18102 (diff)
Restructure TCP timeout handling:
- eliminate the fast/slow timeout lists for TCP and instead use a
  callout entry for each timer.
- increase the TCP timer granularity to HZ
- implement "bad retransmit" recovery, as presented in
  "On Estimating End-to-End Network Path Properties", by Allman and Paxson.

Submitted by:	jlemon, wollmann
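As context for the change, here is a minimal sketch of the per-timer callout
scheme the commit message describes: each connection carries its own struct
callout, armed with callout_reset() and disarmed with callout_stop(), instead
of being scanned from global fast/slow timeout lists. The names struct conn,
conn_rexmt_timeout() and CONN_RXTIME are illustrative only and do not appear
in the actual TCP changes of this commit.

/*
 * Illustrative sketch only: one callout entry per timer, as described
 * in the commit message above.  struct conn, conn_rexmt_timeout() and
 * CONN_RXTIME are made-up names, not taken from tcp_timer.c.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>

#define	CONN_RXTIME	(3 * hz)	/* example retransmit interval, in ticks */

struct conn {
	struct callout	co_rexmt;	/* one callout per timer, no global lists */
};

static void
conn_rexmt_timeout(void *arg)
{
	struct conn *cp = arg;

	/* retransmit work would go here; rearm the timer if still needed */
	callout_reset(&cp->co_rexmt, CONN_RXTIME, conn_rexmt_timeout, cp);
}

static void
conn_timers_start(struct conn *cp)
{
	callout_init(&cp->co_rexmt);	/* single-argument form of this era */
	callout_reset(&cp->co_rexmt, CONN_RXTIME, conn_rexmt_timeout, cp);
}

static void
conn_timers_stop(struct conn *cp)
{
	callout_stop(&cp->co_rexmt);	/* clears both ACTIVE and PENDING */
}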
Diffstat (limited to 'sys/kern')
-rw-r--r--	sys/kern/kern_timeout.c	18
1 file changed, 9 insertions, 9 deletions
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index 45bc266..e73d371 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -125,8 +125,7 @@ softclock()
c_links.sle);
} else {
c->c_flags =
- (c->c_flags & ~CALLOUT_PENDING)
- | CALLOUT_FIRED;
+ (c->c_flags & ~CALLOUT_PENDING);
}
splx(s);
c_func(c_arg);
@@ -218,11 +217,11 @@ callout_handle_init(struct callout_handle *handle)
* callout_init() - initialize a callout structure so that it can
* safely be passed to callout_reset() and callout_stop()
*
- * <sys/callout.h> defines two convenience macros:
+ * <sys/callout.h> defines three convenience macros:
*
- * callout_pending() - returns number of ticks until callout fires, or 0
- * if not scheduled
- * callout_fired() - returns truth if callout has already been fired
+ * callout_active() - returns truth if callout has not been serviced
+ * callout_pending() - returns truth if callout is still waiting for timeout
+ * callout_deactivate() - marks the callout as having been serviced
*/
void
callout_reset(c, to_ticks, ftn, arg)
@@ -240,13 +239,13 @@ callout_reset(c, to_ticks, ftn, arg)
/*
* We could spl down here and back up at the TAILQ_INSERT_TAIL,
* but there's no point since doing this setup doesn't take much
- ^ time.
+ * time.
*/
if (to_ticks <= 0)
to_ticks = 1;
c->c_arg = arg;
- c->c_flags = (c->c_flags & ~CALLOUT_FIRED) | CALLOUT_PENDING;
+ c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
c->c_func = ftn;
c->c_time = ticks + to_ticks;
TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
@@ -266,10 +265,11 @@ callout_stop(c)
* Don't attempt to delete a callout that's not on the queue.
*/
if (!(c->c_flags & CALLOUT_PENDING)) {
+ c->c_flags &= ~CALLOUT_ACTIVE;
splx(s);
return;
}
- c->c_flags &= ~CALLOUT_PENDING;
+ c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
if (nextsoftcheck == c) {
nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
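For reference, below is a hedged sketch of how a timer function might use the
three macros documented in the comment above (callout_pending(),
callout_active(), callout_deactivate()) to cope with a callout_reset() or
callout_stop() that races with softclock() dispatch. struct conn and
conn_keep_timeout() are illustrative names, and the spl level chosen here is
an assumption; this is not code from this commit.

/*
 * Illustrative sketch only: a timer handler checking the new callout flags.
 * struct conn and conn_keep_timeout() are made-up names; splnet() is an
 * assumed protection level, not taken from this commit.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>

struct conn {
	struct callout	co_keep;
};

static void
conn_keep_timeout(void *arg)
{
	struct conn *cp = arg;
	int s;

	s = splnet();
	if (callout_pending(&cp->co_keep)) {
		/* timer was rescheduled while this handler was queued */
		splx(s);
		return;
	}
	if (!callout_active(&cp->co_keep)) {
		/* callout_stop() raced with the softclock dispatch */
		splx(s);
		return;
	}
	callout_deactivate(&cp->co_keep);	/* mark the callout as serviced */

	/* ... keepalive processing would go here ... */

	splx(s);
}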