path: root/sys/kern/kern_tc.c
Diffstat (limited to 'sys/kern/kern_tc.c')
-rw-r--r--  sys/kern/kern_tc.c  285
1 file changed, 2 insertions, 283 deletions
diff --git a/sys/kern/kern_tc.c b/sys/kern/kern_tc.c
index b51b29c..ed85804 100644
--- a/sys/kern/kern_tc.c
+++ b/sys/kern/kern_tc.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)kern_clock.c 8.5 (Berkeley) 1/21/94
- * $Id: kern_clock.c,v 1.47 1997/12/23 16:31:54 nate Exp $
+ * $Id: kern_clock.c,v 1.48 1998/01/07 12:29:17 phk Exp $
*/
/* Portions of this software are covered by the following: */
@@ -87,13 +87,6 @@
static void initclocks __P((void *dummy));
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)
-/* Exported to machdep.c. */
-struct callout *callout;
-struct callout_list callfree;
-int callwheelsize, callwheelbits, callwheelmask;
-struct callout_tailq *callwheel;
-
-
/* Some of these don't belong here, but it's easiest to concentrate them. */
#if defined(SMP) && defined(BETTER_CLOCK)
long cp_time[CPUSTATES];
@@ -162,8 +155,6 @@ int stathz;
int profhz;
static int profprocs;
int ticks;
-static int softticks; /* Like ticks, but for softclock(). */
-static struct callout *nextsoftcheck; /* Next callout to be checked. */
static int psdiv, pscnt; /* prof => stat divider */
int psratio; /* ratio: prof / stat */
@@ -314,33 +305,6 @@ long pps_stbcnt = 0; /* stability limit exceeded */
#endif /* PPS_SYNC */
/* XXX none of this stuff works under FreeBSD */
-#ifdef EXT_CLOCK
-/*
- * External clock definitions
- *
- * The following definitions and declarations are used only if an
- * external clock (HIGHBALL or TPRO) is configured on the system.
- */
-#define CLOCK_INTERVAL 30 /* CPU clock update interval (s) */
-
-/*
- * The clock_count variable is set to CLOCK_INTERVAL at each PPS
- * interrupt and decremented once each second.
- */
-int clock_count = 0; /* CPU clock counter */
-
-#ifdef HIGHBALL
-/*
- * The clock_offset and clock_cpu variables are used by the HIGHBALL
- * interface. The clock_offset variable defines the offset between
- * system time and the HIGHBALL counters. The clock_cpu variable contains
- * the offset between the system clock and the HIGHBALL clock for use in
- * disciplining the kernel time variable.
- */
-extern struct timeval clock_offset; /* Highball clock offset */
-long clock_cpu = 0; /* CPU clock adjust */
-#endif /* HIGHBALL */
-#endif /* EXT_CLOCK */
/*
* hardupdate() - local clock update
@@ -665,186 +629,7 @@ hardclock(frame)
CPU_CLOCKUPDATE(&time, &newtime);
}
- /*
- * Process callouts at a very low cpu priority, so we don't keep the
- * relatively high clock interrupt priority any longer than necessary.
- */
- if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) {
- if (CLKF_BASEPRI(frame)) {
- /*
- * Save the overhead of a software interrupt;
- * it will happen as soon as we return, so do it now.
- */
- (void)splsoftclock();
- softclock();
- } else
- setsoftclock();
- } else if (softticks + 1 == ticks) {
- ++softticks;
- }
-}
-
-/*
- * The callout mechanism is based on the work of Adam M. Costello and
- * George Varghese, published in a technical report entitled "Redesigning
- * the BSD Callout and Timer Facilities" and modified slightly for inclusion
- * in FreeBSD by Justin T. Gibbs. The original work on the data structures
- * used in this implementation was published by G. Varghese and A. Lauck in
- * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
- * the Efficient Implementation of a Timer Facility" in the Proceedings of
- * the 11th ACM Annual Symposium on Operating Systems Principles,
- * Austin, Texas Nov 1987.
- */
-/*
- * Software (low priority) clock interrupt.
- * Run periodic events from timeout queue.
- */
-/*ARGSUSED*/
-void
-softclock()
-{
- register struct callout *c;
- register struct callout_tailq *bucket;
- register int s;
- register int curticks;
- register int steps; /*
- * Number of steps taken since
- * we last allowed interrupts.
- */
-
- #ifndef MAX_SOFTCLOCK_STEPS
- #define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
- #endif /* MAX_SOFTCLOCK_STEPS */
-
- steps = 0;
- s = splhigh();
- while (softticks != ticks) {
- softticks++;
- /*
- * softticks may be modified by hard clock, so cache
- * it while we work on a given bucket.
- */
- curticks = softticks;
- bucket = &callwheel[curticks & callwheelmask];
- c = TAILQ_FIRST(bucket);
- while (c) {
- if (c->c_time != curticks) {
- c = TAILQ_NEXT(c, c_links.tqe);
- ++steps;
- if (steps >= MAX_SOFTCLOCK_STEPS) {
- nextsoftcheck = c;
- /* Give interrupts a chance. */
- splx(s);
- s = splhigh();
- c = nextsoftcheck;
- steps = 0;
- }
- } else {
- void (*c_func)(void *);
- void *c_arg;
-
- nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
- TAILQ_REMOVE(bucket, c, c_links.tqe);
- c_func = c->c_func;
- c_arg = c->c_arg;
- c->c_func = NULL;
- SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
- splx(s);
- c_func(c_arg);
- s = splhigh();
- steps = 0;
- c = nextsoftcheck;
- }
- }
- }
- nextsoftcheck = NULL;
- splx(s);
-}
-
-/*
- * timeout --
- * Execute a function after a specified length of time.
- *
- * untimeout --
- * Cancel previous timeout function call.
- *
- * callout_handle_init --
- * Initialize a handle so that using it with untimeout is benign.
- *
- * See AT&T BCI Driver Reference Manual for specification. This
- * implementation differs from that one in that although an
- * identification value is returned from timeout, the original
- * arguments to timeout as well as the identifier are used to
- * identify entries for untimeout.
- */
-struct callout_handle
-timeout(ftn, arg, to_ticks)
- timeout_t ftn;
- void *arg;
- register int to_ticks;
-{
- int s;
- struct callout *new;
- struct callout_handle handle;
-
- if (to_ticks <= 0)
- to_ticks = 1;
-
- /* Lock out the clock. */
- s = splhigh();
-
- /* Fill in the next free callout structure. */
- new = SLIST_FIRST(&callfree);
- if (new == NULL)
- /* XXX Attempt to malloc first */
- panic("timeout table full");
-
- SLIST_REMOVE_HEAD(&callfree, c_links.sle);
- new->c_arg = arg;
- new->c_func = ftn;
- new->c_time = ticks + to_ticks;
- TAILQ_INSERT_TAIL(&callwheel[new->c_time & callwheelmask],
- new, c_links.tqe);
-
- splx(s);
- handle.callout = new;
- return (handle);
-}
-
-void
-untimeout(ftn, arg, handle)
- timeout_t ftn;
- void *arg;
- struct callout_handle handle;
-{
- register int s;
-
- /*
- * Check for a handle that was initialized
- * by callout_handle_init, but never used
- * for a real timeout.
- */
- if (handle.callout == NULL)
- return;
-
- s = splhigh();
- if ((handle.callout->c_func == ftn)
- && (handle.callout->c_arg == arg)) {
- if (nextsoftcheck == handle.callout) {
- nextsoftcheck = TAILQ_NEXT(handle.callout, c_links.tqe);
- }
- TAILQ_REMOVE(&callwheel[handle.callout->c_time & callwheelmask],
- handle.callout, c_links.tqe);
- handle.callout->c_func = NULL;
- SLIST_INSERT_HEAD(&callfree, handle.callout, c_links.sle);
- }
- splx(s);
-}
-
-void
-callout_handle_init(struct callout_handle *handle)
-{
- handle->callout = NULL;
+ softclock(frame);
}
void
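Aside (not part of the commit): the hunk above removes the hashed timing wheel that the Costello/Varghese comment describes. The small sketch below, with an assumed example wheel size, shows why softclock() has to compare c_time against curticks: expiry times are hashed into buckets with a power-of-two mask, so timeouts a whole wheel length apart share a bucket.

#include <stdio.h>

#define EX_WHEELSIZE 256			/* assumed power-of-two wheel size */
#define EX_WHEELMASK (EX_WHEELSIZE - 1)

int
main(void)
{
	int ticks = 1000;			/* pretend current tick count */
	int t1 = ticks + 10;			/* expires 10 ticks from now */
	int t2 = ticks + 10 + EX_WHEELSIZE;	/* one full wheel turn later */

	/* Both expiry times hash to the same bucket index (242 here)... */
	printf("bucket(t1) = %d, bucket(t2) = %d\n",
	    t1 & EX_WHEELMASK, t2 & EX_WHEELMASK);
	/*
	 * ...so softclock() only runs the entries whose c_time equals the
	 * tick it is processing and steps over the rest of the bucket.
	 */
	return (0);
}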
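For context on the interface whose implementation this hunk moves out of kern_clock.c, here is a minimal sketch of the driver-side usage pattern described in the timeout/untimeout comment above. It is illustrative only: the mydev names and the one-second poll interval are hypothetical, not code from this commit.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>

struct mydev_softc {
	struct callout_handle sc_handle;	/* pending timeout, if any */
};

static void
mydev_tick(void *arg)
{
	struct mydev_softc *sc = arg;

	/* ... poll the hardware ..., then rearm one second out. */
	sc->sc_handle = timeout(mydev_tick, sc, hz);
}

static void
mydev_attach(struct mydev_softc *sc)
{
	/* Make a later untimeout() harmless even if we never arm. */
	callout_handle_init(&sc->sc_handle);
	sc->sc_handle = timeout(mydev_tick, sc, hz);
}

static void
mydev_detach(struct mydev_softc *sc)
{
	/* Cancellation needs function, argument and handle, as noted above. */
	untimeout(mydev_tick, sc, sc->sc_handle);
}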
@@ -1354,69 +1139,3 @@ hardpps(tvp, p_usec)
}
#endif /* PPS_SYNC */
-#ifdef APM_FIXUP_CALLTODO
-/*
- * Adjust the kernel calltodo timeout list. This routine is used after
- * an APM resume to recalculate the calltodo timer list values with the
- * number of hz's we have been sleeping. The next hardclock() will detect
- * that there are fired timers and run softclock() to execute them.
- *
- * Please note, I have not done an exhaustive analysis of what code this
- * might break. I am motivated to have my select()'s and alarm()'s that
- * have expired during suspend firing upon resume so that the applications
- * which set the timer can do the maintenance the timer was for as close
- * as possible to the originally intended time. Testing this code for a
- * week showed that resuming from a suspend resulted in 22 to 25 timers
- * firing, which seemed independent of whether the suspend was 2 hours or
- * 2 days. Your mileage may vary. - Ken Key <key@cs.utk.edu>
- */
-void
-adjust_timeout_calltodo(time_change)
- struct timeval *time_change;
-{
- register struct callout *p;
- unsigned long delta_ticks;
- int s;
-
- /*
- * How many ticks were we asleep?
- * (stolen from hzto()).
- */
-
- /* Don't do anything */
- if (time_change->tv_sec < 0)
- return;
- else if (time_change->tv_sec <= LONG_MAX / 1000000)
- delta_ticks = (time_change->tv_sec * 1000000 +
- time_change->tv_usec + (tick - 1)) / tick + 1;
- else if (time_change->tv_sec <= LONG_MAX / hz)
- delta_ticks = time_change->tv_sec * hz +
- (time_change->tv_usec + (tick - 1)) / tick + 1;
- else
- delta_ticks = LONG_MAX;
-
- if (delta_ticks > INT_MAX)
- delta_ticks = INT_MAX;
-
- /*
- * Now rip through the timer calltodo list looking for timers
- * to expire.
- */
-
- /* don't collide with softclock() */
- s = splhigh();
- for (p = calltodo.c_next; p != NULL; p = p->c_next) {
- p->c_time -= delta_ticks;
-
- /* Break if the timer had more time on it than delta_ticks */
- if (p->c_time > 0)
- break;
-
- /* take back the ticks the timer didn't use (p->c_time <= 0) */
- delta_ticks = -p->c_time;
- }
- splx(s);
-
- return;
-}
-#endif /* APM_FIXUP_CALLTODO */
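To make the arithmetic in the removed adjust_timeout_calltodo() concrete, the stand-alone sketch below reproduces its timeval-to-ticks conversion, including the two overflow-avoiding branches. The EX_HZ/EX_TICK values and the sleep_ticks() name are assumptions for illustration, not kernel code.

#include <limits.h>
#include <stdio.h>

#define EX_HZ	100			/* assumed clock rate (ticks/second) */
#define EX_TICK	(1000000 / EX_HZ)	/* microseconds per tick */

static unsigned long
sleep_ticks(long sec, long usec)
{
	unsigned long delta_ticks;

	if (sec < 0)
		return (0);		/* negative interval: do nothing */
	else if (sec <= LONG_MAX / 1000000)
		/* Small enough to do the whole sum in microseconds. */
		delta_ticks = (sec * 1000000 + usec + (EX_TICK - 1)) /
		    EX_TICK + 1;
	else if (sec <= LONG_MAX / EX_HZ)
		/* Convert seconds straight to ticks to avoid overflow. */
		delta_ticks = sec * EX_HZ +
		    (usec + (EX_TICK - 1)) / EX_TICK + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;
	return (delta_ticks);
}

int
main(void)
{
	/* A two hour suspend at 100 Hz comes out to 720001 ticks. */
	printf("%lu ticks\n", sleep_ticks(2 * 60 * 60, 0));
	return (0);
}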