summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
authorjhb <jhb@FreeBSD.org>2001-02-20 10:25:29 +0000
committerjhb <jhb@FreeBSD.org>2001-02-20 10:25:29 +0000
commitca00514d9e8f898a78403e8606df97f7230cc8c9 (patch)
tree3e47d9a1567340c0a7d4a175c440bff8f59a2c0d /sys
parent6d1f53999c24631267a7934463fc7e3e2cd3b233 (diff)
downloadFreeBSD-src-ca00514d9e8f898a78403e8606df97f7230cc8c9.zip
FreeBSD-src-ca00514d9e8f898a78403e8606df97f7230cc8c9.tar.gz
- Add a new ithread_schedule() function to do the bulk of the work of
scheduling an interrupt thread to run when needed. This has the side effect of enabling support for entropy gathering from interrupts on all architectures.
- Change the software interrupt and x86 and alpha hardware interrupt code to use ithread_schedule() for most of their processing when scheduling an interrupt to run.
- Remove the pesky Warning message about interrupt threads having entropy enabled. I'm not sure why I put that in there in the first place.
- Add more error checking for parameters and change some cases that returned EINVAL to panic on failure instead via KASSERT().
- Instead of doing a documented evil hack of setting the P_NOLOAD flag on every interrupt thread whose pri was SWI_CLOCK, set the flag explicitly for clk_ithd's proc during start_softintr().
Diffstat (limited to 'sys')
-rw-r--r--sys/alpha/alpha/interrupt.c41
-rw-r--r--sys/amd64/isa/ithread.c59
-rw-r--r--sys/i386/isa/ithread.c59
-rw-r--r--sys/kern/kern_intr.c148
-rw-r--r--sys/sys/interrupt.h3
5 files changed, 120 insertions, 190 deletions
diff --git a/sys/alpha/alpha/interrupt.c b/sys/alpha/alpha/interrupt.c
index 7d078e5..7c7c22b 100644
--- a/sys/alpha/alpha/interrupt.c
+++ b/sys/alpha/alpha/interrupt.c
@@ -396,8 +396,8 @@ alpha_dispatch_intr(void *frame, unsigned long vector)
int h = HASHVEC(vector);
struct alpha_intr *i;
struct ithd *ithd; /* our interrupt thread */
- int saveintr;
struct intrhand *ih;
+ int error;
/*
* Walk the hash bucket for this vector looking for this vector's
@@ -432,48 +432,13 @@ alpha_dispatch_intr(void *frame, unsigned long vector)
return;
}
- CTR3(KTR_INTR, "alpha_dispatch_intr: pid %d(%s) need=%d",
- ithd->it_proc->p_pid, ithd->it_proc->p_comm, ithd->it_need);
-
- /*
- * Set it_need so that if the thread is already running but close
- * to done, it will do another go-round. Then get the sched lock
- * and see if the thread is on whichkqs yet. If not, put it on
- * there. In any case, kick everyone so that if the new thread
- * is higher priority than their current thread, it gets run now.
- */
- ithd->it_need = 1;
if (ithd->it_disable) {
CTR1(KTR_INTR,
"alpha_dispatch_intr: disabling vector 0x%x", i->vector);
ithd->it_disable(ithd->it_vector);
}
- mtx_lock_spin(&sched_lock);
- if (ithd->it_proc->p_stat == SWAIT) {
- /* not on the run queue and not running */
- CTR1(KTR_INTR, "alpha_dispatch_intr: setrunqueue %d",
- ithd->it_proc->p_pid);
-
- alpha_mb(); /* XXX - this is bogus, mtx_lock_spin has a barrier */
- ithd->it_proc->p_stat = SRUN;
- setrunqueue(ithd->it_proc);
-#ifdef PREEMPTION
- /* Does not work on 4100 */
- if (!cold) {
- saveintr = sched_lock.mtx_saveintr;
- mtx_intr_enable(&sched_lock);
- if (curproc != PCPU_GET(idleproc))
- setrunqueue(curproc);
- mi_switch();
- sched_lock.mtx_saveintr = saveintr;
- } else
-#endif
- need_resched();
- } else {
- CTR3(KTR_INTR, "alpha_dispatch_intr: %d: it_need %d, state %d",
- ithd->it_proc->p_pid, ithd->it_need, ithd->it_proc->p_stat);
- }
- mtx_unlock_spin(&sched_lock);
+ error = ithread_schedule(ithd, !cold);
+ KASSERT(error == 0, ("got an impossible stray interrupt"));
}
static void
diff --git a/sys/amd64/isa/ithread.c b/sys/amd64/isa/ithread.c
index d9255e1..4f71516 100644
--- a/sys/amd64/isa/ithread.c
+++ b/sys/amd64/isa/ithread.c
@@ -31,8 +31,6 @@
/* Interrupt thread code. */
-#include "opt_auto_eoi.h"
-
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/rtprio.h> /* change this name XXX */
@@ -81,7 +79,8 @@ void
sched_ithd(void *cookie)
{
int irq = (int) cookie; /* IRQ we're handling */
- struct ithd *ir = ithds[irq]; /* and the process that does it */
+ struct ithd *ithd = ithds[irq]; /* and the process that does it */
+ int error;
/* This used to be in icu_vector.s */
/*
@@ -94,23 +93,15 @@ sched_ithd(void *cookie)
atomic_add_int(&cnt.v_intr, 1); /* one more global interrupt */
/*
- * If this interrupt is marked as being a source of entropy, use
- * the current timestamp to feed entropy to the PRNG.
+ * Schedule the interrupt thread to run if needed and switch to it
+ * if we schedule it if !cold.
*/
- if (harvest.interrupt && ir != NULL && (ir->it_flags & IT_ENTROPY)) {
- struct int_entropy entropy;
+ error = ithread_schedule(ithd, !cold);
- entropy.irq = irq;
- entropy.p = curproc;
- random_harvest(&entropy, sizeof(entropy), 2, 0,
- RANDOM_INTERRUPT);
- }
-
/*
- * If we don't have an interrupt resource or an interrupt thread for
- * this IRQ, log it as a stray interrupt.
+ * Log stray interrupts.
*/
- if (ir == NULL || ir->it_proc == NULL) {
+ if (error == EINVAL)
if (straycount[irq] < MAX_STRAY_LOG) {
printf("stray irq %d\n", irq);
if (++straycount[irq] == MAX_STRAY_LOG)
@@ -118,40 +109,4 @@ sched_ithd(void *cookie)
"got %d stray irq %d's: not logging anymore\n",
MAX_STRAY_LOG, irq);
}
- return;
- }
-
- CTR3(KTR_INTR, "sched_ithd pid %d(%s) need=%d",
- ir->it_proc->p_pid, ir->it_proc->p_comm, ir->it_need);
-
- /*
- * Set it_need so that if the thread is already running but close
- * to done, it will do another go-round. Then get the sched lock
- * and see if the thread is on whichkqs yet. If not, put it on
- * there. In any case, kick everyone so that if the new thread
- * is higher priority than their current thread, it gets run now.
- */
- ir->it_need = 1;
- mtx_lock_spin(&sched_lock);
- if (ir->it_proc->p_stat == SWAIT) { /* not on run queue */
- CTR1(KTR_INTR, "sched_ithd: setrunqueue %d",
- ir->it_proc->p_pid);
-/* membar_lock(); */
- ir->it_proc->p_stat = SRUN;
- setrunqueue(ir->it_proc);
- if (!cold) {
- if (curproc != PCPU_GET(idleproc))
- setrunqueue(curproc);
- curproc->p_stats->p_ru.ru_nvcsw++;
- mi_switch();
- } else
- need_resched();
- }
- else {
- CTR3(KTR_INTR, "sched_ithd %d: it_need %d, state %d",
- ir->it_proc->p_pid,
- ir->it_need,
- ir->it_proc->p_stat );
- }
- mtx_unlock_spin(&sched_lock);
}
diff --git a/sys/i386/isa/ithread.c b/sys/i386/isa/ithread.c
index d9255e1..4f71516 100644
--- a/sys/i386/isa/ithread.c
+++ b/sys/i386/isa/ithread.c
@@ -31,8 +31,6 @@
/* Interrupt thread code. */
-#include "opt_auto_eoi.h"
-
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/rtprio.h> /* change this name XXX */
@@ -81,7 +79,8 @@ void
sched_ithd(void *cookie)
{
int irq = (int) cookie; /* IRQ we're handling */
- struct ithd *ir = ithds[irq]; /* and the process that does it */
+ struct ithd *ithd = ithds[irq]; /* and the process that does it */
+ int error;
/* This used to be in icu_vector.s */
/*
@@ -94,23 +93,15 @@ sched_ithd(void *cookie)
atomic_add_int(&cnt.v_intr, 1); /* one more global interrupt */
/*
- * If this interrupt is marked as being a source of entropy, use
- * the current timestamp to feed entropy to the PRNG.
+ * Schedule the interrupt thread to run if needed and switch to it
+ * if we schedule it if !cold.
*/
- if (harvest.interrupt && ir != NULL && (ir->it_flags & IT_ENTROPY)) {
- struct int_entropy entropy;
+ error = ithread_schedule(ithd, !cold);
- entropy.irq = irq;
- entropy.p = curproc;
- random_harvest(&entropy, sizeof(entropy), 2, 0,
- RANDOM_INTERRUPT);
- }
-
/*
- * If we don't have an interrupt resource or an interrupt thread for
- * this IRQ, log it as a stray interrupt.
+ * Log stray interrupts.
*/
- if (ir == NULL || ir->it_proc == NULL) {
+ if (error == EINVAL)
if (straycount[irq] < MAX_STRAY_LOG) {
printf("stray irq %d\n", irq);
if (++straycount[irq] == MAX_STRAY_LOG)
@@ -118,40 +109,4 @@ sched_ithd(void *cookie)
"got %d stray irq %d's: not logging anymore\n",
MAX_STRAY_LOG, irq);
}
- return;
- }
-
- CTR3(KTR_INTR, "sched_ithd pid %d(%s) need=%d",
- ir->it_proc->p_pid, ir->it_proc->p_comm, ir->it_need);
-
- /*
- * Set it_need so that if the thread is already running but close
- * to done, it will do another go-round. Then get the sched lock
- * and see if the thread is on whichkqs yet. If not, put it on
- * there. In any case, kick everyone so that if the new thread
- * is higher priority than their current thread, it gets run now.
- */
- ir->it_need = 1;
- mtx_lock_spin(&sched_lock);
- if (ir->it_proc->p_stat == SWAIT) { /* not on run queue */
- CTR1(KTR_INTR, "sched_ithd: setrunqueue %d",
- ir->it_proc->p_pid);
-/* membar_lock(); */
- ir->it_proc->p_stat = SRUN;
- setrunqueue(ir->it_proc);
- if (!cold) {
- if (curproc != PCPU_GET(idleproc))
- setrunqueue(curproc);
- curproc->p_stats->p_ru.ru_nvcsw++;
- mi_switch();
- } else
- need_resched();
- }
- else {
- CTR3(KTR_INTR, "sched_ithd %d: it_need %d, state %d",
- ir->it_proc->p_pid,
- ir->it_need,
- ir->it_proc->p_stat );
- }
- mtx_unlock_spin(&sched_lock);
}
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index 260acdc..e612d37 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -40,6 +40,7 @@
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
@@ -50,6 +51,11 @@
#include <net/netisr.h> /* prototype for legacy_setsoftnet */
+struct int_entropy {
+ struct proc *proc;
+ int vector;
+};
+
void *net_ih;
void *vm_ih;
void *softclock_ih;
@@ -144,11 +150,8 @@ ithread_update(struct ithd *ithd)
entropy++;
}
- if (entropy) {
- printf("Warning, ithread (%d, %s) is an entropy source.\n",
- p->p_pid, p->p_comm);
+ if (entropy)
ithd->it_flags |= IT_ENTROPY;
- }
else
ithd->it_flags &= ~IT_ENTROPY;
}
@@ -162,6 +165,10 @@ ithread_create(struct ithd **ithread, int vector, int flags,
int error;
va_list ap;
+ /* The only valid flag during creation is IT_SOFT. */
+ if ((flags & ~IT_SOFT) != 0)
+ return (EINVAL);
+
ithd = malloc(sizeof(struct ithd), M_ITHREAD, M_WAITOK | M_ZERO);
ithd->it_vector = vector;
ithd->it_disable = disable;
@@ -268,24 +275,23 @@ ithread_remove_handler(void *cookie)
struct ithd *ithread;
#ifdef INVARIANTS
struct intrhand *ih;
- int found;
#endif
- if (handler == NULL || (ithread = handler->ih_ithread) == NULL)
+ if (handler == NULL)
return (EINVAL);
+ KASSERT((ithread = handler->ih_ithread) != NULL,
+ ("interrupt handler \"%s\" has a NULL interrupt thread",
+ handler->ih_name));
mtx_lock_spin(&ithread_list_lock);
#ifdef INVARIANTS
- found = 0;
TAILQ_FOREACH(ih, &ithread->it_handlers, ih_next)
- if (ih == handler) {
- found++;
- break;
- }
- if (found == 0) {
- mtx_unlock_spin(&ithread_list_lock);
- return (EINVAL);
- }
+ if (ih == handler)
+ goto ok;
+ mtx_unlock_spin(&ithread_list_lock);
+ panic("interrupt handler \"%s\" not found in interrupt thread \"%s\"",
+ ih->ih_name, ithread->it_name);
+ok:
#endif
TAILQ_REMOVE(&ithread->it_handlers, handler, ih_next);
ithread_update(ithread);
@@ -296,27 +302,89 @@ ithread_remove_handler(void *cookie)
}
int
+ithread_schedule(struct ithd *ithread, int do_switch)
+{
+ struct int_entropy entropy;
+ struct proc *p;
+ intrmask_t saveintr;
+
+ /*
+ * If no ithread or no handlers, then we have a stray interrupt.
+ */
+ if ((ithread == NULL) || TAILQ_EMPTY(&ithread->it_handlers))
+ return (EINVAL);
+
+ /*
+ * If any of the handlers for this ithread claim to be good
+ * sources of entropy, then gather some.
+ */
+ if (harvest.interrupt && ithread->it_flags & IT_ENTROPY) {
+ entropy.vector = ithread->it_vector;
+ entropy.proc = CURPROC;
+ random_harvest(&entropy, sizeof(entropy), 2, 0,
+ RANDOM_INTERRUPT);
+ }
+
+ p = ithread->it_proc;
+ CTR3(KTR_INTR, __func__ ": pid %d: (%s) need = %d", p->p_pid, p->p_comm,
+ ithread->it_need);
+
+ /*
+ * Set it_need to tell the thread to keep running if it is already
+ * running. Then, grab sched_lock and see if we actually need to
+ * put this thread on the runqueue. If so and the do_switch flag is
+ * true, then switch to the ithread immediately. Otherwise, use
+ * need_resched() to guarantee that this ithread will run before any
+ * userland processes.
+ */
+ ithread->it_need = 1;
+ mtx_lock_spin(&sched_lock);
+ if (p->p_stat == SWAIT) {
+ CTR1(KTR_INTR, __func__ ": setrunqueue %d", p->p_pid);
+ p->p_stat = SRUN;
+ setrunqueue(p);
+#if !defined(__alpha__) || defined(PREEMPTION)
+ if (do_switch) {
+ saveintr = sched_lock.mtx_saveintr;
+ mtx_intr_enable(&sched_lock);
+ if (curproc != PCPU_GET(idleproc))
+ setrunqueue(curproc);
+ curproc->p_stats->p_ru.ru_nvcsw++;
+ mi_switch();
+ sched_lock.mtx_saveintr = saveintr;
+ } else
+#endif
+ need_resched();
+ } else {
+ CTR3(KTR_INTR, __func__ ": pid %d: it_need %d, state %d",
+ p->p_pid, ithread->it_need, p->p_stat);
+ }
+ mtx_unlock_spin(&sched_lock);
+
+ return (0);
+}
+
+int
swi_add(struct ithd **ithdp, const char *name, driver_intr_t handler,
void *arg, int pri, enum intr_type flags, void **cookiep)
{
- struct proc *p;
struct ithd *ithd;
int error;
+ if (flags & (INTR_FAST | INTR_ENTROPY))
+ return (EINVAL);
+
ithd = (ithdp != NULL) ? *ithdp : NULL;
- if (ithd == NULL) {
+ if (ithd != NULL) {
+ if ((ithd->it_flags & IT_SOFT) == 0)
+ return(EINVAL);
+ } else {
error = ithread_create(&ithd, pri, IT_SOFT, NULL, NULL,
"swi%d:", pri);
if (error)
return (error);
- /* XXX - some hacks are _really_ gross */
- p = ithd->it_proc;
- PROC_LOCK(p);
- if (pri == SWI_CLOCK)
- p->p_flag |= P_NOLOAD;
- PROC_UNLOCK(p);
if (ithdp != NULL)
*ithdp = ithd;
}
@@ -334,6 +402,7 @@ swi_sched(void *cookie, int flags)
struct intrhand *ih = (struct intrhand *)cookie;
struct ithd *it = ih->ih_ithread;
struct proc *p = it->it_proc;
+ int error;
atomic_add_int(&cnt.v_intr, 1); /* one more global interrupt */
@@ -341,33 +410,14 @@ swi_sched(void *cookie, int flags)
p->p_pid, p->p_comm, it->it_need);
/*
- * Set it_need so that if the thread is already running but close
- * to done, it will do another go-round. Then get the sched lock
- * and see if the thread is on whichkqs yet. If not, put it on
- * there. In any case, kick everyone so that if the new thread
- * is higher priority than their current thread, it gets run now.
+ * Set ih_need for this handler so that if the ithread is already
+ * running it will execute this handler on the next pass. Otherwise,
+ * it will execute it the next time it runs.
*/
atomic_store_rel_int(&ih->ih_need, 1);
if (!(flags & SWI_DELAY)) {
- it->it_need = 1;
- mtx_lock_spin(&sched_lock);
- if (p->p_stat == SWAIT) { /* not on run queue */
- CTR1(KTR_INTR, "swi_sched: setrunqueue %d", p->p_pid);
- p->p_stat = SRUN;
- setrunqueue(p);
- if (!cold && flags & SWI_SWITCH) {
- if (curproc != PCPU_GET(idleproc))
- setrunqueue(curproc);
- curproc->p_stats->p_ru.ru_nvcsw++;
- mi_switch();
- } else
- need_resched();
- }
- else {
- CTR3(KTR_INTR, "swi_sched %d: it_need %d, state %d",
- p->p_pid, it->it_need, p->p_stat );
- }
- mtx_unlock_spin(&sched_lock);
+ error = ithread_schedule(it, !cold && flags & SWI_SWITCH);
+ KASSERT(error == 0, ("stray software interrupt"));
}
}
@@ -476,6 +526,10 @@ start_softintr(void *dummy)
INTR_MPSAFE, &softclock_ih) ||
swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, 0, &vm_ih))
panic("died while creating standard software ithreads");
+
+ PROC_LOCK(clk_ithd->it_proc);
+ clk_ithd->it_proc->p_flag |= P_NOLOAD;
+ PROC_UNLOCK(clk_ithd->it_proc);
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)
diff --git a/sys/sys/interrupt.h b/sys/sys/interrupt.h
index a0bea56..1bc48d6 100644
--- a/sys/sys/interrupt.h
+++ b/sys/sys/interrupt.h
@@ -88,7 +88,7 @@ extern void *softclock_ih;
extern void *vm_ih;
int ithread_create __P((struct ithd **ithread, int vector, int flags,
- void (*disable)(int), void (*enable)(int), const char *name, ...))
+ void (*disable)(int), void (*enable)(int), const char *fmt, ...))
__printflike(6, 7);
int ithread_destroy __P((struct ithd *ithread));
u_char ithread_priority __P((enum intr_type flags));
@@ -96,6 +96,7 @@ int ithread_add_handler __P((struct ithd *ithread, const char *name,
driver_intr_t handler, void *arg, u_char pri, enum intr_type flags,
void **cookiep));
int ithread_remove_handler __P((void *cookie));
+int ithread_schedule __P((struct ithd *ithread, int do_switch));
int swi_add __P((struct ithd **ithdp, const char *name,
driver_intr_t handler, void *arg, int pri, enum intr_type flags,
void **cookiep));
OpenPOWER on IntegriCloud