summaryrefslogtreecommitdiffstats
path: root/sys/i386/isa/ipl_funcs.c
diff options
context:
space:
mode:
authorfsmp <fsmp@FreeBSD.org>1997-08-24 00:05:37 +0000
committerfsmp <fsmp@FreeBSD.org>1997-08-24 00:05:37 +0000
commit618ef60cbd7b8b77d94128a1512d8332bdd69108 (patch)
tree4729e2ae70430cc50843ab3f87b9f48f20fa6957 /sys/i386/isa/ipl_funcs.c
parentfc8b5b4955e847d86a82903f1e573bba3a391e5b (diff)
downloadFreeBSD-src-618ef60cbd7b8b77d94128a1512d8332bdd69108.zip
FreeBSD-src-618ef60cbd7b8b77d94128a1512d8332bdd69108.tar.gz
The last of the encapsulation of cpl/spl/ipending things into a critical
region protected by the simplelock 'cpl_lock'. Notes: - this code is currently controlled on a section-by-section basis with defines in machine/param.h. All sections are currently enabled. - this code is not as clean as I would like, but that can wait till later. - the "giant lock" still surrounds most instances of this "cpl region". I still have to do the code that arbitrates setting cpl between the top and bottom halves of the kernel. - the possibility of deadlock exists, I am committing the code at this point so as to exercise it and detect any such cases before the "giant lock" is removed.
Diffstat (limited to 'sys/i386/isa/ipl_funcs.c')
-rw-r--r-- sys/i386/isa/ipl_funcs.c | 161
1 files changed, 157 insertions, 4 deletions
diff --git a/sys/i386/isa/ipl_funcs.c b/sys/i386/isa/ipl_funcs.c
index c0849cd..60cfb27 100644
--- a/sys/i386/isa/ipl_funcs.c
+++ b/sys/i386/isa/ipl_funcs.c
@@ -23,13 +23,14 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: ipl_funcs.c,v 1.1 1997/05/31 08:57:05 peter Exp $
+ * $Id: ipl_funcs.c,v 1.6 1997/08/23 23:15:19 smp Exp smp $
*/
#include <sys/types.h>
#include <sys/systm.h>
#include <machine/ipl.h>
+#ifndef SMP
/*
* The volatile bitmap variables must be set atomically. This normally
* involves using a machine-dependent bit-set or `or' instruction.
@@ -93,12 +94,164 @@ splx(unsigned ipl)
splz();
}
-#ifdef SMP
+#else /* !SMP */
+
+#include <machine/param.h>
+#include <machine/smp.h>
+
+#if defined(REAL_IFCPL)
+
+#define IFCPL_LOCK() SCPL_LOCK()
+#define IFCPL_UNLOCK() SCPL_UNLOCK()
+
+#else /* REAL_IFCPL */
+
+#define IFCPL_LOCK()
+#define IFCPL_UNLOCK()
+
+#endif /* REAL_IFCPL */
+
+/*
+ * The volatile bitmap variables must be set atomically. This normally
+ * involves using a machine-dependent bit-set or `or' instruction.
+ */
+
+#define DO_SETBITS(name, var, bits) \
+void name(void) \
+{ \
+ IFCPL_LOCK(); \
+ setbits(var, bits); \
+ IFCPL_UNLOCK(); \
+}
+
+DO_SETBITS(setdelayed, &ipending, loadandclear((unsigned *)&idelayed))
+DO_SETBITS(setsoftast, &ipending, SWI_AST_PENDING)
+DO_SETBITS(setsoftclock, &ipending, SWI_CLOCK_PENDING)
+DO_SETBITS(setsoftnet, &ipending, SWI_NET_PENDING)
+DO_SETBITS(setsofttty, &ipending, SWI_TTY_PENDING)
+
+DO_SETBITS(schedsoftnet, &idelayed, SWI_NET_PENDING)
+DO_SETBITS(schedsofttty, &idelayed, SWI_TTY_PENDING)
+
+unsigned
+softclockpending(void)
+{
+ unsigned x;
+
+ IFCPL_LOCK();
+ x = ipending & SWI_CLOCK_PENDING;
+ IFCPL_UNLOCK();
+
+ return x;
+}
+
+
+#define GENSPL(name, set_cpl) \
+unsigned name(void) \
+{ \
+ unsigned x; \
+ \
+ IFCPL_LOCK(); \
+ x = cpl; \
+ /* XXX test cil */ \
+ set_cpl; \
+ IFCPL_UNLOCK(); \
+ \
+ return (x); \
+}
+
+GENSPL(splbio, cpl |= bio_imask)
+GENSPL(splclock, cpl = HWI_MASK | SWI_MASK)
+GENSPL(splimp, cpl |= net_imask)
+GENSPL(splnet, cpl |= SWI_NET_MASK)
+GENSPL(splsoftclock, cpl = SWI_CLOCK_MASK)
+GENSPL(splsofttty, cpl |= SWI_TTY_MASK)
+GENSPL(splstatclock, cpl |= stat_imask)
+GENSPL(splvm, cpl |= net_imask | bio_imask)
+
+
+/*
+ * This version has to check for smp_active,
+ * as calling simple_lock() (ie ss_lock) before then deadlocks the system.
+ */
+#define GENSPL2(name, set_cpl) \
+unsigned name(void) \
+{ \
+ unsigned x; \
+ \
+ if (smp_active) \
+ IFCPL_LOCK(); \
+ x = cpl; \
+ /* XXX test cil */ \
+ set_cpl; \
+ if (smp_active) \
+ IFCPL_UNLOCK(); \
+ \
+ return (x); \
+}
+
+GENSPL2(splhigh, cpl = HWI_MASK | SWI_MASK)
+GENSPL2(spltty, cpl |= tty_imask)
+
+
+void
+spl0(void)
+{
+ IFCPL_LOCK();
+
+ /* XXX test cil */
+ cpl = SWI_AST_MASK;
+ if (ipending & ~SWI_AST_MASK) {
+ IFCPL_UNLOCK();
+ splz();
+ }
+ else
+ IFCPL_UNLOCK();
+}
+
+void
+splx(unsigned ipl)
+{
+ if (smp_active)
+ IFCPL_LOCK();
+
+ /* XXX test cil */
+ cpl = ipl;
+ if (ipending & ~ipl) {
+ if (smp_active)
+ IFCPL_UNLOCK();
+ splz();
+ }
+ else
+ if (smp_active)
+ IFCPL_UNLOCK();
+}
+
+
+/*
+ * Replaces UP specific inline found in (?) pci/pci_support.c.
+ *
+ * Stefan said:
+ * You know, that splq() is used in the shared interrupt multiplexer, and that
+ * the SMP version should not have too much overhead. If it is significantly
+ * slower, then moving the splq() out of the loop in intr_mux() and passing in
+ * the logical OR of all mask values might be a better solution than the
+ * current code. (This logical OR could of course be pre-calculated whenever
+ * another shared interrupt is registered ...)
+ */
intrmask_t
splq(intrmask_t mask)
{
- intrmask_t tmp = cpl;
+ intrmask_t tmp;
+
+ IFCPL_LOCK();
+
+ tmp = cpl;
cpl |= mask;
+
+ IFCPL_UNLOCK();
+
return (tmp);
}
-#endif
+
+#endif /* !SMP */
OpenPOWER on IntegriCloud