author		jhb <jhb@FreeBSD.org>	2002-01-05 08:47:13 +0000
committer	jhb <jhb@FreeBSD.org>	2002-01-05 08:47:13 +0000
commit		1ce407b6755fc202866c4e1e2887b37a6fc09a9a (patch)
tree		619ee98d73f5552ae93c3c8fee5af6b40ff37908
parent		2f03379495e351d56fc8e84f79e7d2150f6b4b49 (diff)
download	FreeBSD-src-1ce407b6755fc202866c4e1e2887b37a6fc09a9a.zip
		FreeBSD-src-1ce407b6755fc202866c4e1e2887b37a6fc09a9a.tar.gz
Change the preemption code for software interrupt thread schedules and
mutex releases to not require flags for the cases when preemption is not
allowed:

The purpose of the MTX_NOSWITCH and SWI_NOSWITCH flags is to prevent
switching to a higher priority thread on mutex release and swi schedule,
respectively, when that switch is not safe.  Now that the critical section
API maintains a per-thread nesting count, the kernel can easily check
whether or not it should switch without relying on flags from the
programmer.  This fixes a few bugs in that all current callers of
swi_sched() used SWI_NOSWITCH, when in fact, only the ones called from
fast interrupt handlers and the swi_sched of softclock needed this flag.
Note that to ensure that swi_sched()'s in clock and fast interrupt
handlers do not switch, these handlers have to be explicitly wrapped
in critical_enter/exit pairs.  Presently, just wrapping the handlers
is sufficient, but in the future with the fully preemptive kernel,
the interrupt must be EOI'd before critical_exit() is called.
(critical_exit() can switch due to a deferred preemption in a fully
preemptive kernel.)

I've tested the changes to the interrupt code on i386 and alpha.  I have
not tested ia64, but the interrupt code is almost identical to the alpha
code, so I expect it will work fine.  PowerPC and ARM do not yet have
interrupt code in the tree, so they shouldn't be broken.  Sparc64 is
broken, but that's been ok'd by jake and tmm, who will be fixing the
interrupt code for sparc64 shortly.

Reviewed by:	peter
Tested on:	i386, alpha
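The new convention can be summarized in a short sketch (illustrative only,
not part of this commit; my_swi_cookie, my_fast_handler, and
my_dispatch_fast are hypothetical names): callers that previously passed
SWI_NOSWITCH now pass 0, and fast interrupt dispatch code defers any
switch by bracketing the handler in a critical section.

    /*
     * Illustrative sketch of the new convention (not part of this commit).
     * The names below are hypothetical.
     */
    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/interrupt.h>

    static void *my_swi_cookie;		/* cookie returned by swi_add() */

    static void
    my_fast_handler(void *arg)
    {
    	/* Queue deferred work; 0 replaces the removed SWI_NOSWITCH flag. */
    	swi_sched(my_swi_cookie, 0);
    }

    static void
    my_dispatch_fast(void (*handler)(void *), void *arg)
    {
    	/*
    	 * A critical section bumps the per-thread nesting count, so the
    	 * swi_sched() above (and any mutex release inside the handler)
    	 * will not context switch until critical_exit() is reached.
    	 */
    	critical_enter();
    	handler(arg);
    	critical_exit();
    }

With this change, ithread_schedule() and _mtx_unlock_sleep() perform the
equivalent check themselves by looking at td_critnest, so callers no
longer need the NOSWITCH flags.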
-rw-r--r--	sys/alpha/alpha/busdma_machdep.c	2
-rw-r--r--	sys/alpha/alpha/interrupt.c	4
-rw-r--r--	sys/amd64/amd64/apic_vector.S	2
-rw-r--r--	sys/amd64/amd64/busdma_machdep.c	2
-rw-r--r--	sys/amd64/isa/atpic_vector.S	2
-rw-r--r--	sys/amd64/isa/icu_vector.S	2
-rw-r--r--	sys/amd64/isa/icu_vector.s	2
-rw-r--r--	sys/cam/cam_xpt.c	4
-rw-r--r--	sys/dev/acpica/Osd/OsdSchedule.c	2
-rw-r--r--	sys/dev/cy/cy.c	20
-rw-r--r--	sys/dev/cy/cy_isa.c	20
-rw-r--r--	sys/dev/rc/rc.c	8
-rw-r--r--	sys/dev/sio/sio.c	8
-rw-r--r--	sys/i386/i386/apic_vector.s	2
-rw-r--r--	sys/i386/i386/busdma_machdep.c	2
-rw-r--r--	sys/i386/isa/apic_vector.s	2
-rw-r--r--	sys/i386/isa/atpic_vector.s	2
-rw-r--r--	sys/i386/isa/cy.c	20
-rw-r--r--	sys/i386/isa/icu_vector.s	2
-rw-r--r--	sys/i386/isa/rc.c	8
-rw-r--r--	sys/ia64/ia64/busdma_machdep.c	2
-rw-r--r--	sys/ia64/ia64/interrupt.c	4
-rw-r--r--	sys/kern/kern_clock.c	2
-rw-r--r--	sys/kern/kern_condvar.c	18
-rw-r--r--	sys/kern/kern_exit.c	4
-rw-r--r--	sys/kern/kern_intr.c	13
-rw-r--r--	sys/kern/kern_mutex.c	14
-rw-r--r--	sys/kern/kern_shutdown.c	2
-rw-r--r--	sys/kern/kern_sig.c	8
-rw-r--r--	sys/kern/kern_subr.c	2
-rw-r--r--	sys/kern/kern_synch.c	12
-rw-r--r--	sys/kern/subr_taskqueue.c	2
-rw-r--r--	sys/kern/subr_trap.c	2
-rw-r--r--	sys/kern/subr_turnstile.c	14
-rw-r--r--	sys/kern/subr_witness.c	16
-rw-r--r--	sys/pc98/cbus/sio.c	8
-rw-r--r--	sys/pc98/pc98/sio.c	8
-rw-r--r--	sys/sys/interrupt.h	4
-rw-r--r--	sys/sys/lock.h	1
-rw-r--r--	sys/sys/mutex.h	13
-rw-r--r--	sys/sys/proc.h	2
-rw-r--r--	sys/vm/vm_glue.c	2
42 files changed, 125 insertions, 144 deletions
diff --git a/sys/alpha/alpha/busdma_machdep.c b/sys/alpha/alpha/busdma_machdep.c
index 1c44b9e..58a31c0 100644
--- a/sys/alpha/alpha/busdma_machdep.c
+++ b/sys/alpha/alpha/busdma_machdep.c
@@ -697,7 +697,7 @@ free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
map, links);
busdma_swi_pending = 1;
- swi_sched(vm_ih, SWI_NOSWITCH);
+ swi_sched(vm_ih, 0);
}
}
mtx_unlock(&bounce_lock);
diff --git a/sys/alpha/alpha/interrupt.c b/sys/alpha/alpha/interrupt.c
index 3143856..31a74d5 100644
--- a/sys/alpha/alpha/interrupt.c
+++ b/sys/alpha/alpha/interrupt.c
@@ -437,7 +437,9 @@ alpha_dispatch_intr(void *frame, unsigned long vector)
*/
ih = TAILQ_FIRST(&ithd->it_handlers);
if ((ih->ih_flags & IH_FAST) != 0) {
+ critical_enter();
ih->ih_handler(ih->ih_argument);
+ critical_exit();
return;
}
@@ -461,6 +463,7 @@ alpha_clock_interrupt(struct trapframe *framep)
intrcnt[INTRCNT_CLOCK]++;
#endif
if (platform.clockintr) {
+ critical_enter();
#ifdef SMP
/*
* Only one processor drives the actual timer.
@@ -481,5 +484,6 @@ alpha_clock_interrupt(struct trapframe *framep)
mtx_unlock_spin(&sched_lock);
}
#endif
+ critical_exit();
}
}
diff --git a/sys/amd64/amd64/apic_vector.S b/sys/amd64/amd64/apic_vector.S
index 6147b31..95c9133 100644
--- a/sys/amd64/amd64/apic_vector.S
+++ b/sys/amd64/amd64/apic_vector.S
@@ -46,6 +46,7 @@ IDTVEC(vec_name) ; \
movl $KPSEL,%eax ; \
mov %ax,%fs ; \
FAKE_MCOUNT(13*4(%esp)) ; \
+ call critical_enter ; \
movl PCPU(CURTHREAD),%ebx ; \
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
pushl intr_unit + (irq_num) * 4 ; \
@@ -58,6 +59,7 @@ IDTVEC(vec_name) ; \
lock ; \
incl (%eax) ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
+ call critical_exit ; \
MEXITCOUNT ; \
jmp doreti
diff --git a/sys/amd64/amd64/busdma_machdep.c b/sys/amd64/amd64/busdma_machdep.c
index f20e80f..a3340b4 100644
--- a/sys/amd64/amd64/busdma_machdep.c
+++ b/sys/amd64/amd64/busdma_machdep.c
@@ -645,7 +645,7 @@ free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
map, links);
busdma_swi_pending = 1;
- swi_sched(vm_ih, SWI_NOSWITCH);
+ swi_sched(vm_ih, 0);
}
}
splx(s);
diff --git a/sys/amd64/isa/atpic_vector.S b/sys/amd64/isa/atpic_vector.S
index bd38dda..4e10cc2 100644
--- a/sys/amd64/isa/atpic_vector.S
+++ b/sys/amd64/isa/atpic_vector.S
@@ -61,6 +61,7 @@ IDTVEC(vec_name) ; \
mov $KPSEL,%ax ; \
mov %ax,%fs ; \
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
+ call critical_enter ; \
movl PCPU(CURTHREAD),%ebx ; \
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
pushl intr_unit + (irq_num) * 4 ; \
@@ -71,6 +72,7 @@ IDTVEC(vec_name) ; \
movl intr_countp + (irq_num) * 4,%eax ; \
incl (%eax) ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
+ call critical_exit ; \
MEXITCOUNT ; \
jmp doreti
diff --git a/sys/amd64/isa/icu_vector.S b/sys/amd64/isa/icu_vector.S
index bd38dda..4e10cc2 100644
--- a/sys/amd64/isa/icu_vector.S
+++ b/sys/amd64/isa/icu_vector.S
@@ -61,6 +61,7 @@ IDTVEC(vec_name) ; \
mov $KPSEL,%ax ; \
mov %ax,%fs ; \
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
+ call critical_enter ; \
movl PCPU(CURTHREAD),%ebx ; \
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
pushl intr_unit + (irq_num) * 4 ; \
@@ -71,6 +72,7 @@ IDTVEC(vec_name) ; \
movl intr_countp + (irq_num) * 4,%eax ; \
incl (%eax) ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
+ call critical_exit ; \
MEXITCOUNT ; \
jmp doreti
diff --git a/sys/amd64/isa/icu_vector.s b/sys/amd64/isa/icu_vector.s
index bd38dda..4e10cc2 100644
--- a/sys/amd64/isa/icu_vector.s
+++ b/sys/amd64/isa/icu_vector.s
@@ -61,6 +61,7 @@ IDTVEC(vec_name) ; \
mov $KPSEL,%ax ; \
mov %ax,%fs ; \
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
+ call critical_enter ; \
movl PCPU(CURTHREAD),%ebx ; \
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
pushl intr_unit + (irq_num) * 4 ; \
@@ -71,6 +72,7 @@ IDTVEC(vec_name) ; \
movl intr_countp + (irq_num) * 4,%eax ; \
incl (%eax) ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
+ call critical_exit ; \
MEXITCOUNT ; \
jmp doreti
diff --git a/sys/cam/cam_xpt.c b/sys/cam/cam_xpt.c
index 06d7b02..3e12dc3 100644
--- a/sys/cam/cam_xpt.c
+++ b/sys/cam/cam_xpt.c
@@ -4762,13 +4762,13 @@ xpt_done(union ccb *done_ccb)
TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
sim_links.tqe);
done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
- swi_sched(cambio_ih, SWI_NOSWITCH);
+ swi_sched(cambio_ih, 0);
break;
case CAM_PERIPH_NET:
TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h,
sim_links.tqe);
done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
- swi_sched(camnet_ih, SWI_NOSWITCH);
+ swi_sched(camnet_ih, 0);
break;
}
}
diff --git a/sys/dev/acpica/Osd/OsdSchedule.c b/sys/dev/acpica/Osd/OsdSchedule.c
index 60a8f48..6a47cba 100644
--- a/sys/dev/acpica/Osd/OsdSchedule.c
+++ b/sys/dev/acpica/Osd/OsdSchedule.c
@@ -83,7 +83,7 @@ static void *taskqueue_acpi_ih;
static void
taskqueue_acpi_enqueue(void *context)
{
- swi_sched(taskqueue_acpi_ih, SWI_NOSWITCH);
+ swi_sched(taskqueue_acpi_ih, 0);
}
static void
diff --git a/sys/dev/cy/cy.c b/sys/dev/cy/cy.c
index 0510e72..3d73faa 100644
--- a/sys/dev/cy/cy.c
+++ b/sys/dev/cy/cy.c
@@ -1181,7 +1181,7 @@ siointr(unit)
#ifndef SOFT_HOTCHAR
if (line_status & CD1400_RDSR_SPECIAL
&& com->hotchar != 0)
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
#endif
#if 1 /* XXX "intelligent" PFO error handling would break O error handling */
@@ -1209,7 +1209,7 @@ siointr(unit)
++com->bytes_in;
#ifdef SOFT_HOTCHAR
if (com->hotchar != 0 && recv_data == com->hotchar)
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
#endif
ioptr = com->iptr;
if (ioptr >= com->ibufend)
@@ -1259,7 +1259,7 @@ siointr(unit)
if (com->hotchar != 0
&& recv_data
== com->hotchar)
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
#endif
ioptr[0] = recv_data;
ioptr[com->ierroff] = 0;
@@ -1274,7 +1274,7 @@ siointr(unit)
#ifdef SOFT_HOTCHAR
if (com->hotchar != 0
&& recv_data == com->hotchar)
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
#endif
} while (--count != 0);
} else {
@@ -1299,7 +1299,7 @@ siointr(unit)
#ifdef SOFT_HOTCHAR
if (com->hotchar != 0
&& recv_data == com->hotchar)
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
#endif
ioptr[0] = recv_data;
ioptr[com->ierroff] = 0;
@@ -1364,7 +1364,7 @@ cont:
if (!(com->state & CS_CHECKMSR)) {
com_events += LOTS_OF_EVENTS;
com->state |= CS_CHECKMSR;
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
}
#ifdef SOFT_CTS_OFLOW
@@ -1495,7 +1495,7 @@ cont:
if (!(com->state & CS_ODONE)) {
com_events += LOTS_OF_EVENTS;
com->state |= CS_ODONE;
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
}
break;
case ETC_BREAK_ENDED:
@@ -1509,7 +1509,7 @@ cont:
if (!(com->extra_state & CSE_ODONE)) {
com_events += LOTS_OF_EVENTS;
com->extra_state |= CSE_ODONE;
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
}
cd_outb(iobase, CD1400_SRER, cy_align,
com->intr_enable
@@ -1567,7 +1567,7 @@ cont:
com->state |= CS_ODONE;
/* handle at high level ASAP */
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
}
}
}
@@ -1587,7 +1587,7 @@ terminate_tx_service:
/* ensure an edge for the next interrupt */
cy_outb(cy_iobase, CY_CLEAR_INTR, cy_align, 0);
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
COM_UNLOCK();
}
diff --git a/sys/dev/cy/cy_isa.c b/sys/dev/cy/cy_isa.c
index 0510e72..3d73faa 100644
--- a/sys/dev/cy/cy_isa.c
+++ b/sys/dev/cy/cy_isa.c
@@ -1181,7 +1181,7 @@ siointr(unit)
#ifndef SOFT_HOTCHAR
if (line_status & CD1400_RDSR_SPECIAL
&& com->hotchar != 0)
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
#endif
#if 1 /* XXX "intelligent" PFO error handling would break O error handling */
@@ -1209,7 +1209,7 @@ siointr(unit)
++com->bytes_in;
#ifdef SOFT_HOTCHAR
if (com->hotchar != 0 && recv_data == com->hotchar)
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
#endif
ioptr = com->iptr;
if (ioptr >= com->ibufend)
@@ -1259,7 +1259,7 @@ siointr(unit)
if (com->hotchar != 0
&& recv_data
== com->hotchar)
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
#endif
ioptr[0] = recv_data;
ioptr[com->ierroff] = 0;
@@ -1274,7 +1274,7 @@ siointr(unit)
#ifdef SOFT_HOTCHAR
if (com->hotchar != 0
&& recv_data == com->hotchar)
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
#endif
} while (--count != 0);
} else {
@@ -1299,7 +1299,7 @@ siointr(unit)
#ifdef SOFT_HOTCHAR
if (com->hotchar != 0
&& recv_data == com->hotchar)
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
#endif
ioptr[0] = recv_data;
ioptr[com->ierroff] = 0;
@@ -1364,7 +1364,7 @@ cont:
if (!(com->state & CS_CHECKMSR)) {
com_events += LOTS_OF_EVENTS;
com->state |= CS_CHECKMSR;
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
}
#ifdef SOFT_CTS_OFLOW
@@ -1495,7 +1495,7 @@ cont:
if (!(com->state & CS_ODONE)) {
com_events += LOTS_OF_EVENTS;
com->state |= CS_ODONE;
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
}
break;
case ETC_BREAK_ENDED:
@@ -1509,7 +1509,7 @@ cont:
if (!(com->extra_state & CSE_ODONE)) {
com_events += LOTS_OF_EVENTS;
com->extra_state |= CSE_ODONE;
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
}
cd_outb(iobase, CD1400_SRER, cy_align,
com->intr_enable
@@ -1567,7 +1567,7 @@ cont:
com->state |= CS_ODONE;
/* handle at high level ASAP */
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
}
}
}
@@ -1587,7 +1587,7 @@ terminate_tx_service:
/* ensure an edge for the next interrupt */
cy_outb(cy_iobase, CY_CLEAR_INTR, cy_align, 0);
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
COM_UNLOCK();
}
diff --git a/sys/dev/rc/rc.c b/sys/dev/rc/rc.c
index 59e8dff..d9bd2ad 100644
--- a/sys/dev/rc/rc.c
+++ b/sys/dev/rc/rc.c
@@ -362,7 +362,7 @@ rcintr(unit)
optr++;
rc_scheduled_event++;
if (val != 0 && val == rc->rc_hotchar)
- swi_sched(rc_ih, SWI_NOSWITCH);
+ swi_sched(rc_ih, 0);
}
} else {
/* Store also status data */
@@ -393,7 +393,7 @@ rcintr(unit)
&& (rc->rc_tp->t_iflag & INPCK))))
val = 0;
else if (val != 0 && val == rc->rc_hotchar)
- swi_sched(rc_ih, SWI_NOSWITCH);
+ swi_sched(rc_ih, 0);
optr[0] = val;
optr[INPUT_FLAGS_SHIFT] = iack;
optr++;
@@ -440,7 +440,7 @@ rcintr(unit)
if ((iack & MCR_CDchg) && !(rc->rc_flags & RC_MODCHG)) {
rc_scheduled_event += LOTS_OF_EVENTS;
rc->rc_flags |= RC_MODCHG;
- swi_sched(rc_ih, SWI_NOSWITCH);
+ swi_sched(rc_ih, 0);
}
goto more_intrs;
}
@@ -481,7 +481,7 @@ rcintr(unit)
if (!(rc->rc_flags & RC_DOXXFER)) {
rc_scheduled_event += LOTS_OF_EVENTS;
rc->rc_flags |= RC_DOXXFER;
- swi_sched(rc_ih, SWI_NOSWITCH);
+ swi_sched(rc_ih, 0);
}
}
}
diff --git a/sys/dev/sio/sio.c b/sys/dev/sio/sio.c
index 76eade6..5603096 100644
--- a/sys/dev/sio/sio.c
+++ b/sys/dev/sio/sio.c
@@ -1748,7 +1748,7 @@ siointr1(com)
}
++com->bytes_in;
if (com->hotchar != 0 && recv_data == com->hotchar)
- swi_sched(sio_fast_ih, SWI_NOSWITCH);
+ swi_sched(sio_fast_ih, 0);
ioptr = com->iptr;
if (ioptr >= com->ibufend)
CE_RECORD(com, CE_INTERRUPT_BUF_OVERFLOW);
@@ -1759,7 +1759,7 @@ siointr1(com)
swi_sched(sio_slow_ih, SWI_DELAY);
#if 0 /* for testing input latency vs efficiency */
if (com->iptr - com->ibuf == 8)
- swi_sched(sio_fast_ih, SWI_NOSWITCH);
+ swi_sched(sio_fast_ih, 0);
#endif
ioptr[0] = recv_data;
ioptr[com->ierroff] = line_status;
@@ -1797,7 +1797,7 @@ cont:
if (!(com->state & CS_CHECKMSR)) {
com_events += LOTS_OF_EVENTS;
com->state |= CS_CHECKMSR;
- swi_sched(sio_fast_ih, SWI_NOSWITCH);
+ swi_sched(sio_fast_ih, 0);
}
/* handle CTS change immediately for crisp flow ctl */
@@ -1852,7 +1852,7 @@ cont:
com_events += LOTS_OF_EVENTS;
com->state |= CS_ODONE;
/* handle at high level ASAP */
- swi_sched(sio_fast_ih, SWI_NOSWITCH);
+ swi_sched(sio_fast_ih, 0);
}
}
if (COM_IIR_TXRDYBUG(com->flags) && (int_ctl != int_ctl_new)) {
diff --git a/sys/i386/i386/apic_vector.s b/sys/i386/i386/apic_vector.s
index 6147b31..95c9133 100644
--- a/sys/i386/i386/apic_vector.s
+++ b/sys/i386/i386/apic_vector.s
@@ -46,6 +46,7 @@ IDTVEC(vec_name) ; \
movl $KPSEL,%eax ; \
mov %ax,%fs ; \
FAKE_MCOUNT(13*4(%esp)) ; \
+ call critical_enter ; \
movl PCPU(CURTHREAD),%ebx ; \
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
pushl intr_unit + (irq_num) * 4 ; \
@@ -58,6 +59,7 @@ IDTVEC(vec_name) ; \
lock ; \
incl (%eax) ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
+ call critical_exit ; \
MEXITCOUNT ; \
jmp doreti
diff --git a/sys/i386/i386/busdma_machdep.c b/sys/i386/i386/busdma_machdep.c
index f20e80f..a3340b4 100644
--- a/sys/i386/i386/busdma_machdep.c
+++ b/sys/i386/i386/busdma_machdep.c
@@ -645,7 +645,7 @@ free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
map, links);
busdma_swi_pending = 1;
- swi_sched(vm_ih, SWI_NOSWITCH);
+ swi_sched(vm_ih, 0);
}
}
splx(s);
diff --git a/sys/i386/isa/apic_vector.s b/sys/i386/isa/apic_vector.s
index 6147b31..95c9133 100644
--- a/sys/i386/isa/apic_vector.s
+++ b/sys/i386/isa/apic_vector.s
@@ -46,6 +46,7 @@ IDTVEC(vec_name) ; \
movl $KPSEL,%eax ; \
mov %ax,%fs ; \
FAKE_MCOUNT(13*4(%esp)) ; \
+ call critical_enter ; \
movl PCPU(CURTHREAD),%ebx ; \
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
pushl intr_unit + (irq_num) * 4 ; \
@@ -58,6 +59,7 @@ IDTVEC(vec_name) ; \
lock ; \
incl (%eax) ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
+ call critical_exit ; \
MEXITCOUNT ; \
jmp doreti
diff --git a/sys/i386/isa/atpic_vector.s b/sys/i386/isa/atpic_vector.s
index bd38dda..4e10cc2 100644
--- a/sys/i386/isa/atpic_vector.s
+++ b/sys/i386/isa/atpic_vector.s
@@ -61,6 +61,7 @@ IDTVEC(vec_name) ; \
mov $KPSEL,%ax ; \
mov %ax,%fs ; \
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
+ call critical_enter ; \
movl PCPU(CURTHREAD),%ebx ; \
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
pushl intr_unit + (irq_num) * 4 ; \
@@ -71,6 +72,7 @@ IDTVEC(vec_name) ; \
movl intr_countp + (irq_num) * 4,%eax ; \
incl (%eax) ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
+ call critical_exit ; \
MEXITCOUNT ; \
jmp doreti
diff --git a/sys/i386/isa/cy.c b/sys/i386/isa/cy.c
index 0510e72..3d73faa 100644
--- a/sys/i386/isa/cy.c
+++ b/sys/i386/isa/cy.c
@@ -1181,7 +1181,7 @@ siointr(unit)
#ifndef SOFT_HOTCHAR
if (line_status & CD1400_RDSR_SPECIAL
&& com->hotchar != 0)
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
#endif
#if 1 /* XXX "intelligent" PFO error handling would break O error handling */
@@ -1209,7 +1209,7 @@ siointr(unit)
++com->bytes_in;
#ifdef SOFT_HOTCHAR
if (com->hotchar != 0 && recv_data == com->hotchar)
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
#endif
ioptr = com->iptr;
if (ioptr >= com->ibufend)
@@ -1259,7 +1259,7 @@ siointr(unit)
if (com->hotchar != 0
&& recv_data
== com->hotchar)
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
#endif
ioptr[0] = recv_data;
ioptr[com->ierroff] = 0;
@@ -1274,7 +1274,7 @@ siointr(unit)
#ifdef SOFT_HOTCHAR
if (com->hotchar != 0
&& recv_data == com->hotchar)
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
#endif
} while (--count != 0);
} else {
@@ -1299,7 +1299,7 @@ siointr(unit)
#ifdef SOFT_HOTCHAR
if (com->hotchar != 0
&& recv_data == com->hotchar)
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
#endif
ioptr[0] = recv_data;
ioptr[com->ierroff] = 0;
@@ -1364,7 +1364,7 @@ cont:
if (!(com->state & CS_CHECKMSR)) {
com_events += LOTS_OF_EVENTS;
com->state |= CS_CHECKMSR;
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
}
#ifdef SOFT_CTS_OFLOW
@@ -1495,7 +1495,7 @@ cont:
if (!(com->state & CS_ODONE)) {
com_events += LOTS_OF_EVENTS;
com->state |= CS_ODONE;
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
}
break;
case ETC_BREAK_ENDED:
@@ -1509,7 +1509,7 @@ cont:
if (!(com->extra_state & CSE_ODONE)) {
com_events += LOTS_OF_EVENTS;
com->extra_state |= CSE_ODONE;
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
}
cd_outb(iobase, CD1400_SRER, cy_align,
com->intr_enable
@@ -1567,7 +1567,7 @@ cont:
com->state |= CS_ODONE;
/* handle at high level ASAP */
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
}
}
}
@@ -1587,7 +1587,7 @@ terminate_tx_service:
/* ensure an edge for the next interrupt */
cy_outb(cy_iobase, CY_CLEAR_INTR, cy_align, 0);
- swi_sched(sio_ih, SWI_NOSWITCH);
+ swi_sched(sio_ih, 0);
COM_UNLOCK();
}
diff --git a/sys/i386/isa/icu_vector.s b/sys/i386/isa/icu_vector.s
index bd38dda..4e10cc2 100644
--- a/sys/i386/isa/icu_vector.s
+++ b/sys/i386/isa/icu_vector.s
@@ -61,6 +61,7 @@ IDTVEC(vec_name) ; \
mov $KPSEL,%ax ; \
mov %ax,%fs ; \
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
+ call critical_enter ; \
movl PCPU(CURTHREAD),%ebx ; \
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
pushl intr_unit + (irq_num) * 4 ; \
@@ -71,6 +72,7 @@ IDTVEC(vec_name) ; \
movl intr_countp + (irq_num) * 4,%eax ; \
incl (%eax) ; \
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
+ call critical_exit ; \
MEXITCOUNT ; \
jmp doreti
diff --git a/sys/i386/isa/rc.c b/sys/i386/isa/rc.c
index 59e8dff..d9bd2ad 100644
--- a/sys/i386/isa/rc.c
+++ b/sys/i386/isa/rc.c
@@ -362,7 +362,7 @@ rcintr(unit)
optr++;
rc_scheduled_event++;
if (val != 0 && val == rc->rc_hotchar)
- swi_sched(rc_ih, SWI_NOSWITCH);
+ swi_sched(rc_ih, 0);
}
} else {
/* Store also status data */
@@ -393,7 +393,7 @@ rcintr(unit)
&& (rc->rc_tp->t_iflag & INPCK))))
val = 0;
else if (val != 0 && val == rc->rc_hotchar)
- swi_sched(rc_ih, SWI_NOSWITCH);
+ swi_sched(rc_ih, 0);
optr[0] = val;
optr[INPUT_FLAGS_SHIFT] = iack;
optr++;
@@ -440,7 +440,7 @@ rcintr(unit)
if ((iack & MCR_CDchg) && !(rc->rc_flags & RC_MODCHG)) {
rc_scheduled_event += LOTS_OF_EVENTS;
rc->rc_flags |= RC_MODCHG;
- swi_sched(rc_ih, SWI_NOSWITCH);
+ swi_sched(rc_ih, 0);
}
goto more_intrs;
}
@@ -481,7 +481,7 @@ rcintr(unit)
if (!(rc->rc_flags & RC_DOXXFER)) {
rc_scheduled_event += LOTS_OF_EVENTS;
rc->rc_flags |= RC_DOXXFER;
- swi_sched(rc_ih, SWI_NOSWITCH);
+ swi_sched(rc_ih, 0);
}
}
}
diff --git a/sys/ia64/ia64/busdma_machdep.c b/sys/ia64/ia64/busdma_machdep.c
index 8e10409..2247e35 100644
--- a/sys/ia64/ia64/busdma_machdep.c
+++ b/sys/ia64/ia64/busdma_machdep.c
@@ -689,7 +689,7 @@ free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
map, links);
busdma_swi_pending = 1;
- swi_sched(vm_ih, SWI_NOSWITCH);
+ swi_sched(vm_ih, 0);
}
}
splx(s);
diff --git a/sys/ia64/ia64/interrupt.c b/sys/ia64/ia64/interrupt.c
index 8e177e3..5dce36f 100644
--- a/sys/ia64/ia64/interrupt.c
+++ b/sys/ia64/ia64/interrupt.c
@@ -114,11 +114,13 @@ interrupt(u_int64_t vector, struct trapframe *framep)
#else
intrcnt[INTRCNT_CLOCK]++;
#endif
+ critical_enter();
handleclock(framep);
/* divide hz (1024) by 8 to get stathz (128) */
if((++schedclk2 & 0x7) == 0)
statclock((struct clockframe *)framep);
+ critical_exit();
#ifdef SMP
} else if (vector == mp_ipi_vector[IPI_AST]) {
ast(framep);
@@ -317,8 +319,10 @@ ia64_dispatch_intr(void *frame, unsigned long vector)
*/
ih = TAILQ_FIRST(&ithd->it_handlers);
if ((ih->ih_flags & IH_FAST) != 0) {
+ critical_enter();
ih->ih_handler(ih->ih_argument);
ia64_send_eoi(vector);
+ critical_exit();
return;
}
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 6aa36ef..7591f11 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -242,7 +242,7 @@ hardclock(frame)
* callout_lock held; incorrect locking order.
*/
if (need_softclock)
- swi_sched(softclock_ih, SWI_NOSWITCH);
+ swi_sched(softclock_ih, 0);
}
/*
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index 14ed213..fccd59b 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -145,7 +145,7 @@ cv_switch_catch(struct thread *td)
PROC_LOCK(p);
sig = CURSIG(p); /* XXXKSE */
mtx_lock_spin(&sched_lock);
- PROC_UNLOCK_NOSWITCH(p);
+ PROC_UNLOCK(p);
if (sig != 0) {
if (td->td_wchan != NULL)
cv_waitq_remove(td);
@@ -218,8 +218,8 @@ cv_wait(struct cv *cvp, struct mtx *mp)
}
CV_WAIT_VALIDATE(cvp, mp);
- DROP_GIANT_NOSWITCH();
- mtx_unlock_flags(mp, MTX_NOSWITCH);
+ DROP_GIANT();
+ mtx_unlock(mp);
cv_waitq_add(cvp, td);
cv_switch(td);
@@ -273,8 +273,8 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
}
CV_WAIT_VALIDATE(cvp, mp);
- DROP_GIANT_NOSWITCH();
- mtx_unlock_flags(mp, MTX_NOSWITCH);
+ DROP_GIANT();
+ mtx_unlock(mp);
cv_waitq_add(cvp, td);
sig = cv_switch_catch(td);
@@ -339,8 +339,8 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
}
CV_WAIT_VALIDATE(cvp, mp);
- DROP_GIANT_NOSWITCH();
- mtx_unlock_flags(mp, MTX_NOSWITCH);
+ DROP_GIANT();
+ mtx_unlock(mp);
cv_waitq_add(cvp, td);
callout_reset(&td->td_slpcallout, timo, cv_timedwait_end, td);
@@ -412,8 +412,8 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
}
CV_WAIT_VALIDATE(cvp, mp);
- DROP_GIANT_NOSWITCH();
- mtx_unlock_flags(mp, MTX_NOSWITCH);
+ DROP_GIANT();
+ mtx_unlock(mp);
cv_waitq_add(cvp, td);
callout_reset(&td->td_slpcallout, timo, cv_timedwait_end, td);
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index d772be6..b9e1641 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -402,7 +402,7 @@ exit1(td, rv)
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
while (mtx_owned(&Giant))
- mtx_unlock_flags(&Giant, MTX_NOSWITCH);
+ mtx_unlock(&Giant);
/*
* We have to wait until after releasing all locks before
@@ -413,7 +413,7 @@ exit1(td, rv)
p->p_stat = SZOMB;
wakeup(p->p_pptr);
- PROC_UNLOCK_NOSWITCH(p);
+ PROC_UNLOCK(p);
cnt.v_swtch++;
cpu_throw();
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index 88868dc..4bd6837 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -381,9 +381,9 @@ ithread_schedule(struct ithd *ithread, int do_switch)
* Set it_need to tell the thread to keep running if it is already
* running. Then, grab sched_lock and see if we actually need to
* put this thread on the runqueue. If so and the do_switch flag is
- * true, then switch to the ithread immediately. Otherwise, set the
- * needresched flag to guarantee that this ithread will run before any
- * userland processes.
+ * true and it is safe to switch, then switch to the ithread
+ * immediately. Otherwise, set the needresched flag to guarantee
+ * that this ithread will run before any userland processes.
*/
ithread->it_need = 1;
mtx_lock_spin(&sched_lock);
@@ -391,7 +391,8 @@ ithread_schedule(struct ithd *ithread, int do_switch)
CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, p->p_pid);
p->p_stat = SRUN;
setrunqueue(td); /* XXXKSE */
- if (do_switch && curthread->td_proc->p_stat == SRUN) {
+ if (do_switch && curthread->td_critnest == 1 &&
+ curthread->td_proc->p_stat == SRUN) {
if (curthread != PCPU_GET(idlethread))
setrunqueue(curthread);
curthread->td_proc->p_stats->p_ru.ru_nivcsw++;
@@ -458,7 +459,7 @@ swi_sched(void *cookie, int flags)
*/
atomic_store_rel_int(&ih->ih_need, 1);
if (!(flags & SWI_DELAY)) {
- error = ithread_schedule(it, !cold && flags & SWI_SWITCH);
+ error = ithread_schedule(it, !cold);
KASSERT(error == 0, ("stray software interrupt"));
}
}
@@ -580,7 +581,7 @@ SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)
void
legacy_setsoftnet(void)
{
- swi_sched(net_ih, SWI_NOSWITCH);
+ swi_sched(net_ih, 0);
}
/*
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 852b570..2c5217b 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -209,8 +209,6 @@ _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{
MPASS(curthread != NULL);
- KASSERT((opts & MTX_NOSWITCH) == 0,
- ("MTX_NOSWITCH used at %s:%d", file, line));
_get_sleep_lock(m, curthread, opts, file, line);
LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
line);
@@ -264,12 +262,6 @@ _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
MPASS(curthread != NULL);
- /*
- * _mtx_trylock does not accept MTX_NOSWITCH option.
- */
- KASSERT((opts & MTX_NOSWITCH) == 0,
- ("mtx_trylock() called with invalid option flag(s) %d", opts));
-
rval = _obtain_lock(m, curthread);
LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
@@ -524,7 +516,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
td1->td_proc->p_stat = SRUN;
setrunqueue(td1);
- if ((opts & MTX_NOSWITCH) == 0 && td1->td_ksegrp->kg_pri.pri_level < pri) {
+ if (td->td_critnest == 1 && td1->td_ksegrp->kg_pri.pri_level < pri) {
#ifdef notyet
if (td->td_ithd != NULL) {
struct ithd *it = td->td_ithd;
@@ -691,8 +683,8 @@ mtx_destroy(struct mtx *m)
MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
/* Tell witness this isn't locked to make it happy. */
- WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE | LOP_NOSWITCH,
- __FILE__, __LINE__);
+ WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
+ __LINE__);
}
WITNESS_DESTROY(&m->mtx_object);
diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c
index 648e438..256d7bc 100644
--- a/sys/kern/kern_shutdown.c
+++ b/sys/kern/kern_shutdown.c
@@ -268,7 +268,7 @@ boot(int howto)
pbusy = nbusy;
sync(thread0, NULL);
if (curthread != NULL) {
- DROP_GIANT_NOSWITCH();
+ DROP_GIANT();
for (subiter = 0; subiter < 50 * iter; subiter++) {
mtx_lock_spin(&sched_lock);
setrunqueue(curthread);
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index d3f01e6..cc989e0 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -1560,8 +1560,8 @@ issignal(p)
do {
mtx_lock_spin(&sched_lock);
stop(p);
- PROC_UNLOCK_NOSWITCH(p);
- DROP_GIANT_NOSWITCH();
+ PROC_UNLOCK(p);
+ DROP_GIANT();
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
mtx_unlock_spin(&sched_lock);
@@ -1639,8 +1639,8 @@ issignal(p)
}
mtx_lock_spin(&sched_lock);
stop(p);
- PROC_UNLOCK_NOSWITCH(p);
- DROP_GIANT_NOSWITCH();
+ PROC_UNLOCK(p);
+ DROP_GIANT();
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
mtx_unlock_spin(&sched_lock);
diff --git a/sys/kern/kern_subr.c b/sys/kern/kern_subr.c
index cab78c2..9449192 100644
--- a/sys/kern/kern_subr.c
+++ b/sys/kern/kern_subr.c
@@ -386,7 +386,7 @@ uio_yield()
td = curthread;
mtx_lock_spin(&sched_lock);
- DROP_GIANT_NOSWITCH();
+ DROP_GIANT();
td->td_ksegrp->kg_pri.pri_level = td->td_ksegrp->kg_pri.pri_user;
setrunqueue(td);
td->td_proc->p_stats->p_ru.ru_nivcsw++;
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index fce470f..9f3ba01 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -437,17 +437,17 @@ msleep(ident, mtx, priority, wmesg, timo)
* in case this is the idle process and already asleep.
*/
if (mtx != NULL && priority & PDROP)
- mtx_unlock_flags(mtx, MTX_NOSWITCH);
+ mtx_unlock(mtx);
mtx_unlock_spin(&sched_lock);
return (0);
}
- DROP_GIANT_NOSWITCH();
+ DROP_GIANT();
if (mtx != NULL) {
mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
WITNESS_SAVE(&mtx->mtx_object, mtx);
- mtx_unlock_flags(mtx, MTX_NOSWITCH);
+ mtx_unlock(mtx);
if (priority & PDROP)
mtx = NULL;
}
@@ -482,7 +482,7 @@ msleep(ident, mtx, priority, wmesg, timo)
PROC_LOCK(p);
sig = CURSIG(p);
mtx_lock_spin(&sched_lock);
- PROC_UNLOCK_NOSWITCH(p);
+ PROC_UNLOCK(p);
if (sig != 0) {
if (td->td_wchan != NULL)
unsleep(td);
@@ -750,13 +750,13 @@ mi_switch()
PROC_LOCK(p);
killproc(p, "exceeded maximum CPU limit");
mtx_lock_spin(&sched_lock);
- PROC_UNLOCK_NOSWITCH(p);
+ PROC_UNLOCK(p);
} else {
mtx_unlock_spin(&sched_lock);
PROC_LOCK(p);
psignal(p, SIGXCPU);
mtx_lock_spin(&sched_lock);
- PROC_UNLOCK_NOSWITCH(p);
+ PROC_UNLOCK(p);
if (rlim->rlim_cur < rlim->rlim_max) {
/* XXX: we should make a private copy */
rlim->rlim_cur += 5;
diff --git a/sys/kern/subr_taskqueue.c b/sys/kern/subr_taskqueue.c
index 6052aea..72afa6b 100644
--- a/sys/kern/subr_taskqueue.c
+++ b/sys/kern/subr_taskqueue.c
@@ -209,7 +209,7 @@ taskqueue_run(struct taskqueue *queue)
static void
taskqueue_swi_enqueue(void *context)
{
- swi_sched(taskqueue_ih, SWI_NOSWITCH);
+ swi_sched(taskqueue_ih, 0);
}
static void
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index 5d7edec..46a19b7 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -82,7 +82,7 @@ userret(td, frame, oticks)
mtx_lock_spin(&sched_lock);
kg->kg_pri.pri_level = kg->kg_pri.pri_user;
if (ke->ke_flags & KEF_NEEDRESCHED) {
- DROP_GIANT_NOSWITCH();
+ DROP_GIANT();
setrunqueue(td);
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index 852b570..2c5217b 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -209,8 +209,6 @@ _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{
MPASS(curthread != NULL);
- KASSERT((opts & MTX_NOSWITCH) == 0,
- ("MTX_NOSWITCH used at %s:%d", file, line));
_get_sleep_lock(m, curthread, opts, file, line);
LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
line);
@@ -264,12 +262,6 @@ _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
MPASS(curthread != NULL);
- /*
- * _mtx_trylock does not accept MTX_NOSWITCH option.
- */
- KASSERT((opts & MTX_NOSWITCH) == 0,
- ("mtx_trylock() called with invalid option flag(s) %d", opts));
-
rval = _obtain_lock(m, curthread);
LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
@@ -524,7 +516,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
td1->td_proc->p_stat = SRUN;
setrunqueue(td1);
- if ((opts & MTX_NOSWITCH) == 0 && td1->td_ksegrp->kg_pri.pri_level < pri) {
+ if (td->td_critnest == 1 && td1->td_ksegrp->kg_pri.pri_level < pri) {
#ifdef notyet
if (td->td_ithd != NULL) {
struct ithd *it = td->td_ithd;
@@ -691,8 +683,8 @@ mtx_destroy(struct mtx *m)
MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
/* Tell witness this isn't locked to make it happy. */
- WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE | LOP_NOSWITCH,
- __FILE__, __LINE__);
+ WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
+ __LINE__);
}
WITNESS_DESTROY(&m->mtx_object);
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index b798579..37dc369 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -818,7 +818,7 @@ witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
instance->li_lock->lo_name,
instance->li_flags);
instance->li_flags--;
- goto out;
+ return;
}
s = cpu_critical_enter();
CTR4(KTR_WITNESS,
@@ -839,23 +839,11 @@ witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
td->td_proc->p_pid, lle);
witness_lock_list_free(lle);
}
- goto out;
+ return;
}
}
panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
file, line);
-out:
- /*
- * We don't need to protect this PCPU_GET() here against preemption
- * because if we hold any spinlocks then we are already protected,
- * and if we don't we will get NULL if we hold no spinlocks even if
- * we switch CPU's while reading it.
- */
- if (class->lc_flags & LC_SLEEPLOCK) {
- if ((flags & LOP_NOSWITCH) == 0 && PCPU_GET(spinlocks) != NULL)
- panic("switchable sleep unlock (%s) %s @ %s:%d",
- class->lc_name, lock->lo_name, file, line);
- }
}
/*
diff --git a/sys/pc98/cbus/sio.c b/sys/pc98/cbus/sio.c
index cf1b03a..dcf0fa3 100644
--- a/sys/pc98/cbus/sio.c
+++ b/sys/pc98/cbus/sio.c
@@ -2700,7 +2700,7 @@ more_intr:
}
++com->bytes_in;
if (com->hotchar != 0 && recv_data == com->hotchar)
- swi_sched(sio_fast_ih, SWI_NOSWITCH);
+ swi_sched(sio_fast_ih, 0);
ioptr = com->iptr;
if (ioptr >= com->ibufend)
CE_RECORD(com, CE_INTERRUPT_BUF_OVERFLOW);
@@ -2711,7 +2711,7 @@ more_intr:
swi_sched(sio_slow_ih, SWI_DELAY);
#if 0 /* for testing input latency vs efficiency */
if (com->iptr - com->ibuf == 8)
- swi_sched(sio_fast_ih, SWI_NOSWITCH);
+ swi_sched(sio_fast_ih, 0);
#endif
ioptr[0] = recv_data;
ioptr[com->ierroff] = line_status;
@@ -2765,7 +2765,7 @@ cont:
if (!(com->state & CS_CHECKMSR)) {
com_events += LOTS_OF_EVENTS;
com->state |= CS_CHECKMSR;
- swi_sched(sio_fast_ih, SWI_NOSWITCH);
+ swi_sched(sio_fast_ih, 0);
}
/* handle CTS change immediately for crisp flow ctl */
@@ -2868,7 +2868,7 @@ cont:
com_events += LOTS_OF_EVENTS;
com->state |= CS_ODONE;
/* handle at high level ASAP */
- swi_sched(sio_fast_ih, SWI_NOSWITCH);
+ swi_sched(sio_fast_ih, 0);
}
}
if (COM_IIR_TXRDYBUG(com->flags) && (int_ctl != int_ctl_new)) {
diff --git a/sys/pc98/pc98/sio.c b/sys/pc98/pc98/sio.c
index cf1b03a..dcf0fa3 100644
--- a/sys/pc98/pc98/sio.c
+++ b/sys/pc98/pc98/sio.c
@@ -2700,7 +2700,7 @@ more_intr:
}
++com->bytes_in;
if (com->hotchar != 0 && recv_data == com->hotchar)
- swi_sched(sio_fast_ih, SWI_NOSWITCH);
+ swi_sched(sio_fast_ih, 0);
ioptr = com->iptr;
if (ioptr >= com->ibufend)
CE_RECORD(com, CE_INTERRUPT_BUF_OVERFLOW);
@@ -2711,7 +2711,7 @@ more_intr:
swi_sched(sio_slow_ih, SWI_DELAY);
#if 0 /* for testing input latency vs efficiency */
if (com->iptr - com->ibuf == 8)
- swi_sched(sio_fast_ih, SWI_NOSWITCH);
+ swi_sched(sio_fast_ih, 0);
#endif
ioptr[0] = recv_data;
ioptr[com->ierroff] = line_status;
@@ -2765,7 +2765,7 @@ cont:
if (!(com->state & CS_CHECKMSR)) {
com_events += LOTS_OF_EVENTS;
com->state |= CS_CHECKMSR;
- swi_sched(sio_fast_ih, SWI_NOSWITCH);
+ swi_sched(sio_fast_ih, 0);
}
/* handle CTS change immediately for crisp flow ctl */
@@ -2868,7 +2868,7 @@ cont:
com_events += LOTS_OF_EVENTS;
com->state |= CS_ODONE;
/* handle at high level ASAP */
- swi_sched(sio_fast_ih, SWI_NOSWITCH);
+ swi_sched(sio_fast_ih, 0);
}
}
if (COM_IIR_TXRDYBUG(com->flags) && (int_ctl != int_ctl_new)) {
diff --git a/sys/sys/interrupt.h b/sys/sys/interrupt.h
index fda8495..4e1a934 100644
--- a/sys/sys/interrupt.h
+++ b/sys/sys/interrupt.h
@@ -82,9 +82,7 @@ struct ithd {
#define IT_DEAD 0x000004 /* Thread is waiting to exit. */
/* Flags to pass to sched_swi. */
-#define SWI_NOSWITCH 0x0
-#define SWI_SWITCH 0x1
-#define SWI_DELAY 0x2 /* implies NOSWITCH */
+#define SWI_DELAY 0x2
/*
* Software interrupt bit numbers in priority order. The priority only
diff --git a/sys/sys/lock.h b/sys/sys/lock.h
index e72b712..425a6e9 100644
--- a/sys/sys/lock.h
+++ b/sys/sys/lock.h
@@ -78,7 +78,6 @@ struct lock_class {
* Option flags passed to lock operations that witness also needs to know
* about or that are generic across all locks.
*/
-#define LOP_NOSWITCH 0x00000001 /* Lock doesn't switch on release. */
#define LOP_QUIET 0x00000002 /* Don't log locking operations. */
#define LOP_TRYLOCK 0x00000004 /* Don't check lock order. */
#define LOP_EXCLUSIVE 0x00000008 /* Exclusive lock. */
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index a4eb018..f952b8e 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -62,7 +62,6 @@
* Option flags passed to certain lock/unlock routines, through the use
* of corresponding mtx_{lock,unlock}_flags() interface macros.
*/
-#define MTX_NOSWITCH LOP_NOSWITCH /* Do not switch on release */
#define MTX_QUIET LOP_QUIET /* Don't log a mutex event */
/*
@@ -214,7 +213,7 @@ void mtx_unlock_giant(int s);
* mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) locks mutex `m'
* and passes option flags `opts' to the "hard" function, if required.
* With these routines, it is possible to pass flags such as MTX_QUIET
- * and/or MTX_NOSWITCH to the appropriate lock manipulation routines.
+ * to the appropriate lock manipulation routines.
*
* mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if
* it cannot. Rather, it returns 0 on failure and non-zero on success.
@@ -294,16 +293,6 @@ extern int kern_giant_file;
*
* Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT()
*/
-#define DROP_GIANT_NOSWITCH() \
-do { \
- int _giantcnt; \
- WITNESS_SAVE_DECL(Giant); \
- \
- if (mtx_owned(&Giant)) \
- WITNESS_SAVE(&Giant.mtx_object, Giant); \
- for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
- mtx_unlock_flags(&Giant, MTX_NOSWITCH)
-
#define DROP_GIANT() \
do { \
int _giantcnt; \
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index ebf6d36..46f01ee 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -582,8 +582,6 @@ sigonstack(size_t sp)
#define PROC_LOCK(p) mtx_lock(&(p)->p_mtx)
#define PROC_TRYLOCK(p) mtx_trylock(&(p)->p_mtx)
#define PROC_UNLOCK(p) mtx_unlock(&(p)->p_mtx)
-#define PROC_UNLOCK_NOSWITCH(p) \
- mtx_unlock_flags(&(p)->p_mtx, MTX_NOSWITCH)
#define PROC_LOCKED(p) mtx_owned(&(p)->p_mtx)
#define PROC_LOCK_ASSERT(p, type) mtx_assert(&(p)->p_mtx, (type))
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 7a03cca..b351e83 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -630,7 +630,7 @@ swapout(p)
mtx_lock_spin(&sched_lock);
p->p_sflag &= ~PS_INMEM;
p->p_sflag |= PS_SWAPPING;
- PROC_UNLOCK_NOSWITCH(p);
+ PROC_UNLOCK(p);
FOREACH_THREAD_IN_PROC (p, td)
if (td->td_proc->p_stat == SRUN) /* XXXKSE */
remrunqueue(td); /* XXXKSE */