Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/init_main.c       |  2
-rw-r--r--  sys/kern/kern_clock.c      |  4
-rw-r--r--  sys/kern/kern_exit.c       |  4
-rw-r--r--  sys/kern/kern_ktr.c        |  2
-rw-r--r--  sys/kern/kern_malloc.c     |  2
-rw-r--r--  sys/kern/kern_mutex.c      |  8
-rw-r--r--  sys/kern/kern_resource.c   |  9
-rw-r--r--  sys/kern/kern_shutdown.c   |  4
-rw-r--r--  sys/kern/kern_subr.c       |  6
-rw-r--r--  sys/kern/kern_switch.c     |  4
-rw-r--r--  sys/kern/kern_synch.c      | 23
-rw-r--r--  sys/kern/subr_prf.c        |  2
-rw-r--r--  sys/kern/subr_smp.c        | 38
-rw-r--r--  sys/kern/subr_trap.c       | 39
-rw-r--r--  sys/kern/subr_turnstile.c  |  8
-rw-r--r--  sys/kern/subr_witness.c    |  8
-rw-r--r--  sys/kern/vfs_bio.c         | 10
17 files changed, 94 insertions(+), 79 deletions(-)
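Note on the pattern in this diff: every hunk replaces a bare per-CPU global
(cpuid, curpcb, idleproc, switchtime, switchticks, other_cpus,
intr_nesting_level, witness_spin_check, common_tss, ...) with the
PCPU_GET(), PCPU_SET() and PCPU_PTR() accessors, which resolve the named
field in the calling CPU's private data area.  The standalone sketch below
shows how accessors of this shape can sit on top of a per-CPU structure; the
struct layout, the pcpu_data[] array and the my_cpu_id() helper are
illustrative assumptions for this example only, not the FreeBSD
machine/globaldata.h definitions.

	/*
	 * Illustrative sketch only: PCPU_GET/PCPU_SET/PCPU_PTR-style
	 * accessors layered over a hypothetical per-CPU structure.
	 */
	#include <stdio.h>

	#define	MAXCPU	4

	struct pcpu_sketch {
		int	pc_cpuid;	/* logical CPU number */
		int	pc_switchticks;	/* ticks at last context switch */
	};

	/* Assumed backing store: one slot per CPU. */
	static struct pcpu_sketch pcpu_data[MAXCPU];

	/* Stand-in for "which CPU am I on"; always 0 in this sketch. */
	static int my_cpu_id(void) { return (0); }

	#define	PCPU_PTR(member)	(&pcpu_data[my_cpu_id()].pc_ ## member)
	#define	PCPU_GET(member)	(pcpu_data[my_cpu_id()].pc_ ## member)
	#define	PCPU_SET(member, val)	(pcpu_data[my_cpu_id()].pc_ ## member = (val))

	int
	main(void)
	{
		/* Call sites read and write fields through the accessors
		 * instead of touching bare globals, as in the diff below. */
		PCPU_SET(cpuid, my_cpu_id());
		PCPU_SET(switchticks, 100);
		printf("cpuid = %d, switchticks = %d\n",
		    PCPU_GET(cpuid), PCPU_GET(switchticks));
		return (0);
	}

Routing every access through one set of accessors means the per-CPU area can
be located however the platform prefers (for example via a pointer set up
when each CPU starts) without revisiting the call sites again.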
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index a147eae..24bf964 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -415,7 +415,7 @@ proc0_post(void *dummy __unused)
p->p_runtime = 0;
}
ALLPROC_LOCK(AP_RELEASE);
- microuptime(&switchtime);
+ microuptime(PCPU_PTR(switchtime));
PCPU_SET(switchticks, ticks);
/*
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 223be91..5a8ade6 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -160,7 +160,7 @@ hardclock(frame)
int need_softclock = 0;
p = curproc;
- if (p != idleproc) {
+ if (p != PCPU_GET(idleproc)) {
register struct pstats *pstats;
/*
@@ -400,7 +400,7 @@ statclock(frame)
cp_time[CP_INTR]++;
} else {
p->p_sticks++;
- if (p != idleproc)
+ if (p != PCPU_GET(idleproc))
cp_time[CP_SYS]++;
else
cp_time[CP_IDLE]++;
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index e6a640b..41b540e 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -305,8 +305,8 @@ exit1(p, rv)
* directly. Set it now so that the rest of the exit time gets
* counted somewhere if possible.
*/
- microuptime(&switchtime);
- switchticks = ticks;
+ microuptime(PCPU_PTR(switchtime));
+ PCPU_SET(switchticks, ticks);
/*
* notify interested parties of our demise.
diff --git a/sys/kern/kern_ktr.c b/sys/kern/kern_ktr.c
index 1704d5d..457eb02 100644
--- a/sys/kern/kern_ktr.c
+++ b/sys/kern/kern_ktr.c
@@ -61,7 +61,7 @@
#endif
#ifdef SMP
-#define KTR_CPU cpuid
+#define KTR_CPU PCPU_GET(cpuid)
#else
#define KTR_CPU 0
#endif
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index f268be6..d6275b8 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -147,7 +147,7 @@ malloc(size, type, flags)
#if defined(INVARIANTS) && defined(__i386__)
if (flags == M_WAITOK)
- KASSERT(intr_nesting_level == 0,
+ KASSERT(PCPU_GET(intr_nesting_level) == 0,
("malloc(M_WAITOK) in interrupt context"));
#endif
indx = BUCKETINDX(size);
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 77fbdc1..a79299c 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -844,7 +844,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
if (m->mtx_recurse != 0)
return;
mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
- i = witness_spin_check;
+ i = PCPU_GET(witness_spin_check);
if (i != 0 && w->w_level < i) {
mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
panic("mutex_enter(%s:%x, MTX_SPIN) out of order @"
@@ -977,7 +977,8 @@ witness_exit(struct mtx *m, int flags, const char *file, int line)
if (m->mtx_recurse != 0)
return;
mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
- PCPU_SET(witness_spin_check, witness_spin_check & ~w->w_level);
+ PCPU_SET(witness_spin_check,
+ PCPU_GET(witness_spin_check) & ~w->w_level);
mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
return;
}
@@ -1011,7 +1012,8 @@ witness_try_enter(struct mtx *m, int flags, const char *file, int line)
if (m->mtx_recurse != 0)
return;
mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
- PCPU_SET(witness_spin_check, witness_spin_check | w->w_level);
+ PCPU_SET(witness_spin_check,
+ PCPU_GET(witness_spin_check) | w->w_level);
mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
w->w_file = file;
w->w_line = line;
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index b852cf6..e67ee79 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -534,13 +534,14 @@ calcru(p, up, sp, ip)
* quantum, which is much greater than the sampling error.
*/
microuptime(&tv);
- if (timevalcmp(&tv, &switchtime, <))
+ if (timevalcmp(&tv, PCPU_PTR(switchtime), <))
printf("microuptime() went backwards (%ld.%06ld -> %ld.%06ld)\n",
- switchtime.tv_sec, switchtime.tv_usec,
+ PCPU_GET(switchtime.tv_sec), PCPU_GET(switchtime.tv_usec),
tv.tv_sec, tv.tv_usec);
else
- tu += (tv.tv_usec - switchtime.tv_usec) +
- (tv.tv_sec - switchtime.tv_sec) * (int64_t)1000000;
+ tu += (tv.tv_usec - PCPU_GET(switchtime.tv_usec)) +
+ (tv.tv_sec - PCPU_GET(switchtime.tv_sec)) *
+ (int64_t)1000000;
}
ptu = p->p_uu + p->p_su + p->p_iu;
if (tu < ptu || (int64_t)tu < 0) {
diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c
index d1e23f8..eaf8b94 100644
--- a/sys/kern/kern_shutdown.c
+++ b/sys/kern/kern_shutdown.c
@@ -209,7 +209,7 @@ boot(int howto)
#ifdef SMP
if (smp_active)
- printf("boot() called on cpu#%d\n", cpuid);
+ printf("boot() called on cpu#%d\n", PCPU_GET(cpuid));
#endif
/*
* Do any callouts that should be done BEFORE syncing the filesystems.
@@ -557,7 +557,7 @@ panic(const char *fmt, ...)
printf("panic: %s\n", buf);
#ifdef SMP
/* two seperate prints in case of an unmapped page and trap */
- printf("cpuid = %d; ", cpuid);
+ printf("cpuid = %d; ", PCPU_GET(cpuid));
#ifdef APIC_IO
printf("lapic.id = %08x\n", lapic.id);
#endif
diff --git a/sys/kern/kern_subr.c b/sys/kern/kern_subr.c
index c379f79..c3d7849 100644
--- a/sys/kern/kern_subr.c
+++ b/sys/kern/kern_subr.c
@@ -92,7 +92,7 @@ uiomove(cp, n, uio)
case UIO_USERSPACE:
case UIO_USERISPACE:
- if (ticks - switchticks >= hogticks)
+ if (ticks - PCPU_GET(switchticks) >= hogticks)
uio_yield();
if (uio->uio_rw == UIO_READ)
error = copyout(cp, iov->iov_base, cnt);
@@ -154,7 +154,7 @@ uiomoveco(cp, n, uio, obj)
case UIO_USERSPACE:
case UIO_USERISPACE:
- if (ticks - switchticks >= hogticks)
+ if (ticks - PCPU_GET(switchticks) >= hogticks)
uio_yield();
if (uio->uio_rw == UIO_READ) {
#ifdef ENABLE_VFS_IOOPT
@@ -236,7 +236,7 @@ uioread(n, uio, obj, nread)
cnt &= ~PAGE_MASK;
- if (ticks - switchticks >= hogticks)
+ if (ticks - PCPU_GET(switchticks) >= hogticks)
uio_yield();
error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
uio->uio_offset, cnt,
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 60024b0..7515ea8 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -232,12 +232,12 @@ chooseproc(void)
} else {
CTR1(KTR_PROC, "chooseproc: idleproc, schedlock %lx",
(long)sched_lock.mtx_lock);
- return idleproc;
+ return PCPU_GET(idleproc);
}
p = TAILQ_FIRST(q);
#ifdef SMP
/* wander down the current run queue for this pri level for a match */
- id = cpuid;
+ id = PCPU_GET(cpuid);
while (p->p_lastcpu != id) {
p = TAILQ_NEXT(p, p_procq);
if (p == NULL) {
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 90b9fa3..ff240db 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -142,7 +142,7 @@ maybe_resched(chk)
* standard process becomes runaway cpu-bound, the system can lockup
* due to idle-scheduler processes in wakeup never getting any cpu.
*/
- if (p == idleproc) {
+ if (p == PCPU_GET(idleproc)) {
#if 0
need_resched();
#endif
@@ -176,7 +176,7 @@ roundrobin(arg)
need_resched();
forward_roundrobin();
#else
- if (p == idleproc || RTP_PRIO_NEED_RR(p->p_rtprio.type))
+ if (p == PCPU_GET(idleproc) || RTP_PRIO_NEED_RR(p->p_rtprio.type))
need_resched();
#endif
@@ -925,14 +925,15 @@ mi_switch()
* process was running, and add that to its total so far.
*/
microuptime(&new_switchtime);
- if (timevalcmp(&new_switchtime, &switchtime, <)) {
+ if (timevalcmp(&new_switchtime, PCPU_PTR(switchtime), <)) {
printf("microuptime() went backwards (%ld.%06ld -> %ld.%06ld)\n",
- switchtime.tv_sec, switchtime.tv_usec,
+ PCPU_GET(switchtime.tv_sec), PCPU_GET(switchtime.tv_usec),
new_switchtime.tv_sec, new_switchtime.tv_usec);
- new_switchtime = switchtime;
+ new_switchtime = PCPU_GET(switchtime);
} else {
- p->p_runtime += (new_switchtime.tv_usec - switchtime.tv_usec) +
- (new_switchtime.tv_sec - switchtime.tv_sec) * (int64_t)1000000;
+ p->p_runtime += (new_switchtime.tv_usec - PCPU_GET(switchtime.tv_usec)) +
+ (new_switchtime.tv_sec - PCPU_GET(switchtime.tv_sec)) *
+ (int64_t)1000000;
}
/*
@@ -959,15 +960,15 @@ mi_switch()
* Pick a new current process and record its start time.
*/
cnt.v_swtch++;
- switchtime = new_switchtime;
+ PCPU_SET(switchtime, new_switchtime);
CTR4(KTR_PROC, "mi_switch: old proc %p (pid %d, %s), schedlock %p",
p, p->p_pid, p->p_comm, (void *) sched_lock.mtx_lock);
cpu_switch();
CTR4(KTR_PROC, "mi_switch: new proc %p (pid %d, %s), schedlock %p",
p, p->p_pid, p->p_comm, (void *) sched_lock.mtx_lock);
- if (switchtime.tv_sec == 0)
- microuptime(&switchtime);
- switchticks = ticks;
+ if (PCPU_GET(switchtime.tv_sec) == 0)
+ microuptime(PCPU_PTR(switchtime));
+ PCPU_SET(switchticks, ticks);
splx(x);
}
diff --git a/sys/kern/subr_prf.c b/sys/kern/subr_prf.c
index efff7f8..86a53766 100644
--- a/sys/kern/subr_prf.c
+++ b/sys/kern/subr_prf.c
@@ -113,7 +113,7 @@ uprintf(const char *fmt, ...)
struct putchar_arg pca;
int retval = 0;
- if (p && p != idleproc && p->p_flag & P_CONTROLT &&
+ if (p && p != PCPU_GET(idleproc) && p->p_flag & P_CONTROLT &&
p->p_session->s_ttyvp) {
va_start(ap, fmt);
pca.tty = p->p_session->s_ttyp;
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index 6c3abeb..79fc383 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -484,11 +484,11 @@ init_secondary(void)
gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
- common_tss.tss_esp0 = 0; /* not used until after switch */
- common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
- common_tss.tss_ioopt = (sizeof common_tss) << 16;
- tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd;
- common_tssd = *tss_gdt;
+ PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
+ PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
+ PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
+ PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
+ PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
ltr(gsel_tss);
pmap_set_opt();
@@ -2045,7 +2045,7 @@ start_all_aps(u_int boot_addr)
}
/* build our map of 'other' CPUs */
- other_cpus = all_cpus & ~(1 << cpuid);
+ PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
/* fill in our (BSP) APIC version */
cpu_apic_versions[0] = lapic.version;
@@ -2398,9 +2398,9 @@ ap_init(void)
#endif
/* Build our map of 'other' CPUs. */
- other_cpus = all_cpus & ~(1 << cpuid);
+ PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
- printf("SMP: AP CPU #%d Launched!\n", cpuid);
+ printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
/* set up CPU registers and state */
cpu_setregs();
@@ -2410,8 +2410,8 @@ ap_init(void)
/* A quick check from sanity claus */
apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
- if (cpuid != apic_id) {
- printf("SMP: cpuid = %d\n", cpuid);
+ if (PCPU_GET(cpuid) != apic_id) {
+ printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
printf("SMP: apic_id = %d\n", apic_id);
printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
panic("cpuid mismatch! boom!!");
@@ -2445,10 +2445,10 @@ ap_init(void)
* Set curproc to our per-cpu idleproc so that mutexes have
* something unique to lock with.
*/
- PCPU_SET(curproc,idleproc);
+ PCPU_SET(curproc, PCPU_GET(idleproc));
- microuptime(&switchtime);
- switchticks = ticks;
+ microuptime(PCPU_PTR(switchtime));
+ PCPU_SET(switchticks, ticks);
/* ok, now grab sched_lock and enter the scheduler */
enable_intr();
@@ -2610,7 +2610,7 @@ forward_statclock(int pscnt)
/* Step 1: Probe state (user, cpu, interrupt, spinlock, idle ) */
- map = other_cpus & ~stopped_cpus ;
+ map = PCPU_GET(other_cpus) & ~stopped_cpus ;
checkstate_probed_cpus = 0;
if (map != 0)
selected_apic_ipi(map,
@@ -2636,7 +2636,7 @@ forward_statclock(int pscnt)
map = 0;
for (id = 0; id < mp_ncpus; id++) {
- if (id == cpuid)
+ if (id == PCPU_GET(cpuid))
continue;
if (((1 << id) & checkstate_probed_cpus) == 0)
continue;
@@ -2685,7 +2685,7 @@ forward_hardclock(int pscnt)
/* Step 1: Probe state (user, cpu, interrupt, spinlock, idle) */
- map = other_cpus & ~stopped_cpus ;
+ map = PCPU_GET(other_cpus) & ~stopped_cpus ;
checkstate_probed_cpus = 0;
if (map != 0)
selected_apic_ipi(map,
@@ -2712,7 +2712,7 @@ forward_hardclock(int pscnt)
map = 0;
for (id = 0; id < mp_ncpus; id++) {
- if (id == cpuid)
+ if (id == PCPU_GET(cpuid))
continue;
if (((1 << id) & checkstate_probed_cpus) == 0)
continue;
@@ -2813,8 +2813,8 @@ forward_roundrobin(void)
return;
if (!forward_roundrobin_enabled)
return;
- resched_cpus |= other_cpus;
- map = other_cpus & ~stopped_cpus ;
+ resched_cpus |= PCPU_GET(other_cpus);
+ map = PCPU_GET(other_cpus) & ~stopped_cpus ;
#if 1
selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
#else
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index db02d51..959c64f 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -480,7 +480,7 @@ restart:
if (in_vm86call)
break;
- if (intr_nesting_level != 0)
+ if (PCPU_GET(intr_nesting_level) != 0)
break;
/*
@@ -493,7 +493,7 @@ restart:
* a signal.
*/
if (frame.tf_eip == (int)cpu_switch_load_gs) {
- curpcb->pcb_gs = 0;
+ PCPU_GET(curpcb)->pcb_gs = 0;
psignal(p, SIGBUS);
goto out;
}
@@ -519,13 +519,15 @@ restart:
if (frame.tf_eip == (int)doreti_popl_es) {
frame.tf_eip = (int)doreti_popl_es_fault;
goto out;
- }
+ }
if (frame.tf_eip == (int)doreti_popl_fs) {
frame.tf_eip = (int)doreti_popl_fs_fault;
goto out;
}
- if (curpcb && curpcb->pcb_onfault) {
- frame.tf_eip = (int)curpcb->pcb_onfault;
+ if (PCPU_GET(curpcb) != NULL &&
+ PCPU_GET(curpcb)->pcb_onfault != NULL) {
+ frame.tf_eip =
+ (int)PCPU_GET(curpcb)->pcb_onfault;
goto out;
}
break;
@@ -685,8 +687,9 @@ trap_pfault(frame, usermode, eva)
if (p == NULL ||
(!usermode && va < VM_MAXUSER_ADDRESS &&
- (intr_nesting_level != 0 || curpcb == NULL ||
- curpcb->pcb_onfault == NULL))) {
+ (PCPU_GET(intr_nesting_level) != 0 ||
+ PCPU_GET(curpcb) == NULL ||
+ PCPU_GET(curpcb)->pcb_onfault == NULL))) {
trap_fatal(frame, eva);
return (-1);
}
@@ -748,8 +751,10 @@ trap_pfault(frame, usermode, eva)
return (0);
nogo:
if (!usermode) {
- if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) {
- frame->tf_eip = (int)curpcb->pcb_onfault;
+ if (PCPU_GET(intr_nesting_level) == 0 &&
+ PCPU_GET(curpcb) != NULL &&
+ PCPU_GET(curpcb)->pcb_onfault != NULL) {
+ frame->tf_eip = (int)PCPU_GET(curpcb)->pcb_onfault;
return (0);
}
trap_fatal(frame, eva);
@@ -853,8 +858,10 @@ trap_pfault(frame, usermode, eva)
return (0);
nogo:
if (!usermode) {
- if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) {
- frame->tf_eip = (int)curpcb->pcb_onfault;
+ if (PCPU_GET(intr_nesting_level) == 0 &&
+ PCPU_GET(curpcb) != NULL &&
+ PCPU_GET(curpcb)->pcb_onfault != NULL) {
+ frame->tf_eip = (int)PCPU_GET(curpcb)->pcb_onfault;
return (0);
}
trap_fatal(frame, eva);
@@ -886,7 +893,7 @@ trap_fatal(frame, eva)
ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
/* two seperate prints in case of a trap on an unmapped page */
- printf("cpuid = %d; ", cpuid);
+ printf("cpuid = %d; ", PCPU_GET(cpuid));
printf("lapic.id = %08x\n", lapic.id);
#endif
if (type == T_PAGEFLT) {
@@ -964,12 +971,12 @@ void
dblfault_handler()
{
printf("\nFatal double fault:\n");
- printf("eip = 0x%x\n", common_tss.tss_eip);
- printf("esp = 0x%x\n", common_tss.tss_esp);
- printf("ebp = 0x%x\n", common_tss.tss_ebp);
+ printf("eip = 0x%x\n", PCPU_GET(common_tss.tss_eip));
+ printf("esp = 0x%x\n", PCPU_GET(common_tss.tss_esp));
+ printf("ebp = 0x%x\n", PCPU_GET(common_tss.tss_ebp));
#ifdef SMP
/* two seperate prints in case of a trap on an unmapped page */
- printf("cpuid = %d; ", cpuid);
+ printf("cpuid = %d; ", PCPU_GET(cpuid));
printf("lapic.id = %08x\n", lapic.id);
#endif
panic("double fault");
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index 77fbdc1..a79299c 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -844,7 +844,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
if (m->mtx_recurse != 0)
return;
mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
- i = witness_spin_check;
+ i = PCPU_GET(witness_spin_check);
if (i != 0 && w->w_level < i) {
mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
panic("mutex_enter(%s:%x, MTX_SPIN) out of order @"
@@ -977,7 +977,8 @@ witness_exit(struct mtx *m, int flags, const char *file, int line)
if (m->mtx_recurse != 0)
return;
mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
- PCPU_SET(witness_spin_check, witness_spin_check & ~w->w_level);
+ PCPU_SET(witness_spin_check,
+ PCPU_GET(witness_spin_check) & ~w->w_level);
mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
return;
}
@@ -1011,7 +1012,8 @@ witness_try_enter(struct mtx *m, int flags, const char *file, int line)
if (m->mtx_recurse != 0)
return;
mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
- PCPU_SET(witness_spin_check, witness_spin_check | w->w_level);
+ PCPU_SET(witness_spin_check,
+ PCPU_GET(witness_spin_check) | w->w_level);
mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
w->w_file = file;
w->w_line = line;
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index 77fbdc1..a79299c 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -844,7 +844,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
if (m->mtx_recurse != 0)
return;
mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
- i = witness_spin_check;
+ i = PCPU_GET(witness_spin_check);
if (i != 0 && w->w_level < i) {
mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
panic("mutex_enter(%s:%x, MTX_SPIN) out of order @"
@@ -977,7 +977,8 @@ witness_exit(struct mtx *m, int flags, const char *file, int line)
if (m->mtx_recurse != 0)
return;
mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
- PCPU_SET(witness_spin_check, witness_spin_check & ~w->w_level);
+ PCPU_SET(witness_spin_check,
+ PCPU_GET(witness_spin_check) & ~w->w_level);
mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
return;
}
@@ -1011,7 +1012,8 @@ witness_try_enter(struct mtx *m, int flags, const char *file, int line)
if (m->mtx_recurse != 0)
return;
mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
- PCPU_SET(witness_spin_check, witness_spin_check | w->w_level);
+ PCPU_SET(witness_spin_check,
+ PCPU_GET(witness_spin_check) | w->w_level);
mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
w->w_file = file;
w->w_line = line;
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 9949813..14ae4a8 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -510,7 +510,7 @@ bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
/* if not found in cache, do some I/O */
if ((bp->b_flags & B_CACHE) == 0) {
- if (curproc != idleproc)
+ if (curproc != PCPU_GET(idleproc))
curproc->p_stats->p_ru.ru_inblock++;
KASSERT(!(bp->b_flags & B_ASYNC), ("bread: illegal async bp %p", bp));
bp->b_iocmd = BIO_READ;
@@ -547,7 +547,7 @@ breadn(struct vnode * vp, daddr_t blkno, int size,
/* if not found in cache, do some I/O */
if ((bp->b_flags & B_CACHE) == 0) {
- if (curproc != idleproc)
+ if (curproc != PCPU_GET(idleproc))
curproc->p_stats->p_ru.ru_inblock++;
bp->b_iocmd = BIO_READ;
bp->b_flags &= ~B_INVAL;
@@ -568,7 +568,7 @@ breadn(struct vnode * vp, daddr_t blkno, int size,
rabp = getblk(vp, *rablkno, *rabsize, 0, 0);
if ((rabp->b_flags & B_CACHE) == 0) {
- if (curproc != idleproc)
+ if (curproc != PCPU_GET(idleproc))
curproc->p_stats->p_ru.ru_inblock++;
rabp->b_flags |= B_ASYNC;
rabp->b_flags &= ~B_INVAL;
@@ -695,7 +695,7 @@ bwrite(struct buf * bp)
bp->b_vp->v_numoutput++;
vfs_busy_pages(bp, 1);
- if (curproc != idleproc)
+ if (curproc != PCPU_GET(idleproc))
curproc->p_stats->p_ru.ru_oublock++;
splx(s);
if (oldflags & B_ASYNC)
@@ -2107,7 +2107,7 @@ loop:
* XXX remove if 0 sections (clean this up after its proven)
*/
if (numfreebuffers == 0) {
- if (curproc == idleproc)
+ if (curproc == PCPU_GET(idleproc))
return NULL;
needsbuffer |= VFS_BIO_NEED_ANY;
}