diff options
Diffstat (limited to 'sys/alpha')
-rw-r--r--  sys/alpha/alpha/busdma_machdep.c  | 20
-rw-r--r--  sys/alpha/alpha/interrupt.c       | 12
-rw-r--r--  sys/alpha/alpha/machdep.c         |  2
-rw-r--r--  sys/alpha/alpha/mp_machdep.c      | 18
-rw-r--r--  sys/alpha/alpha/procfs_machdep.c  | 24
-rw-r--r--  sys/alpha/alpha/trap.c            | 76
-rw-r--r--  sys/alpha/alpha/vm_machdep.c      |  8
-rw-r--r--  sys/alpha/include/cpu.h           |  4
-rw-r--r--  sys/alpha/include/mutex.h         | 47
-rw-r--r--  sys/alpha/osf1/osf1_misc.c        |  4
10 files changed, 100 insertions, 115 deletions
diff --git a/sys/alpha/alpha/busdma_machdep.c b/sys/alpha/alpha/busdma_machdep.c index 2cc55d2..e920894 100644 --- a/sys/alpha/alpha/busdma_machdep.c +++ b/sys/alpha/alpha/busdma_machdep.c @@ -614,11 +614,11 @@ alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) break; } bpage->busaddr = pmap_kextract(bpage->vaddr); - mtx_enter(&bounce_lock, MTX_DEF); + mtx_lock(&bounce_lock); STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links); total_bpages++; free_bpages++; - mtx_exit(&bounce_lock, MTX_DEF); + mtx_unlock(&bounce_lock); count++; numpages--; } @@ -653,7 +653,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, panic("add_bounce_page: map doesn't need any pages"); map->pagesreserved--; - mtx_enter(&bounce_lock, MTX_DEF); + mtx_lock(&bounce_lock); bpage = STAILQ_FIRST(&bounce_page_list); if (bpage == NULL) panic("add_bounce_page: free page list is empty"); @@ -661,7 +661,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, STAILQ_REMOVE_HEAD(&bounce_page_list, links); reserved_bpages--; active_bpages++; - mtx_exit(&bounce_lock, MTX_DEF); + mtx_unlock(&bounce_lock); bpage->datavaddr = vaddr; bpage->datacount = size; @@ -677,7 +677,7 @@ free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) bpage->datavaddr = 0; bpage->datacount = 0; - mtx_enter(&bounce_lock, MTX_DEF); + mtx_lock(&bounce_lock); STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links); free_bpages++; active_bpages--; @@ -690,7 +690,7 @@ free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) sched_swi(vm_ih, SWI_NOSWITCH); } } - mtx_exit(&bounce_lock, MTX_DEF); + mtx_unlock(&bounce_lock); } void @@ -698,13 +698,13 @@ busdma_swi(void) { struct bus_dmamap *map; - mtx_enter(&bounce_lock, MTX_DEF); + mtx_lock(&bounce_lock); while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); - mtx_exit(&bounce_lock, MTX_DEF); + mtx_unlock(&bounce_lock); bus_dmamap_load(map->dmat, map, map->buf, 
map->buflen, map->callback, map->callback_arg, /*flags*/0); - mtx_enter(&bounce_lock, MTX_DEF); + mtx_lock(&bounce_lock); } - mtx_exit(&bounce_lock, MTX_DEF); + mtx_unlock(&bounce_lock); } diff --git a/sys/alpha/alpha/interrupt.c b/sys/alpha/alpha/interrupt.c index 57d0da5..f2b431f 100644 --- a/sys/alpha/alpha/interrupt.c +++ b/sys/alpha/alpha/interrupt.c @@ -560,7 +560,7 @@ alpha_dispatch_intr(void *frame, unsigned long vector) "alpha_dispatch_intr: disabling vector 0x%x", i->vector); i->disable(i->vector); } - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (ithd->it_proc->p_stat == SWAIT) { /* not on the run queue and not running */ CTR1(KTR_INTR, "alpha_dispatch_intr: setrunqueue %d", @@ -587,7 +587,7 @@ alpha_dispatch_intr(void *frame, unsigned long vector) ithd->it_proc->p_pid, ithd->it_need, ithd->it_proc->p_stat); need_resched(); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } void @@ -626,10 +626,10 @@ ithd_loop(void *dummy) ih->ih_flags); if ((ih->ih_flags & INTR_MPSAFE) == 0) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); ih->ih_handler(ih->ih_argument); if ((ih->ih_flags & INTR_MPSAFE) == 0) - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); } /* @@ -646,7 +646,7 @@ ithd_loop(void *dummy) * set again, so we have to check it again. 
*/ mtx_assert(&Giant, MA_NOTOWNED); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (!ithd->it_need) { ithd->it_proc->p_stat = SWAIT; /* we're idle */ CTR1(KTR_INTR, "ithd_loop pid %d: done", @@ -655,7 +655,7 @@ ithd_loop(void *dummy) CTR1(KTR_INTR, "ithd_loop pid %d: resumed", ithd->it_proc->p_pid); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } } diff --git a/sys/alpha/alpha/machdep.c b/sys/alpha/alpha/machdep.c index c76665a..bbfe946 100644 --- a/sys/alpha/alpha/machdep.c +++ b/sys/alpha/alpha/machdep.c @@ -1012,7 +1012,7 @@ alpha_init(pfn, ptb, bim, bip, biv) */ mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE); mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE); - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); /* * Look at arguments passed to us and compute boothowto. diff --git a/sys/alpha/alpha/mp_machdep.c b/sys/alpha/alpha/mp_machdep.c index bf61c11..b40ffa4 100644 --- a/sys/alpha/alpha/mp_machdep.c +++ b/sys/alpha/alpha/mp_machdep.c @@ -162,7 +162,7 @@ smp_init_secondary(void) alpha_pal_wrent(XentUna, ALPHA_KENTRY_UNA); alpha_pal_wrent(XentSys, ALPHA_KENTRY_SYS); - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); printf("smp_init_secondary: called\n"); CTR0(KTR_SMP, "smp_init_secondary"); @@ -176,7 +176,7 @@ smp_init_secondary(void) spl0(); smp_ipi_all(0); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); } extern void smp_init_secondary_glue(void); @@ -657,14 +657,14 @@ forward_signal(struct proc *p) return; if (!forward_signal_enabled) return; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); while (1) { if (p->p_stat != SRUN) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return; } id = p->p_oncpu; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); if (id == 0xff) return; map = (1<<id); @@ -682,9 +682,9 @@ forward_signal(struct proc *p) break; } } - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (id == p->p_oncpu) { - 
mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return; } } @@ -841,7 +841,7 @@ smp_rendezvous(void (* setup_func)(void *), { /* obtain rendezvous lock */ - mtx_enter(&smp_rv_mtx, MTX_SPIN); + mtx_lock_spin(&smp_rv_mtx); /* set static function pointers */ smp_rv_setup_func = setup_func; @@ -858,7 +858,7 @@ smp_rendezvous(void (* setup_func)(void *), smp_rendezvous_action(); /* release lock */ - mtx_exit(&smp_rv_mtx, MTX_SPIN); + mtx_unlock_spin(&smp_rv_mtx); } /* diff --git a/sys/alpha/alpha/procfs_machdep.c b/sys/alpha/alpha/procfs_machdep.c index 229d2f9..c0766ca 100644 --- a/sys/alpha/alpha/procfs_machdep.c +++ b/sys/alpha/alpha/procfs_machdep.c @@ -86,12 +86,12 @@ procfs_read_regs(p, regs) struct reg *regs; { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (EIO); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (fill_regs(p, regs)); } @@ -101,12 +101,12 @@ procfs_write_regs(p, regs) struct reg *regs; { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (EIO); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (set_regs(p, regs)); } @@ -121,12 +121,12 @@ procfs_read_fpregs(p, fpregs) struct fpreg *fpregs; { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (EIO); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (fill_fpregs(p, fpregs)); } @@ -136,12 +136,12 @@ procfs_write_fpregs(p, fpregs) struct fpreg *fpregs; { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (EIO); } 
- mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (set_fpregs(p, fpregs)); } diff --git a/sys/alpha/alpha/trap.c b/sys/alpha/alpha/trap.c index 7aa7362..eada017 100644 --- a/sys/alpha/alpha/trap.c +++ b/sys/alpha/alpha/trap.c @@ -106,10 +106,10 @@ userret(p, frame, oticks) /* take pending signals */ while ((sig = CURSIG(p)) != 0) { if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); postsig(sig); } - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); p->p_priority = p->p_usrpri; if (want_resched) { /* @@ -125,30 +125,30 @@ userret(p, frame, oticks) setrunqueue(p); p->p_stats->p_ru.ru_nivcsw++; mi_switch(); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PICKUP_GIANT(); splx(s); while ((sig = CURSIG(p)) != 0) { if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); postsig(sig); } - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); } /* * If profiling, charge recent system time to the trapped pc. */ if (p->p_sflag & PS_PROFIL) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock(&Giant); + mtx_lock_spin(&sched_lock); addupc_task(p, frame->tf_regs[FRAME_PC], (int)(p->p_sticks - oticks) * psratio); } curpriority = p->p_priority; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } static void @@ -230,9 +230,9 @@ trap(a0, a1, a2, entry, framep) ucode = 0; user = (framep->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) != 0; if (user) { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); sticks = p->p_sticks; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); p->p_md.md_tf = framep; #if 0 /* This is to catch some weird stuff on the UDB (mj) */ @@ -259,12 +259,12 @@ trap(a0, a1, a2, entry, framep) * and per-process unaligned-access-handling flags). 
*/ if (user) { - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); if ((i = unaligned_fixup(a0, a1, a2, p)) == 0) { - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); goto out; } - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); ucode = a0; /* VA */ break; } @@ -288,13 +288,13 @@ trap(a0, a1, a2, entry, framep) * is not requested or if the completion fails. */ if (user) { - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); if (a0 & EXCSUM_SWC) if (fp_software_completion(a1, p)) { - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); goto out; } - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); i = SIGFPE; ucode = a0; /* exception summary */ break; @@ -415,7 +415,7 @@ trap(a0, a1, a2, entry, framep) goto out; } - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); /* * It is only a kernel address space fault iff: * 1. !user and @@ -529,11 +529,11 @@ trap(a0, a1, a2, entry, framep) rv = KERN_INVALID_ADDRESS; } if (rv == KERN_SUCCESS) { - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); goto out; } - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); if (!user) { /* Check for copyin/copyout fault */ if (p != NULL && @@ -575,7 +575,7 @@ out: framep->tf_regs[FRAME_SP] = alpha_pal_rdusp(); userret(p, framep, sticks); if (mtx_owned(&Giant)) - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); } return; @@ -621,7 +621,7 @@ syscall(code, framep) * Find our per-cpu globals. */ globalp = (struct globaldata *) alpha_pal_rdval(); - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); framep->tf_regs[FRAME_TRAPARG_A0] = 0; framep->tf_regs[FRAME_TRAPARG_A1] = 0; @@ -635,9 +635,9 @@ syscall(code, framep) p = curproc; p->p_md.md_tf = framep; opc = framep->tf_regs[FRAME_PC] - 4; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); sticks = p->p_sticks; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); #ifdef DIAGNOSTIC alpha_fpstate_check(p); @@ -739,7 +739,7 @@ syscall(code, framep) * is not the case, this code will need to be revisited. 
*/ STOPEVENT(p, S_SCX, code); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); #ifdef WITNESS if (witness_list(p)) { @@ -763,9 +763,9 @@ ast(framep) u_quad_t sticks; p = curproc; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); sticks = p->p_sticks; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); p->p_md.md_tf = framep; if ((framep->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) == 0) @@ -774,36 +774,36 @@ ast(framep) cnt.v_soft++; PCPU_SET(astpending, 0); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_sflag & PS_OWEUPC) { p->p_sflag &= ~PS_OWEUPC; - mtx_exit(&sched_lock, MTX_SPIN); - mtx_enter(&Giant, MTX_DEF); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); + mtx_lock(&Giant); + mtx_lock_spin(&sched_lock); addupc_task(p, p->p_stats->p_prof.pr_addr, p->p_stats->p_prof.pr_ticks); } if (p->p_sflag & PS_ALRMPEND) { p->p_sflag &= ~PS_ALRMPEND; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); psignal(p, SIGVTALRM); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); } if (p->p_sflag & PS_PROFPEND) { p->p_sflag &= ~PS_PROFPEND; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); psignal(p, SIGPROF); } else - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); userret(p, framep, sticks); if (mtx_owned(&Giant)) - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); } /* diff --git a/sys/alpha/alpha/vm_machdep.c b/sys/alpha/alpha/vm_machdep.c index 6d96337..851b1878 100644 --- a/sys/alpha/alpha/vm_machdep.c +++ b/sys/alpha/alpha/vm_machdep.c @@ -253,8 +253,8 @@ cpu_exit(p) { alpha_fpstate_drop(p); - mtx_enter(&sched_lock, MTX_SPIN); - mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH); + mtx_lock_spin(&sched_lock); + mtx_unlock_flags(&Giant, MTX_NOSWITCH); mtx_assert(&Giant, MA_NOTOWNED); /* @@ -437,7 
+437,7 @@ vm_page_zero_idle() if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) return(0); - if (mtx_try_enter(&Giant, MTX_DEF)) { + if (mtx_trylock(&Giant)) { s = splvm(); m = vm_page_list_find(PQ_FREE, free_rover, FALSE); zero_state = 0; @@ -466,7 +466,7 @@ vm_page_zero_idle() } free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK; splx(s); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); return (1); } return (0); diff --git a/sys/alpha/include/cpu.h b/sys/alpha/include/cpu.h index 515228a..8ecaaf3 100644 --- a/sys/alpha/include/cpu.h +++ b/sys/alpha/include/cpu.h @@ -81,9 +81,9 @@ struct clockframe { * through trap, marking the proc as needing a profiling tick. */ #define need_proftick(p) do { \ - mtx_enter(&sched_lock, MTX_SPIN); \ + mtx_lock_spin(&sched_lock); \ (p)->p_sflag |= PS_OWEUPC; \ - mtx_exit(&sched_lock, MTX_SPIN); \ + mtx_unlock_spin(&sched_lock); \ aston(); \ } while (0) diff --git a/sys/alpha/include/mutex.h b/sys/alpha/include/mutex.h index 40717b0..564ad19 100644 --- a/sys/alpha/include/mutex.h +++ b/sys/alpha/include/mutex.h @@ -39,26 +39,12 @@ /* * Debugging */ -#ifdef MUTEX_DEBUG - -#ifdef _KERN_MUTEX_C_ -char STR_IEN[] = "ps & IPL == IPL_0"; -char STR_IDIS[] = "ps & IPL == IPL_HIGH"; -char STR_SIEN[] = "mpp->mtx_saveintr == IPL_0"; -#else /* _KERN_MUTEX_C_ */ -extern char STR_IEN[]; -extern char STR_IDIS[]; -extern char STR_SIEN[]; -#endif /* _KERN_MUTEX_C_ */ - -#endif /* MUTEX_DEBUG */ - #define ASS_IEN MPASS2((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) \ - == ALPHA_PSL_IPL_0, STR_IEN) + == ALPHA_PSL_IPL_0, "ps & IPL == IPL_0") #define ASS_IDIS MPASS2((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) \ - == ALPHA_PSL_IPL_HIGH, STR_IDIS) + == ALPHA_PSL_IPL_HIGH, "ps & IPL == IPL_HIGH") #define ASS_SIEN(mpp) MPASS2((mpp)->mtx_saveintr \ - == ALPHA_PSL_IPL_0, STR_SIEN) + == ALPHA_PSL_IPL_0, "mpp->mtx_saveintr == IPL_0") #define mtx_legal2block() \ ((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) == ALPHA_PSL_IPL_0) @@ -68,34 +54,33 @@ extern char STR_SIEN[]; 
*-------------------------------------------------------------------------- */ -#ifdef _KERN_MUTEX_C_ - -#define _V(x) __STRING(x) - /* - * Get a spin lock, handle recusion inline (as the less common case) + * Get a spin lock, handle recusion inline. */ - -#define _getlock_spin_block(mp, tid, type) do { \ +#define _get_spin_lock(mp, tid, opts) do { \ u_int _ipl = alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH); \ - if (!_obtain_lock(mp, tid)) \ - mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _ipl); \ - else { \ + if (!_obtain_lock((mp), (tid))) { \ + if ((mp)->mtx_lock == (uintptr_t)(tid)) \ + (mp)->mtx_recurse++; \ + else \ + _mtx_lock_spin((mp), (opts), _ipl, __FILE__, \ + __LINE__); \ + } else { \ alpha_mb(); \ (mp)->mtx_saveintr = _ipl; \ } \ } while (0) -#undef _V - -#endif /* _KERN_MUTEX_C_ */ - #endif /* _KERNEL */ #else /* !LOCORE */ /* * Simple assembly macros to get and release non-recursive spin locks + * + * XXX: These are presently unused and cannot be used right now. Need to be + * re-written (they are wrong). If you plan to use this and still see + * this message, know not to unless you fix them first! :-) */ #define MTX_ENTER(lck) \ ldiq a0, ALPHA_PSL_IPL_HIGH; \ diff --git a/sys/alpha/osf1/osf1_misc.c b/sys/alpha/osf1/osf1_misc.c index e6cc37c..90dbc62 100644 --- a/sys/alpha/osf1/osf1_misc.c +++ b/sys/alpha/osf1/osf1_misc.c @@ -1341,9 +1341,9 @@ osf1_getrusage(p, uap) switch (uap->who) { case RUSAGE_SELF: rup = &p->p_stats->p_ru; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); calcru(p, &rup->ru_utime, &rup->ru_stime, NULL); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); break; case RUSAGE_CHILDREN: |