From 91d150179059555ef497f4b5b5a560fdb24e472f Mon Sep 17 00:00:00 2001
From: jeff
Date: Tue, 5 Jun 2007 00:00:57 +0000
Subject: Commit 14/14 of sched_lock decomposition.

- Use thread_lock() rather than sched_lock for per-thread scheduling
  synchronization.
- Use the per-process spinlock rather than the sched_lock for per-process
  scheduling synchronization.

Tested by:      kris, current@
Tested on:      i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
---
 sys/netsmb/smb_trantcp.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/sys/netsmb/smb_trantcp.c b/sys/netsmb/smb_trantcp.c
index 5db6b7e..3f4ccaf 100644
--- a/sys/netsmb/smb_trantcp.c
+++ b/sys/netsmb/smb_trantcp.c
@@ -115,9 +115,9 @@ nbssn_rselect(struct nbpcb *nbp, struct timeval *tv, int events,
 retry:
 	ncoll = nselcoll;
-	mtx_lock_spin(&sched_lock);
+	thread_lock(td);
 	td->td_flags |= TDF_SELECT;
-	mtx_unlock_spin(&sched_lock);
+	thread_unlock(td);
 	mtx_unlock(&sellock);
 
 	/* XXX: Should be done when the thread is initialized. */
@@ -144,12 +144,12 @@ retry:
 	 * the process, test P_SELECT and rescan file descriptors if
 	 * necessary.
 	 */
-	mtx_lock_spin(&sched_lock);
+	thread_lock(td);
 	if ((td->td_flags & TDF_SELECT) == 0 || nselcoll != ncoll) {
-		mtx_unlock_spin(&sched_lock);
+		thread_unlock(td);
 		goto retry;
 	}
-	mtx_unlock_spin(&sched_lock);
+	thread_unlock(td);
 
 	if (timo > 0)
 		error = cv_timedwait(&selwait, &sellock, timo);
@@ -161,9 +161,9 @@ retry:
 done:
 	clear_selinfo_list(td);
-	mtx_lock_spin(&sched_lock);
+	thread_lock(td);
 	td->td_flags &= ~TDF_SELECT;
-	mtx_unlock_spin(&sched_lock);
+	thread_unlock(td);
 	mtx_unlock(&sellock);
 done_noproclock:
-- 
cgit v1.1
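
For context, the sketch below (not part of the commit) illustrates the
conversion pattern the hunks above apply: per-thread state that was
previously protected by the single global sched_lock spin mutex is now
protected by the thread's own lock, taken with thread_lock() and
thread_unlock(), which is the point of the decomposition described in the
commit message. This is a minimal sketch under stated assumptions: the
helper name mark_select_flag() is hypothetical, while struct thread,
td_flags, TDF_SELECT, thread_lock() and thread_unlock() are the kernel
interfaces that appear in the diff itself.

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>
    #include <sys/proc.h>

    /*
     * Hypothetical helper: set or clear TDF_SELECT on a thread.
     * Before the sched_lock decomposition this update would have been
     * bracketed by mtx_lock_spin(&sched_lock) / mtx_unlock_spin(&sched_lock);
     * afterwards the thread's own lock suffices, so unrelated threads no
     * longer contend on one global spin mutex.
     */
    static void
    mark_select_flag(struct thread *td, int set)
    {
    	thread_lock(td);
    	if (set)
    		td->td_flags |= TDF_SELECT;
    	else
    		td->td_flags &= ~TDF_SELECT;
    	thread_unlock(td);
    }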