summaryrefslogtreecommitdiffstats
path: root/lib/libpthread/thread/thr_sig.c
diff options
context:
space:
mode:
authorjb <jb@FreeBSD.org>1998-06-09 23:21:05 +0000
committerjb <jb@FreeBSD.org>1998-06-09 23:21:05 +0000
commit765df5f4d29065eac50b4bfab3407a7da8a90323 (patch)
treee14930dc53674181c346bbe0f43647669e330569 /lib/libpthread/thread/thr_sig.c
parent5ed1d7e9485fa8756d1be3fdf6cfa5125ac45657 (diff)
downloadFreeBSD-src-765df5f4d29065eac50b4bfab3407a7da8a90323.zip
FreeBSD-src-765df5f4d29065eac50b4bfab3407a7da8a90323.tar.gz
Implement compile time debug support instead of tracking file name and
line number every time a file descriptor is locked. This looks like a big change but it isn't. It should reduce the size of libc_r and make it run slightly faster.
Diffstat (limited to 'lib/libpthread/thread/thr_sig.c')
-rw-r--r-- lib/libpthread/thread/thr_sig.c | 20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/lib/libpthread/thread/thr_sig.c b/lib/libpthread/thread/thr_sig.c
index c0fbfd1..43ef9e6 100644
--- a/lib/libpthread/thread/thr_sig.c
+++ b/lib/libpthread/thread/thr_sig.c
@@ -39,17 +39,17 @@
#include "pthread_private.h"
/* Static variables: */
-static int volatile yield_on_unlock_dead = 0;
-static int volatile yield_on_unlock_thread = 0;
-static long volatile thread_dead_lock = 0;
-static long volatile thread_link_list_lock = 0;
+static int volatile yield_on_unlock_dead = 0;
+static int volatile yield_on_unlock_thread = 0;
+static spinlock_t thread_dead_lock = _SPINLOCK_INITIALIZER;
+static spinlock_t thread_link_list_lock = _SPINLOCK_INITIALIZER;
/* Lock the thread list: */
void
_lock_thread_list()
{
/* Lock the thread list: */
- _spinlock(&thread_link_list_lock);
+ _SPINLOCK(&thread_link_list_lock);
}
/* Lock the dead thread list: */
@@ -57,7 +57,7 @@ void
_lock_dead_thread_list()
{
/* Lock the dead thread list: */
- _spinlock(&thread_dead_lock);
+ _SPINLOCK(&thread_dead_lock);
}
/* Lock the thread list: */
@@ -65,7 +65,7 @@ void
_unlock_thread_list()
{
/* Unlock the thread list: */
- _atomic_unlock(&thread_link_list_lock);
+ _SPINUNLOCK(&thread_link_list_lock);
/*
* Check if a scheduler interrupt occurred while the thread
@@ -85,7 +85,7 @@ void
_unlock_dead_thread_list()
{
/* Unlock the dead thread list: */
- _atomic_unlock(&thread_dead_lock);
+ _SPINUNLOCK(&thread_dead_lock);
/*
* Check if a scheduler interrupt occurred while the dead
@@ -137,7 +137,7 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
* unfortunate time which one of the threads is
* modifying the thread list:
*/
- if (thread_link_list_lock)
+ if (thread_link_list_lock.access_lock)
/*
* Set a flag so that the thread that has
* the lock yields when it unlocks the
@@ -149,7 +149,7 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
* unfortunate time which one of the threads is
* modifying the dead thread list:
*/
- if (thread_dead_lock)
+ if (thread_dead_lock.access_lock)
/*
* Set a flag so that the thread that has
* the lock yields when it unlocks the
OpenPOWER on IntegriCloud