path: root/lib/libthr/thread/thr_spinlock.c
author    davidxu <davidxu@FreeBSD.org>  2005-04-02 01:20:00 +0000
committer davidxu <davidxu@FreeBSD.org>  2005-04-02 01:20:00 +0000
commit    f066519e91e2290cb79ef12fe7c958ee462cda6c (patch)
tree      6aaef5f553a6539306bd6f5679d039ed3c2abcce /lib/libthr/thread/thr_spinlock.c
parent    3cc412b7837a105c757df856c422eb5f497bad67 (diff)
Import my recent 1:1 threading work. Improved features include:

1. Fast simple-type mutexes.
2. Working __thread TLS.
3. Working asynchronous cancellation (using a signal).
4. Thread synchronization fully based on umtx; in particular, condition variables and the other synchronization objects were rewritten to use umtx directly. These objects can be shared between processes via shared memory, but that requires an ABI change which has not happened yet.
5. Default stack size increased to 1M on 32-bit platforms and 2M on 64-bit platforms.

As a result, some mysql super-smack benchmarks show massively improved performance.

Okayed by: jeff, mtm, rwatson, scottl
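The umtx-centred design described above reduces each synchronization object to a compare-and-swap fast path in user space, with a kernel-assisted sleep only on contention. The following is a minimal sketch of that idea in portable C11, not the libthr implementation: the toy_* names are hypothetical stand-ins, and the sched_yield() fallback merely approximates the real umtx syscall wait.

#include <sched.h>
#include <stdatomic.h>

/* Hypothetical stand-in for struct umtx: TOY_UNOWNED means
 * unlocked, any other value is the owning thread's id. */
typedef struct {
	atomic_long	u_owner;
} toy_umtx;

#define	TOY_UNOWNED	0L

static void
toy_lock(toy_umtx *mtx, long tid)
{
	long expected;

	for (;;) {
		expected = TOY_UNOWNED;
		/* Fast path: an uncontended acquire is a single CAS,
		 * the same shape as the atomic_cmpset_acq_ptr() loop
		 * this commit removes from _pthread_spin_lock(). */
		if (atomic_compare_exchange_weak_explicit(
		    &mtx->u_owner, &expected, tid,
		    memory_order_acquire, memory_order_relaxed))
			return;
		/* Slow path: real umtx code asks the kernel to sleep
		 * until the owner releases the word; yielding is a
		 * crude user-space substitute for that wait. */
		sched_yield();
	}
}

static void
toy_unlock(toy_umtx *mtx, long tid)
{
	long expected = tid;

	/* Release only if we actually own the lock; a real umtx
	 * unlock would also wake any kernel-blocked waiters. */
	atomic_compare_exchange_strong_explicit(&mtx->u_owner,
	    &expected, TOY_UNOWNED,
	    memory_order_release, memory_order_relaxed);
}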
Diffstat (limited to 'lib/libthr/thread/thr_spinlock.c')
-rw-r--r--  lib/libthr/thread/thr_spinlock.c | 168
1 file changed, 59 insertions(+), 109 deletions(-)
diff --git a/lib/libthr/thread/thr_spinlock.c b/lib/libthr/thread/thr_spinlock.c
index d590ad5..7d5cd84 100644
--- a/lib/libthr/thread/thr_spinlock.c
+++ b/lib/libthr/thread/thr_spinlock.c
@@ -1,5 +1,4 @@
/*
- * Copyright (c) 2004 Michael Telahun Makonnen <mtm@FreeBSD.Org>
* Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
*
@@ -35,141 +34,92 @@
*/
#include <sys/types.h>
-#include <machine/atomic.h>
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <sched.h>
#include <pthread.h>
-#include <unistd.h>
-
#include <libc_private.h>
+#include <spinlock.h>
#include "thr_private.h"
-#define THR_SPIN_MAGIC 0xdadadada
-#define THR_SPIN_UNOWNED (void *)0
-#define MAGIC_TEST_RETURN_ON_FAIL(l) \
- do { \
- if ((l) == NULL || (l)->s_magic != THR_SPIN_MAGIC) \
- return (EINVAL); \
- } while(0)
+#define MAX_SPINLOCKS 20
-__weak_reference(_pthread_spin_destroy, pthread_spin_destroy);
-__weak_reference(_pthread_spin_init, pthread_spin_init);
-__weak_reference(_pthread_spin_lock, pthread_spin_lock);
-__weak_reference(_pthread_spin_trylock, pthread_spin_trylock);
-__weak_reference(_pthread_spin_unlock, pthread_spin_unlock);
+/*
+ * These data structures are used to trace all spinlocks
+ * in libc.
+ */
+struct spinlock_extra {
+ spinlock_t *owner;
+};
-int
-_pthread_spin_destroy(pthread_spinlock_t *lock)
-{
- MAGIC_TEST_RETURN_ON_FAIL((*lock));
- if ((*lock)->s_owner == THR_SPIN_UNOWNED) {
- (*lock)->s_magic = 0;
- free((*lock));
- *lock = NULL;
- return (0);
- }
- return (EBUSY);
-}
+static umtx_t spinlock_static_lock;
+static struct spinlock_extra extra[MAX_SPINLOCKS];
+static int spinlock_count;
+static int initialized;
-int
-_pthread_spin_init(pthread_spinlock_t *lock, int pshared)
-{
- struct pthread_spinlock *s;
-
- s = (struct pthread_spinlock *)malloc(sizeof(struct pthread_spinlock));
- if (s == NULL)
- return (ENOMEM);
- s->s_magic = THR_SPIN_MAGIC;
- s->s_owner = THR_SPIN_UNOWNED;
- *lock = s;
- return (0);
-}
+static void init_spinlock(spinlock_t *lck);
/*
- * If the caller sets nonblocking to 1, this function will return
- * immediately without acquiring the lock if it is owned by another thread.
- * If set to 0, it will keep spinning until it acquires the lock.
+ * These are for compatibility only. Spinlocks of this type
+ * are deprecated.
*/
-int
-_pthread_spin_lock(pthread_spinlock_t *lock)
-{
- MAGIC_TEST_RETURN_ON_FAIL(*lock);
- if ((*lock)->s_owner == curthread)
- return (EDEADLK);
- while (atomic_cmpset_acq_ptr(&(*lock)->s_owner, THR_SPIN_UNOWNED,
- (void *)curthread) != 1)
- ; /* SPIN */
- return (0);
-}
-
-int
-_pthread_spin_trylock(pthread_spinlock_t *lock)
-{
- MAGIC_TEST_RETURN_ON_FAIL(*lock);
- if (atomic_cmpset_acq_ptr(&(*lock)->s_owner, THR_SPIN_UNOWNED,
- (void *)curthread) == 1)
- return (0);
- return (EBUSY);
-}
-int
-_pthread_spin_unlock(pthread_spinlock_t *lock)
+void
+_spinunlock(spinlock_t *lck)
{
- MAGIC_TEST_RETURN_ON_FAIL(*lock);
- if (atomic_cmpset_rel_ptr(&(*lock)->s_owner, (void *)curthread,
- THR_SPIN_UNOWNED) == 1)
- return (0);
- return (EPERM);
+ THR_UMTX_UNLOCK(_get_curthread(), (umtx_t *)&lck->access_lock);
}
void
-_spinunlock(spinlock_t *lck)
+_spinlock(spinlock_t *lck)
{
- if (umtx_unlock((struct umtx *)lck, curthread->thr_id))
- abort();
+ if (!__isthreaded)
+ PANIC("Spinlock called when not threaded.");
+ if (!initialized)
+ PANIC("Spinlocks not initialized.");
+ if (lck->fname == NULL)
+ init_spinlock(lck);
+ THR_UMTX_LOCK(_get_curthread(), (umtx_t *)&lck->access_lock);
}
-/*
- * Lock a location for the running thread. Yield to allow other
- * threads to run if this thread is blocked because the lock is
- * not available. Note that this function does not sleep. It
- * assumes that the lock will be available very soon.
- */
void
-_spinlock(spinlock_t *lck)
+_spinlock_debug(spinlock_t *lck, char *fname, int lineno)
{
- if (umtx_lock((struct umtx *)lck, curthread->thr_id))
- abort();
+ _spinlock(lck);
}
-int
-_spintrylock(spinlock_t *lck)
+static void
+init_spinlock(spinlock_t *lck)
{
- int error;
+ static int count = 0;
- error = umtx_lock((struct umtx *)lck, curthread->thr_id);
- if (error != 0 && error != EBUSY)
- abort();
- return (error);
+ THR_UMTX_LOCK(_get_curthread(), &spinlock_static_lock);
+ if ((lck->fname == NULL) && (spinlock_count < MAX_SPINLOCKS)) {
+ lck->fname = (char *)&extra[spinlock_count];
+ extra[spinlock_count].owner = lck;
+ spinlock_count++;
+ }
+ THR_UMTX_UNLOCK(_get_curthread(), &spinlock_static_lock);
+ if (lck->fname == NULL && ++count < 5)
+ stderr_debug("Warning: exceeded max spinlocks");
}
-/*
- * Lock a location for the running thread. Yield to allow other
- * threads to run if this thread is blocked because the lock is
- * not available. Note that this function does not sleep. It
- * assumes that the lock will be available very soon.
- *
- * This function checks if the running thread has already locked the
- * location, warns if this occurs and creates a thread dump before
- * returning.
- */
void
-_spinlock_debug(spinlock_t *lck, char *fname, int lineno)
+_thr_spinlock_init(void)
{
- if (umtx_lock((struct umtx *)lck, curthread->thr_id))
- abort();
+ int i;
+
+ _thr_umtx_init(&spinlock_static_lock);
+ if (initialized != 0) {
+ /*
+		 * Called after fork() to reset the state of libc's spin
+		 * locks. This is not quite right, since libc may be in an
+		 * inconsistent state; resetting the locks so that the
+		 * current thread can acquire them may not help much, but
+		 * we do our best. It would be better to use
+		 * pthread_atfork() in libc.
+ */
+ for (i = 0; i < spinlock_count; i++)
+ _thr_umtx_init((umtx_t *)&extra[i].owner->access_lock);
+ } else {
+ initialized = 1;
+ }
}
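For context on how these entry points are consumed: libc serializes its internal data structures through _spinlock()/_spinunlock(), with locks declared via the internal <spinlock.h> header included in the diff above. A minimal usage sketch follows; the fd_table_lock and fd_table_update names are illustrative only, and _SPINLOCK_INITIALIZER is assumed to zero all fields so that fname == NULL triggers the lazy init_spinlock() registration seen above.

#include <spinlock.h>

/* Hypothetical libc-internal lock; a zeroed spinlock_t is
 * unlocked and not yet registered in the extra[] table. */
static spinlock_t fd_table_lock = _SPINLOCK_INITIALIZER;

void
fd_table_update(void)
{
	/* First use registers the lock via init_spinlock(),
	 * then acquires the umtx behind access_lock. */
	_spinlock(&fd_table_lock);
	/* ... modify the shared structure ... */
	_spinunlock(&fd_table_lock);
}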