author     mtm <mtm@FreeBSD.org>  2004-01-22 15:31:56 +0000
committer  mtm <mtm@FreeBSD.org>  2004-01-22 15:31:56 +0000
commit     7a517ec35f80fe08da85e81fe1aa41bf8d798ab2 (patch)
tree       34fdcb089696e32bfb3291aa3b79f29eec73eb4c /lib/libthr
parent     9e559fd52a0ce5de58459528736b0991ca0ba181 (diff)
o Implement the pthread_spin_* functions in libthr.
o Man pages
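
For context, the interface implemented here is the standard POSIX spinlock API declared in <pthread.h>. A minimal usage sketch (not part of the commit; the worker/counter names are purely illustrative) looks like this:

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t lock;
static int counter;

static void *
worker(void *arg)
{
	(void)arg;
	/* Busy-wait until the lock is free, then bump the shared counter. */
	(void)pthread_spin_lock(&lock);
	counter++;
	(void)pthread_spin_unlock(&lock);
	return (NULL);
}

int
main(void)
{
	pthread_t tid;

	/* PTHREAD_PROCESS_PRIVATE: shared only among threads of this process. */
	if (pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE) != 0)
		return (1);
	if (pthread_create(&tid, NULL, worker, NULL) != 0)
		return (1);
	if (pthread_spin_trylock(&lock) == 0) {
		/* Got the lock without spinning; do the work and release it. */
		counter++;
		(void)pthread_spin_unlock(&lock);
	}
	(void)pthread_join(tid, NULL);
	(void)pthread_spin_destroy(&lock);
	printf("counter = %d\n", counter);
	return (0);
}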
Diffstat (limited to 'lib/libthr')
-rw-r--r--  lib/libthr/thread/thr_private.h  |  5
-rw-r--r--  lib/libthr/thread/thr_spinlock.c | 86
2 files changed, 91 insertions(+), 0 deletions(-)
diff --git a/lib/libthr/thread/thr_private.h b/lib/libthr/thread/thr_private.h
index c2bea3f..bbf5abd 100644
--- a/lib/libthr/thread/thr_private.h
+++ b/lib/libthr/thread/thr_private.h
@@ -211,6 +211,11 @@ struct pthread_mutex {
spinlock_t lock;
};
+struct pthread_spinlock {
+ void *s_owner;
+ unsigned int s_magic;
+};
+
/*
* Flags for mutexes.
*/
diff --git a/lib/libthr/thread/thr_spinlock.c b/lib/libthr/thread/thr_spinlock.c
index 322d2e7..60e9522 100644
--- a/lib/libthr/thread/thr_spinlock.c
+++ b/lib/libthr/thread/thr_spinlock.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2004 Michael Telahun Makonnen <mtm@FreeBSD.Org>
* Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
*
@@ -33,6 +34,9 @@
*
*/
+#include <sys/types.h>
+#include <machine/atomic.h>
+
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
@@ -44,6 +48,88 @@
#include "thr_private.h"
+#define THR_SPIN_MAGIC 0xdadadada
+#define THR_SPIN_UNOWNED (void *)0
+#define MAGIC_TEST_RETURN_ON_FAIL(l) \
+ do { \
+ if ((l) == NULL || (l)->s_magic != THR_SPIN_MAGIC) \
+ return (EINVAL); \
+ } while(0)
+
+__weak_reference(_pthread_spin_destroy, pthread_spin_destroy);
+__weak_reference(_pthread_spin_init, pthread_spin_init);
+__weak_reference(_pthread_spin_lock, pthread_spin_lock);
+__weak_reference(_pthread_spin_trylock, pthread_spin_trylock);
+__weak_reference(_pthread_spin_unlock, pthread_spin_unlock);
+
+int
+_pthread_spin_destroy(pthread_spinlock_t *lock)
+{
+ MAGIC_TEST_RETURN_ON_FAIL((*lock));
+ if ((*lock)->s_owner == THR_SPIN_UNOWNED) {
+ (*lock)->s_magic = 0;
+ free((*lock));
+ *lock = NULL;
+ return (0);
+ }
+ return (EBUSY);
+}
+
+int
+_pthread_spin_init(pthread_spinlock_t *lock, int pshared)
+{
+ struct pthread_spinlock *s;
+
+ if (*lock != NULL) {
+ if ((*lock)->s_magic == THR_SPIN_MAGIC)
+ return (EBUSY);
+ }
+ s = (struct pthread_spinlock *)malloc(sizeof(struct pthread_spinlock));
+ if (s == NULL)
+ return (ENOMEM);
+ s->s_magic = THR_SPIN_MAGIC;
+ s->s_owner = THR_SPIN_UNOWNED;
+ *lock = s;
+ return (0);
+}
+
+/*
+ * Spin until the lock can be acquired. If the calling thread already
+ * owns the lock, return EDEADLK instead of spinning forever; callers
+ * that must not block should use pthread_spin_trylock() instead.
+ */
+int
+_pthread_spin_lock(pthread_spinlock_t *lock)
+{
+ MAGIC_TEST_RETURN_ON_FAIL(*lock);
+ if ((*lock)->s_owner == curthread)
+ return (EDEADLK);
+ while (atomic_cmpset_acq_ptr(&(*lock)->s_owner, THR_SPIN_UNOWNED,
+ (void *)curthread) != 1)
+ ; /* SPIN */
+ return (0);
+}
+
+int
+_pthread_spin_trylock(pthread_spinlock_t *lock)
+{
+ MAGIC_TEST_RETURN_ON_FAIL(*lock);
+ if (atomic_cmpset_acq_ptr(&(*lock)->s_owner, THR_SPIN_UNOWNED,
+ (void *)curthread) == 1)
+ return (0);
+ return (EBUSY);
+}
+
+int
+_pthread_spin_unlock(pthread_spinlock_t *lock)
+{
+ MAGIC_TEST_RETURN_ON_FAIL(*lock);
+ if (atomic_cmpset_rel_ptr(&(*lock)->s_owner, (void *)curthread,
+ THR_SPIN_UNOWNED) == 1)
+ return (0);
+ return (EPERM);
+}
+
void
_spinunlock(spinlock_t *lck)
{
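
The pthread_spin_lock()/pthread_spin_unlock() implementation above takes the lock with a compare-and-swap that has acquire semantics (atomic_cmpset_acq_ptr()) and drops it with one that has release semantics (atomic_cmpset_rel_ptr()). For illustration only, the same pattern expressed with portable C11 <stdatomic.h> (the toy_spinlock names are hypothetical and not part of libthr or this commit):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* 0 means unowned; otherwise the owner's thread identity. */
struct toy_spinlock {
	_Atomic uintptr_t owner;
};

static void
toy_spin_lock(struct toy_spinlock *l, uintptr_t self)
{
	uintptr_t expected;

	for (;;) {
		expected = 0;
		/* Acquire semantics on success, like atomic_cmpset_acq_ptr(). */
		if (atomic_compare_exchange_weak_explicit(&l->owner, &expected,
		    self, memory_order_acquire, memory_order_relaxed))
			return;
		/* SPIN: lock is held by another thread. */
	}
}

static bool
toy_spin_unlock(struct toy_spinlock *l, uintptr_t self)
{
	uintptr_t expected = self;

	/* Release semantics, like atomic_cmpset_rel_ptr(); fails unless we own it. */
	return (atomic_compare_exchange_strong_explicit(&l->owner, &expected,
	    (uintptr_t)0, memory_order_release, memory_order_relaxed));
}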