-rw-r--r--  lib/libkse/sys/Makefile.inc      |   3
-rw-r--r--  lib/libkse/sys/lock.c            | 268
-rw-r--r--  lib/libkse/sys/lock.h            |  89
-rw-r--r--  lib/libkse/sys/thr_error.c       |  17
-rw-r--r--  lib/libpthread/sys/Makefile.inc  |   3
-rw-r--r--  lib/libpthread/sys/lock.c        | 268
-rw-r--r--  lib/libpthread/sys/lock.h        |  89
-rw-r--r--  lib/libpthread/sys/thr_error.c   |  17
8 files changed, 740 insertions, 14 deletions
diff --git a/lib/libkse/sys/Makefile.inc b/lib/libkse/sys/Makefile.inc
index 2945285..0b5663d 100644
--- a/lib/libkse/sys/Makefile.inc
+++ b/lib/libkse/sys/Makefile.inc
@@ -2,4 +2,5 @@
.PATH: ${.CURDIR}/sys ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
-SRCS+= thr_error.c _atomic_lock.S thr_enter_uts.S thr_switch.S
+SRCS+= thr_error.c _atomic_lock.S ksd.c thr_enter_uts.S thr_getcontext.S \
+ thr_switch.S lock.c
diff --git a/lib/libkse/sys/lock.c b/lib/libkse/sys/lock.c
new file mode 100644
index 0000000..b1949f4
--- /dev/null
+++ b/lib/libkse/sys/lock.c
@@ -0,0 +1,268 @@
+/*-
+ * Copyright (c) 2001 Daniel Eischen <deischen@FreeBSD.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/types.h>
+#include <machine/atomic.h>
+#include <assert.h>
+#include <stdlib.h>
+
+#include "atomic_ops.h"
+#include "lock.h"
+
+#define LCK_ASSERT assert
+#define MAX_SPINS 500
+
+void
+_lock_destroy(struct lock *lck)
+{
+
+ if ((lck != NULL) && (lck->l_head != NULL)) {
+ free(lck->l_head);
+ lck->l_head = NULL;
+ lck->l_tail = NULL;
+ }
+}
+
+int
+_lock_init(struct lock *lck, enum lock_type ltype,
+ lock_handler_t *waitfunc, lock_handler_t *wakeupfunc)
+{
+
+ if (lck == NULL)
+ return (-1);
+ else if ((lck->l_head = malloc(sizeof(struct lockreq))) == NULL)
+ return (-1);
+ else {
+ lck->l_type = ltype;
+ lck->l_wait = waitfunc;
+ lck->l_wakeup = wakeupfunc;
+ lck->l_head->lr_locked = 0;
+ lck->l_head->lr_watcher = NULL;
+ lck->l_head->lr_owner = NULL;
+ lck->l_head->lr_waiting = 0;
+ lck->l_tail = lck->l_head;
+ }
+ return (0);
+}
+
+int
+_lockuser_init(struct lockuser *lu, void *priv)
+{
+
+ if (lu == NULL)
+ return (-1);
+ else if ((lu->lu_myreq == NULL) &&
+ ((lu->lu_myreq = malloc(sizeof(struct lockreq))) == NULL))
+ return (-1);
+ else {
+ lu->lu_myreq->lr_locked = 1;
+ lu->lu_myreq->lr_watcher = NULL;
+ lu->lu_myreq->lr_owner = lu;
+ lu->lu_myreq->lr_waiting = 0;
+ lu->lu_watchreq = NULL;
+ lu->lu_priority = 0;
+ lu->lu_private = priv;
+ lu->lu_private2 = NULL;
+ }
+ return (0);
+}
+
+void
+_lockuser_destroy(struct lockuser *lu)
+{
+
+ if ((lu != NULL) && (lu->lu_myreq != NULL))
+ free(lu->lu_myreq);
+}
+
+/*
+ * Acquire a lock, waiting (spin or sleep) for it to become available.
+ */
+void
+_lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
+{
+ int i;
+
+ /**
+ * XXX - We probably want to remove these checks to optimize
+ * performance. It is also a bug if any one of the
+ *       checks fails, so it's probably better to just let it
+ * SEGV and fix it.
+ */
+#if 0
+ if (lck == NULL || lu == NULL || lck->l_head == NULL)
+ return;
+#endif
+ if ((lck->l_type & LCK_PRIORITY) == 0)
+ atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq);
+ else {
+ LCK_ASSERT(lu->lu_myreq->lr_locked == 1);
+ LCK_ASSERT(lu->lu_myreq->lr_watcher == NULL);
+ LCK_ASSERT(lu->lu_myreq->lr_owner == lu);
+ LCK_ASSERT(lu->lu_myreq->lr_waiting == 0);
+ LCK_ASSERT(lu->lu_watchreq == NULL);
+
+ lu->lu_priority = prio;
+ /*
+ * Atomically swap the head of the lock request with
+ * this request.
+ */
+ atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq);
+ }
+
+ if (lu->lu_watchreq->lr_locked != 0) {
+ atomic_store_rel_ptr(&lu->lu_watchreq->lr_watcher, lu);
+ if ((lck->l_wait == NULL) ||
+ ((lck->l_type & LCK_ADAPTIVE) == 0)) {
+			while (lu->lu_watchreq->lr_locked != 0)
+ ; /* spin, then yield? */
+ } else {
+ /*
+ * Spin for a bit before invoking the wait function.
+ *
+ * We should be a little smarter here. If we're
+ * running on a single processor, then the lock
+ * owner got preempted and spinning will accomplish
+ * nothing but waste time. If we're running on
+ * multiple processors, the owner could be running
+ * on another CPU and we might acquire the lock if
+ * we spin for a bit.
+ *
+ * The other thing to keep in mind is that threads
+ * acquiring these locks are considered to be in
+ * critical regions; they will not be preempted by
+ * the _UTS_ until they release the lock. It is
+ * therefore safe to assume that if a lock can't
+ * be acquired, it is currently held by a thread
+ * running in another KSE.
+ */
+ for (i = 0; i < MAX_SPINS; i++) {
+ if (lu->lu_watchreq->lr_locked == 0)
+ return;
+ }
+ atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 1);
+ while (lu->lu_watchreq->lr_locked != 0)
+ lck->l_wait(lck, lu);
+ atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 0);
+ }
+ }
+}
+
+/*
+ * Release a lock.
+ */
+void
+_lock_release(struct lock *lck, struct lockuser *lu)
+{
+ struct lockuser *lu_tmp, *lu_h;
+ struct lockreq *myreq;
+ int prio_h;
+
+ /**
+ * XXX - We probably want to remove these checks to optimize
+ * performance. It is also a bug if any one of the
+ *       checks fails, so it's probably better to just let it
+ * SEGV and fix it.
+ */
+#if 0
+ if ((lck == NULL) || (lu == NULL))
+ return;
+#endif
+ if ((lck->l_type & LCK_PRIORITY) != 0) {
+ prio_h = 0;
+ lu_h = NULL;
+
+ /* Update tail if our request is last. */
+ if (lu->lu_watchreq->lr_owner == NULL) {
+ atomic_store_rel_ptr(&lck->l_tail, lu->lu_myreq);
+ atomic_store_rel_ptr(&lu->lu_myreq->lr_owner, NULL);
+ } else {
+ /* Remove ourselves from the list. */
+ atomic_store_rel_ptr(&lu->lu_myreq->lr_owner,
+ lu->lu_watchreq->lr_owner);
+ atomic_store_rel_ptr(
+ &lu->lu_watchreq->lr_owner->lu_myreq, lu->lu_myreq);
+ }
+ /*
+ * The watch request now becomes our own because we've
+ * traded away our previous request. Save our previous
+ * request so that we can grant the lock.
+ */
+ myreq = lu->lu_myreq;
+ lu->lu_myreq = lu->lu_watchreq;
+ lu->lu_watchreq = NULL;
+ lu->lu_myreq->lr_locked = 1;
+ lu->lu_myreq->lr_owner = lu;
+ lu->lu_myreq->lr_watcher = NULL;
+ lu->lu_myreq->lr_waiting = 0;
+ /*
+ * Traverse the list of lock requests in reverse order
+ * looking for the user with the highest priority.
+ */
+ for (lu_tmp = lck->l_tail->lr_watcher; lu_tmp != NULL;
+ lu_tmp = lu_tmp->lu_myreq->lr_watcher) {
+ if (lu_tmp->lu_priority > prio_h) {
+ lu_h = lu_tmp;
+ prio_h = lu_tmp->lu_priority;
+ }
+ }
+ if (lu_h != NULL) {
+ /* Give the lock to the highest priority user. */
+ atomic_store_rel_long(&lu_h->lu_watchreq->lr_locked, 0);
+ if ((lu_h->lu_watchreq->lr_waiting != 0) &&
+ (lck->l_wakeup != NULL))
+ /* Notify the sleeper */
+ lck->l_wakeup(lck, lu_h->lu_myreq->lr_watcher);
+ } else {
+ /* Give the lock to the previous request. */
+ atomic_store_rel_long(&myreq->lr_locked, 0);
+ if ((myreq->lr_waiting != 0) &&
+ (lck->l_wakeup != NULL))
+ /* Notify the sleeper */
+ lck->l_wakeup(lck, myreq->lr_watcher);
+ }
+ } else {
+ /*
+ * The watch request now becomes our own because we've
+ * traded away our previous request. Save our previous
+ * request so that we can grant the lock.
+ */
+ myreq = lu->lu_myreq;
+ lu->lu_myreq = lu->lu_watchreq;
+ lu->lu_watchreq = NULL;
+ lu->lu_myreq->lr_locked = 1;
+ lu->lu_myreq->lr_waiting = 0;
+ /* Give the lock to the previous request. */
+ atomic_store_rel_long(&myreq->lr_locked, 0);
+ if ((myreq->lr_waiting != 0) &&
+ (lck->l_wakeup != NULL))
+ /* Notify the sleeper */
+ lck->l_wakeup(lck, myreq->lr_watcher);
+
+ }
+}
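
The new lock.c implements a swap-based FIFO queue lock: each lockuser brings its own lockreq, atomically trades it for the current head of the lock, and then waits on the request it received until the releaser clears lr_locked. A minimal usage sketch of the API (illustration only, not part of the commit; assumes file-scope, zero-initialized structures so _lockuser_init() allocates the request):

#include "lock.h"

static struct lock	lk;
static struct lockuser	lu;

static void
example(void)
{
	/* Default FIFO spin lock: no wait/wakeup handlers needed. */
	if (_lock_init(&lk, LCK_DEFAULT, NULL, NULL) != 0 ||
	    _lockuser_init(&lu, NULL) != 0)
		return;

	_lock_acquire(&lk, &lu, 0);	/* prio is unused for LCK_DEFAULT */
	/* ... critical region ... */
	_lock_release(&lk, &lu);

	_lockuser_destroy(&lu);
	_lock_destroy(&lk);
}

Note that a lockuser can wait on only one lock at a time: acquiring trades lu_myreq away into the queue, and releasing trades the watched request back.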
diff --git a/lib/libkse/sys/lock.h b/lib/libkse/sys/lock.h
new file mode 100644
index 0000000..eeb7286
--- /dev/null
+++ b/lib/libkse/sys/lock.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2001 Daniel Eischen <deischen@FreeBSD.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _LOCK_H_
+#define _LOCK_H_
+
+struct lockreq;
+struct lockuser;
+struct lock;
+
+enum lock_type {
+ LCK_DEFAULT = 0x0000, /* default is FIFO spin locks */
+ LCK_PRIORITY = 0x0001,
+ LCK_ADAPTIVE = 0x0002 /* call user-supplied handlers */
+};
+
+typedef void lock_handler_t(struct lock *, struct lockuser *);
+
+struct lock {
+ struct lockreq *l_head;
+ struct lockreq *l_tail; /* only used for priority locks */
+ enum lock_type l_type;
+ lock_handler_t *l_wait; /* only used for adaptive locks */
+ lock_handler_t *l_wakeup; /* only used for adaptive locks */
+};
+
+/* Try to make this >= CACHELINESIZE */
+struct lockreq {
+ volatile long lr_locked; /* lock granted = 0, busy otherwise */
+ struct lockuser *lr_watcher; /* only used for priority locks */
+ struct lockuser *lr_owner; /* only used for priority locks */
+ long lr_waiting; /* non-zero when wakeup needed */
+};
+
+struct lockuser {
+ struct lockreq *lu_myreq; /* request to give up/trade */
+ struct lockreq *lu_watchreq; /* watch this request */
+ int lu_priority; /* only used for priority locks */
+ void *lu_private1; /* private{1,2} are initialized to */
+ void *lu_private2; /* NULL and can be used by caller */
+#define lu_private lu_private1
+};
+
+#define _LCK_INITIALIZER(lck_req) { &lck_req, NULL, LCK_DEFAULT, \
+ NULL, NULL }
+#define _LCK_REQUEST_INITIALIZER { 0, NULL, NULL, 0 }
+
+#define _LCK_BUSY(lu) ((lu)->lu_watchreq->lr_locked != 0)
+#define _LCK_GRANTED(lu) ((lu)->lu_watchreq->lr_locked == 0)
+
+#define _LCK_SET_PRIVATE(lu, p) (lu)->lu_private = (void *)(p)
+#define _LCK_GET_PRIVATE(lu) (lu)->lu_private
+#define _LCK_SET_PRIVATE2(lu, p) (lu)->lu_private2 = (void *)(p)
+#define _LCK_GET_PRIVATE2(lu) (lu)->lu_private2
+
+void _lock_destroy(struct lock *);
+int _lock_init(struct lock *, enum lock_type,
+ lock_handler_t *, lock_handler_t *);
+int _lockuser_init(struct lockuser *lu, void *priv);
+void _lockuser_destroy(struct lockuser *lu);
+void _lock_acquire(struct lock *, struct lockuser *, int);
+void _lock_release(struct lock *, struct lockuser *);
+
+#endif
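
Because struct lock and struct lockreq are plain aggregates, the initializer macros above also permit fully static setup where malloc() is not yet usable. A sketch under that assumption:

#include "lock.h"

static struct lockreq	initial_req = _LCK_REQUEST_INITIALIZER;
static struct lock	static_lk = _LCK_INITIALIZER(initial_req);

_LCK_BUSY() and _LCK_GRANTED() then poll the watched request, and _LCK_SET_PRIVATE()/_LCK_SET_PRIVATE2() let the caller stash context (for example a thread pointer) on the lockuser for the wait and wakeup handlers to recover.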
diff --git a/lib/libkse/sys/thr_error.c b/lib/libkse/sys/thr_error.c
index f002ee9..a4a8507 100644
--- a/lib/libkse/sys/thr_error.c
+++ b/lib/libkse/sys/thr_error.c
@@ -34,16 +34,21 @@
* $FreeBSD$
*/
#include <pthread.h>
+#include "libc_private.h"
#include "thr_private.h"
extern int errno;
int * __error()
{
- int *p_errno;
- if (_thread_run == _thread_initial) {
- p_errno = &errno;
- } else {
- p_errno = &_thread_run->error;
+ struct pthread *curthread;
+
+ if (__isthreaded == 0)
+ return (&errno);
+ else {
+ curthread = _get_curthread();
+ if ((curthread == NULL) || (curthread == _thr_initial))
+ return (&errno);
+ else
+ return (&curthread->error);
}
- return(p_errno);
}
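
The rewritten __error() keys off __isthreaded rather than comparing _thread_run against _thread_initial, so single-threaded processes (and the initial thread) keep using the global errno with no per-thread lookup. Callers never invoke it directly; FreeBSD's <errno.h> defines errno as (*__error()), so ordinary code picks up the per-thread slot transparently. A sketch for illustration:

#include <errno.h>	/* errno expands to (*__error()) */
#include <fcntl.h>

static int
open_or_error(const char *path)
{
	int fd;

	if ((fd = open(path, O_RDONLY)) == -1)
		return (-errno);	/* this thread's error slot, not a global */
	return (fd);
}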
diff --git a/lib/libpthread/sys/Makefile.inc b/lib/libpthread/sys/Makefile.inc
index 2945285..0b5663d 100644
--- a/lib/libpthread/sys/Makefile.inc
+++ b/lib/libpthread/sys/Makefile.inc
@@ -2,4 +2,5 @@
.PATH: ${.CURDIR}/sys ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
-SRCS+= thr_error.c _atomic_lock.S thr_enter_uts.S thr_switch.S
+SRCS+= thr_error.c _atomic_lock.S ksd.c thr_enter_uts.S thr_getcontext.S \
+ thr_switch.S lock.c
diff --git a/lib/libpthread/sys/lock.c b/lib/libpthread/sys/lock.c
new file mode 100644
index 0000000..b1949f4
--- /dev/null
+++ b/lib/libpthread/sys/lock.c
@@ -0,0 +1,268 @@
+/*-
+ * Copyright (c) 2001 Daniel Eischen <deischen@FreeBSD.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/types.h>
+#include <machine/atomic.h>
+#include <assert.h>
+#include <stdlib.h>
+
+#include "atomic_ops.h"
+#include "lock.h"
+
+#define LCK_ASSERT assert
+#define MAX_SPINS 500
+
+void
+_lock_destroy(struct lock *lck)
+{
+
+ if ((lck != NULL) && (lck->l_head != NULL)) {
+ free(lck->l_head);
+ lck->l_head = NULL;
+ lck->l_tail = NULL;
+ }
+}
+
+int
+_lock_init(struct lock *lck, enum lock_type ltype,
+ lock_handler_t *waitfunc, lock_handler_t *wakeupfunc)
+{
+
+ if (lck == NULL)
+ return (-1);
+ else if ((lck->l_head = malloc(sizeof(struct lockreq))) == NULL)
+ return (-1);
+ else {
+ lck->l_type = ltype;
+ lck->l_wait = waitfunc;
+ lck->l_wakeup = wakeupfunc;
+ lck->l_head->lr_locked = 0;
+ lck->l_head->lr_watcher = NULL;
+ lck->l_head->lr_owner = NULL;
+ lck->l_head->lr_waiting = 0;
+ lck->l_tail = lck->l_head;
+ }
+ return (0);
+}
+
+int
+_lockuser_init(struct lockuser *lu, void *priv)
+{
+
+ if (lu == NULL)
+ return (-1);
+ else if ((lu->lu_myreq == NULL) &&
+ ((lu->lu_myreq = malloc(sizeof(struct lockreq))) == NULL))
+ return (-1);
+ else {
+ lu->lu_myreq->lr_locked = 1;
+ lu->lu_myreq->lr_watcher = NULL;
+ lu->lu_myreq->lr_owner = lu;
+ lu->lu_myreq->lr_waiting = 0;
+ lu->lu_watchreq = NULL;
+ lu->lu_priority = 0;
+ lu->lu_private = priv;
+ lu->lu_private2 = NULL;
+ }
+ return (0);
+}
+
+void
+_lockuser_destroy(struct lockuser *lu)
+{
+
+ if ((lu != NULL) && (lu->lu_myreq != NULL))
+ free(lu->lu_myreq);
+}
+
+/*
+ * Acquire a lock, waiting (spin or sleep) for it to become available.
+ */
+void
+_lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
+{
+ int i;
+
+ /**
+ * XXX - We probably want to remove these checks to optimize
+ * performance. It is also a bug if any one of the
+ *       checks fails, so it's probably better to just let it
+ * SEGV and fix it.
+ */
+#if 0
+ if (lck == NULL || lu == NULL || lck->l_head == NULL)
+ return;
+#endif
+ if ((lck->l_type & LCK_PRIORITY) == 0)
+ atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq);
+ else {
+ LCK_ASSERT(lu->lu_myreq->lr_locked == 1);
+ LCK_ASSERT(lu->lu_myreq->lr_watcher == NULL);
+ LCK_ASSERT(lu->lu_myreq->lr_owner == lu);
+ LCK_ASSERT(lu->lu_myreq->lr_waiting == 0);
+ LCK_ASSERT(lu->lu_watchreq == NULL);
+
+ lu->lu_priority = prio;
+ /*
+ * Atomically swap the head of the lock request with
+ * this request.
+ */
+ atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq);
+ }
+
+ if (lu->lu_watchreq->lr_locked != 0) {
+ atomic_store_rel_ptr(&lu->lu_watchreq->lr_watcher, lu);
+ if ((lck->l_wait == NULL) ||
+ ((lck->l_type & LCK_ADAPTIVE) == 0)) {
+			while (lu->lu_watchreq->lr_locked != 0)
+ ; /* spin, then yield? */
+ } else {
+ /*
+ * Spin for a bit before invoking the wait function.
+ *
+ * We should be a little smarter here. If we're
+ * running on a single processor, then the lock
+ * owner got preempted and spinning will accomplish
+ * nothing but waste time. If we're running on
+ * multiple processors, the owner could be running
+ * on another CPU and we might acquire the lock if
+ * we spin for a bit.
+ *
+ * The other thing to keep in mind is that threads
+ * acquiring these locks are considered to be in
+ * critical regions; they will not be preempted by
+ * the _UTS_ until they release the lock. It is
+ * therefore safe to assume that if a lock can't
+ * be acquired, it is currently held by a thread
+ * running in another KSE.
+ */
+ for (i = 0; i < MAX_SPINS; i++) {
+ if (lu->lu_watchreq->lr_locked == 0)
+ return;
+ }
+ atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 1);
+ while (lu->lu_watchreq->lr_locked != 0)
+ lck->l_wait(lck, lu);
+ atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 0);
+ }
+ }
+}
+
+/*
+ * Release a lock.
+ */
+void
+_lock_release(struct lock *lck, struct lockuser *lu)
+{
+ struct lockuser *lu_tmp, *lu_h;
+ struct lockreq *myreq;
+ int prio_h;
+
+ /**
+ * XXX - We probably want to remove these checks to optimize
+ * performance. It is also a bug if any one of the
+ *       checks fails, so it's probably better to just let it
+ * SEGV and fix it.
+ */
+#if 0
+ if ((lck == NULL) || (lu == NULL))
+ return;
+#endif
+ if ((lck->l_type & LCK_PRIORITY) != 0) {
+ prio_h = 0;
+ lu_h = NULL;
+
+ /* Update tail if our request is last. */
+ if (lu->lu_watchreq->lr_owner == NULL) {
+ atomic_store_rel_ptr(&lck->l_tail, lu->lu_myreq);
+ atomic_store_rel_ptr(&lu->lu_myreq->lr_owner, NULL);
+ } else {
+ /* Remove ourselves from the list. */
+ atomic_store_rel_ptr(&lu->lu_myreq->lr_owner,
+ lu->lu_watchreq->lr_owner);
+ atomic_store_rel_ptr(
+ &lu->lu_watchreq->lr_owner->lu_myreq, lu->lu_myreq);
+ }
+ /*
+ * The watch request now becomes our own because we've
+ * traded away our previous request. Save our previous
+ * request so that we can grant the lock.
+ */
+ myreq = lu->lu_myreq;
+ lu->lu_myreq = lu->lu_watchreq;
+ lu->lu_watchreq = NULL;
+ lu->lu_myreq->lr_locked = 1;
+ lu->lu_myreq->lr_owner = lu;
+ lu->lu_myreq->lr_watcher = NULL;
+ lu->lu_myreq->lr_waiting = 0;
+ /*
+ * Traverse the list of lock requests in reverse order
+ * looking for the user with the highest priority.
+ */
+ for (lu_tmp = lck->l_tail->lr_watcher; lu_tmp != NULL;
+ lu_tmp = lu_tmp->lu_myreq->lr_watcher) {
+ if (lu_tmp->lu_priority > prio_h) {
+ lu_h = lu_tmp;
+ prio_h = lu_tmp->lu_priority;
+ }
+ }
+ if (lu_h != NULL) {
+ /* Give the lock to the highest priority user. */
+ atomic_store_rel_long(&lu_h->lu_watchreq->lr_locked, 0);
+ if ((lu_h->lu_watchreq->lr_waiting != 0) &&
+ (lck->l_wakeup != NULL))
+ /* Notify the sleeper */
+ lck->l_wakeup(lck, lu_h->lu_myreq->lr_watcher);
+ } else {
+ /* Give the lock to the previous request. */
+ atomic_store_rel_long(&myreq->lr_locked, 0);
+ if ((myreq->lr_waiting != 0) &&
+ (lck->l_wakeup != NULL))
+ /* Notify the sleeper */
+ lck->l_wakeup(lck, myreq->lr_watcher);
+ }
+ } else {
+ /*
+ * The watch request now becomes our own because we've
+ * traded away our previous request. Save our previous
+ * request so that we can grant the lock.
+ */
+ myreq = lu->lu_myreq;
+ lu->lu_myreq = lu->lu_watchreq;
+ lu->lu_watchreq = NULL;
+ lu->lu_myreq->lr_locked = 1;
+ lu->lu_myreq->lr_waiting = 0;
+ /* Give the lock to the previous request. */
+ atomic_store_rel_long(&myreq->lr_locked, 0);
+ if ((myreq->lr_waiting != 0) &&
+ (lck->l_wakeup != NULL))
+ /* Notify the sleeper */
+ lck->l_wakeup(lck, myreq->lr_watcher);
+
+ }
+}
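
For adaptive locks the commit defines only the handler hooks; the actual sleep and wakeup come from the caller (the UTS). A hypothetical pairing, illustration only (the handler names and bodies are assumptions, not from this commit):

#include "lock.h"

static struct lock	adaptive_lk;

/* Hypothetical: park the caller; _lock_acquire() rechecks lr_locked. */
static void
uts_wait(struct lock *lck, struct lockuser *lu)
{
}

/* Hypothetical: make lu's owner runnable once lr_locked is cleared. */
static void
uts_wakeup(struct lock *lck, struct lockuser *lu)
{
}

static void
adaptive_init(void)
{
	_lock_init(&adaptive_lk, LCK_PRIORITY | LCK_ADAPTIVE,
	    uts_wait, uts_wakeup);
}

The protocol is visible in the code above: _lock_acquire() spins up to MAX_SPINS times, sets lr_waiting, and only then loops calling l_wait(); _lock_release() clears lr_locked and calls l_wakeup() when lr_waiting is still set.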
diff --git a/lib/libpthread/sys/lock.h b/lib/libpthread/sys/lock.h
new file mode 100644
index 0000000..eeb7286
--- /dev/null
+++ b/lib/libpthread/sys/lock.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2001 Daniel Eischen <deischen@FreeBSD.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _LOCK_H_
+#define _LOCK_H_
+
+struct lockreq;
+struct lockuser;
+struct lock;
+
+enum lock_type {
+ LCK_DEFAULT = 0x0000, /* default is FIFO spin locks */
+ LCK_PRIORITY = 0x0001,
+ LCK_ADAPTIVE = 0x0002 /* call user-supplied handlers */
+};
+
+typedef void lock_handler_t(struct lock *, struct lockuser *);
+
+struct lock {
+ struct lockreq *l_head;
+ struct lockreq *l_tail; /* only used for priority locks */
+ enum lock_type l_type;
+ lock_handler_t *l_wait; /* only used for adaptive locks */
+ lock_handler_t *l_wakeup; /* only used for adaptive locks */
+};
+
+/* Try to make this >= CACHELINESIZE */
+struct lockreq {
+ volatile long lr_locked; /* lock granted = 0, busy otherwise */
+ struct lockuser *lr_watcher; /* only used for priority locks */
+ struct lockuser *lr_owner; /* only used for priority locks */
+ long lr_waiting; /* non-zero when wakeup needed */
+};
+
+struct lockuser {
+ struct lockreq *lu_myreq; /* request to give up/trade */
+ struct lockreq *lu_watchreq; /* watch this request */
+ int lu_priority; /* only used for priority locks */
+ void *lu_private1; /* private{1,2} are initialized to */
+ void *lu_private2; /* NULL and can be used by caller */
+#define lu_private lu_private1
+};
+
+#define _LCK_INITIALIZER(lck_req) { &lck_req, NULL, LCK_DEFAULT, \
+ NULL, NULL }
+#define _LCK_REQUEST_INITIALIZER { 0, NULL, NULL, 0 }
+
+#define _LCK_BUSY(lu) ((lu)->lu_watchreq->lr_locked != 0)
+#define _LCK_GRANTED(lu) ((lu)->lu_watchreq->lr_locked == 0)
+
+#define _LCK_SET_PRIVATE(lu, p) (lu)->lu_private = (void *)(p)
+#define _LCK_GET_PRIVATE(lu) (lu)->lu_private
+#define _LCK_SET_PRIVATE2(lu, p) (lu)->lu_private2 = (void *)(p)
+#define _LCK_GET_PRIVATE2(lu) (lu)->lu_private2
+
+void _lock_destroy(struct lock *);
+int _lock_init(struct lock *, enum lock_type,
+ lock_handler_t *, lock_handler_t *);
+int _lockuser_init(struct lockuser *lu, void *priv);
+void _lockuser_destroy(struct lockuser *lu);
+void _lock_acquire(struct lock *, struct lockuser *, int);
+void _lock_release(struct lock *, struct lockuser *);
+
+#endif
diff --git a/lib/libpthread/sys/thr_error.c b/lib/libpthread/sys/thr_error.c
index f002ee9..a4a8507 100644
--- a/lib/libpthread/sys/thr_error.c
+++ b/lib/libpthread/sys/thr_error.c
@@ -34,16 +34,21 @@
* $FreeBSD$
*/
#include <pthread.h>
+#include "libc_private.h"
#include "thr_private.h"
extern int errno;
int * __error()
{
- int *p_errno;
- if (_thread_run == _thread_initial) {
- p_errno = &errno;
- } else {
- p_errno = &_thread_run->error;
+ struct pthread *curthread;
+
+ if (__isthreaded == 0)
+ return (&errno);
+ else {
+ curthread = _get_curthread();
+ if ((curthread == NULL) || (curthread == _thr_initial))
+ return (&errno);
+ else
+ return (&curthread->error);
}
- return(p_errno);
}