path: root/lib/libpthread/sys/lock.c
author		deischen <deischen@FreeBSD.org>	2003-05-16 19:58:30 +0000
committer	deischen <deischen@FreeBSD.org>	2003-05-16 19:58:30 +0000
commit		7f206ad4bb26e0a2be46cd85318999ccba58875c (patch)
tree		b51a83e5c85ef7655ee05174c31348fd86fe4591 /lib/libpthread/sys/lock.c
parent		7d7228c36bf498422a9b6bdcb82758abca864f64 (diff)
download	FreeBSD-src-7f206ad4bb26e0a2be46cd85318999ccba58875c.zip
		FreeBSD-src-7f206ad4bb26e0a2be46cd85318999ccba58875c.tar.gz
Add a method of yielding the current thread with the scheduler
lock held (_thr_sched_switch_unlocked()) and use this to avoid
dropping the scheduler lock and having the scheduler retake the
same lock again.

Add a better way of detecting if a low-level lock is in use.

When switching out a thread due to blocking in the UTS, don't
switch to the KSE's scheduler stack only to switch back to
another thread.  If possible, switch to the new thread directly
from the old thread and avoid the overhead of the extra context
switch.

Check for pending signals on a thread when entering the scheduler
and add them to the thread's signal frame.  This includes some
other minor signal fixes.

Most of this was a joint effort between davidxu and myself.

Reviewed by:	davidxu
Approved by:	re@ (blanket for libpthread)
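
For context, a minimal sketch of the switch-out pattern the new entry point
enables.  Only _thr_sched_switch_unlocked() and _thr_sched_switch() are named
by the commit message; the THR_SCHED_LOCK/THR_SCHED_UNLOCK macros,
THR_SET_STATE(), and the PS_SUSPENDED state are illustrative assumptions
about the surrounding libpthread code, not part of this commit.

/*
 * Sketch: block the current thread and enter the scheduler without
 * dropping and retaking the scheduler lock.  The macros and the
 * PS_SUSPENDED state are assumptions used for illustration only.
 */
static void
example_block_current_thread(struct pthread *curthread)
{
	THR_SCHED_LOCK(curthread, curthread);	/* scheduler lock */
	THR_SET_STATE(curthread, PS_SUSPENDED);

	/*
	 * Old pattern; the scheduler immediately retook the same lock:
	 *
	 *	THR_SCHED_UNLOCK(curthread, curthread);
	 *	_thr_sched_switch(curthread);
	 *
	 * New pattern; the scheduler is entered with the lock still
	 * held and releases it internally:
	 */
	_thr_sched_switch_unlocked(curthread);
}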
Diffstat (limited to 'lib/libpthread/sys/lock.c')
-rw-r--r--	lib/libpthread/sys/lock.c	56
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/lib/libpthread/sys/lock.c b/lib/libpthread/sys/lock.c
index 12ce1a0..d005baa 100644
--- a/lib/libpthread/sys/lock.c
+++ b/lib/libpthread/sys/lock.c
@@ -65,7 +65,7 @@ _lock_init(struct lock *lck, enum lock_type ltype,
 		lck->l_head->lr_watcher = NULL;
 		lck->l_head->lr_owner = NULL;
 		lck->l_head->lr_waiting = 0;
-		lck->l_head->lr_handshake = 0;
+		lck->l_head->lr_active = 1;
 		lck->l_tail = lck->l_head;
 	}
 	return (0);
@@ -85,7 +85,7 @@ _lockuser_init(struct lockuser *lu, void *priv)
 		lu->lu_myreq->lr_watcher = NULL;
 		lu->lu_myreq->lr_owner = lu;
 		lu->lu_myreq->lr_waiting = 0;
-		lu->lu_myreq->lr_handshake = 0;
+		lu->lu_myreq->lr_active = 0;
 		lu->lu_watchreq = NULL;
 		lu->lu_priority = 0;
 		lu->lu_private = priv;
@@ -166,19 +166,16 @@ _lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
 			for (i = 0; i < MAX_SPINS; i++) {
 				if (lu->lu_watchreq->lr_locked == 0)
 					return;
+				if (lu->lu_watchreq->lr_active == 0)
+					break;
 			}
 			atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 1);
 			while (lu->lu_watchreq->lr_locked != 0)
 				lck->l_wait(lck, lu);
 			atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 0);
-			/*
-			 * Wait for original owner to stop accessing the
-			 * lockreq object.
-			 */
-			while (lu->lu_watchreq->lr_handshake)
-				;
 		}
 	}
+	lu->lu_myreq->lr_active = 1;
 }
 
 /*
@@ -240,24 +237,21 @@ _lock_release(struct lock *lck, struct lockuser *lu)
 			}
 		}
 		if (lu_h != NULL) {
-			lu_h->lu_watchreq->lr_handshake = 1;
 			/* Give the lock to the highest priority user. */
-			atomic_store_rel_long(&lu_h->lu_watchreq->lr_locked, 0);
 			if ((lu_h->lu_watchreq->lr_waiting != 0) &&
 			    (lck->l_wakeup != NULL))
 				/* Notify the sleeper */
 				lck->l_wakeup(lck, lu_h->lu_myreq->lr_watcher);
-			atomic_store_rel_long(&lu_h->lu_watchreq->lr_handshake,
-			    0);
+			else
+				atomic_store_rel_long(&lu_h->lu_watchreq->lr_locked, 0);
 		} else {
-			myreq->lr_handshake = 1;
-			/* Give the lock to the previous request. */
-			atomic_store_rel_long(&myreq->lr_locked, 0);
 			if ((myreq->lr_waiting != 0) &&
 			    (lck->l_wakeup != NULL))
 				/* Notify the sleeper */
 				lck->l_wakeup(lck, myreq->lr_watcher);
-			atomic_store_rel_long(&myreq->lr_handshake, 0);
+			else
+				/* Give the lock to the previous request. */
+				atomic_store_rel_long(&myreq->lr_locked, 0);
 		}
 	} else {
 		/*
@@ -270,19 +264,25 @@ _lock_release(struct lock *lck, struct lockuser *lu)
 		lu->lu_watchreq = NULL;
 		lu->lu_myreq->lr_locked = 1;
 		lu->lu_myreq->lr_waiting = 0;
-		if (lck->l_wakeup) {
-			/* Start wakeup */
-			myreq->lr_handshake = 1;
+		if (myreq->lr_waiting != 0 && lck->l_wakeup)
+			/* Notify the sleeper */
+			lck->l_wakeup(lck, myreq->lr_watcher);
+		else
 			/* Give the lock to the previous request. */
 			atomic_store_rel_long(&myreq->lr_locked, 0);
-			if (myreq->lr_waiting != 0) {
-				/* Notify the sleeper */
-				lck->l_wakeup(lck, myreq->lr_watcher);
-			}
-			/* Stop wakeup */
-			atomic_store_rel_long(&myreq->lr_handshake, 0);
-		} else {
-			atomic_store_rel_long(&myreq->lr_locked, 0);
-		}
 	}
+	lu->lu_myreq->lr_active = 0;
 }
+
+void
+_lock_grant(struct lock *lck /* unused */, struct lockuser *lu)
+{
+	atomic_store_rel_long(&lu->lu_watchreq->lr_locked, 0);
+}
+
+void
+_lockuser_setactive(struct lockuser *lu, int active)
+{
+	lu->lu_myreq->lr_active = active;
+}
+
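
The handshake removal works because waking and granting are now decoupled:
_lock_release() clears lr_locked directly only when nobody is sleeping on the
request; otherwise it calls l_wakeup and leaves the grant to the waker via
the new _lock_grant().  Below is a hedged sketch of how a wakeup callback
might be wired up under this scheme; example_wakeup() and make_runnable()
are hypothetical names, and only _lock_grant(), lu_private, and the
lockuser/lockreq fields come from this diff.

/*
 * Hypothetical l_wakeup callback.  make_runnable() stands in for
 * whatever the UTS does to put the sleeping thread back on a run
 * queue; only _lock_grant() and lu_private are from this commit.
 */
static void
example_wakeup(struct lock *lck, struct lockuser *lu)
{
	/* lu is the sleeper's lockuser (myreq->lr_watcher). */
	struct pthread *thread = (struct pthread *)lu->lu_private;

	make_runnable(thread);		/* hypothetical UTS call */
	_lock_grant(lck, lu);		/* clears lu->lu_watchreq->lr_locked */
}

_lockuser_setactive() is the flip side: the scheduler can clear lr_active on
a switched-out thread's lockuser, so the new lr_active check in
_lock_acquire() makes spinners give up early and sleep instead of burning
cycles on a lock whose holder is not running.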