path: root/lib/libkse/sys
author	deischen <deischen@FreeBSD.org>	2003-05-16 19:58:30 +0000
committer	deischen <deischen@FreeBSD.org>	2003-05-16 19:58:30 +0000
commit	7f206ad4bb26e0a2be46cd85318999ccba58875c (patch)
tree	b51a83e5c85ef7655ee05174c31348fd86fe4591 /lib/libkse/sys
parent	7d7228c36bf498422a9b6bdcb82758abca864f64 (diff)
Add a method of yielding the current thread with the scheduler lock held
(_thr_sched_switch_unlocked()) and use this to avoid dropping the scheduler
lock and having the scheduler retake the same lock again.

Add a better way of detecting if a low-level lock is in use.

When switching out a thread due to blocking in the UTS, don't switch to the
KSE's scheduler stack only to switch back to another thread. If possible,
switch to the new thread directly from the old thread and avoid the overhead
of the extra context switch.

Check for pending signals on a thread when entering the scheduler and add
them to the thread's signal frame. This includes some other minor signal
fixes.

Most of this was a joint effort between davidxu and myself.

Reviewed by:	davidxu
Approved by:	re@ (blanket for libpthread)
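
As a rough illustration of the new lr_active flag (this sketch is not part of the commit; kse_sched_switch() and the switch itself are placeholder names), a scheduler could bracket the moment it leaves the CPU with _lockuser_setactive(), so that a contending _lock_acquire() stops spinning and falls back to its l_wait()/l_wakeup() path as soon as the holder is no longer running:

#include "lock.h"

/*
 * Hypothetical sketch: mark this lockuser inactive across a context
 * switch.  A spinner in _lock_acquire() sees lr_active == 0, breaks
 * out of its spin loop early, and blocks instead of wasting cycles
 * on a holder that is not running.
 */
static void
kse_sched_switch(struct lockuser *lu)
{
	_lockuser_setactive(lu, 0);	/* holder is leaving the CPU */

	/* ... switch to another thread here (placeholder) ... */

	_lockuser_setactive(lu, 1);	/* holder is running again */
}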
Diffstat (limited to 'lib/libkse/sys')
-rw-r--r--	lib/libkse/sys/lock.c	56
-rw-r--r--	lib/libkse/sys/lock.h	5
2 files changed, 32 insertions(+), 29 deletions(-)
diff --git a/lib/libkse/sys/lock.c b/lib/libkse/sys/lock.c
index 12ce1a0..d005baa 100644
--- a/lib/libkse/sys/lock.c
+++ b/lib/libkse/sys/lock.c
@@ -65,7 +65,7 @@ _lock_init(struct lock *lck, enum lock_type ltype,
lck->l_head->lr_watcher = NULL;
lck->l_head->lr_owner = NULL;
lck->l_head->lr_waiting = 0;
- lck->l_head->lr_handshake = 0;
+ lck->l_head->lr_active = 1;
lck->l_tail = lck->l_head;
}
return (0);
@@ -85,7 +85,7 @@ _lockuser_init(struct lockuser *lu, void *priv)
lu->lu_myreq->lr_watcher = NULL;
lu->lu_myreq->lr_owner = lu;
lu->lu_myreq->lr_waiting = 0;
- lu->lu_myreq->lr_handshake = 0;
+ lu->lu_myreq->lr_active = 0;
lu->lu_watchreq = NULL;
lu->lu_priority = 0;
lu->lu_private = priv;
@@ -166,19 +166,16 @@ _lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
for (i = 0; i < MAX_SPINS; i++) {
if (lu->lu_watchreq->lr_locked == 0)
return;
+ if (lu->lu_watchreq->lr_active == 0)
+ break;
}
atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 1);
while (lu->lu_watchreq->lr_locked != 0)
lck->l_wait(lck, lu);
atomic_store_rel_long(&lu->lu_watchreq->lr_waiting, 0);
- /*
- * Wait for original owner to stop accessing the
- * lockreq object.
- */
- while (lu->lu_watchreq->lr_handshake)
- ;
}
}
+ lu->lu_myreq->lr_active = 1;
}
/*
@@ -240,24 +237,21 @@ _lock_release(struct lock *lck, struct lockuser *lu)
}
}
if (lu_h != NULL) {
- lu_h->lu_watchreq->lr_handshake = 1;
/* Give the lock to the highest priority user. */
- atomic_store_rel_long(&lu_h->lu_watchreq->lr_locked, 0);
if ((lu_h->lu_watchreq->lr_waiting != 0) &&
(lck->l_wakeup != NULL))
/* Notify the sleeper */
lck->l_wakeup(lck, lu_h->lu_myreq->lr_watcher);
- atomic_store_rel_long(&lu_h->lu_watchreq->lr_handshake,
- 0);
+ else
+ atomic_store_rel_long(&lu_h->lu_watchreq->lr_locked, 0);
} else {
- myreq->lr_handshake = 1;
- /* Give the lock to the previous request. */
- atomic_store_rel_long(&myreq->lr_locked, 0);
if ((myreq->lr_waiting != 0) &&
(lck->l_wakeup != NULL))
/* Notify the sleeper */
lck->l_wakeup(lck, myreq->lr_watcher);
- atomic_store_rel_long(&myreq->lr_handshake, 0);
+ else
+ /* Give the lock to the previous request. */
+ atomic_store_rel_long(&myreq->lr_locked, 0);
}
} else {
/*
@@ -270,19 +264,25 @@ _lock_release(struct lock *lck, struct lockuser *lu)
lu->lu_watchreq = NULL;
lu->lu_myreq->lr_locked = 1;
lu->lu_myreq->lr_waiting = 0;
- if (lck->l_wakeup) {
- /* Start wakeup */
- myreq->lr_handshake = 1;
+ if (myreq->lr_waiting != 0 && lck->l_wakeup)
+ /* Notify the sleeper */
+ lck->l_wakeup(lck, myreq->lr_watcher);
+ else
/* Give the lock to the previous request. */
atomic_store_rel_long(&myreq->lr_locked, 0);
- if (myreq->lr_waiting != 0) {
- /* Notify the sleeper */
- lck->l_wakeup(lck, myreq->lr_watcher);
- }
- /* Stop wakeup */
- atomic_store_rel_long(&myreq->lr_handshake, 0);
- } else {
- atomic_store_rel_long(&myreq->lr_locked, 0);
- }
}
+ lu->lu_myreq->lr_active = 0;
}
+
+void
+_lock_grant(struct lock *lck /* unused */, struct lockuser *lu)
+{
+ atomic_store_rel_long(&lu->lu_watchreq->lr_locked, 0);
+}
+
+void
+_lockuser_setactive(struct lockuser *lu, int active)
+{
+ lu->lu_myreq->lr_active = active;
+}
+
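
With this change, _lock_release() clears lr_locked itself only when no one is waiting; when a waiter exists it calls the lock's l_wakeup handler instead, and whoever performs the wakeup is expected to hand the lock over with the new _lock_grant(). A minimal sketch of such a handler follows (my_lock_wakeup() and make_runnable() are assumed names, not part of this commit):

#include "lock.h"

/* Assumed helper: make the waiting thread runnable again. */
extern void make_runnable(struct lockuser *lu);

/*
 * Hypothetical l_wakeup handler: grant the lock to the chosen waiter
 * (this clears lu->lu_watchreq->lr_locked), then let it run so its
 * "while (lr_locked != 0) l_wait()" loop in _lock_acquire() can exit.
 */
static void
my_lock_wakeup(struct lock *lck, struct lockuser *lu)
{
	_lock_grant(lck, lu);
	make_runnable(lu);
}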
diff --git a/lib/libkse/sys/lock.h b/lib/libkse/sys/lock.h
index e397111..6fa23e2 100644
--- a/lib/libkse/sys/lock.h
+++ b/lib/libkse/sys/lock.h
@@ -55,7 +55,7 @@ struct lockreq {
struct lockuser *lr_watcher; /* only used for priority locks */
struct lockuser *lr_owner; /* only used for priority locks */
long lr_waiting; /* non-zero when wakeup needed */
- volatile long lr_handshake; /* non-zero when wakeup in progress */
+ volatile int lr_active; /* non-zero if the lock is last lock for thread */
};
struct lockuser {
@@ -72,6 +72,7 @@ struct lockuser {
#define _LCK_REQUEST_INITIALIZER { 0, NULL, NULL, 0 }
#define _LCK_BUSY(lu) ((lu)->lu_watchreq->lr_locked != 0)
+#define _LCK_ACTIVE(lu) ((lu)->lu_watchreq->lr_active != 0)
#define _LCK_GRANTED(lu) ((lu)->lu_watchreq->lr_locked == 0)
#define _LCK_SET_PRIVATE(lu, p) (lu)->lu_private = (void *)(p)
@@ -84,7 +85,9 @@ int _lock_init(struct lock *, enum lock_type,
lock_handler_t *, lock_handler_t *);
int _lockuser_init(struct lockuser *lu, void *priv);
void _lockuser_destroy(struct lockuser *lu);
+void _lockuser_setactive(struct lockuser *lu, int active);
void _lock_acquire(struct lock *, struct lockuser *, int);
void _lock_release(struct lock *, struct lockuser *);
+void _lock_grant(struct lock *, struct lockuser *);
#endif
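
The new _LCK_ACTIVE() macro exposes the same test to callers outside lock.c: keep spinning only while the current holder is actually on a CPU. A hypothetical spin-then-block helper using the macros above might look like the following (MY_MAX_SPINS and block_on_lock() are made-up names, and the lockuser is assumed to already be queued on the lock):

#include "lock.h"

#define MY_MAX_SPINS	200	/* assumed tuning constant */

/* Assumed helper: sleep until the lock is granted to lu. */
extern void block_on_lock(struct lock *lck, struct lockuser *lu);

static void
spin_then_block(struct lock *lck, struct lockuser *lu)
{
	int i;

	for (i = 0; i < MY_MAX_SPINS; i++) {
		if (_LCK_GRANTED(lu))	/* lr_locked == 0: lock is ours */
			return;
		if (!_LCK_ACTIVE(lu))	/* holder was switched out */
			break;		/* spinning is pointless now */
	}
	block_on_lock(lck, lu);		/* fall back to sleeping */
}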