diff options
author | deischen <deischen@FreeBSD.org> | 2003-04-28 23:56:12 +0000 |
---|---|---|
committer | deischen <deischen@FreeBSD.org> | 2003-04-28 23:56:12 +0000 |
commit | 6deabb439b7870759f16db0c9a3968e3755bf61a (patch) | |
tree | 90f4ce3660289c401aab5d0a2c4f105e5aa3e877 /lib/libkse/thread/thr_private.h | |
parent | 0fbdf3450d750b66e54d688d645fc4bafd8739a1 (diff) | |
download | FreeBSD-src-6deabb439b7870759f16db0c9a3968e3755bf61a.zip FreeBSD-src-6deabb439b7870759f16db0c9a3968e3755bf61a.tar.gz |
o Don't add a scope system thread's KSE to the list of available
KSEs when its thread exits; allow the GC handler to do that.
o Make spinlock/spinlock critical regions.
The following were submitted by davidxu
o Allow thr_switch() to take a null mailbox argument.
o Better protect cancellation checks.
o Don't set KSE specific data when creating new KSEs; rely on the
first upcall of the KSE to set it.
o Add the ability to set the maximum concurrency level and do this
automatically. We should have a way to enable/disable this with
some sort of tunable because some applications may not want this
to be the default.
o Hold the scheduling lock across thread switch calls.
o If scheduling of a thread fails, make sure to remove it from the list
of active threads.
o Better protect accesses to a joining thread when the target thread is
exited and detached.
o Remove some macro definitions that are now provided by <sys/kse.h>.
o Don't leave the library in threaded mode if creation of the initial
KSE fails.
o Wakeup idle KSEs when there are threads ready to run.
o Maintain the number of threads active in the priority queue.
Diffstat (limited to 'lib/libkse/thread/thr_private.h')
-rw-r--r-- | lib/libkse/thread/thr_private.h | 46 |
1 files changed, 37 insertions, 9 deletions
diff --git a/lib/libkse/thread/thr_private.h b/lib/libkse/thread/thr_private.h index fc2e4ee..4d43242 100644 --- a/lib/libkse/thread/thr_private.h +++ b/lib/libkse/thread/thr_private.h @@ -143,6 +143,7 @@ typedef struct pq_queue { int pq_size; /* number of priority lists */ #define PQF_ACTIVE 0x0001 int pq_flags; + int pq_threads; } pq_queue_t; /* @@ -197,6 +198,7 @@ struct kse { #define KF_STARTED 0x0001 /* kernel kse created */ #define KF_INITIALIZED 0x0002 /* initialized on 1st upcall */ int k_waiting; + int k_idle; /* kse is idle */ int k_error; /* syscall errno in critical */ int k_cpu; /* CPU ID when bound */ int k_done; /* this KSE is done */ @@ -294,11 +296,15 @@ do { \ #define KSE_SET_WAIT(kse) atomic_store_rel_int(&(kse)->k_waiting, 1) -#define KSE_CLEAR_WAIT(kse) atomic_set_acq_int(&(kse)->k_waiting, 0) +#define KSE_CLEAR_WAIT(kse) atomic_store_rel_int(&(kse)->k_waiting, 0) #define KSE_WAITING(kse) (kse)->k_waiting != 0 #define KSE_WAKEUP(kse) kse_wakeup(&(kse)->k_mbx) +#define KSE_SET_IDLE(kse) ((kse)->k_idle = 1) +#define KSE_CLEAR_IDLE(kse) ((kse)->k_idle = 0) +#define KSE_IS_IDLE(kse) ((kse)->k_idle != 0) + /* * TailQ initialization values. 
*/ @@ -658,6 +664,7 @@ struct pthread { /* Thread state: */ enum pthread_state state; + int lock_switch; /* * Number of microseconds accumulated by this thread when @@ -803,8 +810,11 @@ struct pthread { #define THR_YIELD_CHECK(thrd) \ do { \ if (((thrd)->critical_yield != 0) && \ - !(THR_IN_CRITICAL(thrd))) \ + !(THR_IN_CRITICAL(thrd))) { \ + THR_LOCK_SWITCH(thrd); \ _thr_sched_switch(thrd); \ + THR_UNLOCK_SWITCH(thrd); \ + } \ else if (((thrd)->check_pending != 0) && \ !(THR_IN_CRITICAL(thrd))) \ _thr_sig_check_pending(thrd); \ @@ -828,15 +838,31 @@ do { \ _lock_release((lck), \ &(thrd)->lockusers[(thrd)->locklevel - 1]); \ (thrd)->locklevel--; \ - if ((thrd)->locklevel != 0) \ + if ((thrd)->lock_switch) \ ; \ - else if ((thrd)->critical_yield != 0) \ - _thr_sched_switch(thrd); \ - else if ((thrd)->check_pending != 0) \ - _thr_sig_check_pending(thrd); \ + else { \ + THR_YIELD_CHECK(thrd); \ + } \ } \ } while (0) +#define THR_LOCK_SWITCH(thrd) \ +do { \ + THR_ASSERT(!(thrd)->lock_switch, "context switch locked"); \ + _kse_critical_enter(); \ + KSE_SCHED_LOCK((thrd)->kse, (thrd)->kseg); \ + (thrd)->lock_switch = 1; \ +} while (0) + +#define THR_UNLOCK_SWITCH(thrd) \ +do { \ + THR_ASSERT((thrd)->lock_switch, "context switch not locked"); \ + THR_ASSERT(_kse_in_critical(), "Er,not in critical region"); \ + (thrd)->lock_switch = 0; \ + KSE_SCHED_UNLOCK((thrd)->kse, (thrd)->kseg); \ + _kse_critical_leave(&thrd->tmbx); \ +} while (0) + /* * For now, threads will have their own lock separate from their * KSE scheduling lock. 
@@ -907,8 +933,6 @@ do { \ KSE_SCHED_UNLOCK((curthr)->kse, (thr)->kseg); \ (curthr)->locklevel--; \ _kse_critical_leave((curthr)->critical[(curthr)->locklevel]); \ - if ((curthr)->locklevel == 0) \ - THR_YIELD_CHECK(curthr); \ } while (0) #define THR_CRITICAL_ENTER(thr) (thr)->critical_count++ @@ -917,7 +941,9 @@ do { \ if (((thr)->critical_yield != 0) && \ ((thr)->critical_count == 0)) { \ (thr)->critical_yield = 0; \ + THR_LOCK_SWITCH(thr); \ _thr_sched_switch(thr); \ + THR_UNLOCK_SWITCH(thr); \ } \ } while (0) @@ -1092,6 +1118,8 @@ void _thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf) void _thr_seterrno(struct pthread *, int); void _thr_enter_cancellation_point(struct pthread *); void _thr_leave_cancellation_point(struct pthread *); +int _thr_setconcurrency(int new_level); +int _thr_setmaxconcurrency(void); /* XXX - Stuff that goes away when my sources get more up to date. */ /* #include <sys/kse.h> */ |