author    deischen <deischen@FreeBSD.org>    2003-07-18 02:46:55 +0000
committer deischen <deischen@FreeBSD.org>    2003-07-18 02:46:55 +0000
commit    8c86a69beb11982e649317ffae6803742f1d0bcc (patch)
tree      adcaaa6f7105bf1eafccce659645ce0d84dd1754 /lib/libpthread
parent    875c5215cc8eb12453d2815a29a56842237d4327 (diff)
Cleanup thread accounting.  Don't reset a thread's timeslice
when it blocks; it only gets reset when it yields.  Properly set
a thread's default stack guardsize.

Reviewed by: davidxu
Diffstat (limited to 'lib/libpthread')
-rw-r--r--lib/libpthread/thread/thr_attr_init.c1
-rw-r--r--lib/libpthread/thread/thr_kern.c57
-rw-r--r--lib/libpthread/thread/thr_private.h7
3 files changed, 27 insertions, 38 deletions
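
As a hedged illustration of the accounting rule described above (charge the ticks a
thread consumed when it is switched out, and only flag the slice as spent once the
quantum is exceeded), the sketch below is a standalone restatement rather than
libpthread code: struct tslice, on_switchout(), CLOCK_RES_USEC and QUANTUM_USEC are
hypothetical stand-ins for slice_usec / tm_uticks / tm_sticks, thr_accounting(),
_clock_res_usec and TIMESLICE_USEC in the hunks that follow.

/*
 * Illustrative sketch only; names are hypothetical stand-ins, not the
 * libpthread sources.
 */
#include <stdio.h>

#define CLOCK_RES_USEC  10000   /* assumed clock resolution (usec per tick) */
#define QUANTUM_USEC    100000  /* assumed time-slice quantum */

struct tslice {
        long    slice_usec;     /* -1 means "slice used up, needs restart" */
        int     uticks;         /* user ticks since last accounting */
        int     sticks;         /* system ticks since last accounting */
};

/* Charge the ticks a thread used; flag the slice once the quantum is gone. */
static void
on_switchout(struct tslice *t)
{
        if (t->slice_usec != -1) {
                t->slice_usec += (t->uticks + t->sticks) * CLOCK_RES_USEC;
                if (t->slice_usec > QUANTUM_USEC)
                        t->slice_usec = -1;     /* quantum exceeded */
        }
        t->uticks = t->sticks = 0;
}

int
main(void)
{
        struct tslice t = { 0, 0, 0 };
        int i;

        /* Block and resume repeatedly; the partial slice is kept, not reset. */
        for (i = 0; i < 12 && t.slice_usec != -1; i++) {
                t.uticks = 1;   /* pretend the thread ran for one tick */
                on_switchout(&t);
                printf("after block %d: slice_usec = %ld\n", i + 1, t.slice_usec);
        }
        return (0);
}

Running the sketch shows the slice surviving a series of blocks and only going to -1
once the accumulated ticks pass the quantum, which is the behaviour the thr_kern.c
hunks below centralize in the new thr_accounting() helper.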
diff --git a/lib/libpthread/thread/thr_attr_init.c b/lib/libpthread/thread/thr_attr_init.c
index d8b701e..9c73463 100644
--- a/lib/libpthread/thread/thr_attr_init.c
+++ b/lib/libpthread/thread/thr_attr_init.c
@@ -53,6 +53,7 @@ _pthread_attr_init(pthread_attr_t *attr)
/* Initialise the attribute object with the defaults: */
memcpy(pattr, &_pthread_attr_default,
sizeof(struct pthread_attr));
+ pattr->guardsize_attr = _thr_guard_default;
/* Return a pointer to the attribute object: */
*attr = pattr;
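
To observe the guardsize half of the change from application code, the standard
POSIX accessors are enough; the example below is plain pthread API usage and not
part of this commit:

#include <pthread.h>
#include <stdio.h>

int
main(void)
{
        pthread_attr_t attr;
        size_t guard = 0;

        /* A freshly initialized attribute object now carries the library's
           default guard size (taken from _thr_guard_default above). */
        pthread_attr_init(&attr);
        pthread_attr_getguardsize(&attr, &guard);
        printf("default guardsize: %zu bytes\n", guard);
        pthread_attr_destroy(&attr);
        return (0);
}

With the hunk above applied, the reported value comes from _thr_guard_default
rather than being left unset by the default-attribute preset.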
diff --git a/lib/libpthread/thread/thr_kern.c b/lib/libpthread/thread/thr_kern.c
index 2728337..51aab62 100644
--- a/lib/libpthread/thread/thr_kern.c
+++ b/lib/libpthread/thread/thr_kern.c
@@ -152,6 +152,22 @@ static int thr_timedout(struct pthread *thread, struct timespec *curtime);
static void thr_unlink(struct pthread *thread);
+static void __inline
+thr_accounting(struct pthread *thread)
+{
+ if ((thread->slice_usec != -1) &&
+ (thread->slice_usec <= TIMESLICE_USEC) &&
+ (thread->attr.sched_policy != SCHED_FIFO)) {
+ thread->slice_usec += (thread->tmbx.tm_uticks
+ + thread->tmbx.tm_sticks) * _clock_res_usec;
+ /* Check for time quantum exceeded: */
+ if (thread->slice_usec > TIMESLICE_USEC)
+ thread->slice_usec = -1;
+ }
+ thread->tmbx.tm_uticks = 0;
+ thread->tmbx.tm_sticks = 0;
+}
+
/*
* This is called after a fork().
* No locks need to be taken here since we are guaranteed to be
@@ -581,7 +597,8 @@ _thr_sched_switch_unlocked(struct pthread *curthread)
curthread->need_switchout = 1; /* The thread yielded on its own. */
curthread->critical_yield = 0; /* No need to yield anymore. */
- curthread->slice_usec = -1; /* Restart the time slice. */
+ thr_accounting(curthread);
+
/* Thread can unlock the scheduler lock. */
curthread->lock_switch = 1;
@@ -638,12 +655,6 @@ _thr_sched_switch_unlocked(struct pthread *curthread)
td->kse = curkse;
/*
- * Reset accounting.
- */
- td->tmbx.tm_uticks = 0;
- td->tmbx.tm_sticks = 0;
-
- /*
* Reset the time slice if this thread is running
* for the first time or running again after using
* its full time slice allocation.
@@ -1028,12 +1039,6 @@ kse_sched_multi(struct kse *curkse)
curthread->kse = curkse;
/*
- * Reset accounting.
- */
- curthread->tmbx.tm_uticks = 0;
- curthread->tmbx.tm_sticks = 0;
-
- /*
* Reset the time slice if this thread is running for the first
* time or running again after using its full time slice allocation.
*/
@@ -1416,6 +1421,7 @@ kse_check_completed(struct kse *kse)
(thread->name == NULL) ? "none" : thread->name);
thread->blocked = 0;
if (thread != kse->k_curthread) {
+ thr_accounting(thread);
if ((thread->flags & THR_FLAGS_SUSPENDED) != 0)
THR_SET_STATE(thread, PS_SUSPENDED);
else
@@ -1545,11 +1551,6 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread)
thread->active = 0;
thread->need_switchout = 0;
/* This thread must have blocked in the kernel. */
- /* thread->slice_usec = -1;*/ /* restart timeslice */
- if ((thread->slice_usec != -1) &&
- (thread->attr.sched_policy != SCHED_FIFO))
- thread->slice_usec += (thread->tmbx.tm_uticks
- + thread->tmbx.tm_sticks) * _clock_res_usec;
/*
* Check for pending signals for this thread to
* see if we need to interrupt it in the kernel.
@@ -1623,24 +1624,8 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread)
KSE_WAITQ_INSERT(kse, thread);
break;
}
- if (thread->state != PS_RUNNING) {
- /* Restart the time slice: */
- thread->slice_usec = -1;
- } else {
- if (thread->need_switchout != 0)
- /*
- * The thread yielded on its own;
- * restart the timeslice.
- */
- thread->slice_usec = -1;
- else if ((thread->slice_usec != -1) &&
- (thread->attr.sched_policy != SCHED_FIFO)) {
- thread->slice_usec += (thread->tmbx.tm_uticks
- + thread->tmbx.tm_sticks) * _clock_res_usec;
- /* Check for time quantum exceeded: */
- if (thread->slice_usec > TIMESLICE_USEC)
- thread->slice_usec = -1;
- }
+ thr_accounting(thread);
+ if (thread->state == PS_RUNNING) {
if (thread->slice_usec == -1) {
/*
* The thread exceeded its time quantum or
diff --git a/lib/libpthread/thread/thr_private.h b/lib/libpthread/thread/thr_private.h
index ccd4faf..21026b7 100644
--- a/lib/libpthread/thread/thr_private.h
+++ b/lib/libpthread/thread/thr_private.h
@@ -888,7 +888,7 @@ do { \
_pq_insert_tail(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
#define THR_RUNQ_REMOVE(thrd) \
_pq_remove(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
-#define THR_RUNQ_FIRST() \
+#define THR_RUNQ_FIRST(thrd) \
_pq_first(&(thrd)->kseg->kg_schedq.sq_runq)
/*
@@ -947,6 +947,9 @@ do { \
(void)_kse_critical_enter(); \
KSE_SCHED_LOCK((curthr)->kse, (curthr)->kseg); \
} while (0)
+#define THR_UNLOCK_SWITCH(curthr) do { \
+ KSE_SCHED_UNLOCK((curthr)->kse, (curthr)->kseg);\
+} while (0)
#define THR_CRITICAL_ENTER(thr) (thr)->critical_count++
#define THR_CRITICAL_LEAVE(thr) do { \
@@ -989,7 +992,7 @@ SCLASS struct pthread_attr _pthread_attr_default
SCLASS_PRESET({
SCHED_RR, 0, TIMESLICE_USEC, THR_DEFAULT_PRIORITY,
THR_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL,
- NULL, NULL, THR_STACK_DEFAULT
+ NULL, NULL, THR_STACK_DEFAULT, /* guardsize */0
});
/* Default mutex attributes: */