diff options
author | davidxu <davidxu@FreeBSD.org> | 2006-06-15 06:37:39 +0000 |
---|---|---|
committer | davidxu <davidxu@FreeBSD.org> | 2006-06-15 06:37:39 +0000 |
commit | a4976ce4819998468c1e5c81438c6e85194d9eed (patch) | |
tree | 54e94fb49d889ce7133d0d6bc666318bb347cc77 | |
parent | b3a7439a45bf95ef2c21dfad6ba1a051467efad1 (diff) | |
download | FreeBSD-src-a4976ce4819998468c1e5c81438c6e85194d9eed.zip FreeBSD-src-a4976ce4819998468c1e5c81438c6e85194d9eed.tar.gz |
Add scheduler API sched_relinquish(); this API is used to implement
the yield() and sched_yield() syscalls. Every scheduler has its own way
to relinquish the CPU. The ULE and CORE schedulers have two internal run-
queues, and a timesharing thread that calls the yield() syscall should be
moved to the inactive queue.
-rw-r--r-- | sys/kern/ksched.c | 4 | ||||
-rw-r--r-- | sys/kern/sched_4bsd.c | 15 | ||||
-rw-r--r-- | sys/kern/sched_core.c | 15 | ||||
-rw-r--r-- | sys/kern/sched_ule.c | 13 | ||||
-rw-r--r-- | sys/posix4/ksched.c | 4 | ||||
-rw-r--r-- | sys/sys/sched.h | 1 |
6 files changed, 46 insertions, 6 deletions
diff --git a/sys/kern/ksched.c b/sys/kern/ksched.c index c4a5f0b..7884d90 100644 --- a/sys/kern/ksched.c +++ b/sys/kern/ksched.c @@ -253,9 +253,7 @@ ksched_getscheduler(register_t *ret, struct ksched *ksched, struct thread *td) int ksched_yield(register_t *ret, struct ksched *ksched) { - mtx_lock_spin(&sched_lock); - curthread->td_flags |= TDF_NEEDRESCHED; - mtx_unlock_spin(&sched_lock); + sched_relinquish(curthread); return 0; } diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c index 60f6df6..399c9e5 100644 --- a/sys/kern/sched_4bsd.c +++ b/sys/kern/sched_4bsd.c @@ -1354,6 +1354,19 @@ sched_is_bound(struct thread *td) return (td->td_kse->ke_flags & KEF_BOUND); } +void +sched_relinquish(struct thread *td) +{ + struct ksegrp *kg; + + kg = td->td_ksegrp; + mtx_lock_spin(&sched_lock); + if (kg->kg_pri_class == PRI_TIMESHARE) + sched_prio(td, PRI_MAX_TIMESHARE); + mi_switch(SW_VOL, NULL); + mtx_unlock_spin(&sched_lock); +} + int sched_load(void) { @@ -1365,11 +1378,13 @@ sched_sizeof_ksegrp(void) { return (sizeof(struct ksegrp) + sizeof(struct kg_sched)); } + int sched_sizeof_proc(void) { return (sizeof(struct proc)); } + int sched_sizeof_thread(void) { diff --git a/sys/kern/sched_core.c b/sys/kern/sched_core.c index 3f5554b..494e04f 100644 --- a/sys/kern/sched_core.c +++ b/sys/kern/sched_core.c @@ -2310,6 +2310,21 @@ sched_load(void) #endif } +void +sched_relinquish(struct thread *td) +{ + struct ksegrp *kg; + + kg = td->td_ksegrp; + mtx_lock_spin(&sched_lock); + if (sched_is_timeshare(kg)) { + sched_prio(td, PRI_MAX_TIMESHARE); + td->td_kse->ke_flags |= KEF_NEXTRQ; + } + mi_switch(SW_VOL, NULL); + mtx_unlock_spin(&sched_lock); +} + int sched_sizeof_ksegrp(void) { diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c index a3cccfe..a352445 100644 --- a/sys/kern/sched_ule.c +++ b/sys/kern/sched_ule.c @@ -1974,6 +1974,19 @@ sched_is_bound(struct thread *td) return (td->td_kse->ke_flags & KEF_BOUND); } +void +sched_relinquish(struct thread *td) +{ + struct ksegrp *kg; + + kg = td->td_ksegrp; + mtx_lock_spin(&sched_lock); + if (kg->kg_pri_class == PRI_TIMESHARE) + sched_prio(td, PRI_MAX_TIMESHARE); + mi_switch(SW_VOL, NULL); + mtx_unlock_spin(&sched_lock); +} + int sched_load(void) { diff --git a/sys/posix4/ksched.c b/sys/posix4/ksched.c index c4a5f0b..7884d90 100644 --- a/sys/posix4/ksched.c +++ b/sys/posix4/ksched.c @@ -253,9 +253,7 @@ ksched_getscheduler(register_t *ret, struct ksched *ksched, struct thread *td) int ksched_yield(register_t *ret, struct ksched *ksched) { - mtx_lock_spin(&sched_lock); - curthread->td_flags |= TDF_NEEDRESCHED; - mtx_unlock_spin(&sched_lock); + sched_relinquish(curthread); return 0; } diff --git a/sys/sys/sched.h b/sys/sys/sched.h index 5c5825f..b782c1f 100644 --- a/sys/sys/sched.h +++ b/sys/sys/sched.h @@ -79,6 +79,7 @@ void sched_add(struct thread *td, int flags); void sched_clock(struct thread *td); void sched_rem(struct thread *td); void sched_tick(void); +void sched_relinquish(struct thread *td); /* * Binding makes cpu affinity permanent while pinning is used to temporarily |