summary | refs | log | tree | commit | diff | stats
path: root/sys/sys/sched.h
diff options
context:
space:
mode:
author: jeff <jeff@FreeBSD.org> 2007-06-04 23:50:30 +0000
committer: jeff <jeff@FreeBSD.org> 2007-06-04 23:50:30 +0000
commit: 186ae07cb61840670b6b7bc387b690bef2c2e262 (patch)
tree: e1f8264072afbc05d59439c37c9d1a06178296ad /sys/sys/sched.h
parent: 9bd4fdf7ce811d83f0305cacc5990ec339df9f13 (diff)
download: FreeBSD-src-186ae07cb61840670b6b7bc387b690bef2c2e262.zip
download: FreeBSD-src-186ae07cb61840670b6b7bc387b690bef2c2e262.tar.gz
Commit 1/14 of sched_lock decomposition.
 - Move all scheduler locking into the schedulers utilizing a technique
   similar to solaris's container locking.
 - A per-process spinlock is now used to protect the queue of threads,
   thread count, suspension count, p_sflags, and other process
   related scheduling fields.
 - The new thread lock is actually a pointer to a spinlock for the
   container that the thread is currently owned by.  The container may
   be a turnstile, sleepqueue, or run queue.
 - thread_lock() is now used to protect access to thread related scheduling
   fields.  thread_unlock() unlocks the lock and thread_set_lock()
   implements the transition from one lock to another.
 - A new "blocked_lock" is used in cases where it is not safe to hold the
   actual thread's lock yet we must prevent access to the thread.
 - sched_throw() and sched_fork_exit() are introduced to allow the
   schedulers to fix-up locking at these points.
 - Add some minor infrastructure for optionally exporting scheduler
   statistics that were invaluable in solving performance problems with
   this patch.  Generally these statistics allow you to differentiate
   between different causes of context switches.

Tested by:      kris, current@
Tested on:      i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
Diffstat (limited to 'sys/sys/sched.h')
-rw-r--r--  sys/sys/sched.h  16
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index 1342906..0dcf369 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -81,6 +81,7 @@ int sched_runnable(void);
*/
void sched_exit(struct proc *p, struct thread *childtd);
void sched_fork(struct thread *td, struct thread *childtd);
+void sched_fork_exit(struct thread *td);
/*
* KSE Groups contain scheduling priority information. They record the
@@ -101,6 +102,7 @@ fixpt_t sched_pctcpu(struct thread *td);
void sched_prio(struct thread *td, u_char prio);
void sched_sleep(struct thread *td);
void sched_switch(struct thread *td, struct thread *newtd, int flags);
+void sched_throw(struct thread *td);
void sched_unlend_prio(struct thread *td, u_char prio);
void sched_unlend_user_prio(struct thread *td, u_char pri);
void sched_user_prio(struct thread *td, u_char prio);
@@ -155,6 +157,19 @@ sched_unpin(void)
#define SRQ_PREEMPTED 0x0008 /* has been preempted.. be kind */
#define SRQ_BORROWING 0x0010 /* Priority updated due to prio_lend */
+/* Switch stats. */
+#ifdef SCHED_STATS
+extern long switch_preempt;
+extern long switch_owepreempt;
+extern long switch_turnstile;
+extern long switch_sleepq;
+extern long switch_sleepqtimo;
+extern long switch_relinquish;
+extern long switch_needresched;
+#define SCHED_STAT_INC(var) atomic_add_long(&(var), 1)
+#else
+#define SCHED_STAT_INC(var)
+#endif
/* temporarily here */
void schedinit(void);
@@ -162,7 +177,6 @@ void sched_init_concurrency(struct proc *p);
void sched_set_concurrency(struct proc *p, int cuncurrency);
void sched_schedinit(void);
void sched_newproc(struct proc *p, struct thread *td);
-void sched_thread_exit(struct thread *td);
void sched_newthread(struct thread *td);
#endif /* _KERNEL */
OpenPOWER on IntegriCloud