summaryrefslogtreecommitdiffstats
path: root/sys/kern/kern_thread.c
diff options
context:
space:
mode:
authordavidxu <davidxu@FreeBSD.org>2010-10-09 02:50:23 +0000
committerdavidxu <davidxu@FreeBSD.org>2010-10-09 02:50:23 +0000
commit55194e796cd9e8eb27d9bb4ec9f07184e390d01a (patch)
tree84ad0652e39fc58da6ca522cc6bf55a188109780 /sys/kern/kern_thread.c
parent6afff59f3c11c8f879672f8dbcb0a74bcbae79a5 (diff)
downloadFreeBSD-src-55194e796cd9e8eb27d9bb4ec9f07184e390d01a.zip
FreeBSD-src-55194e796cd9e8eb27d9bb4ec9f07184e390d01a.tar.gz
Create a global thread hash table to speed up thread lookup, use
rwlock to protect the table. In the old code, thread lookup was done with the process lock held: to find a thread, the kernel had to iterate through the process and thread lists, which is quite inefficient. With this change, testing shows that in extreme cases performance is dramatically improved. Earlier patch was reviewed by: jhb, julian
Diffstat (limited to 'sys/kern/kern_thread.c')
-rw-r--r--sys/kern/kern_thread.c71
1 file changed, 70 insertions(+), 1 deletion(-)
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index e500e0a..89f6137 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
#include <sys/selinfo.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
+#include <sys/rwlock.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>
#ifdef HWPMC_HOOKS
@@ -83,6 +84,12 @@ static void thread_zombie(struct thread *);
struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;
+static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");
+
+struct tidhashhead *tidhashtbl;
+u_long tidhash;
+struct rwlock tidhash_lock;
+
/*
* Prepare a thread for use.
*/
@@ -230,6 +237,8 @@ threadinit(void)
thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
thread_ctor, thread_dtor, thread_init, thread_fini,
16 - 1, 0);
+ tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
+ rw_init(&tidhash_lock, "tidhash");
}
/*
@@ -748,8 +757,14 @@ thread_suspend_check(int return_instead)
* this thread should just suicide.
* Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
*/
- if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
+ if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
+ PROC_SUNLOCK(p);
+ PROC_UNLOCK(p);
+ tidhash_remove(td);
+ PROC_LOCK(p);
+ PROC_SLOCK(p);
thread_exit();
+ }
if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
if (p->p_numthreads == p->p_suspcount + 1) {
thread_lock(p->p_singlethread);
@@ -923,3 +938,57 @@ thread_find(struct proc *p, lwpid_t tid)
}
return (td);
}
+
+/* Locate a thread by number; return with proc lock held. */
+struct thread *
+tdfind(lwpid_t tid, pid_t pid)
+{
+#define RUN_THRESH 16
+ struct thread *td;
+ int run = 0;
+
+ rw_rlock(&tidhash_lock);
+ LIST_FOREACH(td, TIDHASH(tid), td_hash) {
+ if (td->td_tid == tid) {
+ if (pid != -1 && td->td_proc->p_pid != pid) {
+ td = NULL;
+ break;
+ }
+ if (td->td_proc->p_state == PRS_NEW) {
+ td = NULL;
+ break;
+ }
+ if (run > RUN_THRESH) {
+ if (rw_try_upgrade(&tidhash_lock)) {
+ LIST_REMOVE(td, td_hash);
+ LIST_INSERT_HEAD(TIDHASH(td->td_tid),
+ td, td_hash);
+ PROC_LOCK(td->td_proc);
+ rw_wunlock(&tidhash_lock);
+ return (td);
+ }
+ }
+ PROC_LOCK(td->td_proc);
+ break;
+ }
+ run++;
+ }
+ rw_runlock(&tidhash_lock);
+ return (td);
+}
+
+void
+tidhash_add(struct thread *td)
+{
+ rw_wlock(&tidhash_lock);
+ LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
+ rw_wunlock(&tidhash_lock);
+}
+
+void
+tidhash_remove(struct thread *td)
+{
+ rw_wlock(&tidhash_lock);
+ LIST_REMOVE(td, td_hash);
+ rw_wunlock(&tidhash_lock);
+}
OpenPOWER on IntegriCloud