author    julian <julian@FreeBSD.org>    2002-09-06 07:00:37 +0000
committer julian <julian@FreeBSD.org>    2002-09-06 07:00:37 +0000
commit    4446570abffaa8bb634aecab0e8b85c27033979b (patch)
tree      01ff467a2fdb42c0b7b0624494e4a993030d572b
parent    9680f220d4acd260a446574e496f176621c7a18e (diff)
download  FreeBSD-src-4446570abffaa8bb634aecab0e8b85c27033979b.zip
          FreeBSD-src-4446570abffaa8bb634aecab0e8b85c27033979b.tar.gz
Use UMA as a complex object allocator.
The process allocator now caches and hands out complete process
structures *including substructures*, i.e. it gets the process
structure with the first thread (and soon KSE) already allocated and
attached, all in one hit. For the average non-threaded program (non-KSE,
that is) the allocated thread and its stack remain attached to the
process, even when the process is unused and sitting in the process
cache. This saves having to allocate and attach them later, effectively
bringing us (hopefully) close to the efficiency of pre-KSE systems,
where these were a single structure.

Reviewed by:	davidxu@freebsd.org, peter@freebsd.org
-rw-r--r--   sys/kern/kern_exit.c    |  2
-rw-r--r--   sys/kern/kern_fork.c    | 36
-rw-r--r--   sys/kern/kern_kse.c     | 88
-rw-r--r--   sys/kern/kern_proc.c    | 48
-rw-r--r--   sys/kern/kern_thread.c  | 88
-rw-r--r--   sys/sys/proc.h          | 26
-rw-r--r--   sys/vm/vm_glue.c        |  5
7 files changed, 143 insertions, 150 deletions
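
What makes this work is UMA's two-level object lifecycle: the init/fini
callbacks run only when an item is imported into or discarded from the
zone, while ctor/dtor run on every uma_zalloc()/uma_zfree(). The
expensive step (allocating the first thread and linking it to the
process) happens once in proc_init() and is undone only in proc_fini(),
so a cached process keeps its thread across reuse. A minimal sketch of
the zone registration, assuming the uma_zcreate() signature of this
period (the callback bodies are the ones shown in the kern_proc.c diff
below; the exact flags argument is an assumption):

	proc_zone = uma_zcreate("PROC", sizeof(struct proc),
	    proc_ctor,	/* every uma_zalloc(): cheap per-use setup */
	    proc_dtor,	/* every uma_zfree(): sanity-check and re-link */
	    proc_init,	/* once per new item: thread_alloc() + proc_linkup() */
	    proc_fini,	/* when an item is discarded: thread_free() */
	    UMA_ALIGN_PTR, 0);
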
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 8a57e1d..dac90bb 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -741,6 +741,8 @@ loop:
*/
vm_waitproc(p);
mtx_destroy(&p->p_mtx);
+ KASSERT(FIRST_THREAD_IN_PROC(p),
+ ("wait1: no residual thread!"));
uma_zfree(proc_zone, p);
sx_xlock(&allproc_lock);
nprocs--;
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index a0429b2..cb991d9 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -467,9 +467,9 @@ again:
* Start by zeroing the section of proc that is zero-initialized,
* then copy the section that is copied directly from the parent.
*/
- td2 = thread_alloc();
- ke2 = &p2->p_kse;
- kg2 = &p2->p_ksegrp;
+ td2 = FIRST_THREAD_IN_PROC(p2);
+ kg2 = FIRST_KSEGRP_IN_PROC(p2);
+ ke2 = FIRST_KSE_IN_KSEGRP(kg2);
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
@@ -477,10 +477,8 @@ again:
(unsigned) RANGEOF(struct proc, p_startzero, p_endzero));
bzero(&ke2->ke_startzero,
(unsigned) RANGEOF(struct kse, ke_startzero, ke_endzero));
-#if 0 /* bzero'd by the thread allocator */
bzero(&td2->td_startzero,
(unsigned) RANGEOF(struct thread, td_startzero, td_endzero));
-#endif
bzero(&kg2->kg_startzero,
(unsigned) RANGEOF(struct ksegrp, kg_startzero, kg_endzero));
@@ -498,17 +496,6 @@ again:
(unsigned) RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
#undef RANGEOF
- /*
- * XXXKSE Theoretically only the running thread would get copied
- * Others in the kernel would be 'aborted' in the child.
- * i.e return E*something*
- * On SMP we would have to stop them running on
- * other CPUs! (set a flag in the proc that stops
- * all returns to userland until completed)
- * This is wrong but ok for 1:1.
- */
- proc_linkup(p2, kg2, ke2, td2);
-
/* Set up the thread as an active thread (as if runnable). */
TAILQ_REMOVE(&kg2->kg_iq, ke2, ke_kgrlist);
kg2->kg_idle_kses--;
@@ -517,8 +504,6 @@ again:
td2->td_kse = ke2;
td2->td_flags &= ~TDF_UNBOUND; /* For the rest of this syscall. */
- /* note.. XXXKSE no pcb or u-area yet */
-
/*
* Duplicate sub-structures as needed.
* Increase reference counts on shared objects.
@@ -622,10 +607,8 @@ again:
LIST_INSERT_AFTER(p1, p2, p_pglist);
PGRP_UNLOCK(p1->p_pgrp);
LIST_INIT(&p2->p_children);
- LIST_INIT(&td2->td_contested); /* XXXKSE only 1 thread? */
callout_init(&p2->p_itcallout, 0);
- callout_init(&td2->td_slpcallout, 1); /* XXXKSE */
#ifdef KTRACE
/*
@@ -681,19 +664,6 @@ again:
PROC_UNLOCK(p2);
sx_xunlock(&proctree_lock);
- /*
- * XXXKSE: In KSE, there would be a race here if one thread was
- * dieing due to a signal (or calling exit1() for that matter) while
- * another thread was calling fork1(). Not sure how KSE wants to work
- * around that. The problem is that up until the point above, if p1
- * gets killed, it won't find p2 in its list in order for it to be
- * reparented. Alternatively, we could add a new p_flag that gets set
- * before we reparent all the children that we check above and just
- * use init as our parent if that if that flag is set. (Either that
- * or abort the fork if the flag is set since our parent died trying
- * to fork us (which is evil)).
- */
-
KASSERT(newprocsig == NULL, ("unused newprocsig"));
if (newsigacts != NULL)
FREE(newsigacts, M_SUBPROC);
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 26a4707..8b2b88c 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -95,20 +95,8 @@ thread_ctor(void *mem, int size, void *arg)
("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));
td = (struct thread *)mem;
- bzero(&td->td_startzero,
- (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
td->td_state = TDS_NEW;
td->td_flags |= TDF_UNBOUND;
-#if 0
- /*
- * Maybe move these here from process creation, but maybe not.
- * Moving them here takes them away from their "natural" place
- * in the fork process.
- */
- /* XXX td_contested does not appear to be initialized for threads! */
- LIST_INIT(&td->td_contested);
- callout_init(&td->td_slpcallout, 1);
-#endif
cached_threads--; /* XXXSMP */
active_threads++; /* XXXSMP */
}
@@ -202,7 +190,7 @@ threadinit(void)
}
/*
- * Stash an embarasingly esxtra thread into the zombie thread queue.
+ * Stash an embarrassingly extra thread into the zombie thread queue.
*/
void
thread_stash(struct thread *td)
@@ -328,47 +316,59 @@ thread_exit(void)
KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));
if (ke->ke_tdspare != NULL) {
- thread_stash(ke->ke_tdspare);
+ thread_free(ke->ke_tdspare);
ke->ke_tdspare = NULL;
}
cpu_thread_exit(td); /* XXXSMP */
- /* Reassign this thread's KSE. */
- ke->ke_thread = NULL;
- td->td_kse = NULL;
- ke->ke_state = KES_UNQUEUED;
- kse_reassign(ke);
-
- /* Unlink this thread from its proc. and the kseg */
- TAILQ_REMOVE(&p->p_threads, td, td_plist);
- p->p_numthreads--;
- TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
- kg->kg_numthreads--;
/*
- * The test below is NOT true if we are the
- * sole exiting thread. P_STOPPED_SINGLE is unset
- * in exit1() after it is the only survivor.
+ * The last thread is left attached to the process
+ * so that the whole bundle gets recycled. Skip
+ * all this stuff.
*/
- if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
- if (p->p_numthreads == p->p_suspcount) {
- TAILQ_REMOVE(&p->p_suspended,
- p->p_singlethread, td_runq);
- setrunqueue(p->p_singlethread);
- p->p_suspcount--;
+ if (p->p_numthreads > 1) {
+ /* Reassign this thread's KSE. */
+ ke->ke_thread = NULL;
+ td->td_kse = NULL;
+ ke->ke_state = KES_UNQUEUED;
+ kse_reassign(ke);
+
+ /* Unlink this thread from its proc. and the kseg */
+ TAILQ_REMOVE(&p->p_threads, td, td_plist);
+ p->p_numthreads--;
+ TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
+ kg->kg_numthreads--;
+ /*
+ * The test below is NOT true if we are the
+ * sole exiting thread. P_STOPPED_SINGLE is unset
+ * in exit1() after it is the only survivor.
+ */
+ if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
+ if (p->p_numthreads == p->p_suspcount) {
+ TAILQ_REMOVE(&p->p_suspended,
+ p->p_singlethread, td_runq);
+ setrunqueue(p->p_singlethread);
+ p->p_suspcount--;
+ }
}
+ PROC_UNLOCK(p);
+ td->td_state = TDS_SURPLUS;
+ td->td_proc = NULL;
+ td->td_ksegrp = NULL;
+ td->td_last_kse = NULL;
+ ke->ke_tdspare = td;
+ } else {
+ PROC_UNLOCK(p);
}
- PROC_UNLOCK(p);
- td->td_state = TDS_SURPLUS;
- td->td_proc = NULL;
- td->td_ksegrp = NULL;
- td->td_last_kse = NULL;
- ke->ke_tdspare = td;
+
cpu_throw();
/* NOTREACHED */
}
/*
* Link a thread to a process.
+ * Set up anything that needs to be initialized for it to
+ * be used by the process.
*
* Note that we do not link to the proc's ucred here.
* The thread is linked as if running but no KSE assigned.
@@ -384,6 +384,8 @@ thread_link(struct thread *td, struct ksegrp *kg)
td->td_ksegrp = kg;
td->td_last_kse = NULL;
+ LIST_INIT(&td->td_contested);
+ callout_init(&td->td_slpcallout, 1);
TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
p->p_numthreads++;
@@ -393,7 +395,6 @@ thread_link(struct thread *td, struct ksegrp *kg)
if (oiks_debug > 1)
Debugger("OIKS");
}
- td->td_critnest = 0;
td->td_kse = NULL;
}
@@ -418,11 +419,14 @@ thread_schedule_upcall(struct thread *td, struct kse *ke)
}
CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
td, td->td_proc->p_pid, td->td_proc->p_comm);
+ bzero(&td2->td_startzero,
+ (unsigned) RANGEOF(struct thread, td_startzero, td_endzero));
+ bcopy(&td->td_startcopy, &td2->td_startcopy,
+ (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
thread_link(td2, ke->ke_ksegrp);
cpu_set_upcall(td2, ke->ke_pcb);
td2->td_ucred = crhold(td->td_ucred);
td2->td_flags = TDF_UNBOUND|TDF_UPCALLING;
- td2->td_priority = td->td_priority;
setrunqueue(td2);
return (td2);
}
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index d285f3e..b5f6d0b 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -136,19 +136,6 @@ proc_ctor(void *mem, int size, void *arg)
KASSERT((size == sizeof(struct proc)),
("size mismatch: %d != %d\n", size, (int)sizeof(struct proc)));
p = (struct proc *)mem;
-#if 0
- /*
- * Maybe move these from process creation, but maybe not.
- * Moving them here takes them away from their "natural" place
- * in the fork process.
- */
- bzero(&p->p_startzero,
- (unsigned) RANGEOF(struct proc, p_startzero, p_endzero));
- p->p_state = PRS_NEW;
- mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
- LIST_INIT(&p->p_children);
- callout_init(&p->p_itcallout, 0);
-#endif
cached_procs--;
active_procs++;
}
@@ -160,14 +147,31 @@ static void
proc_dtor(void *mem, int size, void *arg)
{
struct proc *p;
+ struct thread *td;
+ struct ksegrp *kg;
+ struct kse *ke;
+ /* INVARIANTS checks go here */
KASSERT((size == sizeof(struct proc)),
("size mismatch: %d != %d\n", size, (int)sizeof(struct proc)));
p = (struct proc *)mem;
- /* INVARIANTS checks go here */
-#if 0 /* See comment in proc_ctor about separating things */
- mtx_destroy(&p->p_mtx);
-#endif
+ KASSERT((p->p_numthreads == 1),
+ ("bad number of threads in exiting process"));
+ td = FIRST_THREAD_IN_PROC(p);
+ KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
+ kg = FIRST_KSEGRP_IN_PROC(p);
+ KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
+ ke = FIRST_KSE_IN_KSEGRP(kg);
+ KASSERT((ke != NULL), ("proc_dtor: bad ke pointer"));
+ /*
+ * We want to make sure we know the initial linkages,
+ * so for now tear them down and remake them.
+ * This is probably unneeded as we can probably rely
+ * on the state coming in here from wait4().
+ */
+ proc_linkup(p, kg, ke, td);
+
+ /* Stats only */
active_procs--;
cached_procs++;
}
@@ -179,11 +183,18 @@ static void
proc_init(void *mem, int size)
{
struct proc *p;
+ struct thread *td;
+ struct ksegrp *kg;
+ struct kse *ke;
KASSERT((size == sizeof(struct proc)),
("size mismatch: %d != %d\n", size, (int)sizeof(struct proc)));
p = (struct proc *)mem;
vm_proc_new(p);
+ td = thread_alloc();
+ ke = &p->p_kse;
+ kg = &p->p_ksegrp;
+ proc_linkup(p, kg, ke, td);
cached_procs++;
allocated_procs++;
}
@@ -202,6 +213,7 @@ proc_fini(void *mem, int size)
vm_proc_dispose(p);
cached_procs--;
allocated_procs--;
+ thread_free(FIRST_THREAD_IN_PROC(p));
}
/*
@@ -256,6 +268,8 @@ proc_linkup(struct proc *p, struct ksegrp *kg,
TAILQ_INIT(&p->p_ksegrps); /* all ksegrps in proc */
TAILQ_INIT(&p->p_threads); /* all threads in proc */
TAILQ_INIT(&p->p_suspended); /* Threads suspended */
+ p->p_numksegrps = 0;
+ p->p_numthreads = 0;
ksegrp_link(kg, p);
kse_link(ke, kg);
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 26a4707..8b2b88c 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -95,20 +95,8 @@ thread_ctor(void *mem, int size, void *arg)
("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));
td = (struct thread *)mem;
- bzero(&td->td_startzero,
- (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
td->td_state = TDS_NEW;
td->td_flags |= TDF_UNBOUND;
-#if 0
- /*
- * Maybe move these here from process creation, but maybe not.
- * Moving them here takes them away from their "natural" place
- * in the fork process.
- */
- /* XXX td_contested does not appear to be initialized for threads! */
- LIST_INIT(&td->td_contested);
- callout_init(&td->td_slpcallout, 1);
-#endif
cached_threads--; /* XXXSMP */
active_threads++; /* XXXSMP */
}
@@ -202,7 +190,7 @@ threadinit(void)
}
/*
- * Stash an embarasingly esxtra thread into the zombie thread queue.
+ * Stash an embarrassingly extra thread into the zombie thread queue.
*/
void
thread_stash(struct thread *td)
@@ -328,47 +316,59 @@ thread_exit(void)
KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));
if (ke->ke_tdspare != NULL) {
- thread_stash(ke->ke_tdspare);
+ thread_free(ke->ke_tdspare);
ke->ke_tdspare = NULL;
}
cpu_thread_exit(td); /* XXXSMP */
- /* Reassign this thread's KSE. */
- ke->ke_thread = NULL;
- td->td_kse = NULL;
- ke->ke_state = KES_UNQUEUED;
- kse_reassign(ke);
-
- /* Unlink this thread from its proc. and the kseg */
- TAILQ_REMOVE(&p->p_threads, td, td_plist);
- p->p_numthreads--;
- TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
- kg->kg_numthreads--;
/*
- * The test below is NOT true if we are the
- * sole exiting thread. P_STOPPED_SINGLE is unset
- * in exit1() after it is the only survivor.
+ * The last thread is left attached to the process
+ * so that the whole bundle gets recycled. Skip
+ * all this stuff.
*/
- if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
- if (p->p_numthreads == p->p_suspcount) {
- TAILQ_REMOVE(&p->p_suspended,
- p->p_singlethread, td_runq);
- setrunqueue(p->p_singlethread);
- p->p_suspcount--;
+ if (p->p_numthreads > 1) {
+ /* Reassign this thread's KSE. */
+ ke->ke_thread = NULL;
+ td->td_kse = NULL;
+ ke->ke_state = KES_UNQUEUED;
+ kse_reassign(ke);
+
+ /* Unlink this thread from its proc. and the kseg */
+ TAILQ_REMOVE(&p->p_threads, td, td_plist);
+ p->p_numthreads--;
+ TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
+ kg->kg_numthreads--;
+ /*
+ * The test below is NOT true if we are the
+ * sole exiting thread. P_STOPPED_SINGLE is unset
+ * in exit1() after it is the only survivor.
+ */
+ if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
+ if (p->p_numthreads == p->p_suspcount) {
+ TAILQ_REMOVE(&p->p_suspended,
+ p->p_singlethread, td_runq);
+ setrunqueue(p->p_singlethread);
+ p->p_suspcount--;
+ }
}
+ PROC_UNLOCK(p);
+ td->td_state = TDS_SURPLUS;
+ td->td_proc = NULL;
+ td->td_ksegrp = NULL;
+ td->td_last_kse = NULL;
+ ke->ke_tdspare = td;
+ } else {
+ PROC_UNLOCK(p);
}
- PROC_UNLOCK(p);
- td->td_state = TDS_SURPLUS;
- td->td_proc = NULL;
- td->td_ksegrp = NULL;
- td->td_last_kse = NULL;
- ke->ke_tdspare = td;
+
cpu_throw();
/* NOTREACHED */
}
/*
* Link a thread to a process.
+ * Set up anything that needs to be initialized for it to
+ * be used by the process.
*
* Note that we do not link to the proc's ucred here.
* The thread is linked as if running but no KSE assigned.
@@ -384,6 +384,8 @@ thread_link(struct thread *td, struct ksegrp *kg)
td->td_ksegrp = kg;
td->td_last_kse = NULL;
+ LIST_INIT(&td->td_contested);
+ callout_init(&td->td_slpcallout, 1);
TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
p->p_numthreads++;
@@ -393,7 +395,6 @@ thread_link(struct thread *td, struct ksegrp *kg)
if (oiks_debug > 1)
Debugger("OIKS");
}
- td->td_critnest = 0;
td->td_kse = NULL;
}
@@ -418,11 +419,14 @@ thread_schedule_upcall(struct thread *td, struct kse *ke)
}
CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
td, td->td_proc->p_pid, td->td_proc->p_comm);
+ bzero(&td2->td_startzero,
+ (unsigned) RANGEOF(struct thread, td_startzero, td_endzero));
+ bcopy(&td->td_startcopy, &td2->td_startcopy,
+ (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
thread_link(td2, ke->ke_ksegrp);
cpu_set_upcall(td2, ke->ke_pcb);
td2->td_ucred = crhold(td->td_ucred);
td2->td_flags = TDF_UNBOUND|TDF_UPCALLING;
- td2->td_priority = td->td_priority;
setrunqueue(td2);
return (td2);
}
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 7c0e570..249cd6e 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -264,6 +264,7 @@ struct thread {
TAILQ_HEAD(, selinfo) td_selq; /* (p) List of selinfos. */
+/* Cleared during fork1() or thread_schedule_upcall(). */
#define td_startzero td_flags
int td_flags; /* (j) TDF_* flags. */
struct kse *td_last_kse; /* Where it wants to be if possible. */
@@ -284,16 +285,21 @@ struct thread {
void *td_mailbox; /* the userland mailbox address */
struct ucred *td_ucred; /* (k) Reference to credentials. */
void (*td_switchin)(void); /* (k) switchin special func */
+ u_int td_critnest; /* (k) Critical section nest level. */
#define td_endzero td_md
+/* Copied during fork1() or thread_schedule_upcall(). */
#define td_startcopy td_endzero
- /* XXXKSE p_md is in the "on your own" section in old struct proc */
+ /* XXXKSE just copying td_md needs checking! */
struct mdthread td_md; /* (k) Any machine-dependent fields. */
- register_t td_retval[2]; /* (k) Syscall aux returns. */
u_char td_base_pri; /* (j) Thread base kernel priority. */
u_char td_priority; /* (j) Thread active priority. */
#define td_endcopy td_pcb
+/*
+ * Fields that must be manually set in fork1() or thread_schedule_upcall(),
+ * or that have already been set in the allocator, constructor, etc.
+ */
struct pcb *td_pcb; /* (k) Kernel VA of pcb and kstack. */
enum {
TDS_NEW = 0x20,
@@ -308,11 +314,11 @@ struct thread {
TDS_SWAPPED,
TDS_SUSP_SLP /* on sleep queue AND suspend queue */
} td_state;
+ register_t td_retval[2]; /* (k) Syscall aux returns. */
struct callout td_slpcallout; /* (h) Callout for sleep. */
struct trapframe *td_frame; /* (k) */
struct vm_object *td_kstack_obj;/* (a) Kstack object. */
vm_offset_t td_kstack; /* Kernel VA of kstack. */
- u_int td_critnest; /* (k) Critical section nest level. */
};
/* flags kept in td_flags */
#define TDF_UNBOUND 0x000001 /* may give away the kse, uses the kg runq */
@@ -354,7 +360,6 @@ struct kse {
int ke_flags; /* (j) KEF_* flags. */
struct thread *ke_thread; /* Active associated thread. */
struct thread *ke_bound; /* Thread bound to this KSE (*) */
- /*u_int ke_estcpu; */ /* (j) Time averaged val of cpticks. */
int ke_cpticks; /* (j) Ticks of cpu time. */
fixpt_t ke_pctcpu; /* (j) %cpu during p_swtime. */
u_int64_t ke_uu; /* (j) Previous user time in usec. */
@@ -425,11 +430,8 @@ struct ksegrp {
u_int kg_estcpu; /* Sum of the same field in KSEs. */
u_int kg_slptime; /* (j) How long completely blocked. */
struct thread *kg_last_assigned; /* Last thread assigned to a KSE */
- int kg_numthreads; /* Num threads in total */
int kg_runnable; /* Num runnable threads on queue. */
- int kg_kses; /* Num KSEs in group. */
int kg_runq_kses; /* Num KSEs on runq. */
- int kg_idle_kses; /* num KSEs idle */
#define kg_endzero kg_pri_class
#define kg_startcopy kg_endzero
@@ -437,8 +439,10 @@ struct ksegrp {
u_char kg_user_pri; /* (j) User pri from estcpu and nice. */
char kg_nice; /* (j?/k?) Process "nice" value. */
/* struct rtprio kg_rtprio; */ /* (j) Realtime priority. */
-#define kg_endcopy kg_dummy
- int kg_dummy;
+#define kg_endcopy kg_numthreads
+ int kg_numthreads; /* Num threads in total */
+ int kg_idle_kses; /* num KSEs idle */
+ int kg_kses; /* Num KSEs in group. */
};
/*
@@ -505,8 +509,6 @@ struct proc {
u_char p_pfsflags; /* (c) Procfs flags. */
struct nlminfo *p_nlminfo; /* (?) Only used by/for lockd. */
void *p_aioinfo; /* (c) ASYNC I/O info. */
- int p_numthreads; /* (?) number of threads */
- int p_numksegrps; /* (?) number of ksegrps */
struct thread *p_singlethread;/* (j) If single threading this is it */
int p_suspcount; /* (j) # threads in suspended mode */
int p_userthreads; /* (j) # threads in userland */
@@ -526,6 +528,8 @@ struct proc {
#define p_endcopy p_xstat
u_short p_xstat; /* (c) Exit status; also stop sig. */
+ int p_numthreads; /* (?) number of threads */
+ int p_numksegrps; /* (?) number of ksegrps */
struct mdproc p_md; /* (c) Any machine-dependent fields. */
struct callout p_itcallout; /* (h) Interval timer callout. */
struct user *p_uarea; /* (k) Kernel VA of u-area (CPU) */
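
The proc.h reshuffling above works because everything between the
startzero/endzero markers is cleared, and everything between the
startcopy/endcopy markers copied, wholesale via the RANGEOF() macro
defined in kern_fork.c. Moving td_critnest inside the zeroed span is
what lets thread_link() drop its explicit td_critnest = 0, and moving
td_retval past td_endcopy takes it out of the parent-to-child bcopy.
A small self-contained illustration of the marker trick (the demo
struct and its field names are hypothetical):

	#include <stddef.h>
	#include <strings.h>

	#define RANGEOF(type, start, end) \
		(offsetof(type, end) - offsetof(type, start))

	struct demo {
		int d_keep;		/* survives recycling untouched */
	#define d_startzero d_flags
		int d_flags;		/* wiped on every reuse */
		int d_count;
	#define d_endzero d_name
		char d_name[16];	/* outside the zeroed span */
	};

	static void
	demo_recycle(struct demo *d)
	{
		/* Zero only the span between the two markers. */
		bzero(&d->d_startzero,
		    (unsigned) RANGEOF(struct demo, d_startzero, d_endzero));
	}
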
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 24b27e3..8e3f5c6 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -430,14 +430,9 @@ void
vm_waitproc(p)
struct proc *p;
{
- struct thread *td;
GIANT_REQUIRED;
cpu_wait(p);
-/* XXXKSE by here there should not be any threads left! */
- FOREACH_THREAD_IN_PROC(p, td) {
- panic("vm_waitproc: Survivor thread!");
- }
vmspace_exitfree(p); /* and clean-out the vmspace */
}