diff options
author | julian <julian@FreeBSD.org> | 2002-09-15 23:52:25 +0000 |
---|---|---|
committer | julian <julian@FreeBSD.org> | 2002-09-15 23:52:25 +0000 |
commit | c7e9e7e892b7375ea88920a969958f26747fb8c4 (patch) | |
tree | 1c66894d3eba07d04b596510c855c0bf9935c6b3 /sys | |
parent | 4571d5bc4cd1fbf88217bbf7f736c24ae1232a5f (diff) | |
download | FreeBSD-src-c7e9e7e892b7375ea88920a969958f26747fb8c4.zip FreeBSD-src-c7e9e7e892b7375ea88920a969958f26747fb8c4.tar.gz |
Allocate KSEs and KSEGRPs separately and remove them from the proc structure.
next step is to allow > 1 to be allocated per process. This would give
multi-processor threads. (when the rest of the infrastructure is
in place)
While doing this I noticed that libkvm and sys/kern/kern_proc.c:fill_kinfo_proc
are diverging more than they should; corrective action is needed soon.
Diffstat (limited to 'sys')
-rw-r--r-- | sys/alpha/alpha/machdep.c | 2 | ||||
-rw-r--r-- | sys/amd64/amd64/machdep.c | 2 | ||||
-rw-r--r-- | sys/i386/i386/machdep.c | 2 | ||||
-rw-r--r-- | sys/ia64/ia64/machdep.c | 2 | ||||
-rw-r--r-- | sys/kern/init_main.c | 6 | ||||
-rw-r--r-- | sys/kern/kern_exit.c | 4 | ||||
-rw-r--r-- | sys/kern/kern_fork.c | 8 | ||||
-rw-r--r-- | sys/kern/kern_kse.c | 76 | ||||
-rw-r--r-- | sys/kern/kern_proc.c | 84 | ||||
-rw-r--r-- | sys/kern/kern_resource.c | 68 | ||||
-rw-r--r-- | sys/kern/kern_sig.c | 9 | ||||
-rw-r--r-- | sys/kern/kern_switch.c | 7 | ||||
-rw-r--r-- | sys/kern/kern_thread.c | 76 | ||||
-rw-r--r-- | sys/pc98/i386/machdep.c | 2 | ||||
-rw-r--r-- | sys/pc98/pc98/machdep.c | 2 | ||||
-rw-r--r-- | sys/powerpc/aim/machdep.c | 4 | ||||
-rw-r--r-- | sys/powerpc/powerpc/machdep.c | 4 | ||||
-rw-r--r-- | sys/sparc64/sparc64/machdep.c | 2 | ||||
-rw-r--r-- | sys/sys/proc.h | 10 |
19 files changed, 232 insertions, 138 deletions
diff --git a/sys/alpha/alpha/machdep.c b/sys/alpha/alpha/machdep.c index cf4cbe8..276fd3c 100644 --- a/sys/alpha/alpha/machdep.c +++ b/sys/alpha/alpha/machdep.c @@ -884,7 +884,7 @@ alpha_init(pfn, ptb, bim, bip, biv) } - proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0); + proc_linkup(&proc0, &ksegrp0, &kse0, &thread0); /* * Init mapping for u page(s) for proc 0 */ diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c index c5676b9..95edb4b 100644 --- a/sys/amd64/amd64/machdep.c +++ b/sys/amd64/amd64/machdep.c @@ -1640,7 +1640,7 @@ init386(first) * This may be done better later if it gets more high level * components in it. If so just link td->td_proc here. */ - proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0); + proc_linkup(&proc0, &ksegrp0, &kse0, &thread0); metadata_missing = 0; if (bootinfo.bi_modulep) { diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c index c5676b9..95edb4b 100644 --- a/sys/i386/i386/machdep.c +++ b/sys/i386/i386/machdep.c @@ -1640,7 +1640,7 @@ init386(first) * This may be done better later if it gets more high level * components in it. If so just link td->td_proc here. 
*/ - proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0); + proc_linkup(&proc0, &ksegrp0, &kse0, &thread0); metadata_missing = 0; if (bootinfo.bi_modulep) { diff --git a/sys/ia64/ia64/machdep.c b/sys/ia64/ia64/machdep.c index cdf8da9..89d61cb 100644 --- a/sys/ia64/ia64/machdep.c +++ b/sys/ia64/ia64/machdep.c @@ -675,7 +675,7 @@ ia64_init(u_int64_t arg1, u_int64_t arg2) } - proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0); + proc_linkup(&proc0, &ksegrp0, &kse0, &thread0); /* * Init mapping for u page(s) for proc 0 */ diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c index 70dfa24..93afb1d 100644 --- a/sys/kern/init_main.c +++ b/sys/kern/init_main.c @@ -87,6 +87,8 @@ static struct session session0; static struct pgrp pgrp0; struct proc proc0; struct thread thread0; +struct kse kse0; +struct ksegrp ksegrp0; static struct procsig procsig0; static struct filedesc0 filedesc0; static struct plimit limit0; @@ -311,6 +313,8 @@ proc0_init(void *dummy __unused) GIANT_REQUIRED; p = &proc0; td = &thread0; + ke = &kse0; + kg = &ksegrp0; /* * Initialize magic number. @@ -357,8 +361,6 @@ proc0_init(void *dummy __unused) * I would have done it here.. maybe this means this should be * done earlier too. */ - ke = &proc0.p_kse; /* XXXKSE */ - kg = &proc0.p_ksegrp; /* XXXKSE */ p->p_flag = P_SYSTEM; p->p_sflag = PS_INMEM; p->p_state = PRS_NORMAL; diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c index dac90bb..2481a20 100644 --- a/sys/kern/kern_exit.c +++ b/sys/kern/kern_exit.c @@ -620,7 +620,7 @@ loop: mtx_lock_spin(&sched_lock); curthread->td_ksegrp->kg_estcpu = ESTCPULIM(curthread->td_ksegrp->kg_estcpu + - p->p_ksegrp.kg_estcpu); + FIRST_KSEGRP_IN_PROC(p)->kg_estcpu); mtx_unlock_spin(&sched_lock); } @@ -728,7 +728,7 @@ loop: /* Free the KSE spare thread. 
*/ if (ke->ke_tdspare != NULL) { thread_free(ke->ke_tdspare); - p->p_kse.ke_tdspare = NULL; + ke->ke_tdspare = NULL; } } } diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c index d23f47e..0b8b839 100644 --- a/sys/kern/kern_fork.c +++ b/sys/kern/kern_fork.c @@ -634,12 +634,10 @@ again: } /* - * set priority of child to be that of parent - * XXXKSE hey! copying the estcpu seems dodgy.. should split it.. + * set priority of child to be that of parent. + * XXXKSE this needs redefining.. */ - mtx_lock_spin(&sched_lock); - p2->p_ksegrp.kg_estcpu = p1->p_ksegrp.kg_estcpu; - mtx_unlock_spin(&sched_lock); + kg2->kg_estcpu = td->td_ksegrp->kg_estcpu; /* * This begins the section where we must prevent the parent diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c index 2f5f10a..75cf5dd 100644 --- a/sys/kern/kern_kse.c +++ b/sys/kern/kern_kse.c @@ -54,28 +54,22 @@ #include <machine/frame.h> /* - * Thread related storage. + * KSEGRP related storage. */ +static uma_zone_t ksegrp_zone; +static uma_zone_t kse_zone; static uma_zone_t thread_zone; -static int allocated_threads; -static int active_threads; -static int cached_threads; +/* DEBUG ONLY */ SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation"); - -SYSCTL_INT(_kern_threads, OID_AUTO, active, CTLFLAG_RD, - &active_threads, 0, "Number of active threads in system."); - -SYSCTL_INT(_kern_threads, OID_AUTO, cached, CTLFLAG_RD, - &cached_threads, 0, "Number of threads in thread cache."); - -SYSCTL_INT(_kern_threads, OID_AUTO, allocated, CTLFLAG_RD, - &allocated_threads, 0, "Number of threads in zone."); - static int oiks_debug = 1; /* 0 disable, 1 printf, 2 enter debugger */ SYSCTL_INT(_kern_threads, OID_AUTO, oiks, CTLFLAG_RW, &oiks_debug, 0, "OIKS thread debug"); +static int max_threads_per_proc = 4; +SYSCTL_INT(_kern_threads, OID_AUTO, max_per_proc, CTLFLAG_RW, + &max_threads_per_proc, 0, "Limit on threads per proc"); + #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, 
start)) struct threadqueue zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads); @@ -97,8 +91,6 @@ thread_ctor(void *mem, int size, void *arg) td = (struct thread *)mem; td->td_state = TDS_INACTIVE; td->td_flags |= TDF_UNBOUND; - cached_threads--; /* XXXSMP */ - active_threads++; /* XXXSMP */ } /* @@ -134,10 +126,6 @@ thread_dtor(void *mem, int size, void *arg) /* NOTREACHED */ } #endif - - /* Update counters. */ - active_threads--; /* XXXSMP */ - cached_threads++; /* XXXSMP */ } /* @@ -156,8 +144,6 @@ thread_init(void *mem, int size) pmap_new_thread(td); mtx_unlock(&Giant); cpu_thread_setup(td); - cached_threads++; /* XXXSMP */ - allocated_threads++; /* XXXSMP */ } /* @@ -173,8 +159,6 @@ thread_fini(void *mem, int size) td = (struct thread *)mem; pmap_dispose_thread(td); - cached_threads--; /* XXXSMP */ - allocated_threads--; /* XXXSMP */ } /* @@ -187,6 +171,12 @@ threadinit(void) thread_zone = uma_zcreate("THREAD", sizeof (struct thread), thread_ctor, thread_dtor, thread_init, thread_fini, UMA_ALIGN_CACHE, 0); + ksegrp_zone = uma_zcreate("KSEGRP", sizeof (struct ksegrp), + NULL, NULL, NULL, NULL, + UMA_ALIGN_CACHE, 0); + kse_zone = uma_zcreate("KSE", sizeof (struct kse), + NULL, NULL, NULL, NULL, + UMA_ALIGN_CACHE, 0); } /* @@ -226,6 +216,24 @@ thread_reap(void) } /* + * Allocate a ksegrp. + */ +struct ksegrp * +ksegrp_alloc(void) +{ + return (uma_zalloc(ksegrp_zone, M_WAITOK)); +} + +/* + * Allocate a kse. + */ +struct kse * +kse_alloc(void) +{ + return (uma_zalloc(kse_zone, M_WAITOK)); +} + +/* * Allocate a thread. */ struct thread * @@ -236,6 +244,24 @@ thread_alloc(void) } /* + * Deallocate a ksegrp. + */ +void +ksegrp_free(struct ksegrp *td) +{ + uma_zfree(ksegrp_zone, td); +} + +/* + * Deallocate a kse. + */ +void +kse_free(struct kse *td) +{ + uma_zfree(kse_zone, td); +} + +/* * Deallocate a thread. 
*/ void @@ -387,7 +413,7 @@ thread_link(struct thread *td, struct ksegrp *kg) TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist); p->p_numthreads++; kg->kg_numthreads++; - if (oiks_debug && p->p_numthreads > 4) { + if (oiks_debug && p->p_numthreads > max_threads_per_proc) { printf("OIKS %d\n", p->p_numthreads); if (oiks_debug > 1) Debugger("OIKS"); diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c index 2ee4f51..497912b 100644 --- a/sys/kern/kern_proc.c +++ b/sys/kern/kern_proc.c @@ -98,10 +98,6 @@ struct mtx pargs_ref_lock; uma_zone_t proc_zone; uma_zone_t ithread_zone; -static int active_procs; -static int cached_procs; -static int allocated_procs; - int kstack_pages = KSTACK_PAGES; int uarea_pages = UAREA_PAGES; SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0, ""); @@ -142,8 +138,6 @@ proc_ctor(void *mem, int size, void *arg) KASSERT((size == sizeof(struct proc)), ("size mismatch: %d != %d\n", size, (int)sizeof(struct proc))); p = (struct proc *)mem; - cached_procs--; - active_procs++; } /* @@ -176,10 +170,6 @@ proc_dtor(void *mem, int size, void *arg) * on the state coming in here from wait4(). 
*/ proc_linkup(p, kg, ke, td); - - /* Stats only */ - active_procs--; - cached_procs++; } /* @@ -198,11 +188,9 @@ proc_init(void *mem, int size) p = (struct proc *)mem; vm_proc_new(p); td = thread_alloc(); - ke = &p->p_kse; - kg = &p->p_ksegrp; + ke = kse_alloc(); + kg = ksegrp_alloc(); proc_linkup(p, kg, ke, td); - cached_procs++; - allocated_procs++; } /* @@ -212,14 +200,25 @@ static void proc_fini(void *mem, int size) { struct proc *p; + struct thread *td; + struct ksegrp *kg; + struct kse *ke; KASSERT((size == sizeof(struct proc)), ("size mismatch: %d != %d\n", size, (int)sizeof(struct proc))); p = (struct proc *)mem; + KASSERT((p->p_numthreads == 1), + ("bad number of threads in freeing process")); + td = FIRST_THREAD_IN_PROC(p); + KASSERT((td != NULL), ("proc_dtor: bad thread pointer")); + kg = FIRST_KSEGRP_IN_PROC(p); + KASSERT((kg != NULL), ("proc_dtor: bad kg pointer")); + ke = FIRST_KSE_IN_KSEGRP(kg); + KASSERT((ke != NULL), ("proc_dtor: bad ke pointer")); vm_proc_dispose(p); - cached_procs--; - allocated_procs--; - thread_free(FIRST_THREAD_IN_PROC(p)); + thread_free(td); + ksegrp_free(kg); + kse_free(ke); } /* @@ -787,6 +786,8 @@ fill_kinfo_proc(p, kp) struct kinfo_proc *kp; { struct thread *td; + struct kse *ke; + struct ksegrp *kg; struct tty *tp; struct session *sp; struct timeval tv; @@ -862,13 +863,14 @@ fill_kinfo_proc(p, kp) } if (p->p_state == PRS_NORMAL) { /* XXXKSE very approximate */ - if ((TD_ON_RUNQ(td)) || - (TD_IS_RUNNING(td))) { + if (TD_ON_RUNQ(td) || + TD_CAN_RUN(td) || + TD_IS_RUNNING(td)) { kp->ki_stat = SRUN; - } else if (TD_IS_SLEEPING(td)) { - kp->ki_stat = SSLEEP; } else if (P_SHOULDSTOP(p)) { kp->ki_stat = SSTOP; + } else if (TD_IS_SLEEPING(td)) { + kp->ki_stat = SSLEEP; } else if (TD_ON_MUTEX(td)) { kp->ki_stat = SMTX; } else { @@ -883,33 +885,43 @@ fill_kinfo_proc(p, kp) kp->ki_pid = p->p_pid; /* vvv XXXKSE */ if (!(p->p_flag & P_KSES)) { + kg = td->td_ksegrp; + ke = td->td_kse; + KASSERT((ke != NULL), ("fill_kinfo_proc: Null 
KSE")); bintime2timeval(&p->p_runtime, &tv); - kp->ki_runtime = tv.tv_sec * (u_int64_t)1000000 + tv.tv_usec; - kp->ki_pctcpu = p->p_kse.ke_pctcpu; - kp->ki_estcpu = p->p_ksegrp.kg_estcpu; - kp->ki_slptime = p->p_ksegrp.kg_slptime; + kp->ki_runtime = + tv.tv_sec * (u_int64_t)1000000 + tv.tv_usec; + + /* things in the KSE GROUP */ + kp->ki_estcpu = kg->kg_estcpu; + kp->ki_slptime = kg->kg_slptime; + kp->ki_pri.pri_user = kg->kg_user_pri; + kp->ki_pri.pri_class = kg->kg_pri_class; + kp->ki_nice = kg->kg_nice; + + /* Things in the thread */ kp->ki_wchan = td->td_wchan; kp->ki_pri.pri_level = td->td_priority; - kp->ki_pri.pri_user = p->p_ksegrp.kg_user_pri; - kp->ki_pri.pri_class = p->p_ksegrp.kg_pri_class; kp->ki_pri.pri_native = td->td_base_pri; - kp->ki_nice = p->p_ksegrp.kg_nice; - kp->ki_rqindex = p->p_kse.ke_rqindex; - kp->ki_oncpu = p->p_kse.ke_oncpu; kp->ki_lastcpu = td->td_lastcpu; kp->ki_tdflags = td->td_flags; kp->ki_pcb = td->td_pcb; kp->ki_kstack = (void *)td->td_kstack; + + /* Things in the kse */ + kp->ki_rqindex = ke->ke_rqindex; + kp->ki_oncpu = ke->ke_oncpu; + kp->ki_pctcpu = ke->ke_pctcpu; } else { kp->ki_oncpu = -1; kp->ki_lastcpu = -1; kp->ki_tdflags = -1; - /* All the reast are 0 */ + /* All the rest are 0 for now */ } + /* ^^^ XXXKSE */ } else { kp->ki_stat = SZOMB; } - /* ^^^ XXXKSE */ mtx_unlock_spin(&sched_lock); sp = NULL; tp = NULL; @@ -1255,11 +1267,3 @@ SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD, SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY, sysctl_kern_proc_args, "Process argument list"); -SYSCTL_INT(_kern_proc, OID_AUTO, active, CTLFLAG_RD, - &active_procs, 0, "Number of active procs in system."); - -SYSCTL_INT(_kern_proc, OID_AUTO, cached, CTLFLAG_RD, - &cached_procs, 0, "Number of procs in proc cache."); - -SYSCTL_INT(_kern_proc, OID_AUTO, allocated, CTLFLAG_RD, - &allocated_procs, 0, "Number of procs in zone."); diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c index 
5097c3a..8f02c13 100644 --- a/sys/kern/kern_resource.c +++ b/sys/kern/kern_resource.c @@ -87,9 +87,10 @@ getpriority(td, uap) struct thread *td; register struct getpriority_args *uap; { - register struct proc *p; - register int low = PRIO_MAX + 1; + struct proc *p; + int low = PRIO_MAX + 1; int error = 0; + struct ksegrp *kg; mtx_lock(&Giant); @@ -101,8 +102,12 @@ getpriority(td, uap) p = pfind(uap->who); if (p == NULL) break; - if (p_cansee(td, p) == 0) - low = p->p_ksegrp.kg_nice /* XXXKSE */ ; + if (p_cansee(td, p) == 0) { + FOREACH_KSEGRP_IN_PROC(p, kg) { + if (kg->kg_nice < low) + low = kg->kg_nice; + } + } PROC_UNLOCK(p); } break; @@ -124,8 +129,12 @@ getpriority(td, uap) sx_sunlock(&proctree_lock); LIST_FOREACH(p, &pg->pg_members, p_pglist) { PROC_LOCK(p); - if (!p_cansee(td, p) && p->p_ksegrp.kg_nice /* XXXKSE */ < low) - low = p->p_ksegrp.kg_nice /* XXXKSE */ ; + if (!p_cansee(td, p)) { + FOREACH_KSEGRP_IN_PROC(p, kg) { + if (kg->kg_nice < low) + low = kg->kg_nice; + } + } PROC_UNLOCK(p); } PGRP_UNLOCK(pg); @@ -139,9 +148,12 @@ getpriority(td, uap) LIST_FOREACH(p, &allproc, p_list) { PROC_LOCK(p); if (!p_cansee(td, p) && - p->p_ucred->cr_uid == uap->who && - p->p_ksegrp.kg_nice /* XXXKSE */ < low) - low = p->p_ksegrp.kg_nice /* XXXKSE */ ; + p->p_ucred->cr_uid == uap->who) { + FOREACH_KSEGRP_IN_PROC(p, kg) { + if (kg->kg_nice < low) + low = kg->kg_nice; + } + } PROC_UNLOCK(p); } sx_sunlock(&allproc_lock); @@ -250,25 +262,41 @@ setpriority(td, uap) return (error); } +/* + * Set "nice" for a process. Doesn't really understand threaded processes well + * but does try. Has the unfortunate side effect of making all the NICE + * values for a process's ksegrps the same.. This suggests that + * NICE valuse should be stored as a process nice and deltas for the ksegrps. + * (but not yet). 
+ */ static int -donice(td, chgp, n) - struct thread *td; - register struct proc *chgp; - register int n; +donice(struct thread *td, struct proc *p, int n) { int error; + int low = PRIO_MAX + 1; + struct ksegrp *kg; - PROC_LOCK_ASSERT(chgp, MA_OWNED); - if ((error = p_cansched(td, chgp))) + PROC_LOCK_ASSERT(p, MA_OWNED); + if ((error = p_cansched(td, p))) return (error); if (n > PRIO_MAX) n = PRIO_MAX; if (n < PRIO_MIN) n = PRIO_MIN; - if (n < chgp->p_ksegrp.kg_nice /* XXXKSE */ && suser(td)) + /* + * Only allow nicing if to more than the lowest nice. + * e.g. nices of 4,3,2 allow nice to 3 but not 1 + */ + FOREACH_KSEGRP_IN_PROC(p, kg) { + if (kg->kg_nice < low) + low = kg->kg_nice; + } + if (n < low && suser(td)) return (EACCES); - chgp->p_ksegrp.kg_nice /* XXXKSE */ = n; - (void)resetpriority(&chgp->p_ksegrp); /* XXXKSE */ + FOREACH_KSEGRP_IN_PROC(p, kg) { + kg->kg_nice = n; + (void)resetpriority(kg); + } return (0); } @@ -317,7 +345,7 @@ rtprio(td, uap) if ((error = p_cansee(td, p))) break; mtx_lock_spin(&sched_lock); - pri_to_rtp(&p->p_ksegrp /* XXXKSE */ , &rtp); + pri_to_rtp(FIRST_KSEGRP_IN_PROC(p), &rtp); mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); return (copyout(&rtp, uap->rtp, sizeof(struct rtprio))); @@ -348,7 +376,7 @@ rtprio(td, uap) } } mtx_lock_spin(&sched_lock); - error = rtp_to_pri(&rtp, &p->p_ksegrp); + error = rtp_to_pri(&rtp, FIRST_KSEGRP_IN_PROC(p)); mtx_unlock_spin(&sched_lock); break; default: diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c index 4f9f516..7b51b68 100644 --- a/sys/kern/kern_sig.c +++ b/sys/kern/kern_sig.c @@ -189,12 +189,19 @@ cursig(struct thread *td) void signotify(struct proc *p) { + struct kse *ke; + struct ksegrp *kg; PROC_LOCK_ASSERT(p, MA_OWNED); mtx_lock_spin(&sched_lock); if (SIGPENDING(p)) { p->p_sflag |= PS_NEEDSIGCHK; - p->p_kse.ke_flags |= KEF_ASTPENDING; /* XXXKSE */ + /* XXXKSE for now punish all KSEs */ + FOREACH_KSEGRP_IN_PROC(p, kg) { + FOREACH_KSE_IN_GROUP(kg, ke) { + ke->ke_flags |= 
KEF_ASTPENDING; + } + } } mtx_unlock_spin(&sched_lock); } diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c index 6ccc916..e8a802f 100644 --- a/sys/kern/kern_switch.c +++ b/sys/kern/kern_switch.c @@ -659,13 +659,10 @@ thread_sanity_check(struct thread *td) kg = td->td_ksegrp; ke = td->td_kse; - if (kg != &p->p_ksegrp) { - panic ("wrong ksegrp"); - } if (ke) { - if (ke != &p->p_kse) { - panic("wrong kse"); + if (p != ke->ke_proc) { + panic("wrong proc"); } if (ke->ke_thread != td) { panic("wrong thread"); diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c index 2f5f10a..75cf5dd 100644 --- a/sys/kern/kern_thread.c +++ b/sys/kern/kern_thread.c @@ -54,28 +54,22 @@ #include <machine/frame.h> /* - * Thread related storage. + * KSEGRP related storage. */ +static uma_zone_t ksegrp_zone; +static uma_zone_t kse_zone; static uma_zone_t thread_zone; -static int allocated_threads; -static int active_threads; -static int cached_threads; +/* DEBUG ONLY */ SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation"); - -SYSCTL_INT(_kern_threads, OID_AUTO, active, CTLFLAG_RD, - &active_threads, 0, "Number of active threads in system."); - -SYSCTL_INT(_kern_threads, OID_AUTO, cached, CTLFLAG_RD, - &cached_threads, 0, "Number of threads in thread cache."); - -SYSCTL_INT(_kern_threads, OID_AUTO, allocated, CTLFLAG_RD, - &allocated_threads, 0, "Number of threads in zone."); - static int oiks_debug = 1; /* 0 disable, 1 printf, 2 enter debugger */ SYSCTL_INT(_kern_threads, OID_AUTO, oiks, CTLFLAG_RW, &oiks_debug, 0, "OIKS thread debug"); +static int max_threads_per_proc = 4; +SYSCTL_INT(_kern_threads, OID_AUTO, max_per_proc, CTLFLAG_RW, + &max_threads_per_proc, 0, "Limit on threads per proc"); + #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start)) struct threadqueue zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads); @@ -97,8 +91,6 @@ thread_ctor(void *mem, int size, void *arg) td = (struct thread *)mem; td->td_state = 
TDS_INACTIVE; td->td_flags |= TDF_UNBOUND; - cached_threads--; /* XXXSMP */ - active_threads++; /* XXXSMP */ } /* @@ -134,10 +126,6 @@ thread_dtor(void *mem, int size, void *arg) /* NOTREACHED */ } #endif - - /* Update counters. */ - active_threads--; /* XXXSMP */ - cached_threads++; /* XXXSMP */ } /* @@ -156,8 +144,6 @@ thread_init(void *mem, int size) pmap_new_thread(td); mtx_unlock(&Giant); cpu_thread_setup(td); - cached_threads++; /* XXXSMP */ - allocated_threads++; /* XXXSMP */ } /* @@ -173,8 +159,6 @@ thread_fini(void *mem, int size) td = (struct thread *)mem; pmap_dispose_thread(td); - cached_threads--; /* XXXSMP */ - allocated_threads--; /* XXXSMP */ } /* @@ -187,6 +171,12 @@ threadinit(void) thread_zone = uma_zcreate("THREAD", sizeof (struct thread), thread_ctor, thread_dtor, thread_init, thread_fini, UMA_ALIGN_CACHE, 0); + ksegrp_zone = uma_zcreate("KSEGRP", sizeof (struct ksegrp), + NULL, NULL, NULL, NULL, + UMA_ALIGN_CACHE, 0); + kse_zone = uma_zcreate("KSE", sizeof (struct kse), + NULL, NULL, NULL, NULL, + UMA_ALIGN_CACHE, 0); } /* @@ -226,6 +216,24 @@ thread_reap(void) } /* + * Allocate a ksegrp. + */ +struct ksegrp * +ksegrp_alloc(void) +{ + return (uma_zalloc(ksegrp_zone, M_WAITOK)); +} + +/* + * Allocate a kse. + */ +struct kse * +kse_alloc(void) +{ + return (uma_zalloc(kse_zone, M_WAITOK)); +} + +/* * Allocate a thread. */ struct thread * @@ -236,6 +244,24 @@ thread_alloc(void) } /* + * Deallocate a ksegrp. + */ +void +ksegrp_free(struct ksegrp *td) +{ + uma_zfree(ksegrp_zone, td); +} + +/* + * Deallocate a kse. + */ +void +kse_free(struct kse *td) +{ + uma_zfree(kse_zone, td); +} + +/* * Deallocate a thread. 
*/ void @@ -387,7 +413,7 @@ thread_link(struct thread *td, struct ksegrp *kg) TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist); p->p_numthreads++; kg->kg_numthreads++; - if (oiks_debug && p->p_numthreads > 4) { + if (oiks_debug && p->p_numthreads > max_threads_per_proc) { printf("OIKS %d\n", p->p_numthreads); if (oiks_debug > 1) Debugger("OIKS"); diff --git a/sys/pc98/i386/machdep.c b/sys/pc98/i386/machdep.c index c9dc416..078c257 100644 --- a/sys/pc98/i386/machdep.c +++ b/sys/pc98/i386/machdep.c @@ -1704,7 +1704,7 @@ init386(first) * This may be done better later if it gets more high level * components in it. If so just link td->td_proc here. */ - proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0); + proc_linkup(&proc0, &ksegrp0, &kse0, &thread0); #ifdef PC98 /* diff --git a/sys/pc98/pc98/machdep.c b/sys/pc98/pc98/machdep.c index c9dc416..078c257 100644 --- a/sys/pc98/pc98/machdep.c +++ b/sys/pc98/pc98/machdep.c @@ -1704,7 +1704,7 @@ init386(first) * This may be done better later if it gets more high level * components in it. If so just link td->td_proc here. */ - proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0); + proc_linkup(&proc0, &ksegrp0, &kse0, &thread0); #ifdef PC98 /* diff --git a/sys/powerpc/aim/machdep.c b/sys/powerpc/aim/machdep.c index 2cbf4f4..8d10987 100644 --- a/sys/powerpc/aim/machdep.c +++ b/sys/powerpc/aim/machdep.c @@ -392,7 +392,7 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp) /* * Start initializing proc0 and thread0. 
*/ - proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0); + proc_linkup(&proc0, &ksegrp0, &kse0, &thread0); proc0.p_uarea = (struct user *)uarea0; proc0.p_stats = &proc0.p_uarea->u_stats; thread0.td_frame = &frame0; @@ -507,7 +507,7 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, char *args) pmap_setavailmem(startkernel, endkernel); - proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0); + proc_linkup(&proc0, &ksegrp0, &kse0, &thread0); proc0uarea = (struct user *)pmap_steal_memory(UAREA_PAGES * PAGE_SIZE); proc0kstack = pmap_steal_memory(KSTACK_PAGES * PAGE_SIZE); diff --git a/sys/powerpc/powerpc/machdep.c b/sys/powerpc/powerpc/machdep.c index 2cbf4f4..8d10987 100644 --- a/sys/powerpc/powerpc/machdep.c +++ b/sys/powerpc/powerpc/machdep.c @@ -392,7 +392,7 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp) /* * Start initializing proc0 and thread0. */ - proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0); + proc_linkup(&proc0, &ksegrp0, &kse0, &thread0); proc0.p_uarea = (struct user *)uarea0; proc0.p_stats = &proc0.p_uarea->u_stats; thread0.td_frame = &frame0; @@ -507,7 +507,7 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, char *args) pmap_setavailmem(startkernel, endkernel); - proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0); + proc_linkup(&proc0, &ksegrp0, &kse0, &thread0); proc0uarea = (struct user *)pmap_steal_memory(UAREA_PAGES * PAGE_SIZE); proc0kstack = pmap_steal_memory(KSTACK_PAGES * PAGE_SIZE); diff --git a/sys/sparc64/sparc64/machdep.c b/sys/sparc64/sparc64/machdep.c index 4edd9df..d2ecc80 100644 --- a/sys/sparc64/sparc64/machdep.c +++ b/sys/sparc64/sparc64/machdep.c @@ -297,7 +297,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec) /* * Initialize proc0 stuff (p_contested needs to be done early). 
*/ - proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0); + proc_linkup(&proc0, &ksegrp0, &kse0, &thread0); proc0.p_md.md_sigtramp = NULL; proc0.p_md.md_utrap = NULL; proc0.p_uarea = (struct user *)uarea0; diff --git a/sys/sys/proc.h b/sys/sys/proc.h index 9ccd2a5..7e4c1d1 100644 --- a/sys/sys/proc.h +++ b/sys/sys/proc.h @@ -507,8 +507,8 @@ struct proc { struct vm_object *p_upages_obj; /* (a) Upages object. */ struct procsig *p_procsig; /* (c) Signal actions, state (CPU). */ - struct ksegrp p_ksegrp; - struct kse p_kse; + /*struct ksegrp p_ksegrp; + struct kse p_kse; */ /* * The following don't make too much sense.. @@ -800,6 +800,8 @@ extern struct sx proctree_lock; extern struct mtx pargs_ref_lock; extern struct proc proc0; /* Process slot for swapper. */ extern struct thread thread0; /* Primary thread in proc0 */ +extern struct ksegrp ksegrp0; /* Primary ksegrp in proc0 */ +extern struct kse kse0; /* Primary kse in proc0 */ extern int hogticks; /* Limit on kernel cpu hogs. */ extern int nprocs, maxproc; /* Current and max number of procs. */ extern int maxprocperuid; /* Max procs per uid. */ @@ -890,6 +892,10 @@ void cpu_set_fork_handler(struct thread *, void (*)(void *), void *); void cpu_wait(struct proc *); /* New in KSE. */ +struct ksegrp *ksegrp_alloc(void); +void ksegrp_free(struct ksegrp *td); +struct kse *kse_alloc(void); +void kse_free(struct kse *td); struct thread *thread_alloc(void); void thread_free(struct thread *td); int cpu_export_context(struct thread *td); |