path: root/sys/kern/kern_proc.c
author     julian <julian@FreeBSD.org>    2004-09-05 02:09:54 +0000
committer  julian <julian@FreeBSD.org>    2004-09-05 02:09:54 +0000
commit     5813d2702971c0732a69541a73be8c44114dd183 (patch)
tree       18da20638d66699090b682ef6c65384dc44ef3e3    /sys/kern/kern_proc.c
parent     6f864d0a973a7f3987d73132be311b7cfbd1ccfc (diff)
download   FreeBSD-src-5813d2702971c0732a69541a73be8c44114dd183.zip
           FreeBSD-src-5813d2702971c0732a69541a73be8c44114dd183.tar.gz
Refactor a bunch of scheduler code to give basically the same behaviour
but with slightly cleaned up interfaces.

The KSE structure has become the same as the "per-thread scheduler
private data" structure. In order to not make the diffs too great,
one is #defined as the other at this time. The KSE (or td_sched)
structure is now allocated per thread and has no allocation code of
its own.

Concurrency for a KSEGRP is now kept track of via a simple pair of
counters rather than using KSE structures as tokens.

Since the KSE structure is different in each scheduler, kern_switch.c
is now included at the end of each scheduler. Nothing outside the
scheduler knows the contents of the KSE (aka td_sched) structure.

The fields in the ksegrp structure that have to do with the scheduler's
queueing mechanisms are now moved to the kg_sched structure (the
per-ksegrp scheduler private data structure). In other words, how the
scheduler queues and keeps track of threads is no-one's business except
the scheduler's. This should allow people to write experimental
schedulers with completely different internal structuring.

A scheduler call sched_set_concurrency(kg, N) has been added that
notifies the scheduler that no more than N threads from that ksegrp
should be allowed to be concurrently scheduled. This is also used to
enforce 'fairness' at this time, so that a ksegrp with 10000 threads
cannot swamp the run queue and force out a process with 1 thread,
since the current code will not set the concurrency above NCPU, and
both schedulers will not allow more than that many onto the system run
queue at a time. Each scheduler should eventually develop its own
methods to do this now that they are effectively separated.

Rejig libthr's kernel interface to follow the same code paths as libkse
for scope-system threads. This has slightly hurt libthr's performance,
but I will work to recover as much of it as I can.

Thread exit code has been cleaned up greatly. exit and exec code now
transitions a process back to 'standard non-threaded mode' before
taking the next step.

Reviewed by:	scottl, peter
MFC after:	1 week
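As a rough illustration of the "simple pair of counters" concurrency scheme
described above, here is a minimal C sketch. It is not the committed code:
the type and function names (kg_concurrency_sketch, sketch_set_concurrency,
sketch_slot_acquire, sketch_slot_release) are hypothetical stand-ins for the
scheduler-private per-ksegrp state, and the real schedulers manipulate the
counters under the scheduler lock.

/*
 * Illustrative sketch only -- not the committed FreeBSD code.
 * The names below are invented; the real counters live in the
 * scheduler-private kg_sched structure.
 */
struct kg_concurrency_sketch {
	int	concurrency;	/* max threads allowed on run queues */
	int	avail_openings;	/* unused slots under that ceiling */
};

/* sched_set_concurrency(kg, N): raise or lower the ceiling for a ksegrp. */
static void
sketch_set_concurrency(struct kg_concurrency_sketch *kc, int n)
{
	kc->avail_openings += n - kc->concurrency;
	kc->concurrency = n;
}

/* Called before putting another thread of this ksegrp on a run queue. */
static int
sketch_slot_acquire(struct kg_concurrency_sketch *kc)
{
	if (kc->avail_openings <= 0)
		return (0);	/* group at its limit; keep this thread off */
	kc->avail_openings--;
	return (1);
}

/* Called when a thread of this ksegrp leaves the run queues. */
static void
sketch_slot_release(struct kg_concurrency_sketch *kc)
{
	kc->avail_openings++;
}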
Diffstat (limited to 'sys/kern/kern_proc.c')
-rw-r--r--    sys/kern/kern_proc.c    38
1 files changed, 10 insertions, 28 deletions
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index b144543..3f12801 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -100,8 +100,6 @@ int uarea_pages = UAREA_PAGES;
SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0, "");
SYSCTL_INT(_kern, OID_AUTO, uarea_pages, CTLFLAG_RD, &uarea_pages, 0, "");
-#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
-
CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
/*
@@ -145,19 +143,20 @@ proc_dtor(void *mem, int size, void *arg)
{
struct proc *p;
struct thread *td;
+#ifdef INVARIANTS
struct ksegrp *kg;
- struct kse *ke;
+#endif
/* INVARIANTS checks go here */
p = (struct proc *)mem;
+ td = FIRST_THREAD_IN_PROC(p);
+#ifdef INVARIANTS
KASSERT((p->p_numthreads == 1),
("bad number of threads in exiting process"));
- td = FIRST_THREAD_IN_PROC(p);
KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
kg = FIRST_KSEGRP_IN_PROC(p);
KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
- ke = FIRST_KSE_IN_KSEGRP(kg);
- KASSERT((ke != NULL), ("proc_dtor: bad ke pointer"));
+#endif
/* Dispose of an alternate kstack, if it exists.
* XXX What if there are more than one thread in the proc?
@@ -166,14 +165,6 @@ proc_dtor(void *mem, int size, void *arg)
*/
if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
vm_thread_dispose_altkstack(td);
-
- /*
- * We want to make sure we know the initial linkages.
- * so for now tear them down and remake them.
- * This is probably un-needed as we can probably rely
- * on the state coming in here from wait4().
- */
- proc_linkup(p, kg, ke, td);
}
/*
@@ -185,17 +176,16 @@ proc_init(void *mem, int size, int flags)
struct proc *p;
struct thread *td;
struct ksegrp *kg;
- struct kse *ke;
p = (struct proc *)mem;
p->p_sched = (struct p_sched *)&p[1];
vm_proc_new(p);
td = thread_alloc();
- ke = kse_alloc();
kg = ksegrp_alloc();
- proc_linkup(p, kg, ke, td);
bzero(&p->p_mtx, sizeof(struct mtx));
mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
+ proc_linkup(p, kg, td);
+ sched_newproc(p, kg, td);
return (0);
}
@@ -208,7 +198,6 @@ proc_fini(void *mem, int size)
struct proc *p;
struct thread *td;
struct ksegrp *kg;
- struct kse *ke;
p = (struct proc *)mem;
KASSERT((p->p_numthreads == 1),
@@ -217,12 +206,10 @@ proc_fini(void *mem, int size)
KASSERT((td != NULL), ("proc_fini: bad thread pointer"));
kg = FIRST_KSEGRP_IN_PROC(p);
KASSERT((kg != NULL), ("proc_fini: bad kg pointer"));
- ke = FIRST_KSE_IN_KSEGRP(kg);
- KASSERT((ke != NULL), ("proc_fini: bad ke pointer"));
vm_proc_dispose(p);
+ sched_destroyproc(p);
thread_free(td);
ksegrp_free(kg);
- kse_free(ke);
mtx_destroy(&p->p_mtx);
}
@@ -635,7 +622,6 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
{
struct proc *p;
struct thread *td0;
- struct kse *ke;
struct ksegrp *kg;
struct tty *tp;
struct session *sp;
@@ -756,7 +742,6 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
}
kg = td->td_ksegrp;
- ke = td->td_kse;
/* things in the KSE GROUP */
kp->ki_estcpu = kg->kg_estcpu;
@@ -777,11 +762,8 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
kp->ki_kstack = (void *)td->td_kstack;
kp->ki_pctcpu = sched_pctcpu(td);
- /* Things in the kse */
- if (ke)
- kp->ki_rqindex = ke->ke_rqindex;
- else
- kp->ki_rqindex = 0;
+ /* We can't get this anymore but ps etc never used it anyway. */
+ kp->ki_rqindex = 0;
} else {
kp->ki_stat = SZOMB;