author      julian <julian@FreeBSD.org>    2001-09-12 08:38:13 +0000
committer   julian <julian@FreeBSD.org>    2001-09-12 08:38:13 +0000
commit      5596676e6c6c1e81e899cd0531f9b1c28a292669 (patch)
tree        b1a19fcdf05759281fab0d89efb13f0fdf42102e /sys/posix4
parent      83e00d4274950d2b531c24692cd123538ffbddb9 (diff)
download    FreeBSD-src-5596676e6c6c1e81e899cd0531f9b1c28a292669.zip
            FreeBSD-src-5596676e6c6c1e81e899cd0531f9b1c28a292669.tar.gz
KSE Milestone 2
Note ALL MODULES MUST BE RECOMPILED
Make the kernel aware that there are smaller units of scheduling than the
process (but only allow one thread per process at this time).
This is functionally equivalent to the previous -current except
that there is a thread associated with each process.
Sorry john! (your next MFC will be a doosie!)
Reviewed by: peter@freebsd.org, dillon@freebsd.org
X-MFC after: ha ha ha ha
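
To illustrate the pattern this change applies to each POSIX.1b syscall handler, here is a minimal sketch (not part of the commit; example_syscall and example_args are hypothetical names): handlers now take a struct thread * instead of a struct proc *, reach the owning process through td->td_proc, return values through td->td_retval, and pick the target thread out of the target process, which for now embeds exactly one thread.

/*
 * Illustrative sketch only; example_syscall and example_args are
 * hypothetical names, and locking (Giant, PROC_LOCK) is omitted.
 */
struct example_args {
        pid_t   pid;
};

int
example_syscall(struct thread *td, struct example_args *uap)   /* was: struct proc *p */
{
        struct proc *targetp;
        struct thread *targettd;

        if (uap->pid == 0) {
                targetp = td->td_proc;          /* the owning process hangs off the thread */
                targettd = td;
        } else {
                targetp = pfind(uap->pid);
                if (targetp == NULL)
                        return (ESRCH);
                targettd = &targetp->p_thread;  /* XXXKSE: exactly one thread per process for now */
        }
        td->td_retval[0] = 0;                   /* was: p->p_retval[0] */
        return (0);
}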
Diffstat (limited to 'sys/posix4')
-rw-r--r--   sys/posix4/ksched.c     36
-rw-r--r--   sys/posix4/p1003_1b.c   81
-rw-r--r--   sys/posix4/posix4.h     16
3 files changed, 77 insertions, 56 deletions
diff --git a/sys/posix4/ksched.c b/sys/posix4/ksched.c
index d118b93..6ad2a07 100644
--- a/sys/posix4/ksched.c
+++ b/sys/posix4/ksched.c
@@ -62,9 +62,9 @@ int ksched_attach(struct ksched **p)
         return 0;
 }
 
-int ksched_detach(struct ksched *p)
+int ksched_detach(struct ksched *ks)
 {
-        p31b_free(p);
+        p31b_free(ks);
         return 0;
 }
@@ -94,13 +94,13 @@ int ksched_detach(struct ksched *p)
 #define P1B_PRIO_MAX rtpprio_to_p4prio(RTP_PRIO_MIN)
 
 static __inline int
-getscheduler(register_t *ret, struct ksched *ksched, struct proc *p)
+getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
 {
         struct rtprio rtp;
         int e = 0;
 
         mtx_lock_spin(&sched_lock);
-        pri_to_rtp(&p->p_pri, &rtp);
+        pri_to_rtp(&td->td_ksegrp->kg_pri, &rtp);
         mtx_unlock_spin(&sched_lock);
         switch (rtp.type)
         {
@@ -121,31 +121,31 @@ getscheduler(register_t *ret, struct ksched *ksched, struct proc *p)
 }
 
 int ksched_setparam(register_t *ret, struct ksched *ksched,
-        struct proc *p, const struct sched_param *param)
+        struct thread *td, const struct sched_param *param)
 {
         register_t policy;
         int e;
 
-        e = getscheduler(&policy, ksched, p);
+        e = getscheduler(&policy, ksched, td);
 
         if (e == 0)
         {
                 if (policy == SCHED_OTHER)
                         e = EINVAL;
                 else
-                        e = ksched_setscheduler(ret, ksched, p, policy, param);
+                        e = ksched_setscheduler(ret, ksched, td, policy, param);
         }
 
         return e;
 }
 
 int ksched_getparam(register_t *ret, struct ksched *ksched,
-        struct proc *p, struct sched_param *param)
+        struct thread *td, struct sched_param *param)
 {
         struct rtprio rtp;
 
         mtx_lock_spin(&sched_lock);
-        pri_to_rtp(&p->p_pri, &rtp);
+        pri_to_rtp(&td->td_ksegrp->kg_pri, &rtp);
         mtx_unlock_spin(&sched_lock);
         if (RTP_PRIO_IS_REALTIME(rtp.type))
                 param->sched_priority = rtpprio_to_p4prio(rtp.prio);
@@ -161,7 +161,7 @@ int ksched_getparam(register_t *ret, struct ksched *ksched,
  *
  */
 int ksched_setscheduler(register_t *ret, struct ksched *ksched,
-        struct proc *p, int policy, const struct sched_param *param)
+        struct thread *td, int policy, const struct sched_param *param)
 {
         int e = 0;
         struct rtprio rtp;
@@ -179,8 +179,8 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
                     ? RTP_PRIO_FIFO : RTP_PRIO_REALTIME;
 
                 mtx_lock_spin(&sched_lock);
-                rtp_to_pri(&rtp, &p->p_pri);
-                p->p_sflag |= PS_NEEDRESCHED;
+                rtp_to_pri(&rtp, &td->td_ksegrp->kg_pri);
+                td->td_last_kse->ke_flags |= KEF_NEEDRESCHED; /* XXXKSE */
                 mtx_unlock_spin(&sched_lock);
         }
         else
@@ -194,7 +194,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
                         rtp.type = RTP_PRIO_NORMAL;
                         rtp.prio = p4prio_to_rtpprio(param->sched_priority);
                         mtx_lock_spin(&sched_lock);
-                        rtp_to_pri(&rtp, &p->p_pri);
+                        rtp_to_pri(&rtp, &td->td_ksegrp->kg_pri);
 
                         /* XXX Simply revert to whatever we had for last
                          *     normal scheduler priorities.
@@ -202,7 +202,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
                          *     on the scheduling code: You must leave the
                          *     scheduling info alone.
                          */
-                        p->p_sflag |= PS_NEEDRESCHED;
+                        td->td_last_kse->ke_flags |= KEF_NEEDRESCHED; /* XXXKSE */
                         mtx_unlock_spin(&sched_lock);
                 }
                 break;
@@ -211,9 +211,9 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
         return e;
 }
 
-int ksched_getscheduler(register_t *ret, struct ksched *ksched, struct proc *p)
+int ksched_getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
 {
-        return getscheduler(ret, ksched, p);
+        return getscheduler(ret, ksched, td);
 }
 
 /* ksched_yield: Yield the CPU.
@@ -221,7 +221,7 @@ int ksched_getscheduler(register_t *ret, struct ksched *ksched, struct proc *p)
 int ksched_yield(register_t *ret, struct ksched *ksched)
 {
         mtx_lock_spin(&sched_lock);
-        curproc->p_sflag |= PS_NEEDRESCHED;
+        curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
         mtx_unlock_spin(&sched_lock);
         return 0;
 }
@@ -271,7 +271,7 @@ int ksched_get_priority_min(register_t *ret, struct ksched *ksched, int policy)
 }
 
 int ksched_rr_get_interval(register_t *ret, struct ksched *ksched,
-        struct proc *p, struct timespec *timespec)
+        struct thread *td, struct timespec *timespec)
 {
         *timespec = ksched->rr_interval;
diff --git a/sys/posix4/p1003_1b.c b/sys/posix4/p1003_1b.c
index 4e43b22..0409d63 100644
--- a/sys/posix4/p1003_1b.c
+++ b/sys/posix4/p1003_1b.c
@@ -56,10 +56,10 @@ MALLOC_DEFINE(M_P31B, "p1003.1b", "Posix 1003.1B");
  * start to use this when they shouldn't. That will be removed if annoying.
  */
 int
-syscall_not_present(struct proc *p, const char *s, struct nosys_args *uap)
+syscall_not_present(struct thread *td, const char *s, struct nosys_args *uap)
 {
         log(LOG_ERR, "cmd %s pid %d tried to use non-present %s\n",
-            p->p_comm, p->p_pid, s);
+            td->td_proc->p_comm, td->td_proc->p_pid, s);
 
         /* a " return nosys(p, uap); " here causes a core dump.
          */
@@ -105,9 +105,10 @@ static int sched_attach(void)
 /*
  * MPSAFE
  */
-int sched_setparam(struct proc *p,
+int sched_setparam(struct thread *td,
         struct sched_setparam_args *uap)
 {
+        struct thread *targettd;
         struct proc *targetp;
         int e;
         struct sched_param sched_param;
@@ -118,7 +119,8 @@ int sched_setparam(struct proc *p,
 
         mtx_lock(&Giant);
         if (uap->pid == 0) {
-                targetp = p;
+                targetp = td->td_proc;
+                targettd = td;
                 PROC_LOCK(targetp);
         } else {
                 targetp = pfind(uap->pid);
@@ -126,12 +128,13 @@ int sched_setparam(struct proc *p,
                         e = ESRCH;
                         goto done2;
                 }
+                targettd = &targetp->p_thread; /* XXXKSE */
         }
 
-        e = p_cansched(p, targetp);
+        e = p_cansched(td->td_proc, targetp);
         PROC_UNLOCK(targetp);
         if (e == 0) {
-                e = ksched_setparam(&p->p_retval[0], ksched, targetp,
+                e = ksched_setparam(&td->td_retval[0], ksched, targettd,
                     (const struct sched_param *)&sched_param);
         }
 done2:
@@ -142,16 +145,18 @@ done2:
 /*
  * MPSAFE
  */
-int sched_getparam(struct proc *p,
+int sched_getparam(struct thread *td,
         struct sched_getparam_args *uap)
 {
         int e;
         struct sched_param sched_param;
+        struct thread *targettd;
         struct proc *targetp;
 
         mtx_lock(&Giant);
         if (uap->pid == 0) {
-                targetp = p;
+                targetp = td->td_proc;
+                targettd = td;
                 PROC_LOCK(targetp);
         } else {
                 targetp = pfind(uap->pid);
@@ -159,28 +164,31 @@ int sched_getparam(struct proc *p,
                         e = ESRCH;
                         goto done2;
                 }
+                targettd = &targetp->p_thread; /* XXXKSE */
         }
 
-        e = p_cansee(p, targetp);
+        e = p_cansee(td->td_proc, targetp);
         PROC_UNLOCK(targetp);
         if (e)
                 goto done2;
 
-        e = ksched_getparam(&p->p_retval[0], ksched, targetp, &sched_param);
+        e = ksched_getparam(&td->td_retval[0], ksched, targettd, &sched_param);
         if (e == 0)
                 e = copyout(&sched_param, uap->param, sizeof(sched_param));
 done2:
         mtx_unlock(&Giant);
         return (e);
 }
+
 /*
  * MPSAFE
  */
-int sched_setscheduler(struct proc *p,
+int sched_setscheduler(struct thread *td,
         struct sched_setscheduler_args *uap)
 {
         int e;
         struct sched_param sched_param;
+        struct thread *targettd;
         struct proc *targetp;
 
         e = copyin(uap->param, &sched_param, sizeof(sched_param));
@@ -189,7 +197,8 @@ int sched_setscheduler(struct proc *p,
 
         mtx_lock(&Giant);
         if (uap->pid == 0) {
-                targetp = p;
+                targetp = td->td_proc;
+                targettd = td;
                 PROC_LOCK(targetp);
         } else {
                 targetp = pfind(uap->pid);
@@ -197,31 +206,34 @@ int sched_setscheduler(struct proc *p,
                         e = ESRCH;
                         goto done2;
                 }
+                targettd = &targetp->p_thread; /* XXXKSE */
         }
 
-        e = p_cansched(p, targetp);
+        e = p_cansched(td->td_proc, targetp);
         PROC_UNLOCK(targetp);
         if (e == 0) {
-                e = ksched_setscheduler(&p->p_retval[0], ksched,
-                    targetp, uap->policy,
-                    (const struct sched_param *)&sched_param);
+                e = ksched_setscheduler(&td->td_retval[0], ksched, targettd,
+                    uap->policy, (const struct sched_param *)&sched_param);
         }
 done2:
         mtx_unlock(&Giant);
         return (e);
 }
+
 /*
  * MPSAFE
  */
-int sched_getscheduler(struct proc *p,
+int sched_getscheduler(struct thread *td,
         struct sched_getscheduler_args *uap)
 {
         int e;
+        struct thread *targettd;
         struct proc *targetp;
 
         mtx_lock(&Giant);
         if (uap->pid == 0) {
-                targetp = p;
+                targetp = td->td_proc;
+                targettd = td;
                 PROC_LOCK(targetp);
         } else {
                 targetp = pfind(uap->pid);
@@ -229,67 +241,75 @@ int sched_getscheduler(struct proc *p,
                         e = ESRCH;
                         goto done2;
                 }
+                targettd = &targetp->p_thread; /* XXXKSE */
         }
 
-        e = p_cansee(p, targetp);
+        e = p_cansee(td->td_proc, targetp);
         PROC_UNLOCK(targetp);
         if (e == 0)
-                e = ksched_getscheduler(&p->p_retval[0], ksched, targetp);
+                e = ksched_getscheduler(&td->td_retval[0], ksched, targettd);
 done2:
         mtx_unlock(&Giant);
         return (e);
 }
+
 /*
  * MPSAFE
  */
-int sched_yield(struct proc *p,
+int sched_yield(struct thread *td,
         struct sched_yield_args *uap)
 {
         int error;
 
         mtx_lock(&Giant);
-        error = ksched_yield(&p->p_retval[0], ksched);
+        error = ksched_yield(&td->td_retval[0], ksched);
         mtx_unlock(&Giant);
         return (error);
 }
+
 /*
  * MPSAFE
  */
-int sched_get_priority_max(struct proc *p,
+int sched_get_priority_max(struct thread *td,
         struct sched_get_priority_max_args *uap)
 {
         int error;
 
         mtx_lock(&Giant);
-        error = ksched_get_priority_max(&p->p_retval[0], ksched, uap->policy);
+        error = ksched_get_priority_max(&td->td_retval[0], ksched, uap->policy);
         mtx_unlock(&Giant);
         return (error);
 }
+
 /*
  * MPSAFE
  */
-int sched_get_priority_min(struct proc *p,
+int sched_get_priority_min(struct thread *td,
         struct sched_get_priority_min_args *uap)
 {
         int error;
+
         mtx_lock(&Giant);
-        error = ksched_get_priority_min(&p->p_retval[0], ksched, uap->policy);
+        error = ksched_get_priority_min(&td->td_retval[0], ksched, uap->policy);
         mtx_unlock(&Giant);
         return (error);
 }
+
 /*
  * MPSAFE
  */
-int sched_rr_get_interval(struct proc *p,
+int sched_rr_get_interval(struct thread *td,
         struct sched_rr_get_interval_args *uap)
 {
         int e;
+        struct thread *targettd;
         struct proc *targetp;
 
         mtx_lock(&Giant);
         if (uap->pid == 0) {
-                targetp = p;
+                targettd = td;
+                targetp = td->td_proc;
                 PROC_LOCK(targetp);
         } else {
                 targetp = pfind(uap->pid);
@@ -297,12 +317,13 @@ int sched_rr_get_interval(struct proc *p,
                         e = ESRCH;
                         goto done2;
                 }
+                targettd = &targetp->p_thread; /* XXXKSE */
         }
 
-        e = p_cansee(p, targetp);
+        e = p_cansee(td->td_proc, targetp);
         PROC_UNLOCK(targetp);
         if (e == 0) {
-                e = ksched_rr_get_interval(&p->p_retval[0], ksched, targetp,
+                e = ksched_rr_get_interval(&td->td_retval[0], ksched, targettd,
                     uap->interval);
         }
 done2:
diff --git a/sys/posix4/posix4.h b/sys/posix4/posix4.h
index 232435a..626d6d8 100644
--- a/sys/posix4/posix4.h
+++ b/sys/posix4/posix4.h
@@ -47,12 +47,12 @@
  */
 struct proc;
 struct nosys_args;
-extern int syscall_not_present(struct proc *, const char *, struct nosys_args *);
+extern int syscall_not_present(struct thread *, const char *, struct nosys_args *);
 
 #define SYSCALL_NOT_PRESENT_GEN(SC) \
-int SC (struct proc *p, struct SC##_args *uap) \
+int SC (struct thread *td, struct SC##_args *uap) \
 { \
-        return syscall_not_present(p, #SC , (struct nosys_args *)uap); \
+        return syscall_not_present(td, #SC , (struct nosys_args *)uap); \
 }
 
@@ -95,13 +95,13 @@ int ksched_attach(struct ksched **);
 int ksched_detach(struct ksched *);
 
 int ksched_setparam(register_t *, struct ksched *,
-        struct proc *, const struct sched_param *);
+        struct thread *, const struct sched_param *);
 
 int ksched_getparam(register_t *, struct ksched *,
-        struct proc *, struct sched_param *);
+        struct thread *, struct sched_param *);
 
 int ksched_setscheduler(register_t *, struct ksched *,
-        struct proc *, int, const struct sched_param *);
-int ksched_getscheduler(register_t *, struct ksched *, struct proc *);
+        struct thread *, int, const struct sched_param *);
+int ksched_getscheduler(register_t *, struct ksched *, struct thread *);
 
 int ksched_yield(register_t *, struct ksched *);
 
@@ -109,7 +109,7 @@ int ksched_get_priority_max(register_t *, struct ksched *, int);
 int ksched_get_priority_min(register_t *, struct ksched *, int);
 
 int ksched_rr_get_interval(register_t *, struct ksched *,
-        struct proc *, struct timespec *);
+        struct thread *, struct timespec *);
 
 #endif /* _KPOSIX_PRIORITY_SCHEDULING */
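
For reference, a sketch of what the updated SYSCALL_NOT_PRESENT_GEN macro in posix4.h expands to after this change, using sched_setparam as a hypothetical example of a syscall stubbed out this way when the real implementation is not compiled in; this is an illustration of the macro shown above, not additional code from the commit:

/* SYSCALL_NOT_PRESENT_GEN(sched_setparam) now expands to roughly: */
int sched_setparam(struct thread *td, struct sched_setparam_args *uap)
{
        /* Logs "cmd <p_comm> pid <p_pid> tried to use non-present sched_setparam". */
        return syscall_not_present(td, "sched_setparam", (struct nosys_args *)uap);
}

The stub now receives the thread pointer and hands it straight to syscall_not_present(), which digs the process name and pid out of td->td_proc, matching the p1003_1b.c change above.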