-rw-r--r--  sys/kern/kern_resource.c |  95
-rw-r--r--  sys/kern/kern_thr.c      | 219
-rw-r--r--  sys/kern/syscalls.master |  13
-rw-r--r--  sys/sys/rtprio.h         |   1
-rw-r--r--  sys/sys/thr.h            |  16
5 files changed, 123 insertions(+), 221 deletions(-)
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index c7cb511..1556993 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -273,6 +273,101 @@ donice(struct thread *td, struct proc *p, int n)
}
/*
+ * Set realtime priority for LWP.
+ *
+ * MPSAFE
+ */
+#ifndef _SYS_SYSPROTO_H_
+struct rtprio_thread_args {
+ int function;
+ lwpid_t lwpid;
+ struct rtprio *rtp;
+};
+#endif
+
+int
+rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
+{
+ struct proc *curp;
+ struct proc *p;
+ struct rtprio rtp;
+ struct thread *td1;
+ int cierror, error;
+
+ /* Perform copyin before acquiring locks if needed. */
+ if (uap->function == RTP_SET)
+ cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
+ else
+ cierror = 0;
+
+ curp = td->td_proc;
+ /*
+	 * Though lwpid is unique, only the current process is supported,
+	 * since there is no efficient way to look up an LWP yet.
+ */
+ p = curp;
+ PROC_LOCK(p);
+
+ switch (uap->function) {
+ case RTP_LOOKUP:
+ if ((error = p_cansee(td, p)))
+ break;
+ mtx_lock_spin(&sched_lock);
+ if (uap->lwpid == 0 || uap->lwpid == td->td_tid)
+ td1 = td;
+ else
+ td1 = thread_find(p, uap->lwpid);
+		if (td1 != NULL)
+			pri_to_rtp(td1->td_ksegrp, &rtp);
+		else
+			error = ESRCH;
+		mtx_unlock_spin(&sched_lock);
+		PROC_UNLOCK(p);
+		if (error != 0)
+			return (error);
+		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
+ case RTP_SET:
+ if ((error = p_cansched(td, p)) || (error = cierror))
+ break;
+
+ /* Disallow setting rtprio in most cases if not superuser. */
+ if (suser(td) != 0) {
+ /* can't set realtime priority */
+/*
+ * Realtime priority has to be restricted for reasons which should be
+ * obvious. However, for idle priority, there is a potential for
+ * system deadlock if an idleprio process gains a lock on a resource
+ * that other processes need (and the idleprio process can't run
+ * due to a CPU-bound normal process). Fix me! XXX
+ */
+#if 0
+ if (RTP_PRIO_IS_REALTIME(rtp.type)) {
+#else
+ if (rtp.type != RTP_PRIO_NORMAL) {
+#endif
+ error = EPERM;
+ break;
+ }
+ }
+
+ mtx_lock_spin(&sched_lock);
+ if (uap->lwpid == 0 || uap->lwpid == td->td_tid)
+ td1 = td;
+ else
+ td1 = thread_find(p, uap->lwpid);
+ if (td1 != NULL)
+ error = rtp_to_pri(&rtp, td1->td_ksegrp);
+ else
+ error = ESRCH;
+ mtx_unlock_spin(&sched_lock);
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+ PROC_UNLOCK(p);
+ return (error);
+}
+
+/*
* Set realtime priority.
*
* MPSAFE
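
From userland the new syscall is used much like rtprio(2), but keyed on a thread ID instead of a pid; an lwpid of 0 means the calling thread, as the code above shows. A minimal sketch (not part of this change) of looking up and then raising the calling thread's priority; setting anything other than RTP_PRIO_NORMAL requires superuser, per the RTP_SET check above:

	#include <sys/types.h>
	#include <sys/rtprio.h>
	#include <err.h>
	#include <stdio.h>

	int
	main(void)
	{
		struct rtprio rtp;

		/* lwpid 0 addresses the calling thread. */
		if (rtprio_thread(RTP_LOOKUP, 0, &rtp) == -1)
			err(1, "rtprio_thread(RTP_LOOKUP)");
		printf("type %d prio %d\n", rtp.type, rtp.prio);

		/* Move this thread to the realtime class; needs superuser. */
		rtp.type = RTP_PRIO_REALTIME;
		rtp.prio = 0;
		if (rtprio_thread(RTP_SET, 0, &rtp) == -1)
			err(1, "rtprio_thread(RTP_SET)");
		return (0);
	}
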
diff --git a/sys/kern/kern_thr.c b/sys/kern/kern_thr.c
index ee710e9..5624d9d 100644
--- a/sys/kern/kern_thr.c
+++ b/sys/kern/kern_thr.c
@@ -58,7 +58,7 @@ static int create_thread(struct thread *td, mcontext_t *ctx,
char *stack_base, size_t stack_size,
char *tls_base,
long *child_tid, long *parent_tid,
- int flags, struct thr_sched_param *sched);
+ int flags, struct rtprio *rtp);
/*
* System call interface.
@@ -83,29 +83,23 @@ thr_new(struct thread *td, struct thr_new_args *uap)
/* struct thr_param * */
{
struct thr_param param;
- struct thr_sched_param sched_param, *sched;
+ struct rtprio rtp, *rtpp;
int error;
if (uap->param_size < sizeof(param))
return (EINVAL);
+ bzero(&param, sizeof(param));
if ((error = copyin(uap->param, &param, sizeof(param))))
return (error);
- sched = NULL;
- if (param.sched_param != NULL) {
- if (param.sched_param_size != sizeof(struct thr_sched_param))
- return (EINVAL);
-
- error = copyin(param.sched_param, &sched_param,
- sizeof(sched_param));
- if (error)
- return (error);
- sched = &sched_param;
+	rtpp = NULL;
+	if (param.rtp != 0) {
+		error = copyin(param.rtp, &rtp, sizeof(struct rtprio));
+		if (error)
+			return (error);
+		rtpp = &rtp;
+	}
-
error = create_thread(td, NULL, param.start_func, param.arg,
param.stack_base, param.stack_size, param.tls_base,
param.child_tid, param.parent_tid, param.flags,
- sched);
+ rtpp);
return (error);
}
@@ -115,7 +109,7 @@ create_thread(struct thread *td, mcontext_t *ctx,
char *stack_base, size_t stack_size,
char *tls_base,
long *child_tid, long *parent_tid,
- int flags, struct thr_sched_param *sched)
+ int flags, struct rtprio *rtp)
{
stack_t stack;
struct thread *newtd;
@@ -132,18 +126,18 @@ create_thread(struct thread *td, mcontext_t *ctx,
if (p->p_numthreads >= max_threads_per_proc)
return (EPROCLIM);
- if (sched != NULL) {
- switch(sched->policy) {
- case SCHED_FIFO:
- case SCHED_RR:
+ if (rtp != NULL) {
+ switch(rtp->type) {
+ case RTP_PRIO_REALTIME:
+ case RTP_PRIO_FIFO:
/* Only root can set scheduler policy */
if (suser(td) != 0)
return (EPERM);
- if (sched->param.sched_priority < RTP_PRIO_MIN ||
- sched->param.sched_priority > RTP_PRIO_MAX)
+ if (rtp->prio > RTP_PRIO_MAX)
return (EINVAL);
break;
- case SCHED_OTHER:
+ case RTP_PRIO_NORMAL:
+ rtp->prio = 0;
break;
default:
return (EINVAL);
@@ -218,32 +212,12 @@ create_thread(struct thread *td, mcontext_t *ctx,
/* let the scheduler know about these things. */
sched_fork_ksegrp(td, newkg);
sched_fork_thread(td, newtd);
- if (sched != NULL) {
- struct rtprio rtp;
- switch (sched->policy) {
- case SCHED_FIFO:
- rtp.type = PRI_FIFO;
- rtp.prio = RTP_PRIO_MAX - sched->param.sched_priority;
- rtp_to_pri(&rtp, newkg);
+ if (rtp != NULL) {
+ if (!(kg->kg_pri_class == PRI_TIMESHARE &&
+ rtp->type == RTP_PRIO_NORMAL)) {
+ rtp_to_pri(rtp, newkg);
sched_prio(newtd, newkg->kg_user_pri);
- break;
- case SCHED_RR:
- rtp.type = PRI_REALTIME;
- rtp.prio = RTP_PRIO_MAX - sched->param.sched_priority;
- rtp_to_pri(&rtp, newkg);
- sched_prio(newtd, newkg->kg_user_pri);
- break;
- case SCHED_OTHER:
- if (newkg->kg_pri_class != PRI_TIMESHARE) {
- rtp.type = PRI_TIMESHARE;
- rtp.prio = 0;
- rtp_to_pri(&rtp, newkg);
- sched_prio(newtd, newkg->kg_user_pri);
- }
- break;
- default:
- panic("sched policy");
- }
+ } /* ignore timesharing class */
}
TD_SET_CAN_RUN(newtd);
/* if ((flags & THR_SUSPENDED) == 0) */
@@ -436,154 +410,3 @@ thr_set_name(struct thread *td, struct thr_set_name_args *uap)
PROC_UNLOCK(p);
return (error);
}
-
-int
-thr_setscheduler(struct thread *td, struct thr_setscheduler_args *uap)
-{
- struct proc *p;
- struct thread *ttd;
- struct rtprio rtp;
- struct sched_param param;
- int ret;
-
- if (uap->param_size != sizeof(struct sched_param))
- return (EINVAL);
-
- ret = copyin(uap->param, &param, sizeof(struct sched_param));
- if (ret != 0)
- return (ret);
-
- ret = suser(td);
- if (ret != 0)
- return (ret);
-
- switch(uap->policy) {
- case SCHED_FIFO:
- rtp.type = PRI_FIFO;
- rtp.prio = RTP_PRIO_MAX - param.sched_priority;
- break;
- case SCHED_RR:
- rtp.type = PRI_REALTIME;
- rtp.prio = RTP_PRIO_MAX - param.sched_priority;
- break;
- case SCHED_OTHER:
- rtp.type = PRI_TIMESHARE;
- rtp.prio = 0;
- break;
- default:
- return (EINVAL);
- }
-
- p = td->td_proc;
- PROC_LOCK(p);
- if (ret != 0) {
- PROC_UNLOCK(p);
- return (ret);
- }
-
- ttd = thread_find(p, uap->id);
- if (ttd == NULL) {
- PROC_UNLOCK(p);
- return (ESRCH);
- }
- mtx_lock_spin(&sched_lock);
- ret = rtp_to_pri(&rtp, ttd->td_ksegrp);
- if (ret == 0)
- ttd->td_flags |= TDF_NEEDRESCHED;
- mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
- return (ret);
-}
-
-int
-thr_getscheduler(struct thread *td, struct thr_getscheduler_args *uap)
-{
- struct proc *p;
- struct thread *ttd;
- struct rtprio rtp;
- struct sched_param param;
- int policy;
- int ret;
-
- if (uap->param_size != sizeof(struct sched_param))
- return (EINVAL);
-
- p = td->td_proc;
- PROC_LOCK(p);
- ttd = thread_find(p, uap->id);
- if (ttd == NULL) {
- PROC_UNLOCK(p);
- return (ESRCH);
- }
- mtx_lock_spin(&sched_lock);
- pri_to_rtp(ttd->td_ksegrp, &rtp);
- switch(ttd->td_ksegrp->kg_pri_class) {
- case PRI_FIFO:
- policy = SCHED_FIFO;
- param.sched_priority = RTP_PRIO_MAX - rtp.prio;
- break;
- case PRI_REALTIME:
- policy = SCHED_RR;
- param.sched_priority = RTP_PRIO_MAX - rtp.prio;
- break;
- case PRI_TIMESHARE:
- default: /* XXX SCHED_IDLE */
- policy = SCHED_OTHER;
- param.sched_priority = 0;
- break;
- }
- mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
-
- ret = copyout(&policy, uap->policy, sizeof(policy));
- if (ret == 0)
- ret = copyout(&param, uap->param, sizeof(param));
- return (ret);
-}
-
-int
-thr_setschedparam(struct thread *td, struct thr_setschedparam_args *uap)
-{
- struct proc *p;
- struct thread *ttd;
- struct rtprio rtp;
- struct sched_param param;
- int ret;
-
- if (uap->param_size != sizeof(struct sched_param))
- return (EINVAL);
-
- ret = copyin(uap->param, &param, sizeof(struct sched_param));
- if (ret != 0)
- return (ret);
- ret = suser(td);
- if (ret != 0)
- return (ret);
- p = td->td_proc;
- PROC_LOCK(p);
- ttd = thread_find(p, uap->id);
- if (ttd == NULL) {
- PROC_UNLOCK(p);
- return (ESRCH);
- }
- mtx_lock_spin(&sched_lock);
- switch(ttd->td_ksegrp->kg_pri_class) {
- case PRI_FIFO:
- rtp.prio = RTP_PRIO_MAX - param.sched_priority;
- break;
- case PRI_REALTIME:
- rtp.prio = RTP_PRIO_MAX - param.sched_priority;
- break;
- case PRI_TIMESHARE:
- rtp.prio = 0;
- break;
- default:
- return (EINVAL);
- }
- ret = rtp_to_pri(&rtp, ttd->td_ksegrp);
- if (ret == 0)
- ttd->td_flags |= TDF_NEEDRESCHED;
- mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
- return (ret);
-}
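
With thr_setscheduler(), thr_getscheduler() and thr_setschedparam() removed, the POSIX policy translation moves to userland (libthr). A sketch of that translation, mirroring the mapping the removed kernel code performed; the helper name sched_to_rtprio is illustrative, not an existing API:

	#include <sys/types.h>
	#include <sys/rtprio.h>
	#include <sched.h>
	#include <errno.h>

	/*
	 * Convert a POSIX (policy, sched_param) pair into the struct rtprio
	 * that thr_new()/rtprio_thread() expect, using the same mapping the
	 * removed thr_setscheduler() code used in the kernel.
	 */
	static int
	sched_to_rtprio(int policy, const struct sched_param *param,
	    struct rtprio *rtp)
	{
		switch (policy) {
		case SCHED_FIFO:
			rtp->type = RTP_PRIO_FIFO;
			rtp->prio = RTP_PRIO_MAX - param->sched_priority;
			break;
		case SCHED_RR:
			rtp->type = RTP_PRIO_REALTIME;
			rtp->prio = RTP_PRIO_MAX - param->sched_priority;
			break;
		case SCHED_OTHER:
			rtp->type = RTP_PRIO_NORMAL;
			rtp->prio = 0;	/* create_thread() forces 0 anyway */
			break;
		default:
			errno = EINVAL;
			return (-1);
		}
		return (0);
	}

The kernel still rejects rtp->prio values above RTP_PRIO_MAX, so a caller should range-check sched_priority before the subtraction.
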
diff --git a/sys/kern/syscalls.master b/sys/kern/syscalls.master
index dda3e99..6dd58b8 100644
--- a/sys/kern/syscalls.master
+++ b/sys/kern/syscalls.master
@@ -819,15 +819,10 @@
463 AUE_NULL STD { int abort2(const char *why, int nargs, void **args); }
464 AUE_NULL STD { int thr_set_name(long id, const char *name); }
465 AUE_NULL NOSTD { int aio_fsync(int op, struct aiocb *aiocbp); }
-466 AUE_NULL STD { int thr_setscheduler(long id, int policy,\
- const struct sched_param *param, \
- int param_size); }
-467 AUE_NULL STD { int thr_getscheduler(long id, int *policy,\
- struct sched_param *param, \
- int param_size); }
-468 AUE_NULL STD { int thr_setschedparam(long id, \
- const struct sched_param *param, \
- int param_size); }
+466 AUE_RTPRIO STD { int rtprio_thread(int function, \
+ lwpid_t lwpid, struct rtprio *rtp); }
+467 AUE_NULL UNIMPL nosys
+468 AUE_NULL UNIMPL nosys
469 AUE_NULL UNIMPL __getpath_fromfd
470 AUE_NULL UNIMPL __getpath_fromaddr
; Please copy any additions and changes to the following compatability tables:
diff --git a/sys/sys/rtprio.h b/sys/sys/rtprio.h
index 21268fc..c450ada 100644
--- a/sys/sys/rtprio.h
+++ b/sys/sys/rtprio.h
@@ -86,6 +86,7 @@ void pri_to_rtp(struct ksegrp *, struct rtprio *);
__BEGIN_DECLS
int rtprio(int, pid_t, struct rtprio *);
+int rtprio_thread(int, lwpid_t, struct rtprio *);
__END_DECLS
#endif /* !_KERNEL */
#endif /* !_SYS_RTPRIO_H_ */
diff --git a/sys/sys/thr.h b/sys/sys/thr.h
index c6153d0..bd2723e 100644
--- a/sys/sys/thr.h
+++ b/sys/sys/thr.h
@@ -32,11 +32,6 @@
#include <posix4/sched.h>
-struct thr_sched_param {
- int policy;
- struct sched_param param;
-};
-
/* Create the thread in the suspended state. */
#define THR_SUSPENDED 0x0001
/* Create the system scope thread. */
@@ -52,9 +47,8 @@ struct thr_param {
long *child_tid; /* address to store new TID. */
long *parent_tid; /* parent accesses the new TID here. */
int flags; /* thread flags. */
- struct thr_sched_param *sched_param; /* POSIX scheduler parameters .*/
- long sched_param_size; /* scheduler parameter size */
- void *spare[2]; /* TODO: cpu affinity mask etc. */
+ struct rtprio *rtp; /* Real-time scheduling priority */
+ void *spare[3]; /* TODO: cpu affinity mask etc. */
};
/*
@@ -70,12 +64,6 @@ int thr_kill(long id, int sig);
int thr_suspend(const struct timespec *timeout);
int thr_wake(long id);
int thr_set_name(long id, const char *name);
-int thr_setscheduler(long id, int policy, const struct sched_param *param,
- int param_size);
-int thr_getscheduler(long id, int *policy, struct sched_param *param,
- int param_size);
-int thr_setschedparam(long id, const struct sched_param *param,
- int param_size);
#endif /* !_KERNEL */
#endif /* ! _SYS_THR_H_ */
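
The new rtp member of struct thr_param replaces the sched_param/sched_param_size pair. A sketch of filling the structure for thr_new(); the helper name is illustrative, and stack and TLS setup are left to the caller as in libthr:

	#include <sys/types.h>
	#include <sys/thr.h>
	#include <sys/rtprio.h>
	#include <string.h>

	/*
	 * Prepare a thr_param for thr_new(), attaching a scheduling request
	 * through the new rtp pointer.  Unset fields (tls_base, spare[]) are
	 * zeroed for forward compatibility.
	 */
	static void
	thr_param_init(struct thr_param *param, void (*start)(void *), void *arg,
	    char *stack, size_t stacksz, long *child, long *parent,
	    struct rtprio *rtp)
	{
		memset(param, 0, sizeof(*param));
		param->start_func = start;
		param->arg = arg;
		param->stack_base = stack;
		param->stack_size = stacksz;
		param->child_tid = child;
		param->parent_tid = parent;
		param->flags = 0;
		param->rtp = rtp;	/* NULL keeps the creator's class */
	}

The caller then invokes thr_new(&param, sizeof(param)). Passing rtp as NULL preserves the old default: the new thread simply inherits the creating thread's scheduling class.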