diff options

Diffstat (limited to 'sys/kern'):

 sys/kern/init_main.c     |  2
 sys/kern/kern_clock.c    |  2
 sys/kern/kern_proc.c     |  2
 sys/kern/kern_resource.c | 44
 sys/kern/sched_4bsd.c    | 13
 sys/kern/sched_ule.c     | 54

 6 files changed, 51 insertions(+), 66 deletions(-)
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c index 30c0d86..757f8ae 100644 --- a/sys/kern/init_main.c +++ b/sys/kern/init_main.c @@ -382,8 +382,8 @@ proc0_init(void *dummy __unused) p->p_flag = P_SYSTEM; p->p_sflag = PS_INMEM; p->p_state = PRS_NORMAL; + p->p_nice = NZERO; td->td_state = TDS_RUNNING; - kg->kg_nice = NZERO; kg->kg_pri_class = PRI_TIMESHARE; kg->kg_user_pri = PUSER; td->td_priority = PVM; diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c index 5d56283..d346e23 100644 --- a/sys/kern/kern_clock.c +++ b/sys/kern/kern_clock.c @@ -392,7 +392,7 @@ statclock(frame) if (p->p_flag & P_SA) thread_statclock(1); p->p_uticks++; - if (td->td_ksegrp->kg_nice > NZERO) + if (p->p_nice > NZERO) cp_time[CP_NICE]++; else cp_time[CP_USER]++; diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c index 74fa5af..daf51b0 100644 --- a/sys/kern/kern_proc.c +++ b/sys/kern/kern_proc.c @@ -740,6 +740,7 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp) kp->ki_sflag = p->p_sflag; kp->ki_swtime = p->p_swtime; kp->ki_pid = p->p_pid; + kp->ki_nice = p->p_nice; kg = td->td_ksegrp; ke = td->td_kse; bintime2timeval(&p->p_runtime, &tv); @@ -751,7 +752,6 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp) kp->ki_slptime = kg->kg_slptime; kp->ki_pri.pri_user = kg->kg_user_pri; kp->ki_pri.pri_class = kg->kg_pri_class; - kp->ki_nice = kg->kg_nice; /* Things in the thread */ kp->ki_wchan = td->td_wchan; diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c index dd9ad4d..8881920 100644 --- a/sys/kern/kern_resource.c +++ b/sys/kern/kern_resource.c @@ -88,7 +88,6 @@ getpriority(td, uap) struct thread *td; register struct getpriority_args *uap; { - struct ksegrp *kg; struct proc *p; int error, low; @@ -98,16 +97,13 @@ getpriority(td, uap) case PRIO_PROCESS: if (uap->who == 0) - low = td->td_ksegrp->kg_nice; + low = td->td_proc->p_nice; else { p = pfind(uap->who); if (p == NULL) break; if (p_cansee(td, p) == 0) { - FOREACH_KSEGRP_IN_PROC(p, 
kg) { - if (kg->kg_nice < low) - low = kg->kg_nice; - } + low = p->p_nice; } PROC_UNLOCK(p); } @@ -131,10 +127,8 @@ getpriority(td, uap) LIST_FOREACH(p, &pg->pg_members, p_pglist) { PROC_LOCK(p); if (!p_cansee(td, p)) { - FOREACH_KSEGRP_IN_PROC(p, kg) { - if (kg->kg_nice < low) - low = kg->kg_nice; - } + if (p->p_nice < low) + low = p->p_nice; } PROC_UNLOCK(p); } @@ -150,10 +144,8 @@ getpriority(td, uap) PROC_LOCK(p); if (!p_cansee(td, p) && p->p_ucred->cr_uid == uap->who) { - FOREACH_KSEGRP_IN_PROC(p, kg) { - if (kg->kg_nice < low) - low = kg->kg_nice; - } + if (p->p_nice < low) + low = p->p_nice; } PROC_UNLOCK(p); } @@ -260,19 +252,13 @@ setpriority(td, uap) } /* - * Set "nice" for a process. Doesn't really understand threaded processes - * well but does try. Has the unfortunate side effect of making all the NICE - * values for a process's ksegrps the same. This suggests that - * NICE values should be stored as a process nice and deltas for the ksegrps. - * (but not yet). + * Set "nice" for a (whole) process. */ static int donice(struct thread *td, struct proc *p, int n) { - struct ksegrp *kg; - int error, low; + int error; - low = PRIO_MAX + 1; PROC_LOCK_ASSERT(p, MA_OWNED); if ((error = p_cansched(td, p))) return (error); @@ -280,20 +266,10 @@ donice(struct thread *td, struct proc *p, int n) n = PRIO_MAX; if (n < PRIO_MIN) n = PRIO_MIN; - /* - * Only allow nicing if to more than the lowest nice. 
- * E.g., for nices of 4,3,2 allow nice to 3 but not 1 - */ - FOREACH_KSEGRP_IN_PROC(p, kg) { - if (kg->kg_nice < low) - low = kg->kg_nice; - } - if (n < low && suser(td) != 0) + if (n < p->p_nice && suser(td) != 0) return (EACCES); mtx_lock_spin(&sched_lock); - FOREACH_KSEGRP_IN_PROC(p, kg) { - sched_nice(kg, n); - } + sched_nice(p, n); mtx_unlock_spin(&sched_lock); return (0); } diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c index 4d3c939..b339b58 100644 --- a/sys/kern/sched_4bsd.c +++ b/sys/kern/sched_4bsd.c @@ -439,7 +439,7 @@ resetpriority(struct ksegrp *kg) if (kg->kg_pri_class == PRI_TIMESHARE) { newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT + - NICE_WEIGHT * (kg->kg_nice - PRIO_MIN); + NICE_WEIGHT * (kg->kg_proc->p_nice - PRIO_MIN); newpriority = min(max(newpriority, PRI_MIN_TIMESHARE), PRI_MAX_TIMESHARE); kg->kg_user_pri = newpriority; @@ -583,13 +583,16 @@ sched_fork_thread(struct thread *td, struct thread *child) } void -sched_nice(struct ksegrp *kg, int nice) +sched_nice(struct proc *p, int nice) { + struct ksegrp *kg; - PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED); + PROC_LOCK_ASSERT(p, MA_OWNED); mtx_assert(&sched_lock, MA_OWNED); - kg->kg_nice = nice; - resetpriority(kg); + p->p_nice = nice; + FOREACH_KSEGRP_IN_PROC(p, kg) { + resetpriority(kg); + } } void diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c index e83e7a7..e7333fc 100644 --- a/sys/kern/sched_ule.c +++ b/sys/kern/sched_ule.c @@ -366,9 +366,9 @@ kseq_load_add(struct kseq *kseq, struct kse *ke) CTR6(KTR_ULE, "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))", ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority, - ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin); + ke->ke_proc->p_nice, kseq->ksq_nicemin); if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) - kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice); + kseq_nice_add(kseq, ke->ke_proc->p_nice); } static void @@ -388,7 +388,7 @@ kseq_load_rem(struct kseq *kseq, struct kse *ke) kseq->ksq_load--; ke->ke_runq = 
NULL; if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) - kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice); + kseq_nice_rem(kseq, ke->ke_proc->p_nice); } static void @@ -929,7 +929,7 @@ sched_priority(struct ksegrp *kg) pri = SCHED_PRI_INTERACT(sched_interact_score(kg)); pri += SCHED_PRI_BASE; - pri += kg->kg_nice; + pri += kg->kg_proc->p_nice; if (pri > PRI_MAX_TIMESHARE) pri = PRI_MAX_TIMESHARE; @@ -980,13 +980,13 @@ sched_slice(struct kse *ke) if (!SCHED_INTERACTIVE(kg)) { int nice; - nice = kg->kg_nice + (0 - kseq->ksq_nicemin); + nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin); if (kseq->ksq_load_timeshare == 0 || - kg->kg_nice < kseq->ksq_nicemin) + kg->kg_proc->p_nice < kseq->ksq_nicemin) ke->ke_slice = SCHED_SLICE_MAX; else if (nice <= SCHED_SLICE_NTHRESH) ke->ke_slice = SCHED_SLICE_NICE(nice); - else if (kg->kg_nice == 0) + else if (kg->kg_proc->p_nice == 0) ke->ke_slice = SCHED_SLICE_MIN; else ke->ke_slice = 0; @@ -995,7 +995,7 @@ sched_slice(struct kse *ke) CTR6(KTR_ULE, "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)", - ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin, + ke, ke->ke_slice, kg->kg_proc->p_nice, kseq->ksq_nicemin, kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg)); return; @@ -1167,29 +1167,35 @@ sched_switch(struct thread *td) } void -sched_nice(struct ksegrp *kg, int nice) +sched_nice(struct proc *p, int nice) { + struct ksegrp *kg; struct kse *ke; struct thread *td; struct kseq *kseq; - PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED); + PROC_LOCK_ASSERT(p, MA_OWNED); mtx_assert(&sched_lock, MA_OWNED); /* * We need to adjust the nice counts for running KSEs. 
*/ - if (kg->kg_pri_class == PRI_TIMESHARE) - FOREACH_KSE_IN_GROUP(kg, ke) { - if (ke->ke_runq == NULL) - continue; - kseq = KSEQ_CPU(ke->ke_cpu); - kseq_nice_rem(kseq, kg->kg_nice); - kseq_nice_add(kseq, nice); + FOREACH_KSEGRP_IN_PROC(p, kg) { + if (kg->kg_pri_class == PRI_TIMESHARE) { + FOREACH_KSE_IN_GROUP(kg, ke) { + if (ke->ke_runq == NULL) + continue; + kseq = KSEQ_CPU(ke->ke_cpu); + kseq_nice_rem(kseq, p->p_nice); + kseq_nice_add(kseq, nice); + } } - kg->kg_nice = nice; - sched_priority(kg); - FOREACH_THREAD_IN_GROUP(kg, td) - td->td_flags |= TDF_NEEDRESCHED; + } + p->p_nice = nice; + FOREACH_KSEGRP_IN_PROC(p, kg) { + sched_priority(kg); + FOREACH_THREAD_IN_GROUP(kg, td) + td->td_flags |= TDF_NEEDRESCHED; + } } void @@ -1246,6 +1252,7 @@ sched_fork(struct proc *p, struct proc *p1) mtx_assert(&sched_lock, MA_OWNED); + p1->p_nice = p->p_nice; sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1)); sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1)); sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1)); @@ -1273,7 +1280,6 @@ sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child) child->kg_slptime = kg->kg_slptime; child->kg_runtime = kg->kg_runtime; child->kg_user_pri = kg->kg_user_pri; - child->kg_nice = kg->kg_nice; sched_interact_fork(child); kg->kg_runtime += tickincr << 10; sched_interact_update(kg); @@ -1327,11 +1333,11 @@ sched_class(struct ksegrp *kg, int class) #endif if (oclass == PRI_TIMESHARE) { kseq->ksq_load_timeshare--; - kseq_nice_rem(kseq, kg->kg_nice); + kseq_nice_rem(kseq, kg->kg_proc->p_nice); } if (nclass == PRI_TIMESHARE) { kseq->ksq_load_timeshare++; - kseq_nice_add(kseq, kg->kg_nice); + kseq_nice_add(kseq, kg->kg_proc->p_nice); } } |