diff options
author | jake <jake@FreeBSD.org> | 2001-02-12 00:20:08 +0000 |
---|---|---|
committer | jake <jake@FreeBSD.org> | 2001-02-12 00:20:08 +0000 |
commit | 55d5108ac58bdc48fbd9eccefcb58a49682107b5 (patch) | |
tree | 2e0de19f9802474be1018e6086f4bb4e01fed0be /sys/kern/kern_mutex.c | |
parent | 3acecaf2d523e3763f225a429b3007ff059b4253 (diff) | |
download | FreeBSD-src-55d5108ac58bdc48fbd9eccefcb58a49682107b5.zip FreeBSD-src-55d5108ac58bdc48fbd9eccefcb58a49682107b5.tar.gz |
Implement a unified run queue and adjust priority levels accordingly.
- All processes go into the same array of queues, with different
scheduling classes using different portions of the array. This
allows user processes to have their priorities propagated up into
interrupt thread range if need be.
- I chose 64 run queues as an arbitrary number that is greater than
32. We used to have 4 separate arrays of 32 queues each, so this
may not be optimal. The new run queue code was written with this
in mind; changing the number of run queues only requires changing
constants in runq.h and adjusting the priority levels.
- The new run queue code takes the run queue as a parameter. This
is intended to be used to create per-cpu run queues. Implement
wrappers for compatibility with the old interface which pass in
the global run queue structure.
- Group the priority level, user priority, native priority (before
propagation) and the scheduling class into a struct priority.
- Change any hard coded priority levels that I found to use
symbolic constants (TTIPRI and TTOPRI).
- Remove the curpriority global variable and use that of curproc.
This was used to detect when a process' priority had lowered and
it should yield. We now effectively yield on every interrupt.
- Activate propagate_priority(). It should now have the desired
effect without needing to also propagate the scheduling class.
- Temporarily comment out the call to vm_page_zero_idle() in the
idle loop. It interfered with propagate_priority() because
the idle process needed to do a non-blocking acquire of Giant
and then other processes would try to propagate their priority
onto it. The idle process should not do anything except idle.
vm_page_zero_idle() will return in the form of an idle priority
kernel thread which is woken up at appropriate times by the vm
system.
- Update struct kinfo_proc to the new priority interface. Deliberately
change its size by adjusting the spare fields. It remained the same
size, but the layout has changed, so userland processes that use it
would parse the data incorrectly. The size constraint should really
be changed to an arbitrary version number. Also add a debug.sizeof
sysctl node for struct kinfo_proc.
Diffstat (limited to 'sys/kern/kern_mutex.c')
-rw-r--r-- | sys/kern/kern_mutex.c | 57 |
1 files changed, 16 insertions, 41 deletions
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c index 7131b31..aec8b2c 100644 --- a/sys/kern/kern_mutex.c +++ b/sys/kern/kern_mutex.c @@ -106,7 +106,7 @@ struct mtx_debug { : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK)) #define RETIP(x) *(((uintptr_t *)(&x)) - 1) -#define SET_PRIO(p, pri) (p)->p_priority = (pri) +#define SET_PRIO(p, pri) (p)->p_pri.pri_level = (pri) /* * Early WITNESS-enabled declarations. @@ -180,7 +180,7 @@ static void propagate_priority(struct proc *); static void propagate_priority(struct proc *p) { - int pri = p->p_priority; + int pri = p->p_pri.pri_level; struct mtx *m = p->p_blocked; mtx_assert(&sched_lock, MA_OWNED); @@ -201,7 +201,7 @@ propagate_priority(struct proc *p) MPASS(p->p_magic == P_MAGIC); KASSERT(p->p_stat != SSLEEP, ("sleeping process owns a mutex")); - if (p->p_priority <= pri) + if (p->p_pri.pri_level <= pri) return; /* @@ -212,32 +212,16 @@ propagate_priority(struct proc *p) /* * If lock holder is actually running, just bump priority. */ -#ifdef SMP - /* - * For SMP, we can check the p_oncpu field to see if we are - * running. - */ if (p->p_oncpu != 0xff) { MPASS(p->p_stat == SRUN || p->p_stat == SZOMB); return; } -#else - /* - * For UP, we check to see if p is curproc (this shouldn't - * ever happen however as it would mean we are in a deadlock.) - */ - if (p == curproc) { - panic("Deadlock detected"); - return; - } -#endif + /* * If on run queue move to new run queue, and * quit. 
*/ if (p->p_stat == SRUN) { - printf("XXX: moving proc %d(%s) to a new run queue\n", - p->p_pid, p->p_comm); MPASS(p->p_blocked == NULL); remrunqueue(p); setrunqueue(p); @@ -258,23 +242,16 @@ propagate_priority(struct proc *p) m = p->p_blocked; MPASS(m != NULL); - printf("XXX: process %d(%s) is blocked on %s\n", p->p_pid, - p->p_comm, m->mtx_description); - /* * Check if the proc needs to be moved up on * the blocked chain */ if (p == TAILQ_FIRST(&m->mtx_blocked)) { - printf("XXX: process at head of run queue\n"); continue; } - p1 = TAILQ_PREV(p, rq, p_procq); - if (p1->p_priority <= pri) { - printf( - "XXX: previous process %d(%s) has higher priority\n", - p->p_pid, p->p_comm); + p1 = TAILQ_PREV(p, procqueue, p_procq); + if (p1->p_pri.pri_level <= pri) { continue; } @@ -288,7 +265,7 @@ propagate_priority(struct proc *p) TAILQ_REMOVE(&m->mtx_blocked, p, p_procq); TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq) { MPASS(p1->p_magic == P_MAGIC); - if (p1->p_priority > pri) + if (p1->p_pri.pri_level > pri) break; } @@ -371,7 +348,7 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line) * p_nativepri is only read when we are blocked on a mutex, so that * can't be happening right now either. 
*/ - p->p_nativepri = p->p_priority; + p->p_pri.pri_native = p->p_pri.pri_level; while (!_obtain_lock(m, p)) { uintptr_t v; @@ -396,8 +373,8 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line) MPASS(p1 != NULL); m->mtx_lock = (uintptr_t)p | MTX_CONTESTED; - if (p1->p_priority < p->p_priority) - SET_PRIO(p, p1->p_priority); + if (p1->p_pri.pri_level < p->p_pri.pri_level) + SET_PRIO(p, p1->p_pri.pri_level); mtx_unlock_spin(&sched_lock); return; } @@ -446,7 +423,7 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line) TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq); } else { TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq) - if (p1->p_priority > p->p_priority) + if (p1->p_pri.pri_level > p->p_pri.pri_level) break; if (p1) TAILQ_INSERT_BEFORE(p1, p, p_procq); @@ -460,9 +437,7 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line) p->p_blocked = m; p->p_mtxname = m->mtx_description; p->p_stat = SMTX; -#if 0 propagate_priority(p); -#endif if ((opts & MTX_QUIET) == 0) CTR3(KTR_LOCK, @@ -565,15 +540,15 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line) } else atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED); - pri = MAXPRI; + pri = PRI_MAX; LIST_FOREACH(m1, &p->p_contested, mtx_contested) { - int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority; + int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_pri.pri_level; if (cp < pri) pri = cp; } - if (pri > p->p_nativepri) - pri = p->p_nativepri; + if (pri > p->p_pri.pri_native) + pri = p->p_pri.pri_native; SET_PRIO(p, pri); if ((opts & MTX_QUIET) == 0) @@ -585,7 +560,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line) p1->p_stat = SRUN; setrunqueue(p1); - if ((opts & MTX_NOSWITCH) == 0 && p1->p_priority < pri) { + if ((opts & MTX_NOSWITCH) == 0 && p1->p_pri.pri_level < pri) { #ifdef notyet if (p->p_flag & (P_ITHD | P_SITHD)) { ithd_t *it = (ithd_t *)p; |