path: root/sys/kern
author	jeff <jeff@FreeBSD.org>	2004-02-01 02:48:36 +0000
committer	jeff <jeff@FreeBSD.org>	2004-02-01 02:48:36 +0000
commit	201544a2b65d8dcf48483b4d2ddc5674f31a316e (patch)
tree	72e2f7df2b3efb5008aa65456ff3e251d1fbd7e0 /sys/kern
parent	c78b51b49ed0946a2b5fcc12c6ae44d92991c423 (diff)
- Add a new member to struct kseq called ksq_sysload. This is intended to
  track the load for the sched_load() function. In the SMP case this member
  is not defined because it would be redundant with the ksg_load member,
  which already tracks the non-ITHD load.
- For sched_load() in the UP case, simply return ksq_sysload. In the SMP
  case, traverse the list of kseq groups and sum up their ksg_load fields.
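
For context, sched_load() is the hook that lets machine-independent code read
the instantaneous run-queue load without knowing ULE's internals; its natural
consumer is the load-average computation. The following is a minimal sketch of
such a consumer in the style of the classic BSD loadav() in kern_synch.c. The
fixpt_t/struct loadavg definitions, the decay constants, and the
loadav_update() name are assumptions made for illustration, not part of this
commit.

#include <stdint.h>

typedef uint32_t fixpt_t;		/* fixed-point type, as in <sys/types.h> */

struct loadavg {			/* layout as in <sys/resource.h> */
	fixpt_t	ldavg[3];
	long	fscale;
};

#define	FSHIFT	11			/* bits of fixed-point fraction */
#define	FSCALE	(1 << FSHIFT)

extern int sched_load(void);		/* the interface this commit implements */

/*
 * Decay factors for the 1-, 5- and 15-minute averages, assuming a
 * 5-second sampling interval: exp(-5/60), exp(-5/300), exp(-5/900),
 * scaled to fixed point.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};

/*
 * Fold the load reported by sched_load() into the decayed averages:
 * ldavg = ldavg * cexp + nrun * (1 - cexp), in fixed point.
 */
static void
loadav_update(struct loadavg *avg)
{
	int i, nrun;

	nrun = sched_load();
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
}

Because interrupt threads are excluded from both ksq_sysload and ksg_load, the
averages computed this way reflect only runnable non-interrupt work, on UP and
SMP alike.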
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/sched_ule.c | 30 +++++++++++++++++++++++++++++++---
1 file changed, 27 insertions(+), 3 deletions(-)
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 0359f0f..1d13454 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -216,6 +216,8 @@ struct kseq {
 	LIST_ENTRY(kseq)	ksq_siblings;	/* Next in kseq group. */
 	struct kseq_group	*ksq_group;	/* Our processor group. */
 	volatile struct kse	*ksq_assigned;	/* assigned by another CPU. */
+#else
+	int			ksq_sysload;	/* For loadavg, !ITHD load. */
 #endif
 };
 
@@ -355,9 +357,11 @@ kseq_load_add(struct kseq *kseq, struct kse *ke)
 	if (class == PRI_TIMESHARE)
 		kseq->ksq_load_timeshare++;
 	kseq->ksq_load++;
-#ifdef SMP
 	if (class != PRI_ITHD)
+#ifdef SMP
 		kseq->ksq_group->ksg_load++;
+#else
+		kseq->ksq_sysload++;
 #endif
 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
 		CTR6(KTR_ULE,
@@ -376,9 +380,11 @@ kseq_load_rem(struct kseq *kseq, struct kse *ke)
 	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
 	if (class == PRI_TIMESHARE)
 		kseq->ksq_load_timeshare--;
-#ifdef SMP
 	if (class != PRI_ITHD)
+#ifdef SMP
 		kseq->ksq_group->ksg_load--;
+#else
+		kseq->ksq_sysload--;
 #endif
 	kseq->ksq_load--;
 	ke->ke_runq = NULL;
@@ -1166,8 +1172,10 @@ sched_switch(struct thread *td)
 		} else
 			kseq_runq_add(KSEQ_SELF(), ke);
 	} else {
-		if (ke->ke_runq)
+		if (ke->ke_runq) {
 			kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
+		} else if ((td->td_flags & TDF_IDLETD) == 0)
+			backtrace();
 		/*
 		 * We will not be on the run queue. So we must be
 		 * sleeping or similar.
@@ -1716,6 +1724,22 @@ sched_unbind(struct thread *td)
 }
 
 int
+sched_load(void)
+{
+#ifdef SMP
+	int total;
+	int i;
+
+	total = 0;
+	for (i = 0; i <= ksg_maxid; i++)
+		total += KSEQ_GROUP(i)->ksg_load;
+	return (total);
+#else
+	return (KSEQ_SELF()->ksq_sysload);
+#endif
+}
+
+int
 sched_sizeof_kse(void)
 {
 	return (sizeof(struct kse) + sizeof(struct ke_sched));
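
A note on the kseq_load_add()/kseq_load_rem() hunks: the if (class != PRI_ITHD)
test previously sat inside the #ifdef SMP block, and the commit hoists it out
so that UP and SMP share the same exclusion of interrupt-thread load, differing
only in which counter the branch updates. After preprocessing, the UP add path
reduces to the following (a sketch of the expanded code, not a separate
change):

	/* kseq_load_add() on a UP kernel, after cpp. */
	if (class != PRI_ITHD)
		kseq->ksq_sysload++;

kseq_load_rem() mirrors this with a decrement, so on UP the ksq_sysload counter
always equals the number of runnable non-ITHD KSEs that sched_load() should
report.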