summaryrefslogtreecommitdiffstats
path: root/sys/kern/sched_ule.c
diff options
context:
space:
mode:
author	attilio <attilio@FreeBSD.org>	2009-11-03 16:46:52 +0000
committer	attilio <attilio@FreeBSD.org>	2009-11-03 16:46:52 +0000
commit1c940ef4f4e225185b488e5cf1498863dfbbcebb (patch)
tree33345f6b0c0607c9b4b3c28c1eb1f317c974a310 /sys/kern/sched_ule.c
parentbd5fd681f2fbbc4a77b04b7586fcd7537c516ded (diff)
downloadFreeBSD-src-1c940ef4f4e225185b488e5cf1498863dfbbcebb.zip
FreeBSD-src-1c940ef4f4e225185b488e5cf1498863dfbbcebb.tar.gz
Split P_NOLOAD into a per-thread flag (TDF_NOLOAD).
This improvement aims at avoiding further cache misses in scheduler-specific functions which need to keep track of average thread running time, and at avoiding further locking in places that set this flag. Reported by: jeff (originally), kris (currently) Reviewed by: jhb Tested by: Giuseppe Cocomazzi <sbudella at email dot it>
Diffstat (limited to 'sys/kern/sched_ule.c')
-rw-r--r--sys/kern/sched_ule.c4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index e0cff5f..1ebfda2 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -495,7 +495,7 @@ tdq_load_add(struct tdq *tdq, struct thread *td)
THREAD_LOCK_ASSERT(td, MA_OWNED);
tdq->tdq_load++;
- if ((td->td_proc->p_flag & P_NOLOAD) == 0)
+ if ((td->td_flags & TDF_NOLOAD) == 0)
tdq->tdq_sysload++;
KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
}
@@ -514,7 +514,7 @@ tdq_load_rem(struct tdq *tdq, struct thread *td)
("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
tdq->tdq_load--;
- if ((td->td_proc->p_flag & P_NOLOAD) == 0)
+ if ((td->td_flags & TDF_NOLOAD) == 0)
tdq->tdq_sysload--;
KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
}
OpenPOWER on IntegriCloud