author		julian <julian@FreeBSD.org>	2007-03-08 06:44:34 +0000
committer	julian <julian@FreeBSD.org>	2007-03-08 06:44:34 +0000
commit		80d6cde009b5766eaf8fb0ee64deb32113487939 (patch)
tree		f414b9c84952f9cf32c444f2ea351d4886916534 /sys/kern
parent		3483dab550cf625cb12e9b0c98e86757477d7a9c (diff)
download	FreeBSD-src-80d6cde009b5766eaf8fb0ee64deb32113487939.zip
		FreeBSD-src-80d6cde009b5766eaf8fb0ee64deb32113487939.tar.gz
Instead of doing comparisons using the pcpu area to see if
a thread is an idle thread, just see if it has the IDLETD flag set. That flag will probably move to the pflags word as it's permanent and never changes for the life of the system, so it doesn't need locking.
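
For reference while reading the diff, the new test is a flag check on the thread's own flags word rather than a pointer comparison against the pcpu area. A minimal sketch of the macro and flag, assuming definitions along the lines of sys/sys/proc.h (the flag value shown is illustrative, not necessarily the tree's):

	/*
	 * Illustrative sketch only; see sys/sys/proc.h for the real
	 * definitions.  TDF_IDLETD is set once, when the per-CPU idle
	 * thread is created, and is never cleared afterwards, so it can
	 * be read without any lock.
	 */
	#define	TDF_IDLETD	0x00000020	/* This is a per-CPU idle thread. */
	#define	TD_IS_IDLETHREAD(td)	(((td)->td_flags & TDF_IDLETD) != 0)

Because the flag travels with the thread itself, the same test works when looking at another CPU's current thread (as in the subr_smp.c hunk below), where the old code had to compare against that CPU's pc_idlethread pointer instead.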
Diffstat (limited to 'sys/kern')
-rw-r--r--	sys/kern/kern_clock.c	2
-rw-r--r--	sys/kern/kern_synch.c	2
-rw-r--r--	sys/kern/sched_core.c	4
-rw-r--r--	sys/kern/sched_ule.c	2
-rw-r--r--	sys/kern/subr_prf.c	2
-rw-r--r--	sys/kern/subr_smp.c	2
-rw-r--r--	sys/kern/vfs_bio.c	8
7 files changed, 11 insertions, 11 deletions
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index dbc6d20..063c2f3 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -450,7 +450,7 @@ statclock(int usermode)
 #endif
 	td->td_pticks++;
 	td->td_sticks++;
-	if (td != PCPU_GET(idlethread))
+	if (!TD_IS_IDLETHREAD(td))
 		cp_time[CP_SYS]++;
 	else
 		cp_time[CP_IDLE]++;
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 4fceafb..0edd670 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -428,7 +428,7 @@ mi_switch(int flags, struct thread *newtd)
 	CTR4(KTR_PROC, "mi_switch: old thread %ld (kse %p, pid %ld, %s)",
 	    td->td_tid, td->td_sched, p->p_pid, p->p_comm);
 #if (KTR_COMPILE & KTR_SCHED) != 0
-	if (td == PCPU_GET(idlethread))
+	if (TD_IS_IDLETHREAD(td))
 		CTR3(KTR_SCHED, "mi_switch: %p(%s) prio %d idle",
 		    td, td->td_proc->p_comm, td->td_priority);
 	else if (newtd != NULL)
diff --git a/sys/kern/sched_core.c b/sys/kern/sched_core.c
index 5db991f..b0994f8 100644
--- a/sys/kern/sched_core.c
+++ b/sys/kern/sched_core.c
@@ -983,7 +983,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 	td->td_flags &= ~TDF_NEEDRESCHED;
 	td->td_owepreempt = 0;
 
-	if (td == PCPU_GET(idlethread)) {
+	if (TD_IS_IDLETHREAD(td)) {
 		TD_SET_CAN_RUN(td);
 	} else {
 		sched_update_runtime(ts, now);
@@ -1230,7 +1230,7 @@ sched_tick(void)
 	/*
 	 * Processes of equal idle priority are run round-robin.
 	 */
-	if (td != PCPU_GET(idlethread) && --ts->ts_slice <= 0) {
+	if (!TD_IS_IDLETHREAD(td) && --ts->ts_slice <= 0) {
 		ts->ts_slice = def_timeslice;
 		td->td_flags |= TDF_NEEDRESCHED;
 	}
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index f52cbc8..2a62798 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1436,7 +1436,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 	 * If the thread has been assigned it may be in the process of switching
 	 * to the new cpu. This is the case in sched_bind().
 	 */
-	if (td == PCPU_GET(idlethread)) {
+	if (TD_IS_IDLETHREAD(td)) {
 		TD_SET_CAN_RUN(td);
 	} else {
 		tdq_load_rem(tdq, ts);
diff --git a/sys/kern/subr_prf.c b/sys/kern/subr_prf.c
index 2d18b49..a0605ab 100644
--- a/sys/kern/subr_prf.c
+++ b/sys/kern/subr_prf.c
@@ -133,7 +133,7 @@ uprintf(const char *fmt, ...)
 	struct putchar_arg pca;
 	int retval;
 
-	if (td == NULL || td == PCPU_GET(idlethread))
+	if (td == NULL || TD_IS_IDLETHREAD(td))
 		return (0);
 
 	mtx_lock(&Giant);
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index f0c0994..d16e2fd 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -201,7 +201,7 @@ forward_roundrobin(void)
 		td = pc->pc_curthread;
 		id = pc->pc_cpumask;
 		if (id != me && (id & stopped_cpus) == 0 &&
-		    td != pc->pc_idlethread) {
+		    !TD_IS_IDLETHREAD(td)) {
 			td->td_flags |= TDF_NEEDRESCHED;
 			map |= id;
 		}
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 5b5302d..5ae3e4f 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -749,7 +749,7 @@ breada(struct vnode * vp, daddr_t * rablkno, int * rabsize,
 		rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
 
 		if ((rabp->b_flags & B_CACHE) == 0) {
-			if (curthread != PCPU_GET(idlethread))
+			if (!TD_IS_IDLETHREAD(curthread))
 				curthread->td_proc->p_stats->p_ru.ru_inblock++;
 			rabp->b_flags |= B_ASYNC;
 			rabp->b_flags &= ~B_INVAL;
@@ -784,7 +784,7 @@ breadn(struct vnode * vp, daddr_t blkno, int size,
 
 	/* if not found in cache, do some I/O */
 	if ((bp->b_flags & B_CACHE) == 0) {
-		if (curthread != PCPU_GET(idlethread))
+		if (!TD_IS_IDLETHREAD(curthread))
 			curthread->td_proc->p_stats->p_ru.ru_inblock++;
 		bp->b_iocmd = BIO_READ;
 		bp->b_flags &= ~B_INVAL;
@@ -863,7 +863,7 @@ bufwrite(struct buf *bp)
 	bp->b_runningbufspace = bp->b_bufsize;
 	atomic_add_int(&runningbufspace, bp->b_runningbufspace);
 
-	if (curthread != PCPU_GET(idlethread))
+	if (!TD_IS_IDLETHREAD(curthread))
 		curthread->td_proc->p_stats->p_ru.ru_oublock++;
 	if (oldflags & B_ASYNC)
 		BUF_KERNPROC(bp);
@@ -2445,7 +2445,7 @@ loop:
 	 * XXX remove if 0 sections (clean this up after its proven)
 	 */
 	if (numfreebuffers == 0) {
-		if (curthread == PCPU_GET(idlethread))
+		if (TD_IS_IDLETHREAD(curthread))
 			return NULL;
 		mtx_lock(&nblock);
 		needsbuffer |= VFS_BIO_NEED_ANY;
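
As a footnote to the commit message's claim that the flag is permanent: the tagging happens once on the idle-thread creation path and nothing ever clears it. A hedged sketch of that one-time step, using a hypothetical helper name (the real code lives in sys/kern/kern_idle.c):

	/*
	 * Hypothetical helper illustrating the one-time tagging: each
	 * per-CPU idle thread gets TDF_IDLETD before it ever runs, so
	 * every later TD_IS_IDLETHREAD() check is race-free by design.
	 */
	static void
	idle_thread_tag(struct thread *td)
	{
		td->td_flags |= TDF_IDLETD;	/* set once, never cleared */
	}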