summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
authorjhb <jhb@FreeBSD.org>2007-10-27 22:07:40 +0000
committerjhb <jhb@FreeBSD.org>2007-10-27 22:07:40 +0000
commit3ee3b45ab86278f68d3c2dd9cf172c46447a19ff (patch)
treebaed704cdc044381d72be34836128018f94d8402 /sys
parente0b9f6a2b2bad01ea9271181c944da1d4ca22e3f (diff)
downloadFreeBSD-src-3ee3b45ab86278f68d3c2dd9cf172c46447a19ff.zip
FreeBSD-src-3ee3b45ab86278f68d3c2dd9cf172c46447a19ff.tar.gz
Change the roundrobin implementation in the 4BSD scheduler to trigger a
userland preemption directly from hardclock() via sched_clock() when a thread uses up a full quantum instead of using a periodic timeout to cause a userland preemption every so often. This fixes a potential deadlock when IPI_PREEMPTION isn't enabled where softclock blocks on a lock held by a thread pinned or bound to another CPU. The current thread on that CPU will never be preempted while softclock is blocked. Note that ULE already drives its round-robin userland preemption from sched_clock() as well and always enables IPI_PREEMPT. MFC after: 1 week
Diffstat (limited to 'sys')
-rw-r--r--	sys/kern/sched_4bsd.c	37
1 file changed, 8 insertions(+), 29 deletions(-)
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 058ee0d..b25d2c5 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -108,10 +108,7 @@ static int sched_tdcnt; /* Total runnable threads in the system. */
static int sched_quantum; /* Roundrobin scheduling quantum in ticks. */
#define SCHED_QUANTUM (hz / 10) /* Default sched quantum */
-static struct callout roundrobin_callout;
-
static void setup_runqs(void);
-static void roundrobin(void *arg);
static void schedcpu(void);
static void schedcpu_thread(void);
static void sched_priority(struct thread *td, u_char prio);
@@ -256,27 +253,6 @@ maybe_resched(struct thread *td)
}
/*
- * Force switch among equal priority processes every 100ms.
- * We don't actually need to force a context switch of the current process.
- * The act of firing the event triggers a context switch to softclock() and
- * then switching back out again which is equivalent to a preemption, thus
- * no further work is needed on the local CPU.
- */
-/* ARGSUSED */
-static void
-roundrobin(void *arg)
-{
-
-#ifdef SMP
- mtx_lock_spin(&sched_lock);
- forward_roundrobin();
- mtx_unlock_spin(&sched_lock);
-#endif
-
- callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
-}
-
-/*
* Constants for digital decay and forget:
* 90% of (td_estcpu) usage in 5 * loadav time
* 95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
@@ -551,11 +527,6 @@ sched_setup(void *dummy)
sched_quantum = SCHED_QUANTUM;
hogticks = 2 * sched_quantum;
- callout_init(&roundrobin_callout, CALLOUT_MPSAFE);
-
- /* Kick off timeout driven events by calling first time. */
- roundrobin(NULL);
-
/* Account for thread0. */
sched_load_add();
}
@@ -626,6 +597,14 @@ sched_clock(struct thread *td)
resetpriority(td);
resetpriority_thread(td);
}
+
+ /*
+ * Force a context switch if the current thread has used up a full
+ * quantum (default quantum is 100ms).
+ */
+ if (!TD_IS_IDLETHREAD(td) &&
+ ticks - PCPU_GET(switchticks) >= sched_quantum)
+ td->td_flags |= TDF_NEEDRESCHED;
}
/*
OpenPOWER on IntegriCloud