author      jimharris <jimharris@FreeBSD.org>    2012-10-24 18:36:41 +0000
committer   jimharris <jimharris@FreeBSD.org>    2012-10-24 18:36:41 +0000
commit      e56b4fdc17b3d9d906ac2a1a1fe90da82c774712 (patch)
tree        f1d39d90cd621bd25a46eba46dc944558c3c6727 /sys/kern/sched_ule.c
parent      278c83657aa54ebb2f925731cce0f55827c15ecf (diff)
Pad tdq_lock to avoid false sharing with tdq_load and tdq_cpu_idle.
This enables CPU searches (which read tdq_load) to operate independently
of any contention on the spinlock. Some scheduler-intensive workloads
running on an 8C single-socket SNB Xeon show considerable improvement with
this change (2-3% perf improvement, 5-6% decrease in CPU util).

Sponsored by:	Intel
Reviewed by:	jeff
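To illustrate the idiom this patch applies, here is a minimal userland sketch (not FreeBSD code): it assumes a 64-byte cache line and the GCC/Clang aligned attribute, substitutes a pthread mutex for struct mtx, and uses hypothetical names throughout. Without the pad, the lock and the two hot fields would typically share one cache line, so every lock acquisition would steal that line from CPUs polling the load field.

#include <pthread.h>

#define CACHE_LINE_SIZE 64	/* assumed line size, matching the patch's hard-coded 64 */

/*
 * Userland analogue of the patched struct tdq.  The pad pushes the
 * read-mostly fields onto a different cache line than the lock, so
 * threads contending on the lock no longer invalidate the line that
 * lock-free readers of load/cpu_idle have cached.  The struct itself
 * is line-aligned so the separation actually holds.  Assumes
 * sizeof(pthread_mutex_t) < CACHE_LINE_SIZE (true on common 64-bit
 * platforms); see the note after the diff for an explicit guard.
 */
struct padded_queue {
	pthread_mutex_t	lock;
	char		pad[CACHE_LINE_SIZE - sizeof(pthread_mutex_t)];
	volatile int	load;		/* read lock-free by other threads */
	volatile int	cpu_idle;
} __attribute__((aligned(CACHE_LINE_SIZE)));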
Diffstat (limited to 'sys/kern/sched_ule.c')
 sys/kern/sched_ule.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 82d1cce..e42ec52 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -223,8 +223,13 @@ static int sched_idlespinthresh = -1;
* locking in sched_pickcpu();
*/
struct tdq {
- /* Ordered to improve efficiency of cpu_search() and switch(). */
+ /*
+ * Ordered to improve efficiency of cpu_search() and switch().
+ * tdq_lock is padded to avoid false sharing with tdq_load and
+ * tdq_cpu_idle.
+ */
struct mtx tdq_lock; /* run queue lock. */
+ char pad[64 - sizeof(struct mtx)];
struct cpu_group *tdq_cg; /* Pointer to cpu topology. */
volatile int tdq_load; /* Aggregate load. */
volatile int tdq_cpu_idle; /* cpu_idle() is active. */
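A note on the design choice (an observation, not text from the commit): the hard-coded 64 is the assumed cache-line size, and the expression 64 - sizeof(struct mtx) quietly self-checks it, since the array size would go negative and break the build if struct mtx ever outgrew a line. With a C11 compiler the assumption could also be stated explicitly, e.g.:

_Static_assert(sizeof(struct mtx) <= 64, "tdq_lock pad would underflow");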