summaryrefslogtreecommitdiffstats
path: root/sys/kern/sched_ule.c
diff options
context:
space:
mode:
authorattilio <attilio@FreeBSD.org>2012-10-31 18:07:18 +0000
committerattilio <attilio@FreeBSD.org>2012-10-31 18:07:18 +0000
commitd38d7bb24528def9112ecd75c967b4202e87cc4e (patch)
tree6e28b3206316703630ba5a9b796a652c5c2bf190 /sys/kern/sched_ule.c
parent8fbe05091504259bdb07f38b21e970c6f172b8cd (diff)
downloadFreeBSD-src-d38d7bb24528def9112ecd75c967b4202e87cc4e.zip
FreeBSD-src-d38d7bb24528def9112ecd75c967b4202e87cc4e.tar.gz
Rework the known mutexes to benefit from staying on their own
cache line in order to avoid manual frobbing but using struct mtx_padalign. The sole exception being the nvme and sfxge drivers, where the author redefined CACHE_LINE_SIZE manually, so they need to be analyzed and dealt with separately. Reviewed by: jimharris, alc
Diffstat (limited to 'sys/kern/sched_ule.c')
-rw-r--r--sys/kern/sched_ule.c5
1 files changed, 2 insertions, 3 deletions
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index b3cf10d..d078ea1 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -228,8 +228,7 @@ struct tdq {
* tdq_lock is padded to avoid false sharing with tdq_load and
* tdq_cpu_idle.
*/
- struct mtx tdq_lock; /* run queue lock. */
- char pad[64 - sizeof(struct mtx)];
+ struct mtx_padalign tdq_lock; /* run queue lock. */
struct cpu_group *tdq_cg; /* Pointer to cpu topology. */
volatile int tdq_load; /* Aggregate load. */
volatile int tdq_cpu_idle; /* cpu_idle() is active. */
@@ -292,7 +291,7 @@ static struct tdq tdq_cpu;
#define TDQ_LOCK(t) mtx_lock_spin(TDQ_LOCKPTR((t)))
#define TDQ_LOCK_FLAGS(t, f) mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
#define TDQ_UNLOCK(t) mtx_unlock_spin(TDQ_LOCKPTR((t)))
-#define TDQ_LOCKPTR(t) (&(t)->tdq_lock)
+#define TDQ_LOCKPTR(t) ((struct mtx *)(&(t)->tdq_lock))
static void sched_priority(struct thread *);
static void sched_thread_priority(struct thread *, u_char);
OpenPOWER on IntegriCloud