summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--  sys/kern/kern_proc.c    6
-rw-r--r--  sys/kern/kern_sysctl.c  2
-rw-r--r--  sys/kern/subr_vmem.c    4
-rw-r--r--  sys/kern/vfs_bio.c     10
-rw-r--r--  sys/vm/uma_core.c       2
-rw-r--r--  sys/vm/vm_page.c        4
-rw-r--r--  sys/vm/vm_pager.c       2
7 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index eb841c8..24c20a0 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -131,9 +131,9 @@ struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct proclist allproc;
struct proclist zombproc;
-struct sx allproc_lock;
-struct sx proctree_lock;
-struct mtx ppeers_lock;
+struct sx __exclusive_cache_line allproc_lock;
+struct sx __exclusive_cache_line proctree_lock;
+struct mtx __exclusive_cache_line ppeers_lock;
uma_zone_t proc_zone;
/*
diff --git a/sys/kern/kern_sysctl.c b/sys/kern/kern_sysctl.c
index 829f955..6182615 100644
--- a/sys/kern/kern_sysctl.c
+++ b/sys/kern/kern_sysctl.c
@@ -88,7 +88,7 @@ static MALLOC_DEFINE(M_SYSCTLTMP, "sysctltmp", "sysctl temp output buffer");
* sysctl requests larger than a single page via an exclusive lock.
*/
static struct rmlock sysctllock;
-static struct sx sysctlmemlock;
+static struct sx __exclusive_cache_line sysctlmemlock;
#define SYSCTL_WLOCK() rm_wlock(&sysctllock)
#define SYSCTL_WUNLOCK() rm_wunlock(&sysctllock)
diff --git a/sys/kern/subr_vmem.c b/sys/kern/subr_vmem.c
index 1de6378..4e3f04e 100644
--- a/sys/kern/subr_vmem.c
+++ b/sys/kern/subr_vmem.c
@@ -181,7 +181,7 @@ static struct callout vmem_periodic_ch;
static int vmem_periodic_interval;
static struct task vmem_periodic_wk;
-static struct mtx_padalign vmem_list_lock;
+static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
/* ---- misc */
@@ -580,7 +580,7 @@ qc_drain(vmem_t *vm)
#ifndef UMA_MD_SMALL_ALLOC
-static struct mtx_padalign vmem_bt_lock;
+static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;
/*
* vmem_bt_alloc: Allocate a new page of boundary tags.
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 890dd31..9954356 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -249,23 +249,23 @@ SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD,
/*
* This lock synchronizes access to bd_request.
*/
-static struct mtx_padalign bdlock;
+static struct mtx_padalign __exclusive_cache_line bdlock;
/*
* This lock protects the runningbufreq and synchronizes runningbufwakeup and
* waitrunningbufspace().
*/
-static struct mtx_padalign rbreqlock;
+static struct mtx_padalign __exclusive_cache_line rbreqlock;
/*
* Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
*/
-static struct rwlock_padalign nblock;
+static struct rwlock_padalign __exclusive_cache_line nblock;
/*
* Lock that protects bdirtywait.
*/
-static struct mtx_padalign bdirtylock;
+static struct mtx_padalign __exclusive_cache_line bdirtylock;
/*
* Wakeup point for bufdaemon, as well as indicator of whether it is already
@@ -344,7 +344,7 @@ static int bq_len[BUFFER_QUEUES];
/*
* Lock for each bufqueue
*/
-static struct mtx_padalign bqlocks[BUFFER_QUEUES];
+static struct mtx_padalign __exclusive_cache_line bqlocks[BUFFER_QUEUES];
/*
* per-cpu empty buffer cache.
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 8504a72..bdb6fae 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -138,7 +138,7 @@ static LIST_HEAD(,uma_zone) uma_cachezones =
LIST_HEAD_INITIALIZER(uma_cachezones);
/* This RW lock protects the keg list */
-static struct rwlock_padalign uma_rwlock;
+static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
/* Linked list of boot time pages */
static LIST_HEAD(,uma_slab) uma_boot_pages =
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 16dc868..6553598 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -127,9 +127,9 @@ __FBSDID("$FreeBSD$");
*/
struct vm_domain vm_dom[MAXMEMDOM];
-struct mtx_padalign vm_page_queue_free_mtx;
+struct mtx_padalign __exclusive_cache_line vm_page_queue_free_mtx;
-struct mtx_padalign pa_lock[PA_LOCK_COUNT];
+struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];
vm_page_t vm_page_array;
long vm_page_array_size;
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index 45d0c27..3625b41 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -165,7 +165,7 @@ struct pagerops *pagertab[] = {
* cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
* (MAXPHYS == 64k) if you want to get the most efficiency.
*/
-struct mtx_padalign pbuf_mtx;
+struct mtx_padalign __exclusive_cache_line pbuf_mtx;
static TAILQ_HEAD(swqueue, buf) bswlist;
static int bswneeded;
vm_offset_t swapbkva; /* swap buffers kva */
OpenPOWER on IntegriCloud