author     mjg <mjg@FreeBSD.org>            2017-12-31 03:06:29 +0000
committer  Luiz Souza <luiz@netgate.com>    2018-02-21 15:15:33 -0300
commit     2546f9b8e48cef95bc882b68d42e05ca3f33078d (patch)
tree       1ca675983c2ea928aeb45af42d4902ae56e8cf0a
parent     4fdf0fa3bcd504bb021d3d74993bae954a87fc11 (diff)
MFC r323234,r323305,r323306,r324044:
Start annotating global _padalign locks with __exclusive_cache_line

While these locks are guaranteed not to share their respective cache lines,
their current placement leaves unnecessary holes in the lines which precede
them. For instance, the annotation of vm_page_queue_free_mtx allows 2
neighbouring cache lines (previously separated by the lock) to be collapsed
into 1.

The annotation is only effective on architectures which implement it in
their linker script (currently only amd64). Thus the locks are not converted
to their non-padaligned variants, so as not to affect the rest.

=============

Annotate global process locks with __exclusive_cache_line

=============

Annotate Giant with __exclusive_cache_line

=============

Annotate sysctlmemlock with __exclusive_cache_line.

(cherry picked from commit dc9eed165c25d9af290b93f577ad7ac9d7b3788c)
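For context, below is a minimal sketch of how an annotation like this can be
expressed with GCC/Clang attributes. The macro and section names mirror the
convention used in the diff, but the exact definition, the section name, and
the 64-byte line size are assumptions for illustration, not a copy of the
FreeBSD headers; on a real system the annotated section must also be laid out
cache-line-aligned by the architecture's linker script.

```c
/*
 * Sketch of a cache-line-exclusive annotation (assumption: GCC/Clang
 * attribute syntax, 64-byte cache lines, and a linker script that aligns
 * the ".data.exclusive_cache_line" section on a cache-line boundary).
 *
 * Collecting all annotated objects in one dedicated section lets the
 * linker pack them one per line, instead of padding every lock structure
 * in place and leaving holes in the neighbouring cache lines.
 */
#define	CACHE_LINE_SIZE		64	/* assumed amd64 line size */

#define	__exclusive_cache_line						\
	__attribute__((__aligned__(CACHE_LINE_SIZE),			\
	    __section__(".data.exclusive_cache_line")))

/* Placeholder lock type standing in for struct mtx / struct sx. */
struct example_lock {
	volatile unsigned long	lk_word;
};

/* Usage matches the diff: annotate the global lock definition. */
static struct example_lock __exclusive_cache_line example_global_lock;
```

With the annotation, the lock itself no longer needs to be a padded
"_padalign" variant embedded among other globals; the exclusive line is
provided by section placement, so the data that used to sit around the
padded lock can be packed tightly again.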
-rw-r--r--  sys/kern/kern_proc.c     6
-rw-r--r--  sys/kern/kern_sysctl.c   2
-rw-r--r--  sys/kern/subr_vmem.c     4
-rw-r--r--  sys/kern/vfs_bio.c      10
-rw-r--r--  sys/vm/uma_core.c        2
-rw-r--r--  sys/vm/vm_page.c         4
-rw-r--r--  sys/vm/vm_pager.c        2
7 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index eb841c8..24c20a0 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -131,9 +131,9 @@ struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct proclist allproc;
struct proclist zombproc;
-struct sx allproc_lock;
-struct sx proctree_lock;
-struct mtx ppeers_lock;
+struct sx __exclusive_cache_line allproc_lock;
+struct sx __exclusive_cache_line proctree_lock;
+struct mtx __exclusive_cache_line ppeers_lock;
uma_zone_t proc_zone;
/*
diff --git a/sys/kern/kern_sysctl.c b/sys/kern/kern_sysctl.c
index 829f955..6182615 100644
--- a/sys/kern/kern_sysctl.c
+++ b/sys/kern/kern_sysctl.c
@@ -88,7 +88,7 @@ static MALLOC_DEFINE(M_SYSCTLTMP, "sysctltmp", "sysctl temp output buffer");
* sysctl requests larger than a single page via an exclusive lock.
*/
static struct rmlock sysctllock;
-static struct sx sysctlmemlock;
+static struct sx __exclusive_cache_line sysctlmemlock;
#define SYSCTL_WLOCK() rm_wlock(&sysctllock)
#define SYSCTL_WUNLOCK() rm_wunlock(&sysctllock)
diff --git a/sys/kern/subr_vmem.c b/sys/kern/subr_vmem.c
index 1de6378..4e3f04e 100644
--- a/sys/kern/subr_vmem.c
+++ b/sys/kern/subr_vmem.c
@@ -181,7 +181,7 @@ static struct callout vmem_periodic_ch;
static int vmem_periodic_interval;
static struct task vmem_periodic_wk;
-static struct mtx_padalign vmem_list_lock;
+static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
/* ---- misc */
@@ -580,7 +580,7 @@ qc_drain(vmem_t *vm)
#ifndef UMA_MD_SMALL_ALLOC
-static struct mtx_padalign vmem_bt_lock;
+static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;
/*
* vmem_bt_alloc: Allocate a new page of boundary tags.
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 890dd31..9954356 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -249,23 +249,23 @@ SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD,
/*
* This lock synchronizes access to bd_request.
*/
-static struct mtx_padalign bdlock;
+static struct mtx_padalign __exclusive_cache_line bdlock;
/*
* This lock protects the runningbufreq and synchronizes runningbufwakeup and
* waitrunningbufspace().
*/
-static struct mtx_padalign rbreqlock;
+static struct mtx_padalign __exclusive_cache_line rbreqlock;
/*
* Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
*/
-static struct rwlock_padalign nblock;
+static struct rwlock_padalign __exclusive_cache_line nblock;
/*
* Lock that protects bdirtywait.
*/
-static struct mtx_padalign bdirtylock;
+static struct mtx_padalign __exclusive_cache_line bdirtylock;
/*
* Wakeup point for bufdaemon, as well as indicator of whether it is already
@@ -344,7 +344,7 @@ static int bq_len[BUFFER_QUEUES];
/*
* Lock for each bufqueue
*/
-static struct mtx_padalign bqlocks[BUFFER_QUEUES];
+static struct mtx_padalign __exclusive_cache_line bqlocks[BUFFER_QUEUES];
/*
* per-cpu empty buffer cache.
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 8504a72..bdb6fae 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -138,7 +138,7 @@ static LIST_HEAD(,uma_zone) uma_cachezones =
LIST_HEAD_INITIALIZER(uma_cachezones);
/* This RW lock protects the keg list */
-static struct rwlock_padalign uma_rwlock;
+static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
/* Linked list of boot time pages */
static LIST_HEAD(,uma_slab) uma_boot_pages =
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 16dc868..6553598 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -127,9 +127,9 @@ __FBSDID("$FreeBSD$");
*/
struct vm_domain vm_dom[MAXMEMDOM];
-struct mtx_padalign vm_page_queue_free_mtx;
+struct mtx_padalign __exclusive_cache_line vm_page_queue_free_mtx;
-struct mtx_padalign pa_lock[PA_LOCK_COUNT];
+struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];
vm_page_t vm_page_array;
long vm_page_array_size;
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index 45d0c27..3625b41 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -165,7 +165,7 @@ struct pagerops *pagertab[] = {
* cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
* (MAXPHYS == 64k) if you want to get the most efficiency.
*/
-struct mtx_padalign pbuf_mtx;
+struct mtx_padalign __exclusive_cache_line pbuf_mtx;
static TAILQ_HEAD(swqueue, buf) bswlist;
static int bswneeded;
vm_offset_t swapbkva; /* swap buffers kva */