summary refs log tree commit diff stats
diff options
context:
space:
mode:
authoralc <alc@FreeBSD.org>2012-06-02 22:14:10 +0000
committeralc <alc@FreeBSD.org>2012-06-02 22:14:10 +0000
commit3b78f745b2c89c26ef7bfa3aa81dae8b9e81ea05 (patch)
treee446afbbb8bb4a6b6d1b29e0450ae48332784e4b
parent6f6d510dc7250ad5839c1ddf9c5835e780902191 (diff)
downloadFreeBSD-src-3b78f745b2c89c26ef7bfa3aa81dae8b9e81ea05.zip
FreeBSD-src-3b78f745b2c89c26ef7bfa3aa81dae8b9e81ea05.tar.gz
Isolate the global pv list lock from data and other locks to prevent false
sharing within the cache.
-rw-r--r--sys/amd64/amd64/pmap.c12
-rw-r--r--sys/i386/i386/pmap.c12
2 files changed, 22 insertions(+), 2 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 7936897..a1d2cf5 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -200,12 +200,22 @@ static u_int64_t DMPDphys; /* phys addr of direct mapped level 2 */
static u_int64_t DMPDPphys; /* phys addr of direct mapped level 3 */
/*
+ * Isolate the global pv list lock from data and other locks to prevent false
+ * sharing within the cache.
+ */
+static struct {
+ struct rwlock lock;
+ char padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
+} pvh_global __aligned(CACHE_LINE_SIZE);
+
+#define pvh_global_lock pvh_global.lock
+
+/*
* Data for the pv entry allocation mechanism
*/
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
static long pv_entry_count;
static struct md_page *pv_table;
-static struct rwlock pvh_global_lock;
/*
* All those kernel PT submaps that BSD is so fond of
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index a81b492..c28efbc 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -232,12 +232,22 @@ SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN, &pg_ps_enabled, 0,
static int pat_index[PAT_INDEX_SIZE]; /* cache mode to PAT index conversion */
/*
+ * Isolate the global pv list lock from data and other locks to prevent false
+ * sharing within the cache.
+ */
+static struct {
+ struct rwlock lock;
+ char padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
+} pvh_global __aligned(CACHE_LINE_SIZE);
+
+#define pvh_global_lock pvh_global.lock
+
+/*
* Data for the pv entry allocation mechanism
*/
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static struct md_page *pv_table;
-static struct rwlock pvh_global_lock;
static int shpgperproc = PMAP_SHPGPERPROC;
struct pv_chunk *pv_chunkbase; /* KVA block for pv_chunks */
OpenPOWER on IntegriCloud