-rw-r--r--  sys/vm/uma.h       |  1
-rw-r--r--  sys/vm/uma_core.c  | 14
-rw-r--r--  sys/vm/uma_int.h   | 27
-rw-r--r--  sys/vm/vm_map.c    |  2
4 files changed, 34 insertions(+), 10 deletions(-)
diff --git a/sys/vm/uma.h b/sys/vm/uma.h
index 57fcf56..7f17b52 100644
--- a/sys/vm/uma.h
+++ b/sys/vm/uma.h
@@ -172,6 +172,7 @@ uma_zone_t uma_zcreate(char *name, int size, uma_ctor ctor, uma_dtor dtor,
off of the real memory */
#define UMA_ZONE_MALLOC 0x0010 /* For use by malloc(9) only! */
#define UMA_ZONE_NOFREE 0x0020 /* Do not free slabs of this type! */
+#define UMA_ZONE_MTXCLASS 0x0040 /* Create a new lock class */
/* Definitions for align */
#define UMA_ALIGN_PTR (sizeof(void *) - 1) /* Alignment fit for ptr */
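Note: UMA_ZONE_MTXCLASS asks UMA to put the new zone's locks in their own witness(4) lock class instead of the shared "UMA zone" / "UMA cpu" classes. A minimal caller sketch, assuming a hypothetical zone name and item type that are not part of this commit:

    #include <sys/param.h>
    #include <vm/uma.h>

    struct myobj {                  /* hypothetical item type */
            int     mo_field;
    };

    static uma_zone_t zp;

    static void
    myzone_init(void)
    {
            /* Same calling convention as the vm_map.c sites below;
             * only the final flags argument differs. */
            zp = uma_zcreate("MYZONE", sizeof(struct myobj),
                NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
                UMA_ZONE_MTXCLASS);
    }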
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 8418fed..2bb8f15 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -959,6 +959,7 @@ zone_ctor(void *mem, int size, void *udata)
{
struct uma_zctor_args *arg = udata;
uma_zone_t zone = mem;
+ int privlc;
int cplen;
int cpu;
@@ -992,6 +993,11 @@ zone_ctor(void *mem, int size, void *udata)
else
zone_small_init(zone);
+ if (arg->flags & UMA_ZONE_MTXCLASS)
+ privlc = 1;
+ else
+ privlc = 0;
+
/* We do this so that the per cpu lock name is unique for each zone */
memcpy(zone->uz_lname, "PCPU ", 5);
cplen = min(strlen(zone->uz_name) + 1, LOCKNAME_LEN - 6);
@@ -1053,7 +1059,7 @@ zone_ctor(void *mem, int size, void *udata)
zone->uz_size, zone->uz_ipers,
zone->uz_ppera, zone->uz_pgoff);
#endif
- ZONE_LOCK_INIT(zone);
+ ZONE_LOCK_INIT(zone, privlc);
mtx_lock(&uma_mtx);
LIST_INSERT_HEAD(&uma_zones, zone, uz_link);
@@ -1073,7 +1079,7 @@ zone_ctor(void *mem, int size, void *udata)
zone->uz_count = UMA_BUCKET_SIZE - 1;
for (cpu = 0; cpu < maxcpu; cpu++)
- CPU_LOCK_INIT(zone, cpu);
+ CPU_LOCK_INIT(zone, cpu, privlc);
}
/*
@@ -1799,10 +1805,12 @@ uma_zone_set_max(uma_zone_t zone, int nitems)
ZONE_LOCK(zone);
if (zone->uz_ppera > 1)
zone->uz_maxpages = nitems * zone->uz_ppera;
- else
+ else
zone->uz_maxpages = nitems / zone->uz_ipers;
+
if (zone->uz_maxpages * zone->uz_ipers < nitems)
zone->uz_maxpages++;
+
ZONE_UNLOCK(zone);
}
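Note: zone_ctor() reduces the flag to the local privlc and threads it through to the lock-init macros. The mechanism is mtx_init(9)'s third argument, the lock "type": witness(4) groups mutexes that share a type string into one class. A sketch of that semantic, assuming only the documented mtx_init(9) interface:

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>

    static struct mtx a, b;

    static void
    lock_class_demo(void)
    {
            /* privlc == 0 case: the type string "UMA zone" is shared,
             * so this lock joins the common class used by every
             * ordinary zone. */
            mtx_init(&a, "zone A", "UMA zone", MTX_DEF | MTX_DUPOK);

            /* privlc == 1 case: the zone name doubles as the type,
             * giving the lock a class of its own that witness orders
             * independently. */
            mtx_init(&b, "zone B", "zone B", MTX_DEF | MTX_DUPOK);
    }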
diff --git a/sys/vm/uma_int.h b/sys/vm/uma_int.h
index 3190bb8..d7c86bb 100644
--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h
@@ -284,16 +284,31 @@ void uma_large_free(uma_slab_t slab);
/* Lock Macros */
-#define ZONE_LOCK_INIT(z) \
- mtx_init(&(z)->uz_lock, (z)->uz_name, "UMA zone", \
- MTX_DEF | MTX_DUPOK)
+#define ZONE_LOCK_INIT(z, lc) \
+ do { \
+ if ((lc)) \
+ mtx_init(&(z)->uz_lock, (z)->uz_name, \
+ (z)->uz_name, MTX_DEF | MTX_DUPOK); \
+ else \
+ mtx_init(&(z)->uz_lock, (z)->uz_name, \
+ "UMA zone", MTX_DEF | MTX_DUPOK); \
+ } while (0)
+
#define ZONE_LOCK_FINI(z) mtx_destroy(&(z)->uz_lock)
#define ZONE_LOCK(z) mtx_lock(&(z)->uz_lock)
#define ZONE_UNLOCK(z) mtx_unlock(&(z)->uz_lock)
-#define CPU_LOCK_INIT(z, cpu) \
- mtx_init(&(z)->uz_cpu[(cpu)].uc_lock, (z)->uz_lname, "UMA cpu", \
- MTX_DEF | MTX_DUPOK)
+#define CPU_LOCK_INIT(z, cpu, lc) \
+ do { \
+ if ((lc)) \
+ mtx_init(&(z)->uz_cpu[(cpu)].uc_lock, \
+ (z)->uz_lname, (z)->uz_lname, \
+ MTX_DEF | MTX_DUPOK); \
+ else \
+ mtx_init(&(z)->uz_cpu[(cpu)].uc_lock, \
+ (z)->uz_lname, "UMA cpu", \
+ MTX_DEF | MTX_DUPOK); \
+ } while (0)
#define CPU_LOCK_FINI(z, cpu) \
mtx_destroy(&(z)->uz_cpu[(cpu)].uc_lock)
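Note: because ZONE_LOCK_INIT() and CPU_LOCK_INIT() now expand to an if/else rather than a single mtx_init() call, they are wrapped in do { ... } while (0) so each use remains one statement. A fragment from a hypothetical call site showing why the wrapper matters (want_private_class is illustrative, not from this commit):

    /* Without the do/while wrapper, the unbraced if/else below
     * would splice into the macro's own if/else and miscompile. */
    if (want_private_class)
            ZONE_LOCK_INIT(zone, 1);
    else
            ZONE_LOCK_INIT(zone, 0);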
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 0733662..b486cdb 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -159,7 +159,7 @@ vm_map_startup(void)
vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
uma_prealloc(mapzone, MAX_KMAP);
kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
- NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_MTXCLASS);
uma_prealloc(kmapentzone, MAX_KMAPENT);
mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
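Note: giving KMAP ENTRY its own lock class here is presumably the motivating use of the new flag: kernel map entries are allocated while the kernel map itself is being manipulated, so this zone's locks can nest against other UMA zone locks, and a private class lets witness(4) track that ordering separately instead of folding it into the shared "UMA zone" class.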