Diffstat (limited to 'sys/kern/kern_malloc.c')
-rw-r--r--	sys/kern/kern_malloc.c	57
1 file changed, 30 insertions(+), 27 deletions(-)
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index 658908c..f16d4d5 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -191,7 +191,7 @@ static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
"Maximum size of kernel memory");
-static u_int vm_kmem_size_scale;
+static u_int vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
"Scale factor for kernel memory size");
@@ -236,9 +236,8 @@ static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
-SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
+SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
&malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
-TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
&malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif
@@ -280,7 +279,7 @@ tunable_set_numzones(void)
numzones = MALLOC_DEBUG_MAXZONES;
}
SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
-SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN,
+SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
&numzones, 0, "Number of malloc uma subzones");
/*
@@ -693,8 +692,21 @@ CTASSERT(VM_KMEM_SIZE_SCALE >= 1);
void
kmeminit(void)
{
- u_long mem_size, tmp;
+ u_long mem_size;
+ u_long tmp;
+#ifdef VM_KMEM_SIZE
+ if (vm_kmem_size == 0)
+ vm_kmem_size = VM_KMEM_SIZE;
+#endif
+#ifdef VM_KMEM_SIZE_MIN
+ if (vm_kmem_size_min == 0)
+ vm_kmem_size_min = VM_KMEM_SIZE_MIN;
+#endif
+#ifdef VM_KMEM_SIZE_MAX
+ if (vm_kmem_size_max == 0)
+ vm_kmem_size_max = VM_KMEM_SIZE_MAX;
+#endif
/*
* Calculate the amount of kernel virtual address (KVA) space that is
* preallocated to the kmem arena. In order to support a wide range
@@ -713,38 +725,29 @@ kmeminit(void)
*/
mem_size = vm_cnt.v_page_count;
- vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
- TUNABLE_INT_FETCH("vm.kmem_size_scale", &vm_kmem_size_scale);
if (vm_kmem_size_scale < 1)
vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
- vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE;
-
-#if defined(VM_KMEM_SIZE_MIN)
- vm_kmem_size_min = VM_KMEM_SIZE_MIN;
-#endif
- TUNABLE_ULONG_FETCH("vm.kmem_size_min", &vm_kmem_size_min);
- if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
- vm_kmem_size = vm_kmem_size_min;
+ /*
+ * Check if we should use defaults for the "vm_kmem_size"
+ * variable:
+ */
+ if (vm_kmem_size == 0) {
+ vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE;
-#if defined(VM_KMEM_SIZE_MAX)
- vm_kmem_size_max = VM_KMEM_SIZE_MAX;
-#endif
- TUNABLE_ULONG_FETCH("vm.kmem_size_max", &vm_kmem_size_max);
- if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
- vm_kmem_size = vm_kmem_size_max;
+ if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
+ vm_kmem_size = vm_kmem_size_min;
+ if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
+ vm_kmem_size = vm_kmem_size_max;
+ }
/*
- * Alternatively, the amount of KVA space that is preallocated to the
+ * The amount of KVA space that is preallocated to the
* kmem arena can be set statically at compile-time or manually
* through the kernel environment. However, it is still limited to
* twice the physical memory size, which has been sufficient to handle
* the most severe cases of external fragmentation in the kmem arena.
*/
-#if defined(VM_KMEM_SIZE)
- vm_kmem_size = VM_KMEM_SIZE;
-#endif
- TUNABLE_ULONG_FETCH("vm.kmem_size", &vm_kmem_size);
if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
vm_kmem_size = 2 * mem_size * PAGE_SIZE;
@@ -811,7 +814,7 @@ mallocinit(void *dummy)
}
}
-SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, mallocinit, NULL);
+SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);
void
malloc_init(void *data)
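
Below is a minimal standalone sketch (plain C, not kernel code) of the sizing arithmetic that the reworked kmeminit() hunks above apply: if vm_kmem_size was not already set by a compile-time option or a loader tunable, derive it from the page count and scale factor, clamp it to the optional min/max bounds, and finally cap it at twice physical memory. The PAGE_SIZE, page count, and scale values here are made-up stand-ins, not FreeBSD defaults.

/*
 * Sketch of the kmem arena sizing logic after this change.
 * Example values only; on a real system the bounds come from
 * VM_KMEM_SIZE_MIN/MAX or the vm.kmem_size_min/max loader tunables.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL	/* assumed page size for the example */

int
main(void)
{
	unsigned long mem_size = 2097152;	/* pages: 8 GB of 4 KB pages (example) */
	unsigned long scale = 3;		/* stand-in for VM_KMEM_SIZE_SCALE */
	unsigned long kmem_size = 0;		/* 0 means "use the default" */
	unsigned long kmem_size_min = 0;	/* optional lower bound, 0 = unset */
	unsigned long kmem_size_max = 0;	/* optional upper bound, 0 = unset */

	/* Only compute a default when no explicit size was configured. */
	if (kmem_size == 0) {
		kmem_size = (mem_size / scale) * PAGE_SIZE;
		if (kmem_size_min > 0 && kmem_size < kmem_size_min)
			kmem_size = kmem_size_min;
		if (kmem_size_max > 0 && kmem_size >= kmem_size_max)
			kmem_size = kmem_size_max;
	}

	/* An explicit or derived size is still capped at twice physical memory. */
	if (kmem_size / 2 / PAGE_SIZE > mem_size)
		kmem_size = 2 * mem_size * PAGE_SIZE;

	printf("kmem arena size: %lu bytes\n", kmem_size);
	return (0);
}

Note also that the sysctl hunks above fold the loader tunables into the sysctl declarations themselves: CTLFLAG_RWTUN on debug.malloc.failure_rate replaces the separate TUNABLE_INT() line, and CTLFLAG_NOFETCH on debug.malloc.numzones keeps the generic tunable fetch from overriding the value set earlier by tunable_set_numzones().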