summaryrefslogtreecommitdiffstats
path: root/sys/vm
diff options
context:
space:
mode:
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/memguard.c     8
-rw-r--r--  sys/vm/redzone.c      3
-rw-r--r--  sys/vm/uma_core.c     3
-rw-r--r--  sys/vm/vm_init.c      3
-rw-r--r--  sys/vm/vm_map.c       3
-rw-r--r--  sys/vm/vm_mmap.c      3
-rw-r--r--  sys/vm/vm_page.c      3
-rw-r--r--  sys/vm/vm_radix.c     2
-rw-r--r--  sys/vm/vm_zeroidle.c  3
9 files changed, 11 insertions, 20 deletions
diff --git a/sys/vm/memguard.c b/sys/vm/memguard.c
index 1d3b412..d502ca5 100644
--- a/sys/vm/memguard.c
+++ b/sys/vm/memguard.c
@@ -67,7 +67,7 @@ static SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data");
* reserved for MemGuard.
*/
static u_int vm_memguard_divisor;
-SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RDTUN,
+SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
&vm_memguard_divisor,
0, "(kmem_size/memguard_divisor) == memguard submap size");
@@ -132,8 +132,7 @@ SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_pgs, CTLFLAG_RD,
#define MG_GUARD_ALLLARGE 0x002
#define MG_GUARD_NOFREE 0x004
static int memguard_options = MG_GUARD_AROUND;
-TUNABLE_INT("vm.memguard.options", &memguard_options);
-SYSCTL_INT(_vm_memguard, OID_AUTO, options, CTLFLAG_RW,
+SYSCTL_INT(_vm_memguard, OID_AUTO, options, CTLFLAG_RWTUN,
&memguard_options, 0,
"MemGuard options:\n"
"\t0x001 - add guard pages around each allocation\n"
@@ -149,8 +148,7 @@ SYSCTL_ULONG(_vm_memguard, OID_AUTO, minsize_reject, CTLFLAG_RD,
static u_int memguard_frequency;
static u_long memguard_frequency_hits;
-TUNABLE_INT("vm.memguard.frequency", &memguard_frequency);
-SYSCTL_UINT(_vm_memguard, OID_AUTO, frequency, CTLFLAG_RW,
+SYSCTL_UINT(_vm_memguard, OID_AUTO, frequency, CTLFLAG_RWTUN,
&memguard_frequency, 0, "Times in 100000 that MemGuard will randomly run");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, frequency_hits, CTLFLAG_RD,
&memguard_frequency_hits, 0, "# times MemGuard randomly chose");
diff --git a/sys/vm/redzone.c b/sys/vm/redzone.c
index e4b5f6c..a66a793 100644
--- a/sys/vm/redzone.c
+++ b/sys/vm/redzone.c
@@ -41,8 +41,7 @@ static u_long redzone_extra_mem = 0;
SYSCTL_ULONG(_vm_redzone, OID_AUTO, extra_mem, CTLFLAG_RD, &redzone_extra_mem,
0, "Extra memory allocated by redzone");
static int redzone_panic = 0;
-TUNABLE_INT("vm.redzone.panic", &redzone_panic);
-SYSCTL_INT(_vm_redzone, OID_AUTO, panic, CTLFLAG_RW, &redzone_panic, 0,
+SYSCTL_INT(_vm_redzone, OID_AUTO, panic, CTLFLAG_RWTUN, &redzone_panic, 0,
"Panic when buffer corruption is detected");
#define REDZONE_CHSIZE (16)
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 62f4912..81b714a 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -281,8 +281,7 @@ SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
static int zone_warnings = 1;
-TUNABLE_INT("vm.zone_warnings", &zone_warnings);
-SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RW, &zone_warnings, 0,
+SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
"Warn when UMA zones becomes full");
/*
diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c
index 30faa5a..be10387 100644
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -91,8 +91,7 @@ __FBSDID("$FreeBSD$");
long physmem;
static int exec_map_entries = 16;
-TUNABLE_INT("vm.exec_map_entries", &exec_map_entries);
-SYSCTL_INT(_vm, OID_AUTO, exec_map_entries, CTLFLAG_RD, &exec_map_entries, 0,
+SYSCTL_INT(_vm, OID_AUTO, exec_map_entries, CTLFLAG_RDTUN, &exec_map_entries, 0,
"Maximum number of simultaneous execs");
/*
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 62eb393..b5108e2 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -3471,8 +3471,7 @@ vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
}
static int stack_guard_page = 0;
-TUNABLE_INT("security.bsd.stack_guard_page", &stack_guard_page);
-SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RW,
+SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN,
&stack_guard_page, 0,
"Insert stack guard page ahead of the growable segments.");
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index a524839..e94090c 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -92,9 +92,8 @@ __FBSDID("$FreeBSD$");
#endif
int old_mlock = 0;
-SYSCTL_INT(_vm, OID_AUTO, old_mlock, CTLFLAG_RW | CTLFLAG_TUN, &old_mlock, 0,
+SYSCTL_INT(_vm, OID_AUTO, old_mlock, CTLFLAG_RWTUN, &old_mlock, 0,
"Do not apply RLIMIT_MEMLOCK on mlockall");
-TUNABLE_INT("vm.old_mlock", &old_mlock);
#ifdef MAP_32BIT
#define MAP_32BIT_MAX_ADDR ((vm_offset_t)1 << 31)
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 49c3ede..4e30a3f 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -134,8 +134,7 @@ long first_page;
int vm_page_zero_count;
static int boot_pages = UMA_BOOT_PAGES;
-TUNABLE_INT("vm.boot_pages", &boot_pages);
-SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
+SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RDTUN, &boot_pages, 0,
"number of pages allocated for bootstrapping the VM system");
static int pa_tryrelock_restart;
diff --git a/sys/vm/vm_radix.c b/sys/vm/vm_radix.c
index bb45ba0..4f4b6d2 100644
--- a/sys/vm/vm_radix.c
+++ b/sys/vm/vm_radix.c
@@ -302,7 +302,7 @@ vm_radix_reserve_kva(void *arg __unused)
sizeof(struct vm_radix_node))))
panic("%s: unable to reserve KVA", __func__);
}
-SYSINIT(vm_radix_reserve_kva, SI_SUB_KMEM, SI_ORDER_SECOND,
+SYSINIT(vm_radix_reserve_kva, SI_SUB_KMEM, SI_ORDER_THIRD,
vm_radix_reserve_kva, NULL);
#endif
diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c
index 458539e..dac4abe 100644
--- a/sys/vm/vm_zeroidle.c
+++ b/sys/vm/vm_zeroidle.c
@@ -55,10 +55,9 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_phys.h>
static int idlezero_enable_default = 0;
-TUNABLE_INT("vm.idlezero_enable", &idlezero_enable_default);
/* Defer setting the enable flag until the kthread is running. */
static int idlezero_enable = 0;
-SYSCTL_INT(_vm, OID_AUTO, idlezero_enable, CTLFLAG_RW, &idlezero_enable, 0,
+SYSCTL_INT(_vm, OID_AUTO, idlezero_enable, CTLFLAG_RWTUN, &idlezero_enable, 0,
"Allow the kernel to use idle cpu cycles to zero-out pages");
/*
* Implement the pre-zeroed page mechanism.
OpenPOWER on IntegriCloud