author      gjb <gjb@FreeBSD.org>    2014-06-27 22:05:21 +0000
committer   gjb <gjb@FreeBSD.org>    2014-06-27 22:05:21 +0000
commit      fc21f40567ac7485e9e987cf5a539bd0d11c7155 (patch)
tree        fce5301b062a855bc68b9cb76c6b5966c5a2acbe /sys/vm
parent      2f456747e010bfa5a9dd3498aa5650e0ade39f22 (diff)
Revert r267961, r267973:
These changes prevent sysctl(8) from returning proper output, such as:

1) no output from sysctl(8)
2) erroneously returning ENOMEM with tools like truss(1) or uname(1):
   truss: can not get etype: Cannot allocate memory
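Most hunks below replace the combined CTLFLAG_RWTUN/CTLFLAG_RDTUN flags from the reverted revisions with the older two-step idiom, in which the loader tunable and the run-time sysctl are registered separately. A minimal sketch of both forms follows, using a hypothetical vm.example_knob variable rather than any of the knobs actually touched in this diff:

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

static int example_knob = 0;

/*
 * Two-step idiom restored by this revert: the boot-time tunable and the
 * run-time sysctl are registered with separate macros.
 */
TUNABLE_INT("vm.example_knob", &example_knob);
SYSCTL_INT(_vm, OID_AUTO, example_knob, CTLFLAG_RW, &example_knob, 0,
    "Example knob (hypothetical, for illustration only)");

/*
 * Combined idiom from the reverted revisions: a single declaration whose
 * *TUN flag asks the sysctl code to fetch the loader tunable itself.
 * (Commented out here so the two forms do not register a duplicate OID.)
 *
 * SYSCTL_INT(_vm, OID_AUTO, example_knob, CTLFLAG_RWTUN, &example_knob, 0,
 *     "Example knob (hypothetical, for illustration only)");
 */

Either form lets the value be seeded at boot from /boot/loader.conf (for example, vm.example_knob="1") before it is exposed as a read-write or read-only sysctl at run time.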
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/memguard.c     8
-rw-r--r--  sys/vm/redzone.c      3
-rw-r--r--  sys/vm/uma_core.c     3
-rw-r--r--  sys/vm/vm_init.c      3
-rw-r--r--  sys/vm/vm_map.c       3
-rw-r--r--  sys/vm/vm_mmap.c      3
-rw-r--r--  sys/vm/vm_page.c      3
-rw-r--r--  sys/vm/vm_radix.c     2
-rw-r--r--  sys/vm/vm_zeroidle.c  3
9 files changed, 20 insertions, 11 deletions
diff --git a/sys/vm/memguard.c b/sys/vm/memguard.c
index d502ca5..1d3b412 100644
--- a/sys/vm/memguard.c
+++ b/sys/vm/memguard.c
@@ -67,7 +67,7 @@ static SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data");
* reserved for MemGuard.
*/
static u_int vm_memguard_divisor;
-SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
+SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RDTUN,
&vm_memguard_divisor,
0, "(kmem_size/memguard_divisor) == memguard submap size");
@@ -132,7 +132,8 @@ SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_pgs, CTLFLAG_RD,
#define MG_GUARD_ALLLARGE 0x002
#define MG_GUARD_NOFREE 0x004
static int memguard_options = MG_GUARD_AROUND;
-SYSCTL_INT(_vm_memguard, OID_AUTO, options, CTLFLAG_RWTUN,
+TUNABLE_INT("vm.memguard.options", &memguard_options);
+SYSCTL_INT(_vm_memguard, OID_AUTO, options, CTLFLAG_RW,
&memguard_options, 0,
"MemGuard options:\n"
"\t0x001 - add guard pages around each allocation\n"
@@ -148,7 +149,8 @@ SYSCTL_ULONG(_vm_memguard, OID_AUTO, minsize_reject, CTLFLAG_RD,
static u_int memguard_frequency;
static u_long memguard_frequency_hits;
-SYSCTL_UINT(_vm_memguard, OID_AUTO, frequency, CTLFLAG_RWTUN,
+TUNABLE_INT("vm.memguard.frequency", &memguard_frequency);
+SYSCTL_UINT(_vm_memguard, OID_AUTO, frequency, CTLFLAG_RW,
&memguard_frequency, 0, "Times in 100000 that MemGuard will randomly run");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, frequency_hits, CTLFLAG_RD,
&memguard_frequency_hits, 0, "# times MemGuard randomly chose");
diff --git a/sys/vm/redzone.c b/sys/vm/redzone.c
index a66a793..e4b5f6c 100644
--- a/sys/vm/redzone.c
+++ b/sys/vm/redzone.c
@@ -41,7 +41,8 @@ static u_long redzone_extra_mem = 0;
SYSCTL_ULONG(_vm_redzone, OID_AUTO, extra_mem, CTLFLAG_RD, &redzone_extra_mem,
0, "Extra memory allocated by redzone");
static int redzone_panic = 0;
-SYSCTL_INT(_vm_redzone, OID_AUTO, panic, CTLFLAG_RWTUN, &redzone_panic, 0,
+TUNABLE_INT("vm.redzone.panic", &redzone_panic);
+SYSCTL_INT(_vm_redzone, OID_AUTO, panic, CTLFLAG_RW, &redzone_panic, 0,
"Panic when buffer corruption is detected");
#define REDZONE_CHSIZE (16)
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 81b714a..62f4912 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -281,7 +281,8 @@ SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
static int zone_warnings = 1;
-SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
+TUNABLE_INT("vm.zone_warnings", &zone_warnings);
+SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RW, &zone_warnings, 0,
"Warn when UMA zones becomes full");
/*
diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c
index be10387..30faa5a 100644
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -91,7 +91,8 @@ __FBSDID("$FreeBSD$");
long physmem;
static int exec_map_entries = 16;
-SYSCTL_INT(_vm, OID_AUTO, exec_map_entries, CTLFLAG_RDTUN, &exec_map_entries, 0,
+TUNABLE_INT("vm.exec_map_entries", &exec_map_entries);
+SYSCTL_INT(_vm, OID_AUTO, exec_map_entries, CTLFLAG_RD, &exec_map_entries, 0,
"Maximum number of simultaneous execs");
/*
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index b5108e2..62eb393 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -3471,7 +3471,8 @@ vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
}
static int stack_guard_page = 0;
-SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN,
+TUNABLE_INT("security.bsd.stack_guard_page", &stack_guard_page);
+SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RW,
&stack_guard_page, 0,
"Insert stack guard page ahead of the growable segments.");
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index e94090c..a524839 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -92,8 +92,9 @@ __FBSDID("$FreeBSD$");
#endif
int old_mlock = 0;
-SYSCTL_INT(_vm, OID_AUTO, old_mlock, CTLFLAG_RWTUN, &old_mlock, 0,
+SYSCTL_INT(_vm, OID_AUTO, old_mlock, CTLFLAG_RW | CTLFLAG_TUN, &old_mlock, 0,
"Do not apply RLIMIT_MEMLOCK on mlockall");
+TUNABLE_INT("vm.old_mlock", &old_mlock);
#ifdef MAP_32BIT
#define MAP_32BIT_MAX_ADDR ((vm_offset_t)1 << 31)
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 4e30a3f..49c3ede 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -134,7 +134,8 @@ long first_page;
int vm_page_zero_count;
static int boot_pages = UMA_BOOT_PAGES;
-SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RDTUN, &boot_pages, 0,
+TUNABLE_INT("vm.boot_pages", &boot_pages);
+SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
"number of pages allocated for bootstrapping the VM system");
static int pa_tryrelock_restart;
diff --git a/sys/vm/vm_radix.c b/sys/vm/vm_radix.c
index 4f4b6d2..bb45ba0 100644
--- a/sys/vm/vm_radix.c
+++ b/sys/vm/vm_radix.c
@@ -302,7 +302,7 @@ vm_radix_reserve_kva(void *arg __unused)
sizeof(struct vm_radix_node))))
panic("%s: unable to reserve KVA", __func__);
}
-SYSINIT(vm_radix_reserve_kva, SI_SUB_KMEM, SI_ORDER_THIRD,
+SYSINIT(vm_radix_reserve_kva, SI_SUB_KMEM, SI_ORDER_SECOND,
vm_radix_reserve_kva, NULL);
#endif
diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c
index dac4abe..458539e 100644
--- a/sys/vm/vm_zeroidle.c
+++ b/sys/vm/vm_zeroidle.c
@@ -55,9 +55,10 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_phys.h>
static int idlezero_enable_default = 0;
+TUNABLE_INT("vm.idlezero_enable", &idlezero_enable_default);
/* Defer setting the enable flag until the kthread is running. */
static int idlezero_enable = 0;
-SYSCTL_INT(_vm, OID_AUTO, idlezero_enable, CTLFLAG_RWTUN, &idlezero_enable, 0,
+SYSCTL_INT(_vm, OID_AUTO, idlezero_enable, CTLFLAG_RW, &idlezero_enable, 0,
"Allow the kernel to use idle cpu cycles to zero-out pages");
/*
* Implement the pre-zeroed page mechanism.