diff options
author | mdf <mdf@FreeBSD.org> | 2012-07-15 20:29:48 +0000 |
---|---|---|
committer | mdf <mdf@FreeBSD.org> | 2012-07-15 20:29:48 +0000 |
commit | a42ef9b109ee5ee45250fc3bbaa770c3e62a0de4 (patch) | |
tree | 2a0f83e86809435c7447ffbd609f73783ac90342 /sys/vm/memguard.c | |
parent | 2357a49326f12023cbed9f16a7a74841c1e49b97 (diff) | |
download | FreeBSD-src-a42ef9b109ee5ee45250fc3bbaa770c3e62a0de4.zip FreeBSD-src-a42ef9b109ee5ee45250fc3bbaa770c3e62a0de4.tar.gz |
Fix a bug with memguard(9) on 32-bit architectures without a
VM_KMEM_MAX_SIZE.
The code was not taking into account the size of the kernel_map, which
the kmem_map is allocated from, so it could produce a sub-map size too
large to fit. The simplest solution is to ignore VM_KMEM_MAX entirely
and base the memguard map's size off the kernel_map's size, since this
is always relevant and always smaller.
Found by: Justin Hibbits
Diffstat (limited to 'sys/vm/memguard.c')
-rw-r--r-- | sys/vm/memguard.c | 26 |
1 file changed, 13 insertions, 13 deletions
diff --git a/sys/vm/memguard.c b/sys/vm/memguard.c
index 5a690e7..b1740c3 100644
--- a/sys/vm/memguard.c
+++ b/sys/vm/memguard.c
@@ -159,16 +159,18 @@ SYSCTL_ULONG(_vm_memguard, OID_AUTO, frequency_hits, CTLFLAG_RD,
  * the kmem_map.  The memguard memory will be a submap.
  */
 unsigned long
-memguard_fudge(unsigned long km_size, unsigned long km_max)
+memguard_fudge(unsigned long km_size, const struct vm_map *parent_map)
 {
-	u_long mem_pgs = cnt.v_page_count;
+	u_long mem_pgs, parent_size;
 
 	vm_memguard_divisor = 10;
 	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);
 
+	parent_size = vm_map_max(parent_map) - vm_map_min(parent_map) +
+	    PAGE_SIZE;
 	/* Pick a conservative value if provided value sucks. */
 	if ((vm_memguard_divisor <= 0) ||
-	    ((km_size / vm_memguard_divisor) == 0))
+	    ((parent_size / vm_memguard_divisor) == 0))
 		vm_memguard_divisor = 10;
 	/*
 	 * Limit consumption of physical pages to
@@ -177,21 +179,19 @@ memguard_fudge(unsigned long km_size, unsigned long km_max)
 	 * This prevents memguard's page promotions from completely
 	 * using up memory, since most malloc(9) calls are sub-page.
 	 */
+	mem_pgs = cnt.v_page_count;
 	memguard_physlimit = (mem_pgs / vm_memguard_divisor) * PAGE_SIZE;
 	/*
 	 * We want as much KVA as we can take safely.  Use at most our
-	 * allotted fraction of kmem_max.  Limit this to twice the
-	 * physical memory to avoid using too much memory as pagetable
-	 * pages.
+	 * allotted fraction of the parent map's size.  Limit this to
+	 * twice the physical memory to avoid using too much memory as
+	 * pagetable pages (size must be multiple of PAGE_SIZE).
 	 */
-	memguard_mapsize = km_max / vm_memguard_divisor;
-	/* size must be multiple of PAGE_SIZE */
-	memguard_mapsize = round_page(memguard_mapsize);
-	if (memguard_mapsize == 0 ||
-	    memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
+	memguard_mapsize = round_page(parent_size / vm_memguard_divisor);
+	if (memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
 		memguard_mapsize = mem_pgs * 2 * PAGE_SIZE;
-	if (km_max > 0 && km_size + memguard_mapsize > km_max)
-		return (km_max);
+	if (km_size + memguard_mapsize > parent_size)
+		memguard_mapsize = 0;
 	return (km_size + memguard_mapsize);
 }