author    | bdrewery <bdrewery@FreeBSD.org> | 2014-03-22 10:26:09 +0000
committer | bdrewery <bdrewery@FreeBSD.org> | 2014-03-22 10:26:09 +0000
commit    | 6fcf6199a4a9aefe9f2e59d947f0e0df171367b5 (patch)
tree      | 9a9561ab626d2c7b17fbd98a498de2fac743ab08 /sys/vm
parent    | e97a0e252b398acead00694bf1d357d9089128ad (diff)
Rename global cnt to vm_cnt to avoid shadowing.
To reduce the diff, the struct pcpu.cnt field was not renamed, so
PCPU_OP(cnt.field) is still used. pc_cnt and pcpu are also used in
kvm(3) and vmstat(8). The goal was to not affect externally used KPI.
Bump __FreeBSD_version in case some out-of-tree module/code relies on
the global cnt variable.
Exp-run revealed no ports using it directly.
No objection from: arch@
Sponsored by: EMC / Isilon Storage Division
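
For context, here is a minimal sketch of the shadowing problem and of why the per-CPU accessors are unaffected. The definitions below are simplified and hypothetical, not the actual FreeBSD headers (the real ones live in sys/sys/vmmeter.h and sys/sys/pcpu.h):

```c
/* Hypothetical, simplified sketch -- not the real kernel headers. */
struct vmmeter {
        unsigned int v_free_count;
        unsigned int v_wire_count;
        /* ... many more counters ... */
};

struct vmmeter vm_cnt;          /* was: struct vmmeter cnt; */

struct pcpu {
        struct vmmeter pc_cnt;  /* per-CPU vmmeter; untouched by the rename */
        /* ... */
};

extern struct pcpu *pcpup;

/*
 * The PCPU accessors paste a "pc_" prefix onto their argument, so
 * PCPU_INC(cnt.v_wire_count) expands to pcpup->pc_cnt.v_wire_count++
 * and keeps working even though the global is now named vm_cnt.
 */
#define PCPU_INC(member)        ((pcpup->pc_ ## member)++)

static void
example(void)
{
        int cnt;                /* a local "cnt" no longer shadows the global */

        for (cnt = 0; cnt < 4; cnt++)
                PCPU_INC(cnt.v_wire_count);
        vm_cnt.v_free_count++;  /* unambiguous reference to the global */
}
```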
Diffstat (limited to 'sys/vm')
-rw-r--r-- | sys/vm/memguard.c    |  2
-rw-r--r-- | sys/vm/swap_pager.c  |  6
-rw-r--r-- | sys/vm/vm_glue.c     |  2
-rw-r--r-- | sys/vm/vm_map.c      |  2
-rw-r--r-- | sys/vm/vm_meter.c    | 24
-rw-r--r-- | sys/vm/vm_mmap.c     |  2
-rw-r--r-- | sys/vm/vm_object.c   |  2
-rw-r--r-- | sys/vm/vm_page.c     | 88
-rw-r--r-- | sys/vm/vm_pageout.c  | 50
-rw-r--r-- | sys/vm/vm_param.h    | 14
-rw-r--r-- | sys/vm/vm_phys.c     |  2
-rw-r--r-- | sys/vm/vm_phys.h     |  2
-rw-r--r-- | sys/vm/vm_radix.c    |  2
-rw-r--r-- | sys/vm/vm_zeroidle.c |  6
-rw-r--r-- | sys/vm/vnode_pager.c |  3
15 files changed, 104 insertions, 103 deletions
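
Most of the mechanical churn in the hunks below is in the page-availability test that vm_page_alloc(), vm_page_alloc_contig(), and vm_page_alloc_freelist() each repeat. Restated as a standalone predicate, the rule those hunks preserve is roughly the following sketch (simplified types and a hypothetical can_alloc() helper, not the kernel's exact code):

```c
/* Simplified stand-ins for the vmmeter fields the test reads. */
struct vmmeter {
        unsigned int v_free_count, v_cache_count;
        unsigned int v_free_reserved, v_interrupt_free_min;
};

enum req_class { VM_ALLOC_NORMAL, VM_ALLOC_SYSTEM, VM_ALLOC_INTERRUPT };

/*
 * Each request class may dig deeper into the free-page pool: normal
 * allocations must leave v_free_reserved pages behind, VM_ALLOC_SYSTEM
 * may go down to v_interrupt_free_min, and VM_ALLOC_INTERRUPT may take
 * the last page.
 */
static int
can_alloc(const struct vmmeter *vmc, enum req_class rclass,
    unsigned int npages)
{
        unsigned int avail = vmc->v_free_count + vmc->v_cache_count;

        return (avail >= npages + vmc->v_free_reserved ||
            (rclass == VM_ALLOC_SYSTEM &&
            avail >= npages + vmc->v_interrupt_free_min) ||
            (rclass == VM_ALLOC_INTERRUPT && avail >= npages));
}
```

For the single-page allocators npages is 1, so avail >= 1 + v_free_reserved is the same test as the strict inequality avail > v_free_reserved seen in vm_page_alloc().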
diff --git a/sys/vm/memguard.c b/sys/vm/memguard.c
index 167c223..1d3b412 100644
--- a/sys/vm/memguard.c
+++ b/sys/vm/memguard.c
@@ -181,7 +181,7 @@ memguard_fudge(unsigned long km_size, const struct vm_map *parent_map)
 	 * This prevents memguard's page promotions from completely
 	 * using up memory, since most malloc(9) calls are sub-page.
 	 */
-	mem_pgs = cnt.v_page_count;
+	mem_pgs = vm_cnt.v_page_count;
 	memguard_physlimit = (mem_pgs / vm_memguard_divisor) * PAGE_SIZE;
 	/*
 	 * We want as much KVA as we can take safely. Use at most our
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 861de94..661b2b6 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -203,7 +203,7 @@ swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred)
 	mtx_lock(&sw_dev_mtx);
 	r = swap_reserved + incr;
 	if (overcommit & SWAP_RESERVE_ALLOW_NONWIRED) {
-		s = cnt.v_page_count - cnt.v_free_reserved - cnt.v_wire_count;
+		s = vm_cnt.v_page_count - vm_cnt.v_free_reserved - vm_cnt.v_wire_count;
 		s *= PAGE_SIZE;
 	} else
 		s = 0;
@@ -545,7 +545,7 @@ swap_pager_swap_init(void)
 	 * can hold 16 pages, so this is probably overkill. This reservation
 	 * is typically limited to around 32MB by default.
 	 */
-	n = cnt.v_page_count / 2;
+	n = vm_cnt.v_page_count / 2;
 	if (maxswzone && n > maxswzone / sizeof(struct swblock))
 		n = maxswzone / sizeof(struct swblock);
 	n2 = n;
@@ -2316,7 +2316,7 @@ swapoff_one(struct swdevt *sp, struct ucred *cred)
 	 * of data we will have to page back in, plus an epsilon so
 	 * the system doesn't become critically low on swap space.
 	 */
-	if (cnt.v_free_count + cnt.v_cache_count + swap_pager_avail <
+	if (vm_cnt.v_free_count + vm_cnt.v_cache_count + swap_pager_avail <
 	    nblks + nswap_lowat) {
 		return (ENOMEM);
 	}
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 4512039..7d00097 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -197,7 +197,7 @@ vslock(void *addr, size_t len)
 	 * Also, the sysctl code, which is the only present user
 	 * of vslock(), does a hard loop on EAGAIN.
 	 */
-	if (npages + cnt.v_wire_count > vm_page_max_wired)
+	if (npages + vm_cnt.v_wire_count > vm_page_max_wired)
 		return (EAGAIN);
 #endif
 	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 34807ab..a6a500d 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1840,7 +1840,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
 	 * free pages allocating pv entries.
 	 */
 	if ((flags & MAP_PREFAULT_MADVISE) &&
-	    cnt.v_free_count < cnt.v_free_reserved) {
+	    vm_cnt.v_free_count < vm_cnt.v_free_reserved) {
 		psize = tmpidx;
 		break;
 	}
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 713a2be..4ec2613 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -53,24 +53,24 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_object.h>
 #include <sys/sysctl.h>
 
-struct vmmeter cnt;
+struct vmmeter vm_cnt;
 
 SYSCTL_UINT(_vm, VM_V_FREE_MIN, v_free_min,
-	CTLFLAG_RW, &cnt.v_free_min, 0, "Minimum low-free-pages threshold");
+	CTLFLAG_RW, &vm_cnt.v_free_min, 0, "Minimum low-free-pages threshold");
 SYSCTL_UINT(_vm, VM_V_FREE_TARGET, v_free_target,
-	CTLFLAG_RW, &cnt.v_free_target, 0, "Desired free pages");
+	CTLFLAG_RW, &vm_cnt.v_free_target, 0, "Desired free pages");
 SYSCTL_UINT(_vm, VM_V_FREE_RESERVED, v_free_reserved,
-	CTLFLAG_RW, &cnt.v_free_reserved, 0, "Pages reserved for deadlock");
+	CTLFLAG_RW, &vm_cnt.v_free_reserved, 0, "Pages reserved for deadlock");
 SYSCTL_UINT(_vm, VM_V_INACTIVE_TARGET, v_inactive_target,
-	CTLFLAG_RW, &cnt.v_inactive_target, 0, "Pages desired inactive");
+	CTLFLAG_RW, &vm_cnt.v_inactive_target, 0, "Pages desired inactive");
 SYSCTL_UINT(_vm, VM_V_CACHE_MIN, v_cache_min,
-	CTLFLAG_RW, &cnt.v_cache_min, 0, "Min pages on cache queue");
+	CTLFLAG_RW, &vm_cnt.v_cache_min, 0, "Min pages on cache queue");
 SYSCTL_UINT(_vm, VM_V_CACHE_MAX, v_cache_max,
-	CTLFLAG_RW, &cnt.v_cache_max, 0, "Max pages on cache queue");
+	CTLFLAG_RW, &vm_cnt.v_cache_max, 0, "Max pages on cache queue");
 SYSCTL_UINT(_vm, VM_V_PAGEOUT_FREE_MIN, v_pageout_free_min,
-	CTLFLAG_RW, &cnt.v_pageout_free_min, 0, "Min pages reserved for kernel");
+	CTLFLAG_RW, &vm_cnt.v_pageout_free_min, 0, "Min pages reserved for kernel");
 SYSCTL_UINT(_vm, OID_AUTO, v_free_severe,
-	CTLFLAG_RW, &cnt.v_free_severe, 0, "Severe page depletion point");
+	CTLFLAG_RW, &vm_cnt.v_free_severe, 0, "Severe page depletion point");
 
 static int
 sysctl_vm_loadavg(SYSCTL_HANDLER_ARGS)
@@ -231,7 +231,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
 		}
 	}
 	mtx_unlock(&vm_object_list_mtx);
-	total.t_free = cnt.v_free_count + cnt.v_cache_count;
+	total.t_free = vm_cnt.v_free_count + vm_cnt.v_cache_count;
 	return (sysctl_handle_opaque(oidp, &total, sizeof(total), req));
 }
 
@@ -251,7 +251,7 @@ static int
 vcnt(SYSCTL_HANDLER_ARGS)
 {
 	int count = *(int *)arg1;
-	int offset = (char *)arg1 - (char *)&cnt;
+	int offset = (char *)arg1 - (char *)&vm_cnt;
 	int i;
 
 	CPU_FOREACH(i) {
@@ -273,7 +273,7 @@ SYSCTL_NODE(_vm_stats, OID_AUTO, misc, CTLFLAG_RW, 0, "VM meter misc stats");
 
 #define	VM_STATS(parent, var, descr) \
 	SYSCTL_PROC(parent, OID_AUTO, var, \
-	    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, &cnt.var, 0, vcnt, \
+	    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, &vm_cnt.var, 0, vcnt, \
 	    "IU", descr)
 
 #define	VM_STATS_VM(var, descr)	VM_STATS(_vm_stats_vm, var, descr)
 #define	VM_STATS_SYS(var, descr)	VM_STATS(_vm_stats_sys, var, descr)
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 272491e..6a9f29b 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -1090,7 +1090,7 @@ vm_mlock(struct proc *proc, struct ucred *cred, const void *addr0, size_t len)
 		return (ENOMEM);
 	}
 	PROC_UNLOCK(proc);
-	if (npages + cnt.v_wire_count > vm_page_max_wired)
+	if (npages + vm_cnt.v_wire_count > vm_page_max_wired)
 		return (EAGAIN);
 #ifdef RACCT
 	PROC_LOCK(proc);
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 1066a9d..b428219 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -1957,7 +1957,7 @@ again:
 				    ("inconsistent wire count %d %d %p",
 				    p->wire_count, wirings, p));
 				p->wire_count = 0;
-				atomic_subtract_int(&cnt.v_wire_count, 1);
+				atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 			}
 		}
 		vm_page_free(p);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index d881dc0..26027c1 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -209,9 +209,9 @@ vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
 void
 vm_set_page_size(void)
 {
-	if (cnt.v_page_size == 0)
-		cnt.v_page_size = PAGE_SIZE;
-	if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
+	if (vm_cnt.v_page_size == 0)
+		vm_cnt.v_page_size = PAGE_SIZE;
+	if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
 		panic("vm_set_page_size: page size not a power of two");
 }
 
@@ -254,11 +254,11 @@ vm_page_domain_init(struct vm_domain *vmd)
 	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
 	    "vm inactive pagequeue";
 	*__DECONST(int **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_vcnt) =
-	    &cnt.v_inactive_count;
+	    &vm_cnt.v_inactive_count;
 	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
 	    "vm active pagequeue";
 	*__DECONST(int **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_vcnt) =
-	    &cnt.v_active_count;
+	    &vm_cnt.v_active_count;
 	vmd->vmd_page_count = 0;
 	vmd->vmd_free_count = 0;
 	vmd->vmd_segs = 0;
@@ -452,8 +452,8 @@ vm_page_startup(vm_offset_t vaddr)
 	 * Add every available physical page that is not blacklisted to
 	 * the free lists.
 	 */
-	cnt.v_page_count = 0;
-	cnt.v_free_count = 0;
+	vm_cnt.v_page_count = 0;
+	vm_cnt.v_free_count = 0;
 	list = getenv("vm.blacklist");
 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
 		pa = phys_avail[i];
@@ -1339,7 +1339,7 @@ vm_page_cache_remove(vm_page_t m)
 	    ("vm_page_cache_remove: page %p is not cached", m));
 	vm_radix_remove(&m->object->cache, m->pindex);
 	m->object = NULL;
-	cnt.v_cache_count--;
+	vm_cnt.v_cache_count--;
 }
 
 /*
@@ -1482,11 +1482,11 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 	 * vm_page_cache().
 	 */
 	mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE);
-	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
+	if (vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_free_reserved ||
 	    (req_class == VM_ALLOC_SYSTEM &&
-	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
+	    vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_interrupt_free_min) ||
 	    (req_class == VM_ALLOC_INTERRUPT &&
-	    cnt.v_free_count + cnt.v_cache_count > 0)) {
+	    vm_cnt.v_free_count + vm_cnt.v_cache_count > 0)) {
 		/*
 		 * Allocate from the free queue if the number of free pages
 		 * exceeds the minimum for the request class.
@@ -1557,7 +1557,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 		KASSERT(m->valid != 0,
 		    ("vm_page_alloc: cached page %p is invalid", m));
 		if (m->object == object && m->pindex == pindex)
-			cnt.v_reactivated++;
+			vm_cnt.v_reactivated++;
 		else
 			m->valid = 0;
 		m_object = m->object;
@@ -1597,7 +1597,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 		 * The page lock is not required for wiring a page until that
 		 * page is inserted into the object.
 		 */
-		atomic_add_int(&cnt.v_wire_count, 1);
+		atomic_add_int(&vm_cnt.v_wire_count, 1);
 		m->wire_count = 1;
 	}
 	m->act_count = 0;
@@ -1609,7 +1609,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 			vdrop(vp);
 			pagedaemon_wakeup();
 			if (req & VM_ALLOC_WIRED) {
-				atomic_subtract_int(&cnt.v_wire_count, 1);
+				atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 				m->wire_count = 0;
 			}
 			m->object = NULL;
@@ -1725,11 +1725,11 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
 	SLIST_INIT(&deferred_vdrop_list);
 	mtx_lock(&vm_page_queue_free_mtx);
-	if (cnt.v_free_count + cnt.v_cache_count >= npages +
-	    cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
-	    cnt.v_free_count + cnt.v_cache_count >= npages +
-	    cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT &&
-	    cnt.v_free_count + cnt.v_cache_count >= npages)) {
+	if (vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages +
+	    vm_cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
+	    vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages +
+	    vm_cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT &&
+	    vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages)) {
 #if VM_NRESERVLEVEL > 0
 retry:
 		if (object == NULL || (object->flags & OBJ_COLORED) == 0 ||
@@ -1776,7 +1776,7 @@ retry:
 		if ((req & VM_ALLOC_NODUMP) != 0)
 			flags |= PG_NODUMP;
 		if ((req & VM_ALLOC_WIRED) != 0)
-			atomic_add_int(&cnt.v_wire_count, npages);
+			atomic_add_int(&vm_cnt.v_wire_count, npages);
 		if (object != NULL) {
 			if (object->memattr != VM_MEMATTR_DEFAULT &&
 			    memattr == VM_MEMATTR_DEFAULT)
@@ -1803,7 +1803,7 @@ retry:
 			if (vm_paging_needed())
 				pagedaemon_wakeup();
 			if ((req & VM_ALLOC_WIRED) != 0)
-				atomic_subtract_int(&cnt.v_wire_count,
+				atomic_subtract_int(&vm_cnt.v_wire_count,
 				    npages);
 			for (m_tmp = m, m = m_ret; m < &m_ret[npages]; m++) {
@@ -1916,11 +1916,11 @@ vm_page_alloc_freelist(int flind, int req)
 	 * Do not allocate reserved pages unless the req has asked for it.
 	 */
 	mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE);
-	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
+	if (vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_free_reserved ||
 	    (req_class == VM_ALLOC_SYSTEM &&
-	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
+	    vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_interrupt_free_min) ||
 	    (req_class == VM_ALLOC_INTERRUPT &&
-	    cnt.v_free_count + cnt.v_cache_count > 0))
+	    vm_cnt.v_free_count + vm_cnt.v_cache_count > 0))
 		m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
 	else {
 		mtx_unlock(&vm_page_queue_free_mtx);
@@ -1949,7 +1949,7 @@ vm_page_alloc_freelist(int flind, int req)
 		 * The page lock is not required for wiring a page that does
 		 * not belong to an object.
 		 */
-		atomic_add_int(&cnt.v_wire_count, 1);
+		atomic_add_int(&vm_cnt.v_wire_count, 1);
 		m->wire_count = 1;
 	}
 	/* Unmanaged pages don't use "act_count". */
@@ -1981,7 +1981,7 @@ vm_wait(void)
 			vm_pages_needed = 1;
 			wakeup(&vm_pages_needed);
 		}
-		msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
+		msleep(&vm_cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
 		    "vmwait", 0);
 	}
 }
@@ -2005,7 +2005,7 @@ vm_waitpfault(void)
 		vm_pages_needed = 1;
 		wakeup(&vm_pages_needed);
 	}
-	msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
+	msleep(&vm_cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
 	    "pfault", 0);
 }
@@ -2172,7 +2172,7 @@ vm_page_free_wakeup(void)
 	 * some free.
 	 */
 	if (vm_pageout_pages_needed &&
-	    cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
+	    vm_cnt.v_cache_count + vm_cnt.v_free_count >= vm_cnt.v_pageout_free_min) {
 		wakeup(&vm_pageout_pages_needed);
 		vm_pageout_pages_needed = 0;
 	}
@@ -2183,7 +2183,7 @@ vm_page_free_wakeup(void)
 	 */
 	if (vm_pages_needed && !vm_page_count_min()) {
 		vm_pages_needed = 0;
-		wakeup(&cnt.v_free_count);
+		wakeup(&vm_cnt.v_free_count);
 	}
 }
 
@@ -2204,7 +2204,7 @@ vm_page_cache_turn_free(vm_page_t m)
 	KASSERT((m->flags & PG_CACHED) != 0,
 	    ("vm_page_cache_turn_free: page %p is not cached", m));
 	m->flags &= ~PG_CACHED;
-	cnt.v_cache_count--;
+	vm_cnt.v_cache_count--;
 	vm_phys_freecnt_adj(m, 1);
 }
 
@@ -2319,7 +2319,7 @@ vm_page_wire(vm_page_t m)
 		    m->queue == PQ_NONE,
 		    ("vm_page_wire: unmanaged page %p is queued", m));
 		vm_page_remque(m);
-		atomic_add_int(&cnt.v_wire_count, 1);
+		atomic_add_int(&vm_cnt.v_wire_count, 1);
 	}
 	m->wire_count++;
 	KASSERT(m->wire_count != 0,
 	    ("vm_page_wire: wire_count overflow m=%p", m));
@@ -2355,7 +2355,7 @@ vm_page_unwire(vm_page_t m, int activate)
 	if (m->wire_count > 0) {
 		m->wire_count--;
 		if (m->wire_count == 0) {
-			atomic_subtract_int(&cnt.v_wire_count, 1);
+			atomic_subtract_int(&vm_cnt.v_wire_count, 1);
 			if ((m->oflags & VPO_UNMANAGED) != 0 ||
 			    m->object == NULL)
 				return;
@@ -2552,7 +2552,7 @@ vm_page_cache(vm_page_t m)
 	cache_was_empty = vm_radix_is_singleton(&object->cache);
 	m->flags |= PG_CACHED;
-	cnt.v_cache_count++;
+	vm_cnt.v_cache_count++;
 	PCPU_INC(cnt.v_tcached);
 #if VM_NRESERVLEVEL > 0
 	if (!vm_reserv_free_page(m)) {
@@ -3116,16 +3116,16 @@ vm_page_object_lock_assert(vm_page_t m)
 DB_SHOW_COMMAND(page, vm_page_print_page_info)
 {
-	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
-	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
-	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
-	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
-	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
-	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
-	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
-	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
-	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
-	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
+	db_printf("vm_cnt.v_free_count: %d\n", vm_cnt.v_free_count);
+	db_printf("vm_cnt.v_cache_count: %d\n", vm_cnt.v_cache_count);
+	db_printf("vm_cnt.v_inactive_count: %d\n", vm_cnt.v_inactive_count);
+	db_printf("vm_cnt.v_active_count: %d\n", vm_cnt.v_active_count);
+	db_printf("vm_cnt.v_wire_count: %d\n", vm_cnt.v_wire_count);
+	db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved);
+	db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min);
+	db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target);
+	db_printf("vm_cnt.v_cache_min: %d\n", vm_cnt.v_cache_min);
+	db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target);
 }
 
 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
@@ -3133,7 +3133,7 @@ DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
 	int dom;
 
 	db_printf("pq_free %d pq_cache %d\n",
-	    cnt.v_free_count, cnt.v_cache_count);
+	    vm_cnt.v_free_count, vm_cnt.v_cache_count);
 	for (dom = 0; dom < vm_ndomains; dom++) {
 		db_printf(
 	"dom %d page_cnt %d free %d pq_act %d pq_inact %d pass %d\n",
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 52d6abc..7f80115 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -678,9 +678,9 @@ vm_pageout_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high)
 	initial_dom = atomic_fetchadd_int(&start_dom, 1) % vm_ndomains;
 	inactl = 0;
-	inactmax = cnt.v_inactive_count;
+	inactmax = vm_cnt.v_inactive_count;
 	actl = 0;
-	actmax = tries < 2 ? 0 : cnt.v_active_count;
+	actmax = tries < 2 ? 0 : vm_cnt.v_active_count;
 	dom = initial_dom;
 
 	/*
@@ -1310,7 +1310,7 @@ relock_queues:
 	 * Compute the number of pages we want to try to move from the
 	 * active queue to the inactive queue.
 	 */
-	page_shortage = cnt.v_inactive_target - cnt.v_inactive_count +
+	page_shortage = vm_cnt.v_inactive_target - vm_cnt.v_inactive_count +
 	    vm_paging_target() + deficit + addl_page_shortage;
 
 	pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
@@ -1576,7 +1576,7 @@ vm_pageout_oom(int shortage)
 		killproc(bigproc, "out of swap space");
 		sched_nice(bigproc, PRIO_MIN);
 		PROC_UNLOCK(bigproc);
-		wakeup(&cnt.v_free_count);
+		wakeup(&vm_cnt.v_free_count);
 	}
 }
 
@@ -1612,7 +1612,7 @@ vm_pageout_worker(void *arg)
 		if (vm_pages_needed && !vm_page_count_min()) {
 			if (!vm_paging_needed())
 				vm_pages_needed = 0;
-			wakeup(&cnt.v_free_count);
+			wakeup(&vm_cnt.v_free_count);
 		}
 		if (vm_pages_needed) {
 			/*
@@ -1635,7 +1635,7 @@ vm_pageout_worker(void *arg)
 		}
 		if (vm_pages_needed) {
-			cnt.v_pdwakeups++;
+			vm_cnt.v_pdwakeups++;
 			domain->vmd_pass++;
 		}
 		mtx_unlock(&vm_page_queue_free_mtx);
@@ -1656,8 +1656,8 @@ vm_pageout(void)
 	/*
 	 * Initialize some paging parameters.
 	 */
-	cnt.v_interrupt_free_min = 2;
-	if (cnt.v_page_count < 2000)
+	vm_cnt.v_interrupt_free_min = 2;
+	if (vm_cnt.v_page_count < 2000)
 		vm_pageout_page_count = 8;
 
 	/*
@@ -1665,27 +1665,27 @@ vm_pageout(void)
 	 * swap pager structures plus enough for any pv_entry structs
 	 * when paging.
 	 */
-	if (cnt.v_page_count > 1024)
-		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
+	if (vm_cnt.v_page_count > 1024)
+		vm_cnt.v_free_min = 4 + (vm_cnt.v_page_count - 1024) / 200;
 	else
-		cnt.v_free_min = 4;
-	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
-	    cnt.v_interrupt_free_min;
-	cnt.v_free_reserved = vm_pageout_page_count +
-	    cnt.v_pageout_free_min + (cnt.v_page_count / 768);
-	cnt.v_free_severe = cnt.v_free_min / 2;
-	cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
-	cnt.v_free_min += cnt.v_free_reserved;
-	cnt.v_free_severe += cnt.v_free_reserved;
-	cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
-	if (cnt.v_inactive_target > cnt.v_free_count / 3)
-		cnt.v_inactive_target = cnt.v_free_count / 3;
+		vm_cnt.v_free_min = 4;
+	vm_cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
+	    vm_cnt.v_interrupt_free_min;
+	vm_cnt.v_free_reserved = vm_pageout_page_count +
+	    vm_cnt.v_pageout_free_min + (vm_cnt.v_page_count / 768);
+	vm_cnt.v_free_severe = vm_cnt.v_free_min / 2;
+	vm_cnt.v_free_target = 4 * vm_cnt.v_free_min + vm_cnt.v_free_reserved;
+	vm_cnt.v_free_min += vm_cnt.v_free_reserved;
+	vm_cnt.v_free_severe += vm_cnt.v_free_reserved;
+	vm_cnt.v_inactive_target = (3 * vm_cnt.v_free_target) / 2;
+	if (vm_cnt.v_inactive_target > vm_cnt.v_free_count / 3)
+		vm_cnt.v_inactive_target = vm_cnt.v_free_count / 3;
 
 	/*
 	 * Set the default wakeup threshold to be 10% above the minimum
 	 * page limit. This keeps the steady state out of shortfall.
 	 */
-	vm_pageout_wakeup_thresh = (cnt.v_free_min / 10) * 11;
+	vm_pageout_wakeup_thresh = (vm_cnt.v_free_min / 10) * 11;
 
 	/*
 	 * Set interval in seconds for active scan. We want to visit each
@@ -1697,7 +1697,7 @@ vm_pageout(void)
 
 	/* XXX does not really belong here */
 	if (vm_page_max_wired == 0)
-		vm_page_max_wired = cnt.v_free_count / 3;
+		vm_page_max_wired = vm_cnt.v_free_count / 3;
 
 	swap_pager_swap_init();
 #if MAXMEMDOM > 1
@@ -1716,7 +1716,7 @@ vm_pageout(void)
 /*
  * Unless the free page queue lock is held by the caller, this function
  * should be regarded as advisory.  Specifically, the caller should
- * not msleep() on &cnt.v_free_count following this function unless
+ * not msleep() on &vm_cnt.v_free_count following this function unless
 * the free page queue lock is held until the msleep() is performed.
 */
 void
diff --git a/sys/vm/vm_param.h b/sys/vm/vm_param.h
index d19d1b9..38456de 100644
--- a/sys/vm/vm_param.h
+++ b/sys/vm/vm_param.h
@@ -75,13 +75,13 @@
 #define	VM_TOTAL		1	/* struct vmtotal */
 #define	VM_METER		VM_TOTAL	/* deprecated, use VM_TOTAL */
 #define	VM_LOADAVG		2	/* struct loadavg */
-#define	VM_V_FREE_MIN		3	/* cnt.v_free_min */
-#define	VM_V_FREE_TARGET	4	/* cnt.v_free_target */
-#define	VM_V_FREE_RESERVED	5	/* cnt.v_free_reserved */
-#define	VM_V_INACTIVE_TARGET	6	/* cnt.v_inactive_target */
-#define	VM_V_CACHE_MIN		7	/* cnt.v_cache_min */
-#define	VM_V_CACHE_MAX		8	/* cnt.v_cache_max */
-#define	VM_V_PAGEOUT_FREE_MIN	9	/* cnt.v_pageout_free_min */
+#define	VM_V_FREE_MIN		3	/* vm_cnt.v_free_min */
+#define	VM_V_FREE_TARGET	4	/* vm_cnt.v_free_target */
+#define	VM_V_FREE_RESERVED	5	/* vm_cnt.v_free_reserved */
+#define	VM_V_INACTIVE_TARGET	6	/* vm_cnt.v_inactive_target */
+#define	VM_V_CACHE_MIN		7	/* vm_cnt.v_cache_min */
+#define	VM_V_CACHE_MAX		8	/* vm_cnt.v_cache_max */
+#define	VM_V_PAGEOUT_FREE_MIN	9	/* vm_cnt.v_pageout_free_min */
 #define	VM_OBSOLETE_10		10	/* pageout algorithm */
 #define	VM_SWAPPING_ENABLED	11	/* swapping enabled */
 #define	VM_MAXID		12	/* number of valid vm ids */
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
index 89d95c6..d8fe232 100644
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -383,7 +383,7 @@ vm_phys_add_page(vm_paddr_t pa)
 	vm_page_t m;
 	struct vm_domain *vmd;
 
-	cnt.v_page_count++;
+	vm_cnt.v_page_count++;
 	m = vm_phys_paddr_to_vm_page(pa);
 	m->phys_addr = pa;
 	m->queue = PQ_NONE;
diff --git a/sys/vm/vm_phys.h b/sys/vm/vm_phys.h
index f39943c..6d94e07 100644
--- a/sys/vm/vm_phys.h
+++ b/sys/vm/vm_phys.h
@@ -113,7 +113,7 @@ vm_phys_freecnt_adj(vm_page_t m, int adj)
 {
 
 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	cnt.v_free_count += adj;
+	vm_cnt.v_free_count += adj;
 	vm_phys_domain(m)->vmd_free_count += adj;
 }
 
diff --git a/sys/vm/vm_radix.c b/sys/vm/vm_radix.c
index 7c6f7fa..bb45ba0 100644
--- a/sys/vm/vm_radix.c
+++ b/sys/vm/vm_radix.c
@@ -298,7 +298,7 @@ vm_radix_reserve_kva(void *arg __unused)
 	 * are needed to store them.
 	 */
 	if (!uma_zone_reserve_kva(vm_radix_node_zone,
-	    ((vm_paddr_t)cnt.v_page_count * PAGE_SIZE) / (PAGE_SIZE +
+	    ((vm_paddr_t)vm_cnt.v_page_count * PAGE_SIZE) / (PAGE_SIZE +
 	    sizeof(struct vm_radix_node))))
 		panic("%s: unable to reserve KVA", __func__);
 }
diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c
index 8c191c0..458539e 100644
--- a/sys/vm/vm_zeroidle.c
+++ b/sys/vm/vm_zeroidle.c
@@ -84,9 +84,9 @@ vm_page_zero_check(void)
 	 * fast sleeps. We also do not want to be continuously zeroing
 	 * pages because doing so may flush our L1 and L2 caches too much.
 	 */
-	if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
+	if (zero_state && vm_page_zero_count >= ZIDLE_LO(vm_cnt.v_free_count))
 		return (0);
-	if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
+	if (vm_page_zero_count >= ZIDLE_HI(vm_cnt.v_free_count))
 		return (0);
 	return (1);
 }
@@ -98,7 +98,7 @@ vm_page_zero_idle(void)
 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
 	zero_state = 0;
 	if (vm_phys_zero_pages_idle()) {
-		if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
+		if (vm_page_zero_count >= ZIDLE_HI(vm_cnt.v_free_count))
 			zero_state = 1;
 	}
 }
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 5e50979..8058bdb 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -1026,7 +1026,8 @@ vnode_pager_putpages(vm_object_t object, vm_page_t *m, int count,
 	 * daemon up. This should be probably be addressed XXX.
 	 */
-	if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
+	if ((vm_cnt.v_free_count + vm_cnt.v_cache_count) <
+	    vm_cnt.v_pageout_free_min)
 		sync |= OBJPC_SYNC;
 
 	/*
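
A closing note on the vm_meter.c hunk above: vcnt() treats arg1 as a pointer to one field of the global vmmeter, computes that field's byte offset from the base of the struct, and sums the per-CPU copies found at the same offset. That is why arg1 had to become &vm_cnt.var while the per-CPU pc_cnt field could keep its name. A simplified userland sketch of the same offset trick (hypothetical names such as sum_counter and pcpu_cnt, not the verbatim kernel code):

```c
#include <stddef.h>

#define NCPU    4                       /* hypothetical CPU count */

struct vmmeter {
        unsigned int v_free_count;
        unsigned int v_wire_count;
        /* ... */
};

static struct vmmeter vm_cnt;           /* system-wide totals */
static struct vmmeter pcpu_cnt[NCPU];   /* stand-ins for each CPU's pc_cnt */

/*
 * Sum one global counter with every CPU's private copy, locating the
 * per-CPU copy purely by its byte offset within struct vmmeter.
 * Usage: sum_counter(&vm_cnt.v_wire_count), mirroring vcnt()'s arg1.
 */
static unsigned int
sum_counter(void *arg1)
{
        unsigned int count = *(unsigned int *)arg1;
        ptrdiff_t offset = (char *)arg1 - (char *)&vm_cnt;
        int i;

        for (i = 0; i < NCPU; i++)
                count += *(unsigned int *)((char *)&pcpu_cnt[i] + offset);
        return (count);
}
```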