author     attilio <attilio@FreeBSD.org>    2007-05-31 22:52:15 +0000
committer  attilio <attilio@FreeBSD.org>    2007-05-31 22:52:15 +0000
commit     7dd8ed88a925a943f1963baa072f4b6c6a8c9930 (patch)
tree       10bf0f11ceeb18c6b03947eb85223abbbbf9cc67 /sys/vm
parent     4681b4098bbf12784d009826b2223ace96a2306b (diff)
Revert VMCNT_* operations introduction.
Probably, a general approach is not the best solution here, so we should
solve the sched_lock protection problems separately.
Requested by: alc
Approved by: jeff (mentor)
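
For context on what is being removed: the VMCNT_* macros wrapped every access to the global struct vmmeter cnt behind one interface, so that locking or atomic semantics could later be changed in a single place; this commit restores the direct cnt.v_* field accesses. Below is a minimal userland sketch of the two styles. The struct is trimmed to three fields and the macro bodies are illustrative guesses, not the exact definitions being reverted.

/*
 * Illustrative sketch only -- not the actual FreeBSD definitions.
 * Models the accessor-macro style being reverted (VMCNT_GET/SET/ADD)
 * versus the direct "cnt.v_*" field access this commit returns to.
 */
#include <stdio.h>

struct vmmeter {			/* a few fields from the real struct */
	unsigned int v_free_count;
	unsigned int v_wire_count;
	unsigned int v_swapin;
};

static struct vmmeter cnt;

/* Assumed macro bodies: each access is keyed by the field suffix, so a
 * later change (locking, atomics) could be made centrally. */
#define VMCNT_GET(f)		(cnt.v_##f)
#define VMCNT_SET(f, v)		(cnt.v_##f = (v))
#define VMCNT_ADD(f, v)		(cnt.v_##f += (v))
#define VMCNT_SUB(f, v)		(cnt.v_##f -= (v))
#define VMCNT_PTR(f)		(&cnt.v_##f)

int
main(void)
{
	VMCNT_SET(free_count, 100);	/* macro style (reverted) */
	VMCNT_ADD(swapin, 1);

	cnt.v_free_count = 100;		/* direct style (restored) */
	cnt.v_swapin++;

	printf("free=%u swapin=%u\n", VMCNT_GET(free_count), cnt.v_swapin);
	return (0);
}

With these bodies both styles compile to the same loads and stores; the macro layer only bought a central point at which protection could later be added, which is the "general approach" the commit message steps away from.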
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/swap_pager.c   |  16
-rw-r--r--  sys/vm/uma_core.c     |   2
-rw-r--r--  sys/vm/vm_contig.c    |   4
-rw-r--r--  sys/vm/vm_fault.c     |   3
-rw-r--r--  sys/vm/vm_glue.c      |   4
-rw-r--r--  sys/vm/vm_map.c       |   4
-rw-r--r--  sys/vm/vm_meter.c     | 122
-rw-r--r--  sys/vm/vm_mmap.c      |   2
-rw-r--r--  sys/vm/vm_object.c    |   2
-rw-r--r--  sys/vm/vm_page.c      |  70
-rw-r--r--  sys/vm/vm_pageout.c   |  94
-rw-r--r--  sys/vm/vm_pageq.c     |  12
-rw-r--r--  sys/vm/vm_zeroidle.c  |   7
-rw-r--r--  sys/vm/vnode_pager.c  |  19
14 files changed, 177 insertions, 184 deletions
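
One handler is worth a note before the diff itself: vcnt() in the vm_meter.c hunk below receives a pointer to a single field of cnt as its sysctl arg1 and derives that field's byte offset, so that under SMP the same field can be summed across per-CPU copies of the structure. A userland analogue of the offset trick follows; NCPU, the trimmed struct, and the per-CPU array are assumptions made for illustration.

/*
 * Userland analogue of vm_meter.c's vcnt() handler: given a pointer to
 * one member of a global stats struct, compute the member's offset and
 * sum that member across per-CPU copies of the struct.
 */
#include <stddef.h>
#include <stdio.h>

#define NCPU 4

struct vmmeter {
	unsigned int v_swtch;
	unsigned int v_trap;
};

static struct vmmeter cnt;		/* global totals */
static struct vmmeter pcpu_cnt[NCPU];	/* per-CPU copies */

static unsigned int
vcnt(void *arg1)
{
	unsigned int count = *(unsigned int *)arg1;
	ptrdiff_t offset = (char *)arg1 - (char *)&cnt;
	int i;

	/* Add each CPU's copy of the same field, located by offset. */
	for (i = 0; i < NCPU; i++)
		count += *(unsigned int *)((char *)&pcpu_cnt[i] + offset);
	return (count);
}

int
main(void)
{
	pcpu_cnt[0].v_trap = 3;
	pcpu_cnt[2].v_trap = 5;
	printf("traps: %u\n", vcnt(&cnt.v_trap));	/* prints 8 */
	return (0);
}

The char * subtraction is the standard way to recover a member's offset from a pointer into a struct; applying that offset to each per-CPU copy then addresses the same member.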
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 10de8a4..121c4a5 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -385,7 +385,7 @@ swap_pager_swap_init(void)
 	 * can hold 16 pages, so this is probably overkill.  This reservation
 	 * is typically limited to around 32MB by default.
 	 */
-	n = VMCNT_GET(page_count) / 2;
+	n = cnt.v_page_count / 2;
 	if (maxswzone && n > maxswzone / sizeof(struct swblock))
 		n = maxswzone / sizeof(struct swblock);
 	n2 = n;
@@ -1037,8 +1037,8 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
 	}
 	bp->b_npages = j - i;
 
-	VMCNT_ADD(swapin, 1);
-	VMCNT_ADD(swappgsin, bp->b_npages);
+	cnt.v_swapin++;
+	cnt.v_swappgsin += bp->b_npages;
 
 	/*
 	 * We still hold the lock on mreq, and our automatic completion routine
@@ -1072,7 +1072,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
 		vm_page_lock_queues();
 		vm_page_flag_set(mreq, PG_REFERENCED);
 		vm_page_unlock_queues();
-		VMCNT_ADD(intrans, 1);
+		cnt.v_intrans++;
 		if (msleep(mreq, VM_OBJECT_MTX(object), PSWP, "swread", hz*20)) {
 			printf(
 "swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
@@ -1263,8 +1263,8 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
 		bp->b_dirtyoff = 0;
 		bp->b_dirtyend = bp->b_bcount;
 
-		VMCNT_ADD(swapout, 1);
-		VMCNT_ADD(swappgsout, bp->b_npages);
+		cnt.v_swapout++;
+		cnt.v_swappgsout += bp->b_npages;
 
 		/*
 		 * asynchronous
@@ -2135,8 +2135,8 @@ swapoff_one(struct swdevt *sp, struct thread *td)
 	 * of data we will have to page back in, plus an epsilon so
 	 * the system doesn't become critically low on swap space.
 	 */
-	if (VMCNT_GET(free_count) + VMCNT_GET(cache_count) +
-	    swap_pager_avail < nblks + nswap_lowat) {
+	if (cnt.v_free_count + cnt.v_cache_count + swap_pager_avail <
+	    nblks + nswap_lowat) {
 		return (ENOMEM);
 	}
 
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index dc87672..eb00bfe 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -271,7 +271,7 @@ SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
 static void
 bucket_enable(void)
 {
-	if (VMCNT_GET(free_count) < VMCNT_GET(free_min))
+	if (cnt.v_free_count < cnt.v_free_min)
 		bucketdisable = 1;
 	else
 		bucketdisable = 0;
diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c
index 8278c14..b26c46f 100644
--- a/sys/vm/vm_contig.c
+++ b/sys/vm/vm_contig.c
@@ -204,7 +204,7 @@ again:
 		 * Find first page in array that is free, within range,
 		 * aligned, and such that the boundary won't be crossed.
 		 */
-		for (i = start; i < VMCNT_GET(page_count); i++) {
+		for (i = start; i < cnt.v_page_count; i++) {
 			phys = VM_PAGE_TO_PHYS(&pga[i]);
 			pqtype = pga[i].queue - pga[i].pc;
 			if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
@@ -217,7 +217,7 @@ again:
 		/*
 		 * If the above failed or we will exceed the upper bound, fail.
 		 */
-		if ((i == VMCNT_GET(page_count)) ||
+		if ((i == cnt.v_page_count) ||
 		    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
 			mtx_unlock(&vm_page_queue_free_mtx);
 			/*
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index cd5de64..5855430 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1271,8 +1271,7 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
 	 * try to do any readahead that we might have free pages for.
 	 */
 	if ((rahead + rbehind) >
-	    ((VMCNT_GET(free_count) + VMCNT_GET(cache_count)) -
-	    VMCNT_GET(free_reserved))) {
+	    ((cnt.v_free_count + cnt.v_cache_count) - cnt.v_free_reserved)) {
 		pagedaemon_wakeup();
 		marray[0] = m;
 		*reqpage = 0;
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 4981efc..a3749bf 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -219,7 +219,7 @@ vslock(void *addr, size_t len)
 	 * Also, the sysctl code, which is the only present user
 	 * of vslock(), does a hard loop on EAGAIN.
 	 */
-	if (npages + VMCNT_GET(wire_count) > vm_page_max_wired)
+	if (npages + cnt.v_wire_count > vm_page_max_wired)
 		return (EAGAIN);
 #endif
 	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
@@ -589,7 +589,7 @@ vm_init_limits(udata)
 	limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
 	limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
 	/* limit the limit to no less than 2MB */
-	rss_limit = max(VMCNT_GET(free_count), 512);
+	rss_limit = max(cnt.v_free_count, 512);
 	limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
 	limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
 }
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 7e518606..9fe60b7 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -274,7 +274,7 @@ vmspace_alloc(min, max)
 void
 vm_init2(void)
 {
-	uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(VMCNT_GET(page_count),
+	uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
 	    (VM_MAX_KERNEL_ADDRESS - KERNBASE) / PAGE_SIZE) / 8 +
 	    maxproc * 2 + maxfiles);
 	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
@@ -1489,7 +1489,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
 		 * free pages allocating pv entries.
 		 */
 		if ((flags & MAP_PREFAULT_MADVISE) &&
-		    VMCNT_GET(free_count) < VMCNT_GET(free_reserved)) {
+		    cnt.v_free_count < cnt.v_free_reserved) {
 			psize = tmpidx;
 			break;
 		}
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 14c9f5d..d4b51e7 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -52,26 +52,26 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_object.h>
 #include <sys/sysctl.h>
 
-volatile struct vmmeter cnt;
+struct vmmeter cnt;
 
 int maxslp = MAXSLP;
 
 SYSCTL_UINT(_vm, VM_V_FREE_MIN, v_free_min,
-	CTLFLAG_RW, VMCNT_PTR(free_min), 0, "");
+	CTLFLAG_RW, &cnt.v_free_min, 0, "");
 SYSCTL_UINT(_vm, VM_V_FREE_TARGET, v_free_target,
-	CTLFLAG_RW, VMCNT_PTR(free_target), 0, "");
+	CTLFLAG_RW, &cnt.v_free_target, 0, "");
 SYSCTL_UINT(_vm, VM_V_FREE_RESERVED, v_free_reserved,
-	CTLFLAG_RW, VMCNT_PTR(free_reserved), 0, "");
+	CTLFLAG_RW, &cnt.v_free_reserved, 0, "");
 SYSCTL_UINT(_vm, VM_V_INACTIVE_TARGET, v_inactive_target,
-	CTLFLAG_RW, VMCNT_PTR(inactive_target), 0, "");
+	CTLFLAG_RW, &cnt.v_inactive_target, 0, "");
 SYSCTL_UINT(_vm, VM_V_CACHE_MIN, v_cache_min,
-	CTLFLAG_RW, VMCNT_PTR(cache_min), 0, "");
+	CTLFLAG_RW, &cnt.v_cache_min, 0, "");
 SYSCTL_UINT(_vm, VM_V_CACHE_MAX, v_cache_max,
-	CTLFLAG_RW, VMCNT_PTR(cache_max), 0, "");
+	CTLFLAG_RW, &cnt.v_cache_max, 0, "");
 SYSCTL_UINT(_vm, VM_V_PAGEOUT_FREE_MIN, v_pageout_free_min,
-	CTLFLAG_RW, VMCNT_PTR(pageout_free_min), 0, "");
+	CTLFLAG_RW, &cnt.v_pageout_free_min, 0, "");
 SYSCTL_UINT(_vm, OID_AUTO, v_free_severe,
-	CTLFLAG_RW, VMCNT_PTR(free_severe), 0, "");
+	CTLFLAG_RW, &cnt.v_free_severe, 0, "");
 
 static int
 sysctl_vm_loadavg(SYSCTL_HANDLER_ARGS)
@@ -235,7 +235,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
 		}
 	}
 	mtx_unlock(&vm_object_list_mtx);
-	total.t_free = VMCNT_GET(free_count) + VMCNT_GET(cache_count);
+	total.t_free = cnt.v_free_count + cnt.v_cache_count;
 	return (sysctl_handle_opaque(oidp,
 	    &total, sizeof(total), req));
 }
@@ -255,7 +255,7 @@ static int
 vcnt(SYSCTL_HANDLER_ARGS)
 {
 	int count = *(int *)arg1;
-	int offset = (char *)arg1 - (char *)VMCNT;
+	int offset = (char *)arg1 - (char *)&cnt;
 #ifdef SMP
 	int i;
 
@@ -280,103 +280,101 @@ static SYSCTL_NODE(_vm_stats, OID_AUTO, vm, CTLFLAG_RW, 0,
 SYSCTL_NODE(_vm_stats, OID_AUTO, misc, CTLFLAG_RW, 0, "VM meter misc stats");
 
 SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_swtch, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(swtch), 0, vcnt, "IU", "Context switches");
+	&cnt.v_swtch, 0, vcnt, "IU", "Context switches");
 SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_trap, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(trap), 0, vcnt, "IU", "Traps");
+	&cnt.v_trap, 0, vcnt, "IU", "Traps");
 SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_syscall, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(syscall), 0, vcnt, "IU", "Syscalls");
+	&cnt.v_syscall, 0, vcnt, "IU", "Syscalls");
 SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intr, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(intr), 0, vcnt, "IU", "Hardware interrupts");
+	&cnt.v_intr, 0, vcnt, "IU", "Hardware interrupts");
 SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_soft, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(soft), 0, vcnt, "IU", "Software interrupts");
+	&cnt.v_soft, 0, vcnt, "IU", "Software interrupts");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vm_faults, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(vm_faults), 0, vcnt, "IU", "VM faults");
+	&cnt.v_vm_faults, 0, vcnt, "IU", "VM faults");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cow_faults, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(cow_faults), 0, vcnt, "IU", "COW faults");
+	&cnt.v_cow_faults, 0, vcnt, "IU", "COW faults");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cow_optim, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(cow_optim), 0, vcnt, "IU", "Optimized COW faults");
+	&cnt.v_cow_optim, 0, vcnt, "IU", "Optimized COW faults");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_zfod, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(zfod), 0, vcnt, "IU", "Zero fill");
+	&cnt.v_zfod, 0, vcnt, "IU", "Zero fill");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_ozfod, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(ozfod), 0, vcnt, "IU", "Optimized zero fill");
+	&cnt.v_ozfod, 0, vcnt, "IU", "Optimized zero fill");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swapin, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(swapin), 0, vcnt, "IU", "Swapin operations");
+	&cnt.v_swapin, 0, vcnt, "IU", "Swapin operations");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swapout, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(swapout), 0, vcnt, "IU", "Swapout operations");
+	&cnt.v_swapout, 0, vcnt, "IU", "Swapout operations");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swappgsin, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(swappgsin), 0, vcnt, "IU", "Swapin pages");
+	&cnt.v_swappgsin, 0, vcnt, "IU", "Swapin pages");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swappgsout, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(swappgsout), 0, vcnt, "IU", "Swapout pages");
+	&cnt.v_swappgsout, 0, vcnt, "IU", "Swapout pages");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodein, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(vnodein), 0, vcnt, "IU", "Vnodein operations");
+	&cnt.v_vnodein, 0, vcnt, "IU", "Vnodein operations");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodeout, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(vnodeout), 0, vcnt, "IU", "Vnodeout operations");
+	&cnt.v_vnodeout, 0, vcnt, "IU", "Vnodeout operations");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodepgsin, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(vnodepgsin), 0, vcnt, "IU", "Vnodein pages");
+	&cnt.v_vnodepgsin, 0, vcnt, "IU", "Vnodein pages");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodepgsout, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(vnodepgsout), 0, vcnt, "IU", "Vnodeout pages");
+	&cnt.v_vnodepgsout, 0, vcnt, "IU", "Vnodeout pages");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_intrans, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(intrans), 0, vcnt, "IU", "In transit page blocking");
+	&cnt.v_intrans, 0, vcnt, "IU", "In transit page blocking");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_reactivated, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(reactivated), 0, vcnt, "IU", "Reactivated pages");
+	&cnt.v_reactivated, 0, vcnt, "IU", "Reactivated pages");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdwakeups, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(pdwakeups), 0, vcnt, "IU", "Pagedaemon wakeups");
+	&cnt.v_pdwakeups, 0, vcnt, "IU", "Pagedaemon wakeups");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdpages, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(pdpages), 0, vcnt, "IU", "Pagedaemon page scans");
+	&cnt.v_pdpages, 0, vcnt, "IU", "Pagedaemon page scans");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_dfree, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(dfree), 0, vcnt, "IU", "");
+	&cnt.v_dfree, 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pfree, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(pfree), 0, vcnt, "IU", "");
+	&cnt.v_pfree, 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_tfree, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(tfree), 0, vcnt, "IU", "");
+	&cnt.v_tfree, 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_page_size, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(page_size), 0, vcnt, "IU", "");
+	&cnt.v_page_size, 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_page_count, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(page_count), 0, vcnt, "IU", "");
+	&cnt.v_page_count, 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_free_reserved, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(free_reserved), 0, vcnt, "IU", "");
+	&cnt.v_free_reserved, 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_free_target, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(free_target), 0, vcnt, "IU", "");
+	&cnt.v_free_target, 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_free_min, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(free_min), 0, vcnt, "IU", "");
+	&cnt.v_free_min, 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_free_count, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(free_count), 0, vcnt, "IU", "");
+	&cnt.v_free_count, 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_wire_count, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(wire_count), 0, vcnt, "IU", "");
+	&cnt.v_wire_count, 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_active_count, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(active_count), 0, vcnt, "IU", "");
+	&cnt.v_active_count, 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_inactive_target, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(inactive_target), 0, vcnt, "IU", "");
+	&cnt.v_inactive_target, 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_inactive_count, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(inactive_count), 0, vcnt, "IU", "");
+	&cnt.v_inactive_count, 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cache_count, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(cache_count), 0, vcnt, "IU", "");
+	&cnt.v_cache_count, 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cache_min, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(cache_min), 0, vcnt, "IU", "");
+	&cnt.v_cache_min, 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cache_max, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(cache_max), 0, vcnt, "IU", "");
+	&cnt.v_cache_max, 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pageout_free_min, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(pageout_free_min), 0, vcnt, "IU", "");
-SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_interrupt_free_min, CTLTYPE_UINT |
-    CTLFLAG_RD, VMCNT_PTR(interrupt_free_min), 0, vcnt, "IU", "");
+	&cnt.v_pageout_free_min, 0, vcnt, "IU", "");
+SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_interrupt_free_min, CTLTYPE_UINT|CTLFLAG_RD,
+	&cnt.v_interrupt_free_min, 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_forks, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(forks), 0, vcnt, "IU", "Number of fork() calls");
+	&cnt.v_forks, 0, vcnt, "IU", "Number of fork() calls");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vforks, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(vforks), 0, vcnt, "IU", "Number of vfork() calls");
+	&cnt.v_vforks, 0, vcnt, "IU", "Number of vfork() calls");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_rforks, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(rforks), 0, vcnt, "IU", "Number of rfork() calls");
+	&cnt.v_rforks, 0, vcnt, "IU", "Number of rfork() calls");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreads, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(kthreads), 0, vcnt, "IU",
-	"Number of fork() calls by kernel");
+	&cnt.v_kthreads, 0, vcnt, "IU", "Number of fork() calls by kernel");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_forkpages, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(forkpages), 0, vcnt, "IU", "VM pages affected by fork()");
+	&cnt.v_forkpages, 0, vcnt, "IU", "VM pages affected by fork()");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vforkpages, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(vforkpages), 0, vcnt, "IU", "VM pages affected by vfork()");
+	&cnt.v_vforkpages, 0, vcnt, "IU", "VM pages affected by vfork()");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_rforkpages, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(rforkpages), 0, vcnt, "IU", "VM pages affected by rfork()");
+	&cnt.v_rforkpages, 0, vcnt, "IU", "VM pages affected by rfork()");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreadpages, CTLTYPE_UINT|CTLFLAG_RD,
-	VMCNT_PTR(kthreadpages), 0, vcnt, "IU",
-	"VM pages affected by fork() by kernel");
+	&cnt.v_kthreadpages, 0, vcnt, "IU", "VM pages affected by fork() by kernel");
 
 SYSCTL_INT(_vm_stats_misc, OID_AUTO, zero_page_count, CTLFLAG_RD,
 	&vm_page_zero_count, 0, "");
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 901ff13..ff1ba18 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -974,7 +974,7 @@ mlock(td, uap)
 		return (ENOMEM);
 	}
 	PROC_UNLOCK(proc);
-	if (npages + VMCNT_GET(wire_count) > vm_page_max_wired)
+	if (npages + cnt.v_wire_count > vm_page_max_wired)
 		return (EAGAIN);
 	error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
 	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index c12095a..dfcade1 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -655,7 +655,7 @@ vm_object_terminate(vm_object_t object)
 		    "p->busy = %d, p->flags %x\n", p, p->busy, p->flags));
 		if (p->wire_count == 0) {
 			vm_page_free(p);
-			VMCNT_ADD(pfree, 1);
+			cnt.v_pfree++;
 		} else {
 			vm_page_remove(p);
 		}
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 2e97237..d4f8148 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -151,9 +151,9 @@ SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
 void
 vm_set_page_size(void)
 {
-	if (VMCNT_GET(page_size) == 0)
-		VMCNT_SET(page_size, PAGE_SIZE);
-	if (((VMCNT_GET(page_size) - 1) & VMCNT_GET(page_size)) != 0)
+	if (cnt.v_page_size == 0)
+		cnt.v_page_size = PAGE_SIZE;
+	if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
 		panic("vm_set_page_size: page size not a power of two");
 }
 
@@ -357,8 +357,8 @@ vm_page_startup(vm_offset_t vaddr)
 	 * last rather than first.  On large-memory machines, this avoids
 	 * the exhaustion of low physical memory before isa_dma_init has run.
 	 */
-	VMCNT_SET(page_count, 0);
-	VMCNT_SET(free_count, 0);
+	cnt.v_page_count = 0;
+	cnt.v_free_count = 0;
 	list = getenv("vm.blacklist");
 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
 		pa = phys_avail[i];
@@ -874,11 +874,11 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 
 loop:
 	mtx_lock(&vm_page_queue_free_mtx);
-	if (VMCNT_GET(free_count) > VMCNT_GET(free_reserved) ||
+	if (cnt.v_free_count > cnt.v_free_reserved ||
 	    (page_req == VM_ALLOC_SYSTEM &&
-	    VMCNT_GET(cache_count) == 0 &&
-	    VMCNT_GET(free_count) > VMCNT_GET(interrupt_free_min)) ||
-	    (page_req == VM_ALLOC_INTERRUPT && VMCNT_GET(free_count) > 0)) {
+	    cnt.v_cache_count == 0 &&
+	    cnt.v_free_count > cnt.v_interrupt_free_min) ||
+	    (page_req == VM_ALLOC_INTERRUPT && cnt.v_free_count > 0)) {
 		/*
 		 * Allocate from the free queue if the number of free pages
 		 * exceeds the minimum for the request class.
@@ -893,9 +893,9 @@ loop:
 		 */
 		vm_page_lock_queues();
 		if ((m = vm_page_select_cache(color)) == NULL) {
-			KASSERT(VMCNT_GET(cache_count) == 0,
+			KASSERT(cnt.v_cache_count == 0,
 			    ("vm_page_alloc: cache queue is missing %d pages",
-			    VMCNT_GET(cache_count)));
+			    cnt.v_cache_count));
 			vm_page_unlock_queues();
 			atomic_add_int(&vm_pageout_deficit, 1);
 			pagedaemon_wakeup();
@@ -904,8 +904,7 @@ loop:
 			return (NULL);
 
 		mtx_lock(&vm_page_queue_free_mtx);
-		if (VMCNT_GET(free_count) <=
-		    VMCNT_GET(interrupt_free_min)) {
+		if (cnt.v_free_count <= cnt.v_interrupt_free_min) {
 			mtx_unlock(&vm_page_queue_free_mtx);
 			return (NULL);
 		}
@@ -955,7 +954,7 @@ loop:
 	else
 		m->oflags = VPO_BUSY;
 	if (req & VM_ALLOC_WIRED) {
-		VMCNT_ADD(wire_count, 1);
+		atomic_add_int(&cnt.v_wire_count, 1);
 		m->wire_count = 1;
 	} else
 		m->wire_count = 0;
@@ -1001,8 +1000,8 @@ vm_wait(void)
 			vm_pages_needed = 1;
 			wakeup(&vm_pages_needed);
 		}
-		msleep(VMCNT_PTR(free_count), &vm_page_queue_free_mtx, PDROP |
-		    PVM, "vmwait", 0);
+		msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
+		    "vmwait", 0);
 	}
 }
 
@@ -1025,7 +1024,7 @@ vm_waitpfault(void)
 		vm_pages_needed = 1;
 		wakeup(&vm_pages_needed);
 	}
-	msleep(VMCNT_PTR(free_count), &vm_page_queue_free_mtx, PDROP | PUSER,
+	msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
 	    "pfault", 0);
 }
 
@@ -1046,7 +1045,7 @@ vm_page_activate(vm_page_t m)
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if (VM_PAGE_GETKNOWNQUEUE2(m) != PQ_ACTIVE) {
 		if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
-			VMCNT_ADD(reactivated, 1);
+			cnt.v_reactivated++;
 		vm_pageq_remove(m);
 		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
 			if (m->act_count < ACT_INIT)
@@ -1079,8 +1078,7 @@ vm_page_free_wakeup(void)
 	 * some free.
 	 */
 	if (vm_pageout_pages_needed &&
-	    VMCNT_GET(cache_count) + VMCNT_GET(free_count) >=
-	    VMCNT_GET(pageout_free_min)) {
+	    cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
 		wakeup(&vm_pageout_pages_needed);
 		vm_pageout_pages_needed = 0;
 	}
@@ -1091,7 +1089,7 @@ vm_page_free_wakeup(void)
 	 */
 	if (vm_pages_needed && !vm_page_count_min()) {
 		vm_pages_needed = 0;
-		wakeup(VMCNT_PTR(free_count));
+		wakeup(&cnt.v_free_count);
 	}
 }
 
@@ -1114,7 +1112,7 @@ vm_page_free_toq(vm_page_t m)
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	KASSERT(!pmap_page_is_mapped(m),
 	    ("vm_page_free_toq: freeing mapped page %p", m));
-	VMCNT_ADD(tfree, 1);
+	cnt.v_tfree++;
 
 	if (m->busy || VM_PAGE_INQUEUE1(m, PQ_FREE)) {
 		printf(
@@ -1205,7 +1203,7 @@ vm_page_wire(vm_page_t m)
 	if (m->wire_count == 0) {
 		if ((m->flags & PG_UNMANAGED) == 0)
 			vm_pageq_remove(m);
-		VMCNT_ADD(wire_count, 1);
+		atomic_add_int(&cnt.v_wire_count, 1);
 	}
 	m->wire_count++;
 	KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
@@ -1249,7 +1247,7 @@ vm_page_unwire(vm_page_t m, int activate)
 	if (m->wire_count > 0) {
 		m->wire_count--;
 		if (m->wire_count == 0) {
-			VMCNT_SUB(wire_count, 1);
+			atomic_subtract_int(&cnt.v_wire_count, 1);
 			if (m->flags & PG_UNMANAGED) {
 				;
 			} else if (activate)
@@ -1288,7 +1286,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
 		return;
 	if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
 		if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
-			VMCNT_ADD(reactivated, 1);
+			cnt.v_reactivated++;
 		vm_page_flag_clear(m, PG_WINATCFLS);
 		vm_pageq_remove(m);
 		if (athead)
@@ -1297,7 +1295,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
 			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
 		VM_PAGE_SETQUEUE2(m, PQ_INACTIVE);
 		vm_page_queues[PQ_INACTIVE].lcnt++;
-		VMCNT_ADD(inactive_count, 1);
+		cnt.v_inactive_count++;
 	}
 }
 
@@ -1782,16 +1780,16 @@ vm_page_cowsetup(vm_page_t m)
 
 DB_SHOW_COMMAND(page, vm_page_print_page_info)
 {
-	db_printf("cnt.v_free_count: %d\n", VMCNT_GET(free_count));
-	db_printf("cnt.v_cache_count: %d\n", VMCNT_GET(cache_count));
-	db_printf("cnt.v_inactive_count: %d\n", VMCNT_GET(inactive_count));
-	db_printf("cnt.v_active_count: %d\n", VMCNT_GET(active_count));
-	db_printf("cnt.v_wire_count: %d\n", VMCNT_GET(wire_count));
-	db_printf("cnt.v_free_reserved: %d\n", VMCNT_GET(free_reserved));
-	db_printf("cnt.v_free_min: %d\n", VMCNT_GET(free_min));
-	db_printf("cnt.v_free_target: %d\n", VMCNT_GET(free_target));
-	db_printf("cnt.v_cache_min: %d\n", VMCNT_GET(cache_min));
-	db_printf("cnt.v_inactive_target: %d\n", VMCNT_GET(inactive_target));
+	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
+	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
+	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
+	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
+	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
+	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
+	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
+	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
+	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
+	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
 }
 
 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index d3c14ba..c0611ba 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -538,7 +538,7 @@ vm_pageout_object_deactivate_pages(pmap, first_object, desired)
 			goto unlock_return;
 		}
 		next = TAILQ_NEXT(p, listq);
-		VMCNT_ADD(pdpages, 1);
+		cnt.v_pdpages++;
 		if (p->wire_count != 0 ||
 		    p->hold_count != 0 ||
 		    p->busy != 0 ||
@@ -739,13 +739,13 @@ vm_pageout_scan(int pass)
 	vm_page_lock_queues();
 rescan0:
 	addl_page_shortage = addl_page_shortage_init;
-	maxscan = VMCNT_GET(inactive_count);
+	maxscan = cnt.v_inactive_count;
 	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
 	     m != NULL && maxscan-- > 0 && page_shortage > 0;
 	     m = next) {
 
-		VMCNT_ADD(pdpages, 1);
+		cnt.v_pdpages++;
 
 		if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE) {
 			goto rescan0;
@@ -856,7 +856,7 @@ rescan0:
 			 * Invalid pages can be easily freed
 			 */
 			vm_page_free(m);
-			VMCNT_ADD(dfree, 1);
+			cnt.v_dfree++;
 			--page_shortage;
 		} else if (m->dirty == 0) {
 			/*
@@ -1043,8 +1043,8 @@ unlock_and_continue:
 	 * Compute the number of pages we want to try to move from the
 	 * active queue to the inactive queue.
 	 */
-	page_shortage = vm_paging_target() + VMCNT_GET(inactive_target) -
-	    VMCNT_GET(inactive_count);
+	page_shortage = vm_paging_target() +
+	    cnt.v_inactive_target - cnt.v_inactive_count;
 	page_shortage += addl_page_shortage;
 
 	/*
@@ -1052,7 +1052,7 @@ unlock_and_continue:
 	 * track the per-page activity counter and use it to locate
 	 * deactivation candidates.
 	 */
-	pcount = VMCNT_GET(active_count);
+	pcount = cnt.v_active_count;
 	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
 	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
 
@@ -1089,7 +1089,7 @@ unlock_and_continue:
 		 * The count for pagedaemon pages is done after checking the
 		 * page for eligibility...
 		 */
-		VMCNT_ADD(pdpages, 1);
+		cnt.v_pdpages++;
 
 		/*
 		 * Check to see "how much" the page has been used.
@@ -1149,9 +1149,8 @@ unlock_and_continue:
 	 */
 	cache_cur = cache_last_free;
 	cache_first_failure = -1;
-	while (VMCNT_GET(free_count) < VMCNT_GET(free_reserved) &&
-	    (cache_cur = (cache_cur + PQ_PRIME2) & PQ_COLORMASK) !=
-	    cache_first_failure) {
+	while (cnt.v_free_count < cnt.v_free_reserved && (cache_cur =
+	    (cache_cur + PQ_PRIME2) & PQ_COLORMASK) != cache_first_failure) {
 		TAILQ_FOREACH(m, &vm_page_queues[PQ_CACHE + cache_cur].pl,
 		    pageq) {
 			KASSERT(m->dirty == 0,
@@ -1169,7 +1168,7 @@ unlock_and_continue:
 			    m));
 			vm_page_free(m);
 			VM_OBJECT_UNLOCK(object);
-			VMCNT_ADD(dfree, 1);
+			cnt.v_dfree++;
 			cache_last_free = cache_cur;
 			cache_first_failure = -1;
 			break;
@@ -1292,7 +1291,7 @@ unlock_and_continue:
 			sched_nice(bigproc, PRIO_MIN);
 			mtx_unlock_spin(&sched_lock);
 			PROC_UNLOCK(bigproc);
-			wakeup(VMCNT_PTR(free_count));
+			wakeup(&cnt.v_free_count);
 		}
 	}
 	mtx_unlock(&Giant);
@@ -1315,18 +1314,16 @@ vm_pageout_page_stats()
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	page_shortage =
-	    (VMCNT_GET(inactive_target) + VMCNT_GET(cache_max) +
-	    VMCNT_GET(free_min)) - (VMCNT_GET(free_count) +
-	    VMCNT_GET(inactive_count) + VMCNT_GET(cache_count));
+	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
+	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
 
 	if (page_shortage <= 0)
 		return;
 
-	pcount = VMCNT_GET(active_count);
+	pcount = cnt.v_active_count;
 	fullintervalcount += vm_pageout_stats_interval;
 	if (fullintervalcount < vm_pageout_full_stats_interval) {
-		tpcount = (vm_pageout_stats_max * VMCNT_GET(active_count)) /
-		    VMCNT_GET(page_count);
+		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
 		if (pcount > tpcount)
 			pcount = tpcount;
 	} else {
@@ -1412,8 +1409,8 @@ vm_pageout()
 	/*
 	 * Initialize some paging parameters.
 	 */
-	VMCNT_SET(interrupt_free_min, 2);
-	if (VMCNT_GET(page_count) < 2000)
+	cnt.v_interrupt_free_min = 2;
+	if (cnt.v_page_count < 2000)
 		vm_pageout_page_count = 8;
 
 	/*
@@ -1421,16 +1418,17 @@ vm_pageout()
 	 * swap pager structures plus enough for any pv_entry structs
 	 * when paging.
 	 */
-	VMCNT_SET(free_min, (VMCNT_GET(page_count) > 1024) ? (4 +
-	    (VMCNT_GET(page_count) - 1024) / 200) : 4);
-	VMCNT_SET(pageout_free_min, (2 * MAXBSIZE) / PAGE_SIZE +
-	    VMCNT_GET(interrupt_free_min));
-	VMCNT_SET(free_reserved, vm_pageout_page_count +
-	    VMCNT_GET(pageout_free_min) + (VMCNT_GET(page_count) / 768) +
-	    PQ_NUMCOLORS);
-	VMCNT_SET(free_severe, VMCNT_GET(free_min) / 2);
-	VMCNT_ADD(free_min, VMCNT_GET(free_reserved));
-	VMCNT_ADD(free_severe, VMCNT_GET(free_reserved));
+	if (cnt.v_page_count > 1024)
+		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
+	else
+		cnt.v_free_min = 4;
+	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
+	    cnt.v_interrupt_free_min;
+	cnt.v_free_reserved = vm_pageout_page_count +
+	    cnt.v_pageout_free_min + (cnt.v_page_count / 768) + PQ_NUMCOLORS;
+	cnt.v_free_severe = cnt.v_free_min / 2;
+	cnt.v_free_min += cnt.v_free_reserved;
+	cnt.v_free_severe += cnt.v_free_reserved;
 
 	/*
 	 * v_free_target and v_cache_min control pageout hysteresis.  Note
@@ -1443,27 +1441,29 @@ vm_pageout()
 	 * be big enough to handle memory needs while the pageout daemon
 	 * is signalled and run to free more pages.
	 */
-	VMCNT_SET(free_target, ((VMCNT_GET(free_count) > 6144) ? 4 : 2) *
-	    VMCNT_GET(free_min) + VMCNT_GET(free_reserved));
-
-	if (VMCNT_GET(free_count) > 2048) {
-		VMCNT_SET(cache_min, VMCNT_GET(free_target));
-		VMCNT_SET(cache_max, 2 * VMCNT_GET(cache_min));
-		VMCNT_SET(inactive_target, (3 * VMCNT_GET(free_target) / 2));
+	if (cnt.v_free_count > 6144)
+		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
+	else
+		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
+
+	if (cnt.v_free_count > 2048) {
+		cnt.v_cache_min = cnt.v_free_target;
+		cnt.v_cache_max = 2 * cnt.v_cache_min;
+		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
 	} else {
-		VMCNT_SET(cache_min, 0);
-		VMCNT_SET(cache_max, 0);
-		VMCNT_SET(inactive_target, VMCNT_GET(free_count) / 4);
+		cnt.v_cache_min = 0;
+		cnt.v_cache_max = 0;
+		cnt.v_inactive_target = cnt.v_free_count / 4;
 	}
-	if (VMCNT_GET(inactive_target) > VMCNT_GET(free_count) / 3)
-		VMCNT_SET(inactive_target, VMCNT_GET(free_count) / 3);
+	if (cnt.v_inactive_target > cnt.v_free_count / 3)
+		cnt.v_inactive_target = cnt.v_free_count / 3;
 
 	/* XXX does not really belong here */
 	if (vm_page_max_wired == 0)
-		vm_page_max_wired = VMCNT_GET(free_count) / 3;
+		vm_page_max_wired = cnt.v_free_count / 3;
 
 	if (vm_pageout_stats_max == 0)
-		vm_pageout_stats_max = VMCNT_GET(free_target);
+		vm_pageout_stats_max = cnt.v_free_target;
 
 	/*
 	 * Set interval in seconds for stats scan.
@@ -1489,7 +1489,7 @@ vm_pageout()
 		if (vm_pages_needed && !vm_page_count_min()) {
 			if (!vm_paging_needed())
 				vm_pages_needed = 0;
-			wakeup(VMCNT_PTR(free_count));
+			wakeup(&cnt.v_free_count);
 		}
 		if (vm_pages_needed) {
 			/*
@@ -1524,7 +1524,7 @@ vm_pageout()
 			}
 		}
 		if (vm_pages_needed)
-			VMCNT_ADD(pdwakeups, 1);
+			cnt.v_pdwakeups++;
 		mtx_unlock(&vm_page_queue_free_mtx);
 		vm_pageout_scan(pass);
 	}
diff --git a/sys/vm/vm_pageq.c b/sys/vm/vm_pageq.c
index 6f6aadf..1b3e9a4 100644
--- a/sys/vm/vm_pageq.c
+++ b/sys/vm/vm_pageq.c
@@ -140,14 +140,14 @@ vm_pageq_init(void)
 	vm_coloring_init();
 
 	for (i = 0; i < PQ_NUMCOLORS; ++i) {
-		vm_page_queues[PQ_FREE+i].cnt = VMCNT_PTR(free_count);
+		vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
 	}
 	for (i = 0; i < PQ_NUMCOLORS; ++i) {
-		vm_page_queues[PQ_CACHE + i].cnt = VMCNT_PTR(cache_count);
+		vm_page_queues[PQ_CACHE + i].cnt = &cnt.v_cache_count;
 	}
-	vm_page_queues[PQ_INACTIVE].cnt = VMCNT_PTR(inactive_count);
-	vm_page_queues[PQ_ACTIVE].cnt = VMCNT_PTR(active_count);
-	vm_page_queues[PQ_HOLD].cnt = VMCNT_PTR(active_count);
+	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
+	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
+	vm_page_queues[PQ_HOLD].cnt = &cnt.v_active_count;
 
 	for (i = 0; i < PQ_COUNT; i++) {
 		TAILQ_INIT(&vm_page_queues[i].pl);
@@ -192,7 +192,7 @@ vm_pageq_add_new_page(vm_paddr_t pa)
 {
 	vm_page_t m;
 
-	VMCNT_ADD(page_count, 1);
+	atomic_add_int(&cnt.v_page_count, 1);
 	m = PHYS_TO_VM_PAGE(pa);
 	m->phys_addr = pa;
 	m->flags = 0;
diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c
index 29144ed..5af84e0 100644
--- a/sys/vm/vm_zeroidle.c
+++ b/sys/vm/vm_zeroidle.c
@@ -90,10 +90,9 @@ vm_page_zero_check(void)
 	 * fast sleeps. We also do not want to be continuously zeroing
 	 * pages because doing so may flush our L1 and L2 caches too much.
 	 */
-	if (zero_state && vm_page_zero_count >=
-	    ZIDLE_LO(VMCNT_GET(free_count)))
+	if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
 		return (0);
-	if (vm_page_zero_count >= ZIDLE_HI(VMCNT_GET(free_count)))
+	if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
 		return (0);
 	return (1);
 }
@@ -116,7 +115,7 @@ vm_page_zero_idle(void)
 		vm_pageq_enqueue(PQ_FREE + m->pc, m);
 		++vm_page_zero_count;
 		++cnt_prezero;
-		if (vm_page_zero_count >= ZIDLE_HI(VMCNT_GET(free_count)))
+		if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
 			zero_state = 1;
 	}
 	free_rover = (free_rover + PQ_PRIME2) & PQ_COLORMASK;
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 837aa46..cb4cc39 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -728,8 +728,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
 			if (i != reqpage)
 				vm_page_free(m[i]);
 		vm_page_unlock_queues();
-		VMCNT_ADD(vnodein, 1);
-		VMCNT_ADD(vnodepgsin, 1);
+		cnt.v_vnodein++;
+		cnt.v_vnodepgsin++;
 		error = vnode_pager_input_old(object, m[reqpage]);
 		VM_OBJECT_UNLOCK(object);
 		return (error);
@@ -757,8 +757,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
 				vm_page_free(m[i]);
 		vm_page_unlock_queues();
 		VM_OBJECT_UNLOCK(object);
-		VMCNT_ADD(vnodein, 1);
-		VMCNT_ADD(vnodepgsin, 1);
+		cnt.v_vnodein++;
+		cnt.v_vnodepgsin++;
 		return vnode_pager_input_smlfs(object, m[reqpage]);
 	}
 
@@ -909,8 +909,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
 	bp->b_runningbufspace = bp->b_bufsize;
 	atomic_add_int(&runningbufspace, bp->b_runningbufspace);
 
-	VMCNT_ADD(vnodein, 1);
-	VMCNT_ADD(vnodepgsin, 1);
+	cnt.v_vnodein++;
+	cnt.v_vnodepgsin += count;
 
 	/* do the input */
 	bp->b_iooffset = dbtob(bp->b_blkno);
@@ -1031,8 +1031,7 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
 	 * daemon up.  This should be probably be addressed XXX.
 	 */
-	if ((VMCNT_GET(free_count) + VMCNT_GET(cache_count)) <
-	    VMCNT_GET(pageout_free_min))
+	if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
 		sync |= OBJPC_SYNC;
 
 	/*
@@ -1158,8 +1157,8 @@ vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
 	auio.uio_resid = maxsize;
 	auio.uio_td = (struct thread *) 0;
 	error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
-	VMCNT_ADD(vnodein, 1);
-	VMCNT_ADD(vnodepgsin, ncount);
+	cnt.v_vnodeout++;
+	cnt.v_vnodepgsout += ncount;
 
 	if (error) {
 		if ((ppscheck = ppsratecheck(&lastfail, &curfail, 1)))
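
A last detail visible in the diff above: most counters return to plain cnt.v_*++ updates, which in this tree rely on an existing serializing lock such as the page queue mutex, while v_wire_count and v_page_count are updated with FreeBSD's atomic_add_int()/atomic_subtract_int(), since no single lock covers all their writers. Here is a small userland analogue of that split, with a pthread mutex standing in for vm_page_queue_mtx and C11 atomics standing in for the kernel's atomic(9) primitives; all names are illustrative.

/*
 * Userland analogue of the locked-counter vs. atomic-counter split
 * seen in the diff: one counter is only touched under a mutex, the
 * other is updated atomically because it has no single guarding lock.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t queue_mtx = PTHREAD_MUTEX_INITIALIZER;
static unsigned int v_inactive_count;		/* only touched under queue_mtx */
static _Atomic unsigned int v_wire_count;	/* touched from many contexts */

static void
page_deactivate(void)
{
	pthread_mutex_lock(&queue_mtx);
	v_inactive_count++;		/* plain ++ is safe under the lock */
	pthread_mutex_unlock(&queue_mtx);
}

static void
page_wire(void)
{
	/* No lock guarantees exclusivity here, so the update is atomic. */
	atomic_fetch_add(&v_wire_count, 1);
}

int
main(void)
{
	page_deactivate();
	page_wire();
	printf("inactive=%u wired=%u\n", v_inactive_count,
	    atomic_load(&v_wire_count));
	return (0);
}

This split is also why the commit message mentions solving "the sched_lock protection problems separately": rather than funneling every counter through one generalized accessor layer, each counter keeps the cheapest update discipline its own locking context allows.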