-rw-r--r--  sys/amd64/amd64/machdep.c         |   4
-rw-r--r--  sys/amd64/amd64/pmap.c            |  11
-rw-r--r--  sys/arm/arm/machdep.c             |   4
-rw-r--r--  sys/arm/arm/pmap.c                |   2
-rw-r--r--  sys/compat/linprocfs/linprocfs.c  |  18
-rw-r--r--  sys/compat/linux/linux_misc.c     |   2
-rw-r--r--  sys/compat/svr4/svr4_misc.c       |   4
-rw-r--r--  sys/fs/smbfs/smbfs_io.c           |   8
-rw-r--r--  sys/i386/i386/machdep.c           |   4
-rw-r--r--  sys/i386/i386/pmap.c              |   6
-rw-r--r--  sys/ia64/ia64/machdep.c           |   4
-rw-r--r--  sys/ia64/ia64/pmap.c              |   2
-rw-r--r--  sys/kern/init_main.c              |   2
-rw-r--r--  sys/kern/kern_fork.c              |  16
-rw-r--r--  sys/kern/kern_malloc.c            |   9
-rw-r--r--  sys/kern/kern_mib.c               |   2
-rw-r--r--  sys/kern/kern_synch.c             |   2
-rw-r--r--  sys/kern/kern_thread.c            |   2
-rw-r--r--  sys/kern/subr_trap.c              |   2
-rw-r--r--  sys/kern/vfs_bio.c                |   6
-rw-r--r--  sys/kern/vfs_subr.c               |   7
-rw-r--r--  sys/nfsclient/nfs_bio.c           |   8
-rw-r--r--  sys/pc98/pc98/machdep.c           |   4
-rw-r--r--  sys/powerpc/aim/machdep.c         |   4
-rw-r--r--  sys/powerpc/powerpc/machdep.c     |   4
-rw-r--r--  sys/sparc64/sparc64/machdep.c     |   4
-rw-r--r--  sys/sparc64/sparc64/pmap.c        |   2
-rw-r--r--  sys/sun4v/sun4v/machdep.c         |   4
-rw-r--r--  sys/sun4v/sun4v/pmap.c            |   6
-rw-r--r--  sys/sun4v/sun4v/tsb.c             |   2
-rw-r--r--  sys/sun4v/sun4v/tte_hash.c        |   2
-rw-r--r--  sys/sys/vmmeter.h                 |  33
-rw-r--r--  sys/vm/swap_pager.c               |  16
-rw-r--r--  sys/vm/uma_core.c                 |   2
-rw-r--r--  sys/vm/vm_contig.c                |   4
-rw-r--r--  sys/vm/vm_fault.c                 |   3
-rw-r--r--  sys/vm/vm_glue.c                  |   4
-rw-r--r--  sys/vm/vm_map.c                   |   4
-rw-r--r--  sys/vm/vm_meter.c                 | 122
-rw-r--r--  sys/vm/vm_mmap.c                  |   2
-rw-r--r--  sys/vm/vm_object.c                |   2
-rw-r--r--  sys/vm/vm_page.c                  |  70
-rw-r--r--  sys/vm/vm_pageout.c               |  94
-rw-r--r--  sys/vm/vm_pageq.c                 |  12
-rw-r--r--  sys/vm/vm_zeroidle.c              |   7
-rw-r--r--  sys/vm/vnode_pager.c              |  19
46 files changed, 263 insertions(+), 288 deletions(-)
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index d346d15..bb32980 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -221,8 +221,8 @@ cpu_startup(dummy)
vm_ksubmap_init(&kmi);
printf("avail memory = %ju (%ju MB)\n",
- ptoa((uintmax_t)VMCNT_GET(free_count)),
- ptoa((uintmax_t)VMCNT_GET(free_count)) / 1048576);
+ ptoa((uintmax_t)cnt.v_free_count),
+ ptoa((uintmax_t)cnt.v_free_count) / 1048576);
/*
* Set up buffers, so they can be used to read disk labels.
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 4f33e92..8fb06e1 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -620,7 +620,7 @@ pmap_init(void)
* numbers of pv entries.
*/
TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
- pv_entry_max = shpgperproc * maxproc + VMCNT_GET(page_count);
+ pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
pv_entry_high_water = 9 * (pv_entry_max / 10);
}
@@ -633,7 +633,7 @@ pmap_pventry_proc(SYSCTL_HANDLER_ARGS)
error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
if (error == 0 && req->newptr) {
- shpgperproc = (pv_entry_max - VMCNT_GET(page_count)) / maxproc;
+ shpgperproc = (pv_entry_max - cnt.v_page_count) / maxproc;
pv_entry_high_water = 9 * (pv_entry_max / 10);
}
return (error);
@@ -648,7 +648,7 @@ pmap_shpgperproc_proc(SYSCTL_HANDLER_ARGS)
error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
if (error == 0 && req->newptr) {
- pv_entry_max = shpgperproc * maxproc + VMCNT_GET(page_count);
+ pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
pv_entry_high_water = 9 * (pv_entry_max / 10);
}
return (error);
@@ -1149,7 +1149,8 @@ _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m,
*/
m->right = *free;
*free = m;
- VMCNT_SUB(wire_count, 1);
+
+ atomic_subtract_int(&cnt.v_wire_count, 1);
return 1;
}
@@ -1459,7 +1460,7 @@ pmap_release(pmap_t pmap)
pmap->pm_pml4[PML4PML4I] = 0; /* Recursive Mapping */
m->wire_count--;
- VMCNT_SUB(wire_count, 1);
+ atomic_subtract_int(&cnt.v_wire_count, 1);
vm_page_free_zero(m);
PMAP_LOCK_DESTROY(pmap);
}
diff --git a/sys/arm/arm/machdep.c b/sys/arm/arm/machdep.c
index 3985511..110d7a6 100644
--- a/sys/arm/arm/machdep.c
+++ b/sys/arm/arm/machdep.c
@@ -286,8 +286,8 @@ cpu_startup(void *dummy)
vm_ksubmap_init(&kmi);
printf("avail memory = %ju (%ju MB)\n",
- (uintmax_t)ptoa(VMCNT_GET(free_count)),
- (uintmax_t)ptoa(VMCNT_GET(free_count)) / 1048576);
+ (uintmax_t)ptoa(cnt.v_free_count),
+ (uintmax_t)ptoa(cnt.v_free_count) / 1048576);
bufinit();
vm_pager_bufferinit();
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 2244956..8b66ba3 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -1970,7 +1970,7 @@ pmap_init(void)
TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
- pv_entry_max = shpgperproc * maxproc + VMCNT_GET(page_count);
+ pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
pv_entry_high_water = 9 * (pv_entry_max / 10);
l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor,
NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
diff --git a/sys/compat/linprocfs/linprocfs.c b/sys/compat/linprocfs/linprocfs.c
index f9f2fd8..787ea1e 100644
--- a/sys/compat/linprocfs/linprocfs.c
+++ b/sys/compat/linprocfs/linprocfs.c
@@ -146,14 +146,14 @@ linprocfs_domeminfo(PFS_FILL_ARGS)
/*
* The correct thing here would be:
*
- memfree = VMCNT_GET(free_count) * PAGE_SIZE;
+ memfree = cnt.v_free_count * PAGE_SIZE;
memused = memtotal - memfree;
*
* but it might mislead linux binaries into thinking there
* is very little memory left, so we cheat and tell them that
* all memory that isn't wired down is free.
*/
- memused = VMCNT_GET(wire_count) * PAGE_SIZE;
+ memused = cnt.v_wire_count * PAGE_SIZE;
memfree = memtotal - memused;
swap_pager_status(&i, &j);
swaptotal = (unsigned long long)i * PAGE_SIZE;
@@ -175,7 +175,7 @@ linprocfs_domeminfo(PFS_FILL_ARGS)
* like unstaticizing it just for linprocfs's sake.
*/
buffers = 0;
- cached = VMCNT_GET(cache_count) * PAGE_SIZE;
+ cached = cnt.v_cache_count * PAGE_SIZE;
sbuf_printf(sb,
" total: used: free: shared: buffers: cached:\n"
@@ -394,12 +394,12 @@ linprocfs_dostat(PFS_FILL_ARGS)
"intr %u\n"
"ctxt %u\n"
"btime %lld\n",
- VMCNT_GET(vnodepgsin),
- VMCNT_GET(vnodepgsout),
- VMCNT_GET(swappgsin),
- VMCNT_GET(swappgsout),
- VMCNT_GET(intr),
- VMCNT_GET(swtch),
+ cnt.v_vnodepgsin,
+ cnt.v_vnodepgsout,
+ cnt.v_swappgsin,
+ cnt.v_swappgsout,
+ cnt.v_intr,
+ cnt.v_swtch,
(long long)boottime.tv_sec);
return (0);
}
diff --git a/sys/compat/linux/linux_misc.c b/sys/compat/linux/linux_misc.c
index 28fc69b..e09310c 100644
--- a/sys/compat/linux/linux_misc.c
+++ b/sys/compat/linux/linux_misc.c
@@ -139,7 +139,7 @@ linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;
sysinfo.totalram = physmem * PAGE_SIZE;
- sysinfo.freeram = sysinfo.totalram - VMCNT_GET(wire_count) * PAGE_SIZE;
+ sysinfo.freeram = sysinfo.totalram - cnt.v_wire_count * PAGE_SIZE;
sysinfo.sharedram = 0;
mtx_lock(&vm_object_list_mtx);
diff --git a/sys/compat/svr4/svr4_misc.c b/sys/compat/svr4/svr4_misc.c
index 403919e..f1f44ea 100644
--- a/sys/compat/svr4/svr4_misc.c
+++ b/sys/compat/svr4/svr4_misc.c
@@ -778,14 +778,14 @@ svr4_sys_sysconfig(td, uap)
#if defined(UVM)
*retval = uvmexp.free; /* XXX: free instead of total */
#else
- *retval = VMCNT_GET(free_count); /* XXX: free instead of total */
+ *retval = cnt.v_free_count; /* XXX: free instead of total */
#endif
break;
case SVR4_CONFIG_AVPHYS_PAGES:
#if defined(UVM)
*retval = uvmexp.active; /* XXX: active instead of avg */
#else
- *retval = VMCNT_GET(active_count); /* XXX: active instead of avg */
+ *retval = cnt.v_active_count; /* XXX: active instead of avg */
#endif
break;
#endif /* NOTYET */
diff --git a/sys/fs/smbfs/smbfs_io.c b/sys/fs/smbfs/smbfs_io.c
index f94bdf6..0f8ed49 100644
--- a/sys/fs/smbfs/smbfs_io.c
+++ b/sys/fs/smbfs/smbfs_io.c
@@ -475,8 +475,8 @@ smbfs_getpages(ap)
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, pages, npages);
- VMCNT_ADD(vnodein, 1);
- VMCNT_ADD(vnodepgsin, npages);
+ cnt.v_vnodein++;
+ cnt.v_vnodepgsin += npages;
iov.iov_base = (caddr_t) kva;
iov.iov_len = count;
@@ -626,8 +626,8 @@ smbfs_putpages(ap)
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, pages, npages);
- VMCNT_ADD(vnodeout, 1);
- VMCNT_ADD(vnodepgsout, count);
+ cnt.v_vnodeout++;
+ cnt.v_vnodepgsout += count;
iov.iov_base = (caddr_t) kva;
iov.iov_len = count;
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index cc6984e..f9398df 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -247,8 +247,8 @@ cpu_startup(dummy)
vm_ksubmap_init(&kmi);
printf("avail memory = %ju (%ju MB)\n",
- ptoa((uintmax_t)VMCNT_GET(free_count)),
- ptoa((uintmax_t)VMCNT_GET(free_count)) / 1048576);
+ ptoa((uintmax_t)cnt.v_free_count),
+ ptoa((uintmax_t)cnt.v_free_count) / 1048576);
/*
* Set up buffers, so they can be used to read disk labels.
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 6a14079..01b2422 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -606,7 +606,7 @@ pmap_init(void)
* numbers of pv entries.
*/
TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
- pv_entry_max = shpgperproc * maxproc + VMCNT_GET(page_count);
+ pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
pv_entry_max = roundup(pv_entry_max, _NPCPV);
pv_entry_high_water = 9 * (pv_entry_max / 10);
@@ -1168,7 +1168,7 @@ _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
pmap->pm_pdir[m->pindex] = 0;
--pmap->pm_stats.resident_count;
- VMCNT_SUB(wire_count, 1);
+ atomic_subtract_int(&cnt.v_wire_count, 1);
/*
* Do an invltlb to make the invalidated mapping
@@ -1536,7 +1536,7 @@ pmap_release(pmap_t pmap)
("pmap_release: got wrong ptd page"));
#endif
m->wire_count--;
- VMCNT_SUB(wire_count, 1);
+ atomic_subtract_int(&cnt.v_wire_count, 1);
vm_page_free_zero(m);
}
PMAP_LOCK_DESTROY(pmap);
diff --git a/sys/ia64/ia64/machdep.c b/sys/ia64/ia64/machdep.c
index 612c0c4..61ba826 100644
--- a/sys/ia64/ia64/machdep.c
+++ b/sys/ia64/ia64/machdep.c
@@ -283,8 +283,8 @@ cpu_startup(dummy)
vm_ksubmap_init(&kmi);
- printf("avail memory = %ld (%ld MB)\n", ptoa(VMCNT_GET(free_count)),
- ptoa(VMCNT_GET(free_count)) / 1048576);
+ printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
+ ptoa(cnt.v_free_count) / 1048576);
if (fpswa_iface == NULL)
printf("Warning: no FPSWA package supplied\n");
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index 0771c6c..8879fd7 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -531,7 +531,7 @@ pmap_init(void)
pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
- pv_entry_max = shpgperproc * maxproc + VMCNT_GET(page_count);
+ pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
pv_entry_high_water = 9 * (pv_entry_max / 10);
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 22241a4..9306e83 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -467,7 +467,7 @@ proc0_init(void *dummy __unused)
p->p_limit->pl_rlimit[RLIMIT_NOFILE].rlim_max = maxfiles;
p->p_limit->pl_rlimit[RLIMIT_NPROC].rlim_cur =
p->p_limit->pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
- i = ptoa(VMCNT_GET(free_count));
+ i = ptoa(cnt.v_free_count);
p->p_limit->pl_rlimit[RLIMIT_RSS].rlim_max = i;
p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = i;
p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = i / 3;
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index fd39e2a..cbcb25a 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -665,20 +665,20 @@ again:
vm_forkproc(td, p2, td2, flags);
if (flags == (RFFDG | RFPROC)) {
- VMCNT_ADD(forks, 1);
- VMCNT_ADD(forkpages, p2->p_vmspace->vm_dsize +
+ atomic_add_int(&cnt.v_forks, 1);
+ atomic_add_int(&cnt.v_forkpages, p2->p_vmspace->vm_dsize +
p2->p_vmspace->vm_ssize);
} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
- VMCNT_ADD(vforks, 1);
- VMCNT_ADD(vforkpages, p2->p_vmspace->vm_dsize +
+ atomic_add_int(&cnt.v_vforks, 1);
+ atomic_add_int(&cnt.v_vforkpages, p2->p_vmspace->vm_dsize +
p2->p_vmspace->vm_ssize);
} else if (p1 == &proc0) {
- VMCNT_ADD(kthreads, 1);
- VMCNT_ADD(kthreadpages, p2->p_vmspace->vm_dsize +
+ atomic_add_int(&cnt.v_kthreads, 1);
+ atomic_add_int(&cnt.v_kthreadpages, p2->p_vmspace->vm_dsize +
p2->p_vmspace->vm_ssize);
} else {
- VMCNT_ADD(rforks, 1);
- VMCNT_ADD(rforkpages, p2->p_vmspace->vm_dsize +
+ atomic_add_int(&cnt.v_rforks, 1);
+ atomic_add_int(&cnt.v_rforkpages, p2->p_vmspace->vm_dsize +
p2->p_vmspace->vm_ssize);
}
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index 219d47d..9a99539 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -547,7 +547,7 @@ kmeminit(void *dummy)
* so make sure that there is enough space.
*/
vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE;
- mem_size = VMCNT_GET(page_count);
+ mem_size = cnt.v_page_count;
#if defined(VM_KMEM_SIZE_SCALE)
vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
@@ -585,8 +585,8 @@ kmeminit(void *dummy)
* to something sane. Be careful to not overflow the 32bit
* ints while doing the check.
*/
- if (((vm_kmem_size / 2) / PAGE_SIZE) > VMCNT_GET(page_count))
- vm_kmem_size = 2 * VMCNT_GET(page_count) * PAGE_SIZE;
+ if (((vm_kmem_size / 2) / PAGE_SIZE) > cnt.v_page_count)
+ vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;
/*
* Tune settings based on the kernel map's size at this time.
@@ -646,8 +646,7 @@ malloc_init(void *data)
struct malloc_type_internal *mtip;
struct malloc_type *mtp;
- KASSERT(VMCNT_GET(page_count) != 0,
- ("malloc_register before vm_init"));
+ KASSERT(cnt.v_page_count != 0, ("malloc_register before vm_init"));
mtp = data;
mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
diff --git a/sys/kern/kern_mib.c b/sys/kern/kern_mib.c
index 2aeb03a..52f8754 100644
--- a/sys/kern/kern_mib.c
+++ b/sys/kern/kern_mib.c
@@ -189,7 +189,7 @@ sysctl_hw_usermem(SYSCTL_HANDLER_ARGS)
{
u_long val;
- val = ctob(physmem - VMCNT_GET(wire_count));
+ val = ctob(physmem - cnt.v_wire_count);
return (sysctl_handle_long(oidp, &val, 0, req));
}
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 9d571b0..d61dddf 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -433,7 +433,7 @@ mi_switch(int flags, struct thread *newtd)
/*
* Finish up stats for outgoing thread.
*/
- VMCNT_ADD(swtch, 1);
+ cnt.v_swtch++;
PCPU_SET(switchtime, new_switchtime);
PCPU_SET(switchticks, ticks);
CTR4(KTR_PROC, "mi_switch: old thread %ld (kse %p, pid %ld, %s)",
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index c967eac..e83bf7e 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -407,7 +407,7 @@ thread_exit(void)
p->p_rux.rux_iticks += td->td_iticks;
PCPU_SET(switchtime, new_switchtime);
PCPU_SET(switchticks, ticks);
- VMCNT_ADD(swtch, 1);
+ cnt.v_swtch++;
/* Add our usage into the usage of all our children. */
if (p->p_numthreads == 1)
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index db81011..15c8fdd 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -191,8 +191,8 @@ ast(struct trapframe *framep)
#endif
td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK |
TDF_NEEDRESCHED | TDF_INTERRUPT);
+ cnt.v_trap++;
mtx_unlock_spin(&sched_lock);
- VMCNT_ADD(trap, 1);
/*
* XXXKSE While the fact that we owe a user profiling
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index f0a2286..44879ff 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -2919,10 +2919,8 @@ allocbuf(struct buf *bp, int size)
*/
if ((curproc != pageproc) &&
(VM_PAGE_INQUEUE1(m, PQ_CACHE)) &&
- ((VMCNT_GET(free_count) +
- VMCNT_GET(cache_count)) <
- (VMCNT_GET(free_min) +
- VMCNT_GET(cache_min)))) {
+ ((cnt.v_free_count + cnt.v_cache_count) <
+ (cnt.v_free_min + cnt.v_cache_min))) {
pagedaemon_wakeup();
}
vm_page_wire(m);
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 40bdaaa..8e56b16 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -297,9 +297,8 @@ vntblinit(void *dummy __unused)
* of the kernel's heap size is consumed by vnodes and vm
* objects.
*/
- desiredvnodes = min(maxproc + VMCNT_GET(page_count) / 4, 2 *
- vm_kmem_size / (5 * (sizeof(struct vm_object) +
- sizeof(struct vnode))));
+ desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
+ (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
if (desiredvnodes > MAXVNODES_MAX) {
if (bootverbose)
printf("Reducing kern.maxvnodes %d -> %d\n",
@@ -582,7 +581,7 @@ vlrureclaim(struct mount *mp)
usevnodes = desiredvnodes;
if (usevnodes <= 0)
usevnodes = 1;
- trigger = VMCNT_GET(page_count) * 2 / usevnodes;
+ trigger = cnt.v_page_count * 2 / usevnodes;
done = 0;
td = curthread;
vn_start_write(NULL, &mp, V_WAIT);
diff --git a/sys/nfsclient/nfs_bio.c b/sys/nfsclient/nfs_bio.c
index 38bc475..e42a356 100644
--- a/sys/nfsclient/nfs_bio.c
+++ b/sys/nfsclient/nfs_bio.c
@@ -159,8 +159,8 @@ nfs_getpages(struct vop_getpages_args *ap)
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, pages, npages);
- VMCNT_ADD(vnodein, 1);
- VMCNT_ADD(vnodepgsin, npages);
+ cnt.v_vnodein++;
+ cnt.v_vnodepgsin += npages;
iov.iov_base = (caddr_t) kva;
iov.iov_len = count;
@@ -323,8 +323,8 @@ nfs_putpages(struct vop_putpages_args *ap)
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, pages, npages);
- VMCNT_ADD(vnodeout, 1);
- VMCNT_ADD(vnodepgsout, count);
+ cnt.v_vnodeout++;
+ cnt.v_vnodepgsout += count;
iov.iov_base = (caddr_t) kva;
iov.iov_len = count;
diff --git a/sys/pc98/pc98/machdep.c b/sys/pc98/pc98/machdep.c
index 349ba22..b5b61b2 100644
--- a/sys/pc98/pc98/machdep.c
+++ b/sys/pc98/pc98/machdep.c
@@ -244,8 +244,8 @@ cpu_startup(dummy)
vm_ksubmap_init(&kmi);
printf("avail memory = %ju (%ju MB)\n",
- ptoa((uintmax_t)VMCNT_GET(free_count)),
- ptoa((uintmax_t)VMCNT_GET(free_count)) / 1048576);
+ ptoa((uintmax_t)cnt.v_free_count),
+ ptoa((uintmax_t)cnt.v_free_count) / 1048576);
/*
* Set up buffers, so they can be used to read disk labels.
diff --git a/sys/powerpc/aim/machdep.c b/sys/powerpc/aim/machdep.c
index 6f31c87..98c4051 100644
--- a/sys/powerpc/aim/machdep.c
+++ b/sys/powerpc/aim/machdep.c
@@ -218,8 +218,8 @@ cpu_startup(void *dummy)
vm_ksubmap_init(&kmi);
- printf("avail memory = %ld (%ld MB)\n", ptoa(VMCNT_GET(free_count)),
- ptoa(VMCNT_GET(free_count)) / 1048576);
+ printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
+ ptoa(cnt.v_free_count) / 1048576);
/*
* Set up buffers, so they can be used to read disk labels.
diff --git a/sys/powerpc/powerpc/machdep.c b/sys/powerpc/powerpc/machdep.c
index 6f31c87..98c4051 100644
--- a/sys/powerpc/powerpc/machdep.c
+++ b/sys/powerpc/powerpc/machdep.c
@@ -218,8 +218,8 @@ cpu_startup(void *dummy)
vm_ksubmap_init(&kmi);
- printf("avail memory = %ld (%ld MB)\n", ptoa(VMCNT_GET(free_count)),
- ptoa(VMCNT_GET(free_count)) / 1048576);
+ printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
+ ptoa(cnt.v_free_count) / 1048576);
/*
* Set up buffers, so they can be used to read disk labels.
diff --git a/sys/sparc64/sparc64/machdep.c b/sys/sparc64/sparc64/machdep.c
index 218131c..6a589b4 100644
--- a/sys/sparc64/sparc64/machdep.c
+++ b/sys/sparc64/sparc64/machdep.c
@@ -211,8 +211,8 @@ cpu_startup(void *arg)
EVENTHANDLER_REGISTER(shutdown_final, sparc64_shutdown_final, NULL,
SHUTDOWN_PRI_LAST);
- printf("avail memory = %lu (%lu MB)\n", VMCNT_GET(free_count) *
- PAGE_SIZE, VMCNT_GET(free_count) / ((1024 * 1024) / PAGE_SIZE));
+ printf("avail memory = %lu (%lu MB)\n", cnt.v_free_count * PAGE_SIZE,
+ cnt.v_free_count / ((1024 * 1024) / PAGE_SIZE));
if (bootverbose)
printf("machine: %s\n", sparc64_model);
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index da8bcf0..bf0cb0b 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1088,7 +1088,7 @@ pmap_release(pmap_t pm)
("pmap_release: freeing held tsb page"));
m->md.pmap = NULL;
m->wire_count--;
- VMCNT_SUB(wire_count, 1);
+ atomic_subtract_int(&cnt.v_wire_count, 1);
vm_page_free_zero(m);
vm_page_unlock_queues();
}
diff --git a/sys/sun4v/sun4v/machdep.c b/sys/sun4v/sun4v/machdep.c
index 728a503..9039ca9 100644
--- a/sys/sun4v/sun4v/machdep.c
+++ b/sys/sun4v/sun4v/machdep.c
@@ -233,8 +233,8 @@ cpu_startup(void *arg)
EVENTHANDLER_REGISTER(shutdown_final, sparc64_shutdown_final, NULL,
SHUTDOWN_PRI_LAST);
- printf("avail memory = %lu (%lu MB)\n", VMCNT_GET(free_count) *
- PAGE_SIZE, VMCNT_GET(free_count) / ((1024 * 1024) / PAGE_SIZE));
+ printf("avail memory = %lu (%lu MB)\n", cnt.v_free_count * PAGE_SIZE,
+ cnt.v_free_count / ((1024 * 1024) / PAGE_SIZE));
if (bootverbose)
printf("machine: %s\n", sparc64_model);
diff --git a/sys/sun4v/sun4v/pmap.c b/sys/sun4v/sun4v/pmap.c
index 9f80247..f2fa3ea 100644
--- a/sys/sun4v/sun4v/pmap.c
+++ b/sys/sun4v/sun4v/pmap.c
@@ -973,7 +973,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
* way below the low water mark of free pages or way
* above high water mark of used pv entries.
*/
- if (VMCNT_GET(free_count) < VMCNT_GET(free_reserved) ||
+ if (cnt.v_free_count < cnt.v_free_reserved ||
pv_entry_count > pv_entry_high_water)
return;
@@ -1316,7 +1316,7 @@ pmap_free_contig_pages(void *ptr, int npages)
m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS((vm_offset_t)ptr));
for (i = 0; i < npages; i++, m++) {
m->wire_count--;
- VMCNT_SUB(wire_count, 1);
+ atomic_subtract_int(&cnt.v_wire_count, 1);
vm_page_free(m);
}
}
@@ -1347,7 +1347,7 @@ pmap_init(void)
pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
- pv_entry_max = shpgperproc * maxproc + VMCNT_GET(page_count);
+ pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
pv_entry_high_water = 9 * (pv_entry_max / 10);
uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
diff --git a/sys/sun4v/sun4v/tsb.c b/sys/sun4v/sun4v/tsb.c
index a317583..7ea8d04 100644
--- a/sys/sun4v/sun4v/tsb.c
+++ b/sys/sun4v/sun4v/tsb.c
@@ -104,7 +104,7 @@ tsb_deinit(hv_tsb_info_t *hvtsb)
m = PHYS_TO_VM_PAGE((vm_paddr_t)hvtsb->hti_ra);
for (i = 0, tm = m; i < TSB_SIZE; i++, m++) {
tm->wire_count--;
- VMCNT_SUB(wire_count, 1);
+ atomic_subtract_int(&cnt.v_wire_count, 1);
vm_page_free(tm);
}
}
diff --git a/sys/sun4v/sun4v/tte_hash.c b/sys/sun4v/sun4v/tte_hash.c
index ca507f7..a60185e 100644
--- a/sys/sun4v/sun4v/tte_hash.c
+++ b/sys/sun4v/sun4v/tte_hash.c
@@ -231,7 +231,7 @@ free_fragment_pages(void *ptr)
for (fh = ptr; fh != NULL; fh = fh->thf_head.fh_next) {
m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS((vm_offset_t)fh));
m->wire_count--;
- VMCNT_SUB(wire_count, 1);
+ atomic_subtract_int(&cnt.v_wire_count, 1);
vm_page_free(m);
}
}
diff --git a/sys/sys/vmmeter.h b/sys/sys/vmmeter.h
index 7defd2d..177986b 100644
--- a/sys/sys/vmmeter.h
+++ b/sys/sys/vmmeter.h
@@ -102,18 +102,7 @@ struct vmmeter {
};
#ifdef _KERNEL
-extern volatile struct vmmeter cnt;
-
-#define VMCNT __DEVOLATILE(struct vmmeter *, &cnt)
-#define VMCNT_SET(member, val) \
- atomic_store_rel_int(__CONCAT(&cnt.v_, member), val)
-#define VMCNT_ADD(member, val) \
- atomic_add_int(__CONCAT(&cnt.v_, member), val)
-#define VMCNT_SUB(member, val) \
- atomic_subtract_int(__CONCAT(&cnt.v_, member), val)
-#define VMCNT_GET(member) (__CONCAT(cnt.v_, member))
-#define VMCNT_PTR(member) \
- __DEVOLATILE(u_int *, __CONCAT(&cnt.v_, member))
+extern struct vmmeter cnt;
/*
* Return TRUE if we are under our reserved low-free-pages threshold
@@ -123,8 +112,7 @@ static __inline
int
vm_page_count_reserved(void)
{
- return (VMCNT_GET(free_reserved) > (VMCNT_GET(free_count) +
- VMCNT_GET(cache_count)));
+ return (cnt.v_free_reserved > (cnt.v_free_count + cnt.v_cache_count));
}
/*
@@ -138,8 +126,7 @@ static __inline
int
vm_page_count_severe(void)
{
- return (VMCNT_GET(free_severe) > (VMCNT_GET(free_count) +
- VMCNT_GET(cache_count)));
+ return (cnt.v_free_severe > (cnt.v_free_count + cnt.v_cache_count));
}
/*
@@ -156,8 +143,7 @@ static __inline
int
vm_page_count_min(void)
{
- return (VMCNT_GET(free_min) > (VMCNT_GET(free_count) +
- VMCNT_GET(cache_count)));
+ return (cnt.v_free_min > (cnt.v_free_count + cnt.v_cache_count));
}
/*
@@ -169,8 +155,7 @@ static __inline
int
vm_page_count_target(void)
{
- return (VMCNT_GET(free_target) > (VMCNT_GET(free_count) +
- VMCNT_GET(cache_count)));
+ return (cnt.v_free_target > (cnt.v_free_count + cnt.v_cache_count));
}
/*
@@ -183,8 +168,8 @@ int
vm_paging_target(void)
{
return (
- (VMCNT_GET(free_target) + VMCNT_GET(cache_min)) -
- (VMCNT_GET(free_count) + VMCNT_GET(cache_count))
+ (cnt.v_free_target + cnt.v_cache_min) -
+ (cnt.v_free_count + cnt.v_cache_count)
);
}
@@ -197,8 +182,8 @@ int
vm_paging_needed(void)
{
return (
- (VMCNT_GET(free_reserved) + VMCNT_GET(cache_min)) >
- (VMCNT_GET(free_count) + VMCNT_GET(cache_count))
+ (cnt.v_free_reserved + cnt.v_cache_min) >
+ (cnt.v_free_count + cnt.v_cache_count)
);
}
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 10de8a4..121c4a5 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -385,7 +385,7 @@ swap_pager_swap_init(void)
* can hold 16 pages, so this is probably overkill. This reservation
* is typically limited to around 32MB by default.
*/
- n = VMCNT_GET(page_count) / 2;
+ n = cnt.v_page_count / 2;
if (maxswzone && n > maxswzone / sizeof(struct swblock))
n = maxswzone / sizeof(struct swblock);
n2 = n;
@@ -1037,8 +1037,8 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
}
bp->b_npages = j - i;
- VMCNT_ADD(swapin, 1);
- VMCNT_ADD(swappgsin, bp->b_npages);
+ cnt.v_swapin++;
+ cnt.v_swappgsin += bp->b_npages;
/*
* We still hold the lock on mreq, and our automatic completion routine
@@ -1072,7 +1072,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
vm_page_lock_queues();
vm_page_flag_set(mreq, PG_REFERENCED);
vm_page_unlock_queues();
- VMCNT_ADD(intrans, 1);
+ cnt.v_intrans++;
if (msleep(mreq, VM_OBJECT_MTX(object), PSWP, "swread", hz*20)) {
printf(
"swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
@@ -1263,8 +1263,8 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
bp->b_dirtyoff = 0;
bp->b_dirtyend = bp->b_bcount;
- VMCNT_ADD(swapout, 1);
- VMCNT_ADD(swappgsout, bp->b_npages);
+ cnt.v_swapout++;
+ cnt.v_swappgsout += bp->b_npages;
/*
* asynchronous
@@ -2135,8 +2135,8 @@ swapoff_one(struct swdevt *sp, struct thread *td)
* of data we will have to page back in, plus an epsilon so
* the system doesn't become critically low on swap space.
*/
- if (VMCNT_GET(free_count) + VMCNT_GET(cache_count) +
- swap_pager_avail < nblks + nswap_lowat) {
+ if (cnt.v_free_count + cnt.v_cache_count + swap_pager_avail <
+ nblks + nswap_lowat) {
return (ENOMEM);
}
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index dc87672..eb00bfe 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -271,7 +271,7 @@ SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
static void
bucket_enable(void)
{
- if (VMCNT_GET(free_count) < VMCNT_GET(free_min))
+ if (cnt.v_free_count < cnt.v_free_min)
bucketdisable = 1;
else
bucketdisable = 0;
diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c
index 8278c14..b26c46f 100644
--- a/sys/vm/vm_contig.c
+++ b/sys/vm/vm_contig.c
@@ -204,7 +204,7 @@ again:
* Find first page in array that is free, within range,
* aligned, and such that the boundary won't be crossed.
*/
- for (i = start; i < VMCNT_GET(page_count); i++) {
+ for (i = start; i < cnt.v_page_count; i++) {
phys = VM_PAGE_TO_PHYS(&pga[i]);
pqtype = pga[i].queue - pga[i].pc;
if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
@@ -217,7 +217,7 @@ again:
/*
* If the above failed or we will exceed the upper bound, fail.
*/
- if ((i == VMCNT_GET(page_count)) ||
+ if ((i == cnt.v_page_count) ||
((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
mtx_unlock(&vm_page_queue_free_mtx);
/*
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index cd5de64..5855430 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1271,8 +1271,7 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
* try to do any readahead that we might have free pages for.
*/
if ((rahead + rbehind) >
- ((VMCNT_GET(free_count) + VMCNT_GET(cache_count)) -
- VMCNT_GET(free_reserved))) {
+ ((cnt.v_free_count + cnt.v_cache_count) - cnt.v_free_reserved)) {
pagedaemon_wakeup();
marray[0] = m;
*reqpage = 0;
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 4981efc..a3749bf 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -219,7 +219,7 @@ vslock(void *addr, size_t len)
* Also, the sysctl code, which is the only present user
* of vslock(), does a hard loop on EAGAIN.
*/
- if (npages + VMCNT_GET(wire_count) > vm_page_max_wired)
+ if (npages + cnt.v_wire_count > vm_page_max_wired)
return (EAGAIN);
#endif
error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
@@ -589,7 +589,7 @@ vm_init_limits(udata)
limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
/* limit the limit to no less than 2MB */
- rss_limit = max(VMCNT_GET(free_count), 512);
+ rss_limit = max(cnt.v_free_count, 512);
limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 7e518606..9fe60b7 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -274,7 +274,7 @@ vmspace_alloc(min, max)
void
vm_init2(void)
{
- uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(VMCNT_GET(page_count),
+ uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
(VM_MAX_KERNEL_ADDRESS - KERNBASE) / PAGE_SIZE) / 8 +
maxproc * 2 + maxfiles);
vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
@@ -1489,7 +1489,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
* free pages allocating pv entries.
*/
if ((flags & MAP_PREFAULT_MADVISE) &&
- VMCNT_GET(free_count) < VMCNT_GET(free_reserved)) {
+ cnt.v_free_count < cnt.v_free_reserved) {
psize = tmpidx;
break;
}
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 14c9f5d..d4b51e7 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -52,26 +52,26 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_object.h>
#include <sys/sysctl.h>
-volatile struct vmmeter cnt;
+struct vmmeter cnt;
int maxslp = MAXSLP;
SYSCTL_UINT(_vm, VM_V_FREE_MIN, v_free_min,
- CTLFLAG_RW, VMCNT_PTR(free_min), 0, "");
+ CTLFLAG_RW, &cnt.v_free_min, 0, "");
SYSCTL_UINT(_vm, VM_V_FREE_TARGET, v_free_target,
- CTLFLAG_RW, VMCNT_PTR(free_target), 0, "");
+ CTLFLAG_RW, &cnt.v_free_target, 0, "");
SYSCTL_UINT(_vm, VM_V_FREE_RESERVED, v_free_reserved,
- CTLFLAG_RW, VMCNT_PTR(free_reserved), 0, "");
+ CTLFLAG_RW, &cnt.v_free_reserved, 0, "");
SYSCTL_UINT(_vm, VM_V_INACTIVE_TARGET, v_inactive_target,
- CTLFLAG_RW, VMCNT_PTR(inactive_target), 0, "");
+ CTLFLAG_RW, &cnt.v_inactive_target, 0, "");
SYSCTL_UINT(_vm, VM_V_CACHE_MIN, v_cache_min,
- CTLFLAG_RW, VMCNT_PTR(cache_min), 0, "");
+ CTLFLAG_RW, &cnt.v_cache_min, 0, "");
SYSCTL_UINT(_vm, VM_V_CACHE_MAX, v_cache_max,
- CTLFLAG_RW, VMCNT_PTR(cache_max), 0, "");
+ CTLFLAG_RW, &cnt.v_cache_max, 0, "");
SYSCTL_UINT(_vm, VM_V_PAGEOUT_FREE_MIN, v_pageout_free_min,
- CTLFLAG_RW, VMCNT_PTR(pageout_free_min), 0, "");
+ CTLFLAG_RW, &cnt.v_pageout_free_min, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, v_free_severe,
- CTLFLAG_RW, VMCNT_PTR(free_severe), 0, "");
+ CTLFLAG_RW, &cnt.v_free_severe, 0, "");
static int
sysctl_vm_loadavg(SYSCTL_HANDLER_ARGS)
@@ -235,7 +235,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
}
}
mtx_unlock(&vm_object_list_mtx);
- total.t_free = VMCNT_GET(free_count) + VMCNT_GET(cache_count);
+ total.t_free = cnt.v_free_count + cnt.v_cache_count;
return (sysctl_handle_opaque(oidp, &total, sizeof(total), req));
}
@@ -255,7 +255,7 @@ static int
vcnt(SYSCTL_HANDLER_ARGS)
{
int count = *(int *)arg1;
- int offset = (char *)arg1 - (char *)VMCNT;
+ int offset = (char *)arg1 - (char *)&cnt;
#ifdef SMP
int i;
@@ -280,103 +280,101 @@ static SYSCTL_NODE(_vm_stats, OID_AUTO, vm, CTLFLAG_RW, 0,
SYSCTL_NODE(_vm_stats, OID_AUTO, misc, CTLFLAG_RW, 0, "VM meter misc stats");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_swtch, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(swtch), 0, vcnt, "IU", "Context switches");
+ &cnt.v_swtch, 0, vcnt, "IU", "Context switches");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_trap, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(trap), 0, vcnt, "IU", "Traps");
+ &cnt.v_trap, 0, vcnt, "IU", "Traps");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_syscall, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(syscall), 0, vcnt, "IU", "Syscalls");
+ &cnt.v_syscall, 0, vcnt, "IU", "Syscalls");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intr, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(intr), 0, vcnt, "IU", "Hardware interrupts");
+ &cnt.v_intr, 0, vcnt, "IU", "Hardware interrupts");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_soft, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(soft), 0, vcnt, "IU", "Software interrupts");
+ &cnt.v_soft, 0, vcnt, "IU", "Software interrupts");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vm_faults, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(vm_faults), 0, vcnt, "IU", "VM faults");
+ &cnt.v_vm_faults, 0, vcnt, "IU", "VM faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cow_faults, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(cow_faults), 0, vcnt, "IU", "COW faults");
+ &cnt.v_cow_faults, 0, vcnt, "IU", "COW faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cow_optim, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(cow_optim), 0, vcnt, "IU", "Optimized COW faults");
+ &cnt.v_cow_optim, 0, vcnt, "IU", "Optimized COW faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_zfod, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(zfod), 0, vcnt, "IU", "Zero fill");
+ &cnt.v_zfod, 0, vcnt, "IU", "Zero fill");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_ozfod, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(ozfod), 0, vcnt, "IU", "Optimized zero fill");
+ &cnt.v_ozfod, 0, vcnt, "IU", "Optimized zero fill");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swapin, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(swapin), 0, vcnt, "IU", "Swapin operations");
+ &cnt.v_swapin, 0, vcnt, "IU", "Swapin operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swapout, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(swapout), 0, vcnt, "IU", "Swapout operations");
+ &cnt.v_swapout, 0, vcnt, "IU", "Swapout operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swappgsin, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(swappgsin), 0, vcnt, "IU", "Swapin pages");
+ &cnt.v_swappgsin, 0, vcnt, "IU", "Swapin pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swappgsout, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(swappgsout), 0, vcnt, "IU", "Swapout pages");
+ &cnt.v_swappgsout, 0, vcnt, "IU", "Swapout pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodein, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(vnodein), 0, vcnt, "IU", "Vnodein operations");
+ &cnt.v_vnodein, 0, vcnt, "IU", "Vnodein operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodeout, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(vnodeout), 0, vcnt, "IU", "Vnodeout operations");
+ &cnt.v_vnodeout, 0, vcnt, "IU", "Vnodeout operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodepgsin, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(vnodepgsin), 0, vcnt, "IU", "Vnodein pages");
+ &cnt.v_vnodepgsin, 0, vcnt, "IU", "Vnodein pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodepgsout, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(vnodepgsout), 0, vcnt, "IU", "Vnodeout pages");
+ &cnt.v_vnodepgsout, 0, vcnt, "IU", "Vnodeout pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_intrans, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(intrans), 0, vcnt, "IU", "In transit page blocking");
+ &cnt.v_intrans, 0, vcnt, "IU", "In transit page blocking");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_reactivated, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(reactivated), 0, vcnt, "IU", "Reactivated pages");
+ &cnt.v_reactivated, 0, vcnt, "IU", "Reactivated pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdwakeups, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(pdwakeups), 0, vcnt, "IU", "Pagedaemon wakeups");
+ &cnt.v_pdwakeups, 0, vcnt, "IU", "Pagedaemon wakeups");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdpages, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(pdpages), 0, vcnt, "IU", "Pagedaemon page scans");
+ &cnt.v_pdpages, 0, vcnt, "IU", "Pagedaemon page scans");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_dfree, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(dfree), 0, vcnt, "IU", "");
+ &cnt.v_dfree, 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pfree, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(pfree), 0, vcnt, "IU", "");
+ &cnt.v_pfree, 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_tfree, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(tfree), 0, vcnt, "IU", "");
+ &cnt.v_tfree, 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_page_size, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(page_size), 0, vcnt, "IU", "");
+ &cnt.v_page_size, 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_page_count, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(page_count), 0, vcnt, "IU", "");
+ &cnt.v_page_count, 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_free_reserved, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(free_reserved), 0, vcnt, "IU", "");
+ &cnt.v_free_reserved, 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_free_target, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(free_target), 0, vcnt, "IU", "");
+ &cnt.v_free_target, 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_free_min, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(free_min), 0, vcnt, "IU", "");
+ &cnt.v_free_min, 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_free_count, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(free_count), 0, vcnt, "IU", "");
+ &cnt.v_free_count, 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_wire_count, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(wire_count), 0, vcnt, "IU", "");
+ &cnt.v_wire_count, 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_active_count, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(active_count), 0, vcnt, "IU", "");
+ &cnt.v_active_count, 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_inactive_target, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(inactive_target), 0, vcnt, "IU", "");
+ &cnt.v_inactive_target, 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_inactive_count, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(inactive_count), 0, vcnt, "IU", "");
+ &cnt.v_inactive_count, 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cache_count, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(cache_count), 0, vcnt, "IU", "");
+ &cnt.v_cache_count, 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cache_min, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(cache_min), 0, vcnt, "IU", "");
+ &cnt.v_cache_min, 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cache_max, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(cache_max), 0, vcnt, "IU", "");
+ &cnt.v_cache_max, 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pageout_free_min, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(pageout_free_min), 0, vcnt, "IU", "");
-SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_interrupt_free_min, CTLTYPE_UINT |
- CTLFLAG_RD, VMCNT_PTR(interrupt_free_min), 0, vcnt, "IU", "");
+ &cnt.v_pageout_free_min, 0, vcnt, "IU", "");
+SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_interrupt_free_min, CTLTYPE_UINT|CTLFLAG_RD,
+ &cnt.v_interrupt_free_min, 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_forks, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(forks), 0, vcnt, "IU", "Number of fork() calls");
+ &cnt.v_forks, 0, vcnt, "IU", "Number of fork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vforks, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(vforks), 0, vcnt, "IU", "Number of vfork() calls");
+ &cnt.v_vforks, 0, vcnt, "IU", "Number of vfork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_rforks, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(rforks), 0, vcnt, "IU", "Number of rfork() calls");
+ &cnt.v_rforks, 0, vcnt, "IU", "Number of rfork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreads, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(kthreads), 0, vcnt, "IU",
- "Number of fork() calls by kernel");
+ &cnt.v_kthreads, 0, vcnt, "IU", "Number of fork() calls by kernel");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_forkpages, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(forkpages), 0, vcnt, "IU", "VM pages affected by fork()");
+ &cnt.v_forkpages, 0, vcnt, "IU", "VM pages affected by fork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vforkpages, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(vforkpages), 0, vcnt, "IU", "VM pages affected by vfork()");
+ &cnt.v_vforkpages, 0, vcnt, "IU", "VM pages affected by vfork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_rforkpages, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(rforkpages), 0, vcnt, "IU", "VM pages affected by rfork()");
+ &cnt.v_rforkpages, 0, vcnt, "IU", "VM pages affected by rfork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreadpages, CTLTYPE_UINT|CTLFLAG_RD,
- VMCNT_PTR(kthreadpages), 0, vcnt, "IU",
- "VM pages affected by fork() by kernel");
+ &cnt.v_kthreadpages, 0, vcnt, "IU", "VM pages affected by fork() by kernel");
SYSCTL_INT(_vm_stats_misc, OID_AUTO,
zero_page_count, CTLFLAG_RD, &vm_page_zero_count, 0, "");
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 901ff13..ff1ba18 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -974,7 +974,7 @@ mlock(td, uap)
return (ENOMEM);
}
PROC_UNLOCK(proc);
- if (npages + VMCNT_GET(wire_count) > vm_page_max_wired)
+ if (npages + cnt.v_wire_count > vm_page_max_wired)
return (EAGAIN);
error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index c12095a..dfcade1 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -655,7 +655,7 @@ vm_object_terminate(vm_object_t object)
"p->busy = %d, p->flags %x\n", p, p->busy, p->flags));
if (p->wire_count == 0) {
vm_page_free(p);
- VMCNT_ADD(pfree, 1);
+ cnt.v_pfree++;
} else {
vm_page_remove(p);
}
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 2e97237..d4f8148 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -151,9 +151,9 @@ SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
void
vm_set_page_size(void)
{
- if (VMCNT_GET(page_size) == 0)
- VMCNT_SET(page_size, PAGE_SIZE);
- if (((VMCNT_GET(page_size) - 1) & VMCNT_GET(page_size)) != 0)
+ if (cnt.v_page_size == 0)
+ cnt.v_page_size = PAGE_SIZE;
+ if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
panic("vm_set_page_size: page size not a power of two");
}
@@ -357,8 +357,8 @@ vm_page_startup(vm_offset_t vaddr)
* last rather than first. On large-memory machines, this avoids
* the exhaustion of low physical memory before isa_dma_init has run.
*/
- VMCNT_SET(page_count, 0);
- VMCNT_SET(free_count, 0);
+ cnt.v_page_count = 0;
+ cnt.v_free_count = 0;
list = getenv("vm.blacklist");
for (i = 0; phys_avail[i + 1] != 0; i += 2) {
pa = phys_avail[i];
@@ -874,11 +874,11 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
loop:
mtx_lock(&vm_page_queue_free_mtx);
- if (VMCNT_GET(free_count) > VMCNT_GET(free_reserved) ||
+ if (cnt.v_free_count > cnt.v_free_reserved ||
(page_req == VM_ALLOC_SYSTEM &&
- VMCNT_GET(cache_count) == 0 &&
- VMCNT_GET(free_count) > VMCNT_GET(interrupt_free_min)) ||
- (page_req == VM_ALLOC_INTERRUPT && VMCNT_GET(free_count) > 0)) {
+ cnt.v_cache_count == 0 &&
+ cnt.v_free_count > cnt.v_interrupt_free_min) ||
+ (page_req == VM_ALLOC_INTERRUPT && cnt.v_free_count > 0)) {
/*
* Allocate from the free queue if the number of free pages
* exceeds the minimum for the request class.
@@ -893,9 +893,9 @@ loop:
*/
vm_page_lock_queues();
if ((m = vm_page_select_cache(color)) == NULL) {
- KASSERT(VMCNT_GET(cache_count) == 0,
+ KASSERT(cnt.v_cache_count == 0,
("vm_page_alloc: cache queue is missing %d pages",
- VMCNT_GET(cache_count)));
+ cnt.v_cache_count));
vm_page_unlock_queues();
atomic_add_int(&vm_pageout_deficit, 1);
pagedaemon_wakeup();
@@ -904,8 +904,7 @@ loop:
return (NULL);
mtx_lock(&vm_page_queue_free_mtx);
- if (VMCNT_GET(free_count) <=
- VMCNT_GET(interrupt_free_min)) {
+ if (cnt.v_free_count <= cnt.v_interrupt_free_min) {
mtx_unlock(&vm_page_queue_free_mtx);
return (NULL);
}
@@ -955,7 +954,7 @@ loop:
else
m->oflags = VPO_BUSY;
if (req & VM_ALLOC_WIRED) {
- VMCNT_ADD(wire_count, 1);
+ atomic_add_int(&cnt.v_wire_count, 1);
m->wire_count = 1;
} else
m->wire_count = 0;
@@ -1001,8 +1000,8 @@ vm_wait(void)
vm_pages_needed = 1;
wakeup(&vm_pages_needed);
}
- msleep(VMCNT_PTR(free_count), &vm_page_queue_free_mtx, PDROP |
- PVM, "vmwait", 0);
+ msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
+ "vmwait", 0);
}
}
@@ -1025,7 +1024,7 @@ vm_waitpfault(void)
vm_pages_needed = 1;
wakeup(&vm_pages_needed);
}
- msleep(VMCNT_PTR(free_count), &vm_page_queue_free_mtx, PDROP | PUSER,
+ msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
"pfault", 0);
}
@@ -1046,7 +1045,7 @@ vm_page_activate(vm_page_t m)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if (VM_PAGE_GETKNOWNQUEUE2(m) != PQ_ACTIVE) {
if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
- VMCNT_ADD(reactivated, 1);
+ cnt.v_reactivated++;
vm_pageq_remove(m);
if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
if (m->act_count < ACT_INIT)
@@ -1079,8 +1078,7 @@ vm_page_free_wakeup(void)
* some free.
*/
if (vm_pageout_pages_needed &&
- VMCNT_GET(cache_count) + VMCNT_GET(free_count) >=
- VMCNT_GET(pageout_free_min)) {
+ cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
wakeup(&vm_pageout_pages_needed);
vm_pageout_pages_needed = 0;
}
@@ -1091,7 +1089,7 @@ vm_page_free_wakeup(void)
*/
if (vm_pages_needed && !vm_page_count_min()) {
vm_pages_needed = 0;
- wakeup(VMCNT_PTR(free_count));
+ wakeup(&cnt.v_free_count);
}
}
@@ -1114,7 +1112,7 @@ vm_page_free_toq(vm_page_t m)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
KASSERT(!pmap_page_is_mapped(m),
("vm_page_free_toq: freeing mapped page %p", m));
- VMCNT_ADD(tfree, 1);
+ cnt.v_tfree++;
if (m->busy || VM_PAGE_INQUEUE1(m, PQ_FREE)) {
printf(
@@ -1205,7 +1203,7 @@ vm_page_wire(vm_page_t m)
if (m->wire_count == 0) {
if ((m->flags & PG_UNMANAGED) == 0)
vm_pageq_remove(m);
- VMCNT_ADD(wire_count, 1);
+ atomic_add_int(&cnt.v_wire_count, 1);
}
m->wire_count++;
KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
@@ -1249,7 +1247,7 @@ vm_page_unwire(vm_page_t m, int activate)
if (m->wire_count > 0) {
m->wire_count--;
if (m->wire_count == 0) {
- VMCNT_SUB(wire_count, 1);
+ atomic_subtract_int(&cnt.v_wire_count, 1);
if (m->flags & PG_UNMANAGED) {
;
} else if (activate)
@@ -1288,7 +1286,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
return;
if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
- VMCNT_ADD(reactivated, 1);
+ cnt.v_reactivated++;
vm_page_flag_clear(m, PG_WINATCFLS);
vm_pageq_remove(m);
if (athead)
@@ -1297,7 +1295,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
VM_PAGE_SETQUEUE2(m, PQ_INACTIVE);
vm_page_queues[PQ_INACTIVE].lcnt++;
- VMCNT_ADD(inactive_count, 1);
+ cnt.v_inactive_count++;
}
}
@@ -1782,16 +1780,16 @@ vm_page_cowsetup(vm_page_t m)
DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
- db_printf("cnt.v_free_count: %d\n", VMCNT_GET(free_count));
- db_printf("cnt.v_cache_count: %d\n", VMCNT_GET(cache_count));
- db_printf("cnt.v_inactive_count: %d\n", VMCNT_GET(inactive_count));
- db_printf("cnt.v_active_count: %d\n", VMCNT_GET(active_count));
- db_printf("cnt.v_wire_count: %d\n", VMCNT_GET(wire_count));
- db_printf("cnt.v_free_reserved: %d\n", VMCNT_GET(free_reserved));
- db_printf("cnt.v_free_min: %d\n", VMCNT_GET(free_min));
- db_printf("cnt.v_free_target: %d\n", VMCNT_GET(free_target));
- db_printf("cnt.v_cache_min: %d\n", VMCNT_GET(cache_min));
- db_printf("cnt.v_inactive_target: %d\n", VMCNT_GET(inactive_target));
+ db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
+ db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
+ db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
+ db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
+ db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
+ db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
+ db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
+ db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
+ db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
+ db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}
DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index d3c14ba..c0611ba 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -538,7 +538,7 @@ vm_pageout_object_deactivate_pages(pmap, first_object, desired)
goto unlock_return;
}
next = TAILQ_NEXT(p, listq);
- VMCNT_ADD(pdpages, 1);
+ cnt.v_pdpages++;
if (p->wire_count != 0 ||
p->hold_count != 0 ||
p->busy != 0 ||
@@ -739,13 +739,13 @@ vm_pageout_scan(int pass)
vm_page_lock_queues();
rescan0:
addl_page_shortage = addl_page_shortage_init;
- maxscan = VMCNT_GET(inactive_count);
+ maxscan = cnt.v_inactive_count;
for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
m != NULL && maxscan-- > 0 && page_shortage > 0;
m = next) {
- VMCNT_ADD(pdpages, 1);
+ cnt.v_pdpages++;
if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE) {
goto rescan0;
@@ -856,7 +856,7 @@ rescan0:
* Invalid pages can be easily freed
*/
vm_page_free(m);
- VMCNT_ADD(dfree, 1);
+ cnt.v_dfree++;
--page_shortage;
} else if (m->dirty == 0) {
/*
@@ -1043,8 +1043,8 @@ unlock_and_continue:
* Compute the number of pages we want to try to move from the
* active queue to the inactive queue.
*/
- page_shortage = vm_paging_target() + VMCNT_GET(inactive_target) -
- VMCNT_GET(inactive_count);
+ page_shortage = vm_paging_target() +
+ cnt.v_inactive_target - cnt.v_inactive_count;
page_shortage += addl_page_shortage;
/*
@@ -1052,7 +1052,7 @@ unlock_and_continue:
* track the per-page activity counter and use it to locate
* deactivation candidates.
*/
- pcount = VMCNT_GET(active_count);
+ pcount = cnt.v_active_count;
m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
@@ -1089,7 +1089,7 @@ unlock_and_continue:
* The count for pagedaemon pages is done after checking the
* page for eligibility...
*/
- VMCNT_ADD(pdpages, 1);
+ cnt.v_pdpages++;
/*
* Check to see "how much" the page has been used.
@@ -1149,9 +1149,8 @@ unlock_and_continue:
*/
cache_cur = cache_last_free;
cache_first_failure = -1;
- while (VMCNT_GET(free_count) < VMCNT_GET(free_reserved) &&
- (cache_cur = (cache_cur + PQ_PRIME2) & PQ_COLORMASK) !=
- cache_first_failure) {
+ while (cnt.v_free_count < cnt.v_free_reserved && (cache_cur =
+ (cache_cur + PQ_PRIME2) & PQ_COLORMASK) != cache_first_failure) {
TAILQ_FOREACH(m, &vm_page_queues[PQ_CACHE + cache_cur].pl,
pageq) {
KASSERT(m->dirty == 0,
@@ -1169,7 +1168,7 @@ unlock_and_continue:
m));
vm_page_free(m);
VM_OBJECT_UNLOCK(object);
- VMCNT_ADD(dfree, 1);
+ cnt.v_dfree++;
cache_last_free = cache_cur;
cache_first_failure = -1;
break;
@@ -1292,7 +1291,7 @@ unlock_and_continue:
sched_nice(bigproc, PRIO_MIN);
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(bigproc);
- wakeup(VMCNT_PTR(free_count));
+ wakeup(&cnt.v_free_count);
}
}
mtx_unlock(&Giant);
@@ -1315,18 +1314,16 @@ vm_pageout_page_stats()
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
page_shortage =
- (VMCNT_GET(inactive_target) + VMCNT_GET(cache_max) +
- VMCNT_GET(free_min)) - (VMCNT_GET(free_count) +
- VMCNT_GET(inactive_count) + VMCNT_GET(cache_count));
+ (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
+ (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
if (page_shortage <= 0)
return;
- pcount = VMCNT_GET(active_count);
+ pcount = cnt.v_active_count;
fullintervalcount += vm_pageout_stats_interval;
if (fullintervalcount < vm_pageout_full_stats_interval) {
- tpcount = (vm_pageout_stats_max * VMCNT_GET(active_count)) /
- VMCNT_GET(page_count);
+ tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
if (pcount > tpcount)
pcount = tpcount;
} else {
@@ -1412,8 +1409,8 @@ vm_pageout()
/*
* Initialize some paging parameters.
*/
- VMCNT_SET(interrupt_free_min, 2);
- if (VMCNT_GET(page_count) < 2000)
+ cnt.v_interrupt_free_min = 2;
+ if (cnt.v_page_count < 2000)
vm_pageout_page_count = 8;
/*
@@ -1421,16 +1418,17 @@ vm_pageout()
* swap pager structures plus enough for any pv_entry structs
* when paging.
*/
- VMCNT_SET(free_min, (VMCNT_GET(page_count) > 1024) ? (4 +
- (VMCNT_GET(page_count) - 1024) / 200) : 4);
- VMCNT_SET(pageout_free_min, (2 * MAXBSIZE) / PAGE_SIZE +
- VMCNT_GET(interrupt_free_min));
- VMCNT_SET(free_reserved, vm_pageout_page_count +
- VMCNT_GET(pageout_free_min) + (VMCNT_GET(page_count) / 768) +
- PQ_NUMCOLORS);
- VMCNT_SET(free_severe, VMCNT_GET(free_min) / 2);
- VMCNT_ADD(free_min, VMCNT_GET(free_reserved));
- VMCNT_ADD(free_severe, VMCNT_GET(free_reserved));
+ if (cnt.v_page_count > 1024)
+ cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
+ else
+ cnt.v_free_min = 4;
+ cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
+ cnt.v_interrupt_free_min;
+ cnt.v_free_reserved = vm_pageout_page_count +
+ cnt.v_pageout_free_min + (cnt.v_page_count / 768) + PQ_NUMCOLORS;
+ cnt.v_free_severe = cnt.v_free_min / 2;
+ cnt.v_free_min += cnt.v_free_reserved;
+ cnt.v_free_severe += cnt.v_free_reserved;
/*
* v_free_target and v_cache_min control pageout hysteresis. Note
@@ -1443,27 +1441,29 @@ vm_pageout()
* be big enough to handle memory needs while the pageout daemon
* is signalled and run to free more pages.
*/
- VMCNT_SET(free_target, ((VMCNT_GET(free_count) > 6144) ? 4 : 2) *
- VMCNT_GET(free_min) + VMCNT_GET(free_reserved));
-
- if (VMCNT_GET(free_count) > 2048) {
- VMCNT_SET(cache_min, VMCNT_GET(free_target));
- VMCNT_SET(cache_max, 2 * VMCNT_GET(cache_min));
- VMCNT_SET(inactive_target, (3 * VMCNT_GET(free_target) / 2));
+ if (cnt.v_free_count > 6144)
+ cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
+ else
+ cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
+
+ if (cnt.v_free_count > 2048) {
+ cnt.v_cache_min = cnt.v_free_target;
+ cnt.v_cache_max = 2 * cnt.v_cache_min;
+ cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
} else {
- VMCNT_SET(cache_min, 0);
- VMCNT_SET(cache_max, 0);
- VMCNT_SET(inactive_target, VMCNT_GET(free_count) / 4);
+ cnt.v_cache_min = 0;
+ cnt.v_cache_max = 0;
+ cnt.v_inactive_target = cnt.v_free_count / 4;
}
- if (VMCNT_GET(inactive_target) > VMCNT_GET(free_count) / 3)
- VMCNT_SET(inactive_target, VMCNT_GET(free_count) / 3);
+ if (cnt.v_inactive_target > cnt.v_free_count / 3)
+ cnt.v_inactive_target = cnt.v_free_count / 3;
/* XXX does not really belong here */
if (vm_page_max_wired == 0)
- vm_page_max_wired = VMCNT_GET(free_count) / 3;
+ vm_page_max_wired = cnt.v_free_count / 3;
if (vm_pageout_stats_max == 0)
- vm_pageout_stats_max = VMCNT_GET(free_target);
+ vm_pageout_stats_max = cnt.v_free_target;
/*
* Set interval in seconds for stats scan.
@@ -1489,7 +1489,7 @@ vm_pageout()
if (vm_pages_needed && !vm_page_count_min()) {
if (!vm_paging_needed())
vm_pages_needed = 0;
- wakeup(VMCNT_PTR(free_count));
+ wakeup(&cnt.v_free_count);
}
if (vm_pages_needed) {
/*
@@ -1524,7 +1524,7 @@ vm_pageout()
}
}
if (vm_pages_needed)
- VMCNT_ADD(pdwakeups, 1);
+ cnt.v_pdwakeups++;
mtx_unlock(&vm_page_queue_free_mtx);
vm_pageout_scan(pass);
}
diff --git a/sys/vm/vm_pageq.c b/sys/vm/vm_pageq.c
index 6f6aadf..1b3e9a4 100644
--- a/sys/vm/vm_pageq.c
+++ b/sys/vm/vm_pageq.c
@@ -140,14 +140,14 @@ vm_pageq_init(void)
vm_coloring_init();
for (i = 0; i < PQ_NUMCOLORS; ++i) {
- vm_page_queues[PQ_FREE+i].cnt = VMCNT_PTR(free_count);
+ vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
}
for (i = 0; i < PQ_NUMCOLORS; ++i) {
- vm_page_queues[PQ_CACHE + i].cnt = VMCNT_PTR(cache_count);
+ vm_page_queues[PQ_CACHE + i].cnt = &cnt.v_cache_count;
}
- vm_page_queues[PQ_INACTIVE].cnt = VMCNT_PTR(inactive_count);
- vm_page_queues[PQ_ACTIVE].cnt = VMCNT_PTR(active_count);
- vm_page_queues[PQ_HOLD].cnt = VMCNT_PTR(active_count);
+ vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
+ vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
+ vm_page_queues[PQ_HOLD].cnt = &cnt.v_active_count;
for (i = 0; i < PQ_COUNT; i++) {
TAILQ_INIT(&vm_page_queues[i].pl);
@@ -192,7 +192,7 @@ vm_pageq_add_new_page(vm_paddr_t pa)
{
vm_page_t m;
- VMCNT_ADD(page_count, 1);
+ atomic_add_int(&cnt.v_page_count, 1);
m = PHYS_TO_VM_PAGE(pa);
m->phys_addr = pa;
m->flags = 0;
diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c
index 29144ed..5af84e0 100644
--- a/sys/vm/vm_zeroidle.c
+++ b/sys/vm/vm_zeroidle.c
@@ -90,10 +90,9 @@ vm_page_zero_check(void)
* fast sleeps. We also do not want to be continuously zeroing
* pages because doing so may flush our L1 and L2 caches too much.
*/
- if (zero_state && vm_page_zero_count >=
- ZIDLE_LO(VMCNT_GET(free_count)))
+ if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
return (0);
- if (vm_page_zero_count >= ZIDLE_HI(VMCNT_GET(free_count)))
+ if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
return (0);
return (1);
}
@@ -116,7 +115,7 @@ vm_page_zero_idle(void)
vm_pageq_enqueue(PQ_FREE + m->pc, m);
++vm_page_zero_count;
++cnt_prezero;
- if (vm_page_zero_count >= ZIDLE_HI(VMCNT_GET(free_count)))
+ if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
zero_state = 1;
}
free_rover = (free_rover + PQ_PRIME2) & PQ_COLORMASK;
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 837aa46..cb4cc39 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -728,8 +728,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
if (i != reqpage)
vm_page_free(m[i]);
vm_page_unlock_queues();
- VMCNT_ADD(vnodein, 1);
- VMCNT_ADD(vnodepgsin, 1);
+ cnt.v_vnodein++;
+ cnt.v_vnodepgsin++;
error = vnode_pager_input_old(object, m[reqpage]);
VM_OBJECT_UNLOCK(object);
return (error);
@@ -757,8 +757,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
vm_page_free(m[i]);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(object);
- VMCNT_ADD(vnodein, 1);
- VMCNT_ADD(vnodepgsin, 1);
+ cnt.v_vnodein++;
+ cnt.v_vnodepgsin++;
return vnode_pager_input_smlfs(object, m[reqpage]);
}
@@ -909,8 +909,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
bp->b_runningbufspace = bp->b_bufsize;
atomic_add_int(&runningbufspace, bp->b_runningbufspace);
- VMCNT_ADD(vnodein, 1);
- VMCNT_ADD(vnodepgsin, count);
+ cnt.v_vnodein++;
+ cnt.v_vnodepgsin += count;
/* do the input */
bp->b_iooffset = dbtob(bp->b_blkno);
@@ -1031,8 +1031,7 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
* daemon up. This should be probably be addressed XXX.
*/
- if ((VMCNT_GET(free_count) + VMCNT_GET(cache_count)) <
- VMCNT_GET(pageout_free_min))
+ if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
sync |= OBJPC_SYNC;
/*
@@ -1158,8 +1157,8 @@ vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
auio.uio_resid = maxsize;
auio.uio_td = (struct thread *) 0;
error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
- VMCNT_ADD(vnodeout, 1);
- VMCNT_ADD(vnodepgsout, ncount);
+ cnt.v_vnodeout++;
+ cnt.v_vnodepgsout += ncount;
if (error) {
if ((ppscheck = ppsratecheck(&lastfail, &curfail, 1)))