summary refs log tree commit diff stats
path: root/sys/kern
diff options
context:
space:
mode:
authorattilio <attilio@FreeBSD.org>2007-05-31 22:52:15 +0000
committerattilio <attilio@FreeBSD.org>2007-05-31 22:52:15 +0000
commit7dd8ed88a925a943f1963baa072f4b6c6a8c9930 (patch)
tree10bf0f11ceeb18c6b03947eb85223abbbbf9cc67 /sys/kern
parent4681b4098bbf12784d009826b2223ace96a2306b (diff)
downloadFreeBSD-src-7dd8ed88a925a943f1963baa072f4b6c6a8c9930.zip
FreeBSD-src-7dd8ed88a925a943f1963baa072f4b6c6a8c9930.tar.gz
Revert VMCNT_* operations introduction.
Probably, a general approach is not the better solution here, so we should solve the sched_lock protection problems separately. Requested by: alc Approved by: jeff (mentor)
Diffstat (limited to 'sys/kern')
-rw-r--r--sys/kern/init_main.c2
-rw-r--r--sys/kern/kern_fork.c16
-rw-r--r--sys/kern/kern_malloc.c9
-rw-r--r--sys/kern/kern_mib.c2
-rw-r--r--sys/kern/kern_synch.c2
-rw-r--r--sys/kern/kern_thread.c2
-rw-r--r--sys/kern/subr_trap.c2
-rw-r--r--sys/kern/vfs_bio.c6
-rw-r--r--sys/kern/vfs_subr.c7
9 files changed, 22 insertions, 26 deletions
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 22241a4..9306e83 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -467,7 +467,7 @@ proc0_init(void *dummy __unused)
p->p_limit->pl_rlimit[RLIMIT_NOFILE].rlim_max = maxfiles;
p->p_limit->pl_rlimit[RLIMIT_NPROC].rlim_cur =
p->p_limit->pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
- i = ptoa(VMCNT_GET(free_count));
+ i = ptoa(cnt.v_free_count);
p->p_limit->pl_rlimit[RLIMIT_RSS].rlim_max = i;
p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = i;
p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = i / 3;
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index fd39e2a..cbcb25a 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -665,20 +665,20 @@ again:
vm_forkproc(td, p2, td2, flags);
if (flags == (RFFDG | RFPROC)) {
- VMCNT_ADD(forks, 1);
- VMCNT_ADD(forkpages, p2->p_vmspace->vm_dsize +
+ atomic_add_int(&cnt.v_forks, 1);
+ atomic_add_int(&cnt.v_forkpages, p2->p_vmspace->vm_dsize +
p2->p_vmspace->vm_ssize);
} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
- VMCNT_ADD(forks, 1);
- VMCNT_ADD(forkpages, p2->p_vmspace->vm_dsize +
+ atomic_add_int(&cnt.v_vforks, 1);
+ atomic_add_int(&cnt.v_vforkpages, p2->p_vmspace->vm_dsize +
p2->p_vmspace->vm_ssize);
} else if (p1 == &proc0) {
- VMCNT_ADD(kthreads, 1);
- VMCNT_ADD(kthreadpages, p2->p_vmspace->vm_dsize +
+ atomic_add_int(&cnt.v_kthreads, 1);
+ atomic_add_int(&cnt.v_kthreadpages, p2->p_vmspace->vm_dsize +
p2->p_vmspace->vm_ssize);
} else {
- VMCNT_ADD(rforks, 1);
- VMCNT_ADD(rforkpages, p2->p_vmspace->vm_dsize +
+ atomic_add_int(&cnt.v_rforks, 1);
+ atomic_add_int(&cnt.v_rforkpages, p2->p_vmspace->vm_dsize +
p2->p_vmspace->vm_ssize);
}
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index 219d47d..9a99539 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -547,7 +547,7 @@ kmeminit(void *dummy)
* so make sure that there is enough space.
*/
vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE;
- mem_size = VMCNT_GET(page_count);
+ mem_size = cnt.v_page_count;
#if defined(VM_KMEM_SIZE_SCALE)
vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
@@ -585,8 +585,8 @@ kmeminit(void *dummy)
* to something sane. Be careful to not overflow the 32bit
* ints while doing the check.
*/
- if (((vm_kmem_size / 2) / PAGE_SIZE) > VMCNT_GET(page_count))
- vm_kmem_size = 2 * VMCNT_GET(page_count) * PAGE_SIZE;
+ if (((vm_kmem_size / 2) / PAGE_SIZE) > cnt.v_page_count)
+ vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;
/*
* Tune settings based on the kernel map's size at this time.
@@ -646,8 +646,7 @@ malloc_init(void *data)
struct malloc_type_internal *mtip;
struct malloc_type *mtp;
- KASSERT(VMCNT_GET(page_count) != 0,
- ("malloc_register before vm_init"));
+ KASSERT(cnt.v_page_count != 0, ("malloc_register before vm_init"));
mtp = data;
mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
diff --git a/sys/kern/kern_mib.c b/sys/kern/kern_mib.c
index 2aeb03a..52f8754 100644
--- a/sys/kern/kern_mib.c
+++ b/sys/kern/kern_mib.c
@@ -189,7 +189,7 @@ sysctl_hw_usermem(SYSCTL_HANDLER_ARGS)
{
u_long val;
- val = ctob(physmem - VMCNT_GET(wire_count));
+ val = ctob(physmem - cnt.v_wire_count);
return (sysctl_handle_long(oidp, &val, 0, req));
}
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 9d571b0..d61dddf 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -433,7 +433,7 @@ mi_switch(int flags, struct thread *newtd)
/*
* Finish up stats for outgoing thread.
*/
- VMCNT_ADD(swtch, 1);
+ cnt.v_swtch++;
PCPU_SET(switchtime, new_switchtime);
PCPU_SET(switchticks, ticks);
CTR4(KTR_PROC, "mi_switch: old thread %ld (kse %p, pid %ld, %s)",
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index c967eac..e83bf7e 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -407,7 +407,7 @@ thread_exit(void)
p->p_rux.rux_iticks += td->td_iticks;
PCPU_SET(switchtime, new_switchtime);
PCPU_SET(switchticks, ticks);
- VMCNT_ADD(swtch, 1);
+ cnt.v_swtch++;
/* Add our usage into the usage of all our children. */
if (p->p_numthreads == 1)
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index db81011..15c8fdd 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -191,8 +191,8 @@ ast(struct trapframe *framep)
#endif
td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK |
TDF_NEEDRESCHED | TDF_INTERRUPT);
+ cnt.v_trap++;
mtx_unlock_spin(&sched_lock);
- VMCNT_ADD(trap, 1);
/*
* XXXKSE While the fact that we owe a user profiling
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index f0a2286..44879ff 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -2919,10 +2919,8 @@ allocbuf(struct buf *bp, int size)
*/
if ((curproc != pageproc) &&
(VM_PAGE_INQUEUE1(m, PQ_CACHE)) &&
- ((VMCNT_GET(free_count) +
- VMCNT_GET(cache_count)) <
- (VMCNT_GET(free_min) +
- VMCNT_GET(cache_min)))) {
+ ((cnt.v_free_count + cnt.v_cache_count) <
+ (cnt.v_free_min + cnt.v_cache_min))) {
pagedaemon_wakeup();
}
vm_page_wire(m);
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 40bdaaa..8e56b16 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -297,9 +297,8 @@ vntblinit(void *dummy __unused)
* of the kernel's heap size is consumed by vnodes and vm
* objects.
*/
- desiredvnodes = min(maxproc + VMCNT_GET(page_count) / 4, 2 *
- vm_kmem_size / (5 * (sizeof(struct vm_object) +
- sizeof(struct vnode))));
+ desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
+ (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
if (desiredvnodes > MAXVNODES_MAX) {
if (bootverbose)
printf("Reducing kern.maxvnodes %d -> %d\n",
@@ -582,7 +581,7 @@ vlrureclaim(struct mount *mp)
usevnodes = desiredvnodes;
if (usevnodes <= 0)
usevnodes = 1;
- trigger = VMCNT_GET(page_count) * 2 / usevnodes;
+ trigger = cnt.v_page_count * 2 / usevnodes;
done = 0;
td = curthread;
vn_start_write(NULL, &mp, V_WAIT);
OpenPOWER on IntegriCloud