summary | refs | log | tree | commit | diff | stats
path: root/sys/vm
diff options
context:
space:
mode:
authortrasz <trasz@FreeBSD.org>2011-07-06 20:06:44 +0000
committertrasz <trasz@FreeBSD.org>2011-07-06 20:06:44 +0000
commit4a17b24427e55ec7e1862b08a0a9247f0717d010 (patch)
tree443d8da9aef22fdf042ab6f2cf16f5e2d4f04bd3 /sys/vm
parent1adac93bc0e2f40ac5d98734f9451cb1a0f42124 (diff)
download: FreeBSD-src-4a17b24427e55ec7e1862b08a0a9247f0717d010.zip
FreeBSD-src-4a17b24427e55ec7e1862b08a0a9247f0717d010.tar.gz
All the racct_*() calls need to happen with the proc locked. Fixing this
won't happen before 9.0. This commit adds "#ifdef RACCT" around all the "PROC_LOCK(p); racct_whatever(p, ...); PROC_UNLOCK(p)" instances, in order to avoid useless locking/unlocking in kernels built without "options RACCT".
Diffstat (limited to 'sys/vm')
-rw-r--r--sys/vm/swap_pager.c6
-rw-r--r--sys/vm/vm_glue.c4
-rw-r--r--sys/vm/vm_map.c10
-rw-r--r--sys/vm/vm_mmap.c12
-rw-r--r--sys/vm/vm_pageout.c4
-rw-r--r--sys/vm/vm_unix.c6
6 files changed, 42 insertions, 0 deletions
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 2868a2f..f421e4f 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -193,11 +193,13 @@ swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred)
if (incr & PAGE_MASK)
panic("swap_reserve: & PAGE_MASK");
+#ifdef RACCT
PROC_LOCK(curproc);
error = racct_add(curproc, RACCT_SWAP, incr);
PROC_UNLOCK(curproc);
if (error != 0)
return (0);
+#endif
res = 0;
mtx_lock(&sw_dev_mtx);
@@ -237,11 +239,13 @@ swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred)
curproc->p_pid, uip->ui_uid, incr);
}
+#ifdef RACCT
if (!res) {
PROC_LOCK(curproc);
racct_sub(curproc, RACCT_SWAP, incr);
PROC_UNLOCK(curproc);
}
+#endif
return (res);
}
@@ -255,9 +259,11 @@ swap_reserve_force(vm_ooffset_t incr)
swap_reserved += incr;
mtx_unlock(&sw_dev_mtx);
+#ifdef RACCT
PROC_LOCK(curproc);
racct_add_force(curproc, RACCT_SWAP, incr);
PROC_UNLOCK(curproc);
+#endif
uip = curthread->td_ucred->cr_ruidinfo;
PROC_LOCK(curproc);
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index da9b1dc..c552cb7 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -222,12 +222,14 @@ vslock(void *addr, size_t len)
#endif
error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
+#ifdef RACCT
if (error != KERN_SUCCESS) {
PROC_LOCK(curproc);
racct_set(curproc, RACCT_MEMLOCK,
ptoa(pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))));
PROC_UNLOCK(curproc);
}
+#endif
/*
* Return EFAULT on error to match copy{in,out}() behaviour
* rather than returning ENOMEM like mlock() would.
@@ -244,10 +246,12 @@ vsunlock(void *addr, size_t len)
trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
+#ifdef RACCT
PROC_LOCK(curproc);
racct_set(curproc, RACCT_MEMLOCK,
ptoa(pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))));
PROC_UNLOCK(curproc);
+#endif
}
/*
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 31886af..8493478 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -318,6 +318,7 @@ static void
vmspace_container_reset(struct proc *p)
{
+#ifdef RACCT
PROC_LOCK(p);
racct_set(p, RACCT_DATA, 0);
racct_set(p, RACCT_STACK, 0);
@@ -325,6 +326,7 @@ vmspace_container_reset(struct proc *p)
racct_set(p, RACCT_MEMLOCK, 0);
racct_set(p, RACCT_VMEM, 0);
PROC_UNLOCK(p);
+#endif
}
static inline void
@@ -3305,7 +3307,9 @@ vm_map_growstack(struct proc *p, vm_offset_t addr)
#ifdef notyet
uint64_t limit;
#endif
+#ifdef RACCT
int error;
+#endif
Retry:
PROC_LOCK(p);
@@ -3404,6 +3408,7 @@ Retry:
vm_map_unlock_read(map);
return (KERN_NO_SPACE);
}
+#ifdef RACCT
PROC_LOCK(p);
if (is_procstack &&
racct_set(p, RACCT_STACK, ctob(vm->vm_ssize) + grow_amount)) {
@@ -3412,6 +3417,7 @@ Retry:
return (KERN_NO_SPACE);
}
PROC_UNLOCK(p);
+#endif
/* Round up the grow amount modulo SGROWSIZ */
grow_amount = roundup (grow_amount, sgrowsiz);
@@ -3435,6 +3441,7 @@ Retry:
rv = KERN_NO_SPACE;
goto out;
}
+#ifdef RACCT
PROC_LOCK(p);
if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
PROC_UNLOCK(p);
@@ -3443,6 +3450,7 @@ Retry:
goto out;
}
PROC_UNLOCK(p);
+#endif
if (vm_map_lock_upgrade(map))
goto Retry;
@@ -3542,6 +3550,7 @@ Retry:
}
out:
+#ifdef RACCT
if (rv != KERN_SUCCESS) {
PROC_LOCK(p);
error = racct_set(p, RACCT_VMEM, map->size);
@@ -3550,6 +3559,7 @@ out:
KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
PROC_UNLOCK(p);
}
+#endif
return (rv);
}
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 37949a5..e50aa0b 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -1019,19 +1019,23 @@ mlock(td, uap)
PROC_UNLOCK(proc);
if (npages + cnt.v_wire_count > vm_page_max_wired)
return (EAGAIN);
+#ifdef RACCT
PROC_LOCK(proc);
error = racct_set(proc, RACCT_MEMLOCK, nsize);
PROC_UNLOCK(proc);
if (error != 0)
return (ENOMEM);
+#endif
error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
+#ifdef RACCT
if (error != KERN_SUCCESS) {
PROC_LOCK(proc);
racct_set(proc, RACCT_MEMLOCK,
ptoa(pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map))));
PROC_UNLOCK(proc);
}
+#endif
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
@@ -1074,11 +1078,13 @@ mlockall(td, uap)
if (error)
return (error);
#endif
+#ifdef RACCT
PROC_LOCK(td->td_proc);
error = racct_set(td->td_proc, RACCT_MEMLOCK, map->size);
PROC_UNLOCK(td->td_proc);
if (error != 0)
return (ENOMEM);
+#endif
if (uap->how & MCL_FUTURE) {
vm_map_lock(map);
@@ -1098,12 +1104,14 @@ mlockall(td, uap)
VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
error = (error == KERN_SUCCESS ? 0 : EAGAIN);
}
+#ifdef RACCT
if (error != KERN_SUCCESS) {
PROC_LOCK(td->td_proc);
racct_set(td->td_proc, RACCT_MEMLOCK,
ptoa(pmap_wired_count(vm_map_pmap(&td->td_proc->p_vmspace->vm_map))));
PROC_UNLOCK(td->td_proc);
}
+#endif
return (error);
}
@@ -1138,11 +1146,13 @@ munlockall(td, uap)
/* Forcibly unwire all pages. */
error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
+#ifdef RACCT
if (error == KERN_SUCCESS) {
PROC_LOCK(td->td_proc);
racct_set(td->td_proc, RACCT_MEMLOCK, 0);
PROC_UNLOCK(td->td_proc);
}
+#endif
return (error);
}
@@ -1177,11 +1187,13 @@ munlock(td, uap)
return (EINVAL);
error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
+#ifdef RACCT
if (error == KERN_SUCCESS) {
PROC_LOCK(td->td_proc);
racct_sub(td->td_proc, RACCT_MEMLOCK, ptoa(end - start));
PROC_UNLOCK(td->td_proc);
}
+#endif
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 6dfdc4b..e9c9927 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -1640,7 +1640,9 @@ vm_daemon()
struct thread *td;
struct vmspace *vm;
int breakout, swapout_flags, tryagain, attempts;
+#ifdef RACCT
uint64_t rsize, ravailable;
+#endif
while (TRUE) {
mtx_lock(&vm_daemon_mtx);
@@ -1722,6 +1724,7 @@ again:
vm_pageout_map_deactivate_pages(
&vm->vm_map, limit);
}
+#ifdef RACCT
rsize = IDX_TO_OFF(size);
PROC_LOCK(p);
racct_set(p, RACCT_RSS, rsize);
@@ -1750,6 +1753,7 @@ again:
if (rsize > ravailable)
tryagain = 1;
}
+#endif
vmspace_free(vm);
}
sx_sunlock(&allproc_lock);
diff --git a/sys/vm/vm_unix.c b/sys/vm/vm_unix.c
index c07a1c1..6f8e7c8 100644
--- a/sys/vm/vm_unix.c
+++ b/sys/vm/vm_unix.c
@@ -117,6 +117,7 @@ obreak(td, uap)
error = ENOMEM;
goto done;
}
+#ifdef RACCT
PROC_LOCK(td->td_proc);
error = racct_set(td->td_proc, RACCT_DATA, new - base);
if (error != 0) {
@@ -133,13 +134,16 @@ obreak(td, uap)
goto done;
}
PROC_UNLOCK(td->td_proc);
+#endif
rv = vm_map_insert(&vm->vm_map, NULL, 0, old, new,
VM_PROT_RW, VM_PROT_ALL, 0);
if (rv != KERN_SUCCESS) {
+#ifdef RACCT
PROC_LOCK(td->td_proc);
racct_set_force(td->td_proc, RACCT_DATA, old - base);
racct_set_force(td->td_proc, RACCT_VMEM, vm->vm_map.size);
PROC_UNLOCK(td->td_proc);
+#endif
error = ENOMEM;
goto done;
}
@@ -165,10 +169,12 @@ obreak(td, uap)
goto done;
}
vm->vm_dsize -= btoc(old - new);
+#ifdef RACCT
PROC_LOCK(td->td_proc);
racct_set_force(td->td_proc, RACCT_DATA, new - base);
racct_set_force(td->td_proc, RACCT_VMEM, vm->vm_map.size);
PROC_UNLOCK(td->td_proc);
+#endif
}
done:
vm_map_unlock(&vm->vm_map);
OpenPOWER on IntegriCloud