author     trasz <trasz@FreeBSD.org>    2011-07-06 20:06:44 +0000
committer  trasz <trasz@FreeBSD.org>    2011-07-06 20:06:44 +0000
commit     4a17b24427e55ec7e1862b08a0a9247f0717d010 (patch)
tree       443d8da9aef22fdf042ab6f2cf16f5e2d4f04bd3
parent     1adac93bc0e2f40ac5d98734f9451cb1a0f42124 (diff)
download   FreeBSD-src-4a17b24427e55ec7e1862b08a0a9247f0717d010.zip
           FreeBSD-src-4a17b24427e55ec7e1862b08a0a9247f0717d010.tar.gz
All the racct_*() calls need to happen with the proc locked.  Fixing this
won't happen before 9.0.  This commit adds "#ifdef RACCT" around all the
"PROC_LOCK(p); racct_whatever(p, ...); PROC_UNLOCK(p)" instances, in order
to avoid useless locking/unlocking in kernels built without "options RACCT".
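Every hunk below follows the same shape.  As an illustration, the fork1()
change in sys/kern/kern_fork.c reduces to the fragment sketched here (a
standalone sketch rather than a compilable unit; "p" stands for the parent
process pointer, which that hunk calls p1):

#ifdef RACCT
	/*
	 * Charge the new child to the parent's process count.  The proc
	 * lock is only taken when resource accounting is compiled in;
	 * without "options RACCT" the preprocessor removes this block.
	 */
	PROC_LOCK(p);
	error = racct_add(p, RACCT_NPROC, 1);
	PROC_UNLOCK(p);
	if (error != 0)
		return (EAGAIN);
#endif

In a kernel configured without "options RACCT", the whole block compiles
away, so the process lock is never acquired just to make an accounting call
that would be a no-op.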
-rw-r--r--  sys/kern/imgact_elf.c     2
-rw-r--r--  sys/kern/kern_descrip.c  11
-rw-r--r--  sys/kern/kern_exit.c      6
-rw-r--r--  sys/kern/kern_fork.c      6
-rw-r--r--  sys/kern/kern_thr.c       4
-rw-r--r--  sys/kern/sysv_msg.c       8
-rw-r--r--  sys/kern/sysv_sem.c       4
-rw-r--r--  sys/kern/sysv_shm.c       4
-rw-r--r--  sys/vm/swap_pager.c       6
-rw-r--r--  sys/vm/vm_glue.c          4
-rw-r--r--  sys/vm/vm_map.c          10
-rw-r--r--  sys/vm/vm_mmap.c         12
-rw-r--r--  sys/vm/vm_pageout.c       4
-rw-r--r--  sys/vm/vm_unix.c          6
14 files changed, 86 insertions(+), 1 deletion(-)
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index ad79360..45f6d64 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -1115,6 +1115,7 @@ __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags)
hdrsize = 0;
__elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);
+#ifdef RACCT
PROC_LOCK(td->td_proc);
error = racct_add(td->td_proc, RACCT_CORE, hdrsize + seginfo.size);
PROC_UNLOCK(td->td_proc);
@@ -1122,6 +1123,7 @@ __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags)
error = EFAULT;
goto done;
}
+#endif
if (hdrsize + seginfo.size >= limit) {
error = EFAULT;
goto done;
diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c
index 731bb61..b32c28a 100644
--- a/sys/kern/kern_descrip.c
+++ b/sys/kern/kern_descrip.c
@@ -821,6 +821,7 @@ do_dup(struct thread *td, int flags, int old, int new,
* descriptors, just put the limit on the size of the file
* descriptor table.
*/
+#ifdef RACCT
PROC_LOCK(p);
error = racct_set(p, RACCT_NOFILE, new + 1);
PROC_UNLOCK(p);
@@ -829,6 +830,7 @@ do_dup(struct thread *td, int flags, int old, int new,
fdrop(fp, td);
return (EMFILE);
}
+#endif
fdgrowtable(fdp, new + 1);
}
if (fdp->fd_ofiles[new] == NULL)
@@ -1476,7 +1478,10 @@ fdalloc(struct thread *td, int minfd, int *result)
{
struct proc *p = td->td_proc;
struct filedesc *fdp = p->p_fd;
- int fd = -1, maxfd, error;
+ int fd = -1, maxfd;
+#ifdef RACCT
+ int error;
+#endif
FILEDESC_XLOCK_ASSERT(fdp);
@@ -1499,11 +1504,13 @@ fdalloc(struct thread *td, int minfd, int *result)
return (EMFILE);
if (fd < fdp->fd_nfiles)
break;
+#ifdef RACCT
PROC_LOCK(p);
error = racct_set(p, RACCT_NOFILE, min(fdp->fd_nfiles * 2, maxfd));
PROC_UNLOCK(p);
if (error != 0)
return (EMFILE);
+#endif
fdgrowtable(fdp, min(fdp->fd_nfiles * 2, maxfd));
}
@@ -1819,9 +1826,11 @@ fdfree(struct thread *td)
if (fdp == NULL)
return;
+#ifdef RACCT
PROC_LOCK(td->td_proc);
racct_set(td->td_proc, RACCT_NOFILE, 0);
PROC_UNLOCK(td->td_proc);
+#endif
/* Check for special need to clear POSIX style locks */
fdtol = td->td_proc->p_fdtol;
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index bb25d17..30b94b6 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -744,9 +744,11 @@ proc_reap(struct thread *td, struct proc *p, int *status, int options,
* Destroy resource accounting information associated with the process.
*/
racct_proc_exit(p);
+#ifdef RACCT
PROC_LOCK(p->p_pptr);
racct_sub(p->p_pptr, RACCT_NPROC, 1);
PROC_UNLOCK(p->p_pptr);
+#endif
/*
* Free credentials, arguments, and sigacts.
@@ -905,19 +907,23 @@ loop:
void
proc_reparent(struct proc *child, struct proc *parent)
{
+#ifdef RACCT
int locked;
+#endif
sx_assert(&proctree_lock, SX_XLOCKED);
PROC_LOCK_ASSERT(child, MA_OWNED);
if (child->p_pptr == parent)
return;
+#ifdef RACCT
locked = PROC_LOCKED(parent);
if (!locked)
PROC_LOCK(parent);
racct_add_force(parent, RACCT_NPROC, 1);
if (!locked)
PROC_UNLOCK(parent);
+#endif
PROC_LOCK(child->p_pptr);
racct_sub(child->p_pptr, RACCT_NPROC, 1);
sigqueue_take(child->p_ksi);
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 04e635a..a8abd8e 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -734,11 +734,13 @@ fork1(struct thread *td, int flags, int pages, struct proc **procp)
return (fork_norfproc(td, flags));
}
+#ifdef RACCT
PROC_LOCK(p1);
error = racct_add(p1, RACCT_NPROC, 1);
PROC_UNLOCK(p1);
if (error != 0)
return (EAGAIN);
+#endif
mem_charged = 0;
vm2 = NULL;
@@ -822,6 +824,7 @@ fork1(struct thread *td, int flags, int pages, struct proc **procp)
goto fail;
}
+#ifdef RACCT
/*
* After fork, there is exactly one thread running.
*/
@@ -832,6 +835,7 @@ fork1(struct thread *td, int flags, int pages, struct proc **procp)
error = EAGAIN;
goto fail;
}
+#endif
/*
* Increment the count of procs running with this uid. Don't allow
@@ -874,9 +878,11 @@ fail1:
vmspace_free(vm2);
uma_zfree(proc_zone, newproc);
pause("fork", hz / 2);
+#ifdef RACCT
PROC_LOCK(p1);
racct_sub(p1, RACCT_NPROC, 1);
PROC_UNLOCK(p1);
+#endif
return (error);
}
diff --git a/sys/kern/kern_thr.c b/sys/kern/kern_thr.c
index 7011a53..94e41e2 100644
--- a/sys/kern/kern_thr.c
+++ b/sys/kern/kern_thr.c
@@ -185,11 +185,13 @@ create_thread(struct thread *td, mcontext_t *ctx,
}
}
+#ifdef RACCT
PROC_LOCK(td->td_proc);
error = racct_add(p, RACCT_NTHR, 1);
PROC_UNLOCK(td->td_proc);
if (error != 0)
return (EPROCLIM);
+#endif
/* Initialize our td */
newtd = thread_alloc(0);
@@ -277,9 +279,11 @@ create_thread(struct thread *td, mcontext_t *ctx,
return (0);
fail:
+#ifdef RACCT
PROC_LOCK(p);
racct_sub(p, RACCT_NTHR, 1);
PROC_UNLOCK(p);
+#endif
return (error);
}
diff --git a/sys/kern/sysv_msg.c b/sys/kern/sysv_msg.c
index 87d479e..ffd8580 100644
--- a/sys/kern/sysv_msg.c
+++ b/sys/kern/sysv_msg.c
@@ -620,6 +620,7 @@ msgget(td, uap)
error = ENOSPC;
goto done2;
}
+#ifdef RACCT
PROC_LOCK(td->td_proc);
error = racct_add(td->td_proc, RACCT_NMSGQ, 1);
PROC_UNLOCK(td->td_proc);
@@ -627,6 +628,7 @@ msgget(td, uap)
error = ENOSPC;
goto done2;
}
+#endif
DPRINTF(("msqid %d is available\n", msqid));
msqkptr->u.msg_perm.key = key;
msqkptr->u.msg_perm.cuid = cred->cr_uid;
@@ -685,7 +687,9 @@ kern_msgsnd(td, msqid, msgp, msgsz, msgflg, mtype)
register struct msqid_kernel *msqkptr;
register struct msg *msghdr;
short next;
+#ifdef RACCT
size_t saved_msgsz;
+#endif
if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
return (ENOSYS);
@@ -723,6 +727,7 @@ kern_msgsnd(td, msqid, msgp, msgsz, msgflg, mtype)
goto done2;
#endif
+#ifdef RACCT
PROC_LOCK(td->td_proc);
if (racct_add(td->td_proc, RACCT_MSGQQUEUED, 1)) {
PROC_UNLOCK(td->td_proc);
@@ -737,6 +742,7 @@ kern_msgsnd(td, msqid, msgp, msgsz, msgflg, mtype)
goto done2;
}
PROC_UNLOCK(td->td_proc);
+#endif
segs_needed = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz;
DPRINTF(("msgsz=%zu, msgssz=%d, segs_needed=%d\n", msgsz,
@@ -991,12 +997,14 @@ kern_msgsnd(td, msqid, msgp, msgsz, msgflg, mtype)
wakeup(msqkptr);
td->td_retval[0] = 0;
done3:
+#ifdef RACCT
if (error != 0) {
PROC_LOCK(td->td_proc);
racct_sub(td->td_proc, RACCT_MSGQQUEUED, 1);
racct_sub(td->td_proc, RACCT_MSGQSIZE, saved_msgsz);
PROC_UNLOCK(td->td_proc);
}
+#endif
done2:
mtx_unlock(&msq_mtx);
return (error);
diff --git a/sys/kern/sysv_sem.c b/sys/kern/sysv_sem.c
index ac53a8d..4bbe787 100644
--- a/sys/kern/sysv_sem.c
+++ b/sys/kern/sysv_sem.c
@@ -931,6 +931,7 @@ semget(struct thread *td, struct semget_args *uap)
error = ENOSPC;
goto done2;
}
+#ifdef RACCT
PROC_LOCK(td->td_proc);
error = racct_add(td->td_proc, RACCT_NSEM, nsems);
PROC_UNLOCK(td->td_proc);
@@ -938,6 +939,7 @@ semget(struct thread *td, struct semget_args *uap)
error = ENOSPC;
goto done2;
}
+#endif
DPRINTF(("semid %d is available\n", semid));
mtx_lock(&sema_mtx[semid]);
KASSERT((sema[semid].u.sem_perm.mode & SEM_ALLOC) == 0,
@@ -1023,12 +1025,14 @@ semop(struct thread *td, struct semop_args *uap)
nsops));
return (E2BIG);
} else {
+#ifdef RACCT
PROC_LOCK(td->td_proc);
if (nsops > racct_get_available(td->td_proc, RACCT_NSEMOP)) {
PROC_UNLOCK(td->td_proc);
return (E2BIG);
}
PROC_UNLOCK(td->td_proc);
+#endif
sops = malloc(nsops * sizeof(*sops), M_TEMP, M_WAITOK);
}
diff --git a/sys/kern/sysv_shm.c b/sys/kern/sysv_shm.c
index f5a84ae..1741a21 100644
--- a/sys/kern/sysv_shm.c
+++ b/sys/kern/sysv_shm.c
@@ -672,6 +672,7 @@ shmget_allocate_segment(td, uap, mode)
shm_last_free = -1;
}
shmseg = &shmsegs[segnum];
+#ifdef RACCT
PROC_LOCK(td->td_proc);
if (racct_add(td->td_proc, RACCT_NSHM, 1)) {
PROC_UNLOCK(td->td_proc);
@@ -683,6 +684,7 @@ shmget_allocate_segment(td, uap, mode)
return (ENOMEM);
}
PROC_UNLOCK(td->td_proc);
+#endif
/*
* In case we sleep in malloc(), mark the segment present but deleted
* so that noone else tries to create the same key.
@@ -699,10 +701,12 @@ shmget_allocate_segment(td, uap, mode)
shm_object = vm_pager_allocate(shm_use_phys ? OBJT_PHYS : OBJT_SWAP,
0, size, VM_PROT_DEFAULT, 0, cred);
if (shm_object == NULL) {
+#ifdef RACCT
PROC_LOCK(td->td_proc);
racct_sub(td->td_proc, RACCT_NSHM, 1);
racct_sub(td->td_proc, RACCT_SHMSIZE, size);
PROC_UNLOCK(td->td_proc);
+#endif
return (ENOMEM);
}
VM_OBJECT_LOCK(shm_object);
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 2868a2f..f421e4f 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -193,11 +193,13 @@ swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred)
if (incr & PAGE_MASK)
panic("swap_reserve: & PAGE_MASK");
+#ifdef RACCT
PROC_LOCK(curproc);
error = racct_add(curproc, RACCT_SWAP, incr);
PROC_UNLOCK(curproc);
if (error != 0)
return (0);
+#endif
res = 0;
mtx_lock(&sw_dev_mtx);
@@ -237,11 +239,13 @@ swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred)
curproc->p_pid, uip->ui_uid, incr);
}
+#ifdef RACCT
if (!res) {
PROC_LOCK(curproc);
racct_sub(curproc, RACCT_SWAP, incr);
PROC_UNLOCK(curproc);
}
+#endif
return (res);
}
@@ -255,9 +259,11 @@ swap_reserve_force(vm_ooffset_t incr)
swap_reserved += incr;
mtx_unlock(&sw_dev_mtx);
+#ifdef RACCT
PROC_LOCK(curproc);
racct_add_force(curproc, RACCT_SWAP, incr);
PROC_UNLOCK(curproc);
+#endif
uip = curthread->td_ucred->cr_ruidinfo;
PROC_LOCK(curproc);
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index da9b1dc..c552cb7 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -222,12 +222,14 @@ vslock(void *addr, size_t len)
#endif
error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
+#ifdef RACCT
if (error != KERN_SUCCESS) {
PROC_LOCK(curproc);
racct_set(curproc, RACCT_MEMLOCK,
ptoa(pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))));
PROC_UNLOCK(curproc);
}
+#endif
/*
* Return EFAULT on error to match copy{in,out}() behaviour
* rather than returning ENOMEM like mlock() would.
@@ -244,10 +246,12 @@ vsunlock(void *addr, size_t len)
trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
+#ifdef RACCT
PROC_LOCK(curproc);
racct_set(curproc, RACCT_MEMLOCK,
ptoa(pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))));
PROC_UNLOCK(curproc);
+#endif
}
/*
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 31886af..8493478 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -318,6 +318,7 @@ static void
vmspace_container_reset(struct proc *p)
{
+#ifdef RACCT
PROC_LOCK(p);
racct_set(p, RACCT_DATA, 0);
racct_set(p, RACCT_STACK, 0);
@@ -325,6 +326,7 @@ vmspace_container_reset(struct proc *p)
racct_set(p, RACCT_MEMLOCK, 0);
racct_set(p, RACCT_VMEM, 0);
PROC_UNLOCK(p);
+#endif
}
static inline void
@@ -3305,7 +3307,9 @@ vm_map_growstack(struct proc *p, vm_offset_t addr)
#ifdef notyet
uint64_t limit;
#endif
+#ifdef RACCT
int error;
+#endif
Retry:
PROC_LOCK(p);
@@ -3404,6 +3408,7 @@ Retry:
vm_map_unlock_read(map);
return (KERN_NO_SPACE);
}
+#ifdef RACCT
PROC_LOCK(p);
if (is_procstack &&
racct_set(p, RACCT_STACK, ctob(vm->vm_ssize) + grow_amount)) {
@@ -3412,6 +3417,7 @@ Retry:
return (KERN_NO_SPACE);
}
PROC_UNLOCK(p);
+#endif
/* Round up the grow amount modulo SGROWSIZ */
grow_amount = roundup (grow_amount, sgrowsiz);
@@ -3435,6 +3441,7 @@ Retry:
rv = KERN_NO_SPACE;
goto out;
}
+#ifdef RACCT
PROC_LOCK(p);
if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
PROC_UNLOCK(p);
@@ -3443,6 +3450,7 @@ Retry:
goto out;
}
PROC_UNLOCK(p);
+#endif
if (vm_map_lock_upgrade(map))
goto Retry;
@@ -3542,6 +3550,7 @@ Retry:
}
out:
+#ifdef RACCT
if (rv != KERN_SUCCESS) {
PROC_LOCK(p);
error = racct_set(p, RACCT_VMEM, map->size);
@@ -3550,6 +3559,7 @@ out:
KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
PROC_UNLOCK(p);
}
+#endif
return (rv);
}
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 37949a5..e50aa0b 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -1019,19 +1019,23 @@ mlock(td, uap)
PROC_UNLOCK(proc);
if (npages + cnt.v_wire_count > vm_page_max_wired)
return (EAGAIN);
+#ifdef RACCT
PROC_LOCK(proc);
error = racct_set(proc, RACCT_MEMLOCK, nsize);
PROC_UNLOCK(proc);
if (error != 0)
return (ENOMEM);
+#endif
error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
+#ifdef RACCT
if (error != KERN_SUCCESS) {
PROC_LOCK(proc);
racct_set(proc, RACCT_MEMLOCK,
ptoa(pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map))));
PROC_UNLOCK(proc);
}
+#endif
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
@@ -1074,11 +1078,13 @@ mlockall(td, uap)
if (error)
return (error);
#endif
+#ifdef RACCT
PROC_LOCK(td->td_proc);
error = racct_set(td->td_proc, RACCT_MEMLOCK, map->size);
PROC_UNLOCK(td->td_proc);
if (error != 0)
return (ENOMEM);
+#endif
if (uap->how & MCL_FUTURE) {
vm_map_lock(map);
@@ -1098,12 +1104,14 @@ mlockall(td, uap)
VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
error = (error == KERN_SUCCESS ? 0 : EAGAIN);
}
+#ifdef RACCT
if (error != KERN_SUCCESS) {
PROC_LOCK(td->td_proc);
racct_set(td->td_proc, RACCT_MEMLOCK,
ptoa(pmap_wired_count(vm_map_pmap(&td->td_proc->p_vmspace->vm_map))));
PROC_UNLOCK(td->td_proc);
}
+#endif
return (error);
}
@@ -1138,11 +1146,13 @@ munlockall(td, uap)
/* Forcibly unwire all pages. */
error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
+#ifdef RACCT
if (error == KERN_SUCCESS) {
PROC_LOCK(td->td_proc);
racct_set(td->td_proc, RACCT_MEMLOCK, 0);
PROC_UNLOCK(td->td_proc);
}
+#endif
return (error);
}
@@ -1177,11 +1187,13 @@ munlock(td, uap)
return (EINVAL);
error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
+#ifdef RACCT
if (error == KERN_SUCCESS) {
PROC_LOCK(td->td_proc);
racct_sub(td->td_proc, RACCT_MEMLOCK, ptoa(end - start));
PROC_UNLOCK(td->td_proc);
}
+#endif
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 6dfdc4b..e9c9927 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -1640,7 +1640,9 @@ vm_daemon()
struct thread *td;
struct vmspace *vm;
int breakout, swapout_flags, tryagain, attempts;
+#ifdef RACCT
uint64_t rsize, ravailable;
+#endif
while (TRUE) {
mtx_lock(&vm_daemon_mtx);
@@ -1722,6 +1724,7 @@ again:
vm_pageout_map_deactivate_pages(
&vm->vm_map, limit);
}
+#ifdef RACCT
rsize = IDX_TO_OFF(size);
PROC_LOCK(p);
racct_set(p, RACCT_RSS, rsize);
@@ -1750,6 +1753,7 @@ again:
if (rsize > ravailable)
tryagain = 1;
}
+#endif
vmspace_free(vm);
}
sx_sunlock(&allproc_lock);
diff --git a/sys/vm/vm_unix.c b/sys/vm/vm_unix.c
index c07a1c1..6f8e7c8 100644
--- a/sys/vm/vm_unix.c
+++ b/sys/vm/vm_unix.c
@@ -117,6 +117,7 @@ obreak(td, uap)
error = ENOMEM;
goto done;
}
+#ifdef RACCT
PROC_LOCK(td->td_proc);
error = racct_set(td->td_proc, RACCT_DATA, new - base);
if (error != 0) {
@@ -133,13 +134,16 @@ obreak(td, uap)
goto done;
}
PROC_UNLOCK(td->td_proc);
+#endif
rv = vm_map_insert(&vm->vm_map, NULL, 0, old, new,
VM_PROT_RW, VM_PROT_ALL, 0);
if (rv != KERN_SUCCESS) {
+#ifdef RACCT
PROC_LOCK(td->td_proc);
racct_set_force(td->td_proc, RACCT_DATA, old - base);
racct_set_force(td->td_proc, RACCT_VMEM, vm->vm_map.size);
PROC_UNLOCK(td->td_proc);
+#endif
error = ENOMEM;
goto done;
}
@@ -165,10 +169,12 @@ obreak(td, uap)
goto done;
}
vm->vm_dsize -= btoc(old - new);
+#ifdef RACCT
PROC_LOCK(td->td_proc);
racct_set_force(td->td_proc, RACCT_DATA, new - base);
racct_set_force(td->td_proc, RACCT_VMEM, vm->vm_map.size);
PROC_UNLOCK(td->td_proc);
+#endif
}
done:
vm_map_unlock(&vm->vm_map);