author     jhb <jhb@FreeBSD.org>  2004-02-04 21:52:57 +0000
committer  jhb <jhb@FreeBSD.org>  2004-02-04 21:52:57 +0000
commit     279b2b827810d149b5b8453900cdea57874ae234 (patch)
tree       d9c0a05d62914174d6f00ab22300e935c3e6d983 /sys/vm
parent     f7b1079809c2529c50447de59fdce77a7f5a08f5 (diff)
Locking for the per-process resource limits structure.
- struct plimit includes a mutex to protect a reference count.  The plimit
  structure is treated similarly to struct ucred in that it is always copy
  on write, so having a reference to a structure is sufficient to read from
  it without needing a further lock.
- The proc lock protects the p_limit pointer and must be held while reading
  limits from a process to keep the limit structure from changing out from
  under you while reading from it.
- Various global limits that are ints are not protected by a lock since
  int writes are atomic on all the archs we support and thus a lock
  wouldn't buy us anything.
- All accesses to individual resource limits from a process are abstracted
  behind a simple lim_rlimit(), lim_max(), and lim_cur() API that returns
  either an rlimit, or the current or max individual limit of the specified
  resource from a process (see the usage sketch after this list).
- dosetrlimit() was renamed to kern_setrlimit() to match the existing style
  of other similar syscall helper functions.
- The alpha OSF/1 compat layer no longer calls getrlimit() and setrlimit()
  (it didn't use the stackgap when it should have) but uses lim_rlimit()
  and kern_setrlimit() instead.
- The svr4 compat no longer uses the stackgap for resource limits calls,
  but uses lim_rlimit() and kern_setrlimit() instead.
- The ibcs2 compat no longer uses the stackgap for resource limits.  It
  also no longer uses the stackgap for accessing sysctls for the
  ibcs2_sysconf() syscall but uses kernel_sysctl() instead.  As a result,
  ibcs2_sysconf() no longer needs Giant.
- The p_rlimit macro no longer exists.

Submitted by:	mtm (mostly, I only did a few cleanups and catchups)
Tested on:	i386
Compiled on:	alpha, amd64
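
Usage sketch: the sys/vm changes below all follow the same pattern -- snapshot
the limits a code path needs while holding the proc lock, then drop it before
any VM locks are taken and compare against the local copies.  This is only an
illustrative fragment mirroring the calls visible in the diff; the function
name and local variable names are made up for the example.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>

static void
example_limit_snapshot(struct proc *p)
{
	struct rlimit rsslim;
	rlim_t datalim, vmemlim;

	/* Copy the limits out while holding the proc lock... */
	PROC_LOCK(p);
	datalim = lim_cur(p, RLIMIT_DATA);	/* current (soft) limit */
	vmemlim = lim_cur(p, RLIMIT_VMEM);
	lim_rlimit(p, RLIMIT_RSS, &rsslim);	/* copies both cur and max */
	PROC_UNLOCK(p);

	/*
	 * ...then compare against the local copies after the proc lock has
	 * been dropped, e.g. "map->size + grow_amount > vmemlim" once the
	 * map lock is held in vm_map_growstack().
	 */
	(void)datalim;
	(void)vmemlim;
	(void)rsslim;
}
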
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/vm_glue.c     14
-rw-r--r--  sys/vm/vm_map.c      25
-rw-r--r--  sys/vm/vm_mmap.c     35
-rw-r--r--  sys/vm/vm_pageout.c   5
-rw-r--r--  sys/vm/vm_unix.c     12
5 files changed, 59 insertions, 32 deletions
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 6fe4405..15d0fd5 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -682,6 +682,7 @@ vm_init_limits(udata)
void *udata;
{
struct proc *p = udata;
+ struct plimit *limp;
int rss_limit;
/*
@@ -691,14 +692,15 @@ vm_init_limits(udata)
* of memory - half of main memory helps to favor smaller processes,
* and reduces thrashing of the object cache.
*/
- p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
- p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
- p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
- p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
+ limp = p->p_limit;
+ limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
+ limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
+ limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
+ limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
/* limit the limit to no less than 2MB */
rss_limit = max(cnt.v_free_count, 512);
- p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
- p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
+ limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
+ limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}
void
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index f06fd78..5c93a84 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -2467,6 +2467,7 @@ vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
vm_offset_t bot, top;
vm_size_t init_ssize;
int orient, rv;
+ rlim_t vmemlim;
/*
* The stack orientation is piggybacked with the cow argument.
@@ -2483,6 +2484,10 @@ vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
init_ssize = (max_ssize < sgrowsiz) ? max_ssize : sgrowsiz;
+ PROC_LOCK(curthread->td_proc);
+ vmemlim = lim_cur(curthread->td_proc, RLIMIT_VMEM);
+ PROC_UNLOCK(curthread->td_proc);
+
vm_map_lock(map);
/* If addr is already mapped, no go */
@@ -2492,8 +2497,7 @@ vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
}
/* If we would blow our VMEM resource limit, no go */
- if (map->size + init_ssize >
- curthread->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
+ if (map->size + init_ssize > vmemlim) {
vm_map_unlock(map);
return (KERN_NO_SPACE);
}
@@ -2566,11 +2570,17 @@ vm_map_growstack(struct proc *p, vm_offset_t addr)
vm_map_t map = &vm->vm_map;
vm_offset_t end;
size_t grow_amount, max_grow;
+ rlim_t stacklim, vmemlim;
int is_procstack, rv;
GIANT_REQUIRED;
Retry:
+ PROC_LOCK(p);
+ stacklim = lim_cur(p, RLIMIT_STACK);
+ vmemlim = lim_cur(curthread->td_proc, RLIMIT_VMEM);
+ PROC_UNLOCK(p);
+
vm_map_lock_read(map);
/* If addr is already in the entry range, no need to grow.*/
@@ -2658,8 +2668,7 @@ Retry:
* If this is the main process stack, see if we're over the stack
* limit.
*/
- if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
- p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
+ if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
vm_map_unlock_read(map);
return (KERN_NO_SPACE);
}
@@ -2668,14 +2677,12 @@ Retry:
grow_amount = roundup (grow_amount, sgrowsiz);
if (grow_amount > stack_entry->avail_ssize)
grow_amount = stack_entry->avail_ssize;
- if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
- p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
- grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
- ctob(vm->vm_ssize);
+ if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
+ grow_amount = stacklim - ctob(vm->vm_ssize);
}
/* If we would blow our VMEM resource limit, no go */
- if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
+ if (map->size + grow_amount > vmemlim) {
vm_map_unlock_read(map);
return (KERN_NO_SPACE);
}
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index ab876a5..2cad29b 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -266,7 +266,7 @@ mmap(td, uap)
return (EINVAL);
if (addr + size < addr)
return (EINVAL);
- }
+ } else {
/*
* XXX for non-fixed mappings where no hint is provided or
* the hint would fall in the potential heap space,
@@ -275,13 +275,15 @@ mmap(td, uap)
* There should really be a pmap call to determine a reasonable
* location.
*/
- else if (addr == 0 ||
- (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
- addr < round_page((vm_offset_t)vms->vm_daddr +
- td->td_proc->p_rlimit[RLIMIT_DATA].rlim_max)))
- addr = round_page((vm_offset_t)vms->vm_daddr +
- td->td_proc->p_rlimit[RLIMIT_DATA].rlim_max);
-
+ PROC_LOCK(td->td_proc);
+ if (addr == 0 ||
+ (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
+ addr < round_page((vm_offset_t)vms->vm_daddr +
+ lim_max(td->td_proc, RLIMIT_DATA))))
+ addr = round_page((vm_offset_t)vms->vm_daddr +
+ lim_max(td->td_proc, RLIMIT_DATA));
+ PROC_UNLOCK(td->td_proc);
+ }
mtx_lock(&Giant); /* syscall marked mp-safe but isn't */
do {
if (flags & MAP_ANON) {
@@ -1002,9 +1004,13 @@ mlock(td, uap)
return (EAGAIN);
#if 0
+ PROC_LOCK(td->td_proc);
if (size + ptoa(pmap_wired_count(vm_map_pmap(&td->td_proc->p_vmspace->vm_map))) >
- td->td_proc->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
+ lim_cur(td->td_proc, RLIMIT_MEMLOCK)) {
+ PROC_UNLOCK(td->td_proc);
return (ENOMEM);
+ }
+ PROC_UNLOCK(td->td_proc);
#else
error = suser(td);
if (error)
@@ -1044,9 +1050,13 @@ mlockall(td, uap)
* If wiring all pages in the process would cause it to exceed
* a hard resource limit, return ENOMEM.
*/
+ PROC_LOCK(td->td_proc);
if (map->size - ptoa(pmap_wired_count(vm_map_pmap(map)) >
- td->td_proc->p_rlimit[RLIMIT_MEMLOCK].rlim_cur))
+ lim_cur(td->td_proc, RLIMIT_MEMLOCK))) {
+ PROC_UNLOCK(td->td_proc);
return (ENOMEM);
+ }
+ PROC_UNLOCK(td->td_proc);
#else
error = suser(td);
if (error)
@@ -1176,10 +1186,13 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
objsize = size = round_page(size);
+ PROC_LOCK(td->td_proc);
if (td->td_proc->p_vmspace->vm_map.size + size >
- td->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
+ lim_cur(td->td_proc, RLIMIT_VMEM)) {
+ PROC_UNLOCK(td->td_proc);
return(ENOMEM);
}
+ PROC_UNLOCK(td->td_proc);
/*
* We currently can only deal with page aligned file offsets.
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 471f9b3..506f1ec 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -1486,6 +1486,7 @@ vm_daemon()
struct proc *p;
int breakout;
struct thread *td;
+ struct rlimit rsslim;
mtx_lock(&Giant);
while (TRUE) {
@@ -1511,6 +1512,7 @@ vm_daemon()
PROC_UNLOCK(p);
continue;
}
+ lim_rlimit(p, RLIMIT_RSS, &rsslim);
/*
* if the process is in a non-running type state,
* don't touch it.
@@ -1534,8 +1536,7 @@ vm_daemon()
* get a limit
*/
limit = OFF_TO_IDX(
- qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
- p->p_rlimit[RLIMIT_RSS].rlim_max));
+ qmin(rsslim.rlim_cur, rsslim.rlim_max));
/*
* let processes that are swapped out really be
diff --git a/sys/vm/vm_unix.c b/sys/vm/vm_unix.c
index 877609f..6313cc1 100644
--- a/sys/vm/vm_unix.c
+++ b/sys/vm/vm_unix.c
@@ -77,10 +77,16 @@ obreak(td, uap)
{
struct vmspace *vm = td->td_proc->p_vmspace;
vm_offset_t new, old, base;
+ rlim_t datalim, vmemlim;
int rv;
int error = 0;
boolean_t do_map_wirefuture;
+ PROC_LOCK(td->td_proc);
+ datalim = lim_cur(td->td_proc, RLIMIT_DATA);
+ vmemlim = lim_cur(td->td_proc, RLIMIT_VMEM);
+ PROC_UNLOCK(td->td_proc);
+
do_map_wirefuture = FALSE;
new = round_page((vm_offset_t)uap->nsize);
vm_map_lock(&vm->vm_map);
@@ -92,8 +98,7 @@ obreak(td, uap)
* Check the resource limit, but allow a process to reduce
* its usage, even if it remains over the limit.
*/
- if (new - base > td->td_proc->p_rlimit[RLIMIT_DATA].rlim_cur &&
- new > old) {
+ if (new - base > datalim && new > old) {
error = ENOMEM;
goto done;
}
@@ -111,8 +116,7 @@ obreak(td, uap)
goto done;
}
if (new > old) {
- if (vm->vm_map.size + (new - old) >
- td->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
+ if (vm->vm_map.size + (new - old) > vmemlim) {
error = ENOMEM;
goto done;
}