summaryrefslogtreecommitdiffstats
path: root/sys/vm
diff options
context:
space:
mode:
authorjulian <julian@FreeBSD.org>1999-01-06 23:05:42 +0000
committerjulian <julian@FreeBSD.org>1999-01-06 23:05:42 +0000
commit4666ac50272776168d29d2c4142de771daa30381 (patch)
tree132bbd3c7ed8de9adf36dcd6258013de903e583a /sys/vm
parent6b0a11c013bb11bbed19aea0a563ebb393a899ef (diff)
downloadFreeBSD-src-4666ac50272776168d29d2c4142de771daa30381.zip
FreeBSD-src-4666ac50272776168d29d2c4142de771daa30381.tar.gz
Add (but don't activate) code for a special VM option to make
downward growing stacks more general. Add (but don't activate) code to use the new stack facility when running threads, (specifically the linux threads support). This allows people to use both linux compiled linuxthreads, and also the native FreeBSD linux-threads port. The code is conditional on VM_STACK. Not using this will produce the old heavily tested system. Submitted by: Richard Seaman <dick@tar.com>
Diffstat (limited to 'sys/vm')
-rw-r--r--sys/vm/vm_extern.h6
-rw-r--r--sys/vm/vm_map.c207
-rw-r--r--sys/vm/vm_map.h9
-rw-r--r--sys/vm/vm_mmap.c17
4 files changed, 235 insertions, 4 deletions
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 34deba7..ca5a53e 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_extern.h 8.2 (Berkeley) 1/12/94
- * $Id: vm_extern.h,v 1.37 1998/01/22 17:30:32 dyson Exp $
+ * $Id: vm_extern.h,v 1.38 1998/06/07 17:13:09 dfr Exp $
*/
#ifndef _VM_EXTERN_H_
@@ -61,7 +61,11 @@ int swapon __P((struct proc *, void *, int *));
#endif
void faultin __P((struct proc *p));
+#ifndef VM_STACK
int grow __P((struct proc *, size_t));
+#else
+int grow_stack __P((struct proc *, size_t));
+#endif
int kernacc __P((caddr_t, int, int));
vm_offset_t kmem_alloc __P((vm_map_t, vm_size_t));
vm_offset_t kmem_alloc_pageable __P((vm_map_t, vm_size_t));
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 83132ad..829548a 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.137 1998/10/13 08:24:43 dg Exp $
+ * $Id: vm_map.c,v 1.138 1998/10/25 17:44:58 phk Exp $
*/
/*
@@ -75,6 +75,9 @@
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
+#ifdef VM_STACK
+#include <sys/resourcevar.h>
+#endif
#include <vm/vm.h>
#include <vm/vm_param.h>
@@ -538,6 +541,10 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
new_entry->eflags = protoeflags;
new_entry->object.vm_object = object;
new_entry->offset = offset;
+#ifdef VM_STACK
+ new_entry->avail_ssize = 0;
+#endif
+
if (object) {
if ((object->ref_count > 1) || (object->shadow_count != 0)) {
vm_object_clear_flag(object, OBJ_ONEMAPPING);
@@ -570,6 +577,204 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
return (KERN_SUCCESS);
}
+#ifdef VM_STACK
+int
+vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
+ vm_prot_t prot, vm_prot_t max, int cow)
+{
+ vm_map_entry_t prev_entry;
+ vm_map_entry_t new_stack_entry;
+ vm_size_t init_ssize;
+ int rv;
+
+ if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
+ return (KERN_NO_SPACE);
+
+ if (max_ssize < SGROWSIZ)
+ init_ssize = max_ssize;
+ else
+ init_ssize = SGROWSIZ;
+
+ vm_map_lock(map);
+
+ /* If addr is already mapped, no go */
+ if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
+ vm_map_unlock(map);
+ return (KERN_NO_SPACE);
+ }
+
+ /* If we can't accommodate max_ssize in the current mapping,
+ * no go. However, we need to be aware that subsequent user
+ * mappings might map into the space we have reserved for
+ * stack, and currently this space is not protected.
+ *
+ * Hopefully we will at least detect this condition
+ * when we try to grow the stack.
+ */
+ if ((prev_entry->next != &map->header) &&
+ (prev_entry->next->start < addrbos + max_ssize)) {
+ vm_map_unlock(map);
+ return (KERN_NO_SPACE);
+ }
+
+ /* We initially map a stack of only init_ssize. We will
+ * grow as needed later. Since this is to be a grow
+ * down stack, we map at the top of the range.
+ *
+ * Note: we would normally expect prot and max to be
+ * VM_PROT_ALL, and cow to be 0. Possibly we should
+ * eliminate these as input parameters, and just
+ * pass these values here in the insert call.
+ */
+ rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize,
+ addrbos + max_ssize, prot, max, cow);
+
+ /* Now set the avail_ssize amount */
+ if (rv == KERN_SUCCESS){
+ new_stack_entry = prev_entry->next;
+ if (new_stack_entry->end != addrbos + max_ssize ||
+ new_stack_entry->start != addrbos + max_ssize - init_ssize)
+ panic ("Bad entry start/end for new stack entry");
+ else
+ new_stack_entry->avail_ssize = max_ssize - init_ssize;
+ }
+
+ vm_map_unlock(map);
+ return (rv);
+}
+
+/* Attempts to grow a vm stack entry. Returns KERN_SUCCESS if the
+ * desired address is already mapped, or if we successfully grow
+ * the stack. Also returns KERN_SUCCESS if addr is outside the
+ * stack range (this is strange, but preserves compatibility with
+ * the grow function in vm_machdep.c).
+ */
+int
+vm_map_growstack (struct proc *p, vm_offset_t addr)
+{
+ vm_map_entry_t prev_entry;
+ vm_map_entry_t stack_entry;
+ vm_map_entry_t new_stack_entry;
+ struct vmspace *vm = p->p_vmspace;
+ vm_map_t map = &vm->vm_map;
+ vm_offset_t end;
+ int grow_amount;
+ int rv;
+ int is_procstack = 0;
+
+ vm_map_lock(map);
+
+ /* If addr is already in the entry range, no need to grow. */
+ if (vm_map_lookup_entry(map, addr, &prev_entry)) {
+ vm_map_unlock(map);
+ return (KERN_SUCCESS);
+ }
+
+ if ((stack_entry = prev_entry->next) == &map->header) {
+ vm_map_unlock(map);
+ return (KERN_SUCCESS);
+ }
+ if (prev_entry == &map->header)
+ end = stack_entry->start - stack_entry->avail_ssize;
+ else
+ end = prev_entry->end;
+
+ /* This next test mimics the old grow function in vm_machdep.c.
+ * It really doesn't quite make sense, but we do it anyway
+ * for compatibility.
+ *
+ * If not growable stack, return success. This signals the
+ * caller to proceed as he would normally with normal vm.
+ */
+ if (stack_entry->avail_ssize < 1 ||
+ addr >= stack_entry->start ||
+ addr < stack_entry->start - stack_entry->avail_ssize) {
+ vm_map_unlock(map);
+ return (KERN_SUCCESS);
+ }
+
+ /* Find the minimum grow amount */
+ grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
+ if (grow_amount > stack_entry->avail_ssize) {
+ vm_map_unlock(map);
+ return (KERN_NO_SPACE);
+ }
+
+ /* If there is no longer enough space between the entries,
+ * no go; adjust the available space. Note: this
+ * should only happen if the user has mapped into the
+ * stack area after the stack was created, and is
+ * probably an error.
+ *
+ * This also effectively destroys any guard page the user
+ * might have intended by limiting the stack size.
+ */
+ if (grow_amount > stack_entry->start - end) {
+ stack_entry->avail_ssize = stack_entry->start - end;
+ vm_map_unlock(map);
+ return (KERN_NO_SPACE);
+ }
+
+ if (addr >= (vm_offset_t)vm->vm_maxsaddr)
+ is_procstack = 1;
+
+ /* If this is the main process stack, see if we're over the
+ * stack limit.
+ */
+ if (is_procstack && (vm->vm_ssize + grow_amount >
+ p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
+ vm_map_unlock(map);
+ return (KERN_NO_SPACE);
+ }
+
+ /* Round up the grow amount to a multiple of SGROWSIZ */
+ grow_amount = roundup (grow_amount, SGROWSIZ);
+ if (grow_amount > stack_entry->avail_ssize) {
+ grow_amount = stack_entry->avail_ssize;
+ }
+ if (is_procstack && (vm->vm_ssize + grow_amount >
+ p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
+ grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
+ vm->vm_ssize;
+ }
+
+ /* Get the preliminary new entry start value */
+ addr = stack_entry->start - grow_amount;
+
+ /* If this puts us into the previous entry, cut back our growth
+ * to the available space. Also, see the note above.
+ */
+ if (addr < end) {
+ stack_entry->avail_ssize = stack_entry->start - end;
+ addr = end;
+ }
+
+ rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
+ stack_entry->protection,
+ stack_entry->max_protection,
+ 0);
+
+ /* Adjust the available stack space by the amount we grew. */
+ if (rv == KERN_SUCCESS) {
+ new_stack_entry = prev_entry->next;
+ if (new_stack_entry->end != stack_entry->start ||
+ new_stack_entry->start != addr)
+ panic ("Bad stack grow start/end in new stack entry");
+ else {
+ new_stack_entry->avail_ssize = stack_entry->avail_ssize -
+ (new_stack_entry->end -
+ new_stack_entry->start);
+ vm->vm_ssize += new_stack_entry->end -
+ new_stack_entry->start;
+ }
+ }
+
+ vm_map_unlock(map);
+ return (rv);
+
+}
+#endif
+
/*
* Find sufficient space for `length' bytes in the given map, starting at
* `start'. The map must be locked. Returns 0 on success, 1 on no space.
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index b7c6cd5..4d61a3f 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.h,v 1.31 1998/01/17 09:16:52 dyson Exp $
+ * $Id: vm_map.h,v 1.32 1998/01/22 17:30:38 dyson Exp $
*/
/*
@@ -102,6 +102,9 @@ struct vm_map_entry {
struct vm_map_entry *next; /* next entry */
vm_offset_t start; /* start address */
vm_offset_t end; /* end address */
+#ifdef VM_STACK
+ vm_offset_t avail_ssize; /* amt can grow if this is a stack */
+#endif
union vm_map_object object; /* object I point to */
vm_ooffset_t offset; /* offset into object */
u_char eflags; /* map entry flags */
@@ -335,6 +338,10 @@ void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t));
void vm_init2 __P((void));
int vm_uiomove __P((vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *));
void vm_freeze_copyopts __P((vm_object_t, vm_pindex_t, vm_pindex_t));
+#ifdef VM_STACK
+int vm_map_stack __P((vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int));
+int vm_map_growstack __P((struct proc *p, vm_offset_t addr));
+#endif
#endif
#endif /* _VM_MAP_ */
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 180ad25..ba36e41 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
*
* @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
- * $Id: vm_mmap.c,v 1.84 1998/10/13 08:24:44 dg Exp $
+ * $Id: vm_mmap.c,v 1.85 1998/12/09 20:22:21 dt Exp $
*/
/*
@@ -177,6 +177,15 @@ mmap(p, uap)
((flags & MAP_ANON) && uap->fd != -1))
return (EINVAL);
+#ifdef VM_STACK
+ if (flags & MAP_STACK) {
+ if ((uap->fd != -1) ||
+ ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
+ return (EINVAL);
+ flags |= MAP_ANON;
+ pos = 0;
+ }
+#endif
/*
* Align the file position to a page boundary,
* and save its page offset component.
@@ -1016,6 +1025,12 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
*addr = pmap_addr_hint(object, *addr, size);
}
+#ifdef VM_STACK
+ if (flags & MAP_STACK)
+ rv = vm_map_stack (map, *addr, size, prot,
+ maxprot, docow);
+ else
+#endif
rv = vm_map_find(map, object, foff, addr, size, fitit,
prot, maxprot, docow);
OpenPOWER on IntegriCloud