diff options
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/imgact_aout.c  |  3 ++-
-rw-r--r--  sys/kern/imgact_elf.c   |  9 ++++++++-
-rw-r--r--  sys/kern/init_main.c    |  1 +
-rw-r--r--  sys/kern/kern_exec.c    |  9 +++++++++
4 files changed, 20 insertions(+), 2 deletions(-)
diff --git a/sys/kern/imgact_aout.c b/sys/kern/imgact_aout.c
index 37e9fe2..4cb7b63 100644
--- a/sys/kern/imgact_aout.c
+++ b/sys/kern/imgact_aout.c
@@ -81,7 +81,8 @@ struct sysentvec aout_sysvec = {
 	PS_STRINGS,
 	VM_PROT_ALL,
 	exec_copyout_strings,
-	exec_setregs
+	exec_setregs,
+	NULL
 };
 
 static int
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index 349078b..330437a 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -806,7 +806,14 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
 		vmspace->vm_dsize = data_size >> PAGE_SHIFT;
 		vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
 
-		addr = ELF_RTLD_ADDR(vmspace);
+		/*
+		 * We load the dynamic linker where a userland call
+		 * to mmap(0, ...) would put it.  The rationale behind this
+		 * calculation is that it leaves room for the heap to grow to
+		 * its maximum allowed size.
+		 */
+		addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
+		    imgp->proc->p_rlimit[RLIMIT_DATA].rlim_max);
 
 	imgp->entry_addr = entry;
 
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index d006522..bff07b1 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -284,6 +284,7 @@ struct sysentvec null_sysvec = {
 	PS_STRINGS,
 	VM_PROT_ALL,
 	NULL,
+	NULL,
 	NULL
 };
 
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 8ff2dda..5b0d44f 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -842,6 +842,15 @@ exec_new_vmspace(imgp, sv)
 	EVENTHANDLER_INVOKE(process_exec, p);
 
 	/*
+	 * Here is as good a place as any to do any resource limit cleanups.
+	 * This is needed if a 64 bit binary exec's a 32 bit binary - the
+	 * data size limit may need to be changed to a value that makes
+	 * sense for the 32 bit binary.
+	 */
+	if (sv->sv_fixlimits)
+		sv->sv_fixlimits(imgp);
+
+	/*
 	 * Blow away entire process VM, if address space not shared,
 	 * otherwise, create a new VM space so that other threads are
 	 * not disrupted