path: root/sys/kern
author     alc <alc@FreeBSD.org>    2012-01-17 00:27:32 +0000
committer  alc <alc@FreeBSD.org>    2012-01-17 00:27:32 +0000
commit     5210c69a89d8c0ae7c56529f6e621298d707bcba (patch)
tree       a432c03be68eee689e30a5ef5389803329a497d7 /sys/kern
parent     6e62eeb5060f7a68d13a2176a7a44f5df6812769 (diff)
Improve abstraction.  Eliminate direct access by elf*_load_section()
to an OBJT_VNODE-specific field of the vm object.  The same information
can be just as easily obtained from the struct vattr that is in struct
image_params if the latter is passed to elf*_load_section().  Moreover,
by replacing the vmspace and vm object parameters to elf*_load_section()
with a struct image_params parameter, we actually reduce the size of the
object code.

In collaboration with:	kib
Diffstat (limited to 'sys/kern')
-rw-r--r--   sys/kern/imgact_elf.c   51
1 file changed, 25 insertions, 26 deletions
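In a nutshell, the change replaces the vmspace and vm object parameters of
__elfN(load_section)() with the struct image_params pointer that every caller
already has, and takes the file size from imgp->attr->va_size instead of the
OBJT_VNODE-specific pager field.  A condensed before/after sketch, drawn from
the diff below (the elided parts of the function body are unchanged):

/* Before: callers pass the vmspace and the backing vm object explicitly. */
static int
__elfN(load_section)(struct vmspace *vmspace, vm_object_t object,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot, size_t pagesize)
{
	...
	/* Reaches into an OBJT_VNODE-specific field of the vm object. */
	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
	    filsz > memsz) {
		uprintf("elf_load_section: truncated ELF file\n");
		return (ENOEXEC);
	}
	...
}

/*
 * After: struct image_params already carries the vnode attributes and the
 * process, so the file size and the target map are both derived from imgp.
 */
static int
__elfN(load_section)(struct image_params *imgp, vm_offset_t offset,
    caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
    size_t pagesize)
{
	vm_map_t map;
	vm_object_t object;

	...
	if ((off_t)filsz + offset > imgp->attr->va_size || filsz > memsz) {
		uprintf("elf_load_section: truncated ELF file\n");
		return (ENOEXEC);
	}
	object = imgp->object;
	map = &imgp->proc->p_vmspace->vm_map;
	...
}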
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index 9794544..f7e5752 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -86,9 +86,9 @@ static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
const char *interp, int32_t *osrel);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
u_long *entry, size_t pagesize);
-static int __elfN(load_section)(struct vmspace *vmspace, vm_object_t object,
- vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
- vm_prot_t prot, size_t pagesize);
+static int __elfN(load_section)(struct image_params *imgp, vm_offset_t offset,
+ caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
+ size_t pagesize);
static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
static boolean_t __elfN(freebsd_trans_osrel)(const Elf_Note *note,
int32_t *osrel);
@@ -445,13 +445,14 @@ __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
}
static int
-__elfN(load_section)(struct vmspace *vmspace,
- vm_object_t object, vm_offset_t offset,
- caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
- size_t pagesize)
+__elfN(load_section)(struct image_params *imgp, vm_offset_t offset,
+ caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
+ size_t pagesize)
{
struct sf_buf *sf;
size_t map_len;
+ vm_map_t map;
+ vm_object_t object;
vm_offset_t map_addr;
int error, rv, cow;
size_t copy_len;
@@ -466,12 +467,13 @@ __elfN(load_section)(struct vmspace *vmspace,
* While I'm here, might as well check for something else that
* is invalid: filsz cannot be greater than memsz.
*/
- if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
- filsz > memsz) {
+ if ((off_t)filsz + offset > imgp->attr->va_size || filsz > memsz) {
uprintf("elf_load_section: truncated ELF file\n");
return (ENOEXEC);
}
+ object = imgp->object;
+ map = &imgp->proc->p_vmspace->vm_map;
map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
file_addr = trunc_page_ps(offset, pagesize);
@@ -491,7 +493,7 @@ __elfN(load_section)(struct vmspace *vmspace,
cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
(prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
- rv = __elfN(map_insert)(&vmspace->vm_map,
+ rv = __elfN(map_insert)(map,
object,
file_addr, /* file offset */
map_addr, /* virtual start */
@@ -521,8 +523,8 @@ __elfN(load_section)(struct vmspace *vmspace,
/* This had damn well better be true! */
if (map_len != 0) {
- rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
- map_addr + map_len, VM_PROT_ALL, 0);
+ rv = __elfN(map_insert)(map, NULL, 0, map_addr, map_addr +
+ map_len, VM_PROT_ALL, 0);
if (rv != KERN_SUCCESS) {
return (EINVAL);
}
@@ -550,8 +552,8 @@ __elfN(load_section)(struct vmspace *vmspace,
* set it to the specified protection.
* XXX had better undo the damage from pasting over the cracks here!
*/
- vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
- round_page(map_addr + map_len), prot, FALSE);
+ vm_map_protect(map, trunc_page(map_addr), round_page(map_addr +
+ map_len), prot, FALSE);
return (0);
}
@@ -580,7 +582,6 @@ __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
const Elf_Ehdr *hdr = NULL;
const Elf_Phdr *phdr = NULL;
struct nameidata *nd;
- struct vmspace *vmspace = p->p_vmspace;
struct vattr *attr;
struct image_params *imgp;
vm_prot_t prot;
@@ -672,11 +673,10 @@ __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
if (phdr[i].p_type == PT_LOAD && phdr[i].p_memsz != 0) {
/* Loadable segment */
prot = __elfN(trans_prot)(phdr[i].p_flags);
- if ((error = __elfN(load_section)(vmspace,
- imgp->object, phdr[i].p_offset,
+ error = __elfN(load_section)(imgp, phdr[i].p_offset,
(caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
- phdr[i].p_memsz, phdr[i].p_filesz, prot,
- pagesize)) != 0)
+ phdr[i].p_memsz, phdr[i].p_filesz, prot, pagesize);
+ if (error != 0)
goto fail;
/*
* Establish the base address if this is the
@@ -810,8 +810,6 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
if (error)
return (error);
- vmspace = imgp->proc->p_vmspace;
-
for (i = 0; i < hdr->e_phnum; i++) {
switch (phdr[i].p_type) {
case PT_LOAD: /* Loadable segment */
@@ -828,11 +826,11 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
prot |= VM_PROT_EXECUTE;
#endif
- if ((error = __elfN(load_section)(vmspace,
- imgp->object, phdr[i].p_offset,
+ error = __elfN(load_section)(imgp, phdr[i].p_offset,
(caddr_t)(uintptr_t)phdr[i].p_vaddr + et_dyn_addr,
phdr[i].p_memsz, phdr[i].p_filesz, prot,
- sv->sv_pagesize)) != 0)
+ sv->sv_pagesize);
+ if (error != 0)
return (error);
/*
@@ -901,6 +899,7 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
return (ENOMEM);
}
+ vmspace = imgp->proc->p_vmspace;
vmspace->vm_tsize = text_size >> PAGE_SHIFT;
vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
vmspace->vm_dsize = data_size >> PAGE_SHIFT;
@@ -912,8 +911,8 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
* calculation is that it leaves room for the heap to grow to
* its maximum allowed size.
*/
- addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
- lim_max(imgp->proc, RLIMIT_DATA));
+ addr = round_page((vm_offset_t)vmspace->vm_daddr + lim_max(imgp->proc,
+ RLIMIT_DATA));
PROC_UNLOCK(imgp->proc);
imgp->entry_addr = entry;