Diffstat (limited to 'sys/amd64/linux/linux_machdep.c')
-rw-r--r--  sys/amd64/linux/linux_machdep.c  433
1 files changed, 433 insertions, 0 deletions
diff --git a/sys/amd64/linux/linux_machdep.c b/sys/amd64/linux/linux_machdep.c
new file mode 100644
index 0000000..d6174e6
--- /dev/null
+++ b/sys/amd64/linux/linux_machdep.c
@@ -0,0 +1,433 @@
+/*-
+ * Copyright (c) 2013 Dmitry Chagin
+ * Copyright (c) 2004 Tim J. Robbins
+ * Copyright (c) 2002 Doug Rabson
+ * Copyright (c) 2000 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in this position and unchanged.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/capability.h>
+#include <sys/dirent.h>
+#include <sys/file.h>
+#include <sys/fcntl.h>
+#include <sys/filedesc.h>
+#include <sys/clock.h>
+#include <sys/imgact.h>
+#include <sys/ktr.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mman.h>
+#include <sys/mutex.h>
+#include <sys/priv.h>
+#include <sys/proc.h>
+#include <sys/resource.h>
+#include <sys/resourcevar.h>
+#include <sys/sched.h>
+#include <sys/syscallsubr.h>
+#include <sys/sysproto.h>
+#include <sys/vnode.h>
+#include <sys/unistd.h>
+#include <sys/wait.h>
+
+#include <security/mac/mac_framework.h>
+
+#include <ufs/ufs/extattr.h>
+#include <ufs/ufs/quota.h>
+#include <ufs/ufs/ufsmount.h>
+
+#include <machine/frame.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+#include <machine/psl.h>
+#include <machine/segments.h>
+#include <machine/specialreg.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+
+#include <amd64/linux/linux.h>
+#include <amd64/linux/linux_proto.h>
+#include <compat/linux/linux_ipc.h>
+#include <compat/linux/linux_file.h>
+#include <compat/linux/linux_misc.h>
+#include <compat/linux/linux_signal.h>
+#include <compat/linux/linux_util.h>
+#include <compat/linux/linux_emul.h>
+
+
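+/*
+ * execve(2) entry point: translate the user path against the Linux
+ * emulation root, copy in the argument and environment vectors, and
+ * hand off to the common Linux execve code.
+ */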
+int
+linux_execve(struct thread *td, struct linux_execve_args *args)
+{
+ struct image_args eargs;
+ char *path;
+ int error;
+
+ LCONVPATHEXIST(td, args->path, &path);
+
+ LINUX_CTR(execve);
+
+ error = exec_copyin_args(&eargs, path, UIO_SYSSPACE, args->argp,
+ args->envp);
+ free(path, M_TEMP);
+ if (error == 0)
+ error = linux_common_execve(td, &eargs);
+ return (error);
+}
+
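+/*
+ * Called for a newly created Linux thread: switch %rsp to the stack
+ * supplied by clone(2), if any, and arrange for the child to see a
+ * zero return value.
+ */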
+int
+linux_set_upcall_kse(struct thread *td, register_t stack)
+{
+
+ if (stack)
+ td->td_frame->tf_rsp = stack;
+
+ /*
+ * The newly created Linux thread returns
+ * to user space by the same path as its parent does.
+ */
+ td->td_frame->tf_rax = 0;
+ return (0);
+}
+
+#define STACK_SIZE (2 * 1024 * 1024)
+#define GUARD_SIZE (4 * PAGE_SIZE)
+
+int
+linux_mmap2(struct thread *td, struct linux_mmap2_args *args)
+{
+ struct proc *p = td->td_proc;
+ struct mmap_args /* {
+ caddr_t addr;
+ size_t len;
+ int prot;
+ int flags;
+ int fd;
+ long pad;
+ off_t pos;
+ } */ bsd_args;
+ int error;
+ struct file *fp;
+ cap_rights_t rights;
+
+ LINUX_CTR6(mmap2, "0x%lx, %ld, %ld, 0x%08lx, %ld, 0x%lx",
+ args->addr, args->len, args->prot,
+ args->flags, args->fd, args->pgoff);
+
+ error = 0;
+ bsd_args.flags = 0;
+ fp = NULL;
+
+ /*
+ * Linux mmap(2):
+ * You must specify exactly one of MAP_SHARED and MAP_PRIVATE
+ */
+ if (! ((args->flags & LINUX_MAP_SHARED) ^
+ (args->flags & LINUX_MAP_PRIVATE)))
+ return (EINVAL);
+
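+ /*
+ * Translate the Linux flag bits into their FreeBSD counterparts;
+ * LINUX_MAP_GROWSDOWN becomes MAP_STACK and receives further special
+ * handling below.
+ */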
+ if (args->flags & LINUX_MAP_SHARED)
+ bsd_args.flags |= MAP_SHARED;
+ if (args->flags & LINUX_MAP_PRIVATE)
+ bsd_args.flags |= MAP_PRIVATE;
+ if (args->flags & LINUX_MAP_FIXED)
+ bsd_args.flags |= MAP_FIXED;
+ if (args->flags & LINUX_MAP_ANON)
+ bsd_args.flags |= MAP_ANON;
+ else
+ bsd_args.flags |= MAP_NOSYNC;
+ if (args->flags & LINUX_MAP_GROWSDOWN)
+ bsd_args.flags |= MAP_STACK;
+
+ /*
+ * PROT_READ, PROT_WRITE, or PROT_EXEC implies PROT_READ and PROT_EXEC
+ * on Linux/i386. We do this to ensure maximum compatibility.
+ * Linux/ia64 does the same in i386 emulation mode.
+ */
+ bsd_args.prot = args->prot;
+ if (bsd_args.prot & (PROT_READ | PROT_WRITE | PROT_EXEC))
+ bsd_args.prot |= PROT_READ | PROT_EXEC;
+
+ /* Linux does not check the file descriptor when MAP_ANONYMOUS is set. */
+ bsd_args.fd = (bsd_args.flags & MAP_ANON) ? -1 : args->fd;
+ if (bsd_args.fd != -1) {
+ /*
+ * Linux follows Solaris mmap(2) description:
+ * The file descriptor fildes is opened with
+ * read permission, regardless of the
+ * protection options specified.
+ */
+
+ error = fget(td, bsd_args.fd,
+ cap_rights_init(&rights, CAP_MMAP), &fp);
+ if (error != 0)
+ return (error);
+ if (fp->f_type != DTYPE_VNODE) {
+ fdrop(fp, td);
+ return (EINVAL);
+ }
+
+ /* Linux mmap() just fails for O_WRONLY files */
+ if (!(fp->f_flag & FREAD)) {
+ fdrop(fp, td);
+ return (EACCES);
+ }
+
+ fdrop(fp, td);
+ }
+
+ if (args->flags & LINUX_MAP_GROWSDOWN) {
+ /*
+ * The Linux MAP_GROWSDOWN option does not limit auto
+ * growth of the region. Linux mmap with this option
+ * takes as addr the initial BOS, and as len, the initial
+ * region size. It can then grow down from addr without
+ * limit. However, Linux threads have an implicit internal
+ * limit to stack size of STACK_SIZE. It's just not
+ * enforced explicitly in Linux. But here we impose
+ * a limit of (STACK_SIZE - GUARD_SIZE) on the stack
+ * region, since we can do this with our mmap.
+ *
+ * Our mmap with MAP_STACK takes addr as the maximum
+ * downsize limit on BOS, and as len the max size of
+ * the region. It then maps the top SGROWSIZ bytes,
+ * and auto grows the region down, up to the limit
+ * in addr.
+ *
+ * If we don't use the MAP_STACK option, the effect
+ * of this code is to allocate a stack region of a
+ * fixed size of (STACK_SIZE - GUARD_SIZE).
+ */
+
+ if ((caddr_t)PTRIN(args->addr) + args->len >
+ p->p_vmspace->vm_maxsaddr) {
+ /*
+ * Some Linux apps will attempt to mmap
+ * thread stacks near the top of their
+ * address space. If their TOS is greater
+ * than vm_maxsaddr, vm_map_growstack()
+ * will confuse the thread stack with the
+ * process stack and deliver a SEGV if they
+ * attempt to grow the thread stack past their
+ * current stacksize rlimit. To avoid this,
+ * adjust vm_maxsaddr upwards to reflect
+ * the current stacksize rlimit rather
+ * than the maximum possible stacksize.
+ * It would be better to adjust the
+ * mmap'ed region, but some apps do not check
+ * mmap's return value.
+ */
+ PROC_LOCK(p);
+ p->p_vmspace->vm_maxsaddr = (char *)USRSTACK -
+ lim_cur(p, RLIMIT_STACK);
+ PROC_UNLOCK(p);
+ }
+
+ /*
+ * This gives us our maximum stack size and a new BOS.
+ * If we're using VM_STACK, then mmap will just map
+ * the top SGROWSIZ bytes, and let the stack grow down
+ * to the limit at BOS. If we're not using VM_STACK
+ * we map the full stack, since we don't have a way
+ * to autogrow it.
+ */
+ if (args->len > STACK_SIZE - GUARD_SIZE) {
+ bsd_args.addr = (caddr_t)PTRIN(args->addr);
+ bsd_args.len = args->len;
+ } else {
+ bsd_args.addr = (caddr_t)PTRIN(args->addr) -
+ (STACK_SIZE - GUARD_SIZE - args->len);
+ bsd_args.len = STACK_SIZE - GUARD_SIZE;
+ }
+ } else {
+ bsd_args.addr = (caddr_t)PTRIN(args->addr);
+ bsd_args.len = args->len;
+ }
+ bsd_args.pos = (off_t)args->pgoff;
+
+ error = sys_mmap(td, &bsd_args);
+
+ LINUX_CTR2(mmap2, "return: %d (%p)",
+ error, td->td_retval[0]);
+ return (error);
+}
+
+int
+linux_mprotect(struct thread *td, struct linux_mprotect_args *uap)
+{
+ struct mprotect_args bsd_args;
+
+ LINUX_CTR(mprotect);
+
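+ /*
+ * Mirror the PROT_READ/PROT_EXEC promotion performed in
+ * linux_mmap2() above.
+ */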
+ bsd_args.addr = uap->addr;
+ bsd_args.len = uap->len;
+ bsd_args.prot = uap->prot;
+ if (bsd_args.prot & (PROT_READ | PROT_WRITE | PROT_EXEC))
+ bsd_args.prot |= PROT_READ | PROT_EXEC;
+ return (sys_mprotect(td, &bsd_args));
+}
+
+int
+linux_iopl(struct thread *td, struct linux_iopl_args *args)
+{
+ int error;
+
+ LINUX_CTR(iopl);
+
+ if (args->level > 3)
+ return (EINVAL);
+ if ((error = priv_check(td, PRIV_IO)) != 0)
+ return (error);
+ if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
+ return (error);
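+ /*
+ * PSL_IOPL is the two-bit I/O privilege level field in %rflags
+ * (bits 12 and 13); PSL_IOPL / 3 is the weight of its low bit, so
+ * multiplying it by the requested level (0..3) yields the field value.
+ */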
+ td->td_frame->tf_rflags = (td->td_frame->tf_rflags & ~PSL_IOPL) |
+ (args->level * (PSL_IOPL / 3));
+
+ return (0);
+}
+
+int
+linux_rt_sigsuspend(struct thread *td, struct linux_rt_sigsuspend_args *uap)
+{
+ l_sigset_t lmask;
+ sigset_t sigmask;
+ int error;
+
+ LINUX_CTR2(rt_sigsuspend, "%p, %ld",
+ uap->newset, uap->sigsetsize);
+
+ if (uap->sigsetsize != sizeof(l_sigset_t))
+ return (EINVAL);
+
+ error = copyin(uap->newset, &lmask, sizeof(l_sigset_t));
+ if (error)
+ return (error);
+
+ linux_to_bsd_sigset(&lmask, &sigmask);
+ return (kern_sigsuspend(td, sigmask));
+}
+
+int
+linux_pause(struct thread *td, struct linux_pause_args *args)
+{
+ struct proc *p = td->td_proc;
+ sigset_t sigmask;
+
+ LINUX_CTR(pause);
+
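+ /*
+ * Linux pause(2) is equivalent to sigsuspend(2) with the caller's
+ * current signal mask: snapshot the mask under the proc lock and
+ * sleep until a signal is delivered.
+ */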
+ PROC_LOCK(p);
+ sigmask = td->td_sigmask;
+ PROC_UNLOCK(p);
+ return (kern_sigsuspend(td, sigmask));
+}
+
+int
+linux_sigaltstack(struct thread *td, struct linux_sigaltstack_args *uap)
+{
+ stack_t ss, oss;
+ l_stack_t lss;
+ int error;
+
+ LINUX_CTR2(sigaltstack, "%p, %p", uap->uss, uap->uoss);
+
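+ /*
+ * Convert the Linux stack_t and its flags to the native layout, let
+ * kern_sigaltstack() do the work, and convert the previous stack
+ * back for the caller.
+ */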
+ if (uap->uss != NULL) {
+ error = copyin(uap->uss, &lss, sizeof(l_stack_t));
+ if (error)
+ return (error);
+
+ ss.ss_sp = PTRIN(lss.ss_sp);
+ ss.ss_size = lss.ss_size;
+ ss.ss_flags = linux_to_bsd_sigaltstack(lss.ss_flags);
+ }
+ error = kern_sigaltstack(td, (uap->uss != NULL) ? &ss : NULL,
+ (uap->uoss != NULL) ? &oss : NULL);
+ if (!error && uap->uoss != NULL) {
+ lss.ss_sp = PTROUT(oss.ss_sp);
+ lss.ss_size = oss.ss_size;
+ lss.ss_flags = bsd_to_linux_sigaltstack(oss.ss_flags);
+ error = copyout(&lss, uap->uoss, sizeof(l_stack_t));
+ }
+
+ return (error);
+}
+
+/* XXX do all */
+int
+linux_arch_prctl(struct thread *td, struct linux_arch_prctl_args *args)
+{
+ int error;
+ struct pcb *pcb;
+
+ LINUX_CTR2(arch_prctl, "0x%x, %p", args->code, args->addr);
+
+ error = ENOTSUP;
+ pcb = td->td_pcb;
+
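+ /*
+ * Only the FS/GS base queries and the FS base update are handled;
+ * LINUX_ARCH_SET_GS is validated but still returns the ENOTSUP set
+ * above (see the XXX).
+ */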
+ switch (args->code) {
+ case LINUX_ARCH_GET_GS:
+ error = copyout(&pcb->pcb_gsbase, (unsigned long *)args->addr,
+ sizeof(args->addr));
+ break;
+ case LINUX_ARCH_SET_GS:
+ if (args->addr >= VM_MAXUSER_ADDRESS)
+ return (EPERM);
+ break;
+ case LINUX_ARCH_GET_FS:
+ error = copyout(&pcb->pcb_fsbase, (unsigned long *)args->addr,
+ sizeof(args->addr));
+ break;
+ case LINUX_ARCH_SET_FS:
+ error = linux_set_cloned_tls(td, (void *)args->addr);
+ break;
+ default:
+ error = EINVAL;
+ }
+ return (error);
+}
+
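+/*
+ * On amd64 a Linux thread addresses its TLS through %fs: record the
+ * requested base address in the PCB and set the thread's %fs selector
+ * to the user data selector so the base is picked up when the thread
+ * runs in user mode.
+ */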
+int
+linux_set_cloned_tls(struct thread *td, void *desc)
+{
+ struct pcb *pcb;
+
+ if ((uint64_t)desc >= VM_MAXUSER_ADDRESS)
+ return (EPERM);
+
+ pcb = td->td_pcb;
+ pcb->pcb_fsbase = (register_t)desc;
+ td->td_frame->tf_fs = _ufssel;
+
+ return (0);
+}