summaryrefslogtreecommitdiffstats
path: root/sys/kern
diff options
context:
space:
mode:
Diffstat (limited to 'sys/kern')
-rw-r--r--sys/kern/imgact_aout.c26
-rw-r--r--sys/kern/imgact_elf.c32
-rw-r--r--sys/kern/init_main.c42
-rw-r--r--sys/kern/kern_acct.c33
-rw-r--r--sys/kern/kern_acl.c88
-rw-r--r--sys/kern/kern_cap.c12
-rw-r--r--sys/kern/kern_clock.c63
-rw-r--r--sys/kern/kern_condvar.c290
-rw-r--r--sys/kern/kern_conf.c2
-rw-r--r--sys/kern/kern_descrip.c284
-rw-r--r--sys/kern/kern_event.c84
-rw-r--r--sys/kern/kern_exec.c43
-rw-r--r--sys/kern/kern_exit.c74
-rw-r--r--sys/kern/kern_fork.c127
-rw-r--r--sys/kern/kern_idle.c8
-rw-r--r--sys/kern/kern_intr.c68
-rw-r--r--sys/kern/kern_jail.c7
-rw-r--r--sys/kern/kern_kthread.c8
-rw-r--r--sys/kern/kern_ktrace.c33
-rw-r--r--sys/kern/kern_linker.c87
-rw-r--r--sys/kern/kern_lock.c28
-rw-r--r--sys/kern/kern_lockf.c30
-rw-r--r--sys/kern/kern_malloc.c2
-rw-r--r--sys/kern/kern_module.c26
-rw-r--r--sys/kern/kern_mutex.c198
-rw-r--r--sys/kern/kern_ntptime.c8
-rw-r--r--sys/kern/kern_proc.c104
-rw-r--r--sys/kern/kern_prot.c199
-rw-r--r--sys/kern/kern_resource.c223
-rw-r--r--sys/kern/kern_shutdown.c14
-rw-r--r--sys/kern/kern_sig.c244
-rw-r--r--sys/kern/kern_subr.c27
-rw-r--r--sys/kern/kern_switch.c82
-rw-r--r--sys/kern/kern_sx.c10
-rw-r--r--sys/kern/kern_synch.c387
-rw-r--r--sys/kern/kern_syscalls.c8
-rw-r--r--sys/kern/kern_sysctl.c36
-rw-r--r--sys/kern/kern_time.c54
-rw-r--r--sys/kern/kern_xxx.c54
-rw-r--r--sys/kern/ksched.c36
-rw-r--r--sys/kern/link_aout.c13
-rw-r--r--sys/kern/link_elf.c19
-rw-r--r--sys/kern/link_elf_obj.c19
-rw-r--r--sys/kern/makesyscalls.sh5
-rw-r--r--sys/kern/p1003_1b.c81
-rw-r--r--sys/kern/subr_acl_posix1e.c88
-rw-r--r--sys/kern/subr_disk.c14
-rw-r--r--sys/kern/subr_diskslice.c4
-rw-r--r--sys/kern/subr_eventhandler.c8
-rw-r--r--sys/kern/subr_log.c12
-rw-r--r--sys/kern/subr_mbuf.c2
-rw-r--r--sys/kern/subr_prf.c5
-rw-r--r--sys/kern/subr_prof.c23
-rw-r--r--sys/kern/subr_smp.c18
-rw-r--r--sys/kern/subr_trap.c45
-rw-r--r--sys/kern/subr_turnstile.c198
-rw-r--r--sys/kern/subr_witness.c59
-rw-r--r--sys/kern/subr_xxx.c20
-rw-r--r--sys/kern/sys_generic.c330
-rw-r--r--sys/kern/sys_pipe.c64
-rw-r--r--sys/kern/sys_process.c33
-rw-r--r--sys/kern/sys_socket.c32
-rw-r--r--sys/kern/sysv_ipc.c5
-rw-r--r--sys/kern/sysv_msg.c65
-rw-r--r--sys/kern/sysv_sem.c94
-rw-r--r--sys/kern/sysv_shm.c92
-rw-r--r--sys/kern/tty.c27
-rw-r--r--sys/kern/tty_conf.c4
-rw-r--r--sys/kern/tty_cons.c32
-rw-r--r--sys/kern/tty_pty.c37
-rw-r--r--sys/kern/tty_tty.c54
-rw-r--r--sys/kern/uipc_sockbuf.c26
-rw-r--r--sys/kern/uipc_socket.c68
-rw-r--r--sys/kern/uipc_socket2.c26
-rw-r--r--sys/kern/uipc_syscalls.c344
-rw-r--r--sys/kern/uipc_usrreq.c79
-rw-r--r--sys/kern/vfs_acl.c88
-rw-r--r--sys/kern/vfs_aio.c169
-rw-r--r--sys/kern/vfs_bio.c14
-rw-r--r--sys/kern/vfs_cache.c24
-rw-r--r--sys/kern/vfs_conf.c6
-rw-r--r--sys/kern/vfs_default.c76
-rw-r--r--sys/kern/vfs_extattr.c872
-rw-r--r--sys/kern/vfs_lookup.c52
-rw-r--r--sys/kern/vfs_mount.c6
-rw-r--r--sys/kern/vfs_subr.c179
-rw-r--r--sys/kern/vfs_syscalls.c872
-rw-r--r--sys/kern/vfs_vnops.c158
-rw-r--r--sys/kern/vnode_if.pl4
-rw-r--r--sys/kern/vnode_if.src40
90 files changed, 4173 insertions, 3613 deletions
diff --git a/sys/kern/imgact_aout.c b/sys/kern/imgact_aout.c
index 9aa8b3d..3ec8f5f 100644
--- a/sys/kern/imgact_aout.c
+++ b/sys/kern/imgact_aout.c
@@ -26,7 +26,7 @@
* $FreeBSD$
*/
-#include "opt_upages.h"
+#include "opt_kstack_pages.h"
#include <sys/param.h>
#include <sys/exec.h>
@@ -250,29 +250,33 @@ exec_aout_imgact(imgp)
* expand_name(), unless the process was setuid/setgid.
*/
int
-aout_coredump(p, vp, limit)
- register struct proc *p;
+aout_coredump(td, vp, limit)
+ register struct thread *td;
register struct vnode *vp;
off_t limit;
{
+ struct proc *p = td->td_proc;
register struct ucred *cred = p->p_ucred;
register struct vmspace *vm = p->p_vmspace;
int error;
- if (ctob(UPAGES + vm->vm_dsize + vm->vm_ssize) >= limit)
+ if (ctob((UAREA_PAGES + KSTACK_PAGES)
+ + vm->vm_dsize + vm->vm_ssize) >= limit)
return (EFAULT);
- fill_kinfo_proc(p, &p->p_addr->u_kproc);
- error = cpu_coredump(p, vp, cred);
+ fill_kinfo_proc(p, &p->p_uarea->u_kproc);
+ error = cpu_coredump(td, vp, cred);
if (error == 0)
- error = vn_rdwr_inchunks(UIO_WRITE, vp, vm->vm_daddr,
- (int)ctob(vm->vm_dsize), (off_t)ctob(UPAGES), UIO_USERSPACE,
- IO_UNIT, cred, (int *) NULL, p);
+ error = vn_rdwr(UIO_WRITE, vp, vm->vm_daddr,
+ (int)ctob(vm->vm_dsize),
+ (off_t)ctob(UAREA_PAGES + KSTACK_PAGES), UIO_USERSPACE,
+ IO_UNIT, cred, (int *) NULL, td);
if (error == 0)
error = vn_rdwr_inchunks(UIO_WRITE, vp,
(caddr_t) trunc_page(USRSTACK - ctob(vm->vm_ssize)),
round_page(ctob(vm->vm_ssize)),
- (off_t)ctob(UPAGES) + ctob(vm->vm_dsize), UIO_USERSPACE,
- IO_UNIT, cred, (int *) NULL, p);
+ (off_t)ctob(UAREA_PAGES + KSTACK_PAGES) +
+ ctob(vm->vm_dsize), UIO_USERSPACE,
+ IO_UNIT, cred, (int *) NULL, td);
return (error);
}
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index 63999d1..66a3a34 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -342,6 +342,9 @@ elf_load_file(struct proc *p, const char *file, u_long *addr, u_long *entry)
u_long base_addr = 0;
int error, i, numsegs;
+ if (curthread->td_proc != p)
+ panic("elf_load_file - thread"); /* XXXKSE DIAGNOSTIC */
+
tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
nd = &tempdata->nd;
attr = &tempdata->attr;
@@ -362,7 +365,8 @@ elf_load_file(struct proc *p, const char *file, u_long *addr, u_long *entry)
goto fail;
}
- NDINIT(nd, LOOKUP, LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, p);
+ /* XXXKSE */
+ NDINIT(nd, LOOKUP, LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, curthread);
if ((error = namei(nd)) != 0) {
nd->ni_vp = NULL;
@@ -376,7 +380,7 @@ elf_load_file(struct proc *p, const char *file, u_long *addr, u_long *entry)
*/
error = exec_check_permissions(imgp);
if (error) {
- VOP_UNLOCK(nd->ni_vp, 0, p);
+ VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
goto fail;
}
@@ -387,7 +391,7 @@ elf_load_file(struct proc *p, const char *file, u_long *addr, u_long *entry)
*/
if (error == 0)
nd->ni_vp->v_flag |= VTEXT;
- VOP_UNLOCK(nd->ni_vp, 0, p);
+ VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
if (error)
goto fail;
@@ -736,7 +740,7 @@ static void cb_put_phdr __P((vm_map_entry_t, void *));
static void cb_size_segment __P((vm_map_entry_t, void *));
static void each_writable_segment __P((struct proc *, segment_callback,
void *));
-static int elf_corehdr __P((struct proc *, struct vnode *, struct ucred *,
+static int elf_corehdr __P((struct thread *, struct vnode *, struct ucred *,
int, void *, size_t));
static void elf_puthdr __P((struct proc *, void *, size_t *,
const prstatus_t *, const prfpregset_t *, const prpsinfo_t *, int));
@@ -746,11 +750,12 @@ static void elf_putnote __P((void *, size_t *, const char *, int,
extern int osreldate;
int
-elf_coredump(p, vp, limit)
- register struct proc *p;
+elf_coredump(td, vp, limit)
+ struct thread *td;
register struct vnode *vp;
off_t limit;
{
+ register struct proc *p = td->td_proc;
register struct ucred *cred = p->p_ucred;
int error = 0;
struct sseg_closure seginfo;
@@ -783,7 +788,7 @@ elf_coredump(p, vp, limit)
if (hdr == NULL) {
return EINVAL;
}
- error = elf_corehdr(p, vp, cred, seginfo.count, hdr, hdrsize);
+ error = elf_corehdr(td, vp, cred, seginfo.count, hdr, hdrsize);
/* Write the contents of all of the writable segments. */
if (error == 0) {
@@ -797,7 +802,7 @@ elf_coredump(p, vp, limit)
error = vn_rdwr_inchunks(UIO_WRITE, vp,
(caddr_t)php->p_vaddr,
php->p_filesz, offset, UIO_USERSPACE,
- IO_UNIT, cred, (int *)NULL, p);
+ IO_UNIT, cred, (int *)NULL, curthread); /* XXXKSE */
if (error != 0)
break;
offset += php->p_filesz;
@@ -909,8 +914,8 @@ each_writable_segment(p, func, closure)
* the page boundary.
*/
static int
-elf_corehdr(p, vp, cred, numsegs, hdr, hdrsize)
- struct proc *p;
+elf_corehdr(td, vp, cred, numsegs, hdr, hdrsize)
+ struct thread *td;
struct vnode *vp;
struct ucred *cred;
int numsegs;
@@ -922,6 +927,7 @@ elf_corehdr(p, vp, cred, numsegs, hdr, hdrsize)
prfpregset_t fpregset;
prpsinfo_t psinfo;
} *tempdata;
+ struct proc *p = td->td_proc;
size_t off;
prstatus_t *status;
prfpregset_t *fpregset;
@@ -940,9 +946,9 @@ elf_corehdr(p, vp, cred, numsegs, hdr, hdrsize)
status->pr_osreldate = osreldate;
status->pr_cursig = p->p_sig;
status->pr_pid = p->p_pid;
- fill_regs(p, &status->pr_reg);
+ fill_regs(td, &status->pr_reg);
- fill_fpregs(p, fpregset);
+ fill_fpregs(td, fpregset);
psinfo->pr_version = PRPSINFO_VERSION;
psinfo->pr_psinfosz = sizeof(prpsinfo_t);
@@ -960,7 +966,7 @@ elf_corehdr(p, vp, cred, numsegs, hdr, hdrsize)
/* Write it to the core file. */
return vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
- UIO_SYSSPACE, IO_UNIT, cred, NULL, p);
+ UIO_SYSSPACE, IO_UNIT, cred, NULL, td); /* XXXKSE */
}
static void
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 3859c5b..62d1655 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -83,6 +83,7 @@ void mi_startup(void); /* Should be elsewhere */
static struct session session0;
static struct pgrp pgrp0;
struct proc proc0;
+struct thread *thread0;
static struct procsig procsig0;
static struct filedesc0 filedesc0;
static struct plimit limit0;
@@ -90,7 +91,6 @@ static struct vmspace vmspace0;
struct proc *initproc;
int cmask = CMASK;
-extern struct user *proc0paddr;
extern int fallback_elf_brand;
struct vnode *rootvp;
@@ -273,10 +273,17 @@ proc0_init(void *dummy __unused)
register struct proc *p;
register struct filedesc0 *fdp;
register unsigned i;
+ struct thread *td;
GIANT_REQUIRED;
-
+ /*
+ * This assumes the proc0 struct has already been linked
+ * using proc_linkup() in the machine specific initialisation
+ * e.g. i386_init()
+ */
p = &proc0;
+ td = thread0;
+ mtx_init(&p->p_mtx, "process lock", MTX_DEF);
/*
* Initialize magic number.
@@ -321,11 +328,11 @@ proc0_init(void *dummy __unused)
p->p_flag = P_SYSTEM;
p->p_sflag = PS_INMEM;
p->p_stat = SRUN;
- p->p_nice = NZERO;
- p->p_pri.pri_class = PRI_TIMESHARE;
- p->p_pri.pri_level = PVM;
- p->p_pri.pri_native = PUSER;
- p->p_pri.pri_user = PUSER;
+ p->p_ksegrp.kg_nice = NZERO;
+ p->p_ksegrp.kg_pri.pri_class = PRI_TIMESHARE;
+ p->p_ksegrp.kg_pri.pri_level = PVM;
+ p->p_ksegrp.kg_pri.pri_native = PUSER;
+ p->p_ksegrp.kg_pri.pri_user = PUSER;
p->p_peers = 0;
p->p_leader = p;
@@ -333,7 +340,7 @@ proc0_init(void *dummy __unused)
bcopy("swapper", p->p_comm, sizeof ("swapper"));
callout_init(&p->p_itcallout, 0);
- callout_init(&p->p_slpcallout, 1);
+ callout_init(&td->td_slpcallout, 1);
/* Create credentials. */
p->p_ucred = crget();
@@ -381,14 +388,13 @@ proc0_init(void *dummy __unused)
vm_map_init(&vmspace0.vm_map, round_page(VM_MIN_ADDRESS),
trunc_page(VM_MAXUSER_ADDRESS));
vmspace0.vm_map.pmap = vmspace_pmap(&vmspace0);
- p->p_addr = proc0paddr; /* XXX */
/*
* We continue to place resource usage info and signal
* actions in the user struct so they're pageable.
*/
- p->p_stats = &p->p_addr->u_stats;
- p->p_sigacts = &p->p_addr->u_sigacts;
+ p->p_stats = &p->p_uarea->u_stats;
+ p->p_sigacts = &p->p_uarea->u_sigacts;
/*
* Charge root for one process.
@@ -467,13 +473,15 @@ start_init(void *dummy)
int options, error;
char *var, *path, *next, *s;
char *ucp, **uap, *arg0, *arg1;
+ struct thread *td;
struct proc *p;
mtx_lock(&Giant);
GIANT_REQUIRED;
- p = curproc;
+ td = curthread;
+ p = td->td_proc;
/* Get the vnode for '/'. Set p->p_fd->fd_cdir to reference it. */
if (VFS_ROOT(TAILQ_FIRST(&mountlist), &rootvnode))
@@ -482,7 +490,7 @@ start_init(void *dummy)
VREF(p->p_fd->fd_cdir);
p->p_fd->fd_rdir = rootvnode;
VREF(p->p_fd->fd_rdir);
- VOP_UNLOCK(rootvnode, 0, p);
+ VOP_UNLOCK(rootvnode, 0, td);
/*
* Need just enough stack to hold the faked-up "execve()" arguments.
@@ -573,7 +581,7 @@ start_init(void *dummy)
* Otherwise, return via fork_trampoline() all the way
* to user mode as init!
*/
- if ((error = execve(p, &args)) == 0) {
+ if ((error = execve(td, &args)) == 0) {
mtx_unlock(&Giant);
return;
}
@@ -597,7 +605,7 @@ create_init(const void *udata __unused)
{
int error;
- error = fork1(&proc0, RFFDG | RFPROC | RFSTOPPED, &initproc);
+ error = fork1(thread0, RFFDG | RFPROC | RFSTOPPED, &initproc);
if (error)
panic("cannot fork init: %d\n", error);
PROC_LOCK(initproc);
@@ -606,7 +614,7 @@ create_init(const void *udata __unused)
mtx_lock_spin(&sched_lock);
initproc->p_sflag |= PS_INMEM;
mtx_unlock_spin(&sched_lock);
- cpu_set_fork_handler(initproc, start_init, NULL);
+ cpu_set_fork_handler(&initproc->p_thread, start_init, NULL);
}
SYSINIT(init, SI_SUB_CREATE_INIT, SI_ORDER_FIRST, create_init, NULL)
@@ -619,7 +627,7 @@ kick_init(const void *udata __unused)
mtx_lock_spin(&sched_lock);
initproc->p_stat = SRUN;
- setrunqueue(initproc);
+ setrunqueue(&initproc->p_thread); /* XXXKSE */
mtx_unlock_spin(&sched_lock);
}
SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, SI_ORDER_FIRST, kick_init, NULL)
diff --git a/sys/kern/kern_acct.c b/sys/kern/kern_acct.c
index 9a2ebab..8c319f7 100644
--- a/sys/kern/kern_acct.c
+++ b/sys/kern/kern_acct.c
@@ -110,20 +110,20 @@ SYSCTL_INT(_kern, OID_AUTO, acct_chkfreq, CTLFLAG_RW,
* MPSAFE
*/
int
-acct(a1, uap)
- struct proc *a1;
+acct(td, uap)
+ struct thread *td;
struct acct_args /* {
syscallarg(char *) path;
} */ *uap;
{
- struct proc *p = curproc; /* XXX */
struct nameidata nd;
int error, flags;
mtx_lock(&Giant);
-
+ if (td != curthread)
+ panic("acct"); /* XXXKSE DIAGNOSTIC */
/* Make sure that the caller is root. */
- error = suser(p);
+ error = suser(td->td_proc);
if (error)
goto done2;
@@ -133,15 +133,15 @@ acct(a1, uap)
*/
if (SCARG(uap, path) != NULL) {
NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path),
- p);
+ td);
flags = FWRITE;
error = vn_open(&nd, &flags, 0);
if (error)
goto done2;
NDFREE(&nd, NDF_ONLY_PNBUF);
- VOP_UNLOCK(nd.ni_vp, 0, p);
+ VOP_UNLOCK(nd.ni_vp, 0, td);
if (nd.ni_vp->v_type != VREG) {
- vn_close(nd.ni_vp, FWRITE, p->p_ucred, p);
+ vn_close(nd.ni_vp, FWRITE, td->td_proc->p_ucred, td);
error = EACCES;
goto done2;
}
@@ -154,7 +154,7 @@ acct(a1, uap)
if (acctp != NULLVP || savacctp != NULLVP) {
callout_stop(&acctwatch_callout);
error = vn_close((acctp != NULLVP ? acctp : savacctp), FWRITE,
- p->p_ucred, p);
+ td->td_proc->p_ucred, td);
acctp = savacctp = NULLVP;
}
if (SCARG(uap, path) == NULL)
@@ -180,9 +180,10 @@ done2:
*/
int
-acct_process(p)
- struct proc *p;
+acct_process(td)
+ struct thread *td;
{
+ struct proc *p = td->td_proc;
struct acct acct;
struct rusage *r;
struct timeval ut, st, tmp;
@@ -253,10 +254,10 @@ acct_process(p)
/*
* Write the accounting information to the file.
*/
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
return (vn_rdwr(UIO_WRITE, vp, (caddr_t)&acct, sizeof (acct),
- (off_t)0, UIO_SYSSPACE, IO_APPEND|IO_UNIT, p->p_ucred,
- (int *)0, p));
+ (off_t)0, UIO_SYSSPACE, IO_APPEND|IO_UNIT, td->td_proc->p_ucred,
+ (int *)0, td));
}
/*
@@ -317,7 +318,7 @@ acctwatch(a)
savacctp = NULLVP;
return;
}
- (void)VFS_STATFS(savacctp->v_mount, &sb, (struct proc *)0);
+ (void)VFS_STATFS(savacctp->v_mount, &sb, (struct thread *)0);
if (sb.f_bavail > acctresume * sb.f_blocks / 100) {
acctp = savacctp;
savacctp = NULLVP;
@@ -331,7 +332,7 @@ acctwatch(a)
acctp = NULLVP;
return;
}
- (void)VFS_STATFS(acctp->v_mount, &sb, (struct proc *)0);
+ (void)VFS_STATFS(acctp->v_mount, &sb, (struct thread *)0);
if (sb.f_bavail <= acctsuspend * sb.f_blocks / 100) {
savacctp = acctp;
acctp = NULLVP;
diff --git a/sys/kern/kern_acl.c b/sys/kern/kern_acl.c
index 69dbe85..045d1a8 100644
--- a/sys/kern/kern_acl.c
+++ b/sys/kern/kern_acl.c
@@ -48,11 +48,11 @@
MALLOC_DEFINE(M_ACL, "acl", "access control list");
-static int vacl_set_acl(struct proc *p, struct vnode *vp, acl_type_t type,
+static int vacl_set_acl( struct thread *td, struct vnode *vp, acl_type_t type,
struct acl *aclp);
-static int vacl_get_acl(struct proc *p, struct vnode *vp, acl_type_t type,
+static int vacl_get_acl( struct thread *td, struct vnode *vp, acl_type_t type,
struct acl *aclp);
-static int vacl_aclcheck(struct proc *p, struct vnode *vp,
+static int vacl_aclcheck( struct thread *td, struct vnode *vp,
acl_type_t type, struct acl *aclp);
/*
@@ -562,7 +562,7 @@ acl_posix1e_check(struct acl *acl)
* Given a vnode, set its ACL.
*/
static int
-vacl_set_acl(struct proc *p, struct vnode *vp, acl_type_t type,
+vacl_set_acl( struct thread *td, struct vnode *vp, acl_type_t type,
struct acl *aclp)
{
struct acl inkernacl;
@@ -571,10 +571,10 @@ vacl_set_acl(struct proc *p, struct vnode *vp, acl_type_t type,
error = copyin(aclp, &inkernacl, sizeof(struct acl));
if (error)
return(error);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- error = VOP_SETACL(vp, type, &inkernacl, p->p_ucred, p);
- VOP_UNLOCK(vp, 0, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ error = VOP_SETACL(vp, type, &inkernacl, td->td_proc->p_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
return(error);
}
@@ -582,16 +582,16 @@ vacl_set_acl(struct proc *p, struct vnode *vp, acl_type_t type,
* Given a vnode, get its ACL.
*/
static int
-vacl_get_acl(struct proc *p, struct vnode *vp, acl_type_t type,
+vacl_get_acl( struct thread *td, struct vnode *vp, acl_type_t type,
struct acl *aclp)
{
struct acl inkernelacl;
int error;
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- error = VOP_GETACL(vp, type, &inkernelacl, p->p_ucred, p);
- VOP_UNLOCK(vp, 0, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ error = VOP_GETACL(vp, type, &inkernelacl, td->td_proc->p_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
if (error == 0)
error = copyout(&inkernelacl, aclp, sizeof(struct acl));
return (error);
@@ -601,14 +601,14 @@ vacl_get_acl(struct proc *p, struct vnode *vp, acl_type_t type,
* Given a vnode, delete its ACL.
*/
static int
-vacl_delete(struct proc *p, struct vnode *vp, acl_type_t type)
+vacl_delete( struct thread *td, struct vnode *vp, acl_type_t type)
{
int error;
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- error = VOP_SETACL(vp, ACL_TYPE_DEFAULT, 0, p->p_ucred, p);
- VOP_UNLOCK(vp, 0, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ error = VOP_SETACL(vp, ACL_TYPE_DEFAULT, 0, td->td_proc->p_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
return (error);
}
@@ -616,7 +616,7 @@ vacl_delete(struct proc *p, struct vnode *vp, acl_type_t type)
* Given a vnode, check whether an ACL is appropriate for it
*/
static int
-vacl_aclcheck(struct proc *p, struct vnode *vp, acl_type_t type,
+vacl_aclcheck( struct thread *td, struct vnode *vp, acl_type_t type,
struct acl *aclp)
{
struct acl inkernelacl;
@@ -625,7 +625,7 @@ vacl_aclcheck(struct proc *p, struct vnode *vp, acl_type_t type,
error = copyin(aclp, &inkernelacl, sizeof(struct acl));
if (error)
return(error);
- error = VOP_ACLCHECK(vp, type, &inkernelacl, p->p_ucred, p);
+ error = VOP_ACLCHECK(vp, type, &inkernelacl, td->td_proc->p_ucred, td);
return (error);
}
@@ -641,17 +641,17 @@ vacl_aclcheck(struct proc *p, struct vnode *vp, acl_type_t type,
* MPSAFE
*/
int
-__acl_get_file(struct proc *p, struct __acl_get_file_args *uap)
+__acl_get_file( struct thread *td, struct __acl_get_file_args *uap)
{
struct nameidata nd;
int error;
mtx_lock(&Giant);
/* what flags are required here -- possible not LOCKLEAF? */
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
error = namei(&nd);
if (error == 0) {
- error = vacl_get_acl(p, nd.ni_vp, SCARG(uap, type),
+ error = vacl_get_acl(td, nd.ni_vp, SCARG(uap, type),
SCARG(uap, aclp));
NDFREE(&nd, 0);
}
@@ -665,16 +665,16 @@ __acl_get_file(struct proc *p, struct __acl_get_file_args *uap)
* MPSAFE
*/
int
-__acl_set_file(struct proc *p, struct __acl_set_file_args *uap)
+__acl_set_file( struct thread *td, struct __acl_set_file_args *uap)
{
struct nameidata nd;
int error;
mtx_lock(&Giant);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
error = namei(&nd);
if (error == 0) {
- error = vacl_set_acl(p, nd.ni_vp, SCARG(uap, type),
+ error = vacl_set_acl(td, nd.ni_vp, SCARG(uap, type),
SCARG(uap, aclp));
NDFREE(&nd, 0);
}
@@ -688,15 +688,15 @@ __acl_set_file(struct proc *p, struct __acl_set_file_args *uap)
* MPSAFE
*/
int
-__acl_get_fd(struct proc *p, struct __acl_get_fd_args *uap)
+__acl_get_fd( struct thread *td, struct __acl_get_fd_args *uap)
{
struct file *fp;
int error;
mtx_lock(&Giant);
- error = getvnode(p->p_fd, SCARG(uap, filedes), &fp);
+ error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
if (error == 0) {
- error = vacl_get_acl(p, (struct vnode *)fp->f_data,
+ error = vacl_get_acl(td, (struct vnode *)fp->f_data,
SCARG(uap, type), SCARG(uap, aclp));
}
mtx_unlock(&Giant);
@@ -709,15 +709,15 @@ __acl_get_fd(struct proc *p, struct __acl_get_fd_args *uap)
* MPSAFE
*/
int
-__acl_set_fd(struct proc *p, struct __acl_set_fd_args *uap)
+__acl_set_fd( struct thread *td, struct __acl_set_fd_args *uap)
{
struct file *fp;
int error;
mtx_lock(&Giant);
- error = getvnode(p->p_fd, SCARG(uap, filedes), &fp);
+ error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
if (error == 0) {
- error = vacl_set_acl(p, (struct vnode *)fp->f_data,
+ error = vacl_set_acl(td, (struct vnode *)fp->f_data,
SCARG(uap, type), SCARG(uap, aclp));
}
mtx_unlock(&Giant);
@@ -730,16 +730,16 @@ __acl_set_fd(struct proc *p, struct __acl_set_fd_args *uap)
* MPSAFE
*/
int
-__acl_delete_file(struct proc *p, struct __acl_delete_file_args *uap)
+__acl_delete_file( struct thread *td, struct __acl_delete_file_args *uap)
{
struct nameidata nd;
int error;
mtx_lock(&Giant);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
error = namei(&nd);
if (error == 0) {
- error = vacl_delete(p, nd.ni_vp, SCARG(uap, type));
+ error = vacl_delete(td, nd.ni_vp, SCARG(uap, type));
NDFREE(&nd, 0);
}
mtx_unlock(&Giant);
@@ -752,15 +752,15 @@ __acl_delete_file(struct proc *p, struct __acl_delete_file_args *uap)
* MPSAFE
*/
int
-__acl_delete_fd(struct proc *p, struct __acl_delete_fd_args *uap)
+__acl_delete_fd( struct thread *td, struct __acl_delete_fd_args *uap)
{
struct file *fp;
int error;
mtx_lock(&Giant);
- error = getvnode(p->p_fd, SCARG(uap, filedes), &fp);
+ error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
if (error == 0) {
- error = vacl_delete(p, (struct vnode *)fp->f_data,
+ error = vacl_delete(td, (struct vnode *)fp->f_data,
SCARG(uap, type));
}
mtx_unlock(&Giant);
@@ -773,16 +773,16 @@ __acl_delete_fd(struct proc *p, struct __acl_delete_fd_args *uap)
* MPSAFE
*/
int
-__acl_aclcheck_file(struct proc *p, struct __acl_aclcheck_file_args *uap)
+__acl_aclcheck_file( struct thread *td, struct __acl_aclcheck_file_args *uap)
{
struct nameidata nd;
int error;
mtx_lock(&Giant);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
error = namei(&nd);
if (error == 0) {
- error = vacl_aclcheck(p, nd.ni_vp, SCARG(uap, type),
+ error = vacl_aclcheck(td, nd.ni_vp, SCARG(uap, type),
SCARG(uap, aclp));
NDFREE(&nd, 0);
}
@@ -796,15 +796,15 @@ __acl_aclcheck_file(struct proc *p, struct __acl_aclcheck_file_args *uap)
* MPSAFE
*/
int
-__acl_aclcheck_fd(struct proc *p, struct __acl_aclcheck_fd_args *uap)
+__acl_aclcheck_fd( struct thread *td, struct __acl_aclcheck_fd_args *uap)
{
struct file *fp;
int error;
mtx_lock(&Giant);
- error = getvnode(p->p_fd, SCARG(uap, filedes), &fp);
+ error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
if (error == 0) {
- error = vacl_aclcheck(p, (struct vnode *)fp->f_data,
+ error = vacl_aclcheck(td, (struct vnode *)fp->f_data,
SCARG(uap, type), SCARG(uap, aclp));
}
mtx_unlock(&Giant);
diff --git a/sys/kern/kern_cap.c b/sys/kern/kern_cap.c
index b8f0c42..45a2509 100644
--- a/sys/kern/kern_cap.c
+++ b/sys/kern/kern_cap.c
@@ -54,7 +54,7 @@
* Syscall to allow a process to get it's currently capability set
*/
int
-__cap_get_proc(struct proc *p, struct __cap_get_proc_args *uap)
+__cap_get_proc(struct thread *td, struct __cap_get_proc_args *uap)
{
return (ENOSYS);
@@ -65,7 +65,7 @@ __cap_get_proc(struct proc *p, struct __cap_get_proc_args *uap)
* permitted.
*/
int
-__cap_set_proc(struct proc *p, struct __cap_set_proc_args *uap)
+__cap_set_proc(struct thread *td, struct __cap_set_proc_args *uap)
{
return (ENOSYS);
@@ -76,14 +76,14 @@ __cap_set_proc(struct proc *p, struct __cap_set_proc_args *uap)
* files, if permitted.
*/
int
-__cap_get_fd(struct proc *p, struct __cap_get_fd_args *uap)
+__cap_get_fd(struct thread *td, struct __cap_get_fd_args *uap)
{
return (ENOSYS);
}
int
-__cap_get_file(struct proc *p, struct __cap_get_file_args *uap)
+__cap_get_file(struct thread *td, struct __cap_get_file_args *uap)
{
return (ENOSYS);
@@ -94,14 +94,14 @@ __cap_get_file(struct proc *p, struct __cap_get_file_args *uap)
* if permitted.
*/
int
-__cap_set_fd(struct proc *p, struct __cap_set_fd_args *uap)
+__cap_set_fd(struct thread *td, struct __cap_set_fd_args *uap)
{
return (ENOSYS);
}
int
-__cap_set_file(struct proc *p, struct __cap_set_file_args *uap)
+__cap_set_file(struct thread *td, struct __cap_set_file_args *uap)
{
return (ENOSYS);
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index f48b212..9ac63ad 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -150,7 +150,7 @@ initclocks(dummy)
/*
* Each time the real-time timer fires, this function is called on all CPUs
- * with each CPU passing in its curproc as the first argument. If possible
+ * with each CPU passing in its curthread as the first argument. If possible
* a nice optimization in the future would be to allow the CPU receiving the
* actual real-time timer interrupt to call this function on behalf of the
* other CPUs rather than sending an IPI to all other CPUs so that they
@@ -159,24 +159,33 @@ initclocks(dummy)
* system need to call this function (or have it called on their behalf.
*/
void
-hardclock_process(p, user)
- struct proc *p;
+hardclock_process(td, user)
+ struct thread *td;
int user;
{
struct pstats *pstats;
+ struct proc *p = td->td_proc;
/*
* Run current process's virtual and profile time, as needed.
*/
mtx_assert(&sched_lock, MA_OWNED);
- pstats = p->p_stats;
- if (user &&
- timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
- itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
- p->p_sflag |= PS_ALRMPEND | PS_ASTPENDING;
- if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
- itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
- p->p_sflag |= PS_PROFPEND | PS_ASTPENDING;
+ if (p->p_flag & P_KSES) {
+ /* XXXKSE What to do? */
+ } else {
+ pstats = p->p_stats;
+ if (user &&
+ timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
+ itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
+ p->p_sflag |= PS_ALRMPEND;
+ td->td_kse->ke_flags |= KEF_ASTPENDING;
+ }
+ if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
+ itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
+ p->p_sflag |= PS_PROFPEND;
+ td->td_kse->ke_flags |= KEF_ASTPENDING;
+ }
+ }
}
/*
@@ -190,7 +199,7 @@ hardclock(frame)
CTR0(KTR_INTR, "hardclock fired");
mtx_lock_spin(&sched_lock);
- hardclock_process(curproc, CLKF_USERMODE(frame));
+ hardclock_process(curthread, CLKF_USERMODE(frame));
mtx_unlock_spin(&sched_lock);
/*
@@ -337,14 +346,14 @@ stopprofclock(p)
/*
* Do process and kernel statistics. Most of the statistics are only
* used by user-level statistics programs. The main exceptions are
- * p->p_uticks, p->p_sticks, p->p_iticks, and p->p_estcpu. This function
+ * ke->ke_uticks, p->p_sticks, p->p_iticks, and p->p_estcpu. This function
* should be called by all CPUs in the system for each statistics clock
* interrupt. See the description of hardclock_process for more detail on
* this function's relationship to statclock.
*/
void
-statclock_process(p, pc, user)
- struct proc *p;
+statclock_process(ke, pc, user)
+ struct kse *ke;
register_t pc;
int user;
{
@@ -356,8 +365,10 @@ statclock_process(p, pc, user)
long rss;
struct rusage *ru;
struct vmspace *vm;
+ struct proc *p = ke->ke_proc;
+ struct thread *td = ke->ke_thread; /* current thread */
- KASSERT(p == curproc, ("statclock_process: p != curproc"));
+ KASSERT(ke == curthread->td_kse, ("statclock_process: td != curthread"));
mtx_assert(&sched_lock, MA_OWNED);
if (user) {
/*
@@ -365,14 +376,14 @@ statclock_process(p, pc, user)
* If this process is being profiled, record the tick.
*/
if (p->p_sflag & PS_PROFIL)
- addupc_intr(p, pc, 1);
+ addupc_intr(ke, pc, 1);
if (pscnt < psdiv)
return;
/*
* Charge the time as appropriate.
*/
- p->p_uticks++;
- if (p->p_nice > NZERO)
+ ke->ke_uticks++;
+ if (ke->ke_ksegrp->kg_nice > NZERO)
cp_time[CP_NICE]++;
else
cp_time[CP_USER]++;
@@ -404,19 +415,19 @@ statclock_process(p, pc, user)
* so that we know how much of its real time was spent
* in ``non-process'' (i.e., interrupt) work.
*/
- if ((p->p_ithd != NULL) || p->p_intr_nesting_level >= 2) {
- p->p_iticks++;
+ if ((td->td_ithd != NULL) || td->td_intr_nesting_level >= 2) {
+ ke->ke_iticks++;
cp_time[CP_INTR]++;
} else {
- p->p_sticks++;
- if (p != PCPU_GET(idleproc))
+ ke->ke_sticks++;
+ if (p != PCPU_GET(idlethread)->td_proc)
cp_time[CP_SYS]++;
else
cp_time[CP_IDLE]++;
}
}
- schedclock(p);
+ schedclock(ke->ke_thread);
/* Update resource usage integrals and maximums. */
if ((pstats = p->p_stats) != NULL &&
@@ -435,7 +446,7 @@ statclock_process(p, pc, user)
* Statistics clock. Grab profile sample, and if divider reaches 0,
* do process and kernel statistics. Most of the statistics are only
* used by user-level statistics programs. The main exceptions are
- * p->p_uticks, p->p_sticks, p->p_iticks, and p->p_estcpu.
+ * ke->ke_uticks, p->p_sticks, p->p_iticks, and p->p_estcpu.
*/
void
statclock(frame)
@@ -446,7 +457,7 @@ statclock(frame)
mtx_lock_spin(&sched_lock);
if (--pscnt == 0)
pscnt = psdiv;
- statclock_process(curproc, CLKF_PC(frame), CLKF_USERMODE(frame));
+ statclock_process(curthread->td_kse, CLKF_PC(frame), CLKF_USERMODE(frame));
mtx_unlock_spin(&sched_lock);
}
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index 4e26f2e..7a8b376 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -46,9 +46,9 @@
/*
* Common sanity checks for cv_wait* functions.
*/
-#define CV_ASSERT(cvp, mp, p) do { \
- KASSERT((p) != NULL, ("%s: curproc NULL", __FUNCTION__)); \
- KASSERT((p)->p_stat == SRUN, ("%s: not SRUN", __FUNCTION__)); \
+#define CV_ASSERT(cvp, mp, td) do { \
+ KASSERT((td) != NULL, ("%s: curthread NULL", __FUNCTION__)); \
+ KASSERT((td)->td_proc->p_stat == SRUN, ("%s: not SRUN", __FUNCTION__)); \
KASSERT((cvp) != NULL, ("%s: cvp NULL", __FUNCTION__)); \
KASSERT((mp) != NULL, ("%s: mp NULL", __FUNCTION__)); \
mtx_assert((mp), MA_OWNED | MA_NOTRECURSED); \
@@ -112,21 +112,23 @@ cv_destroy(struct cv *cvp)
* Switch context.
*/
static __inline void
-cv_switch(struct proc *p)
+cv_switch(struct thread *td)
{
- p->p_stat = SSLEEP;
- p->p_stats->p_ru.ru_nvcsw++;
+ td->td_proc->p_stat = SSLEEP;
+ td->td_proc->p_stats->p_ru.ru_nvcsw++;
mi_switch();
- CTR3(KTR_PROC, "cv_switch: resume proc %p (pid %d, %s)", p, p->p_pid,
- p->p_comm);
+ CTR3(KTR_PROC, "cv_switch: resume thread %p (pid %d, %s)",
+ td,
+ td->td_proc->p_pid,
+ td->td_proc->p_comm);
}
/*
* Switch context, catching signals.
*/
static __inline int
-cv_switch_catch(struct proc *p)
+cv_switch_catch(struct thread *td)
{
int sig;
@@ -136,69 +138,71 @@ cv_switch_catch(struct proc *p)
* both) could occur while we were stopped. A SIGCONT would cause us to
* be marked as SSLEEP without resuming us, thus we must be ready for
* sleep when CURSIG is called. If the wakeup happens while we're
- * stopped, p->p_wchan will be 0 upon return from CURSIG.
+ * stopped, td->td_wchan will be 0 upon return from CURSIG.
*/
- p->p_sflag |= PS_SINTR;
+ td->td_flags |= TDF_SINTR;
mtx_unlock_spin(&sched_lock);
- PROC_LOCK(p);
- sig = CURSIG(p);
+ PROC_LOCK(td->td_proc);
+ sig = CURSIG(td->td_proc); /* XXXKSE */
mtx_lock_spin(&sched_lock);
- PROC_UNLOCK_NOSWITCH(p);
+ PROC_UNLOCK_NOSWITCH(td->td_proc);
if (sig != 0) {
- if (p->p_wchan != NULL)
- cv_waitq_remove(p);
- p->p_stat = SRUN;
- } else if (p->p_wchan != NULL) {
- cv_switch(p);
+ if (td->td_wchan != NULL)
+ cv_waitq_remove(td);
+ td->td_proc->p_stat = SRUN;
+ } else if (td->td_wchan != NULL) {
+ cv_switch(td);
}
- p->p_sflag &= ~PS_SINTR;
+ td->td_flags &= ~TDF_SINTR;
return sig;
}
/*
- * Add a process to the wait queue of a condition variable.
+ * Add a thread to the wait queue of a condition variable.
*/
static __inline void
-cv_waitq_add(struct cv *cvp, struct proc *p)
+cv_waitq_add(struct cv *cvp, struct thread *td)
{
/*
* Process may be sitting on a slpque if asleep() was called, remove it
* before re-adding.
*/
- if (p->p_wchan != NULL)
- unsleep(p);
-
- p->p_sflag |= PS_CVWAITQ;
- p->p_wchan = cvp;
- p->p_wmesg = cvp->cv_description;
- p->p_slptime = 0;
- p->p_pri.pri_native = p->p_pri.pri_level;
- CTR3(KTR_PROC, "cv_waitq_add: proc %p (pid %d, %s)", p, p->p_pid,
- p->p_comm);
- TAILQ_INSERT_TAIL(&cvp->cv_waitq, p, p_slpq);
+ if (td->td_wchan != NULL)
+ unsleep(td);
+
+ td->td_flags |= TDF_CVWAITQ;
+ td->td_wchan = cvp;
+ td->td_wmesg = cvp->cv_description;
+ td->td_kse->ke_slptime = 0; /* XXXKSE */
+ td->td_ksegrp->kg_slptime = 0; /* XXXKSE */
+ td->td_ksegrp->kg_pri.pri_native = td->td_ksegrp->kg_pri.pri_level;
+ CTR3(KTR_PROC, "cv_waitq_add: thread %p (pid %d, %s)", td,
+ td->td_proc->p_pid,
+ td->td_proc->p_comm);
+ TAILQ_INSERT_TAIL(&cvp->cv_waitq, td, td_slpq);
}
/*
- * Wait on a condition variable. The current process is placed on the condition
+ * Wait on a condition variable. The current thread is placed on the condition
* variable's wait queue and suspended. A cv_signal or cv_broadcast on the same
- * condition variable will resume the process. The mutex is released before
+ * condition variable will resume the thread. The mutex is released before
* sleeping and will be held on return. It is recommended that the mutex be
* held when cv_signal or cv_broadcast are called.
*/
void
cv_wait(struct cv *cvp, struct mtx *mp)
{
- struct proc *p;
+ struct thread *td;
WITNESS_SAVE_DECL(mp);
- p = CURPROC;
+ td = curthread;
#ifdef KTRACE
- if (p && KTRPOINT(p, KTR_CSW))
- ktrcsw(p->p_tracep, 1, 0);
+ if (td->td_proc && KTRPOINT(td->td_proc, KTR_CSW))
+ ktrcsw(td->td_proc->p_tracep, 1, 0);
#endif
- CV_ASSERT(cvp, mp, p);
+ CV_ASSERT(cvp, mp, td);
WITNESS_SLEEP(0, &mp->mtx_object);
WITNESS_SAVE(&mp->mtx_object, mp);
@@ -207,7 +211,7 @@ cv_wait(struct cv *cvp, struct mtx *mp)
/*
* After a panic, or during autoconfiguration, just give
* interrupts a chance, then just return; don't run any other
- * procs or panic below, in case this is the idle process and
+ * thread or panic below, in case this is the idle process and
* already asleep.
*/
mtx_unlock_spin(&sched_lock);
@@ -218,13 +222,13 @@ cv_wait(struct cv *cvp, struct mtx *mp)
DROP_GIANT_NOSWITCH();
mtx_unlock_flags(mp, MTX_NOSWITCH);
- cv_waitq_add(cvp, p);
- cv_switch(p);
+ cv_waitq_add(cvp, td);
+ cv_switch(td);
mtx_unlock_spin(&sched_lock);
#ifdef KTRACE
- if (KTRPOINT(p, KTR_CSW))
- ktrcsw(p->p_tracep, 0, 0);
+ if (KTRPOINT(td->td_proc, KTR_CSW))
+ ktrcsw(td->td_proc->p_tracep, 0, 0);
#endif
PICKUP_GIANT();
mtx_lock(mp);
@@ -233,25 +237,25 @@ cv_wait(struct cv *cvp, struct mtx *mp)
/*
* Wait on a condition variable, allowing interruption by signals. Return 0 if
- * the process was resumed with cv_signal or cv_broadcast, EINTR or ERESTART if
+ * the thread was resumed with cv_signal or cv_broadcast, EINTR or ERESTART if
* a signal was caught. If ERESTART is returned the system call should be
* restarted if possible.
*/
int
cv_wait_sig(struct cv *cvp, struct mtx *mp)
{
- struct proc *p;
+ struct thread *td;
int rval;
int sig;
WITNESS_SAVE_DECL(mp);
- p = CURPROC;
+ td = curthread;
rval = 0;
#ifdef KTRACE
- if (p && KTRPOINT(p, KTR_CSW))
- ktrcsw(p->p_tracep, 1, 0);
+ if (td->td_proc && KTRPOINT(td->td_proc, KTR_CSW))
+ ktrcsw(td->td_proc->p_tracep, 1, 0);
#endif
- CV_ASSERT(cvp, mp, p);
+ CV_ASSERT(cvp, mp, td);
WITNESS_SLEEP(0, &mp->mtx_object);
WITNESS_SAVE(&mp->mtx_object, mp);
@@ -271,27 +275,27 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
DROP_GIANT_NOSWITCH();
mtx_unlock_flags(mp, MTX_NOSWITCH);
- cv_waitq_add(cvp, p);
- sig = cv_switch_catch(p);
+ cv_waitq_add(cvp, td);
+ sig = cv_switch_catch(td);
mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
- PROC_LOCK(p);
+ PROC_LOCK(td->td_proc);
if (sig == 0)
- sig = CURSIG(p);
+ sig = CURSIG(td->td_proc); /* XXXKSE */
if (sig != 0) {
- if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
+ if (SIGISMEMBER(td->td_proc->p_sigacts->ps_sigintr, sig))
rval = EINTR;
else
rval = ERESTART;
}
- PROC_UNLOCK(p);
+ PROC_UNLOCK(td->td_proc);
#ifdef KTRACE
mtx_lock(&Giant);
- if (KTRPOINT(p, KTR_CSW))
- ktrcsw(p->p_tracep, 0, 0);
+ if (KTRPOINT(td->td_proc, KTR_CSW))
+ ktrcsw(td->td_proc->p_tracep, 0, 0);
mtx_unlock(&Giant);
#endif
mtx_lock(mp);
@@ -308,17 +312,17 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
int
cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
{
- struct proc *p;
+ struct thread *td;
int rval;
WITNESS_SAVE_DECL(mp);
- p = CURPROC;
+ td = curthread;
rval = 0;
#ifdef KTRACE
- if (p && KTRPOINT(p, KTR_CSW))
- ktrcsw(p->p_tracep, 1, 0);
+ if (td->td_proc && KTRPOINT(td->td_proc, KTR_CSW))
+ ktrcsw(td->td_proc->p_tracep, 1, 0);
#endif
- CV_ASSERT(cvp, mp, p);
+ CV_ASSERT(cvp, mp, td);
WITNESS_SLEEP(0, &mp->mtx_object);
WITNESS_SAVE(&mp->mtx_object, mp);
@@ -327,7 +330,7 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
/*
* After a panic, or during autoconfiguration, just give
* interrupts a chance, then just return; don't run any other
- * procs or panic below, in case this is the idle process and
+ * thread or panic below, in case this is the idle process and
* already asleep.
*/
mtx_unlock_spin(&sched_lock);
@@ -338,29 +341,29 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
DROP_GIANT_NOSWITCH();
mtx_unlock_flags(mp, MTX_NOSWITCH);
- cv_waitq_add(cvp, p);
- callout_reset(&p->p_slpcallout, timo, cv_timedwait_end, p);
- cv_switch(p);
+ cv_waitq_add(cvp, td);
+ callout_reset(&td->td_slpcallout, timo, cv_timedwait_end, td);
+ cv_switch(td);
- if (p->p_sflag & PS_TIMEOUT) {
- p->p_sflag &= ~PS_TIMEOUT;
+ if (td->td_flags & TDF_TIMEOUT) {
+ td->td_flags &= ~TDF_TIMEOUT;
rval = EWOULDBLOCK;
- } else if (p->p_sflag & PS_TIMOFAIL)
- p->p_sflag &= ~PS_TIMOFAIL;
- else if (callout_stop(&p->p_slpcallout) == 0) {
+ } else if (td->td_flags & TDF_TIMOFAIL)
+ td->td_flags &= ~TDF_TIMOFAIL;
+ else if (callout_stop(&td->td_slpcallout) == 0) {
/*
* Work around race with cv_timedwait_end similar to that
* between msleep and endtsleep.
*/
- p->p_sflag |= PS_TIMEOUT;
- p->p_stats->p_ru.ru_nivcsw++;
+ td->td_flags |= TDF_TIMEOUT;
+ td->td_proc->p_stats->p_ru.ru_nivcsw++;
mi_switch();
}
mtx_unlock_spin(&sched_lock);
#ifdef KTRACE
- if (KTRPOINT(p, KTR_CSW))
- ktrcsw(p->p_tracep, 0, 0);
+ if (KTRPOINT(td->td_proc, KTR_CSW))
+ ktrcsw(td->td_proc->p_tracep, 0, 0);
#endif
PICKUP_GIANT();
mtx_lock(mp);
@@ -371,25 +374,25 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
/*
* Wait on a condition variable for at most timo/hz seconds, allowing
- * interruption by signals. Returns 0 if the process was resumed by cv_signal
+ * interruption by signals. Returns 0 if the thread was resumed by cv_signal
* or cv_broadcast, EWOULDBLOCK if the timeout expires, and EINTR or ERESTART if
* a signal was caught.
*/
int
cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
{
- struct proc *p;
+ struct thread *td;
int rval;
int sig;
WITNESS_SAVE_DECL(mp);
- p = CURPROC;
+ td = curthread;
rval = 0;
#ifdef KTRACE
- if (p && KTRPOINT(p, KTR_CSW))
- ktrcsw(p->p_tracep, 1, 0);
+ if (td->td_proc && KTRPOINT(td->td_proc, KTR_CSW))
+ ktrcsw(td->td_proc->p_tracep, 1, 0);
#endif
- CV_ASSERT(cvp, mp, p);
+ CV_ASSERT(cvp, mp, td);
WITNESS_SLEEP(0, &mp->mtx_object);
WITNESS_SAVE(&mp->mtx_object, mp);
@@ -398,7 +401,7 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
/*
* After a panic, or during autoconfiguration, just give
* interrupts a chance, then just return; don't run any other
- * procs or panic below, in case this is the idle process and
+ * thread or panic below, in case this is the idle process and
* already asleep.
*/
mtx_unlock_spin(&sched_lock);
@@ -409,43 +412,43 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
DROP_GIANT_NOSWITCH();
mtx_unlock_flags(mp, MTX_NOSWITCH);
- cv_waitq_add(cvp, p);
- callout_reset(&p->p_slpcallout, timo, cv_timedwait_end, p);
- sig = cv_switch_catch(p);
+ cv_waitq_add(cvp, td);
+ callout_reset(&td->td_slpcallout, timo, cv_timedwait_end, td);
+ sig = cv_switch_catch(td);
- if (p->p_sflag & PS_TIMEOUT) {
- p->p_sflag &= ~PS_TIMEOUT;
+ if (td->td_flags & TDF_TIMEOUT) {
+ td->td_flags &= ~TDF_TIMEOUT;
rval = EWOULDBLOCK;
- } else if (p->p_sflag & PS_TIMOFAIL)
- p->p_sflag &= ~PS_TIMOFAIL;
- else if (callout_stop(&p->p_slpcallout) == 0) {
+ } else if (td->td_flags & TDF_TIMOFAIL)
+ td->td_flags &= ~TDF_TIMOFAIL;
+ else if (callout_stop(&td->td_slpcallout) == 0) {
/*
* Work around race with cv_timedwait_end similar to that
* between msleep and endtsleep.
*/
- p->p_sflag |= PS_TIMEOUT;
- p->p_stats->p_ru.ru_nivcsw++;
+ td->td_flags |= TDF_TIMEOUT;
+ td->td_proc->p_stats->p_ru.ru_nivcsw++;
mi_switch();
}
mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
- PROC_LOCK(p);
+ PROC_LOCK(td->td_proc);
if (sig == 0)
- sig = CURSIG(p);
+ sig = CURSIG(td->td_proc); /* XXXKSE */
if (sig != 0) {
- if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
+ if (SIGISMEMBER(td->td_proc->p_sigacts->ps_sigintr, sig))
rval = EINTR;
else
rval = ERESTART;
}
- PROC_UNLOCK(p);
+ PROC_UNLOCK(td->td_proc);
#ifdef KTRACE
mtx_lock(&Giant);
- if (KTRPOINT(p, KTR_CSW))
- ktrcsw(p->p_tracep, 0, 0);
+ if (KTRPOINT(td->td_proc, KTR_CSW))
+ ktrcsw(td->td_proc->p_tracep, 0, 0);
mtx_unlock(&Giant);
#endif
mtx_lock(mp);
@@ -461,38 +464,39 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
static __inline void
cv_wakeup(struct cv *cvp)
{
- struct proc *p;
+ struct thread *td;
mtx_assert(&sched_lock, MA_OWNED);
- p = TAILQ_FIRST(&cvp->cv_waitq);
- KASSERT(p->p_wchan == cvp, ("%s: bogus wchan", __FUNCTION__));
- KASSERT(p->p_sflag & PS_CVWAITQ, ("%s: not on waitq", __FUNCTION__));
- TAILQ_REMOVE(&cvp->cv_waitq, p, p_slpq);
- p->p_sflag &= ~PS_CVWAITQ;
- p->p_wchan = 0;
- if (p->p_stat == SSLEEP) {
- /* OPTIMIZED EXPANSION OF setrunnable(p); */
- CTR3(KTR_PROC, "cv_signal: proc %p (pid %d, %s)",
- p, p->p_pid, p->p_comm);
- if (p->p_slptime > 1)
- updatepri(p);
- p->p_slptime = 0;
- p->p_stat = SRUN;
- if (p->p_sflag & PS_INMEM) {
- setrunqueue(p);
- maybe_resched(p);
+ td = TAILQ_FIRST(&cvp->cv_waitq);
+ KASSERT(td->td_wchan == cvp, ("%s: bogus wchan", __FUNCTION__));
+ KASSERT(td->td_flags & TDF_CVWAITQ, ("%s: not on waitq", __FUNCTION__));
+ TAILQ_REMOVE(&cvp->cv_waitq, td, td_slpq);
+ td->td_flags &= ~TDF_CVWAITQ;
+ td->td_wchan = 0;
+ if (td->td_proc->p_stat == SSLEEP) {
+ /* OPTIMIZED EXPANSION OF setrunnable(td); */
+ CTR3(KTR_PROC, "cv_signal: thread %p (pid %d, %s)",
+ td, td->td_proc->p_pid, td->td_proc->p_comm);
+ if (td->td_ksegrp->kg_slptime > 1) /* XXXKSE */
+ updatepri(td);
+ td->td_kse->ke_slptime = 0;
+ td->td_ksegrp->kg_slptime = 0;
+ td->td_proc->p_stat = SRUN;
+ if (td->td_proc->p_sflag & PS_INMEM) {
+ setrunqueue(td);
+ maybe_resched(td->td_ksegrp);
} else {
- p->p_sflag |= PS_SWAPINREQ;
- wakeup(&proc0);
+ td->td_proc->p_sflag |= PS_SWAPINREQ;
+ wakeup(&proc0); /* XXXKSE */
}
/* END INLINE EXPANSION */
}
}
/*
- * Signal a condition variable, wakes up one waiting process. Will also wakeup
+ * Signal a condition variable, wakes up one waiting thread. Will also wakeup
* the swapper if the process is not in memory, so that it can bring the
- * sleeping process in. Note that this may also result in additional processes
+ * sleeping process in. Note that this may also result in additional threads
* being made runnable. Should be called with the same mutex as was passed to
* cv_wait held.
*/
@@ -510,7 +514,7 @@ cv_signal(struct cv *cvp)
}
/*
- * Broadcast a signal to a condition variable. Wakes up all waiting processes.
+ * Broadcast a signal to a condition variable. Wakes up all waiting threads.
* Should be called with the same mutex as was passed to cv_wait held.
*/
void
@@ -526,46 +530,46 @@ cv_broadcast(struct cv *cvp)
}
/*
- * Remove a process from the wait queue of its condition variable. This may be
+ * Remove a thread from the wait queue of its condition variable. This may be
* called externally.
*/
void
-cv_waitq_remove(struct proc *p)
+cv_waitq_remove(struct thread *td)
{
struct cv *cvp;
mtx_lock_spin(&sched_lock);
- if ((cvp = p->p_wchan) != NULL && p->p_sflag & PS_CVWAITQ) {
- TAILQ_REMOVE(&cvp->cv_waitq, p, p_slpq);
- p->p_sflag &= ~PS_CVWAITQ;
- p->p_wchan = NULL;
+ if ((cvp = td->td_wchan) != NULL && td->td_flags & TDF_CVWAITQ) {
+ TAILQ_REMOVE(&cvp->cv_waitq, td, td_slpq);
+ td->td_flags &= ~TDF_CVWAITQ;
+ td->td_wchan = NULL;
}
mtx_unlock_spin(&sched_lock);
}
/*
- * Timeout function for cv_timedwait. Put the process on the runqueue and set
+ * Timeout function for cv_timedwait. Put the thread on the runqueue and set
* its timeout flag.
*/
static void
cv_timedwait_end(void *arg)
{
- struct proc *p;
+ struct thread *td;
- p = arg;
- CTR3(KTR_PROC, "cv_timedwait_end: proc %p (pid %d, %s)", p, p->p_pid,
- p->p_comm);
+ td = arg;
+ CTR3(KTR_PROC, "cv_timedwait_end: thread %p (pid %d, %s)", td, td->td_proc->p_pid,
+ td->td_proc->p_comm);
mtx_lock_spin(&sched_lock);
- if (p->p_sflag & PS_TIMEOUT) {
- p->p_sflag &= ~PS_TIMEOUT;
- setrunqueue(p);
- } else if (p->p_wchan != NULL) {
- if (p->p_stat == SSLEEP)
- setrunnable(p);
+ if (td->td_flags & TDF_TIMEOUT) {
+ td->td_flags &= ~TDF_TIMEOUT;
+ setrunqueue(td);
+ } else if (td->td_wchan != NULL) {
+ if (td->td_proc->p_stat == SSLEEP) /* XXXKSE */
+ setrunnable(td);
else
- cv_waitq_remove(p);
- p->p_sflag |= PS_TIMEOUT;
+ cv_waitq_remove(td);
+ td->td_flags |= TDF_TIMEOUT;
} else
- p->p_sflag |= PS_TIMOFAIL;
+ td->td_flags |= TDF_TIMOFAIL;
mtx_unlock_spin(&sched_lock);
}
diff --git a/sys/kern/kern_conf.c b/sys/kern/kern_conf.c
index 35cffc2..2f3bf92 100644
--- a/sys/kern/kern_conf.c
+++ b/sys/kern/kern_conf.c
@@ -35,8 +35,8 @@
#include <sys/param.h>
#include <sys/kernel.h>
-#include <sys/sysctl.h>
#include <sys/systm.h>
+#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/conf.h>
diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c
index a09c8c2..da3bf4c 100644
--- a/sys/kern/kern_descrip.c
+++ b/sys/kern/kern_descrip.c
@@ -90,16 +90,16 @@ static struct cdevsw fildesc_cdevsw = {
/* flags */ 0,
};
-static int do_dup __P((struct filedesc *fdp, int old, int new, register_t *retval, struct proc *p));
+static int do_dup __P((struct filedesc *fdp, int old, int new, register_t *retval, struct thread *td));
static int badfo_readwrite __P((struct file *fp, struct uio *uio,
- struct ucred *cred, int flags, struct proc *p));
+ struct ucred *cred, int flags, struct thread *td));
static int badfo_ioctl __P((struct file *fp, u_long com, caddr_t data,
- struct proc *p));
+ struct thread *td));
static int badfo_poll __P((struct file *fp, int events,
- struct ucred *cred, struct proc *p));
+ struct ucred *cred, struct thread *td));
static int badfo_kqfilter __P((struct file *fp, struct knote *kn));
-static int badfo_stat __P((struct file *fp, struct stat *sb, struct proc *p));
-static int badfo_close __P((struct file *fp, struct proc *p));
+static int badfo_stat __P((struct file *fp, struct stat *sb, struct thread *td));
+static int badfo_close __P((struct file *fp, struct thread *td));
/*
* Descriptor management.
@@ -121,13 +121,14 @@ struct getdtablesize_args {
*/
/* ARGSUSED */
int
-getdtablesize(p, uap)
- struct proc *p;
+getdtablesize(td, uap)
+ struct thread *td;
struct getdtablesize_args *uap;
{
+ struct proc *p = td->td_proc;
mtx_lock(&Giant);
- p->p_retval[0] =
+ td->td_retval[0] =
min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
mtx_unlock(&Giant);
return (0);
@@ -150,11 +151,12 @@ struct dup2_args {
*/
/* ARGSUSED */
int
-dup2(p, uap)
- struct proc *p;
+dup2(td, uap)
+ struct thread *td;
struct dup2_args *uap;
{
- register struct filedesc *fdp = p->p_fd;
+ struct proc *p = td->td_proc;
+ register struct filedesc *fdp = td->td_proc->p_fd;
register u_int old = uap->from, new = uap->to;
int i, error;
@@ -168,12 +170,12 @@ retry:
goto done2;
}
if (old == new) {
- p->p_retval[0] = new;
+ td->td_retval[0] = new;
error = 0;
goto done2;
}
if (new >= fdp->fd_nfiles) {
- if ((error = fdalloc(p, new, &i)))
+ if ((error = fdalloc(td, new, &i)))
goto done2;
if (new != i)
panic("dup2: fdalloc");
@@ -182,7 +184,7 @@ retry:
*/
goto retry;
}
- error = do_dup(fdp, (int)old, (int)new, p->p_retval, p);
+ error = do_dup(fdp, (int)old, (int)new, td->td_retval, td);
done2:
mtx_unlock(&Giant);
return(error);
@@ -201,8 +203,8 @@ struct dup_args {
*/
/* ARGSUSED */
int
-dup(p, uap)
- struct proc *p;
+dup(td, uap)
+ struct thread *td;
struct dup_args *uap;
{
register struct filedesc *fdp;
@@ -211,14 +213,14 @@ dup(p, uap)
mtx_lock(&Giant);
old = uap->fd;
- fdp = p->p_fd;
+ fdp = td->td_proc->p_fd;
if (old >= fdp->fd_nfiles || fdp->fd_ofiles[old] == NULL) {
error = EBADF;
goto done2;
}
- if ((error = fdalloc(p, 0, &new)))
+ if ((error = fdalloc(td, 0, &new)))
goto done2;
- error = do_dup(fdp, (int)old, new, p->p_retval, p);
+ error = do_dup(fdp, (int)old, new, td->td_retval, td);
done2:
mtx_unlock(&Giant);
return (error);
@@ -239,10 +241,11 @@ struct fcntl_args {
*/
/* ARGSUSED */
int
-fcntl(p, uap)
- struct proc *p;
+fcntl(td, uap)
+ struct thread *td;
register struct fcntl_args *uap;
{
+ register struct proc *p = td->td_proc;
register struct filedesc *fdp;
register struct file *fp;
register char *pop;
@@ -269,13 +272,13 @@ fcntl(p, uap)
error = EINVAL;
break;
}
- if ((error = fdalloc(p, newmin, &i)))
+ if ((error = fdalloc(td, newmin, &i)))
break;
- error = do_dup(fdp, uap->fd, i, p->p_retval, p);
+ error = do_dup(fdp, uap->fd, i, td->td_retval, td);
break;
case F_GETFD:
- p->p_retval[0] = *pop & 1;
+ td->td_retval[0] = *pop & 1;
break;
case F_SETFD:
@@ -283,7 +286,7 @@ fcntl(p, uap)
break;
case F_GETFL:
- p->p_retval[0] = OFLAGS(fp->f_flag);
+ td->td_retval[0] = OFLAGS(fp->f_flag);
break;
case F_SETFL:
@@ -291,33 +294,33 @@ fcntl(p, uap)
fp->f_flag &= ~FCNTLFLAGS;
fp->f_flag |= FFLAGS(uap->arg & ~O_ACCMODE) & FCNTLFLAGS;
tmp = fp->f_flag & FNONBLOCK;
- error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, p);
+ error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, td);
if (error) {
- fdrop(fp, p);
+ fdrop(fp, td);
break;
}
tmp = fp->f_flag & FASYNC;
- error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, p);
+ error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, td);
if (!error) {
- fdrop(fp, p);
+ fdrop(fp, td);
break;
}
fp->f_flag &= ~FNONBLOCK;
tmp = 0;
- (void)fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, p);
- fdrop(fp, p);
+ (void)fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, td);
+ fdrop(fp, td);
break;
case F_GETOWN:
fhold(fp);
- error = fo_ioctl(fp, FIOGETOWN, (caddr_t)p->p_retval, p);
- fdrop(fp, p);
+ error = fo_ioctl(fp, FIOGETOWN, (caddr_t)td->td_retval, td);
+ fdrop(fp, td);
break;
case F_SETOWN:
fhold(fp);
- error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&uap->arg, p);
- fdrop(fp, p);
+ error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&uap->arg, td);
+ fdrop(fp, td);
break;
case F_SETLKW:
@@ -339,14 +342,14 @@ fcntl(p, uap)
error = copyin((caddr_t)(intptr_t)uap->arg, (caddr_t)&fl,
sizeof(fl));
if (error) {
- fdrop(fp, p);
+ fdrop(fp, td);
break;
}
if (fl.l_whence == SEEK_CUR) {
if (fp->f_offset < 0 ||
(fl.l_start > 0 &&
fp->f_offset > OFF_MAX - fl.l_start)) {
- fdrop(fp, p);
+ fdrop(fp, td);
error = EOVERFLOW;
break;
}
@@ -380,8 +383,9 @@ fcntl(p, uap)
error = EINVAL;
break;
}
- fdrop(fp, p);
+ fdrop(fp, td);
break;
+
case F_GETLK:
if (fp->f_type != DTYPE_VNODE) {
error = EBADF;
@@ -396,12 +400,12 @@ fcntl(p, uap)
error = copyin((caddr_t)(intptr_t)uap->arg, (caddr_t)&fl,
sizeof(fl));
if (error) {
- fdrop(fp, p);
+ fdrop(fp, td);
break;
}
if (fl.l_type != F_RDLCK && fl.l_type != F_WRLCK &&
fl.l_type != F_UNLCK) {
- fdrop(fp, p);
+ fdrop(fp, td);
error = EINVAL;
break;
}
@@ -410,7 +414,7 @@ fcntl(p, uap)
fp->f_offset > OFF_MAX - fl.l_start) ||
(fl.l_start < 0 &&
fp->f_offset < OFF_MIN - fl.l_start)) {
- fdrop(fp, p);
+ fdrop(fp, td);
error = EOVERFLOW;
break;
}
@@ -418,7 +422,7 @@ fcntl(p, uap)
}
error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
&fl, F_POSIX);
- fdrop(fp, p);
+ fdrop(fp, td);
if (error == 0) {
error = copyout((caddr_t)&fl,
(caddr_t)(intptr_t)uap->arg, sizeof(fl));
@@ -437,11 +441,11 @@ done2:
* Common code for dup, dup2, and fcntl(F_DUPFD).
*/
static int
-do_dup(fdp, old, new, retval, p)
+do_dup(fdp, old, new, retval, td)
register struct filedesc *fdp;
register int old, new;
register_t *retval;
- struct proc *p;
+ struct thread *td;
{
struct file *fp;
struct file *delfp;
@@ -454,7 +458,7 @@ do_dup(fdp, old, new, retval, p)
delfp = fdp->fd_ofiles[new];
#if 0
if (delfp && (fdp->fd_ofileflags[new] & UF_MAPPED))
- (void) munmapfd(p, new);
+ (void) munmapfd(td, new);
#endif
/*
@@ -474,7 +478,7 @@ do_dup(fdp, old, new, retval, p)
* close() were performed on it).
*/
if (delfp)
- (void) closef(delfp, p);
+ (void) closef(delfp, td);
return (0);
}
@@ -549,7 +553,7 @@ fsetown(pgid, sigiop)
* restrict FSETOWN to the current process or process
* group for maximum safety.
*/
- if (proc->p_session != curproc->p_session) {
+ if (proc->p_session != curthread->td_proc->p_session) {
PROC_UNLOCK(proc);
return (EPERM);
}
@@ -569,7 +573,7 @@ fsetown(pgid, sigiop)
* restrict FSETOWN to the current process or process
* group for maximum safety.
*/
- if (pgrp->pg_session != curproc->p_session)
+ if (pgrp->pg_session != curthread->td_proc->p_session)
return (EPERM);
proc = NULL;
@@ -584,8 +588,8 @@ fsetown(pgid, sigiop)
sigio->sio_pgrp = pgrp;
}
sigio->sio_pgid = pgid;
- crhold(curproc->p_ucred);
- sigio->sio_ucred = curproc->p_ucred;
+ crhold(curthread->td_proc->p_ucred);
+ sigio->sio_ucred = curthread->td_proc->p_ucred;
sigio->sio_myref = sigiop;
s = splhigh();
*sigiop = sigio;
@@ -616,8 +620,8 @@ struct close_args {
*/
/* ARGSUSED */
int
-close(p, uap)
- struct proc *p;
+close(td, uap)
+ struct thread *td;
struct close_args *uap;
{
register struct filedesc *fdp;
@@ -626,7 +630,7 @@ close(p, uap)
int error = 0;
mtx_lock(&Giant);
- fdp = p->p_fd;
+ fdp = td->td_proc->p_fd;
if ((unsigned)fd >= fdp->fd_nfiles ||
(fp = fdp->fd_ofiles[fd]) == NULL) {
error = EBADF;
@@ -634,7 +638,7 @@ close(p, uap)
}
#if 0
if (fdp->fd_ofileflags[fd] & UF_MAPPED)
- (void) munmapfd(p, fd);
+ (void) munmapfd(td, fd);
#endif
fdp->fd_ofiles[fd] = NULL;
fdp->fd_ofileflags[fd] = 0;
@@ -648,8 +652,8 @@ close(p, uap)
if (fd < fdp->fd_freefile)
fdp->fd_freefile = fd;
if (fd < fdp->fd_knlistsize)
- knote_fdclose(p, fd);
- error = closef(fp, p);
+ knote_fdclose(td, fd);
+ error = closef(fp, td);
done2:
mtx_unlock(&Giant);
return(error);
@@ -670,11 +674,11 @@ struct ofstat_args {
*/
/* ARGSUSED */
int
-ofstat(p, uap)
- struct proc *p;
+ofstat(td, uap)
+ struct thread *td;
register struct ofstat_args *uap;
{
- register struct filedesc *fdp = p->p_fd;
+ register struct filedesc *fdp = td->td_proc->p_fd;
register struct file *fp;
struct stat ub;
struct ostat oub;
@@ -688,12 +692,12 @@ ofstat(p, uap)
goto done2;
}
fhold(fp);
- error = fo_stat(fp, &ub, p);
+ error = fo_stat(fp, &ub, td);
if (error == 0) {
cvtstat(&ub, &oub);
error = copyout((caddr_t)&oub, (caddr_t)uap->sb, sizeof (oub));
}
- fdrop(fp, p);
+ fdrop(fp, td);
done2:
mtx_unlock(&Giant);
return (error);
@@ -714,8 +718,8 @@ struct fstat_args {
*/
/* ARGSUSED */
int
-fstat(p, uap)
- struct proc *p;
+fstat(td, uap)
+ struct thread *td;
register struct fstat_args *uap;
{
register struct filedesc *fdp;
@@ -724,7 +728,7 @@ fstat(p, uap)
int error;
mtx_lock(&Giant);
- fdp = p->p_fd;
+ fdp = td->td_proc->p_fd;
if ((unsigned)uap->fd >= fdp->fd_nfiles ||
(fp = fdp->fd_ofiles[uap->fd]) == NULL) {
@@ -732,10 +736,10 @@ fstat(p, uap)
goto done2;
}
fhold(fp);
- error = fo_stat(fp, &ub, p);
+ error = fo_stat(fp, &ub, td);
if (error == 0)
error = copyout((caddr_t)&ub, (caddr_t)uap->sb, sizeof (ub));
- fdrop(fp, p);
+ fdrop(fp, td);
done2:
mtx_unlock(&Giant);
return (error);
@@ -755,8 +759,8 @@ struct nfstat_args {
*/
/* ARGSUSED */
int
-nfstat(p, uap)
- struct proc *p;
+nfstat(td, uap)
+ struct thread *td;
register struct nfstat_args *uap;
{
register struct filedesc *fdp;
@@ -767,19 +771,19 @@ nfstat(p, uap)
mtx_lock(&Giant);
- fdp = p->p_fd;
+ fdp = td->td_proc->p_fd;
if ((unsigned)uap->fd >= fdp->fd_nfiles ||
(fp = fdp->fd_ofiles[uap->fd]) == NULL) {
error = EBADF;
goto done2;
}
fhold(fp);
- error = fo_stat(fp, &ub, p);
+ error = fo_stat(fp, &ub, td);
if (error == 0) {
cvtnstat(&ub, &nub);
error = copyout((caddr_t)&nub, (caddr_t)uap->sb, sizeof (nub));
}
- fdrop(fp, p);
+ fdrop(fp, td);
done2:
mtx_unlock(&Giant);
return (error);
@@ -799,8 +803,8 @@ struct fpathconf_args {
*/
/* ARGSUSED */
int
-fpathconf(p, uap)
- struct proc *p;
+fpathconf(td, uap)
+ struct thread *td;
register struct fpathconf_args *uap;
{
struct filedesc *fdp;
@@ -809,7 +813,7 @@ fpathconf(p, uap)
int error = 0;
mtx_lock(&Giant);
- fdp = p->p_fd;
+ fdp = td->td_proc->p_fd;
if ((unsigned)uap->fd >= fdp->fd_nfiles ||
(fp = fdp->fd_ofiles[uap->fd]) == NULL) {
@@ -826,19 +830,19 @@ fpathconf(p, uap)
error = EINVAL;
goto done2;
}
- p->p_retval[0] = PIPE_BUF;
+ td->td_retval[0] = PIPE_BUF;
error = 0;
break;
case DTYPE_FIFO:
case DTYPE_VNODE:
vp = (struct vnode *)fp->f_data;
- error = VOP_PATHCONF(vp, uap->name, p->p_retval);
+ error = VOP_PATHCONF(vp, uap->name, td->td_retval);
break;
default:
error = EOPNOTSUPP;
break;
}
- fdrop(fp, p);
+ fdrop(fp, td);
done2:
mtx_unlock(&Giant);
return(error);
@@ -851,12 +855,13 @@ static int fdexpand;
SYSCTL_INT(_debug, OID_AUTO, fdexpand, CTLFLAG_RD, &fdexpand, 0, "");
int
-fdalloc(p, want, result)
- struct proc *p;
+fdalloc(td, want, result)
+ struct thread *td;
int want;
int *result;
{
- register struct filedesc *fdp = p->p_fd;
+ struct proc *p = td->td_proc;
+ register struct filedesc *fdp = td->td_proc->p_fd;
register int i;
int lim, last, nfiles;
struct file **newofile;
@@ -930,11 +935,12 @@ fdalloc(p, want, result)
* are available to the process p.
*/
int
-fdavail(p, n)
- struct proc *p;
+fdavail(td, n)
+ struct thread *td;
register int n;
{
- register struct filedesc *fdp = p->p_fd;
+ struct proc *p = td->td_proc;
+ register struct filedesc *fdp = td->td_proc->p_fd;
register struct file **fpp;
register int i, lim, last;
@@ -956,11 +962,12 @@ fdavail(p, n)
* a file decriptor for the process that refers to it.
*/
int
-falloc(p, resultfp, resultfd)
- register struct proc *p;
+falloc(td, resultfp, resultfd)
+ register struct thread *td;
struct file **resultfp;
int *resultfd;
{
+ struct proc *p = td->td_proc;
register struct file *fp, *fq;
int error, i;
@@ -982,7 +989,7 @@ falloc(p, resultfp, resultfd)
* allocating the slot, else a race might have shrunk it if we had
* allocated it before the malloc.
*/
- if ((error = fdalloc(p, 0, &i))) {
+ if ((error = fdalloc(td, 0, &i))) {
nfiles--;
FREE(fp, M_FILE);
return (error);
@@ -1023,11 +1030,11 @@ ffree(fp)
* Build a new filedesc structure.
*/
struct filedesc *
-fdinit(p)
- struct proc *p;
+fdinit(td)
+ struct thread *td;
{
register struct filedesc0 *newfdp;
- register struct filedesc *fdp = p->p_fd;
+ register struct filedesc *fdp = td->td_proc->p_fd;
MALLOC(newfdp, struct filedesc0 *, sizeof(struct filedesc0),
M_FILEDESC, M_WAITOK | M_ZERO);
@@ -1067,10 +1074,10 @@ fdshare(p)
* Copy a filedesc structure.
*/
struct filedesc *
-fdcopy(p)
- struct proc *p;
+fdcopy(td)
+ struct thread *td;
{
- register struct filedesc *newfdp, *fdp = p->p_fd;
+ register struct filedesc *newfdp, *fdp = td->td_proc->p_fd;
register struct file **fpp;
register int i;
@@ -1144,10 +1151,10 @@ fdcopy(p)
* Release a filedesc structure.
*/
void
-fdfree(p)
- struct proc *p;
+fdfree(td)
+ struct thread *td;
{
- register struct filedesc *fdp = p->p_fd;
+ register struct filedesc *fdp = td->td_proc->p_fd;
struct file **fpp;
register int i;
@@ -1164,7 +1171,7 @@ fdfree(p)
fpp = fdp->fd_ofiles;
for (i = fdp->fd_lastfile; i-- >= 0; fpp++) {
if (*fpp)
- (void) closef(*fpp, p);
+ (void) closef(*fpp, td);
}
if (fdp->fd_nfiles > NDFILE)
FREE(fdp->fd_ofiles, M_FILEDESC);
@@ -1206,10 +1213,10 @@ is_unsafe(struct file *fp)
* Make this setguid thing safe, if at all possible.
*/
void
-setugidsafety(p)
- struct proc *p;
+setugidsafety(td)
+ struct thread *td;
{
- struct filedesc *fdp = p->p_fd;
+ struct filedesc *fdp = td->td_proc->p_fd;
register int i;
/* Certain daemons might not have file descriptors. */
@@ -1228,10 +1235,10 @@ setugidsafety(p)
#if 0
if ((fdp->fd_ofileflags[i] & UF_MAPPED) != 0)
- (void) munmapfd(p, i);
+ (void) munmapfd(td, i);
#endif
if (i < fdp->fd_knlistsize)
- knote_fdclose(p, i);
+ knote_fdclose(td, i);
/*
* NULL-out descriptor prior to close to avoid
* a race while close blocks.
@@ -1241,7 +1248,7 @@ setugidsafety(p)
fdp->fd_ofileflags[i] = 0;
if (i < fdp->fd_freefile)
fdp->fd_freefile = i;
- (void) closef(fp, p);
+ (void) closef(fp, td);
}
}
while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
@@ -1252,10 +1259,10 @@ setugidsafety(p)
* Close any files on exec?
*/
void
-fdcloseexec(p)
- struct proc *p;
+fdcloseexec(td)
+ struct thread *td;
{
- struct filedesc *fdp = p->p_fd;
+ struct filedesc *fdp = td->td_proc->p_fd;
register int i;
/* Certain daemons might not have file descriptors. */
@@ -1273,10 +1280,10 @@ fdcloseexec(p)
#if 0
if (fdp->fd_ofileflags[i] & UF_MAPPED)
- (void) munmapfd(p, i);
+ (void) munmapfd(td, i);
#endif
if (i < fdp->fd_knlistsize)
- knote_fdclose(p, i);
+ knote_fdclose(td, i);
/*
* NULL-out descriptor prior to close to avoid
* a race while close blocks.
@@ -1286,7 +1293,7 @@ fdcloseexec(p)
fdp->fd_ofileflags[i] = 0;
if (i < fdp->fd_freefile)
fdp->fd_freefile = i;
- (void) closef(fp, p);
+ (void) closef(fp, td);
}
}
while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
@@ -1300,10 +1307,11 @@ fdcloseexec(p)
* that was being passed in a message.
*/
int
-closef(fp, p)
+closef(fp, td)
register struct file *fp;
- register struct proc *p;
+ register struct thread *td;
{
+ struct proc *p = td->td_proc;
struct vnode *vp;
struct flock lf;
@@ -1325,13 +1333,13 @@ closef(fp, p)
vp = (struct vnode *)fp->f_data;
(void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK, &lf, F_POSIX);
}
- return (fdrop(fp, p));
+ return (fdrop(fp, td));
}
int
-fdrop(fp, p)
+fdrop(fp, td)
struct file *fp;
- struct proc *p;
+ struct thread *td;
{
struct flock lf;
struct vnode *vp;
@@ -1350,7 +1358,7 @@ fdrop(fp, p)
(void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
}
if (fp->f_ops != &badfileops)
- error = fo_close(fp, p);
+ error = fo_close(fp, td);
else
error = 0;
ffree(fp);
@@ -1374,11 +1382,11 @@ struct flock_args {
*/
/* ARGSUSED */
int
-flock(p, uap)
- struct proc *p;
+flock(td, uap)
+ struct thread *td;
register struct flock_args *uap;
{
- register struct filedesc *fdp = p->p_fd;
+ register struct filedesc *fdp = td->td_proc->p_fd;
register struct file *fp;
struct vnode *vp;
struct flock lf;
@@ -1433,21 +1441,21 @@ done2:
*/
/* ARGSUSED */
static int
-fdopen(dev, mode, type, p)
+fdopen(dev, mode, type, td)
dev_t dev;
int mode, type;
- struct proc *p;
+ struct thread *td;
{
/*
- * XXX Kludge: set curproc->p_dupfd to contain the value of the
+ * XXX Kludge: set curthread->td_dupfd to contain the value of
* the file descriptor being sought for duplication. The error
* return ensures that the vnode for this device will be released
* by vn_open. Open will detect this special error and take the
* actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
* will simply report the error.
*/
- p->p_dupfd = dev2unit(dev);
+ td->td_dupfd = dev2unit(dev);
return (ENODEV);
}
@@ -1455,8 +1463,8 @@ fdopen(dev, mode, type, p)
* Duplicate the specified descriptor to a free descriptor.
*/
int
-dupfdopen(p, fdp, indx, dfd, mode, error)
- struct proc *p;
+dupfdopen(td, fdp, indx, dfd, mode, error)
+ struct thread *td;
struct filedesc *fdp;
int indx, dfd;
int mode;
@@ -1498,7 +1506,7 @@ dupfdopen(p, fdp, indx, dfd, mode, error)
fp = fdp->fd_ofiles[indx];
#if 0
if (fp && fdp->fd_ofileflags[indx] & UF_MAPPED)
- (void) munmapfd(p, indx);
+ (void) munmapfd(td, indx);
#endif
fdp->fd_ofiles[indx] = wfp;
fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
@@ -1510,7 +1518,7 @@ dupfdopen(p, fdp, indx, dfd, mode, error)
* used to own. Release it.
*/
if (fp)
- fdrop(fp, p);
+ fdrop(fp, td);
return (0);
case ENXIO:
@@ -1520,7 +1528,7 @@ dupfdopen(p, fdp, indx, dfd, mode, error)
fp = fdp->fd_ofiles[indx];
#if 0
if (fp && fdp->fd_ofileflags[indx] & UF_MAPPED)
- (void) munmapfd(p, indx);
+ (void) munmapfd(td, indx);
#endif
fdp->fd_ofiles[indx] = fdp->fd_ofiles[dfd];
fdp->fd_ofiles[dfd] = NULL;
@@ -1532,7 +1540,7 @@ dupfdopen(p, fdp, indx, dfd, mode, error)
* used to own. Release it.
*/
if (fp)
- fdrop(fp, p);
+ fdrop(fp, td);
/*
* Complete the clean up of the filedesc structure by
* recomputing the various hints.
@@ -1630,11 +1638,11 @@ struct fileops badfileops = {
};
static int
-badfo_readwrite(fp, uio, cred, flags, p)
+badfo_readwrite(fp, uio, cred, flags, td)
struct file *fp;
struct uio *uio;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
int flags;
{
@@ -1642,22 +1650,22 @@ badfo_readwrite(fp, uio, cred, flags, p)
}
static int
-badfo_ioctl(fp, com, data, p)
+badfo_ioctl(fp, com, data, td)
struct file *fp;
u_long com;
caddr_t data;
- struct proc *p;
+ struct thread *td;
{
return (EBADF);
}
static int
-badfo_poll(fp, events, cred, p)
+badfo_poll(fp, events, cred, td)
struct file *fp;
int events;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
{
return (0);
@@ -1673,19 +1681,19 @@ badfo_kqfilter(fp, kn)
}
static int
-badfo_stat(fp, sb, p)
+badfo_stat(fp, sb, td)
struct file *fp;
struct stat *sb;
- struct proc *p;
+ struct thread *td;
{
return (EBADF);
}
static int
-badfo_close(fp, p)
+badfo_close(fp, td)
struct file *fp;
- struct proc *p;
+ struct thread *td;
{
return (EBADF);
diff --git a/sys/kern/kern_event.c b/sys/kern/kern_event.c
index 6f871b6..35391b0 100644
--- a/sys/kern/kern_event.c
+++ b/sys/kern/kern_event.c
@@ -52,18 +52,18 @@
static int kqueue_scan(struct file *fp, int maxevents,
struct kevent *ulistp, const struct timespec *timeout,
- struct proc *p);
+ struct thread *td);
static int kqueue_read(struct file *fp, struct uio *uio,
- struct ucred *cred, int flags, struct proc *p);
+ struct ucred *cred, int flags, struct thread *td);
static int kqueue_write(struct file *fp, struct uio *uio,
- struct ucred *cred, int flags, struct proc *p);
+ struct ucred *cred, int flags, struct thread *td);
static int kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
- struct proc *p);
+ struct thread *td);
static int kqueue_poll(struct file *fp, int events, struct ucred *cred,
- struct proc *p);
+ struct thread *td);
static int kqueue_kqfilter(struct file *fp, struct knote *kn);
-static int kqueue_stat(struct file *fp, struct stat *st, struct proc *p);
-static int kqueue_close(struct file *fp, struct proc *p);
+static int kqueue_stat(struct file *fp, struct stat *st, struct thread *td);
+static int kqueue_close(struct file *fp, struct thread *td);
static void kqueue_wakeup(struct kqueue *kq);
static struct fileops kqueueops = {
@@ -77,7 +77,7 @@ static struct fileops kqueueops = {
};
static void knote_attach(struct knote *kn, struct filedesc *fdp);
-static void knote_drop(struct knote *kn, struct proc *p);
+static void knote_drop(struct knote *kn, struct thread *td);
static void knote_enqueue(struct knote *kn);
static void knote_dequeue(struct knote *kn);
static void knote_init(void);
@@ -335,7 +335,7 @@ filt_timer(struct knote *kn, long hint)
* MPSAFE
*/
int
-kqueue(struct proc *p, struct kqueue_args *uap)
+kqueue(struct thread *td, struct kqueue_args *uap)
{
struct filedesc *fdp;
struct kqueue *kq;
@@ -343,8 +343,8 @@ kqueue(struct proc *p, struct kqueue_args *uap)
int fd, error;
mtx_lock(&Giant);
- fdp = p->p_fd;
- error = falloc(p, &fp, &fd);
+ fdp = td->td_proc->p_fd;
+ error = falloc(td, &fp, &fd);
if (error)
goto done2;
fp->f_flag = FREAD | FWRITE;
@@ -353,7 +353,7 @@ kqueue(struct proc *p, struct kqueue_args *uap)
kq = malloc(sizeof(struct kqueue), M_TEMP, M_WAITOK | M_ZERO);
TAILQ_INIT(&kq->kq_head);
fp->f_data = (caddr_t)kq;
- p->p_retval[0] = fd;
+ td->td_retval[0] = fd;
if (fdp->fd_knlistsize < 0)
fdp->fd_knlistsize = 0; /* this process has a kq */
kq->kq_fdp = fdp;
@@ -376,7 +376,7 @@ struct kevent_args {
* MPSAFE
*/
int
-kevent(struct proc *p, struct kevent_args *uap)
+kevent(struct thread *td, struct kevent_args *uap)
{
struct filedesc *fdp;
struct kevent *kevp;
@@ -386,7 +386,7 @@ kevent(struct proc *p, struct kevent_args *uap)
int i, n, nerrors, error;
mtx_lock(&Giant);
- fdp = p->p_fd;
+ fdp = td->td_proc->p_fd;
if (((u_int)uap->fd) >= fdp->fd_nfiles ||
(fp = fdp->fd_ofiles[uap->fd]) == NULL ||
(fp->f_type != DTYPE_KQUEUE)) {
@@ -414,7 +414,7 @@ kevent(struct proc *p, struct kevent_args *uap)
for (i = 0; i < n; i++) {
kevp = &kq->kq_kev[i];
kevp->flags &= ~EV_SYSFLAGS;
- error = kqueue_register(kq, kevp, p);
+ error = kqueue_register(kq, kevp, td);
if (error) {
if (uap->nevents != 0) {
kevp->flags = EV_ERROR;
@@ -434,21 +434,21 @@ kevent(struct proc *p, struct kevent_args *uap)
uap->changelist += n;
}
if (nerrors) {
- p->p_retval[0] = nerrors;
+ td->td_retval[0] = nerrors;
error = 0;
goto done;
}
- error = kqueue_scan(fp, uap->nevents, uap->eventlist, uap->timeout, p);
+ error = kqueue_scan(fp, uap->nevents, uap->eventlist, uap->timeout, td);
done:
if (fp != NULL)
- fdrop(fp, p);
+ fdrop(fp, td);
mtx_unlock(&Giant);
return (error);
}
int
-kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p)
+kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td)
{
struct filedesc *fdp = kq->kq_fdp;
struct filterops *fops;
@@ -531,7 +531,7 @@ kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p)
knote_attach(kn, fdp);
if ((error = fops->f_attach(kn)) != 0) {
- knote_drop(kn, p);
+ knote_drop(kn, td);
goto done;
}
} else {
@@ -552,7 +552,7 @@ kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p)
} else if (kev->flags & EV_DELETE) {
kn->kn_fop->f_detach(kn);
- knote_drop(kn, p);
+ knote_drop(kn, td);
goto done;
}
@@ -574,13 +574,13 @@ kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p)
done:
if (fp != NULL)
- fdrop(fp, p);
+ fdrop(fp, td);
return (error);
}
static int
kqueue_scan(struct file *fp, int maxevents, struct kevent *ulistp,
- const struct timespec *tsp, struct proc *p)
+ const struct timespec *tsp, struct thread *td)
{
struct kqueue *kq = (struct kqueue *)fp->f_data;
struct kevent *kevp;
@@ -673,7 +673,7 @@ start:
kq->kq_count--;
splx(s);
kn->kn_fop->f_detach(kn);
- knote_drop(kn, p);
+ knote_drop(kn, td);
s = splhigh();
} else if (kn->kn_flags & EV_CLEAR) {
kn->kn_data = 0;
@@ -702,7 +702,7 @@ done:
if (nkev != 0)
error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
sizeof(struct kevent) * nkev);
- p->p_retval[0] = maxevents - count;
+ td->td_retval[0] = maxevents - count;
return (error);
}
@@ -713,7 +713,7 @@ done:
/*ARGSUSED*/
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred,
- int flags, struct proc *p)
+ int flags, struct thread *td)
{
return (ENXIO);
}
@@ -721,21 +721,21 @@ kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred,
/*ARGSUSED*/
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred,
- int flags, struct proc *p)
+ int flags, struct thread *td)
{
return (ENXIO);
}
/*ARGSUSED*/
static int
-kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
+kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct thread *td)
{
return (ENOTTY);
}
/*ARGSUSED*/
static int
-kqueue_poll(struct file *fp, int events, struct ucred *cred, struct proc *p)
+kqueue_poll(struct file *fp, int events, struct ucred *cred, struct thread *td)
{
struct kqueue *kq = (struct kqueue *)fp->f_data;
int revents = 0;
@@ -745,7 +745,7 @@ kqueue_poll(struct file *fp, int events, struct ucred *cred, struct proc *p)
if (kq->kq_count) {
revents |= events & (POLLIN | POLLRDNORM);
} else {
- selrecord(p, &kq->kq_sel);
+ selrecord(curthread, &kq->kq_sel);
kq->kq_state |= KQ_SEL;
}
}
@@ -755,7 +755,7 @@ kqueue_poll(struct file *fp, int events, struct ucred *cred, struct proc *p)
/*ARGSUSED*/
static int
-kqueue_stat(struct file *fp, struct stat *st, struct proc *p)
+kqueue_stat(struct file *fp, struct stat *st, struct thread *td)
{
struct kqueue *kq = (struct kqueue *)fp->f_data;
@@ -768,10 +768,10 @@ kqueue_stat(struct file *fp, struct stat *st, struct proc *p)
/*ARGSUSED*/
static int
-kqueue_close(struct file *fp, struct proc *p)
+kqueue_close(struct file *fp, struct thread *td)
{
struct kqueue *kq = (struct kqueue *)fp->f_data;
- struct filedesc *fdp = p->p_fd;
+ struct filedesc *fdp = td->td_proc->p_fd;
struct knote **knp, *kn, *kn0;
int i;
@@ -782,7 +782,7 @@ kqueue_close(struct file *fp, struct proc *p)
kn0 = SLIST_NEXT(kn, kn_link);
if (kq == kn->kn_kq) {
kn->kn_fop->f_detach(kn);
- fdrop(kn->kn_fp, p);
+ fdrop(kn->kn_fp, td);
knote_free(kn);
*knp = kn0;
} else {
@@ -847,13 +847,13 @@ knote(struct klist *list, long hint)
* remove all knotes from a specified klist
*/
void
-knote_remove(struct proc *p, struct klist *list)
+knote_remove(struct thread *td, struct klist *list)
{
struct knote *kn;
while ((kn = SLIST_FIRST(list)) != NULL) {
kn->kn_fop->f_detach(kn);
- knote_drop(kn, p);
+ knote_drop(kn, td);
}
}
@@ -861,12 +861,12 @@ knote_remove(struct proc *p, struct klist *list)
* remove all knotes referencing a specified fd
*/
void
-knote_fdclose(struct proc *p, int fd)
+knote_fdclose(struct thread *td, int fd)
{
- struct filedesc *fdp = p->p_fd;
+ struct filedesc *fdp = td->td_proc->p_fd;
struct klist *list = &fdp->fd_knlist[fd];
- knote_remove(p, list);
+ knote_remove(td, list);
}
static void
@@ -910,9 +910,9 @@ done:
* while calling fdrop and free.
*/
static void
-knote_drop(struct knote *kn, struct proc *p)
+knote_drop(struct knote *kn, struct thread *td)
{
- struct filedesc *fdp = p->p_fd;
+ struct filedesc *fdp = td->td_proc->p_fd;
struct klist *list;
if (kn->kn_fop->f_isfd)
@@ -924,7 +924,7 @@ knote_drop(struct knote *kn, struct proc *p)
if (kn->kn_status & KN_QUEUED)
knote_dequeue(kn);
if (kn->kn_fop->f_isfd)
- fdrop(kn->kn_fp, p);
+ fdrop(kn->kn_fp, td);
knote_free(kn);
}
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 90a4f4c..0091d09 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -102,10 +102,11 @@ struct execve_args {
* MPSAFE
*/
int
-execve(p, uap)
- struct proc *p;
+execve(td, uap)
+ struct thread *td;
register struct execve_args *uap;
{
+ struct proc *p = td->td_proc;
struct nameidata nd, *ndp;
struct ucred *newcred, *oldcred;
register_t *stack_base;
@@ -117,6 +118,10 @@ execve(p, uap)
imgp = &image_params;
+/* XXXKSE */
+/* !!!!!!!! we need to abort all the other threads of this process before */
+/* we proceed beyond this point! */
+
/*
* Initialize part of the common data
*/
@@ -156,7 +161,7 @@ execve(p, uap)
*/
ndp = &nd;
NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME,
- UIO_USERSPACE, uap->fname, p);
+ UIO_USERSPACE, uap->fname, td);
interpret:
@@ -175,12 +180,12 @@ interpret:
*/
error = exec_check_permissions(imgp);
if (error) {
- VOP_UNLOCK(imgp->vp, 0, p);
+ VOP_UNLOCK(imgp->vp, 0, td);
goto exec_fail_dealloc;
}
error = exec_map_first_page(imgp);
- VOP_UNLOCK(imgp->vp, 0, p);
+ VOP_UNLOCK(imgp->vp, 0, td);
if (error)
goto exec_fail_dealloc;
@@ -223,7 +228,7 @@ interpret:
vrele(ndp->ni_vp);
/* set new name to that of the interpreter */
NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME,
- UIO_SYSSPACE, imgp->interpreter_name, p);
+ UIO_SYSSPACE, imgp->interpreter_name, td);
goto interpret;
}
@@ -250,8 +255,8 @@ interpret:
if (p->p_fd->fd_refcnt > 1) {
struct filedesc *tmp;
- tmp = fdcopy(p);
- fdfree(p);
+ tmp = fdcopy(td);
+ fdfree(td);
p->p_fd = tmp;
}
@@ -270,17 +275,17 @@ interpret:
p->p_procsig->ps_refcnt--;
p->p_procsig = newprocsig;
p->p_procsig->ps_refcnt = 1;
- if (p->p_sigacts == &p->p_addr->u_sigacts)
+ if (p->p_sigacts == &p->p_uarea->u_sigacts)
panic("shared procsig but private sigacts?");
- p->p_addr->u_sigacts = *p->p_sigacts;
- p->p_sigacts = &p->p_addr->u_sigacts;
+ p->p_uarea->u_sigacts = *p->p_sigacts;
+ p->p_sigacts = &p->p_uarea->u_sigacts;
}
/* Stop profiling */
stopprofclock(p);
/* close files on exec */
- fdcloseexec(p);
+ fdcloseexec(td);
/* reset caught signals */
execsigs(p);
@@ -342,7 +347,7 @@ interpret:
change_euid(newcred, attr.va_uid);
if (attr.va_mode & VSGID)
change_egid(newcred, attr.va_gid);
- setugidsafety(p);
+ setugidsafety(td);
} else {
if (oldcred->cr_uid == oldcred->cr_ruid &&
oldcred->cr_gid == oldcred->cr_rgid)
@@ -408,7 +413,7 @@ interpret:
p->p_acflag &= ~AFORK;
/* Set values passed into the program in registers. */
- setregs(p, imgp->entry_addr, (u_long)(uintptr_t)stack_base,
+ setregs(td, imgp->entry_addr, (u_long)(uintptr_t)stack_base,
imgp->ps_strings);
/* Free any previous argument cache */
@@ -454,7 +459,7 @@ exec_fail_dealloc:
exec_fail:
if (imgp->vmspace_destroyed) {
/* sorry, no more process anymore. exit gracefully */
- exit1(p, W_EXITCODE(0, SIGABRT));
+ exit1(td, W_EXITCODE(0, SIGABRT));
/* NOT REACHED */
error = 0;
}
@@ -587,7 +592,7 @@ exec_new_vmspace(imgp)
error = vm_map_find(&vmspace->vm_map, 0, 0, &bsaddr,
4*PAGE_SIZE, 0,
VM_PROT_ALL, VM_PROT_ALL, 0);
- imgp->proc->p_md.md_bspstore = bsaddr;
+ imgp->proc->p_thread.td_md.md_bspstore = bsaddr;
}
#endif
@@ -798,7 +803,7 @@ exec_check_permissions(imgp)
int error;
/* Get file attributes */
- error = VOP_GETATTR(vp, attr, p->p_ucred, p);
+ error = VOP_GETATTR(vp, attr, p->p_ucred, curthread); /* XXXKSE */
if (error)
return (error);
@@ -825,7 +830,7 @@ exec_check_permissions(imgp)
/*
* Check for execute permission to file based on current credentials.
*/
- error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p);
+ error = VOP_ACCESS(vp, VEXEC, p->p_ucred, curthread); /* XXXKSE */
if (error)
return (error);
@@ -840,7 +845,7 @@ exec_check_permissions(imgp)
* Call filesystem specific open routine (which does nothing in the
* general case).
*/
- error = VOP_OPEN(vp, FREAD, p->p_ucred, p);
+ error = VOP_OPEN(vp, FREAD, p->p_ucred, curthread); /* XXXKSE */
if (error)
return (error);
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 812b20e..80da053 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -79,7 +79,7 @@ MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");
static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");
-static int wait1 __P((struct proc *, struct wait_args *, int));
+static int wait1 __P((struct thread *, struct wait_args *, int));
/*
* callout list for things to do at exit time
@@ -99,14 +99,15 @@ static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);
* MPSAFE
*/
void
-sys_exit(p, uap)
- struct proc *p;
+sys_exit(td, uap)
+ struct thread *td;
struct sys_exit_args /* {
int rval;
} */ *uap;
{
+
mtx_lock(&Giant);
- exit1(p, W_EXITCODE(uap->rval, 0));
+ exit1(td, W_EXITCODE(uap->rval, 0));
/* NOTREACHED */
}
@@ -116,10 +117,11 @@ sys_exit(p, uap)
* status and rusage for wait(). Check for child processes and orphan them.
*/
void
-exit1(p, rv)
- register struct proc *p;
+exit1(td, rv)
+ register struct thread *td;
int rv;
{
+ struct proc *p = td->td_proc;
register struct proc *q, *nq;
register struct vmspace *vm;
struct exitlist *ep;
@@ -132,6 +134,9 @@ exit1(p, rv)
panic("Going nowhere without my init!");
}
+/* XXXKSE */
+/* MUST abort all other threads before proceeding past this point */
+
aio_proc_rundown(p);
/* are we a task leader? */
@@ -189,7 +194,7 @@ exit1(p, rv)
* Close open files and release open-file table.
* This may block!
*/
- fdfree(p);
+ fdfree(&p->p_thread); /* XXXKSE */
/*
* Remove ourself from our leader's peer list and wake our leader.
@@ -264,7 +269,7 @@ exit1(p, rv)
} else
PROC_UNLOCK(p);
fixjobc(p, p->p_pgrp, 0);
- (void)acct_process(p);
+ (void)acct_process(td);
#ifdef KTRACE
/*
* release trace file
@@ -385,7 +390,7 @@ exit1(p, rv)
* The address space is released by "vmspace_free(p->p_vmspace)"
* in vm_waitproc();
*/
- cpu_exit(p);
+ cpu_exit(td);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
@@ -413,8 +418,8 @@ exit1(p, rv)
* MPSAFE, the dirty work is handled by wait1().
*/
int
-owait(p, uap)
- struct proc *p;
+owait(td, uap)
+ struct thread *td;
register struct owait_args /* {
int dummy;
} */ *uap;
@@ -425,7 +430,7 @@ owait(p, uap)
w.rusage = NULL;
w.pid = WAIT_ANY;
w.status = NULL;
- return (wait1(p, &w, 1));
+ return (wait1(td, &w, 1));
}
#endif /* COMPAT_43 */
@@ -433,19 +438,20 @@ owait(p, uap)
* MPSAFE, the dirty work is handled by wait1().
*/
int
-wait4(p, uap)
- struct proc *p;
+wait4(td, uap)
+ struct thread *td;
struct wait_args *uap;
{
- return (wait1(p, uap, 0));
+
+ return (wait1(td, uap, 0));
}
/*
* MPSAFE
*/
static int
-wait1(q, uap, compat)
- register struct proc *q;
+wait1(td, uap, compat)
+ register struct thread *td;
register struct wait_args /* {
int pid;
int *status;
@@ -455,10 +461,11 @@ wait1(q, uap, compat)
int compat;
{
register int nfound;
- register struct proc *p, *t;
+ register struct proc *q, *p, *t;
int status, error;
mtx_lock(&Giant);
+ q = td->td_proc;
if (uap->pid == 0)
uap->pid = -q->p_pgid;
if (uap->options &~ (WUNTRACED|WNOHANG|WLINUXCLONE)) {
@@ -491,20 +498,31 @@ loop:
nfound++;
mtx_lock_spin(&sched_lock);
if (p->p_stat == SZOMB) {
- /* charge childs scheduling cpu usage to parent */
- if (curproc->p_pid != 1) {
- curproc->p_estcpu =
- ESTCPULIM(curproc->p_estcpu + p->p_estcpu);
+ /*
+ * charge child's scheduling cpu usage to parent
+ * XXXKSE assume only one thread & kse & ksegrp
+ * keep estcpu in each ksegrp
+ * so charge it to the ksegrp that did the wait
+ * since process estcpu is sum of all ksegrps,
+ * this is strictly as expected.
+ * Assume that the child process aggregated all
+ * the estcpu into the 'built-in' ksegrp.
+ * XXXKSE
+ */
+ if (curthread->td_proc->p_pid != 1) {
+ curthread->td_ksegrp->kg_estcpu =
+ ESTCPULIM(curthread->td_ksegrp->kg_estcpu +
+ p->p_ksegrp.kg_estcpu);
}
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
sx_sunlock(&proctree_lock);
- q->p_retval[0] = p->p_pid;
+ td->td_retval[0] = p->p_pid;
#ifdef COMPAT_43
if (compat)
- q->p_retval[1] = p->p_xstat;
+ td->td_retval[1] = p->p_xstat;
else
#endif
if (uap->status) {
@@ -583,7 +601,7 @@ loop:
FREE(p->p_args, M_PARGS);
if (--p->p_procsig->ps_refcnt == 0) {
- if (p->p_sigacts != &p->p_addr->u_sigacts)
+ if (p->p_sigacts != &p->p_uarea->u_sigacts)
FREE(p->p_sigacts, M_SUBPROC);
FREE(p->p_procsig, M_SUBPROC);
p->p_procsig = NULL;
@@ -607,10 +625,10 @@ loop:
p->p_flag |= P_WAITED;
PROC_UNLOCK(p);
sx_sunlock(&proctree_lock);
- q->p_retval[0] = p->p_pid;
+ td->td_retval[0] = p->p_pid;
#ifdef COMPAT_43
if (compat) {
- q->p_retval[1] = W_STOPCODE(p->p_xstat);
+ td->td_retval[1] = W_STOPCODE(p->p_xstat);
error = 0;
} else
#endif
@@ -631,7 +649,7 @@ loop:
goto done2;
}
if (uap->options & WNOHANG) {
- q->p_retval[0] = 0;
+ td->td_retval[0] = 0;
error = 0;
goto done2;
}
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index af154c3..f5ae42c 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -110,18 +110,18 @@ SYSINIT(fork_list, SI_SUB_INTRINSIC, SI_ORDER_ANY, init_fork_list, NULL);
*/
/* ARGSUSED */
int
-fork(p, uap)
- struct proc *p;
+fork(td, uap)
+ struct thread *td;
struct fork_args *uap;
{
int error;
struct proc *p2;
mtx_lock(&Giant);
- error = fork1(p, RFFDG | RFPROC, &p2);
+ error = fork1(td, RFFDG | RFPROC, &p2);
if (error == 0) {
- p->p_retval[0] = p2->p_pid;
- p->p_retval[1] = 0;
+ td->td_retval[0] = p2->p_pid;
+ td->td_retval[1] = 0;
}
mtx_unlock(&Giant);
return error;
@@ -132,18 +132,18 @@ fork(p, uap)
*/
/* ARGSUSED */
int
-vfork(p, uap)
- struct proc *p;
+vfork(td, uap)
+ struct thread *td;
struct vfork_args *uap;
{
int error;
struct proc *p2;
mtx_lock(&Giant);
- error = fork1(p, RFFDG | RFPROC | RFPPWAIT | RFMEM, &p2);
+ error = fork1(td, RFFDG | RFPROC | RFPPWAIT | RFMEM, &p2);
if (error == 0) {
- p->p_retval[0] = p2->p_pid;
- p->p_retval[1] = 0;
+ td->td_retval[0] = p2->p_pid;
+ td->td_retval[1] = 0;
}
mtx_unlock(&Giant);
return error;
@@ -153,8 +153,8 @@ vfork(p, uap)
* MPSAFE
*/
int
-rfork(p, uap)
- struct proc *p;
+rfork(td, uap)
+ struct thread *td;
struct rfork_args *uap;
{
int error;
@@ -162,10 +162,10 @@ rfork(p, uap)
/* mask kernel only flags out of the user flags */
mtx_lock(&Giant);
- error = fork1(p, uap->flags & ~RFKERNELONLY, &p2);
+ error = fork1(td, uap->flags & ~RFKERNELONLY, &p2);
if (error == 0) {
- p->p_retval[0] = p2 ? p2->p_pid : 0;
- p->p_retval[1] = 0;
+ td->td_retval[0] = p2 ? p2->p_pid : 0;
+ td->td_retval[1] = 0;
}
mtx_unlock(&Giant);
return error;
@@ -209,9 +209,26 @@ sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
+#if 0
+void
+kse_init(struct kse *kse1, struct kse *kse2)
+{
+}
+
+void
+thread_init(struct thread *thread1, struct thread *thread2)
+{
+}
+
+void
+ksegrp_init(struct ksegrp *ksegrp1, struct ksegrp *ksegrp2)
+{
+}
+#endif
+
int
-fork1(p1, flags, procp)
- struct proc *p1; /* parent proc */
+fork1(td, flags, procp)
+ struct thread *td; /* parent thread */
int flags;
struct proc **procp; /* child proc */
{
@@ -223,6 +240,7 @@ fork1(p1, flags, procp)
static int pidchecked = 0;
struct forklist *ep;
struct filedesc *fd;
+ struct proc *p1 = td->td_proc;
GIANT_REQUIRED;
@@ -235,16 +253,16 @@ fork1(p1, flags, procp)
* certain parts of a process from itself.
*/
if ((flags & RFPROC) == 0) {
- vm_forkproc(p1, 0, flags);
+ vm_forkproc(td, 0, flags);
/*
* Close all file descriptors.
*/
if (flags & RFCFDG) {
struct filedesc *fdtmp;
- fdtmp = fdinit(p1);
+ fdtmp = fdinit(td); /* XXXKSE */
PROC_LOCK(p1);
- fdfree(p1);
+ fdfree(td); /* XXXKSE */
p1->p_fd = fdtmp;
PROC_UNLOCK(p1);
}
@@ -255,9 +273,9 @@ fork1(p1, flags, procp)
if (flags & RFFDG) {
if (p1->p_fd->fd_refcnt > 1) {
struct filedesc *newfd;
- newfd = fdcopy(p1);
+ newfd = fdcopy(td);
PROC_LOCK(p1);
- fdfree(p1);
+ fdfree(td);
p1->p_fd = newfd;
PROC_UNLOCK(p1);
}
@@ -401,13 +419,42 @@ again:
*/
bzero(&p2->p_startzero,
(unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
+ bzero(&p2->p_kse.ke_startzero,
+ (unsigned) ((caddr_t)&p2->p_kse.ke_endzero
+ - (caddr_t)&p2->p_kse.ke_startzero));
+ bzero(&p2->p_thread.td_startzero,
+ (unsigned) ((caddr_t)&p2->p_thread.td_endzero
+ - (caddr_t)&p2->p_thread.td_startzero));
+ bzero(&p2->p_ksegrp.kg_startzero,
+ (unsigned) ((caddr_t)&p2->p_ksegrp.kg_endzero
+ - (caddr_t)&p2->p_ksegrp.kg_startzero));
PROC_LOCK(p1);
bcopy(&p1->p_startcopy, &p2->p_startcopy,
(unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));
+
+ bcopy(&p1->p_kse.ke_startcopy, &p2->p_kse.ke_startcopy,
+ (unsigned) ((caddr_t)&p2->p_kse.ke_endcopy
+ - (caddr_t)&p2->p_kse.ke_startcopy));
+
+ bcopy(&p1->p_thread.td_startcopy, &p2->p_thread.td_startcopy,
+ (unsigned) ((caddr_t)&p2->p_thread.td_endcopy
+ - (caddr_t)&p2->p_thread.td_startcopy));
+
+ bcopy(&p1->p_ksegrp.kg_startcopy, &p2->p_ksegrp.kg_startcopy,
+ (unsigned) ((caddr_t)&p2->p_ksegrp.kg_endcopy
+ - (caddr_t)&p2->p_ksegrp.kg_startcopy));
PROC_UNLOCK(p1);
+ /*
+ * XXXKSE Theoretically only the running thread would get copied
+ * Others in the kernel would be 'aborted' in the child.
+ * i.e return E*something*
+ */
+ proc_linkup(p2);
+
mtx_init(&p2->p_mtx, "process lock", MTX_DEF);
PROC_LOCK(p2);
+ /* note.. XXXKSE no pcb or u-area yet */
/*
* Duplicate sub-structures as needed.
@@ -433,7 +480,7 @@ again:
if (flags & RFSIGSHARE) {
p2->p_procsig = p1->p_procsig;
p2->p_procsig->ps_refcnt++;
- if (p1->p_sigacts == &p1->p_addr->u_sigacts) {
+ if (p1->p_sigacts == &p1->p_uarea->u_sigacts) {
struct sigacts *newsigacts;
PROC_UNLOCK(p1);
@@ -450,7 +497,7 @@ again:
* the shared p_procsig->ps_sigacts.
*/
p2->p_sigacts = newsigacts;
- *p2->p_sigacts = p1->p_addr->u_sigacts;
+ *p2->p_sigacts = p1->p_uarea->u_sigacts;
}
} else {
PROC_UNLOCK(p1);
@@ -476,9 +523,9 @@ again:
VREF(p2->p_textvp);
if (flags & RFCFDG)
- fd = fdinit(p1);
+ fd = fdinit(td);
else if (flags & RFFDG)
- fd = fdcopy(p1);
+ fd = fdcopy(td);
else
fd = fdshare(p1);
PROC_LOCK(p2);
@@ -531,10 +578,10 @@ again:
sx_xunlock(&proctree_lock);
PROC_LOCK(p2);
LIST_INIT(&p2->p_children);
- LIST_INIT(&p2->p_contested);
+ LIST_INIT(&p2->p_thread.td_contested); /* XXXKSE only 1 thread? */
callout_init(&p2->p_itcallout, 0);
- callout_init(&p2->p_slpcallout, 1);
+ callout_init(&p2->p_thread.td_slpcallout, 1); /* XXXKSE */
PROC_LOCK(p1);
#ifdef KTRACE
@@ -556,9 +603,10 @@ again:
/*
* set priority of child to be that of parent
+ * XXXKSE hey! copying the estcpu seems dodgy.. should split it..
*/
mtx_lock_spin(&sched_lock);
- p2->p_estcpu = p1->p_estcpu;
+ p2->p_ksegrp.kg_estcpu = p1->p_ksegrp.kg_estcpu;
mtx_unlock_spin(&sched_lock);
/*
@@ -573,7 +621,7 @@ again:
* Finish creating the child process. It will return via a different
* execution path later. (ie: directly into user mode)
*/
- vm_forkproc(p1, p2, flags);
+ vm_forkproc(td, p2, flags);
if (flags == (RFFDG | RFPROC)) {
cnt.v_forks++;
@@ -609,7 +657,7 @@ again:
if ((flags & RFSTOPPED) == 0) {
mtx_lock_spin(&sched_lock);
p2->p_stat = SRUN;
- setrunqueue(p2);
+ setrunqueue(&p2->p_thread);
mtx_unlock_spin(&sched_lock);
}
@@ -708,14 +756,13 @@ fork_exit(callout, arg, frame)
void *arg;
struct trapframe *frame;
{
- struct proc *p;
-
- p = curproc;
+ struct thread *td = curthread;
+ struct proc *p = td->td_proc;
/*
* Setup the sched_lock state so that we can release it.
*/
- sched_lock.mtx_lock = (uintptr_t)p;
+ sched_lock.mtx_lock = (uintptr_t)td;
sched_lock.mtx_recurse = 0;
/*
* XXX: We really shouldn't have to do this.
@@ -760,15 +807,15 @@ fork_exit(callout, arg, frame)
* first parameter and is called when returning to a new userland process.
*/
void
-fork_return(p, frame)
- struct proc *p;
+fork_return(td, frame)
+ struct thread *td;
struct trapframe *frame;
{
- userret(p, frame, 0);
+ userret(td, frame, 0);
#ifdef KTRACE
- if (KTRPOINT(p, KTR_SYSRET)) {
- ktrsysret(p->p_tracep, SYS_fork, 0, 0);
+ if (KTRPOINT(td->td_proc, KTR_SYSRET)) {
+ ktrsysret(td->td_proc->p_tracep, SYS_fork, 0, 0);
}
#endif
mtx_assert(&Giant, MA_NOTOWNED);
diff --git a/sys/kern/kern_idle.c b/sys/kern/kern_idle.c
index c1482bb..b37e27a 100644
--- a/sys/kern/kern_idle.c
+++ b/sys/kern/kern_idle.c
@@ -46,13 +46,13 @@ idle_setup(void *dummy)
SLIST_FOREACH(gd, &cpuhead, gd_allcpu) {
error = kthread_create(idle_proc, NULL, &p,
RFSTOPPED | RFHIGHPID, "idle: cpu%d", gd->gd_cpuid);
- gd->gd_idleproc = p;
- if (gd->gd_curproc == NULL)
- gd->gd_curproc = p;
+ gd->gd_idlethread = &p->p_thread;
+ if (gd->gd_curthread == NULL)
+ gd->gd_curthread = gd->gd_idlethread;
#else
error = kthread_create(idle_proc, NULL, &p,
RFSTOPPED | RFHIGHPID, "idle");
- PCPU_SET(idleproc, p);
+ PCPU_SET(idlethread, &p->p_thread);
#endif
if (error)
panic("idle_setup: kthread_create error %d\n", error);
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index f5ba010..15ae4ff 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -118,25 +118,27 @@ static void
ithread_update(struct ithd *ithd)
{
struct intrhand *ih;
+ struct thread *td;
struct proc *p;
int entropy;
mtx_assert(&ithd->it_lock, MA_OWNED);
- p = ithd->it_proc;
- if (p == NULL)
+ td = ithd->it_td;
+ if (td == NULL)
return;
+ p = td->td_proc;
strncpy(p->p_comm, ithd->it_name, sizeof(ithd->it_name));
ih = TAILQ_FIRST(&ithd->it_handlers);
if (ih == NULL) {
- p->p_pri.pri_level = PRI_MAX_ITHD;
+ td->td_ksegrp->kg_pri.pri_level = PRI_MAX_ITHD;
ithd->it_flags &= ~IT_ENTROPY;
return;
}
entropy = 0;
- p->p_pri.pri_level = ih->ih_pri;
- p->p_pri.pri_native = ih->ih_pri;
+ td->td_ksegrp->kg_pri.pri_level = ih->ih_pri;
+ td->td_ksegrp->kg_pri.pri_native = ih->ih_pri;
TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) {
if (strlen(p->p_comm) + strlen(ih->ih_name) + 1 <
sizeof(p->p_comm)) {
@@ -166,6 +168,7 @@ ithread_create(struct ithd **ithread, int vector, int flags,
void (*disable)(int), void (*enable)(int), const char *fmt, ...)
{
struct ithd *ithd;
+ struct thread *td;
struct proc *p;
int error;
va_list ap;
@@ -194,11 +197,12 @@ ithread_create(struct ithd **ithread, int vector, int flags,
free(ithd, M_ITHREAD);
return (error);
}
- p->p_pri.pri_class = PRI_ITHD;
- p->p_pri.pri_level = PRI_MAX_ITHD;
+ td = &p->p_thread; /* XXXKSE */
+ td->td_ksegrp->kg_pri.pri_class = PRI_ITHD;
+ td->td_ksegrp->kg_pri.pri_level = PRI_MAX_ITHD;
p->p_stat = SWAIT;
- ithd->it_proc = p;
- p->p_ithd = ithd;
+ ithd->it_td = td;
+ td->td_ithd = ithd;
if (ithread != NULL)
*ithread = ithd;
mtx_unlock(&ithd->it_lock);
@@ -211,9 +215,13 @@ int
ithread_destroy(struct ithd *ithread)
{
+ struct thread *td;
+ struct proc *p;
if (ithread == NULL)
return (EINVAL);
+ td = ithread->it_td;
+ p = td->td_proc;
mtx_lock(&ithread->it_lock);
if (!TAILQ_EMPTY(&ithread->it_handlers)) {
mtx_unlock(&ithread->it_lock);
@@ -221,9 +229,9 @@ ithread_destroy(struct ithd *ithread)
}
ithread->it_flags |= IT_DEAD;
mtx_lock_spin(&sched_lock);
- if (ithread->it_proc->p_stat == SWAIT) {
- ithread->it_proc->p_stat = SRUN;
- setrunqueue(ithread->it_proc);
+ if (p->p_stat == SWAIT) {
+ p->p_stat = SRUN; /* XXXKSE */
+ setrunqueue(td);
}
mtx_unlock_spin(&sched_lock);
mtx_unlock(&ithread->it_lock);
@@ -319,7 +327,7 @@ ok:
* handler as being dead and let the ithread do the actual removal.
*/
mtx_lock_spin(&sched_lock);
- if (ithread->it_proc->p_stat != SWAIT) {
+ if (ithread->it_td->td_proc->p_stat != SWAIT) {
handler->ih_flags |= IH_DEAD;
/*
@@ -343,6 +351,7 @@ int
ithread_schedule(struct ithd *ithread, int do_switch)
{
struct int_entropy entropy;
+ struct thread *td;
struct proc *p;
/*
@@ -357,12 +366,13 @@ ithread_schedule(struct ithd *ithread, int do_switch)
*/
if (harvest.interrupt && ithread->it_flags & IT_ENTROPY) {
entropy.vector = ithread->it_vector;
- entropy.proc = CURPROC;
+ entropy.proc = curthread->td_proc;
random_harvest(&entropy, sizeof(entropy), 2, 0,
RANDOM_INTERRUPT);
}
- p = ithread->it_proc;
+ td = ithread->it_td;
+ p = td->td_proc;
KASSERT(p != NULL, ("ithread %s has no process", ithread->it_name));
CTR3(KTR_INTR, __func__ ": pid %d: (%s) need = %d", p->p_pid, p->p_comm,
ithread->it_need);
@@ -380,14 +390,14 @@ ithread_schedule(struct ithd *ithread, int do_switch)
if (p->p_stat == SWAIT) {
CTR1(KTR_INTR, __func__ ": setrunqueue %d", p->p_pid);
p->p_stat = SRUN;
- setrunqueue(p);
- if (do_switch && curproc->p_stat == SRUN) {
- if (curproc != PCPU_GET(idleproc))
- setrunqueue(curproc);
- curproc->p_stats->p_ru.ru_nivcsw++;
+ setrunqueue(td); /* XXXKSE */
+ if (do_switch && curthread->td_proc->p_stat == SRUN) {
+ if (curthread != PCPU_GET(idlethread))
+ setrunqueue(curthread);
+ curthread->td_proc->p_stats->p_ru.ru_nivcsw++;
mi_switch();
} else
- curproc->p_sflag |= PS_NEEDRESCHED;
+ curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
} else {
CTR3(KTR_INTR, __func__ ": pid %d: it_need %d, state %d",
p->p_pid, ithread->it_need, p->p_stat);
@@ -439,7 +449,7 @@ swi_sched(void *cookie, int flags)
atomic_add_int(&cnt.v_intr, 1); /* one more global interrupt */
CTR3(KTR_INTR, "swi_sched pid %d(%s) need=%d",
- it->it_proc->p_pid, it->it_proc->p_comm, it->it_need);
+ it->it_td->td_proc->p_pid, it->it_td->td_proc->p_comm, it->it_need);
/*
* Set ih_need for this handler so that if the ithread is already
@@ -461,11 +471,13 @@ ithread_loop(void *arg)
{
struct ithd *ithd; /* our thread context */
struct intrhand *ih; /* and our interrupt handler chain */
+ struct thread *td;
struct proc *p;
- p = curproc;
+ td = curthread;
+ p = td->td_proc;
ithd = (struct ithd *)arg; /* point to myself */
- KASSERT(ithd->it_proc == p && p->p_ithd == ithd,
+ KASSERT(ithd->it_td == td && td->td_ithd == ithd,
(__func__ ": ithread and proc linkage out of sync"));
/*
@@ -479,7 +491,7 @@ ithread_loop(void *arg)
if (ithd->it_flags & IT_DEAD) {
CTR2(KTR_INTR, __func__ ": pid %d: (%s) exiting",
p->p_pid, p->p_comm);
- p->p_ithd = NULL;
+ td->td_ithd = NULL;
mtx_destroy(&ithd->it_lock);
mtx_lock(&Giant);
free(ithd, M_ITHREAD);
@@ -559,9 +571,9 @@ start_softintr(void *dummy)
swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, 0, &vm_ih))
panic("died while creating standard software ithreads");
- PROC_LOCK(clk_ithd->it_proc);
- clk_ithd->it_proc->p_flag |= P_NOLOAD;
- PROC_UNLOCK(clk_ithd->it_proc);
+ PROC_LOCK(clk_ithd->it_td->td_proc);
+ clk_ithd->it_td->td_proc->p_flag |= P_NOLOAD;
+ PROC_UNLOCK(clk_ithd->it_td->td_proc);
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)
diff --git a/sys/kern/kern_jail.c b/sys/kern/kern_jail.c
index abe5e0f..c1d9470 100644
--- a/sys/kern/kern_jail.c
+++ b/sys/kern/kern_jail.c
@@ -48,12 +48,13 @@ SYSCTL_INT(_jail, OID_AUTO, sysvipc_allowed, CTLFLAG_RW,
* MPSAFE
*/
int
-jail(p, uap)
- struct proc *p;
+jail(td, uap)
+ struct thread *td;
struct jail_args /* {
syscallarg(struct jail *) jail;
} */ *uap;
{
+ struct proc *p = td->td_proc;
int error;
struct prison *pr;
struct jail j;
@@ -79,7 +80,7 @@ jail(p, uap)
pr->pr_ip = j.ip_number;
ca.path = j.path;
- error = chroot(p, &ca);
+ error = chroot(td, &ca);
if (error)
goto bail;
diff --git a/sys/kern/kern_kthread.c b/sys/kern/kern_kthread.c
index 7e8e702..cb715da 100644
--- a/sys/kern/kern_kthread.c
+++ b/sys/kern/kern_kthread.c
@@ -81,7 +81,7 @@ kthread_create(void (*func)(void *), void *arg,
if (!proc0.p_stats /* || proc0.p_stats->p_start.tv_sec == 0 */)
panic("kthread_create called too soon");
- error = fork1(&proc0, RFMEM | RFFDG | RFPROC | RFSTOPPED | flags, &p2);
+ error = fork1(thread0, RFMEM | RFFDG | RFPROC | RFSTOPPED | flags, &p2);
if (error)
return error;
@@ -102,14 +102,14 @@ kthread_create(void (*func)(void *), void *arg,
va_end(ap);
/* call the processes' main()... */
- cpu_set_fork_handler(p2, func, arg);
+ cpu_set_fork_handler(&p2->p_thread, func, arg); /* XXXKSE */
/* Delay putting it on the run queue until now. */
mtx_lock_spin(&sched_lock);
p2->p_sflag |= PS_INMEM;
if (!(flags & RFSTOPPED)) {
p2->p_stat = SRUN;
- setrunqueue(p2);
+ setrunqueue(&p2->p_thread); /* XXXKSE */
}
mtx_unlock_spin(&sched_lock);
@@ -125,7 +125,7 @@ kthread_exit(int ecode)
proc_reparent(curproc, initproc);
PROC_UNLOCK(curproc);
sx_xunlock(&proctree_lock);
- exit1(curproc, W_EXITCODE(ecode, 0));
+ exit1(curthread, W_EXITCODE(ecode, 0));
}
/*
diff --git a/sys/kern/kern_ktrace.c b/sys/kern/kern_ktrace.c
index 9b97944..6297f9b 100644
--- a/sys/kern/kern_ktrace.c
+++ b/sys/kern/kern_ktrace.c
@@ -251,11 +251,12 @@ struct ktrace_args {
#endif
/* ARGSUSED */
int
-ktrace(curp, uap)
- struct proc *curp;
+ktrace(td, uap)
+ struct thread *td;
register struct ktrace_args *uap;
{
#ifdef KTRACE
+ struct proc *curp = td->td_proc;
register struct vnode *vp = NULL;
register struct proc *p;
struct pgrp *pg;
@@ -271,7 +272,7 @@ ktrace(curp, uap)
/*
* an operation which requires a file argument.
*/
- NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, uap->fname, curp);
+ NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, uap->fname, td);
flags = FREAD | FWRITE | O_NOFOLLOW;
error = vn_open(&nd, &flags, 0);
if (error) {
@@ -280,9 +281,9 @@ ktrace(curp, uap)
}
NDFREE(&nd, NDF_ONLY_PNBUF);
vp = nd.ni_vp;
- VOP_UNLOCK(vp, 0, curp);
+ VOP_UNLOCK(vp, 0, td);
if (vp->v_type != VREG) {
- (void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
+ (void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, td);
curp->p_traceflag &= ~KTRFAC_ACTIVE;
return (EACCES);
}
@@ -298,7 +299,7 @@ ktrace(curp, uap)
p->p_tracep = NULL;
p->p_traceflag = 0;
(void) vn_close(vp, FREAD|FWRITE,
- p->p_ucred, p);
+ p->p_ucred, td);
} else
error = EPERM;
}
@@ -349,7 +350,7 @@ ktrace(curp, uap)
error = EPERM;
done:
if (vp != NULL)
- (void) vn_close(vp, FWRITE, curp->p_ucred, curp);
+ (void) vn_close(vp, FWRITE, curp->p_ucred, td);
curp->p_traceflag &= ~KTRFAC_ACTIVE;
return (error);
#else
@@ -362,10 +363,11 @@ done:
*/
/* ARGSUSED */
int
-utrace(curp, uap)
- struct proc *curp;
+utrace(td, uap)
+ struct thread *td;
register struct utrace_args *uap;
{
+
#ifdef KTRACE
struct ktr_header *kth;
struct proc *p = curproc; /* XXX */
@@ -474,7 +476,8 @@ ktrwrite(vp, kth, uio)
{
struct uio auio;
struct iovec aiov[2];
- struct proc *p = curproc; /* XXX */
+ struct thread *td = curthread; /* XXX */
+ struct proc *p = td->td_proc; /* XXX */
struct mount *mp;
int error;
@@ -488,7 +491,7 @@ ktrwrite(vp, kth, uio)
aiov[0].iov_len = sizeof(struct ktr_header);
auio.uio_resid = sizeof(struct ktr_header);
auio.uio_iovcnt = 1;
- auio.uio_procp = curproc;
+ auio.uio_td = curthread;
if (kth->ktr_len > 0) {
auio.uio_iovcnt++;
aiov[1].iov_base = kth->ktr_buffer;
@@ -498,14 +501,14 @@ ktrwrite(vp, kth, uio)
kth->ktr_len += uio->uio_resid;
}
vn_start_write(vp, &mp, V_WAIT);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- (void)VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ (void)VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, p->p_ucred);
if (error == 0 && uio != NULL) {
- (void)VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
+ (void)VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
error = VOP_WRITE(vp, uio, IO_UNIT | IO_APPEND, p->p_ucred);
}
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
if (!error)
return;
diff --git a/sys/kern/kern_linker.c b/sys/kern/kern_linker.c
index bf6face..353d811 100644
--- a/sys/kern/kern_linker.c
+++ b/sys/kern/kern_linker.c
@@ -354,14 +354,14 @@ linker_find_file_by_name(const char* filename)
goto out;
sprintf(koname, "%s.ko", filename);
- lockmgr(&lock, LK_SHARED, 0, curproc);
+ lockmgr(&lock, LK_SHARED, 0, curthread);
TAILQ_FOREACH(lf, &linker_files, link) {
if (!strcmp(lf->filename, koname))
break;
if (!strcmp(lf->filename, filename))
break;
}
- lockmgr(&lock, LK_RELEASE, 0, curproc);
+ lockmgr(&lock, LK_RELEASE, 0, curthread);
out:
if (koname)
@@ -374,11 +374,11 @@ linker_find_file_by_id(int fileid)
{
linker_file_t lf = 0;
- lockmgr(&lock, LK_SHARED, 0, curproc);
+ lockmgr(&lock, LK_SHARED, 0, curthread);
TAILQ_FOREACH(lf, &linker_files, link)
if (lf->id == fileid)
break;
- lockmgr(&lock, LK_RELEASE, 0, curproc);
+ lockmgr(&lock, LK_RELEASE, 0, curthread);
return lf;
}
@@ -392,7 +392,7 @@ linker_make_file(const char* pathname, linker_class_t lc)
filename = linker_basename(pathname);
KLD_DPF(FILE, ("linker_make_file: new file, filename=%s\n", filename));
- lockmgr(&lock, LK_EXCLUSIVE, 0, curproc);
+ lockmgr(&lock, LK_EXCLUSIVE, 0, curthread);
lf = (linker_file_t) kobj_create((kobj_class_t) lc, M_LINKER, M_WAITOK);
if (!lf)
goto out;
@@ -410,7 +410,7 @@ linker_make_file(const char* pathname, linker_class_t lc)
TAILQ_INSERT_TAIL(&linker_files, lf, link);
out:
- lockmgr(&lock, LK_RELEASE, 0, curproc);
+ lockmgr(&lock, LK_RELEASE, 0, curthread);
return lf;
}
@@ -428,7 +428,7 @@ linker_file_unload(linker_file_t file)
return EPERM;
KLD_DPF(FILE, ("linker_file_unload: lf->refs=%d\n", file->refs));
- lockmgr(&lock, LK_EXCLUSIVE, 0, curproc);
+ lockmgr(&lock, LK_EXCLUSIVE, 0, curthread);
if (file->refs == 1) {
KLD_DPF(FILE, ("linker_file_unload: file is unloading, informing modules\n"));
/*
@@ -443,7 +443,7 @@ linker_file_unload(linker_file_t file)
if ((error = module_unload(mod)) != 0) {
KLD_DPF(FILE, ("linker_file_unload: module %x vetoes unload\n",
mod));
- lockmgr(&lock, LK_RELEASE, 0, curproc);
+ lockmgr(&lock, LK_RELEASE, 0, curthread);
goto out;
}
@@ -453,7 +453,7 @@ linker_file_unload(linker_file_t file)
file->refs--;
if (file->refs > 0) {
- lockmgr(&lock, LK_RELEASE, 0, curproc);
+ lockmgr(&lock, LK_RELEASE, 0, curthread);
goto out;
}
@@ -471,7 +471,7 @@ linker_file_unload(linker_file_t file)
}
TAILQ_REMOVE(&linker_files, file, link);
- lockmgr(&lock, LK_RELEASE, 0, curproc);
+ lockmgr(&lock, LK_RELEASE, 0, curthread);
if (file->deps) {
for (i = 0; i < file->ndeps; i++)
@@ -683,21 +683,21 @@ linker_ddb_symbol_values(c_linker_sym_t sym, linker_symval_t *symval)
* MPSAFE
*/
int
-kldload(struct proc* p, struct kldload_args* uap)
+kldload(struct thread* td, struct kldload_args* uap)
{
char *kldname, *modname;
char *pathname = NULL;
linker_file_t lf;
int error = 0;
- p->p_retval[0] = -1;
+ td->td_retval[0] = -1;
if (securelevel > 0) /* redundant, but that's OK */
return EPERM;
mtx_lock(&Giant);
- if ((error = suser(p)) != 0)
+ if ((error = suser_td(td)) != 0)
goto out;
pathname = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
@@ -720,7 +720,7 @@ kldload(struct proc* p, struct kldload_args* uap)
goto out;
lf->userrefs++;
- p->p_retval[0] = lf->id;
+ td->td_retval[0] = lf->id;
out:
if (pathname)
@@ -733,7 +733,7 @@ out:
* MPSAFE
*/
int
-kldunload(struct proc* p, struct kldunload_args* uap)
+kldunload(struct thread* td, struct kldunload_args* uap)
{
linker_file_t lf;
int error = 0;
@@ -743,7 +743,7 @@ kldunload(struct proc* p, struct kldunload_args* uap)
mtx_lock(&Giant);
- if ((error = suser(p)) != 0)
+ if ((error = suser_td(td)) != 0)
goto out;
lf = linker_find_file_by_id(SCARG(uap, fileid));
@@ -770,7 +770,7 @@ out:
* MPSAFE
*/
int
-kldfind(struct proc* p, struct kldfind_args* uap)
+kldfind(struct thread* td, struct kldfind_args* uap)
{
char* pathname;
const char *filename;
@@ -778,8 +778,7 @@ kldfind(struct proc* p, struct kldfind_args* uap)
int error = 0;
mtx_lock(&Giant);
-
- p->p_retval[0] = -1;
+ td->td_retval[0] = -1;
pathname = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
if ((error = copyinstr(SCARG(uap, file), pathname, MAXPATHLEN, NULL)) != 0)
@@ -789,7 +788,7 @@ kldfind(struct proc* p, struct kldfind_args* uap)
lf = linker_find_file_by_name(filename);
if (lf)
- p->p_retval[0] = lf->id;
+ td->td_retval[0] = lf->id;
else
error = ENOENT;
@@ -804,7 +803,7 @@ out:
* MPSAFE
*/
int
-kldnext(struct proc* p, struct kldnext_args* uap)
+kldnext(struct thread* td, struct kldnext_args* uap)
{
linker_file_t lf;
int error = 0;
@@ -813,18 +812,18 @@ kldnext(struct proc* p, struct kldnext_args* uap)
if (SCARG(uap, fileid) == 0) {
if (TAILQ_FIRST(&linker_files))
- p->p_retval[0] = TAILQ_FIRST(&linker_files)->id;
+ td->td_retval[0] = TAILQ_FIRST(&linker_files)->id;
else
- p->p_retval[0] = 0;
+ td->td_retval[0] = 0;
goto out;
}
lf = linker_find_file_by_id(SCARG(uap, fileid));
if (lf) {
if (TAILQ_NEXT(lf, link))
- p->p_retval[0] = TAILQ_NEXT(lf, link)->id;
+ td->td_retval[0] = TAILQ_NEXT(lf, link)->id;
else
- p->p_retval[0] = 0;
+ td->td_retval[0] = 0;
} else {
error = ENOENT;
}
@@ -837,7 +836,7 @@ out:
* MPSAFE
*/
int
-kldstat(struct proc* p, struct kldstat_args* uap)
+kldstat(struct thread* td, struct kldstat_args* uap)
{
linker_file_t lf;
int error = 0;
@@ -879,7 +878,7 @@ kldstat(struct proc* p, struct kldstat_args* uap)
if ((error = copyout(&lf->size, &stat->size, sizeof(size_t))) != 0)
goto out;
- p->p_retval[0] = 0;
+ td->td_retval[0] = 0;
out:
mtx_unlock(&Giant);
@@ -890,7 +889,7 @@ out:
* MPSAFE
*/
int
-kldfirstmod(struct proc* p, struct kldfirstmod_args* uap)
+kldfirstmod(struct thread* td, struct kldfirstmod_args* uap)
{
linker_file_t lf;
int error = 0;
@@ -899,9 +898,9 @@ kldfirstmod(struct proc* p, struct kldfirstmod_args* uap)
lf = linker_find_file_by_id(SCARG(uap, fileid));
if (lf) {
if (TAILQ_FIRST(&lf->modules))
- p->p_retval[0] = module_getid(TAILQ_FIRST(&lf->modules));
+ td->td_retval[0] = module_getid(TAILQ_FIRST(&lf->modules));
else
- p->p_retval[0] = 0;
+ td->td_retval[0] = 0;
} else {
error = ENOENT;
}
@@ -913,7 +912,7 @@ kldfirstmod(struct proc* p, struct kldfirstmod_args* uap)
* MPSAFE
*/
int
-kldsym(struct proc *p, struct kldsym_args *uap)
+kldsym(struct thread *td, struct kldsym_args *uap)
{
char *symstr = NULL;
c_linker_sym_t sym;
@@ -1309,7 +1308,7 @@ linker_lookup_file(const char *path, int pathlen,
const char *name, int namelen, struct vattr *vap)
{
struct nameidata nd;
- struct proc *p = curproc; /* XXX */
+ struct thread *td = curthread; /* XXX */
char *result, **cpp, *sep;
int error, len, extlen, reclen, flags;
enum vtype type;
@@ -1332,16 +1331,16 @@ linker_lookup_file(const char *path, int pathlen,
* Attempt to open the file, and return the path if we succeed
* and it's a regular file.
*/
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, result, p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, result, td);
flags = FREAD;
error = vn_open(&nd, &flags, 0);
if (error == 0) {
NDFREE(&nd, NDF_ONLY_PNBUF);
type = nd.ni_vp->v_type;
if (vap)
- VOP_GETATTR(nd.ni_vp, vap, p->p_ucred, p);
- VOP_UNLOCK(nd.ni_vp, 0, p);
- vn_close(nd.ni_vp, FREAD, p->p_ucred, p);
+ VOP_GETATTR(nd.ni_vp, vap, td->td_proc->p_ucred, td);
+ VOP_UNLOCK(nd.ni_vp, 0, td);
+ vn_close(nd.ni_vp, FREAD, td->td_proc->p_ucred, td);
if (type == VREG)
return(result);
}
@@ -1363,8 +1362,8 @@ linker_hints_lookup(const char *path, int pathlen,
const char *modname, int modnamelen,
struct mod_depend *verinfo)
{
- struct proc *p = curproc;
- struct ucred *cred = p ? p->p_ucred : NULL;
+ struct thread *td = curthread; /* XXX */
+ struct ucred *cred = td ? td->td_proc->p_ucred : NULL;
struct nameidata nd;
struct vattr vattr, mattr;
u_char *hints = NULL;
@@ -1380,17 +1379,17 @@ linker_hints_lookup(const char *path, int pathlen,
pathbuf = malloc(reclen, M_LINKER, M_WAITOK);
snprintf(pathbuf, reclen, "%.*s%s%s", pathlen, path, sep, linker_hintfile);
- NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, pathbuf, p);
+ NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, pathbuf, td);
flags = FREAD;
error = vn_open(&nd, &flags, 0);
if (error)
goto bad;
NDFREE(&nd, NDF_ONLY_PNBUF);
- VOP_UNLOCK(nd.ni_vp, 0, p);
+ VOP_UNLOCK(nd.ni_vp, 0, td);
if (nd.ni_vp->v_type != VREG)
goto bad;
best = cp = NULL;
- error = VOP_GETATTR(nd.ni_vp, &vattr, cred, p);
+ error = VOP_GETATTR(nd.ni_vp, &vattr, cred, td);
if (error)
goto bad;
/*
@@ -1404,10 +1403,10 @@ linker_hints_lookup(const char *path, int pathlen,
if (hints == NULL)
goto bad;
error = vn_rdwr(UIO_READ, nd.ni_vp, (caddr_t)hints, vattr.va_size, 0,
- UIO_SYSSPACE, IO_NODELOCKED, cred, &reclen, p);
+ UIO_SYSSPACE, IO_NODELOCKED, cred, &reclen, td);
if (error)
goto bad;
- vn_close(nd.ni_vp, FREAD, cred, p);
+ vn_close(nd.ni_vp, FREAD, cred, td);
nd.ni_vp = NULL;
if (reclen != 0) {
printf("can't read %d\n", reclen);
@@ -1472,7 +1471,7 @@ bad:
if (hints)
free(hints, M_TEMP);
if (nd.ni_vp != NULL)
- vn_close(nd.ni_vp, FREAD, cred, p);
+ vn_close(nd.ni_vp, FREAD, cred, td);
/*
* If nothing found or hints is absent - fallback to the old way
* by using "kldname[.ko]" as module name.
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 147508c..3f8373b 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -208,14 +208,14 @@ acquire(struct lock *lkp, int extflags, int wanted) {
*/
int
#ifndef DEBUG_LOCKS
-lockmgr(lkp, flags, interlkp, p)
+lockmgr(lkp, flags, interlkp, td)
#else
-debuglockmgr(lkp, flags, interlkp, p, name, file, line)
+debuglockmgr(lkp, flags, interlkp, td, name, file, line)
#endif
struct lock *lkp;
u_int flags;
struct mtx *interlkp;
- struct proc *p;
+ struct thread *td;
#ifdef DEBUG_LOCKS
const char *name; /* Name of lock function */
const char *file; /* Name of file call is from */
@@ -228,13 +228,13 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
CTR5(KTR_LOCKMGR,
"lockmgr(): lkp == %p (lk_wmesg == \"%s\"), flags == 0x%x, "
- "interlkp == %p, p == %p", lkp, lkp->lk_wmesg, flags, interlkp, p);
+ "interlkp == %p, td == %p", lkp, lkp->lk_wmesg, flags, interlkp, td);
error = 0;
- if (p == NULL)
+ if (td == NULL)
pid = LK_KERNPROC;
else
- pid = p->p_pid;
+ pid = td->td_proc->p_pid;
mtx_lock(lkp->lk_interlock);
if (flags & LK_INTERLOCK) {
@@ -257,19 +257,19 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
* while there is an exclusive lock holder or while an
* exclusive lock request or upgrade request is in progress.
*
- * However, if P_DEADLKTREAT is set, we override exclusive
+ * However, if TDF_DEADLKTREAT is set, we override exclusive
* lock requests or upgrade requests ( but not the exclusive
* lock itself ).
*/
if (lkp->lk_lockholder != pid) {
lockflags = LK_HAVE_EXCL;
- if (p) {
- PROC_LOCK(p);
- if (!(p->p_flag & P_DEADLKTREAT)) {
+ if (td) {
+ PROC_LOCK(td->td_proc);
+ if (!(td->td_flags & TDF_DEADLKTREAT)) {
lockflags |= LK_WANT_EXCL |
LK_WANT_UPGRADE;
}
- PROC_UNLOCK(p);
+ PROC_UNLOCK(td->td_proc);
}
error = acquire(lkp, extflags, lockflags);
if (error)
@@ -560,15 +560,15 @@ lockdestroy(lkp)
* Determine the status of a lock.
*/
int
-lockstatus(lkp, p)
+lockstatus(lkp, td)
struct lock *lkp;
- struct proc *p;
+ struct thread *td;
{
int lock_type = 0;
mtx_lock(lkp->lk_interlock);
if (lkp->lk_exclusivecount != 0) {
- if (p == NULL || lkp->lk_lockholder == p->p_pid)
+ if (td == NULL || lkp->lk_lockholder == td->td_proc->p_pid)
lock_type = LK_EXCLUSIVE;
else
lock_type = LK_EXCLOTHER;
diff --git a/sys/kern/kern_lockf.c b/sys/kern/kern_lockf.c
index c60705f..3bbb4d0 100644
--- a/sys/kern/kern_lockf.c
+++ b/sys/kern/kern_lockf.c
@@ -242,25 +242,29 @@ lf_setlock(lock)
if ((lock->lf_flags & F_POSIX) &&
(block->lf_flags & F_POSIX)) {
register struct proc *wproc;
+ struct thread *td;
register struct lockf *waitblock;
int i = 0;
/* The block is waiting on something */
+ /* XXXKSE this is not complete under threads */
wproc = (struct proc *)block->lf_id;
mtx_lock_spin(&sched_lock);
- while (wproc->p_wchan &&
- (wproc->p_wmesg == lockstr) &&
- (i++ < maxlockdepth)) {
- waitblock = (struct lockf *)wproc->p_wchan;
- /* Get the owner of the blocking lock */
- waitblock = waitblock->lf_next;
- if ((waitblock->lf_flags & F_POSIX) == 0)
- break;
- wproc = (struct proc *)waitblock->lf_id;
- if (wproc == (struct proc *)lock->lf_id) {
- mtx_unlock_spin(&sched_lock);
- free(lock, M_LOCKF);
- return (EDEADLK);
+ FOREACH_THREAD_IN_PROC(wproc, td) {
+ while (td->td_wchan &&
+ (td->td_wmesg == lockstr) &&
+ (i++ < maxlockdepth)) {
+ waitblock = (struct lockf *)td->td_wchan;
+ /* Get the owner of the blocking lock */
+ waitblock = waitblock->lf_next;
+ if ((waitblock->lf_flags & F_POSIX) == 0)
+ break;
+ wproc = (struct proc *)waitblock->lf_id;
+ if (wproc == (struct proc *)lock->lf_id) {
+ mtx_unlock_spin(&sched_lock);
+ free(lock, M_LOCKF);
+ return (EDEADLK);
+ }
}
}
mtx_unlock_spin(&sched_lock);
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index 5f9ca0c..0446b1f 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -144,7 +144,7 @@ malloc(size, type, flags)
#if defined(INVARIANTS)
if (flags == M_WAITOK)
- KASSERT(curproc->p_intr_nesting_level == 0,
+ KASSERT(curthread->td_intr_nesting_level == 0,
("malloc(M_WAITOK) in interrupt context"));
#endif
indx = BUCKETINDX(size);
diff --git a/sys/kern/kern_module.c b/sys/kern/kern_module.c
index 3fbdb72..f3651d2 100644
--- a/sys/kern/kern_module.c
+++ b/sys/kern/kern_module.c
@@ -222,18 +222,18 @@ module_setspecific(module_t mod, modspecific_t *datap)
* MPSAFE
*/
int
-modnext(struct proc* p, struct modnext_args* uap)
+modnext(struct thread *td, struct modnext_args *uap)
{
module_t mod;
int error = 0;
mtx_lock(&Giant);
- p->p_retval[0] = -1;
+ td->td_retval[0] = -1;
if (SCARG(uap, modid) == 0) {
mod = TAILQ_FIRST(&modules);
if (mod)
- p->p_retval[0] = mod->id;
+ td->td_retval[0] = mod->id;
else
error = ENOENT;
goto done2;
@@ -246,9 +246,9 @@ modnext(struct proc* p, struct modnext_args* uap)
}
if (TAILQ_NEXT(mod, link))
- p->p_retval[0] = TAILQ_NEXT(mod, link)->id;
+ td->td_retval[0] = TAILQ_NEXT(mod, link)->id;
else
- p->p_retval[0] = 0;
+ td->td_retval[0] = 0;
done2:
mtx_unlock(&Giant);
return (error);
@@ -258,12 +258,12 @@ done2:
* MPSAFE
*/
int
-modfnext(struct proc* p, struct modfnext_args* uap)
+modfnext(struct thread *td, struct modfnext_args *uap)
{
module_t mod;
int error;
- p->p_retval[0] = -1;
+ td->td_retval[0] = -1;
mtx_lock(&Giant);
@@ -273,9 +273,9 @@ modfnext(struct proc* p, struct modfnext_args* uap)
} else {
error = 0;
if (TAILQ_NEXT(mod, flink))
- p->p_retval[0] = TAILQ_NEXT(mod, flink)->id;
+ td->td_retval[0] = TAILQ_NEXT(mod, flink)->id;
else
- p->p_retval[0] = 0;
+ td->td_retval[0] = 0;
}
mtx_unlock(&Giant);
return (error);
@@ -292,7 +292,7 @@ struct module_stat_v1 {
* MPSAFE
*/
int
-modstat(struct proc* p, struct modstat_args* uap)
+modstat(struct thread *td, struct modstat_args *uap)
{
module_t mod;
int error = 0;
@@ -340,7 +340,7 @@ modstat(struct proc* p, struct modstat_args* uap)
goto out;
}
- p->p_retval[0] = 0;
+ td->td_retval[0] = 0;
out:
mtx_unlock(&Giant);
@@ -351,7 +351,7 @@ out:
* MPSAFE
*/
int
-modfind(struct proc* p, struct modfind_args* uap)
+modfind(struct thread *td, struct modfind_args *uap)
{
int error = 0;
char name[MAXMODNAME];
@@ -365,7 +365,7 @@ modfind(struct proc* p, struct modfind_args* uap)
if (mod == NULL)
error = ENOENT;
else
- p->p_retval[0] = mod->id;
+ td->td_retval[0] = mod->id;
mtx_unlock(&Giant);
out:
return error;
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 646d99a..bbcac94 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -86,9 +86,9 @@
#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
#define mtx_owner(m) (mtx_unowned((m)) ? NULL \
- : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
+ : (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
-#define SET_PRIO(p, pri) (p)->p_pri.pri_level = (pri)
+#define SET_PRIO(td, pri) (td)->td_ksegrp->kg_pri.pri_level = (pri)
/*
* Lock classes for sleep and spin mutexes.
@@ -105,113 +105,118 @@ struct lock_class lock_class_mtx_spin = {
/*
* Prototypes for non-exported routines.
*/
-static void propagate_priority(struct proc *);
+static void propagate_priority(struct thread *);
static void
-propagate_priority(struct proc *p)
+propagate_priority(struct thread *td)
{
- int pri = p->p_pri.pri_level;
- struct mtx *m = p->p_blocked;
+ struct ksegrp *kg = td->td_ksegrp;
+ int pri = kg->kg_pri.pri_level;
+ struct mtx *m = td->td_blocked;
mtx_assert(&sched_lock, MA_OWNED);
for (;;) {
- struct proc *p1;
+ struct thread *td1;
- p = mtx_owner(m);
+ td = mtx_owner(m);
- if (p == NULL) {
+ if (td == NULL) {
/*
* This really isn't quite right. Really
- * ought to bump priority of process that
+ * ought to bump priority of thread that
* next acquires the mutex.
*/
MPASS(m->mtx_lock == MTX_CONTESTED);
return;
}
- MPASS(p->p_magic == P_MAGIC);
- KASSERT(p->p_stat != SSLEEP, ("sleeping process owns a mutex"));
- if (p->p_pri.pri_level <= pri)
+ MPASS(td->td_proc->p_magic == P_MAGIC);
+ KASSERT(td->td_proc->p_stat != SSLEEP, ("sleeping thread owns a mutex"));
+ if (kg->kg_pri.pri_level <= pri) /* lower is higher priority */
return;
/*
- * Bump this process' priority.
+ * Bump this thread's priority.
*/
- SET_PRIO(p, pri);
+ SET_PRIO(td, pri);
/*
* If lock holder is actually running, just bump priority.
*/
- if (p->p_oncpu != NOCPU) {
- MPASS(p->p_stat == SRUN || p->p_stat == SZOMB || p->p_stat == SSTOP);
+ /* XXXKSE this test is not sufficient */
+ if (td->td_kse && (td->td_kse->ke_oncpu != NOCPU)) {
+ MPASS(td->td_proc->p_stat == SRUN
+ || td->td_proc->p_stat == SZOMB
+ || td->td_proc->p_stat == SSTOP);
return;
}
#ifndef SMP
/*
- * For UP, we check to see if p is curproc (this shouldn't
+ * For UP, we check to see if td is curthread (this shouldn't
* ever happen however as it would mean we are in a deadlock.)
*/
- KASSERT(p != curproc, ("Deadlock detected"));
+ KASSERT(td != curthread, ("Deadlock detected"));
#endif
/*
- * If on run queue move to new run queue, and
- * quit.
+ * If on run queue move to new run queue, and quit.
+ * XXXKSE this gets a lot more complicated under threads
+ * but try anyhow.
*/
- if (p->p_stat == SRUN) {
- MPASS(p->p_blocked == NULL);
- remrunqueue(p);
- setrunqueue(p);
+ if (td->td_proc->p_stat == SRUN) {
+ MPASS(td->td_blocked == NULL);
+ remrunqueue(td);
+ setrunqueue(td);
return;
}
/*
* If we aren't blocked on a mutex, we should be.
*/
- KASSERT(p->p_stat == SMTX, (
+ KASSERT(td->td_proc->p_stat == SMTX, (
"process %d(%s):%d holds %s but isn't blocked on a mutex\n",
- p->p_pid, p->p_comm, p->p_stat,
+ td->td_proc->p_pid, td->td_proc->p_comm, td->td_proc->p_stat,
m->mtx_object.lo_name));
/*
- * Pick up the mutex that p is blocked on.
+ * Pick up the mutex that td is blocked on.
*/
- m = p->p_blocked;
+ m = td->td_blocked;
MPASS(m != NULL);
/*
- * Check if the proc needs to be moved up on
+ * Check if the thread needs to be moved up on
* the blocked chain
*/
- if (p == TAILQ_FIRST(&m->mtx_blocked)) {
+ if (td == TAILQ_FIRST(&m->mtx_blocked)) {
continue;
}
- p1 = TAILQ_PREV(p, procqueue, p_procq);
- if (p1->p_pri.pri_level <= pri) {
+ td1 = TAILQ_PREV(td, threadqueue, td_blkq);
+ if (td1->td_ksegrp->kg_pri.pri_level <= pri) {
continue;
}
/*
- * Remove proc from blocked chain and determine where
- * it should be moved up to. Since we know that p1 has
- * a lower priority than p, we know that at least one
- * process in the chain has a lower priority and that
- * p1 will thus not be NULL after the loop.
+ * Remove thread from blocked chain and determine where
+ * it should be moved up to. Since we know that td1 has
+ * a lower priority than td, we know that at least one
+ * thread in the chain has a lower priority and that
+ * td1 will thus not be NULL after the loop.
*/
- TAILQ_REMOVE(&m->mtx_blocked, p, p_procq);
- TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq) {
- MPASS(p1->p_magic == P_MAGIC);
- if (p1->p_pri.pri_level > pri)
+ TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
+ TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
+ MPASS(td1->td_proc->p_magic == P_MAGIC);
+ if (td1->td_ksegrp->kg_pri.pri_level > pri)
break;
}
- MPASS(p1 != NULL);
- TAILQ_INSERT_BEFORE(p1, p, p_procq);
+ MPASS(td1 != NULL);
+ TAILQ_INSERT_BEFORE(td1, td, td_blkq);
CTR4(KTR_LOCK,
"propagate_priority: p %p moved before %p on [%p] %s",
- p, p1, m, m->mtx_object.lo_name);
+ td, td1, m, m->mtx_object.lo_name);
}
}
@@ -257,7 +262,7 @@ _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
int rval;
- MPASS(curproc != NULL);
+ MPASS(curthread != NULL);
/*
* _mtx_trylock does not accept MTX_NOSWITCH option.
@@ -265,7 +270,7 @@ _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
KASSERT((opts & MTX_NOSWITCH) == 0,
("mtx_trylock() called with invalid option flag(s) %d", opts));
- rval = _obtain_lock(m, curproc);
+ rval = _obtain_lock(m, curthread);
LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
if (rval) {
@@ -291,9 +296,10 @@ _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
- struct proc *p = curproc;
+ struct thread *td = curthread;
+ struct ksegrp *kg = td->td_ksegrp;
- if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
+ if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
m->mtx_recurse++;
atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
if (LOCK_LOG_TEST(&m->mtx_object, opts))
@@ -306,9 +312,9 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
"_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);
- while (!_obtain_lock(m, p)) {
+ while (!_obtain_lock(m, td)) {
uintptr_t v;
- struct proc *p1;
+ struct thread *td1;
mtx_lock_spin(&sched_lock);
/*
@@ -322,15 +328,15 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
/*
* The mutex was marked contested on release. This means that
- * there are processes blocked on it.
+ * there are threads blocked on it.
*/
if (v == MTX_CONTESTED) {
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- MPASS(p1 != NULL);
- m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
+ td1 = TAILQ_FIRST(&m->mtx_blocked);
+ MPASS(td1 != NULL);
+ m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
- if (p1->p_pri.pri_level < p->p_pri.pri_level)
- SET_PRIO(p, p1->p_pri.pri_level);
+ if (td1->td_ksegrp->kg_pri.pri_level < kg->kg_pri.pri_level)
+ SET_PRIO(td, td1->td_ksegrp->kg_pri.pri_level);
mtx_unlock_spin(&sched_lock);
return;
}
@@ -357,8 +363,8 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
* If we're borrowing an interrupted thread's VM context, we
* must clean up before going to sleep.
*/
- if (p->p_ithd != NULL) {
- struct ithd *it = p->p_ithd;
+ if (td->td_ithd != NULL) {
+ struct ithd *it = td->td_ithd;
if (it->it_interrupted) {
if (LOCK_LOG_TEST(&m->mtx_object, opts))
@@ -374,39 +380,39 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
* Put us on the list of threads blocked on this mutex.
*/
if (TAILQ_EMPTY(&m->mtx_blocked)) {
- p1 = (struct proc *)(m->mtx_lock & MTX_FLAGMASK);
- LIST_INSERT_HEAD(&p1->p_contested, m, mtx_contested);
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
+ td1 = (struct thread *)(m->mtx_lock & MTX_FLAGMASK);
+ LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
} else {
- TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
- if (p1->p_pri.pri_level > p->p_pri.pri_level)
+ TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
+ if (td1->td_ksegrp->kg_pri.pri_level > kg->kg_pri.pri_level)
break;
- if (p1)
- TAILQ_INSERT_BEFORE(p1, p, p_procq);
+ if (td1)
+ TAILQ_INSERT_BEFORE(td1, td, td_blkq);
else
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
}
/*
* Save who we're blocked on.
*/
- p->p_blocked = m;
- p->p_mtxname = m->mtx_object.lo_name;
- p->p_stat = SMTX;
- propagate_priority(p);
+ td->td_blocked = m;
+ td->td_mtxname = m->mtx_object.lo_name;
+ td->td_proc->p_stat = SMTX;
+ propagate_priority(td);
if (LOCK_LOG_TEST(&m->mtx_object, opts))
CTR3(KTR_LOCK,
- "_mtx_lock_sleep: p %p blocked on [%p] %s", p, m,
+ "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
m->mtx_object.lo_name);
- p->p_stats->p_ru.ru_nvcsw++;
+ td->td_proc->p_stats->p_ru.ru_nvcsw++;
mi_switch();
if (LOCK_LOG_TEST(&m->mtx_object, opts))
CTR3(KTR_LOCK,
"_mtx_lock_sleep: p %p free from blocked on [%p] %s",
- p, m, m->mtx_object.lo_name);
+ td, m, m->mtx_object.lo_name);
mtx_unlock_spin(&sched_lock);
}
@@ -430,7 +436,7 @@ _mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit, const char *file,
CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
for (;;) {
- if (_obtain_lock(m, curproc))
+ if (_obtain_lock(m, curthread))
break;
/* Give interrupts a chance while we spin. */
@@ -467,11 +473,13 @@ _mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit, const char *file,
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
- struct proc *p, *p1;
+ struct thread *td, *td1;
struct mtx *m1;
int pri;
+ struct ksegrp *kg;
- p = curproc;
+ td = curthread;
+ kg = td->td_ksegrp;
if (mtx_recursed(m)) {
if (--(m->mtx_recurse) == 0)
@@ -485,11 +493,11 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
if (LOCK_LOG_TEST(&m->mtx_object, opts))
CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- MPASS(p->p_magic == P_MAGIC);
- MPASS(p1->p_magic == P_MAGIC);
+ td1 = TAILQ_FIRST(&m->mtx_blocked);
+ MPASS(td->td_proc->p_magic == P_MAGIC);
+ MPASS(td1->td_proc->p_magic == P_MAGIC);
- TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
+ TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);
if (TAILQ_EMPTY(&m->mtx_blocked)) {
LIST_REMOVE(m, mtx_contested);
@@ -500,28 +508,28 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);
pri = PRI_MAX;
- LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
- int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_pri.pri_level;
+ LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
+ int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_ksegrp->kg_pri.pri_level;
if (cp < pri)
pri = cp;
}
- if (pri > p->p_pri.pri_native)
- pri = p->p_pri.pri_native;
- SET_PRIO(p, pri);
+ if (pri > kg->kg_pri.pri_native)
+ pri = kg->kg_pri.pri_native;
+ SET_PRIO(td, pri);
if (LOCK_LOG_TEST(&m->mtx_object, opts))
CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
- m, p1);
+ m, td1);
- p1->p_blocked = NULL;
- p1->p_stat = SRUN;
- setrunqueue(p1);
+ td1->td_blocked = NULL;
+ td1->td_proc->p_stat = SRUN;
+ setrunqueue(td1);
- if ((opts & MTX_NOSWITCH) == 0 && p1->p_pri.pri_level < pri) {
+ if ((opts & MTX_NOSWITCH) == 0 && td1->td_ksegrp->kg_pri.pri_level < pri) {
#ifdef notyet
- if (p->p_ithd != NULL) {
- struct ithd *it = p->p_ithd;
+ if (td->td_ithd != NULL) {
+ struct ithd *it = td->td_ithd;
if (it->it_interrupted) {
if (LOCK_LOG_TEST(&m->mtx_object, opts))
@@ -532,13 +540,13 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
}
}
#endif
- setrunqueue(p);
+ setrunqueue(td);
if (LOCK_LOG_TEST(&m->mtx_object, opts))
CTR2(KTR_LOCK,
"_mtx_unlock_sleep: %p switching out lock=%p", m,
(void *)m->mtx_lock);
- p->p_stats->p_ru.ru_nivcsw++;
+ td->td_proc->p_stats->p_ru.ru_nivcsw++;
mi_switch();
if (LOCK_LOG_TEST(&m->mtx_object, opts))
CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
diff --git a/sys/kern/kern_ntptime.c b/sys/kern/kern_ntptime.c
index 3d689db..4701d2f 100644
--- a/sys/kern/kern_ntptime.c
+++ b/sys/kern/kern_ntptime.c
@@ -276,7 +276,7 @@ struct ntp_adjtime_args {
* MPSAFE
*/
int
-ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap)
+ntp_adjtime(struct thread *td, struct ntp_adjtime_args *uap)
{
struct timex ntv; /* temporary structure */
long freq; /* frequency ns/s) */
@@ -300,7 +300,7 @@ ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap)
mtx_lock(&Giant);
modes = ntv.modes;
if (modes)
- error = suser(p);
+ error = suser_td(td);
if (error)
goto done2;
s = splclock();
@@ -416,9 +416,9 @@ ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap)
time_status & STA_PPSJITTER) ||
(time_status & STA_PPSFREQ &&
time_status & (STA_PPSWANDER | STA_PPSERROR))) {
- p->p_retval[0] = TIME_ERROR;
+ td->td_retval[0] = TIME_ERROR;
} else {
- p->p_retval[0] = time_state;
+ td->td_retval[0] = time_state;
}
done2:
mtx_unlock(&Giant);
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 858b37a..5b0e960 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -115,6 +115,72 @@ procinit()
}
/*
+ * link up a process structure and its inbuilt threads etc.
+ */
+void
+proc_linkup(struct proc *p)
+{
+ struct thread *td;
+
+ td = &p->p_thread;
+
+ /**** lists headed in the proc structure ****/
+ /* ALL KSEGRPs in this process */
+ TAILQ_INIT( &p->p_ksegrps); /* all ksegrps in proc */
+ TAILQ_INSERT_HEAD(&p->p_ksegrps, &p->p_ksegrp, kg_ksegrp);
+
+ /* All threads in this process (an optimisation) */
+ TAILQ_INIT( &p->p_threads); /* all threads in proc */
+ TAILQ_INSERT_HEAD(&p->p_threads, &p->p_thread, td_plist);
+
+ /**** Lists headed in the KSEGROUP structure ****/
+ /* all thread in this ksegroup */
+ TAILQ_INIT( &p->p_ksegrp.kg_threads);
+ TAILQ_INSERT_HEAD(&p->p_ksegrp.kg_threads, &p->p_thread, td_kglist);
+
+ /* All runnable threads not assigned to a particular KSE */
+ /* XXXKSE THIS MAY GO AWAY.. KSEs are never unassigned */
+ TAILQ_INIT( &p->p_ksegrp.kg_runq); /* links with td_runq */
+
+ /* All threads presently not runnable (Thread starts this way) */
+ TAILQ_INIT( &p->p_ksegrp.kg_slpq); /* links with td_runq */
+ TAILQ_INSERT_HEAD(&p->p_ksegrp.kg_slpq, &p->p_thread, td_runq);
+ /*p->p_thread.td_flags &= ~TDF_ONRUNQ;*/
+
+ /* all KSEs in this ksegroup */
+ TAILQ_INIT( &p->p_ksegrp.kg_kseq); /* all kses in ksegrp */
+ TAILQ_INSERT_HEAD(&p->p_ksegrp.kg_kseq, &p->p_kse, ke_kglist);
+
+ /* KSE starts out idle *//* XXXKSE */
+ TAILQ_INIT( &p->p_ksegrp.kg_rq); /* all kses in ksegrp */
+ TAILQ_INIT( &p->p_ksegrp.kg_iq); /* all kses in ksegrp */
+#if 0
+ TAILQ_INSERT_HEAD(&p->p_ksegrp.kg_iq, &p->p_kse, ke_kgrlist);
+#endif /* is running, not idle */
+ /*p->p_kse.ke_flags &= &KEF_ONRUNQ;*/
+
+ /**** Lists headed in the KSE structure ****/
+ /* runnable threads assigned to this kse */
+ TAILQ_INIT( &p->p_kse.ke_runq); /* links with td_runq */
+
+ p->p_thread.td_proc = p;
+ p->p_kse.ke_proc = p;
+ p->p_ksegrp.kg_proc = p;
+
+ p->p_thread.td_ksegrp = &p->p_ksegrp;
+ p->p_kse.ke_ksegrp = &p->p_ksegrp;
+
+ p->p_thread.td_last_kse = &p->p_kse;
+ p->p_thread.td_kse = &p->p_kse;
+
+ p->p_kse.ke_thread = &p->p_thread;
+
+ p->p_ksegrp.kg_runnable = 1;
+ p->p_ksegrp.kg_kses = 1;
+ p->p_ksegrp.kg_runq_kses = 1; /* XXXKSE change name */
+}
+
+/*
* Is p an inferior of the current process?
*/
int
@@ -410,6 +476,7 @@ fill_kinfo_proc(p, kp)
struct proc *p;
struct kinfo_proc *kp;
{
+ struct thread *td;
struct tty *tp;
struct session *sp;
@@ -418,7 +485,7 @@ fill_kinfo_proc(p, kp)
kp->ki_structsize = sizeof(*kp);
kp->ki_paddr = p;
PROC_LOCK(p);
- kp->ki_addr = p->p_addr;
+ kp->ki_addr =/* p->p_addr; */0; /* XXXKSE */
kp->ki_args = p->p_args;
kp->ki_tracep = p->p_tracep;
kp->ki_textvp = p->p_textvp;
@@ -446,7 +513,9 @@ fill_kinfo_proc(p, kp)
kp->ki_size = vm->vm_map.size;
kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/
if (p->p_sflag & PS_INMEM)
- kp->ki_rssize += UPAGES;
+ kp->ki_rssize += UAREA_PAGES;
+ FOREACH_THREAD_IN_PROC(p, td) /* XXXKSE: thread swapout check */
+ kp->ki_rssize += KSTACK_PAGES;
kp->ki_swrss = vm->vm_swrss;
kp->ki_tsize = vm->vm_tsize;
kp->ki_dsize = vm->vm_dsize;
@@ -460,28 +529,33 @@ fill_kinfo_proc(p, kp)
kp->ki_childtime.tv_usec = p->p_stats->p_cru.ru_utime.tv_usec +
p->p_stats->p_cru.ru_stime.tv_usec;
}
- if (p->p_wmesg != NULL)
- strncpy(kp->ki_wmesg, p->p_wmesg, sizeof(kp->ki_wmesg) - 1);
+ if (p->p_thread.td_wmesg != NULL)
+ strncpy(kp->ki_wmesg, p->p_thread.td_wmesg, sizeof(kp->ki_wmesg) - 1);
if (p->p_stat == SMTX) {
kp->ki_kiflag |= KI_MTXBLOCK;
- strncpy(kp->ki_mtxname, p->p_mtxname,
+ strncpy(kp->ki_mtxname, p->p_thread.td_mtxname,
sizeof(kp->ki_mtxname) - 1);
}
kp->ki_stat = p->p_stat;
kp->ki_sflag = p->p_sflag;
- kp->ki_pctcpu = p->p_pctcpu;
- kp->ki_estcpu = p->p_estcpu;
- kp->ki_slptime = p->p_slptime;
kp->ki_swtime = p->p_swtime;
- kp->ki_wchan = p->p_wchan;
kp->ki_traceflag = p->p_traceflag;
- kp->ki_pri = p->p_pri;
- kp->ki_nice = p->p_nice;
- kp->ki_runtime = p->p_runtime;
kp->ki_pid = p->p_pid;
- kp->ki_rqindex = p->p_rqindex;
- kp->ki_oncpu = p->p_oncpu;
- kp->ki_lastcpu = p->p_lastcpu;
+ /* vvv XXXKSE */
+ kp->ki_runtime = p->p_runtime;
+ kp->ki_pctcpu = p->p_kse.ke_pctcpu;
+ kp->ki_estcpu = p->p_ksegrp.kg_estcpu;
+ kp->ki_slptime = p->p_ksegrp.kg_slptime;
+ kp->ki_wchan = p->p_thread.td_wchan;
+ kp->ki_pri = p->p_ksegrp.kg_pri;
+ kp->ki_nice = p->p_ksegrp.kg_nice;
+ kp->ki_rqindex = p->p_kse.ke_rqindex;
+ kp->ki_oncpu = p->p_kse.ke_oncpu;
+ kp->ki_lastcpu = p->p_thread.td_lastcpu;
+ kp->ki_tdflags = p->p_thread.td_flags;
+ kp->ki_pcb = p->p_thread.td_pcb;
+ kp->ki_kstack = (void *)p->p_thread.td_kstack;
+ /* ^^^ XXXKSE */
mtx_unlock_spin(&sched_lock);
sp = NULL;
if (p->p_pgrp) {
diff --git a/sys/kern/kern_prot.c b/sys/kern/kern_prot.c
index baa73dd..0c2939c 100644
--- a/sys/kern/kern_prot.c
+++ b/sys/kern/kern_prot.c
@@ -81,16 +81,17 @@ struct getpid_args {
*/
/* ARGSUSED */
int
-getpid(p, uap)
- struct proc *p;
+getpid(td, uap)
+ struct thread *td;
struct getpid_args *uap;
{
+ struct proc *p = td->td_proc;
mtx_lock(&Giant);
- p->p_retval[0] = p->p_pid;
+ td->td_retval[0] = p->p_pid;
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
PROC_LOCK(p);
- p->p_retval[1] = p->p_pptr->p_pid;
+ td->td_retval[1] = p->p_pptr->p_pid;
PROC_UNLOCK(p);
#endif
mtx_unlock(&Giant);
@@ -111,14 +112,15 @@ struct getppid_args {
*/
/* ARGSUSED */
int
-getppid(p, uap)
- struct proc *p;
+getppid(td, uap)
+ struct thread *td;
struct getppid_args *uap;
{
+ struct proc *p = td->td_proc;
mtx_lock(&Giant);
PROC_LOCK(p);
- p->p_retval[0] = p->p_pptr->p_pid;
+ td->td_retval[0] = p->p_pptr->p_pid;
PROC_UNLOCK(p);
mtx_unlock(&Giant);
return (0);
@@ -138,13 +140,14 @@ struct getpgrp_args {
* MPSAFE
*/
int
-getpgrp(p, uap)
- struct proc *p;
+getpgrp(td, uap)
+ struct thread *td;
struct getpgrp_args *uap;
{
+ struct proc *p = td->td_proc;
mtx_lock(&Giant);
- p->p_retval[0] = p->p_pgrp->pg_id;
+ td->td_retval[0] = p->p_pgrp->pg_id;
mtx_unlock(&Giant);
return (0);
}
@@ -160,16 +163,17 @@ struct getpgid_args {
* MPSAFE
*/
int
-getpgid(p, uap)
- struct proc *p;
+getpgid(td, uap)
+ struct thread *td;
struct getpgid_args *uap;
{
+ struct proc *p = td->td_proc;
struct proc *pt;
int error = 0;
mtx_lock(&Giant);
if (uap->pid == 0)
- p->p_retval[0] = p->p_pgrp->pg_id;
+ td->td_retval[0] = p->p_pgrp->pg_id;
else {
if ((pt = pfind(uap->pid)) == NULL) {
error = ESRCH;
@@ -179,7 +183,7 @@ getpgid(p, uap)
PROC_UNLOCK(pt);
goto done2;
}
- p->p_retval[0] = pt->p_pgrp->pg_id;
+ td->td_retval[0] = pt->p_pgrp->pg_id;
PROC_UNLOCK(pt);
}
done2:
@@ -200,16 +204,17 @@ struct getsid_args {
* MPSAFE
*/
int
-getsid(p, uap)
- struct proc *p;
+getsid(td, uap)
+ struct thread *td;
struct getsid_args *uap;
{
+ struct proc *p = td->td_proc;
struct proc *pt;
int error = 0;
mtx_lock(&Giant);
if (uap->pid == 0) {
- p->p_retval[0] = p->p_session->s_sid;
+ td->td_retval[0] = p->p_session->s_sid;
} else {
if ((pt = pfind(uap->pid)) == NULL) {
error = ESRCH;
@@ -219,7 +224,7 @@ getsid(p, uap)
PROC_UNLOCK(pt);
goto done2;
}
- p->p_retval[0] = pt->p_session->s_sid;
+ td->td_retval[0] = pt->p_session->s_sid;
PROC_UNLOCK(pt);
}
done2:
@@ -242,15 +247,16 @@ struct getuid_args {
*/
/* ARGSUSED */
int
-getuid(p, uap)
- struct proc *p;
+getuid(td, uap)
+ struct thread *td;
struct getuid_args *uap;
{
+ struct proc *p = td->td_proc;
mtx_lock(&Giant);
- p->p_retval[0] = p->p_ucred->cr_ruid;
+ td->td_retval[0] = p->p_ucred->cr_ruid;
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
- p->p_retval[1] = p->p_ucred->cr_uid;
+ td->td_retval[1] = p->p_ucred->cr_uid;
#endif
mtx_unlock(&Giant);
return (0);
@@ -267,13 +273,12 @@ struct geteuid_args {
/* ARGSUSED */
int
-geteuid(p, uap)
- struct proc *p;
+geteuid(td, uap)
+ struct thread *td;
struct geteuid_args *uap;
{
-
mtx_lock(&Giant);
- p->p_retval[0] = p->p_ucred->cr_uid;
+ td->td_retval[0] = td->td_proc->p_ucred->cr_uid;
mtx_unlock(&Giant);
return (0);
}
@@ -292,15 +297,16 @@ struct getgid_args {
*/
/* ARGSUSED */
int
-getgid(p, uap)
- struct proc *p;
+getgid(td, uap)
+ struct thread *td;
struct getgid_args *uap;
{
+ struct proc *p = td->td_proc;
mtx_lock(&Giant);
- p->p_retval[0] = p->p_ucred->cr_rgid;
+ td->td_retval[0] = p->p_ucred->cr_rgid;
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
- p->p_retval[1] = p->p_ucred->cr_groups[0];
+ td->td_retval[1] = p->p_ucred->cr_groups[0];
#endif
mtx_unlock(&Giant);
return (0);
@@ -322,13 +328,14 @@ struct getegid_args {
*/
/* ARGSUSED */
int
-getegid(p, uap)
- struct proc *p;
+getegid(td, uap)
+ struct thread *td;
struct getegid_args *uap;
{
+ struct proc *p = td->td_proc;
mtx_lock(&Giant);
- p->p_retval[0] = p->p_ucred->cr_groups[0];
+ td->td_retval[0] = p->p_ucred->cr_groups[0];
mtx_unlock(&Giant);
return (0);
}
@@ -343,18 +350,19 @@ struct getgroups_args {
* MPSAFE
*/
int
-getgroups(p, uap)
- struct proc *p;
+getgroups(td, uap)
+ struct thread *td;
register struct getgroups_args *uap;
{
struct ucred *cred;
+ struct proc *p = td->td_proc;
u_int ngrp;
int error = 0;
mtx_lock(&Giant);
cred = p->p_ucred;
if ((ngrp = uap->gidsetsize) == 0) {
- p->p_retval[0] = cred->cr_ngroups;
+ td->td_retval[0] = cred->cr_ngroups;
error = 0;
goto done2;
}
@@ -367,7 +375,7 @@ getgroups(p, uap)
(caddr_t)uap->gidset, ngrp * sizeof(gid_t)))) {
goto done2;
}
- p->p_retval[0] = ngrp;
+ td->td_retval[0] = ngrp;
done2:
mtx_unlock(&Giant);
return (error);
@@ -384,18 +392,19 @@ struct setsid_args {
*/
/* ARGSUSED */
int
-setsid(p, uap)
- register struct proc *p;
+setsid(td, uap)
+ register struct thread *td;
struct setsid_args *uap;
{
int error;
+ struct proc *p = td->td_proc;
mtx_lock(&Giant);
if (p->p_pgid == p->p_pid || pgfind(p->p_pid)) {
error = EPERM;
} else {
(void)enterpgrp(p, p->p_pid, 1);
- p->p_retval[0] = p->p_pid;
+ td->td_retval[0] = p->p_pid;
error = 0;
}
mtx_unlock(&Giant);
@@ -426,10 +435,11 @@ struct setpgid_args {
*/
/* ARGSUSED */
int
-setpgid(curp, uap)
- struct proc *curp;
+setpgid(td, uap)
+ struct thread *td;
register struct setpgid_args *uap;
{
+ struct proc *curp = td->td_proc;
register struct proc *targp; /* target process */
register struct pgrp *pgrp; /* target pgrp */
int error;
@@ -510,10 +520,11 @@ struct setuid_args {
*/
/* ARGSUSED */
int
-setuid(p, uap)
- struct proc *p;
+setuid(td, uap)
+ struct thread *td;
struct setuid_args *uap;
{
+ struct proc *p = td->td_proc;
struct ucred *newcred, *oldcred;
uid_t uid;
int error = 0;
@@ -607,10 +618,11 @@ struct seteuid_args {
*/
/* ARGSUSED */
int
-seteuid(p, uap)
- struct proc *p;
+seteuid(td, uap)
+ struct thread *td;
struct seteuid_args *uap;
{
+ struct proc *p = td->td_proc;
struct ucred *newcred, *oldcred;
uid_t euid;
int error = 0;
@@ -650,10 +662,11 @@ struct setgid_args {
*/
/* ARGSUSED */
int
-setgid(p, uap)
- struct proc *p;
+setgid(td, uap)
+ struct thread *td;
struct setgid_args *uap;
{
+ struct proc *p = td->td_proc;
struct ucred *newcred, *oldcred;
gid_t gid;
int error = 0;
@@ -741,10 +754,11 @@ struct setegid_args {
*/
/* ARGSUSED */
int
-setegid(p, uap)
- struct proc *p;
+setegid(td, uap)
+ struct thread *td;
struct setegid_args *uap;
{
+ struct proc *p = td->td_proc;
struct ucred *newcred, *oldcred;
gid_t egid;
int error = 0;
@@ -781,10 +795,11 @@ struct setgroups_args {
*/
/* ARGSUSED */
int
-setgroups(p, uap)
- struct proc *p;
+setgroups(td, uap)
+ struct thread *td;
struct setgroups_args *uap;
{
+ struct proc *p = td->td_proc;
struct ucred *newcred, *oldcred;
u_int ngrp;
int error;
@@ -839,10 +854,11 @@ struct setreuid_args {
*/
/* ARGSUSED */
int
-setreuid(p, uap)
- register struct proc *p;
+setreuid(td, uap)
+ register struct thread *td;
struct setreuid_args *uap;
{
+ struct proc *p = td->td_proc;
struct ucred *newcred, *oldcred;
uid_t ruid, euid;
int error = 0;
@@ -892,10 +908,11 @@ struct setregid_args {
*/
/* ARGSUSED */
int
-setregid(p, uap)
- register struct proc *p;
+setregid(td, uap)
+ register struct thread *td;
struct setregid_args *uap;
{
+ struct proc *p = td->td_proc;
struct ucred *newcred, *oldcred;
gid_t rgid, egid;
int error = 0;
@@ -952,10 +969,11 @@ struct setresuid_args {
*/
/* ARGSUSED */
int
-setresuid(p, uap)
- register struct proc *p;
+setresuid(td, uap)
+ register struct thread *td;
struct setresuid_args *uap;
{
+ struct proc *p = td->td_proc;
struct ucred *newcred, *oldcred;
uid_t ruid, euid, suid;
int error;
@@ -1017,10 +1035,11 @@ struct setresgid_args {
*/
/* ARGSUSED */
int
-setresgid(p, uap)
- register struct proc *p;
+setresgid(td, uap)
+ register struct thread *td;
struct setresgid_args *uap;
{
+ struct proc *p = td->td_proc;
struct ucred *newcred, *oldcred;
gid_t rgid, egid, sgid;
int error;
@@ -1076,11 +1095,12 @@ struct getresuid_args {
*/
/* ARGSUSED */
int
-getresuid(p, uap)
- register struct proc *p;
+getresuid(td, uap)
+ register struct thread *td;
struct getresuid_args *uap;
{
struct ucred *cred;
+ struct proc *p = td->td_proc;
int error1 = 0, error2 = 0, error3 = 0;
mtx_lock(&Giant);
@@ -1111,11 +1131,12 @@ struct getresgid_args {
*/
/* ARGSUSED */
int
-getresgid(p, uap)
- register struct proc *p;
+getresgid(td, uap)
+ register struct thread *td;
struct getresgid_args *uap;
{
struct ucred *cred;
+ struct proc *p = td->td_proc;
int error1 = 0, error2 = 0, error3 = 0;
mtx_lock(&Giant);
@@ -1142,10 +1163,12 @@ struct issetugid_args {
#endif
/* ARGSUSED */
int
-issetugid(p, uap)
- register struct proc *p;
+issetugid(td, uap)
+ register struct thread *td;
struct issetugid_args *uap;
{
+ struct proc *p = td->td_proc;
+
/*
* Note: OpenBSD sets a P_SUGIDEXEC flag set at execve() time,
* we use P_SUGID because we consider changing the owners as
@@ -1154,7 +1177,7 @@ issetugid(p, uap)
* a user without an exec - programs cannot know *everything*
* that libc *might* have put in their data segment.
*/
- p->p_retval[0] = (p->p_flag & P_SUGID) ? 1 : 0;
+ td->td_retval[0] = (p->p_flag & P_SUGID) ? 1 : 0;
return (0);
}
@@ -1162,8 +1185,8 @@ issetugid(p, uap)
* MPSAFE
*/
int
-__setugid(p, uap)
- struct proc *p;
+__setugid(td, uap)
+ struct thread *td;
struct __setugid_args *uap;
{
#ifdef REGRESSION
@@ -1172,10 +1195,10 @@ __setugid(p, uap)
mtx_lock(&Giant);
switch (uap->flag) {
case 0:
- p->p_flag &= ~P_SUGID;
+ td->td_proc->p_flag &= ~P_SUGID;
break;
case 1:
- p->p_flag |= P_SUGID;
+ td->td_proc->p_flag |= P_SUGID;
break;
default:
error = EINVAL;
@@ -1231,6 +1254,30 @@ suser(p)
return suser_xxx(0, p, 0);
}
+/*
+ * Version for use when the thread pointer is available but not the proc.
+ * (Saves having to include proc.h in every file that needs to make the change.)
+ */
+int
+suser_td(td)
+
+ struct thread *td;
+{
+ return suser_xxx(0, td->td_proc, 0);
+}
+
+/*
+ * wrapper to use if you have the thread on hand but not the proc.
+ */
+int
+suser_xxx_td(cred, td, flag)
+ struct ucred *cred;
+ struct thread *td;
+ int flag;
+{
+ return(suser_xxx(cred, td->td_proc, flag));
+}
+
int
suser_xxx(cred, proc, flag)
struct ucred *cred;
@@ -1566,11 +1613,12 @@ struct getlogin_args {
*/
/* ARGSUSED */
int
-getlogin(p, uap)
- struct proc *p;
+getlogin(td, uap)
+ struct thread *td;
struct getlogin_args *uap;
{
int error;
+ struct proc *p = td->td_proc;
mtx_lock(&Giant);
if (uap->namelen > MAXLOGNAME)
@@ -1594,10 +1642,11 @@ struct setlogin_args {
*/
/* ARGSUSED */
int
-setlogin(p, uap)
- struct proc *p;
+setlogin(td, uap)
+ struct thread *td;
struct setlogin_args *uap;
{
+ struct proc *p = td->td_proc;
int error;
char logintmp[MAXLOGNAME];
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index 3b65ace..093e4c2 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -84,10 +84,11 @@ struct getpriority_args {
* MPSAFE
*/
int
-getpriority(curp, uap)
- struct proc *curp;
+getpriority(td, uap)
+ struct thread *td;
register struct getpriority_args *uap;
{
+ struct proc *curp = td->td_proc;
register struct proc *p;
register int low = PRIO_MAX + 1;
int error = 0;
@@ -97,13 +98,13 @@ getpriority(curp, uap)
switch (uap->which) {
case PRIO_PROCESS:
if (uap->who == 0)
- low = curp->p_nice;
+ low = td->td_ksegrp->kg_nice;
else {
p = pfind(uap->who);
if (p == NULL)
break;
if (p_cansee(curp, p) == 0)
- low = p->p_nice;
+ low = p->p_ksegrp.kg_nice /* XXXKSE */ ;
PROC_UNLOCK(p);
}
break;
@@ -116,8 +117,8 @@ getpriority(curp, uap)
else if ((pg = pgfind(uap->who)) == NULL)
break;
LIST_FOREACH(p, &pg->pg_members, p_pglist) {
- if (!p_cansee(curp, p) && p->p_nice < low)
- low = p->p_nice;
+ if (!p_cansee(curp, p) && p->p_ksegrp.kg_nice /* XXXKSE */ < low)
+ low = p->p_ksegrp.kg_nice /* XXXKSE */ ;
}
break;
}
@@ -129,8 +130,8 @@ getpriority(curp, uap)
LIST_FOREACH(p, &allproc, p_list)
if (!p_cansee(curp, p) &&
p->p_ucred->cr_uid == uap->who &&
- p->p_nice < low)
- low = p->p_nice;
+ p->p_ksegrp.kg_nice /* XXXKSE */ < low)
+ low = p->p_ksegrp.kg_nice /* XXXKSE */ ;
sx_sunlock(&allproc_lock);
break;
@@ -140,7 +141,7 @@ getpriority(curp, uap)
}
if (low == PRIO_MAX + 1 && error == 0)
error = ESRCH;
- curp->p_retval[0] = low;
+ td->td_retval[0] = low;
mtx_unlock(&Giant);
return (error);
}
@@ -157,10 +158,11 @@ struct setpriority_args {
*/
/* ARGSUSED */
int
-setpriority(curp, uap)
- struct proc *curp;
+setpriority(td, uap)
+ struct thread *td;
register struct setpriority_args *uap;
{
+ struct proc *curp = td->td_proc;
register struct proc *p;
int found = 0, error = 0;
@@ -201,12 +203,13 @@ setpriority(curp, uap)
if (uap->who == 0)
uap->who = curp->p_ucred->cr_uid;
sx_slock(&allproc_lock);
- LIST_FOREACH(p, &allproc, p_list)
+ FOREACH_PROC_IN_SYSTEM(p) {
if (p->p_ucred->cr_uid == uap->who &&
!p_cansee(curp, p)) {
error = donice(curp, p, uap->prio);
found++;
}
+ }
sx_sunlock(&allproc_lock);
break;
@@ -233,10 +236,10 @@ donice(curp, chgp, n)
n = PRIO_MAX;
if (n < PRIO_MIN)
n = PRIO_MIN;
- if (n < chgp->p_nice && suser(curp))
+ if (n < chgp->p_ksegrp.kg_nice /* XXXKSE */ && suser(curp))
return (EACCES);
- chgp->p_nice = n;
- (void)resetpriority(chgp);
+ chgp->p_ksegrp.kg_nice /* XXXKSE */ = n;
+ (void)resetpriority(&chgp->p_ksegrp); /* XXXKSE */
return (0);
}
@@ -258,10 +261,11 @@ struct rtprio_args {
*/
/* ARGSUSED */
int
-rtprio(curp, uap)
- struct proc *curp;
+rtprio(td, uap)
+ struct thread *td;
register struct rtprio_args *uap;
{
+ struct proc *curp = td->td_proc;
register struct proc *p;
struct rtprio rtp;
int error;
@@ -285,7 +289,7 @@ rtprio(curp, uap)
if ((error = p_cansee(curp, p)))
break;
mtx_lock_spin(&sched_lock);
- pri_to_rtp(&p->p_pri, &rtp);
+ pri_to_rtp(&p->p_ksegrp.kg_pri /* XXXKSE */ , &rtp);
mtx_unlock_spin(&sched_lock);
error = copyout(&rtp, uap->rtp, sizeof(struct rtprio));
break;
@@ -317,7 +321,7 @@ rtprio(curp, uap)
}
}
mtx_lock_spin(&sched_lock);
- error = rtp_to_pri(&rtp, &p->p_pri);
+ error = rtp_to_pri(&rtp, &p->p_ksegrp.kg_pri);
mtx_unlock_spin(&sched_lock);
break;
default:
@@ -387,8 +391,8 @@ struct osetrlimit_args {
*/
/* ARGSUSED */
int
-osetrlimit(p, uap)
- struct proc *p;
+osetrlimit(td, uap)
+ struct thread *td;
register struct osetrlimit_args *uap;
{
struct orlimit olim;
@@ -401,7 +405,7 @@ osetrlimit(p, uap)
lim.rlim_cur = olim.rlim_cur;
lim.rlim_max = olim.rlim_max;
mtx_lock(&Giant);
- error = dosetrlimit(p, uap->which, &lim);
+ error = dosetrlimit(td, uap->which, &lim);
mtx_unlock(&Giant);
return (error);
}
@@ -417,10 +421,11 @@ struct ogetrlimit_args {
*/
/* ARGSUSED */
int
-ogetrlimit(p, uap)
- struct proc *p;
+ogetrlimit(td, uap)
+ struct thread *td;
register struct ogetrlimit_args *uap;
{
+ struct proc *p = td->td_proc;
struct orlimit olim;
int error;
@@ -450,8 +455,8 @@ struct __setrlimit_args {
*/
/* ARGSUSED */
int
-setrlimit(p, uap)
- struct proc *p;
+setrlimit(td, uap)
+ struct thread *td;
register struct __setrlimit_args *uap;
{
struct rlimit alim;
@@ -461,17 +466,18 @@ setrlimit(p, uap)
copyin((caddr_t)uap->rlp, (caddr_t)&alim, sizeof (struct rlimit))))
return (error);
mtx_lock(&Giant);
- error = dosetrlimit(p, uap->which, &alim);
+ error = dosetrlimit(td, uap->which, &alim);
mtx_unlock(&Giant);
return (error);
}
int
-dosetrlimit(p, which, limp)
- struct proc *p;
+dosetrlimit(td, which, limp)
+ struct thread *td;
u_int which;
struct rlimit *limp;
{
+ struct proc *p = td->td_proc;
register struct rlimit *alimp;
int error;
@@ -582,11 +588,12 @@ struct __getrlimit_args {
*/
/* ARGSUSED */
int
-getrlimit(p, uap)
- struct proc *p;
+getrlimit(td, uap)
+ struct thread *td;
register struct __getrlimit_args *uap;
{
int error;
+ struct proc *p = td->td_proc;
if (uap->which >= RLIM_NLIMITS)
return (EINVAL);
@@ -610,85 +617,102 @@ calcru(p, up, sp, ip)
{
/* {user, system, interrupt, total} {ticks, usec}; previous tu: */
u_int64_t ut, uu, st, su, it, iu, tt, tu, ptu;
+ u_int64_t uut = 0, sut = 0, iut = 0;
int s;
struct timeval tv;
+ struct kse *ke;
+ struct ksegrp *kg;
mtx_assert(&sched_lock, MA_OWNED);
/* XXX: why spl-protect ? worst case is an off-by-one report */
- s = splstatclock();
- ut = p->p_uticks;
- st = p->p_sticks;
- it = p->p_iticks;
- splx(s);
- tt = ut + st + it;
- if (tt == 0) {
- st = 1;
- tt = 1;
- }
-
- tu = p->p_runtime;
- if (p == curproc) {
+ FOREACH_KSEGRP_IN_PROC(p, kg) {
+ /* we could accumulate per ksegrp and per process here*/
+ FOREACH_KSE_IN_GROUP(kg, ke) {
+ s = splstatclock();
+ ut = ke->ke_uticks;
+ st = ke->ke_sticks;
+ it = ke->ke_iticks;
+ splx(s);
+
+ tt = ut + st + it;
+ if (tt == 0) {
+ st = 1;
+ tt = 1;
+ }
+
+ tu = p->p_runtime;
+ if (ke == curthread->td_kse) {
/*
* Adjust for the current time slice. This is actually fairly
* important since the error here is on the order of a time
* quantum, which is much greater than the sampling error.
+ * XXXKSE use a different test due to threads on other
+ * processors also being 'current'.
*/
- microuptime(&tv);
- if (timevalcmp(&tv, PCPU_PTR(switchtime), <))
- printf("microuptime() went backwards (%ld.%06ld -> %ld.%06ld)\n",
- PCPU_GET(switchtime.tv_sec), PCPU_GET(switchtime.tv_usec),
- tv.tv_sec, tv.tv_usec);
- else
- tu += (tv.tv_usec - PCPU_GET(switchtime.tv_usec)) +
- (tv.tv_sec - PCPU_GET(switchtime.tv_sec)) *
- (int64_t)1000000;
- }
- ptu = p->p_uu + p->p_su + p->p_iu;
- if (tu < ptu || (int64_t)tu < 0) {
- /* XXX no %qd in kernel. Truncate. */
- printf("calcru: negative time of %ld usec for pid %d (%s)\n",
- (long)tu, p->p_pid, p->p_comm);
- tu = ptu;
- }
+ microuptime(&tv);
+ if (timevalcmp(&tv, PCPU_PTR(switchtime), <))
+ printf("microuptime() went backwards (%ld.%06ld -> %ld.%06ld)\n",
+ PCPU_GET(switchtime.tv_sec),
+ PCPU_GET(switchtime.tv_usec),
+ tv.tv_sec, tv.tv_usec);
+ else
+ tu += (tv.tv_usec
+ - PCPU_GET(switchtime.tv_usec))
+ + (tv.tv_sec
+ - PCPU_GET(switchtime.tv_sec))
+ * (int64_t)1000000;
+ }
+ ptu = ke->ke_uu + ke->ke_su + ke->ke_iu;
+ if (tu < ptu || (int64_t)tu < 0) {
+ /* XXX no %qd in kernel. Truncate. */
+ printf("calcru: negative time of %ld usec for pid %d (%s)\n",
+ (long)tu, p->p_pid, p->p_comm);
+ tu = ptu;
+ }
- /* Subdivide tu. */
- uu = (tu * ut) / tt;
- su = (tu * st) / tt;
- iu = tu - uu - su;
-
- /* Enforce monotonicity. */
- if (uu < p->p_uu || su < p->p_su || iu < p->p_iu) {
- if (uu < p->p_uu)
- uu = p->p_uu;
- else if (uu + p->p_su + p->p_iu > tu)
- uu = tu - p->p_su - p->p_iu;
- if (st == 0)
- su = p->p_su;
- else {
- su = ((tu - uu) * st) / (st + it);
- if (su < p->p_su)
- su = p->p_su;
- else if (uu + su + p->p_iu > tu)
- su = tu - uu - p->p_iu;
- }
- KASSERT(uu + su + p->p_iu <= tu,
- ("calcru: monotonisation botch 1"));
- iu = tu - uu - su;
- KASSERT(iu >= p->p_iu,
- ("calcru: monotonisation botch 2"));
- }
- p->p_uu = uu;
- p->p_su = su;
- p->p_iu = iu;
-
- up->tv_sec = uu / 1000000;
- up->tv_usec = uu % 1000000;
- sp->tv_sec = su / 1000000;
- sp->tv_usec = su % 1000000;
+ /* Subdivide tu. */
+ uu = (tu * ut) / tt;
+ su = (tu * st) / tt;
+ iu = tu - uu - su;
+
+ /* Enforce monotonicity. */
+ if (uu < ke->ke_uu || su < ke->ke_su || iu < ke->ke_iu) {
+ if (uu < ke->ke_uu)
+ uu = ke->ke_uu;
+ else if (uu + ke->ke_su + ke->ke_iu > tu)
+ uu = tu - ke->ke_su - ke->ke_iu;
+ if (st == 0)
+ su = ke->ke_su;
+ else {
+ su = ((tu - uu) * st) / (st + it);
+ if (su < ke->ke_su)
+ su = ke->ke_su;
+ else if (uu + su + ke->ke_iu > tu)
+ su = tu - uu - ke->ke_iu;
+ }
+ KASSERT(uu + su + ke->ke_iu <= tu,
+ ("calcru: monotonisation botch 1"));
+ iu = tu - uu - su;
+ KASSERT(iu >= ke->ke_iu,
+ ("calcru: monotonisation botch 2"));
+ }
+ ke->ke_uu = uu;
+ ke->ke_su = su;
+ ke->ke_iu = iu;
+ uut += uu;
+ sut += su;
+ iut += iu;
+
+ } /* end kse loop */
+ } /* end kseg loop */
+ up->tv_sec = uut / 1000000;
+ up->tv_usec = uut % 1000000;
+ sp->tv_sec = sut / 1000000;
+ sp->tv_usec = sut % 1000000;
if (ip != NULL) {
- ip->tv_sec = iu / 1000000;
- ip->tv_usec = iu % 1000000;
+ ip->tv_sec = iut / 1000000;
+ ip->tv_usec = iut % 1000000;
}
}
@@ -703,10 +727,11 @@ struct getrusage_args {
*/
/* ARGSUSED */
int
-getrusage(p, uap)
- register struct proc *p;
+getrusage(td, uap)
+ register struct thread *td;
register struct getrusage_args *uap;
{
+ struct proc *p = td->td_proc;
register struct rusage *rup;
int error = 0;
diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c
index 4dd9c62..61efa8d 100644
--- a/sys/kern/kern_shutdown.c
+++ b/sys/kern/kern_shutdown.c
@@ -138,12 +138,12 @@ SYSINIT(shutdown_conf, SI_SUB_INTRINSIC, SI_ORDER_ANY, shutdown_conf, NULL)
*/
/* ARGSUSED */
int
-reboot(struct proc *p, struct reboot_args *uap)
+reboot(struct thread *td, struct reboot_args *uap)
{
int error;
mtx_lock(&Giant);
- if ((error = suser(p)) == 0)
+ if ((error = suser_td(td)) == 0)
boot(uap->opt);
mtx_unlock(&Giant);
return (error);
@@ -237,7 +237,7 @@ boot(int howto)
waittime = 0;
printf("\nsyncing disks... ");
- sync(&proc0, NULL);
+ sync(thread0, NULL);
/*
* With soft updates, some buffers that are
@@ -262,13 +262,13 @@ boot(int howto)
if (nbusy < pbusy)
iter = 0;
pbusy = nbusy;
- sync(&proc0, NULL);
- if (curproc != NULL) {
+ sync(thread0, NULL);
+ if (curthread != NULL) {
DROP_GIANT_NOSWITCH();
for (subiter = 0; subiter < 50 * iter; subiter++) {
mtx_lock_spin(&sched_lock);
- setrunqueue(curproc);
- curproc->p_stats->p_ru.ru_nvcsw++;
+ setrunqueue(curthread);
+ curthread->td_proc->p_stats->p_ru.ru_nvcsw++;
mi_switch(); /* Allow interrupt threads to run */
mtx_unlock_spin(&sched_lock);
DELAY(1000);
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index 607c78c..233bdbe 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -74,7 +74,7 @@
#define ONSIG 32 /* NSIG for osig* syscalls. XXX. */
-static int coredump __P((struct proc *));
+static int coredump __P((struct thread *));
static int do_sigaction __P((struct proc *p, int sig, struct sigaction *act,
struct sigaction *oact, int old));
static int do_sigprocmask __P((struct proc *p, int how, sigset_t *set,
@@ -351,10 +351,11 @@ struct sigaction_args {
*/
/* ARGSUSED */
int
-sigaction(p, uap)
- struct proc *p;
+sigaction(td, uap)
+ struct thread *td;
register struct sigaction_args *uap;
{
+ struct proc *p = td->td_proc;
struct sigaction act, oact;
register struct sigaction *actp, *oactp;
int error;
@@ -390,10 +391,11 @@ struct osigaction_args {
*/
/* ARGSUSED */
int
-osigaction(p, uap)
- struct proc *p;
+osigaction(td, uap)
+ struct thread *td;
register struct osigaction_args *uap;
{
+ struct proc *p = td->td_proc;
struct osigaction sa;
struct sigaction nsa, osa;
register struct sigaction *nsap, *osap;
@@ -534,7 +536,7 @@ do_sigprocmask(p, how, set, oset, old)
}
/*
- * sigprocmask() - MP SAFE
+ * sigprocmask() - MP SAFE (XXXKSE not under KSE it isn't)
*/
#ifndef _SYS_SYSPROTO_H_
@@ -545,10 +547,11 @@ struct sigprocmask_args {
};
#endif
int
-sigprocmask(p, uap)
- register struct proc *p;
+sigprocmask(td, uap)
+ register struct thread *td;
struct sigprocmask_args *uap;
{
+ struct proc *p = td->td_proc;
sigset_t set, oset;
sigset_t *setp, *osetp;
int error;
@@ -578,16 +581,17 @@ struct osigprocmask_args {
};
#endif
int
-osigprocmask(p, uap)
- register struct proc *p;
+osigprocmask(td, uap)
+ register struct thread *td;
struct osigprocmask_args *uap;
{
+ struct proc *p = td->td_proc;
sigset_t set, oset;
int error;
OSIG2SIG(uap->mask, set);
error = do_sigprocmask(p, uap->how, &set, &oset, 1);
- SIG2OSIG(oset, p->p_retval[0]);
+ SIG2OSIG(oset, td->td_retval[0]);
return (error);
}
#endif /* COMPAT_43 */
@@ -602,10 +606,11 @@ struct sigpending_args {
*/
/* ARGSUSED */
int
-sigpending(p, uap)
- struct proc *p;
+sigpending(td, uap)
+ struct thread *td;
struct sigpending_args *uap;
{
+ struct proc *p = td->td_proc;
sigset_t siglist;
int error;
@@ -629,13 +634,15 @@ struct osigpending_args {
*/
/* ARGSUSED */
int
-osigpending(p, uap)
- struct proc *p;
+osigpending(td, uap)
+ struct thread *td;
struct osigpending_args *uap;
{
+ struct proc *p = td->td_proc;
+
mtx_lock(&Giant);
PROC_LOCK(p);
- SIG2OSIG(p->p_siglist, p->p_retval[0]);
+ SIG2OSIG(p->p_siglist, td->td_retval[0]);
PROC_UNLOCK(p);
mtx_unlock(&Giant);
return (0);
@@ -658,10 +665,11 @@ struct osigvec_args {
*/
/* ARGSUSED */
int
-osigvec(p, uap)
- struct proc *p;
+osigvec(td, uap)
+ struct thread *td;
register struct osigvec_args *uap;
{
+ struct proc *p = td->td_proc;
struct sigvec vec;
struct sigaction nsa, osa;
register struct sigaction *nsap, *osap;
@@ -709,17 +717,18 @@ struct osigblock_args {
* MPSAFE
*/
int
-osigblock(p, uap)
- register struct proc *p;
+osigblock(td, uap)
+ register struct thread *td;
struct osigblock_args *uap;
{
+ struct proc *p = td->td_proc;
sigset_t set;
OSIG2SIG(uap->mask, set);
SIG_CANTMASK(set);
mtx_lock(&Giant);
PROC_LOCK(p);
- SIG2OSIG(p->p_sigmask, p->p_retval[0]);
+ SIG2OSIG(p->p_sigmask, td->td_retval[0]);
SIGSETOR(p->p_sigmask, set);
PROC_UNLOCK(p);
mtx_unlock(&Giant);
@@ -735,17 +744,18 @@ struct osigsetmask_args {
* MPSAFE
*/
int
-osigsetmask(p, uap)
- struct proc *p;
+osigsetmask(td, uap)
+ struct thread *td;
struct osigsetmask_args *uap;
{
+ struct proc *p = td->td_proc;
sigset_t set;
OSIG2SIG(uap->mask, set);
SIG_CANTMASK(set);
mtx_lock(&Giant);
PROC_LOCK(p);
- SIG2OSIG(p->p_sigmask, p->p_retval[0]);
+ SIG2OSIG(p->p_sigmask, td->td_retval[0]);
SIGSETLO(p->p_sigmask, set);
PROC_UNLOCK(p);
mtx_unlock(&Giant);
@@ -757,6 +767,9 @@ osigsetmask(p, uap)
* Suspend process until signal, providing mask to be set
* in the meantime. Note nonstandard calling convention:
* libc stub passes mask, not pointer, to save a copyin.
+ ***** XXXKSE this doesn't make sense under KSE.
+ ***** Do we suspend the thread or all threads in the process?
+ ***** How do we suspend threads running NOW on another processor?
*/
#ifndef _SYS_SYSPROTO_H_
struct sigsuspend_args {
@@ -768,10 +781,11 @@ struct sigsuspend_args {
*/
/* ARGSUSED */
int
-sigsuspend(p, uap)
- register struct proc *p;
+sigsuspend(td, uap)
+ struct thread *td;
struct sigsuspend_args *uap;
{
+ struct proc *p = td->td_proc;
sigset_t mask;
register struct sigacts *ps;
int error;
@@ -814,10 +828,11 @@ struct osigsuspend_args {
*/
/* ARGSUSED */
int
-osigsuspend(p, uap)
- register struct proc *p;
+osigsuspend(td, uap)
+ struct thread *td;
struct osigsuspend_args *uap;
{
+ struct proc *p = td->td_proc;
sigset_t mask;
register struct sigacts *ps;
@@ -850,10 +865,11 @@ struct osigstack_args {
*/
/* ARGSUSED */
int
-osigstack(p, uap)
- struct proc *p;
+osigstack(td, uap)
+ struct thread *td;
register struct osigstack_args *uap;
{
+ struct proc *p = td->td_proc;
struct sigstack ss;
int error = 0;
@@ -862,7 +878,7 @@ osigstack(p, uap)
if (uap->oss != NULL) {
PROC_LOCK(p);
ss.ss_sp = p->p_sigstk.ss_sp;
- ss.ss_onstack = sigonstack(cpu_getstack(p));
+ ss.ss_onstack = sigonstack(cpu_getstack(td));
PROC_UNLOCK(p);
error = copyout(&ss, uap->oss, sizeof(struct sigstack));
if (error)
@@ -896,17 +912,18 @@ struct sigaltstack_args {
*/
/* ARGSUSED */
int
-sigaltstack(p, uap)
- struct proc *p;
+sigaltstack(td, uap)
+ struct thread *td;
register struct sigaltstack_args *uap;
{
+ struct proc *p = td->td_proc;
stack_t ss;
int oonstack;
int error = 0;
mtx_lock(&Giant);
- oonstack = sigonstack(cpu_getstack(p));
+ oonstack = sigonstack(cpu_getstack(td));
if (uap->oss != NULL) {
PROC_LOCK(p);
@@ -1027,10 +1044,11 @@ struct kill_args {
*/
/* ARGSUSED */
int
-kill(cp, uap)
- register struct proc *cp;
+kill(td, uap)
+ register struct thread *td;
register struct kill_args *uap;
{
+ register struct proc *cp = td->td_proc;
register struct proc *p;
int error = 0;
@@ -1080,8 +1098,8 @@ struct okillpg_args {
*/
/* ARGSUSED */
int
-okillpg(p, uap)
- struct proc *p;
+okillpg(td, uap)
+ struct thread *td;
register struct okillpg_args *uap;
{
int error;
@@ -1089,7 +1107,7 @@ okillpg(p, uap)
if ((u_int)uap->signum > _SIG_MAXSIG)
return (EINVAL);
mtx_lock(&Giant);
- error = killpg1(p, uap->signum, uap->pgid, 0);
+ error = killpg1(td->td_proc, uap->signum, uap->pgid, 0);
mtx_unlock(&Giant);
return (error);
}
@@ -1198,6 +1216,8 @@ psignal(p, sig)
{
register int prop;
register sig_t action;
+ struct thread *td;
+ struct ksegrp *kg;
if (sig > _SIG_MAXSIG || sig <= 0) {
printf("psignal: signal %d\n", sig);
@@ -1214,9 +1234,9 @@ psignal(p, sig)
* if signal event is tracked by procfs, give *that*
* a chance, as well.
*/
- if ((p->p_flag & P_TRACED) || (p->p_stops & S_SIG))
+ if ((p->p_flag & P_TRACED) || (p->p_stops & S_SIG)) {
action = SIG_DFL;
- else {
+ } else {
/*
* If the signal is being ignored,
* then we forget about it immediately.
@@ -1234,10 +1254,27 @@ psignal(p, sig)
action = SIG_DFL;
}
+ /*
+ * bring the priority of a process up if we want it to get
+ * killed in this lifetime.
+ * XXXKSE think if a better way to do this.
+ *
+ * What we need to do is see if there is a thread that will
+ * be able to accept the signal. e.g.
+ * FOREACH_THREAD_IN_PROC() {
+ * if runnable, we're done
+ * else pick one at random.
+ * }
+ */
+ /* XXXKSE
+ * For now there is one thread per proc.
+ * Effectively select one sucker thread..
+ */
+ td = &p->p_thread;
mtx_lock_spin(&sched_lock);
- if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
- (p->p_flag & P_TRACED) == 0)
- p->p_nice = NZERO;
+ if ((p->p_ksegrp.kg_nice > NZERO) && (action == SIG_DFL) &&
+ (prop & SA_KILL) && ((p->p_flag & P_TRACED) == 0))
+ p->p_ksegrp.kg_nice = NZERO; /* XXXKSE */
mtx_unlock_spin(&sched_lock);
if (prop & SA_CONT)
@@ -1266,6 +1303,7 @@ psignal(p, sig)
mtx_unlock_spin(&sched_lock);
return;
}
+
switch (p->p_stat) {
case SSLEEP:
@@ -1275,7 +1313,7 @@ psignal(p, sig)
* be noticed when the process returns through
* trap() or syscall().
*/
- if ((p->p_sflag & PS_SINTR) == 0) {
+ if ((td->td_flags & TDF_SINTR) == 0) {
mtx_unlock_spin(&sched_lock);
goto out;
}
@@ -1357,11 +1395,28 @@ psignal(p, sig)
if (action == SIG_CATCH)
goto runfast;
mtx_lock_spin(&sched_lock);
- if (p->p_wchan == NULL)
- goto run;
- p->p_stat = SSLEEP;
- mtx_unlock_spin(&sched_lock);
- goto out;
+ /*
+ * XXXKSE
+ * do this for each thread.
+ */
+ if (p->p_flag & P_KSES) {
+ mtx_assert(&sched_lock,
+ MA_OWNED | MA_NOTRECURSED);
+ FOREACH_THREAD_IN_PROC(p, td) {
+ if (td->td_wchan == NULL) {
+ setrunnable(td); /* XXXKSE */
+ } else {
+ /* mark it as sleeping */
+ }
+ }
+ mtx_unlock_spin(&sched_lock);
+ goto out;
+ } else {
+ if (p->p_thread.td_wchan == NULL)
+ goto run;
+ p->p_stat = SSLEEP;
+ mtx_unlock_spin(&sched_lock);
+ }
}
if (prop & SA_STOP) {
@@ -1378,13 +1433,25 @@ psignal(p, sig)
* wakeup so that when it is continued, it will be made
* runnable and can look at the signal. But don't make
* the process runnable, leave it stopped.
+ * XXXKSE should we wake ALL blocked threads?
*/
mtx_lock_spin(&sched_lock);
- if (p->p_wchan && p->p_sflag & PS_SINTR) {
- if (p->p_sflag & PS_CVWAITQ)
- cv_waitq_remove(p);
- else
- unsleep(p);
+ if (p->p_flag & P_KSES) {
+ FOREACH_THREAD_IN_PROC(p, td) {
+ if (td->td_wchan && (td->td_flags & TDF_SINTR)){
+ if (td->td_flags & TDF_CVWAITQ)
+ cv_waitq_remove(td);
+ else
+ unsleep(td); /* XXXKSE */
+ }
+ }
+ } else {
+ if (td->td_wchan && td->td_flags & TDF_SINTR) {
+ if (td->td_flags & TDF_CVWAITQ)
+ cv_waitq_remove(td);
+ else
+ unsleep(td); /* XXXKSE */
+ }
}
mtx_unlock_spin(&sched_lock);
goto out;
@@ -1396,9 +1463,21 @@ psignal(p, sig)
* It will either never be noticed, or noticed very soon.
*/
if (p->p_stat == SRUN) {
- signotify(p);
#ifdef SMP
- forward_signal(p);
+ struct kse *ke;
+ struct thread *td = curthread;
+ signotify(&p->p_kse); /* XXXKSE */
+/* we should only deliver to one thread.. but which one? */
+ FOREACH_KSEGRP_IN_PROC(p, kg) {
+ FOREACH_KSE_IN_GROUP(kg, ke) {
+ if (ke->ke_thread == td) {
+ continue;
+ }
+ forward_signal(ke->ke_thread);
+ }
+ }
+#else
+ signotify(&p->p_kse); /* XXXKSE */
#endif
}
mtx_unlock_spin(&sched_lock);
@@ -1409,14 +1488,19 @@ psignal(p, sig)
runfast:
/*
* Raise priority to at least PUSER.
+ * XXXKSE Should we make them all run fast?
+ * Maybe just one would be enough?
*/
mtx_lock_spin(&sched_lock);
- if (p->p_pri.pri_level > PUSER)
- p->p_pri.pri_level = PUSER;
+ FOREACH_KSEGRP_IN_PROC(p, kg) {
+ if (kg->kg_pri.pri_level > PUSER) {
+ kg->kg_pri.pri_level = PUSER;
+ }
+ }
run:
/* If we jump here, sched_lock has to be owned. */
mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
- setrunnable(p);
+ setrunnable(td); /* XXXKSE */
mtx_unlock_spin(&sched_lock);
out:
/* If we jump here, sched_lock should not be owned. */
@@ -1623,7 +1707,8 @@ void
postsig(sig)
register int sig;
{
- register struct proc *p = curproc;
+ struct thread *td = curthread;
+ register struct proc *p = td->td_proc;
struct sigacts *ps;
sig_t action;
sigset_t returnmask;
@@ -1647,7 +1732,7 @@ postsig(sig)
* Default action, where the default is to kill
* the process. (Other cases were ignored above.)
*/
- sigexit(p, sig);
+ sigexit(td, sig);
/* NOTREACHED */
} else {
/*
@@ -1722,10 +1807,11 @@ killproc(p, why)
* does not return.
*/
void
-sigexit(p, sig)
- register struct proc *p;
+sigexit(td, sig)
+ struct thread *td;
int sig;
{
+ struct proc *p = td->td_proc;
PROC_LOCK_ASSERT(p, MA_OWNED);
p->p_acflag |= AXSIG;
@@ -1740,7 +1826,7 @@ sigexit(p, sig)
PROC_UNLOCK(p);
if (!mtx_owned(&Giant))
mtx_lock(&Giant);
- if (coredump(p) == 0)
+ if (coredump(td) == 0)
sig |= WCOREFLAG;
if (kern_logsigexit)
log(LOG_INFO,
@@ -1754,7 +1840,7 @@ sigexit(p, sig)
if (!mtx_owned(&Giant))
mtx_lock(&Giant);
}
- exit1(p, W_EXITCODE(0, sig));
+ exit1(td, W_EXITCODE(0, sig));
/* NOTREACHED */
}
@@ -1849,9 +1935,9 @@ const char *name; uid_t uid; pid_t pid; {
*/
static int
-coredump(p)
- register struct proc *p;
+coredump(struct thread *td)
{
+ struct proc *p = td->td_proc;
register struct vnode *vp;
register struct ucred *cred = p->p_ucred;
struct flock lf;
@@ -1889,7 +1975,7 @@ restart:
name = expand_name(p->p_comm, p->p_ucred->cr_uid, p->p_pid);
if (name == NULL)
return (EINVAL);
- NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, p);
+ NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td); /* XXXKSE */
flags = O_CREAT | FWRITE | O_NOFOLLOW;
error = vn_open(&nd, &flags, S_IRUSR | S_IWUSR);
free(name, M_TEMP);
@@ -1898,7 +1984,7 @@ restart:
NDFREE(&nd, NDF_ONLY_PNBUF);
vp = nd.ni_vp;
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
lf.l_whence = SEEK_SET;
lf.l_start = 0;
lf.l_len = 0;
@@ -1910,7 +1996,7 @@ restart:
if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
lf.l_type = F_UNLCK;
VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
- if ((error = vn_close(vp, FWRITE, cred, p)) != 0)
+ if ((error = vn_close(vp, FWRITE, cred, td)) != 0)
return (error);
if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0)
return (error);
@@ -1919,20 +2005,20 @@ restart:
/* Don't dump to non-regular files or files with links. */
if (vp->v_type != VREG ||
- VOP_GETATTR(vp, &vattr, cred, p) || vattr.va_nlink != 1) {
+ VOP_GETATTR(vp, &vattr, cred, td) || vattr.va_nlink != 1) {
error = EFAULT;
goto out1;
}
VATTR_NULL(&vattr);
vattr.va_size = 0;
- VOP_LEASE(vp, p, cred, LEASE_WRITE);
- VOP_SETATTR(vp, &vattr, cred, p);
+ VOP_LEASE(vp, td, cred, LEASE_WRITE);
+ VOP_SETATTR(vp, &vattr, cred, td);
PROC_LOCK(p);
p->p_acflag |= ACORE;
PROC_UNLOCK(p);
error = p->p_sysent->sv_coredump ?
- p->p_sysent->sv_coredump(p, vp, limit) :
+ p->p_sysent->sv_coredump(td, vp, limit) :
ENOSYS;
out1:
@@ -1940,7 +2026,7 @@ out1:
VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
vn_finished_write(mp);
out2:
- error1 = vn_close(vp, FWRITE, cred, p);
+ error1 = vn_close(vp, FWRITE, cred, td);
if (error == 0)
error = error1;
return (error);
@@ -1960,10 +2046,12 @@ struct nosys_args {
*/
/* ARGSUSED */
int
-nosys(p, args)
- struct proc *p;
+nosys(td, args)
+ struct thread *td;
struct nosys_args *args;
{
+ struct proc *p = td->td_proc;
+
mtx_lock(&Giant);
PROC_LOCK(p);
psignal(p, SIGSYS);
diff --git a/sys/kern/kern_subr.c b/sys/kern/kern_subr.c
index 48364ad..31cde4f 100644
--- a/sys/kern/kern_subr.c
+++ b/sys/kern/kern_subr.c
@@ -66,6 +66,7 @@ uiomove(cp, n, uio)
register int n;
register struct uio *uio;
{
+ struct thread *td = curthread;
register struct iovec *iov;
u_int cnt;
int error = 0;
@@ -73,12 +74,12 @@ uiomove(cp, n, uio)
KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
("uiomove: mode"));
- KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_procp == curproc,
+ KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
("uiomove proc"));
- if (curproc) {
- save = curproc->p_flag & P_DEADLKTREAT;
- curproc->p_flag |= P_DEADLKTREAT;
+ if (td) {
+ save = td->td_flags & TDF_DEADLKTREAT;
+ td->td_flags |= TDF_DEADLKTREAT;
}
while (n > 0 && uio->uio_resid) {
@@ -122,8 +123,10 @@ uiomove(cp, n, uio)
cp += cnt;
n -= cnt;
}
- if (curproc)
- curproc->p_flag = (curproc->p_flag & ~P_DEADLKTREAT) | save;
+ if (td != curthread) printf("uiomove: IT CHANGED!");
+ td = curthread; /* Might things have changed in copyin/copyout? */
+ if (td)
+ td->td_flags = (td->td_flags & ~TDF_DEADLKTREAT) | save;
return (error);
}
@@ -140,7 +143,7 @@ uiomoveco(cp, n, uio, obj)
KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
("uiomoveco: mode"));
- KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_procp == curproc,
+ KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
("uiomoveco proc"));
while (n > 0 && uio->uio_resid) {
@@ -376,14 +379,14 @@ phashinit(elements, type, nentries)
static void
uio_yield()
{
- struct proc *p;
+ struct thread *td;
- p = curproc;
+ td = curthread;
mtx_lock_spin(&sched_lock);
DROP_GIANT_NOSWITCH();
- p->p_pri.pri_level = p->p_pri.pri_user;
- setrunqueue(p);
- p->p_stats->p_ru.ru_nivcsw++;
+ td->td_ksegrp->kg_pri.pri_level = td->td_ksegrp->kg_pri.pri_user;
+ setrunqueue(td);
+ td->td_proc->p_stats->p_ru.ru_nivcsw++;
mi_switch();
mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index a1eaf98..f61c47c 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -45,10 +45,10 @@ SYSINIT(runq, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, runq_init, &runq)
* Wrappers which implement old interface; act on global run queue.
*/
-struct proc *
-chooseproc(void)
+struct thread *
+choosethread(void)
{
- return runq_choose(&runq);
+ return (runq_choose(&runq)->ke_thread);
}
int
@@ -58,15 +58,15 @@ procrunnable(void)
}
void
-remrunqueue(struct proc *p)
+remrunqueue(struct thread *td)
{
- runq_remove(&runq, p);
+ runq_remove(&runq, td->td_kse);
}
void
-setrunqueue(struct proc *p)
+setrunqueue(struct thread *td)
{
- runq_add(&runq, p);
+ runq_add(&runq, td->td_kse);
}
/*
@@ -132,15 +132,15 @@ runq_setbit(struct runq *rq, int pri)
* Return true if the specified process is already in the run queue.
*/
static __inline int
-runq_find(struct runq *rq, struct proc *p)
+runq_find(struct runq *rq, struct kse *ke)
{
- struct proc *p2;
+ struct kse *ke2;
int i;
mtx_assert(&sched_lock, MA_OWNED);
for (i = 0; i < RQB_LEN; i++)
- TAILQ_FOREACH(p2, &rq->rq_queues[i], p_procq)
- if (p2 == p)
+ TAILQ_FOREACH(ke2, &rq->rq_queues[i], ke_procq)
+ if (ke2 == ke)
return 1;
return 0;
}
@@ -151,23 +151,30 @@ runq_find(struct runq *rq, struct proc *p)
* corresponding status bit.
*/
void
-runq_add(struct runq *rq, struct proc *p)
+runq_add(struct runq *rq, struct kse *ke)
{
struct rqhead *rqh;
int pri;
+ struct ksegrp *kg = ke->ke_ksegrp;
+#ifdef INVARIANTS
+ struct proc *p = ke->ke_proc;
+#endif
+ if (ke->ke_flags & KEF_ONRUNQ)
+ return;
mtx_assert(&sched_lock, MA_OWNED);
KASSERT(p->p_stat == SRUN, ("runq_add: proc %p (%s) not SRUN",
p, p->p_comm));
- KASSERT(runq_find(rq, p) == 0,
- ("runq_add: proc %p (%s) already in run queue", p, p->p_comm));
- pri = p->p_pri.pri_level / RQ_PPQ;
- p->p_rqindex = pri;
+ KASSERT(runq_find(rq, ke) == 0,
+ ("runq_add: proc %p (%s) already in run queue", ke, p->p_comm));
+ pri = kg->kg_pri.pri_level / RQ_PPQ;
+ ke->ke_rqindex = pri;
runq_setbit(rq, pri);
rqh = &rq->rq_queues[pri];
CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
- p, p->p_pri.pri_level, pri, rqh);
- TAILQ_INSERT_TAIL(rqh, p, p_procq);
+ p, kg->kg_pri.pri_level, pri, rqh);
+ TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
+ ke->ke_flags |= KEF_ONRUNQ;
}
/*
@@ -198,32 +205,33 @@ runq_check(struct runq *rq)
* If there are no runnable processes, the per-cpu idle process is
* returned. Will not return NULL under any circumstances.
*/
-struct proc *
+struct kse *
runq_choose(struct runq *rq)
{
struct rqhead *rqh;
- struct proc *p;
+ struct kse *ke;
int pri;
mtx_assert(&sched_lock, MA_OWNED);
if ((pri = runq_findbit(rq)) != -1) {
rqh = &rq->rq_queues[pri];
- p = TAILQ_FIRST(rqh);
- KASSERT(p != NULL, ("runq_choose: no proc on busy queue"));
- KASSERT(p->p_stat == SRUN,
- ("runq_choose: process %d(%s) in state %d", p->p_pid,
- p->p_comm, p->p_stat));
- CTR3(KTR_RUNQ, "runq_choose: pri=%d p=%p rqh=%p", pri, p, rqh);
- TAILQ_REMOVE(rqh, p, p_procq);
+ ke = TAILQ_FIRST(rqh);
+ KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
+ KASSERT(ke->ke_proc->p_stat == SRUN,
+ ("runq_choose: process %d(%s) in state %d", ke->ke_proc->p_pid,
+ ke->ke_proc->p_comm, ke->ke_proc->p_stat));
+ CTR3(KTR_RUNQ, "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
+ TAILQ_REMOVE(rqh, ke, ke_procq);
if (TAILQ_EMPTY(rqh)) {
CTR0(KTR_RUNQ, "runq_choose: empty");
runq_clrbit(rq, pri);
}
- return (p);
+ ke->ke_flags &= ~KEF_ONRUNQ;
+ return (ke);
}
CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
- return (PCPU_GET(idleproc));
+ return (PCPU_GET(idlethread)->td_kse);
}
/*
@@ -244,20 +252,26 @@ runq_init(struct runq *rq)
* corresponding status bit if the queue becomes empty.
*/
void
-runq_remove(struct runq *rq, struct proc *p)
+runq_remove(struct runq *rq, struct kse *ke)
{
+#ifdef KTR
+ struct ksegrp *kg = ke->ke_ksegrp;
+#endif
struct rqhead *rqh;
int pri;
+ if (!(ke->ke_flags & KEF_ONRUNQ))
+ return;
mtx_assert(&sched_lock, MA_OWNED);
- pri = p->p_rqindex;
+ pri = ke->ke_rqindex;
rqh = &rq->rq_queues[pri];
CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
- p, p->p_pri.pri_level, pri, rqh);
- KASSERT(p != NULL, ("runq_remove: no proc on busy queue"));
- TAILQ_REMOVE(rqh, p, p_procq);
+ ke, kg->kg_pri.pri_level, pri, rqh);
+ KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
+ TAILQ_REMOVE(rqh, ke, ke_procq);
if (TAILQ_EMPTY(rqh)) {
CTR0(KTR_RUNQ, "runq_remove: empty");
runq_clrbit(rq, pri);
}
+ ke->ke_flags &= ~KEF_ONRUNQ;
}
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index e1d1ce6..df18c1e 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -95,7 +95,7 @@ _sx_slock(struct sx *sx, const char *file, int line)
{
mtx_lock(&sx->sx_lock);
- KASSERT(sx->sx_xholder != curproc,
+ KASSERT(sx->sx_xholder != curthread,
("%s (%s): slock while xlock is held @ %s:%d\n", __FUNCTION__,
sx->sx_object.lo_name, file, line));
@@ -148,7 +148,7 @@ _sx_xlock(struct sx *sx, const char *file, int line)
* xlock while in here, we consider it API abuse and put it under
* INVARIANTS.
*/
- KASSERT(sx->sx_xholder != curproc,
+ KASSERT(sx->sx_xholder != curthread,
("%s (%s): xlock already held @ %s:%d", __FUNCTION__,
sx->sx_object.lo_name, file, line));
@@ -163,7 +163,7 @@ _sx_xlock(struct sx *sx, const char *file, int line)
/* Acquire an exclusive lock. */
sx->sx_cnt--;
- sx->sx_xholder = curproc;
+ sx->sx_xholder = curthread;
LOCK_LOG_LOCK("XLOCK", &sx->sx_object, 0, 0, file, line);
WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);
@@ -178,7 +178,7 @@ _sx_try_xlock(struct sx *sx, const char *file, int line)
mtx_lock(&sx->sx_lock);
if (sx->sx_cnt == 0) {
sx->sx_cnt--;
- sx->sx_xholder = curproc;
+ sx->sx_xholder = curthread;
LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 1, file, line);
WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file,
line);
@@ -256,7 +256,7 @@ _sx_try_upgrade(struct sx *sx, const char *file, int line)
if (sx->sx_cnt == 1) {
sx->sx_cnt = -1;
- sx->sx_xholder = curproc;
+ sx->sx_xholder = curthread;
LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 1, file, line);
WITNESS_UPGRADE(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 24da6e9..5278d17 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -107,13 +107,13 @@ SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
* schedulers into account.
*/
void
-maybe_resched(p)
- struct proc *p;
+maybe_resched(kg)
+ struct ksegrp *kg;
{
mtx_assert(&sched_lock, MA_OWNED);
- if (p->p_pri.pri_level < curproc->p_pri.pri_level)
- curproc->p_sflag |= PS_NEEDRESCHED;
+ if (kg->kg_pri.pri_level < curthread->td_ksegrp->kg_pri.pri_level)
+ curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
}
int
@@ -245,59 +245,88 @@ schedcpu(arg)
{
register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
register struct proc *p;
+ register struct kse *ke;
+ register struct ksegrp *kg;
register int realstathz;
+ int awake;
realstathz = stathz ? stathz : hz;
sx_slock(&allproc_lock);
- LIST_FOREACH(p, &allproc, p_list) {
- /*
- * Increment time in/out of memory and sleep time
- * (if sleeping). We ignore overflow; with 16-bit int's
- * (remember them?) overflow takes 45 days.
- */
+ FOREACH_PROC_IN_SYSTEM(p) {
mtx_lock_spin(&sched_lock);
p->p_swtime++;
- if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
- p->p_slptime++;
- p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
- /*
- * If the process has slept the entire second,
- * stop recalculating its priority until it wakes up.
- */
- if (p->p_slptime > 1) {
- mtx_unlock_spin(&sched_lock);
- continue;
- }
+ FOREACH_KSEGRP_IN_PROC(p, kg) {
+ awake = 0;
+ FOREACH_KSE_IN_GROUP(kg, ke) {
+ /*
+ * Increment time in/out of memory and sleep
+ * time (if sleeping). We ignore overflow;
+ * with 16-bit int's (remember them?)
+ * overflow takes 45 days.
+ */
+ /* XXXKSE */
+ /* if ((ke->ke_flags & KEF_ONRUNQ) == 0) */
+ if (p->p_stat == SSLEEP || p->p_stat == SSTOP) {
+ ke->ke_slptime++;
+ } else {
+ ke->ke_slptime = 0;
+ awake = 1;
+ }
+
+ /*
+ * pctcpu is only for ps?
+ * Do it per kse.. and add them up at the end?
+ * XXXKSE
+ */
+ ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >> FSHIFT;
+ /*
+ * If the kse has been idle the entire second,
+ * stop recalculating its priority until
+ * it wakes up.
+ */
+ if (ke->ke_slptime > 1) {
+ continue;
+ }
- /*
- * p_pctcpu is only for ps.
- */
#if (FSHIFT >= CCPU_SHIFT)
- p->p_pctcpu += (realstathz == 100)?
- ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
- 100 * (((fixpt_t) p->p_cpticks)
- << (FSHIFT - CCPU_SHIFT)) / realstathz;
+ ke->ke_pctcpu += (realstathz == 100) ?
+ ((fixpt_t) ke->ke_cpticks) <<
+ (FSHIFT - CCPU_SHIFT) :
+ 100 * (((fixpt_t) ke->ke_cpticks) <<
+ (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
- p->p_pctcpu += ((FSCALE - ccpu) *
- (p->p_cpticks * FSCALE / realstathz)) >> FSHIFT;
+ ke->ke_pctcpu += ((FSCALE - ccpu) *
+ (ke->ke_cpticks * FSCALE / realstathz)) >>
+ FSHIFT;
#endif
- p->p_cpticks = 0;
- p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);
- resetpriority(p);
- if (p->p_pri.pri_level >= PUSER) {
- if (p->p_oncpu == NOCPU && /* idle */
- p->p_stat == SRUN &&
- (p->p_sflag & PS_INMEM) &&
- (p->p_pri.pri_level / RQ_PPQ) !=
- (p->p_pri.pri_user / RQ_PPQ)) {
- remrunqueue(p);
- p->p_pri.pri_level = p->p_pri.pri_user;
- setrunqueue(p);
- } else
- p->p_pri.pri_level = p->p_pri.pri_user;
- }
+ ke->ke_cpticks = 0;
+ } /* end of kse loop */
+ if (awake == 0) {
+ kg->kg_slptime++;
+ } else {
+ kg->kg_slptime = 0;
+ }
+ kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
+ resetpriority(kg);
+ if (kg->kg_pri.pri_level >= PUSER &&
+ (p->p_sflag & PS_INMEM)) {
+ int changedqueue =
+ ((kg->kg_pri.pri_level / RQ_PPQ) !=
+ (kg->kg_pri.pri_user / RQ_PPQ));
+
+ kg->kg_pri.pri_level = kg->kg_pri.pri_user;
+ FOREACH_KSE_IN_GROUP(kg, ke) {
+ if ((ke->ke_oncpu == NOCPU) && /* idle */
+ (p->p_stat == SRUN) && /* XXXKSE */
+ changedqueue) {
+ remrunqueue(ke->ke_thread);
+ setrunqueue(ke->ke_thread);
+ }
+ }
+ }
+ } /* end of ksegrp loop */
mtx_unlock_spin(&sched_lock);
- }
+ } /* end of process loop */
sx_sunlock(&allproc_lock);
vmmeter();
wakeup((caddr_t)&lbolt);
@@ -310,21 +339,26 @@ schedcpu(arg)
* least six times the loadfactor will decay p_estcpu to zero.
*/
void
-updatepri(p)
- register struct proc *p;
+updatepri(td)
+ register struct thread *td;
{
- register unsigned int newcpu = p->p_estcpu;
+ register struct ksegrp *kg;
+ register unsigned int newcpu;
register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
- if (p->p_slptime > 5 * loadfac)
- p->p_estcpu = 0;
+ if (td == NULL)
+ return;
+ kg = td->td_ksegrp;
+ newcpu = kg->kg_estcpu;
+ if (kg->kg_slptime > 5 * loadfac)
+ kg->kg_estcpu = 0;
else {
- p->p_slptime--; /* the first time was done in schedcpu */
- while (newcpu && --p->p_slptime)
+ kg->kg_slptime--; /* the first time was done in schedcpu */
+ while (newcpu && --kg->kg_slptime)
newcpu = decay_cpu(loadfac, newcpu);
- p->p_estcpu = newcpu;
+ kg->kg_estcpu = newcpu;
}
- resetpriority(p);
+ resetpriority(td->td_ksegrp);
}
/*
@@ -333,7 +367,7 @@ updatepri(p)
* of 2. Shift right by 8, i.e. drop the bottom 256 worth.
*/
#define TABLESIZE 128
-static TAILQ_HEAD(slpquehead, proc) slpque[TABLESIZE];
+static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x) (((intptr_t)(x) >> 8) & (TABLESIZE - 1))
void
@@ -370,6 +404,7 @@ msleep(ident, mtx, priority, wmesg, timo)
const char *wmesg;
{
struct proc *p = curproc;
+ struct thread *td = curthread;
int sig, catch = priority & PCATCH;
int rval = 0;
WITNESS_SAVE_DECL(mtx);
@@ -406,17 +441,18 @@ msleep(ident, mtx, priority, wmesg, timo)
}
KASSERT(p != NULL, ("msleep1"));
- KASSERT(ident != NULL && p->p_stat == SRUN, ("msleep"));
-
- p->p_wchan = ident;
- p->p_wmesg = wmesg;
- p->p_slptime = 0;
- p->p_pri.pri_level = priority & PRIMASK;
- CTR5(KTR_PROC, "msleep: proc %p (pid %d, %s) on %s (%p)", p, p->p_pid,
- p->p_comm, wmesg, ident);
- TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_slpq);
+ KASSERT(ident != NULL && td->td_proc->p_stat == SRUN, ("msleep"));
+
+ td->td_wchan = ident;
+ td->td_wmesg = wmesg;
+ td->td_kse->ke_slptime = 0; /* XXXKSE */
+ td->td_ksegrp->kg_slptime = 0;
+ td->td_ksegrp->kg_pri.pri_level = priority & PRIMASK;
+ CTR5(KTR_PROC, "msleep: thread %p (pid %d, %s) on %s (%p)",
+ td, p->p_pid, p->p_comm, wmesg, ident);
+ TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], td, td_slpq);
if (timo)
- callout_reset(&p->p_slpcallout, timo, endtsleep, p);
+ callout_reset(&td->td_slpcallout, timo, endtsleep, td);
/*
* We put ourselves on the sleep queue and start our timeout
* before calling CURSIG, as we could stop there, and a wakeup
@@ -424,40 +460,40 @@ msleep(ident, mtx, priority, wmesg, timo)
* A SIGCONT would cause us to be marked as SSLEEP
* without resuming us, thus we must be ready for sleep
* when CURSIG is called. If the wakeup happens while we're
- * stopped, p->p_wchan will be 0 upon return from CURSIG.
+ * stopped, td->td_wchan will be 0 upon return from CURSIG.
*/
if (catch) {
CTR3(KTR_PROC, "msleep caught: proc %p (pid %d, %s)", p,
p->p_pid, p->p_comm);
- p->p_sflag |= PS_SINTR;
+ td->td_flags |= TDF_SINTR;
mtx_unlock_spin(&sched_lock);
PROC_LOCK(p);
sig = CURSIG(p);
mtx_lock_spin(&sched_lock);
PROC_UNLOCK_NOSWITCH(p);
if (sig != 0) {
- if (p->p_wchan != NULL)
- unsleep(p);
- } else if (p->p_wchan == NULL)
+ if (td->td_wchan != NULL)
+ unsleep(td);
+ } else if (td->td_wchan == NULL)
catch = 0;
} else
sig = 0;
- if (p->p_wchan != NULL) {
- p->p_stat = SSLEEP;
+ if (td->td_wchan != NULL) {
+ td->td_proc->p_stat = SSLEEP;
p->p_stats->p_ru.ru_nvcsw++;
mi_switch();
}
- CTR3(KTR_PROC, "msleep resume: proc %p (pid %d, %s)", p, p->p_pid,
+ CTR3(KTR_PROC, "msleep resume: proc %p (pid %d, %s)", td, p->p_pid,
p->p_comm);
- KASSERT(p->p_stat == SRUN, ("running but not SRUN"));
- p->p_sflag &= ~PS_SINTR;
- if (p->p_sflag & PS_TIMEOUT) {
- p->p_sflag &= ~PS_TIMEOUT;
+ KASSERT(td->td_proc->p_stat == SRUN, ("running but not SRUN"));
+ td->td_flags &= ~TDF_SINTR;
+ if (td->td_flags & TDF_TIMEOUT) {
+ td->td_flags &= ~TDF_TIMEOUT;
if (sig == 0)
rval = EWOULDBLOCK;
- } else if (p->p_sflag & PS_TIMOFAIL)
- p->p_sflag &= ~PS_TIMOFAIL;
- else if (timo && callout_stop(&p->p_slpcallout) == 0) {
+ } else if (td->td_flags & TDF_TIMOFAIL)
+ td->td_flags &= ~TDF_TIMOFAIL;
+ else if (timo && callout_stop(&td->td_slpcallout) == 0) {
/*
* This isn't supposed to be pretty. If we are here, then
* the endtsleep() callout is currently executing on another
@@ -467,7 +503,7 @@ msleep(ident, mtx, priority, wmesg, timo)
* has a chance to run and the callout may end up waking up
* the wrong msleep(). Yuck.
*/
- p->p_sflag |= PS_TIMEOUT;
+ td->td_flags |= TDF_TIMEOUT;
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
}
@@ -510,28 +546,28 @@ static void
endtsleep(arg)
void *arg;
{
- register struct proc *p;
+ register struct thread *td = arg;
- p = (struct proc *)arg;
- CTR3(KTR_PROC, "endtsleep: proc %p (pid %d, %s)", p, p->p_pid,
- p->p_comm);
+ CTR3(KTR_PROC, "endtsleep: thread %p (pid %d, %s)", td, td->td_proc->p_pid,
+ td->td_proc->p_comm);
mtx_lock_spin(&sched_lock);
/*
* This is the other half of the synchronization with msleep()
* described above. If the PS_TIMEOUT flag is set, we lost the
* race and just need to put the process back on the runqueue.
*/
- if ((p->p_sflag & PS_TIMEOUT) != 0) {
- p->p_sflag &= ~PS_TIMEOUT;
- setrunqueue(p);
- } else if (p->p_wchan != NULL) {
- if (p->p_stat == SSLEEP)
- setrunnable(p);
+ if ((td->td_flags & TDF_TIMEOUT) != 0) {
+ td->td_flags &= ~TDF_TIMEOUT;
+ setrunqueue(td);
+ } else if (td->td_wchan != NULL) {
+ if (td->td_proc->p_stat == SSLEEP) /* XXXKSE */
+ setrunnable(td);
else
- unsleep(p);
- p->p_sflag |= PS_TIMEOUT;
- } else
- p->p_sflag |= PS_TIMOFAIL;
+ unsleep(td);
+ td->td_flags |= TDF_TIMEOUT;
+ } else {
+ td->td_flags |= TDF_TIMOFAIL;
+ }
mtx_unlock_spin(&sched_lock);
}
@@ -539,14 +575,13 @@ endtsleep(arg)
* Remove a process from its wait queue
*/
void
-unsleep(p)
- register struct proc *p;
+unsleep(struct thread *td)
{
mtx_lock_spin(&sched_lock);
- if (p->p_wchan != NULL) {
- TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_slpq);
- p->p_wchan = NULL;
+ if (td->td_wchan != NULL) {
+ TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
+ td->td_wchan = NULL;
}
mtx_unlock_spin(&sched_lock);
}
@@ -559,26 +594,29 @@ wakeup(ident)
register void *ident;
{
register struct slpquehead *qp;
- register struct proc *p;
+ register struct thread *td;
+ struct proc *p;
mtx_lock_spin(&sched_lock);
qp = &slpque[LOOKUP(ident)];
restart:
- TAILQ_FOREACH(p, qp, p_slpq) {
- if (p->p_wchan == ident) {
- TAILQ_REMOVE(qp, p, p_slpq);
- p->p_wchan = NULL;
- if (p->p_stat == SSLEEP) {
+ TAILQ_FOREACH(td, qp, td_slpq) {
+ p = td->td_proc;
+ if (td->td_wchan == ident) {
+ TAILQ_REMOVE(qp, td, td_slpq);
+ td->td_wchan = NULL;
+ if (td->td_proc->p_stat == SSLEEP) {
/* OPTIMIZED EXPANSION OF setrunnable(p); */
- CTR3(KTR_PROC, "wakeup: proc %p (pid %d, %s)",
- p, p->p_pid, p->p_comm);
- if (p->p_slptime > 1)
- updatepri(p);
- p->p_slptime = 0;
- p->p_stat = SRUN;
+ CTR3(KTR_PROC, "wakeup: thread %p (pid %d, %s)",
+ td, p->p_pid, p->p_comm);
+ if (td->td_ksegrp->kg_slptime > 1)
+ updatepri(td);
+ td->td_ksegrp->kg_slptime = 0;
+ td->td_kse->ke_slptime = 0;
+ td->td_proc->p_stat = SRUN;
if (p->p_sflag & PS_INMEM) {
- setrunqueue(p);
- maybe_resched(p);
+ setrunqueue(td);
+ maybe_resched(td->td_ksegrp);
} else {
p->p_sflag |= PS_SWAPINREQ;
wakeup((caddr_t)&proc0);
@@ -601,26 +639,29 @@ wakeup_one(ident)
register void *ident;
{
register struct slpquehead *qp;
+ register struct thread *td;
register struct proc *p;
mtx_lock_spin(&sched_lock);
qp = &slpque[LOOKUP(ident)];
- TAILQ_FOREACH(p, qp, p_slpq) {
- if (p->p_wchan == ident) {
- TAILQ_REMOVE(qp, p, p_slpq);
- p->p_wchan = NULL;
- if (p->p_stat == SSLEEP) {
+ TAILQ_FOREACH(td, qp, td_slpq) {
+ p = td->td_proc;
+ if (td->td_wchan == ident) {
+ TAILQ_REMOVE(qp, td, td_slpq);
+ td->td_wchan = NULL;
+ if (td->td_proc->p_stat == SSLEEP) {
/* OPTIMIZED EXPANSION OF setrunnable(p); */
CTR3(KTR_PROC, "wakeup1: proc %p (pid %d, %s)",
p, p->p_pid, p->p_comm);
- if (p->p_slptime > 1)
- updatepri(p);
- p->p_slptime = 0;
- p->p_stat = SRUN;
+ if (td->td_ksegrp->kg_slptime > 1)
+ updatepri(td);
+ td->td_ksegrp->kg_slptime = 0;
+ td->td_kse->ke_slptime = 0;
+ td->td_proc->p_stat = SRUN;
if (p->p_sflag & PS_INMEM) {
- setrunqueue(p);
- maybe_resched(p);
+ setrunqueue(td);
+ maybe_resched(td->td_ksegrp);
break;
} else {
p->p_sflag |= PS_SWAPINREQ;
@@ -640,7 +681,8 @@ void
mi_switch()
{
struct timeval new_switchtime;
- register struct proc *p = curproc; /* XXX */
+ struct thread *td = curthread; /* XXX */
+ register struct proc *p = td->td_proc; /* XXX */
#if 0
register struct rlimit *rlim;
#endif
@@ -717,14 +759,14 @@ mi_switch()
p->p_comm);
sched_crit = sched_lock.mtx_savecrit;
sched_nest = sched_lock.mtx_recurse;
- p->p_lastcpu = p->p_oncpu;
- p->p_oncpu = NOCPU;
- p->p_sflag &= ~PS_NEEDRESCHED;
+ td->td_lastcpu = td->td_kse->ke_oncpu;
+ td->td_kse->ke_oncpu = NOCPU;
+ td->td_kse->ke_flags &= ~KEF_NEEDRESCHED;
cpu_switch();
- p->p_oncpu = PCPU_GET(cpuid);
+ td->td_kse->ke_oncpu = PCPU_GET(cpuid);
sched_lock.mtx_savecrit = sched_crit;
sched_lock.mtx_recurse = sched_nest;
- sched_lock.mtx_lock = (uintptr_t)p;
+ sched_lock.mtx_lock = (uintptr_t)td;
CTR3(KTR_PROC, "mi_switch: new proc %p (pid %d, %s)", p, p->p_pid,
p->p_comm);
if (PCPU_GET(switchtime.tv_sec) == 0)
@@ -738,39 +780,42 @@ mi_switch()
* and awakening the swapper if it isn't in memory.
*/
void
-setrunnable(p)
- register struct proc *p;
+setrunnable(struct thread *td)
{
-
+ struct proc *p = td->td_proc;
mtx_lock_spin(&sched_lock);
switch (p->p_stat) {
+ case SZOMB: /* not a thread flag XXXKSE */
+ panic("setrunnabl(1)");
+ }
+ switch (td->td_proc->p_stat) {
case 0:
case SRUN:
- case SZOMB:
case SWAIT:
default:
- panic("setrunnable");
+ panic("setrunnable(2)");
case SSTOP:
case SSLEEP: /* e.g. when sending signals */
- if (p->p_sflag & PS_CVWAITQ)
- cv_waitq_remove(p);
+ if (td->td_flags & TDF_CVWAITQ)
+ cv_waitq_remove(td);
else
- unsleep(p);
+ unsleep(td);
break;
case SIDL:
break;
}
- p->p_stat = SRUN;
- if (p->p_slptime > 1)
- updatepri(p);
- p->p_slptime = 0;
+ td->td_proc->p_stat = SRUN;
+ if (td->td_ksegrp->kg_slptime > 1)
+ updatepri(td);
+ td->td_ksegrp->kg_slptime = 0;
+ td->td_kse->ke_slptime = 0;
if ((p->p_sflag & PS_INMEM) == 0) {
p->p_sflag |= PS_SWAPINREQ;
wakeup((caddr_t)&proc0);
} else {
- setrunqueue(p);
- maybe_resched(p);
+ setrunqueue(td);
+ maybe_resched(td->td_ksegrp);
}
mtx_unlock_spin(&sched_lock);
}
@@ -781,20 +826,20 @@ setrunnable(p)
* than that of the current process.
*/
void
-resetpriority(p)
- register struct proc *p;
+resetpriority(kg)
+ register struct ksegrp *kg;
{
register unsigned int newpriority;
mtx_lock_spin(&sched_lock);
- if (p->p_pri.pri_class == PRI_TIMESHARE) {
- newpriority = PUSER + p->p_estcpu / INVERSE_ESTCPU_WEIGHT +
- NICE_WEIGHT * (p->p_nice - PRIO_MIN);
+ if (kg->kg_pri.pri_class == PRI_TIMESHARE) {
+ newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
+ NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
PRI_MAX_TIMESHARE);
- p->p_pri.pri_user = newpriority;
+ kg->kg_pri.pri_user = newpriority;
}
- maybe_resched(p);
+ maybe_resched(kg);
mtx_unlock_spin(&sched_lock);
}
@@ -827,16 +872,22 @@ sched_setup(dummy)
* run much recently, and to round-robin among other processes.
*/
void
-schedclock(p)
- struct proc *p;
+schedclock(td)
+ struct thread *td;
{
-
- p->p_cpticks++;
- p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
- if ((p->p_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
- resetpriority(p);
- if (p->p_pri.pri_level >= PUSER)
- p->p_pri.pri_level = p->p_pri.pri_user;
+ struct kse *ke = td->td_kse;
+ struct ksegrp *kg = td->td_ksegrp;
+
+ if (td) {
+ ke->ke_cpticks++;
+ kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
+ if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
+ resetpriority(td->td_ksegrp);
+ if (kg->kg_pri.pri_level >= PUSER)
+ kg->kg_pri.pri_level = kg->kg_pri.pri_user;
+ }
+ } else {
+ panic("schedclock");
}
}
@@ -844,18 +895,20 @@ schedclock(p)
* General purpose yield system call
*/
int
-yield(struct proc *p, struct yield_args *uap)
+yield(struct thread *td, struct yield_args *uap)
{
- p->p_retval[0] = 0;
+
+ struct ksegrp *kg = td->td_ksegrp;
+ td->td_retval[0] = 0;
mtx_lock_spin(&sched_lock);
mtx_assert(&Giant, MA_NOTOWNED);
#if 0
DROP_GIANT_NOSWITCH();
#endif
- p->p_pri.pri_level = PRI_MAX_TIMESHARE;
- setrunqueue(p);
- p->p_stats->p_ru.ru_nvcsw++;
+ kg->kg_pri.pri_level = PRI_MAX_TIMESHARE;
+ setrunqueue(td);
+ kg->kg_proc->p_stats->p_ru.ru_nvcsw++;
mi_switch();
mtx_unlock_spin(&sched_lock);
#if 0
diff --git a/sys/kern/kern_syscalls.c b/sys/kern/kern_syscalls.c
index 633406a..4dd623e 100644
--- a/sys/kern/kern_syscalls.c
+++ b/sys/kern/kern_syscalls.c
@@ -39,15 +39,15 @@
* Place holder for system call slots reserved for loadable modules.
*/
int
-lkmnosys(struct proc *p, struct nosys_args *args)
+lkmnosys(struct thread *td, struct nosys_args *args)
{
- return(nosys(p, args));
+ return(nosys(td, args));
}
int
-lkmressys(struct proc *p, struct nosys_args *args)
+lkmressys(struct thread *td, struct nosys_args *args)
{
- return(nosys(p, args));
+ return(nosys(td, args));
}
int
diff --git a/sys/kern/kern_sysctl.c b/sys/kern/kern_sysctl.c
index ce5ba3d..34fcb68 100644
--- a/sys/kern/kern_sysctl.c
+++ b/sys/kern/kern_sysctl.c
@@ -836,7 +836,7 @@ sysctl_new_kernel(struct sysctl_req *req, void *p, size_t l)
}
int
-kernel_sysctl(struct proc *p, int *name, u_int namelen, void *old,
+kernel_sysctl(struct thread *td, int *name, u_int namelen, void *old,
size_t *oldlenp, void *new, size_t newlen, size_t *retval)
{
int error = 0;
@@ -844,7 +844,7 @@ kernel_sysctl(struct proc *p, int *name, u_int namelen, void *old,
bzero(&req, sizeof req);
- req.p = p;
+ req.p = td->td_proc;
if (oldlenp) {
req.oldlen = *oldlenp;
@@ -896,7 +896,7 @@ kernel_sysctl(struct proc *p, int *name, u_int namelen, void *old,
}
int
-kernel_sysctlbyname(struct proc *p, char *name, void *old, size_t *oldlenp,
+kernel_sysctlbyname(struct thread *td, char *name, void *old, size_t *oldlenp,
void *new, size_t newlen, size_t *retval)
{
int oid[CTL_MAXNAME];
@@ -907,12 +907,12 @@ kernel_sysctlbyname(struct proc *p, char *name, void *old, size_t *oldlenp,
oid[1] = 3; /* name2oid */
oidlen = sizeof(oid);
- error = kernel_sysctl(p, oid, 2, oid, &oidlen,
+ error = kernel_sysctl(td, oid, 2, oid, &oidlen,
(void *)name, strlen(name), &plen);
if (error)
return (error);
- error = kernel_sysctl(p, oid, plen / sizeof(int), old, oldlenp,
+ error = kernel_sysctl(td, oid, plen / sizeof(int), old, oldlenp,
new, newlen, retval);
return (error);
}
@@ -1066,7 +1066,7 @@ struct sysctl_args {
* MPSAFE
*/
int
-__sysctl(struct proc *p, struct sysctl_args *uap)
+__sysctl(struct thread *td, struct sysctl_args *uap)
{
int error, name[CTL_MAXNAME];
size_t j;
@@ -1080,7 +1080,7 @@ __sysctl(struct proc *p, struct sysctl_args *uap)
mtx_lock(&Giant);
- error = userland_sysctl(p, name, uap->namelen,
+ error = userland_sysctl(td, name, uap->namelen,
uap->old, uap->oldlenp, 0,
uap->new, uap->newlen, &j);
if (error && error != ENOMEM)
@@ -1100,7 +1100,7 @@ done2:
* must be in kernel space.
*/
int
-userland_sysctl(struct proc *p, int *name, u_int namelen, void *old,
+userland_sysctl(struct thread *td, int *name, u_int namelen, void *old,
size_t *oldlenp, int inkernel, void *new, size_t newlen, size_t *retval)
{
int error = 0;
@@ -1108,7 +1108,7 @@ userland_sysctl(struct proc *p, int *name, u_int namelen, void *old,
bzero(&req, sizeof req);
- req.p = p;
+ req.p = td->td_proc;
if (oldlenp) {
if (inkernel) {
@@ -1245,7 +1245,7 @@ struct getkerninfo_args {
* MPSAFE
*/
int
-ogetkerninfo(struct proc *p, struct getkerninfo_args *uap)
+ogetkerninfo(struct thread *td, struct getkerninfo_args *uap)
{
int error, name[6];
size_t size;
@@ -1262,14 +1262,14 @@ ogetkerninfo(struct proc *p, struct getkerninfo_args *uap)
name[3] = (uap->op & 0xff0000) >> 16;
name[4] = uap->op & 0xff;
name[5] = uap->arg;
- error = userland_sysctl(p, name, 6, uap->where, uap->size,
+ error = userland_sysctl(td, name, 6, uap->where, uap->size,
0, 0, 0, &size);
break;
case KINFO_VNODE:
name[0] = CTL_KERN;
name[1] = KERN_VNODE;
- error = userland_sysctl(p, name, 2, uap->where, uap->size,
+ error = userland_sysctl(td, name, 2, uap->where, uap->size,
0, 0, 0, &size);
break;
@@ -1278,35 +1278,35 @@ ogetkerninfo(struct proc *p, struct getkerninfo_args *uap)
name[1] = KERN_PROC;
name[2] = uap->op & 0xff;
name[3] = uap->arg;
- error = userland_sysctl(p, name, 4, uap->where, uap->size,
+ error = userland_sysctl(td, name, 4, uap->where, uap->size,
0, 0, 0, &size);
break;
case KINFO_FILE:
name[0] = CTL_KERN;
name[1] = KERN_FILE;
- error = userland_sysctl(p, name, 2, uap->where, uap->size,
+ error = userland_sysctl(td, name, 2, uap->where, uap->size,
0, 0, 0, &size);
break;
case KINFO_METER:
name[0] = CTL_VM;
name[1] = VM_METER;
- error = userland_sysctl(p, name, 2, uap->where, uap->size,
+ error = userland_sysctl(td, name, 2, uap->where, uap->size,
0, 0, 0, &size);
break;
case KINFO_LOADAVG:
name[0] = CTL_VM;
name[1] = VM_LOADAVG;
- error = userland_sysctl(p, name, 2, uap->where, uap->size,
+ error = userland_sysctl(td, name, 2, uap->where, uap->size,
0, 0, 0, &size);
break;
case KINFO_CLOCKRATE:
name[0] = CTL_KERN;
name[1] = KERN_CLOCKRATE;
- error = userland_sysctl(p, name, 2, uap->where, uap->size,
+ error = userland_sysctl(td, name, 2, uap->where, uap->size,
0, 0, 0, &size);
break;
@@ -1380,7 +1380,7 @@ ogetkerninfo(struct proc *p, struct getkerninfo_args *uap)
break;
}
if (error == 0) {
- p->p_retval[0] = needed ? needed : size;
+ td->td_retval[0] = needed ? needed : size;
if (uap->size) {
error = copyout((caddr_t)&size, (caddr_t)uap->size,
sizeof(size));
diff --git a/sys/kern/kern_time.c b/sys/kern/kern_time.c
index 545479d..17e3a46 100644
--- a/sys/kern/kern_time.c
+++ b/sys/kern/kern_time.c
@@ -64,7 +64,7 @@ struct timezone tz;
* timers when they expire.
*/
-static int nanosleep1 __P((struct proc *p, struct timespec *rqt,
+static int nanosleep1 __P((struct thread *td, struct timespec *rqt,
struct timespec *rmt));
static int settime __P((struct timeval *));
static void timevalfix __P((struct timeval *));
@@ -151,8 +151,8 @@ struct clock_gettime_args {
*/
/* ARGSUSED */
int
-clock_gettime(p, uap)
- struct proc *p;
+clock_gettime(td, uap)
+ struct thread *td;
struct clock_gettime_args *uap;
{
struct timespec ats;
@@ -177,8 +177,8 @@ struct clock_settime_args {
*/
/* ARGSUSED */
int
-clock_settime(p, uap)
- struct proc *p;
+clock_settime(td, uap)
+ struct thread *td;
struct clock_settime_args *uap;
{
struct timeval atv;
@@ -186,7 +186,7 @@ clock_settime(p, uap)
int error;
mtx_lock(&Giant);
- if ((error = suser(p)) != 0)
+ if ((error = suser_td(td)) != 0)
goto done2;
if (SCARG(uap, clock_id) != CLOCK_REALTIME) {
error = EINVAL;
@@ -214,8 +214,8 @@ struct clock_getres_args {
#endif
int
-clock_getres(p, uap)
- struct proc *p;
+clock_getres(td, uap)
+ struct thread *td;
struct clock_getres_args *uap;
{
struct timespec ts;
@@ -235,8 +235,8 @@ clock_getres(p, uap)
static int nanowait;
static int
-nanosleep1(p, rqt, rmt)
- struct proc *p;
+nanosleep1(td, rqt, rmt)
+ struct thread *td;
struct timespec *rqt, *rmt;
{
struct timespec ts, ts2, ts3;
@@ -285,8 +285,8 @@ struct nanosleep_args {
*/
/* ARGSUSED */
int
-nanosleep(p, uap)
- struct proc *p;
+nanosleep(td, uap)
+ struct thread *td;
struct nanosleep_args *uap;
{
struct timespec rmt, rqt;
@@ -304,7 +304,7 @@ nanosleep(p, uap)
goto done2;
}
}
- error = nanosleep1(p, &rqt, &rmt);
+ error = nanosleep1(td, &rqt, &rmt);
if (error && SCARG(uap, rmtp)) {
int error2;
@@ -328,8 +328,8 @@ struct gettimeofday_args {
*/
/* ARGSUSED */
int
-gettimeofday(p, uap)
- struct proc *p;
+gettimeofday(td, uap)
+ struct thread *td;
register struct gettimeofday_args *uap;
{
struct timeval atv;
@@ -363,8 +363,8 @@ struct settimeofday_args {
*/
/* ARGSUSED */
int
-settimeofday(p, uap)
- struct proc *p;
+settimeofday(td, uap)
+ struct thread *td;
struct settimeofday_args *uap;
{
struct timeval atv;
@@ -373,7 +373,7 @@ settimeofday(p, uap)
mtx_lock(&Giant);
- if ((error = suser(p)))
+ if ((error = suser_td(td)))
goto done2;
/* Verify all parameters before changing time. */
if (uap->tv) {
@@ -414,8 +414,8 @@ struct adjtime_args {
*/
/* ARGSUSED */
int
-adjtime(p, uap)
- struct proc *p;
+adjtime(td, uap)
+ struct thread *td;
register struct adjtime_args *uap;
{
struct timeval atv;
@@ -424,7 +424,7 @@ adjtime(p, uap)
mtx_lock(&Giant);
- if ((error = suser(p)))
+ if ((error = suser_td(td)))
goto done2;
error = copyin((caddr_t)uap->delta, (caddr_t)&atv,
sizeof(struct timeval));
@@ -502,10 +502,11 @@ struct getitimer_args {
*/
/* ARGSUSED */
int
-getitimer(p, uap)
- struct proc *p;
+getitimer(td, uap)
+ struct thread *td;
register struct getitimer_args *uap;
{
+ struct proc *p = td->td_proc;
struct timeval ctv;
struct itimerval aitv;
int s;
@@ -553,10 +554,11 @@ struct setitimer_args {
*/
/* ARGSUSED */
int
-setitimer(p, uap)
- struct proc *p;
+setitimer(td, uap)
+ struct thread *td;
register struct setitimer_args *uap;
{
+ struct proc *p = td->td_proc;
struct itimerval aitv;
struct timeval ctv;
register struct itimerval *itvp;
@@ -572,7 +574,7 @@ setitimer(p, uap)
mtx_lock(&Giant);
if ((uap->itv = uap->oitv) &&
- (error = getitimer(p, (struct getitimer_args *)uap))) {
+ (error = getitimer(td, (struct getitimer_args *)uap))) {
goto done2;
}
if (itvp == 0) {
diff --git a/sys/kern/kern_xxx.c b/sys/kern/kern_xxx.c
index 84d88ce..f3f19ee 100644
--- a/sys/kern/kern_xxx.c
+++ b/sys/kern/kern_xxx.c
@@ -60,8 +60,8 @@ struct gethostname_args {
*/
/* ARGSUSED */
int
-ogethostname(p, uap)
- struct proc *p;
+ogethostname(td, uap)
+ struct thread *td;
struct gethostname_args *uap;
{
int name[2];
@@ -71,7 +71,7 @@ ogethostname(p, uap)
name[0] = CTL_KERN;
name[1] = KERN_HOSTNAME;
mtx_lock(&Giant);
- error = userland_sysctl(p, name, 2, uap->hostname, &len, 1, 0, 0, 0);
+ error = userland_sysctl(td, name, 2, uap->hostname, &len, 1, 0, 0, 0);
mtx_unlock(&Giant);
return(error);
}
@@ -87,8 +87,8 @@ struct sethostname_args {
*/
/* ARGSUSED */
int
-osethostname(p, uap)
- struct proc *p;
+osethostname(td, uap)
+ struct thread *td;
register struct sethostname_args *uap;
{
int name[2];
@@ -97,8 +97,8 @@ osethostname(p, uap)
name[0] = CTL_KERN;
name[1] = KERN_HOSTNAME;
mtx_lock(&Giant);
- if ((error = suser_xxx(0, p, PRISON_ROOT)) == 0) {
- error = userland_sysctl(p, name, 2, 0, 0, 0,
+ if ((error = suser_xxx(0, td->td_proc, PRISON_ROOT)) == 0) {
+ error = userland_sysctl(td, name, 2, 0, 0, 0,
uap->hostname, uap->len, 0);
}
mtx_unlock(&Giant);
@@ -115,12 +115,12 @@ struct ogethostid_args {
*/
/* ARGSUSED */
int
-ogethostid(p, uap)
- struct proc *p;
+ogethostid(td, uap)
+ struct thread *td;
struct ogethostid_args *uap;
{
- *(long *)(p->p_retval) = hostid;
+ *(long *)(td->td_retval) = hostid;
return (0);
}
#endif /* COMPAT_43 || COMPAT_SUNOS */
@@ -136,14 +136,14 @@ struct osethostid_args {
*/
/* ARGSUSED */
int
-osethostid(p, uap)
- struct proc *p;
+osethostid(td, uap)
+ struct thread *td;
struct osethostid_args *uap;
{
int error;
mtx_lock(&Giant);
- if ((error = suser(p)) == 0)
+ if ((error = suser_td(td)))
hostid = uap->hostid;
mtx_unlock(&Giant);
return (error);
@@ -153,8 +153,8 @@ osethostid(p, uap)
* MPSAFE
*/
int
-oquota(p, uap)
- struct proc *p;
+oquota(td, uap)
+ struct thread *td;
struct oquota_args *uap;
{
return (ENOSYS);
@@ -180,8 +180,8 @@ struct uname_args {
*/
/* ARGSUSED */
int
-uname(p, uap)
- struct proc *p;
+uname(td, uap)
+ struct thread *td;
struct uname_args *uap;
{
int name[2], error;
@@ -192,7 +192,7 @@ uname(p, uap)
name[1] = KERN_OSTYPE;
len = sizeof (uap->name->sysname);
mtx_lock(&Giant);
- error = userland_sysctl(p, name, 2, uap->name->sysname, &len,
+ error = userland_sysctl(td, name, 2, uap->name->sysname, &len,
1, 0, 0, 0);
if (error)
goto done2;
@@ -200,7 +200,7 @@ uname(p, uap)
name[1] = KERN_HOSTNAME;
len = sizeof uap->name->nodename;
- error = userland_sysctl(p, name, 2, uap->name->nodename, &len,
+ error = userland_sysctl(td, name, 2, uap->name->nodename, &len,
1, 0, 0, 0);
if (error)
goto done2;
@@ -208,7 +208,7 @@ uname(p, uap)
name[1] = KERN_OSRELEASE;
len = sizeof uap->name->release;
- error = userland_sysctl(p, name, 2, uap->name->release, &len,
+ error = userland_sysctl(td, name, 2, uap->name->release, &len,
1, 0, 0, 0);
if (error)
goto done2;
@@ -217,7 +217,7 @@ uname(p, uap)
/*
name = KERN_VERSION;
len = sizeof uap->name->version;
- error = userland_sysctl(p, name, 2, uap->name->version, &len,
+ error = userland_sysctl(td, name, 2, uap->name->version, &len,
1, 0, 0, 0);
if (error)
goto done2;
@@ -241,7 +241,7 @@ uname(p, uap)
name[0] = CTL_HW;
name[1] = HW_MACHINE;
len = sizeof uap->name->machine;
- error = userland_sysctl(p, name, 2, uap->name->machine, &len,
+ error = userland_sysctl(td, name, 2, uap->name->machine, &len,
1, 0, 0, 0);
if (error)
goto done2;
@@ -263,8 +263,8 @@ struct getdomainname_args {
*/
/* ARGSUSED */
int
-getdomainname(p, uap)
- struct proc *p;
+getdomainname(td, uap)
+ struct thread *td;
struct getdomainname_args *uap;
{
int domainnamelen;
@@ -291,14 +291,14 @@ struct setdomainname_args {
*/
/* ARGSUSED */
int
-setdomainname(p, uap)
- struct proc *p;
+setdomainname(td, uap)
+ struct thread *td;
struct setdomainname_args *uap;
{
int error, domainnamelen;
mtx_lock(&Giant);
- if ((error = suser(p)))
+ if ((error = suser_td(td)))
goto done2;
if ((u_int)uap->len > sizeof (domainname) - 1) {
error = EINVAL;
diff --git a/sys/kern/ksched.c b/sys/kern/ksched.c
index d118b93..6ad2a07 100644
--- a/sys/kern/ksched.c
+++ b/sys/kern/ksched.c
@@ -62,9 +62,9 @@ int ksched_attach(struct ksched **p)
return 0;
}
-int ksched_detach(struct ksched *p)
+int ksched_detach(struct ksched *ks)
{
- p31b_free(p);
+ p31b_free(ks);
return 0;
}
@@ -94,13 +94,13 @@ int ksched_detach(struct ksched *p)
#define P1B_PRIO_MAX rtpprio_to_p4prio(RTP_PRIO_MIN)
static __inline int
-getscheduler(register_t *ret, struct ksched *ksched, struct proc *p)
+getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
{
struct rtprio rtp;
int e = 0;
mtx_lock_spin(&sched_lock);
- pri_to_rtp(&p->p_pri, &rtp);
+ pri_to_rtp(&td->td_ksegrp->kg_pri, &rtp);
mtx_unlock_spin(&sched_lock);
switch (rtp.type)
{
@@ -121,31 +121,31 @@ getscheduler(register_t *ret, struct ksched *ksched, struct proc *p)
}
int ksched_setparam(register_t *ret, struct ksched *ksched,
- struct proc *p, const struct sched_param *param)
+ struct thread *td, const struct sched_param *param)
{
register_t policy;
int e;
- e = getscheduler(&policy, ksched, p);
+ e = getscheduler(&policy, ksched, td);
if (e == 0)
{
if (policy == SCHED_OTHER)
e = EINVAL;
else
- e = ksched_setscheduler(ret, ksched, p, policy, param);
+ e = ksched_setscheduler(ret, ksched, td, policy, param);
}
return e;
}
int ksched_getparam(register_t *ret, struct ksched *ksched,
- struct proc *p, struct sched_param *param)
+ struct thread *td, struct sched_param *param)
{
struct rtprio rtp;
mtx_lock_spin(&sched_lock);
- pri_to_rtp(&p->p_pri, &rtp);
+ pri_to_rtp(&td->td_ksegrp->kg_pri, &rtp);
mtx_unlock_spin(&sched_lock);
if (RTP_PRIO_IS_REALTIME(rtp.type))
param->sched_priority = rtpprio_to_p4prio(rtp.prio);
@@ -161,7 +161,7 @@ int ksched_getparam(register_t *ret, struct ksched *ksched,
*
*/
int ksched_setscheduler(register_t *ret, struct ksched *ksched,
- struct proc *p, int policy, const struct sched_param *param)
+ struct thread *td, int policy, const struct sched_param *param)
{
int e = 0;
struct rtprio rtp;
@@ -179,8 +179,8 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
? RTP_PRIO_FIFO : RTP_PRIO_REALTIME;
mtx_lock_spin(&sched_lock);
- rtp_to_pri(&rtp, &p->p_pri);
- p->p_sflag |= PS_NEEDRESCHED;
+ rtp_to_pri(&rtp, &td->td_ksegrp->kg_pri);
+ td->td_last_kse->ke_flags |= KEF_NEEDRESCHED; /* XXXKSE */
mtx_unlock_spin(&sched_lock);
}
else
@@ -194,7 +194,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
rtp.type = RTP_PRIO_NORMAL;
rtp.prio = p4prio_to_rtpprio(param->sched_priority);
mtx_lock_spin(&sched_lock);
- rtp_to_pri(&rtp, &p->p_pri);
+ rtp_to_pri(&rtp, &td->td_ksegrp->kg_pri);
/* XXX Simply revert to whatever we had for last
* normal scheduler priorities.
@@ -202,7 +202,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
* on the scheduling code: You must leave the
* scheduling info alone.
*/
- p->p_sflag |= PS_NEEDRESCHED;
+ td->td_last_kse->ke_flags |= KEF_NEEDRESCHED; /* XXXKSE */
mtx_unlock_spin(&sched_lock);
}
break;
@@ -211,9 +211,9 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
return e;
}
-int ksched_getscheduler(register_t *ret, struct ksched *ksched, struct proc *p)
+int ksched_getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
{
- return getscheduler(ret, ksched, p);
+ return getscheduler(ret, ksched, td);
}
/* ksched_yield: Yield the CPU.
@@ -221,7 +221,7 @@ int ksched_getscheduler(register_t *ret, struct ksched *ksched, struct proc *p)
int ksched_yield(register_t *ret, struct ksched *ksched)
{
mtx_lock_spin(&sched_lock);
- curproc->p_sflag |= PS_NEEDRESCHED;
+ curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
mtx_unlock_spin(&sched_lock);
return 0;
}
@@ -271,7 +271,7 @@ int ksched_get_priority_min(register_t *ret, struct ksched *ksched, int policy)
}
int ksched_rr_get_interval(register_t *ret, struct ksched *ksched,
- struct proc *p, struct timespec *timespec)
+ struct thread *td, struct timespec *timespec)
{
*timespec = ksched->rr_interval;
diff --git a/sys/kern/link_aout.c b/sys/kern/link_aout.c
index 1218e22..e410b20 100644
--- a/sys/kern/link_aout.c
+++ b/sys/kern/link_aout.c
@@ -193,14 +193,15 @@ static int
link_aout_load_file(linker_class_t lc, const char* filename, linker_file_t* result)
{
struct nameidata nd;
- struct proc* p = curproc; /* XXX */
+ struct thread *td = curthread; /* XXX */
+ struct proc *p = td->td_proc;
int error = 0;
int resid, flags;
struct exec header;
aout_file_t af;
linker_file_t lf = 0;
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, filename, p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, filename, td);
flags = FREAD;
error = vn_open(&nd, &flags, 0);
if (error)
@@ -211,7 +212,7 @@ link_aout_load_file(linker_class_t lc, const char* filename, linker_file_t* resu
* Read the a.out header from the file.
*/
error = vn_rdwr(UIO_READ, nd.ni_vp, (void*) &header, sizeof header, 0,
- UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
+ UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, td);
if (error)
goto out;
@@ -236,7 +237,7 @@ link_aout_load_file(linker_class_t lc, const char* filename, linker_file_t* resu
*/
error = vn_rdwr(UIO_READ, nd.ni_vp, (void*) af->address,
header.a_text + header.a_data, 0,
- UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
+ UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, td);
if (error)
goto out;
bzero(af->address + header.a_text + header.a_data, header.a_bss);
@@ -267,8 +268,8 @@ link_aout_load_file(linker_class_t lc, const char* filename, linker_file_t* resu
out:
if (error && lf)
linker_file_unload(lf);
- VOP_UNLOCK(nd.ni_vp, 0, p);
- vn_close(nd.ni_vp, FREAD, p->p_ucred, p);
+ VOP_UNLOCK(nd.ni_vp, 0, td);
+ vn_close(nd.ni_vp, FREAD, p->p_ucred, td);
return error;
}
diff --git a/sys/kern/link_elf.c b/sys/kern/link_elf.c
index 0abf96c..db29c37 100644
--- a/sys/kern/link_elf.c
+++ b/sys/kern/link_elf.c
@@ -513,7 +513,8 @@ static int
link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* result)
{
struct nameidata nd;
- struct proc* p = curproc; /* XXX */
+ struct thread* td = curthread; /* XXX */
+ struct proc* p = td->td_proc; /* XXX */
Elf_Ehdr *hdr;
caddr_t firstpage;
int nbytes, i;
@@ -546,7 +547,7 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
shdr = NULL;
lf = NULL;
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, filename, p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, filename, td);
flags = FREAD;
error = vn_open(&nd, &flags, 0);
if (error)
@@ -563,7 +564,7 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
}
hdr = (Elf_Ehdr *)firstpage;
error = vn_rdwr(UIO_READ, nd.ni_vp, firstpage, PAGE_SIZE, 0,
- UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
+ UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, td);
nbytes = PAGE_SIZE - resid;
if (error)
goto out;
@@ -702,7 +703,7 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
caddr_t segbase = mapbase + segs[i]->p_vaddr - base_vaddr;
error = vn_rdwr(UIO_READ, nd.ni_vp,
segbase, segs[i]->p_filesz, segs[i]->p_offset,
- UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
+ UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, td);
if (error) {
goto out;
}
@@ -756,7 +757,7 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
}
error = vn_rdwr(UIO_READ, nd.ni_vp,
(caddr_t)shdr, nbytes, hdr->e_shoff,
- UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
+ UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, td);
if (error)
goto out;
symtabindex = -1;
@@ -781,12 +782,12 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
}
error = vn_rdwr(UIO_READ, nd.ni_vp,
ef->symbase, symcnt, shdr[symtabindex].sh_offset,
- UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
+ UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, td);
if (error)
goto out;
error = vn_rdwr(UIO_READ, nd.ni_vp,
ef->strbase, strcnt, shdr[symstrindex].sh_offset,
- UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
+ UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, td);
if (error)
goto out;
@@ -817,8 +818,8 @@ out:
free(shdr, M_LINKER);
if (firstpage)
free(firstpage, M_LINKER);
- VOP_UNLOCK(nd.ni_vp, 0, p);
- vn_close(nd.ni_vp, FREAD, p->p_ucred, p);
+ VOP_UNLOCK(nd.ni_vp, 0, td);
+ vn_close(nd.ni_vp, FREAD, p->p_ucred, td);
return error;
}
diff --git a/sys/kern/link_elf_obj.c b/sys/kern/link_elf_obj.c
index 0abf96c..db29c37 100644
--- a/sys/kern/link_elf_obj.c
+++ b/sys/kern/link_elf_obj.c
@@ -513,7 +513,8 @@ static int
link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* result)
{
struct nameidata nd;
- struct proc* p = curproc; /* XXX */
+ struct thread* td = curthread; /* XXX */
+ struct proc* p = td->td_proc; /* XXX */
Elf_Ehdr *hdr;
caddr_t firstpage;
int nbytes, i;
@@ -546,7 +547,7 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
shdr = NULL;
lf = NULL;
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, filename, p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, filename, td);
flags = FREAD;
error = vn_open(&nd, &flags, 0);
if (error)
@@ -563,7 +564,7 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
}
hdr = (Elf_Ehdr *)firstpage;
error = vn_rdwr(UIO_READ, nd.ni_vp, firstpage, PAGE_SIZE, 0,
- UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
+ UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, td);
nbytes = PAGE_SIZE - resid;
if (error)
goto out;
@@ -702,7 +703,7 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
caddr_t segbase = mapbase + segs[i]->p_vaddr - base_vaddr;
error = vn_rdwr(UIO_READ, nd.ni_vp,
segbase, segs[i]->p_filesz, segs[i]->p_offset,
- UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
+ UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, td);
if (error) {
goto out;
}
@@ -756,7 +757,7 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
}
error = vn_rdwr(UIO_READ, nd.ni_vp,
(caddr_t)shdr, nbytes, hdr->e_shoff,
- UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
+ UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, td);
if (error)
goto out;
symtabindex = -1;
@@ -781,12 +782,12 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
}
error = vn_rdwr(UIO_READ, nd.ni_vp,
ef->symbase, symcnt, shdr[symtabindex].sh_offset,
- UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
+ UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, td);
if (error)
goto out;
error = vn_rdwr(UIO_READ, nd.ni_vp,
ef->strbase, strcnt, shdr[symstrindex].sh_offset,
- UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
+ UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, td);
if (error)
goto out;
@@ -817,8 +818,8 @@ out:
free(shdr, M_LINKER);
if (firstpage)
free(firstpage, M_LINKER);
- VOP_UNLOCK(nd.ni_vp, 0, p);
- vn_close(nd.ni_vp, FREAD, p->p_ucred, p);
+ VOP_UNLOCK(nd.ni_vp, 0, td);
+ vn_close(nd.ni_vp, FREAD, p->p_ucred, td);
return error;
}
diff --git a/sys/kern/makesyscalls.sh b/sys/kern/makesyscalls.sh
index e505379..5134275 100644
--- a/sys/kern/makesyscalls.sh
+++ b/sys/kern/makesyscalls.sh
@@ -114,6 +114,7 @@ s/\$//g
printf "#include <sys/signal.h>\n\n" > sysarg
printf "#include <sys/acl.h>\n\n" > sysarg
printf "struct proc;\n\n" > sysarg
+ printf "struct thread;\n\n" > sysarg
printf "#define\tPAD_(t)\t(sizeof(register_t) <= sizeof(t) ? \\\n" > sysarg
printf "\t\t0 : sizeof(register_t) - sizeof(t))\n\n" > sysarg
printf "#if BYTE_ORDER == LITTLE_ENDIAN\n"> sysarg
@@ -320,7 +321,7 @@ s/\$//g
}
if ($2 != "NOPROTO" && (!nosys || funcname != "nosys") && \
(!lkmnosys || funcname != "lkmnosys")) {
- printf("%s\t%s __P((struct proc *, struct %s *))",
+ printf("%s\t%s __P((struct thread *, struct %s *))",
rettype, funcname, argalias) > sysdcl
printf(";\n") > sysdcl
}
@@ -370,7 +371,7 @@ s/\$//g
else if($2 != "CPT_NOA")
printf("struct %s {\n\tregister_t dummy;\n};\n",
argalias) > sysarg
- printf("%s\to%s __P((struct proc *, struct %s *));\n",
+ printf("%s\to%s __P((struct thread *, struct %s *));\n",
rettype, funcname, argalias) > syscompatdcl
printf("\t{ compat(%s%s,%s) },",
mpsafe, argssize, funcname) > sysent
diff --git a/sys/kern/p1003_1b.c b/sys/kern/p1003_1b.c
index 4e43b22..0409d63 100644
--- a/sys/kern/p1003_1b.c
+++ b/sys/kern/p1003_1b.c
@@ -56,10 +56,10 @@ MALLOC_DEFINE(M_P31B, "p1003.1b", "Posix 1003.1B");
* start to use this when they shouldn't. That will be removed if annoying.
*/
int
-syscall_not_present(struct proc *p, const char *s, struct nosys_args *uap)
+syscall_not_present(struct thread *td, const char *s, struct nosys_args *uap)
{
log(LOG_ERR, "cmd %s pid %d tried to use non-present %s\n",
- p->p_comm, p->p_pid, s);
+ td->td_proc->p_comm, td->td_proc->p_pid, s);
/* a " return nosys(p, uap); " here causes a core dump.
*/
@@ -105,9 +105,10 @@ static int sched_attach(void)
/*
* MPSAFE
*/
-int sched_setparam(struct proc *p,
+int sched_setparam(struct thread *td,
struct sched_setparam_args *uap)
{
+ struct thread *targettd;
struct proc *targetp;
int e;
struct sched_param sched_param;
@@ -118,7 +119,8 @@ int sched_setparam(struct proc *p,
mtx_lock(&Giant);
if (uap->pid == 0) {
- targetp = p;
+ targetp = td->td_proc;
+ targettd = td;
PROC_LOCK(targetp);
} else {
targetp = pfind(uap->pid);
@@ -126,12 +128,13 @@ int sched_setparam(struct proc *p,
e = ESRCH;
goto done2;
}
+ targettd = &targetp->p_thread; /* XXXKSE */
}
- e = p_cansched(p, targetp);
+ e = p_cansched(td->td_proc, targetp);
PROC_UNLOCK(targetp);
if (e == 0) {
- e = ksched_setparam(&p->p_retval[0], ksched, targetp,
+ e = ksched_setparam(&td->td_retval[0], ksched, targettd,
(const struct sched_param *)&sched_param);
}
done2:
@@ -142,16 +145,18 @@ done2:
/*
* MPSAFE
*/
-int sched_getparam(struct proc *p,
+int sched_getparam(struct thread *td,
struct sched_getparam_args *uap)
{
int e;
struct sched_param sched_param;
+ struct thread *targettd;
struct proc *targetp;
mtx_lock(&Giant);
if (uap->pid == 0) {
- targetp = p;
+ targetp = td->td_proc;
+ targettd = td;
PROC_LOCK(targetp);
} else {
targetp = pfind(uap->pid);
@@ -159,28 +164,31 @@ int sched_getparam(struct proc *p,
e = ESRCH;
goto done2;
}
+ targettd = &targetp->p_thread; /* XXXKSE */
}
- e = p_cansee(p, targetp);
+ e = p_cansee(td->td_proc, targetp);
PROC_UNLOCK(targetp);
if (e)
goto done2;
- e = ksched_getparam(&p->p_retval[0], ksched, targetp, &sched_param);
+ e = ksched_getparam(&td->td_retval[0], ksched, targettd, &sched_param);
if (e == 0)
e = copyout(&sched_param, uap->param, sizeof(sched_param));
done2:
mtx_unlock(&Giant);
return (e);
}
+
/*
* MPSAFE
*/
-int sched_setscheduler(struct proc *p,
+int sched_setscheduler(struct thread *td,
struct sched_setscheduler_args *uap)
{
int e;
struct sched_param sched_param;
+ struct thread *targettd;
struct proc *targetp;
e = copyin(uap->param, &sched_param, sizeof(sched_param));
@@ -189,7 +197,8 @@ int sched_setscheduler(struct proc *p,
mtx_lock(&Giant);
if (uap->pid == 0) {
- targetp = p;
+ targetp = td->td_proc;
+ targettd = td;
PROC_LOCK(targetp);
} else {
targetp = pfind(uap->pid);
@@ -197,31 +206,34 @@ int sched_setscheduler(struct proc *p,
e = ESRCH;
goto done2;
}
+ targettd = &targetp->p_thread; /* XXXKSE */
}
- e = p_cansched(p, targetp);
+ e = p_cansched(td->td_proc, targetp);
PROC_UNLOCK(targetp);
if (e == 0) {
- e = ksched_setscheduler(&p->p_retval[0], ksched,
- targetp, uap->policy,
- (const struct sched_param *)&sched_param);
+ e = ksched_setscheduler(&td->td_retval[0], ksched, targettd,
+ uap->policy, (const struct sched_param *)&sched_param);
}
done2:
mtx_unlock(&Giant);
return (e);
}
+
/*
* MPSAFE
*/
-int sched_getscheduler(struct proc *p,
+int sched_getscheduler(struct thread *td,
struct sched_getscheduler_args *uap)
{
int e;
+ struct thread *targettd;
struct proc *targetp;
mtx_lock(&Giant);
if (uap->pid == 0) {
- targetp = p;
+ targetp = td->td_proc;
+ targettd = td;
PROC_LOCK(targetp);
} else {
targetp = pfind(uap->pid);
@@ -229,67 +241,75 @@ int sched_getscheduler(struct proc *p,
e = ESRCH;
goto done2;
}
+ targettd = &targetp->p_thread; /* XXXKSE */
}
- e = p_cansee(p, targetp);
+ e = p_cansee(td->td_proc, targetp);
PROC_UNLOCK(targetp);
if (e == 0)
- e = ksched_getscheduler(&p->p_retval[0], ksched, targetp);
+ e = ksched_getscheduler(&td->td_retval[0], ksched, targettd);
done2:
mtx_unlock(&Giant);
return (e);
}
+
/*
* MPSAFE
*/
-int sched_yield(struct proc *p,
+int sched_yield(struct thread *td,
struct sched_yield_args *uap)
{
int error;
mtx_lock(&Giant);
- error = ksched_yield(&p->p_retval[0], ksched);
+ error = ksched_yield(&td->td_retval[0], ksched);
mtx_unlock(&Giant);
return (error);
}
+
/*
* MPSAFE
*/
-int sched_get_priority_max(struct proc *p,
+int sched_get_priority_max(struct thread *td,
struct sched_get_priority_max_args *uap)
{
int error;
mtx_lock(&Giant);
- error = ksched_get_priority_max(&p->p_retval[0], ksched, uap->policy);
+ error = ksched_get_priority_max(&td->td_retval[0], ksched, uap->policy);
mtx_unlock(&Giant);
return (error);
}
+
/*
* MPSAFE
*/
-int sched_get_priority_min(struct proc *p,
+int sched_get_priority_min(struct thread *td,
struct sched_get_priority_min_args *uap)
{
int error;
+
mtx_lock(&Giant);
- error = ksched_get_priority_min(&p->p_retval[0], ksched, uap->policy);
+ error = ksched_get_priority_min(&td->td_retval[0], ksched, uap->policy);
mtx_unlock(&Giant);
return (error);
}
+
/*
* MPSAFE
*/
-int sched_rr_get_interval(struct proc *p,
+int sched_rr_get_interval(struct thread *td,
struct sched_rr_get_interval_args *uap)
{
int e;
+ struct thread *targettd;
struct proc *targetp;
mtx_lock(&Giant);
if (uap->pid == 0) {
- targetp = p;
+ targettd = td;
+ targetp = td->td_proc;
PROC_LOCK(targetp);
} else {
targetp = pfind(uap->pid);
@@ -297,12 +317,13 @@ int sched_rr_get_interval(struct proc *p,
e = ESRCH;
goto done2;
}
+ targettd = &targetp->p_thread; /* XXXKSE */
}
- e = p_cansee(p, targetp);
+ e = p_cansee(td->td_proc, targetp);
PROC_UNLOCK(targetp);
if (e == 0) {
- e = ksched_rr_get_interval(&p->p_retval[0], ksched, targetp,
+ e = ksched_rr_get_interval(&td->td_retval[0], ksched, targettd,
uap->interval);
}
done2:
diff --git a/sys/kern/subr_acl_posix1e.c b/sys/kern/subr_acl_posix1e.c
index 69dbe85..045d1a8 100644
--- a/sys/kern/subr_acl_posix1e.c
+++ b/sys/kern/subr_acl_posix1e.c
@@ -48,11 +48,11 @@
MALLOC_DEFINE(M_ACL, "acl", "access control list");
-static int vacl_set_acl(struct proc *p, struct vnode *vp, acl_type_t type,
+static int vacl_set_acl(struct thread *td, struct vnode *vp, acl_type_t type,
struct acl *aclp);
-static int vacl_get_acl(struct proc *p, struct vnode *vp, acl_type_t type,
+static int vacl_get_acl(struct thread *td, struct vnode *vp, acl_type_t type,
struct acl *aclp);
-static int vacl_aclcheck(struct proc *p, struct vnode *vp,
+static int vacl_aclcheck(struct thread *td, struct vnode *vp,
acl_type_t type, struct acl *aclp);
/*
@@ -562,7 +562,7 @@ acl_posix1e_check(struct acl *acl)
* Given a vnode, set its ACL.
*/
static int
-vacl_set_acl(struct proc *p, struct vnode *vp, acl_type_t type,
+vacl_set_acl(struct thread *td, struct vnode *vp, acl_type_t type,
struct acl *aclp)
{
struct acl inkernacl;
@@ -571,10 +571,10 @@ vacl_set_acl(struct proc *p, struct vnode *vp, acl_type_t type,
error = copyin(aclp, &inkernacl, sizeof(struct acl));
if (error)
return(error);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- error = VOP_SETACL(vp, type, &inkernacl, p->p_ucred, p);
- VOP_UNLOCK(vp, 0, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ error = VOP_SETACL(vp, type, &inkernacl, td->td_proc->p_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
return(error);
}
@@ -582,16 +582,16 @@ vacl_set_acl(struct proc *p, struct vnode *vp, acl_type_t type,
* Given a vnode, get its ACL.
*/
static int
-vacl_get_acl(struct proc *p, struct vnode *vp, acl_type_t type,
+vacl_get_acl(struct thread *td, struct vnode *vp, acl_type_t type,
struct acl *aclp)
{
struct acl inkernelacl;
int error;
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- error = VOP_GETACL(vp, type, &inkernelacl, p->p_ucred, p);
- VOP_UNLOCK(vp, 0, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ error = VOP_GETACL(vp, type, &inkernelacl, td->td_proc->p_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
if (error == 0)
error = copyout(&inkernelacl, aclp, sizeof(struct acl));
return (error);
@@ -601,14 +601,14 @@ vacl_get_acl(struct proc *p, struct vnode *vp, acl_type_t type,
* Given a vnode, delete its ACL.
*/
static int
-vacl_delete(struct proc *p, struct vnode *vp, acl_type_t type)
+vacl_delete(struct thread *td, struct vnode *vp, acl_type_t type)
{
int error;
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- error = VOP_SETACL(vp, ACL_TYPE_DEFAULT, 0, p->p_ucred, p);
- VOP_UNLOCK(vp, 0, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ error = VOP_SETACL(vp, ACL_TYPE_DEFAULT, 0, td->td_proc->p_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
return (error);
}
@@ -616,7 +616,7 @@ vacl_delete(struct proc *p, struct vnode *vp, acl_type_t type)
* Given a vnode, check whether an ACL is appropriate for it
*/
static int
-vacl_aclcheck(struct proc *p, struct vnode *vp, acl_type_t type,
+vacl_aclcheck(struct thread *td, struct vnode *vp, acl_type_t type,
struct acl *aclp)
{
struct acl inkernelacl;
@@ -625,7 +625,7 @@ vacl_aclcheck(struct proc *p, struct vnode *vp, acl_type_t type,
error = copyin(aclp, &inkernelacl, sizeof(struct acl));
if (error)
return(error);
- error = VOP_ACLCHECK(vp, type, &inkernelacl, p->p_ucred, p);
+ error = VOP_ACLCHECK(vp, type, &inkernelacl, td->td_proc->p_ucred, td);
return (error);
}
@@ -641,17 +641,17 @@ vacl_aclcheck(struct proc *p, struct vnode *vp, acl_type_t type,
* MPSAFE
*/
int
-__acl_get_file(struct proc *p, struct __acl_get_file_args *uap)
+__acl_get_file(struct thread *td, struct __acl_get_file_args *uap)
{
struct nameidata nd;
int error;
mtx_lock(&Giant);
/* what flags are required here -- possible not LOCKLEAF? */
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
error = namei(&nd);
if (error == 0) {
- error = vacl_get_acl(p, nd.ni_vp, SCARG(uap, type),
+ error = vacl_get_acl(td, nd.ni_vp, SCARG(uap, type),
SCARG(uap, aclp));
NDFREE(&nd, 0);
}
@@ -665,16 +665,16 @@ __acl_get_file(struct proc *p, struct __acl_get_file_args *uap)
* MPSAFE
*/
int
-__acl_set_file(struct proc *p, struct __acl_set_file_args *uap)
+__acl_set_file(struct thread *td, struct __acl_set_file_args *uap)
{
struct nameidata nd;
int error;
mtx_lock(&Giant);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
error = namei(&nd);
if (error == 0) {
- error = vacl_set_acl(p, nd.ni_vp, SCARG(uap, type),
+ error = vacl_set_acl(td, nd.ni_vp, SCARG(uap, type),
SCARG(uap, aclp));
NDFREE(&nd, 0);
}
@@ -688,15 +688,15 @@ __acl_set_file(struct proc *p, struct __acl_set_file_args *uap)
* MPSAFE
*/
int
-__acl_get_fd(struct proc *p, struct __acl_get_fd_args *uap)
+__acl_get_fd(struct thread *td, struct __acl_get_fd_args *uap)
{
struct file *fp;
int error;
mtx_lock(&Giant);
- error = getvnode(p->p_fd, SCARG(uap, filedes), &fp);
+ error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
if (error == 0) {
- error = vacl_get_acl(p, (struct vnode *)fp->f_data,
+ error = vacl_get_acl(td, (struct vnode *)fp->f_data,
SCARG(uap, type), SCARG(uap, aclp));
}
mtx_unlock(&Giant);
@@ -709,15 +709,15 @@ __acl_get_fd(struct proc *p, struct __acl_get_fd_args *uap)
* MPSAFE
*/
int
-__acl_set_fd(struct proc *p, struct __acl_set_fd_args *uap)
+__acl_set_fd(struct thread *td, struct __acl_set_fd_args *uap)
{
struct file *fp;
int error;
mtx_lock(&Giant);
- error = getvnode(p->p_fd, SCARG(uap, filedes), &fp);
+ error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
if (error == 0) {
- error = vacl_set_acl(p, (struct vnode *)fp->f_data,
+ error = vacl_set_acl(td, (struct vnode *)fp->f_data,
SCARG(uap, type), SCARG(uap, aclp));
}
mtx_unlock(&Giant);
@@ -730,16 +730,16 @@ __acl_set_fd(struct proc *p, struct __acl_set_fd_args *uap)
* MPSAFE
*/
int
-__acl_delete_file(struct proc *p, struct __acl_delete_file_args *uap)
+__acl_delete_file(struct thread *td, struct __acl_delete_file_args *uap)
{
struct nameidata nd;
int error;
mtx_lock(&Giant);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
error = namei(&nd);
if (error == 0) {
- error = vacl_delete(p, nd.ni_vp, SCARG(uap, type));
+ error = vacl_delete(td, nd.ni_vp, SCARG(uap, type));
NDFREE(&nd, 0);
}
mtx_unlock(&Giant);
@@ -752,15 +752,15 @@ __acl_delete_file(struct proc *p, struct __acl_delete_file_args *uap)
* MPSAFE
*/
int
-__acl_delete_fd(struct proc *p, struct __acl_delete_fd_args *uap)
+__acl_delete_fd(struct thread *td, struct __acl_delete_fd_args *uap)
{
struct file *fp;
int error;
mtx_lock(&Giant);
- error = getvnode(p->p_fd, SCARG(uap, filedes), &fp);
+ error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
if (error == 0) {
- error = vacl_delete(p, (struct vnode *)fp->f_data,
+ error = vacl_delete(td, (struct vnode *)fp->f_data,
SCARG(uap, type));
}
mtx_unlock(&Giant);
@@ -773,16 +773,16 @@ __acl_delete_fd(struct proc *p, struct __acl_delete_fd_args *uap)
* MPSAFE
*/
int
-__acl_aclcheck_file(struct proc *p, struct __acl_aclcheck_file_args *uap)
+__acl_aclcheck_file(struct thread *td, struct __acl_aclcheck_file_args *uap)
{
struct nameidata nd;
int error;
mtx_lock(&Giant);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
error = namei(&nd);
if (error == 0) {
- error = vacl_aclcheck(p, nd.ni_vp, SCARG(uap, type),
+ error = vacl_aclcheck(td, nd.ni_vp, SCARG(uap, type),
SCARG(uap, aclp));
NDFREE(&nd, 0);
}
@@ -796,15 +796,15 @@ __acl_aclcheck_file(struct proc *p, struct __acl_aclcheck_file_args *uap)
* MPSAFE
*/
int
-__acl_aclcheck_fd(struct proc *p, struct __acl_aclcheck_fd_args *uap)
+__acl_aclcheck_fd(struct thread *td, struct __acl_aclcheck_fd_args *uap)
{
struct file *fp;
int error;
mtx_lock(&Giant);
- error = getvnode(p->p_fd, SCARG(uap, filedes), &fp);
+ error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
if (error == 0) {
- error = vacl_aclcheck(p, (struct vnode *)fp->f_data,
+ error = vacl_aclcheck(td, (struct vnode *)fp->f_data,
SCARG(uap, type), SCARG(uap, aclp));
}
mtx_unlock(&Giant);
diff --git a/sys/kern/subr_disk.c b/sys/kern/subr_disk.c
index da961ba..4efc5b2 100644
--- a/sys/kern/subr_disk.c
+++ b/sys/kern/subr_disk.c
@@ -225,7 +225,7 @@ SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD, 0, NULL,
*/
static int
-diskopen(dev_t dev, int oflags, int devtype, struct proc *p)
+diskopen(dev_t dev, int oflags, int devtype, struct thread *td)
{
dev_t pdev;
struct disk *dp;
@@ -249,7 +249,7 @@ diskopen(dev_t dev, int oflags, int devtype, struct proc *p)
if (!dsisopen(dp->d_slice)) {
if (!pdev->si_iosize_max)
pdev->si_iosize_max = dev->si_iosize_max;
- error = dp->d_devsw->d_open(pdev, oflags, devtype, p);
+ error = dp->d_devsw->d_open(pdev, oflags, devtype, td);
}
/* Inherit properties from the whole/raw dev_t */
@@ -261,7 +261,7 @@ diskopen(dev_t dev, int oflags, int devtype, struct proc *p)
error = dsopen(dev, devtype, dp->d_dsflags, &dp->d_slice, &dp->d_label);
if (!dsisopen(dp->d_slice))
- dp->d_devsw->d_close(pdev, oflags, devtype, p);
+ dp->d_devsw->d_close(pdev, oflags, devtype, td);
out:
dp->d_flags &= ~DISKFLAG_LOCK;
if (dp->d_flags & DISKFLAG_WANTED) {
@@ -273,7 +273,7 @@ out:
}
static int
-diskclose(dev_t dev, int fflag, int devtype, struct proc *p)
+diskclose(dev_t dev, int fflag, int devtype, struct thread *td)
{
struct disk *dp;
int error;
@@ -286,7 +286,7 @@ diskclose(dev_t dev, int fflag, int devtype, struct proc *p)
return (ENXIO);
dsclose(dev, devtype, dp->d_slice);
if (!dsisopen(dp->d_slice))
- error = dp->d_devsw->d_close(dp->d_dev, fflag, devtype, p);
+ error = dp->d_devsw->d_close(dp->d_dev, fflag, devtype, td);
return (error);
}
@@ -325,7 +325,7 @@ diskstrategy(struct bio *bp)
}
static int
-diskioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
+diskioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
struct disk *dp;
int error;
@@ -337,7 +337,7 @@ diskioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
return (ENXIO);
error = dsioctl(dev, cmd, data, fflag, &dp->d_slice);
if (error == ENOIOCTL)
- error = dp->d_devsw->d_ioctl(dev, cmd, data, fflag, p);
+ error = dp->d_devsw->d_ioctl(dev, cmd, data, fflag, td);
return (error);
}
diff --git a/sys/kern/subr_diskslice.c b/sys/kern/subr_diskslice.c
index 0e20032..76adaa9 100644
--- a/sys/kern/subr_diskslice.c
+++ b/sys/kern/subr_diskslice.c
@@ -816,9 +816,9 @@ dssize(dev, sspp)
if (ssp == NULL || slice >= ssp->dss_nslices
|| !(ssp->dss_slices[slice].ds_openmask & (1 << part))) {
if (devsw(dev)->d_open(dev, FREAD, S_IFCHR,
- (struct proc *)NULL) != 0)
+ (struct thread *)NULL) != 0)
return (-1);
- devsw(dev)->d_close(dev, FREAD, S_IFCHR, (struct proc *)NULL);
+ devsw(dev)->d_close(dev, FREAD, S_IFCHR, (struct thread *)NULL);
ssp = *sspp;
}
lp = ssp->dss_slices[slice].ds_label;
diff --git a/sys/kern/subr_eventhandler.c b/sys/kern/subr_eventhandler.c
index 7d9fcca..f02fb33 100644
--- a/sys/kern/subr_eventhandler.c
+++ b/sys/kern/subr_eventhandler.c
@@ -112,7 +112,7 @@ eventhandler_register(struct eventhandler_list *list, char *name,
eg->ee.ee_priority = priority;
/* sort it into the list */
- lockmgr(&list->el_lock, LK_EXCLUSIVE, NULL, CURPROC);
+ lockmgr(&list->el_lock, LK_EXCLUSIVE, NULL, curthread);
for (ep = TAILQ_FIRST(&list->el_entries);
ep != NULL;
ep = TAILQ_NEXT(ep, ee_link)) {
@@ -123,7 +123,7 @@ eventhandler_register(struct eventhandler_list *list, char *name,
}
if (ep == NULL)
TAILQ_INSERT_TAIL(&list->el_entries, &eg->ee, ee_link);
- lockmgr(&list->el_lock, LK_RELEASE, NULL, CURPROC);
+ lockmgr(&list->el_lock, LK_RELEASE, NULL, curthread);
mtx_unlock(&eventhandler_mutex);
return(&eg->ee);
}
@@ -134,7 +134,7 @@ eventhandler_deregister(struct eventhandler_list *list, eventhandler_tag tag)
struct eventhandler_entry *ep = tag;
/* XXX insert diagnostic check here? */
- lockmgr(&list->el_lock, LK_EXCLUSIVE, NULL, CURPROC);
+ lockmgr(&list->el_lock, LK_EXCLUSIVE, NULL, curthread);
if (ep != NULL) {
/* remove just this entry */
TAILQ_REMOVE(&list->el_entries, ep, ee_link);
@@ -147,7 +147,7 @@ eventhandler_deregister(struct eventhandler_list *list, eventhandler_tag tag)
free(ep, M_EVENTHANDLER);
}
}
- lockmgr(&list->el_lock, LK_RELEASE, NULL, CURPROC);
+ lockmgr(&list->el_lock, LK_RELEASE, NULL, curthread);
}
struct eventhandler_list *
diff --git a/sys/kern/subr_log.c b/sys/kern/subr_log.c
index afcf63f..7fa5618 100644
--- a/sys/kern/subr_log.c
+++ b/sys/kern/subr_log.c
@@ -98,13 +98,13 @@ SYSCTL_INT(_kern, OID_AUTO, log_wakeups_per_second, CTLFLAG_RW,
/*ARGSUSED*/
static int
-logopen(dev_t dev, int flags, int mode, struct proc *p)
+logopen(dev_t dev, int flags, int mode, struct thread *td)
{
if (log_open)
return (EBUSY);
log_open = 1;
callout_init(&logsoftc.sc_callout, 0);
- fsetown(p->p_pid, &logsoftc.sc_sigio); /* signal process only */
+ fsetown(td->td_proc->p_pid, &logsoftc.sc_sigio); /* signal process only */
callout_reset(&logsoftc.sc_callout, hz / log_wakeups_per_second,
logtimeout, NULL);
return (0);
@@ -112,7 +112,7 @@ logopen(dev_t dev, int flags, int mode, struct proc *p)
/*ARGSUSED*/
static int
-logclose(dev_t dev, int flag, int mode, struct proc *p)
+logclose(dev_t dev, int flag, int mode, struct thread *td)
{
log_open = 0;
@@ -167,7 +167,7 @@ logread(dev_t dev, struct uio *uio, int flag)
/*ARGSUSED*/
static int
-logpoll(dev_t dev, int events, struct proc *p)
+logpoll(dev_t dev, int events, struct thread *td)
{
int s;
int revents = 0;
@@ -178,7 +178,7 @@ logpoll(dev_t dev, int events, struct proc *p)
if (msgbufp->msg_bufr != msgbufp->msg_bufx)
revents |= events & (POLLIN | POLLRDNORM);
else
- selrecord(p, &logsoftc.sc_selp);
+ selrecord(curthread, &logsoftc.sc_selp);
}
splx(s);
return (revents);
@@ -209,7 +209,7 @@ logtimeout(void *arg)
/*ARGSUSED*/
static int
-logioctl(dev_t dev, u_long com, caddr_t data, int flag, struct proc *p)
+logioctl(dev_t dev, u_long com, caddr_t data, int flag, struct thread *td)
{
long l;
int s;
diff --git a/sys/kern/subr_mbuf.c b/sys/kern/subr_mbuf.c
index 08def9c..65095e0 100644
--- a/sys/kern/subr_mbuf.c
+++ b/sys/kern/subr_mbuf.c
@@ -862,7 +862,7 @@ mb_reclaim(void)
* XXX: to avoid this.
*/
#ifdef WITNESS
- KASSERT(witness_list(curproc) == 0,
+ KASSERT(witness_list(curthread) == 0,
("mb_reclaim() called with locks held"));
#endif
diff --git a/sys/kern/subr_prf.c b/sys/kern/subr_prf.c
index 3ce2d7d..8708704 100644
--- a/sys/kern/subr_prf.c
+++ b/sys/kern/subr_prf.c
@@ -109,12 +109,13 @@ tablefull(const char *tab)
int
uprintf(const char *fmt, ...)
{
- struct proc *p = curproc;
+ struct thread *td = curthread;
+ struct proc *p = td->td_proc;
va_list ap;
struct putchar_arg pca;
int retval = 0;
- if (p && p != PCPU_GET(idleproc) && p->p_flag & P_CONTROLT &&
+ if (td && td != PCPU_GET(idlethread) && p->p_flag & P_CONTROLT &&
p->p_session->s_ttyvp) {
va_start(ap, fmt);
pca.tty = p->p_session->s_ttyp;
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
index e177065..ae1d5c3 100644
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -356,8 +356,8 @@ struct profil_args {
*/
/* ARGSUSED */
int
-profil(p, uap)
- struct proc *p;
+profil(td, uap)
+ struct thread *td;
register struct profil_args *uap;
{
register struct uprof *upp;
@@ -371,10 +371,10 @@ profil(p, uap)
goto done2;
}
if (uap->scale == 0) {
- stopprofclock(p);
+ stopprofclock(td->td_proc);
goto done2;
}
- upp = &p->p_stats->p_prof;
+ upp = &td->td_proc->p_stats->p_prof;
/* Block profile interrupts while changing state. */
s = splstatclock();
@@ -382,7 +382,7 @@ profil(p, uap)
upp->pr_scale = uap->scale;
upp->pr_base = uap->samples;
upp->pr_size = uap->size;
- startprofclock(p);
+ startprofclock(td->td_proc);
splx(s);
done2:
@@ -413,8 +413,8 @@ done2:
* inaccurate.
*/
void
-addupc_intr(p, pc, ticks)
- register struct proc *p;
+addupc_intr(ke, pc, ticks)
+ register struct kse *ke;
register uintptr_t pc;
u_int ticks;
{
@@ -425,7 +425,7 @@ addupc_intr(p, pc, ticks)
if (ticks == 0)
return;
- prof = &p->p_stats->p_prof;
+ prof = &ke->ke_proc->p_stats->p_prof;
if (pc < prof->pr_off ||
(i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
return; /* out of range; ignore */
@@ -435,7 +435,7 @@ addupc_intr(p, pc, ticks)
mtx_lock_spin(&sched_lock);
prof->pr_addr = pc;
prof->pr_ticks = ticks;
- p->p_sflag |= PS_OWEUPC | PS_ASTPENDING;
+ ke->ke_flags |= KEF_OWEUPC | KEF_ASTPENDING;
mtx_unlock_spin(&sched_lock);
}
}
@@ -445,11 +445,12 @@ addupc_intr(p, pc, ticks)
* update fails, we simply turn off profiling.
*/
void
-addupc_task(p, pc, ticks)
- register struct proc *p;
+addupc_task(ke, pc, ticks)
+ register struct kse *ke;
register uintptr_t pc;
u_int ticks;
{
+ struct proc *p = ke->ke_proc;
register struct uprof *prof;
register caddr_t addr;
register u_int i;
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index c107442..03c6612 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -100,7 +100,7 @@ mp_start(void *dummy)
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_SECOND, mp_start, NULL)
void
-forward_signal(struct proc *p)
+forward_signal(struct thread *td)
{
int id;
@@ -110,9 +110,9 @@ forward_signal(struct proc *p)
* executes ast().
*/
mtx_assert(&sched_lock, MA_OWNED);
- KASSERT(p->p_stat == SRUN, ("forward_signal: process is not SRUN"));
+ KASSERT(td->td_proc->p_stat == SRUN, ("forward_signal: process is not SRUN"));
- CTR1(KTR_SMP, "forward_signal(%p)", p);
+ CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);
if (!smp_started || cold || panicstr)
return;
@@ -120,10 +120,10 @@ forward_signal(struct proc *p)
return;
/* No need to IPI ourself. */
- if (p == curproc)
+ if (td == curthread)
return;
- id = p->p_oncpu;
+ id = td->td_kse->ke_oncpu;
if (id == NOCPU)
return;
ipi_selected(1 << id, IPI_AST);
@@ -133,7 +133,7 @@ void
forward_roundrobin(void)
{
struct globaldata *gd;
- struct proc *p;
+ struct thread *td;
u_int id, map;
mtx_assert(&sched_lock, MA_OWNED);
@@ -146,11 +146,11 @@ forward_roundrobin(void)
return;
map = 0;
SLIST_FOREACH(gd, &cpuhead, gd_allcpu) {
- p = gd->gd_curproc;
+ td = gd->gd_curthread;
id = gd->gd_cpuid;
if (id != PCPU_GET(cpuid) && (id & stopped_cpus) == 0 &&
- p != gd->gd_idleproc) {
- p->p_sflag |= PS_NEEDRESCHED;
+ td != gd->gd_idlethread) {
+ td->td_kse->ke_flags |= KEF_NEEDRESCHED;
map |= id;
}
}
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index 727fe69..ad96be7 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -62,11 +62,14 @@
* MPSAFE
*/
void
-userret(p, frame, oticks)
- struct proc *p;
+userret(td, frame, oticks)
+ struct thread *td;
struct trapframe *frame;
u_int oticks;
{
+ struct proc *p = td->td_proc;
+ struct kse *ke = td->td_kse;
+ struct ksegrp *kg = td->td_ksegrp;
int sig;
mtx_lock(&Giant);
@@ -77,8 +80,8 @@ userret(p, frame, oticks)
mtx_unlock(&Giant);
mtx_lock_spin(&sched_lock);
- p->p_pri.pri_level = p->p_pri.pri_user;
- if (p->p_sflag & PS_NEEDRESCHED) {
+ kg->kg_pri.pri_level = kg->kg_pri.pri_user;
+ if (ke->ke_flags & KEF_NEEDRESCHED) {
/*
* Since we are curproc, a clock interrupt could
* change our priority without changing run queues
@@ -88,7 +91,7 @@ userret(p, frame, oticks)
* indicated by our priority.
*/
DROP_GIANT_NOSWITCH();
- setrunqueue(p);
+ setrunqueue(td);
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
mtx_unlock_spin(&sched_lock);
@@ -105,9 +108,10 @@ userret(p, frame, oticks)
/*
* Charge system time if profiling.
*/
- if (p->p_sflag & PS_PROFIL)
- addupc_task(p, TRAPF_PC(frame),
- ((u_int)p->p_sticks - oticks) * psratio);
+ if (p->p_sflag & PS_PROFIL) {
+ addupc_task(ke, TRAPF_PC(frame),
+ (u_int)(ke->ke_sticks - oticks) * psratio);
+ }
}
/*
@@ -119,42 +123,47 @@ void
ast(framep)
struct trapframe *framep;
{
- struct proc *p = CURPROC;
+ struct thread *td = curthread;
+ struct proc *p = td->td_proc;
+ struct kse *ke = td->td_kse;
u_int prticks, sticks;
critical_t s;
int sflag;
+ int flags;
#if defined(DEV_NPX) && !defined(SMP)
int ucode;
#endif
KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
#ifdef WITNESS
- if (witness_list(p))
+ if (witness_list(td))
panic("Returning to user mode with mutex(s) held");
#endif
mtx_assert(&Giant, MA_NOTOWNED);
s = critical_enter();
- while ((p->p_sflag & (PS_ASTPENDING | PS_NEEDRESCHED)) != 0) {
+ while ((ke->ke_flags & (KEF_ASTPENDING | KEF_NEEDRESCHED)) != 0) {
critical_exit(s);
- p->p_frame = framep;
+ td->td_frame = framep;
/*
* This updates the p_sflag's for the checks below in one
* "atomic" operation with turning off the astpending flag.
* If another AST is triggered while we are handling the
* AST's saved in sflag, the astpending flag will be set and
* we will loop again.
+ * XXXKSE Can't do it atomically in KSE
*/
mtx_lock_spin(&sched_lock);
- sticks = p->p_sticks;
+ sticks = ke->ke_sticks;
sflag = p->p_sflag;
- p->p_sflag &= ~(PS_OWEUPC | PS_ALRMPEND | PS_PROFPEND |
- PS_ASTPENDING);
+ flags = ke->ke_flags;
+ p->p_sflag &= ~(PS_PROFPEND | PS_ALRMPEND);
+ ke->ke_flags &= ~(KEF_OWEUPC | KEF_ASTPENDING);
cnt.v_soft++;
- if (sflag & PS_OWEUPC) {
+ if (flags & KEF_OWEUPC) {
prticks = p->p_stats->p_prof.pr_ticks;
p->p_stats->p_prof.pr_ticks = 0;
mtx_unlock_spin(&sched_lock);
- addupc_task(p, p->p_stats->p_prof.pr_addr, prticks);
+ addupc_task(ke, p->p_stats->p_prof.pr_addr, prticks);
} else
mtx_unlock_spin(&sched_lock);
if (sflag & PS_ALRMPEND) {
@@ -178,7 +187,7 @@ ast(framep)
PROC_UNLOCK(p);
}
- userret(p, framep, sticks);
+ userret(td, framep, sticks);
s = critical_enter();
}
mtx_assert(&Giant, MA_NOTOWNED);
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index 646d99a..bbcac94 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -86,9 +86,9 @@
#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
#define mtx_owner(m) (mtx_unowned((m)) ? NULL \
- : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
+ : (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
-#define SET_PRIO(p, pri) (p)->p_pri.pri_level = (pri)
+#define SET_PRIO(td, pri) (td)->td_ksegrp->kg_pri.pri_level = (pri)
/*
* Lock classes for sleep and spin mutexes.
@@ -105,113 +105,118 @@ struct lock_class lock_class_mtx_spin = {
/*
* Prototypes for non-exported routines.
*/
-static void propagate_priority(struct proc *);
+static void propagate_priority(struct thread *);
static void
-propagate_priority(struct proc *p)
+propagate_priority(struct thread *td)
{
- int pri = p->p_pri.pri_level;
- struct mtx *m = p->p_blocked;
+ struct ksegrp *kg = td->td_ksegrp;
+ int pri = kg->kg_pri.pri_level;
+ struct mtx *m = td->td_blocked;
mtx_assert(&sched_lock, MA_OWNED);
for (;;) {
- struct proc *p1;
+ struct thread *td1;
- p = mtx_owner(m);
+ td = mtx_owner(m);
- if (p == NULL) {
+ if (td == NULL) {
/*
* This really isn't quite right. Really
- * ought to bump priority of process that
+ * ought to bump priority of thread that
* next acquires the mutex.
*/
MPASS(m->mtx_lock == MTX_CONTESTED);
return;
}
- MPASS(p->p_magic == P_MAGIC);
- KASSERT(p->p_stat != SSLEEP, ("sleeping process owns a mutex"));
- if (p->p_pri.pri_level <= pri)
+ MPASS(td->td_proc->p_magic == P_MAGIC);
+ KASSERT(td->td_proc->p_stat != SSLEEP, ("sleeping thread owns a mutex"));
+ if (kg->kg_pri.pri_level <= pri) /* lower is higher priority */
return;
/*
- * Bump this process' priority.
+ * Bump this thread's priority.
*/
- SET_PRIO(p, pri);
+ SET_PRIO(td, pri);
/*
* If lock holder is actually running, just bump priority.
*/
- if (p->p_oncpu != NOCPU) {
- MPASS(p->p_stat == SRUN || p->p_stat == SZOMB || p->p_stat == SSTOP);
+ /* XXXKSE this test is not sufficient */
+ if (td->td_kse && (td->td_kse->ke_oncpu != NOCPU)) {
+ MPASS(td->td_proc->p_stat == SRUN
+ || td->td_proc->p_stat == SZOMB
+ || td->td_proc->p_stat == SSTOP);
return;
}
#ifndef SMP
/*
- * For UP, we check to see if p is curproc (this shouldn't
+ * For UP, we check to see if td is curthread (this shouldn't
* ever happen however as it would mean we are in a deadlock.)
*/
- KASSERT(p != curproc, ("Deadlock detected"));
+ KASSERT(td != curthread, ("Deadlock detected"));
#endif
/*
- * If on run queue move to new run queue, and
- * quit.
+ * If on run queue move to new run queue, and quit.
+ * XXXKSE this gets a lot more complicated under threads
+ * but try anyhow.
*/
- if (p->p_stat == SRUN) {
- MPASS(p->p_blocked == NULL);
- remrunqueue(p);
- setrunqueue(p);
+ if (td->td_proc->p_stat == SRUN) {
+ MPASS(td->td_blocked == NULL);
+ remrunqueue(td);
+ setrunqueue(td);
return;
}
/*
* If we aren't blocked on a mutex, we should be.
*/
- KASSERT(p->p_stat == SMTX, (
+ KASSERT(td->td_proc->p_stat == SMTX, (
"process %d(%s):%d holds %s but isn't blocked on a mutex\n",
- p->p_pid, p->p_comm, p->p_stat,
+ td->td_proc->p_pid, td->td_proc->p_comm, td->td_proc->p_stat,
m->mtx_object.lo_name));
/*
- * Pick up the mutex that p is blocked on.
+ * Pick up the mutex that td is blocked on.
*/
- m = p->p_blocked;
+ m = td->td_blocked;
MPASS(m != NULL);
/*
- * Check if the proc needs to be moved up on
+ * Check if the thread needs to be moved up on
* the blocked chain
*/
- if (p == TAILQ_FIRST(&m->mtx_blocked)) {
+ if (td == TAILQ_FIRST(&m->mtx_blocked)) {
continue;
}
- p1 = TAILQ_PREV(p, procqueue, p_procq);
- if (p1->p_pri.pri_level <= pri) {
+ td1 = TAILQ_PREV(td, threadqueue, td_blkq);
+ if (td1->td_ksegrp->kg_pri.pri_level <= pri) {
continue;
}
/*
- * Remove proc from blocked chain and determine where
- * it should be moved up to. Since we know that p1 has
- * a lower priority than p, we know that at least one
- * process in the chain has a lower priority and that
- * p1 will thus not be NULL after the loop.
+ * Remove thread from blocked chain and determine where
+ * it should be moved up to. Since we know that td1 has
+ * a lower priority than td, we know that at least one
+ * thread in the chain has a lower priority and that
+ * td1 will thus not be NULL after the loop.
*/
- TAILQ_REMOVE(&m->mtx_blocked, p, p_procq);
- TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq) {
- MPASS(p1->p_magic == P_MAGIC);
- if (p1->p_pri.pri_level > pri)
+ TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
+ TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
+ MPASS(td1->td_proc->p_magic == P_MAGIC);
+ if (td1->td_ksegrp->kg_pri.pri_level > pri)
break;
}
- MPASS(p1 != NULL);
- TAILQ_INSERT_BEFORE(p1, p, p_procq);
+ MPASS(td1 != NULL);
+ TAILQ_INSERT_BEFORE(td1, td, td_blkq);
CTR4(KTR_LOCK,
"propagate_priority: p %p moved before %p on [%p] %s",
- p, p1, m, m->mtx_object.lo_name);
+ td, td1, m, m->mtx_object.lo_name);
}
}
@@ -257,7 +262,7 @@ _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
int rval;
- MPASS(curproc != NULL);
+ MPASS(curthread != NULL);
/*
* _mtx_trylock does not accept MTX_NOSWITCH option.
@@ -265,7 +270,7 @@ _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
KASSERT((opts & MTX_NOSWITCH) == 0,
("mtx_trylock() called with invalid option flag(s) %d", opts));
- rval = _obtain_lock(m, curproc);
+ rval = _obtain_lock(m, curthread);
LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
if (rval) {
@@ -291,9 +296,10 @@ _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
- struct proc *p = curproc;
+ struct thread *td = curthread;
+ struct ksegrp *kg = td->td_ksegrp;
- if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
+ if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
m->mtx_recurse++;
atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
if (LOCK_LOG_TEST(&m->mtx_object, opts))
@@ -306,9 +312,9 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
"_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);
- while (!_obtain_lock(m, p)) {
+ while (!_obtain_lock(m, td)) {
uintptr_t v;
- struct proc *p1;
+ struct thread *td1;
mtx_lock_spin(&sched_lock);
/*
@@ -322,15 +328,15 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
/*
* The mutex was marked contested on release. This means that
- * there are processes blocked on it.
+ * there are threads blocked on it.
*/
if (v == MTX_CONTESTED) {
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- MPASS(p1 != NULL);
- m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
+ td1 = TAILQ_FIRST(&m->mtx_blocked);
+ MPASS(td1 != NULL);
+ m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
- if (p1->p_pri.pri_level < p->p_pri.pri_level)
- SET_PRIO(p, p1->p_pri.pri_level);
+ if (td1->td_ksegrp->kg_pri.pri_level < kg->kg_pri.pri_level)
+ SET_PRIO(td, td1->td_ksegrp->kg_pri.pri_level);
mtx_unlock_spin(&sched_lock);
return;
}
@@ -357,8 +363,8 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
* If we're borrowing an interrupted thread's VM context, we
* must clean up before going to sleep.
*/
- if (p->p_ithd != NULL) {
- struct ithd *it = p->p_ithd;
+ if (td->td_ithd != NULL) {
+ struct ithd *it = td->td_ithd;
if (it->it_interrupted) {
if (LOCK_LOG_TEST(&m->mtx_object, opts))
@@ -374,39 +380,39 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
* Put us on the list of threads blocked on this mutex.
*/
if (TAILQ_EMPTY(&m->mtx_blocked)) {
- p1 = (struct proc *)(m->mtx_lock & MTX_FLAGMASK);
- LIST_INSERT_HEAD(&p1->p_contested, m, mtx_contested);
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
+ td1 = (struct thread *)(m->mtx_lock & MTX_FLAGMASK);
+ LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
} else {
- TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
- if (p1->p_pri.pri_level > p->p_pri.pri_level)
+ TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
+ if (td1->td_ksegrp->kg_pri.pri_level > kg->kg_pri.pri_level)
break;
- if (p1)
- TAILQ_INSERT_BEFORE(p1, p, p_procq);
+ if (td1)
+ TAILQ_INSERT_BEFORE(td1, td, td_blkq);
else
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
}
/*
* Save who we're blocked on.
*/
- p->p_blocked = m;
- p->p_mtxname = m->mtx_object.lo_name;
- p->p_stat = SMTX;
- propagate_priority(p);
+ td->td_blocked = m;
+ td->td_mtxname = m->mtx_object.lo_name;
+ td->td_proc->p_stat = SMTX;
+ propagate_priority(td);
if (LOCK_LOG_TEST(&m->mtx_object, opts))
CTR3(KTR_LOCK,
- "_mtx_lock_sleep: p %p blocked on [%p] %s", p, m,
+ "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
m->mtx_object.lo_name);
- p->p_stats->p_ru.ru_nvcsw++;
+ td->td_proc->p_stats->p_ru.ru_nvcsw++;
mi_switch();
if (LOCK_LOG_TEST(&m->mtx_object, opts))
CTR3(KTR_LOCK,
"_mtx_lock_sleep: p %p free from blocked on [%p] %s",
- p, m, m->mtx_object.lo_name);
+ td, m, m->mtx_object.lo_name);
mtx_unlock_spin(&sched_lock);
}
@@ -430,7 +436,7 @@ _mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit, const char *file,
CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
for (;;) {
- if (_obtain_lock(m, curproc))
+ if (_obtain_lock(m, curthread))
break;
/* Give interrupts a chance while we spin. */
@@ -467,11 +473,13 @@ _mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit, const char *file,
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
- struct proc *p, *p1;
+ struct thread *td, *td1;
struct mtx *m1;
int pri;
+ struct ksegrp *kg;
- p = curproc;
+ td = curthread;
+ kg = td->td_ksegrp;
if (mtx_recursed(m)) {
if (--(m->mtx_recurse) == 0)
@@ -485,11 +493,11 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
if (LOCK_LOG_TEST(&m->mtx_object, opts))
CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- MPASS(p->p_magic == P_MAGIC);
- MPASS(p1->p_magic == P_MAGIC);
+ td1 = TAILQ_FIRST(&m->mtx_blocked);
+ MPASS(td->td_proc->p_magic == P_MAGIC);
+ MPASS(td1->td_proc->p_magic == P_MAGIC);
- TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
+ TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);
if (TAILQ_EMPTY(&m->mtx_blocked)) {
LIST_REMOVE(m, mtx_contested);
@@ -500,28 +508,28 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);
pri = PRI_MAX;
- LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
- int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_pri.pri_level;
+ LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
+ int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_ksegrp->kg_pri.pri_level;
if (cp < pri)
pri = cp;
}
- if (pri > p->p_pri.pri_native)
- pri = p->p_pri.pri_native;
- SET_PRIO(p, pri);
+ if (pri > kg->kg_pri.pri_native)
+ pri = kg->kg_pri.pri_native;
+ SET_PRIO(td, pri);
if (LOCK_LOG_TEST(&m->mtx_object, opts))
CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
- m, p1);
+ m, td1);
- p1->p_blocked = NULL;
- p1->p_stat = SRUN;
- setrunqueue(p1);
+ td1->td_blocked = NULL;
+ td1->td_proc->p_stat = SRUN;
+ setrunqueue(td1);
- if ((opts & MTX_NOSWITCH) == 0 && p1->p_pri.pri_level < pri) {
+ if ((opts & MTX_NOSWITCH) == 0 && td1->td_ksegrp->kg_pri.pri_level < pri) {
#ifdef notyet
- if (p->p_ithd != NULL) {
- struct ithd *it = p->p_ithd;
+ if (td->td_ithd != NULL) {
+ struct ithd *it = td->td_ithd;
if (it->it_interrupted) {
if (LOCK_LOG_TEST(&m->mtx_object, opts))
@@ -532,13 +540,13 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
}
}
#endif
- setrunqueue(p);
+ setrunqueue(td);
if (LOCK_LOG_TEST(&m->mtx_object, opts))
CTR2(KTR_LOCK,
"_mtx_unlock_sleep: %p switching out lock=%p", m,
(void *)m->mtx_lock);
- p->p_stats->p_ru.ru_nivcsw++;
+ td->td_proc->p_stats->p_ru.ru_nivcsw++;
mi_switch();
if (LOCK_LOG_TEST(&m->mtx_object, opts))
CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index ecbafd0..51ccd49 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -464,6 +464,7 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
struct lock_class *class;
struct witness *w, *w1;
struct proc *p;
+ struct thread *td;
int i, j;
#ifdef DDB
int go_into_ddb = 0;
@@ -474,7 +475,8 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
return;
w = lock->lo_witness;
class = lock->lo_class;
- p = curproc;
+ td = curthread;
+ p = td->td_proc;
/*
* We have to hold a spinlock to keep lock_list valid across the check
@@ -493,7 +495,7 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
panic("blockable sleep lock (%s) %s @ %s:%d",
class->lc_name, lock->lo_name, file, line);
}
- lock_list = &p->p_sleeplocks;
+ lock_list = &td->td_sleeplocks;
}
mtx_unlock_spin(&w_mtx);
@@ -732,7 +734,7 @@ witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
class->lc_name, lock->lo_name, file, line);
- instance = find_instance(curproc->p_sleeplocks, lock);
+ instance = find_instance(curthread->td_sleeplocks, lock);
if (instance == NULL)
panic("upgrade of unlocked lock (%s) %s @ %s:%d",
class->lc_name, lock->lo_name, file, line);
@@ -763,7 +765,7 @@ witness_downgrade(struct lock_object *lock, int flags, const char *file,
if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
class->lc_name, lock->lo_name, file, line);
- instance = find_instance(curproc->p_sleeplocks, lock);
+ instance = find_instance(curthread->td_sleeplocks, lock);
if (instance == NULL)
panic("downgrade of unlocked lock (%s) %s @ %s:%d",
class->lc_name, lock->lo_name, file, line);
@@ -784,16 +786,18 @@ witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
struct lock_instance *instance;
struct lock_class *class;
struct proc *p;
+ struct thread *td;
critical_t s;
int i, j;
if (witness_cold || witness_dead || lock->lo_witness == NULL ||
panicstr != NULL)
return;
- p = curproc;
+ td = curthread;
+ p = td->td_proc;
class = lock->lo_class;
if (class->lc_flags & LC_SLEEPLOCK)
- lock_list = &p->p_sleeplocks;
+ lock_list = &td->td_sleeplocks;
else
lock_list = PCPU_PTR(spinlocks);
for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
@@ -883,6 +887,7 @@ witness_sleep(int check_only, struct lock_object *lock, const char *file,
struct lock_list_entry **lock_list, *lle;
struct lock_instance *lock1;
struct proc *p;
+ struct thread *td;
critical_t savecrit;
int i, n;
@@ -894,8 +899,9 @@ witness_sleep(int check_only, struct lock_object *lock, const char *file,
* Preemption bad because we need PCPU_PTR(spinlocks) to not change.
*/
savecrit = critical_enter();
- p = curproc;
- lock_list = &p->p_sleeplocks;
+ td = curthread;
+ p = td->td_proc;
+ lock_list = &td->td_sleeplocks;
again:
for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
for (i = lle->ll_count - 1; i >= 0; i--) {
@@ -920,7 +926,7 @@ again:
lock1->li_lock->lo_name, lock1->li_file,
lock1->li_line);
}
- if (lock_list == &p->p_sleeplocks) {
+ if (lock_list == &td->td_sleeplocks) {
lock_list = PCPU_PTR(spinlocks);
goto again;
}
@@ -1323,35 +1329,35 @@ witness_list_locks(struct lock_list_entry **lock_list)
}
/*
- * Calling this on p != curproc is bad unless we are in ddb.
+ * Calling this on td != curthread is bad unless we are in ddb.
*/
int
-witness_list(struct proc *p)
+witness_list(struct thread *td)
{
critical_t savecrit;
int nheld;
KASSERT(!witness_cold, ("%s: witness_cold", __func__));
#ifdef DDB
- KASSERT(p == curproc || db_active,
- ("%s: p != curproc and we aren't in the debugger", __func__));
+ KASSERT(td == curthread || db_active,
+ ("%s: td != curthread and we aren't in the debugger", __func__));
if (!db_active && witness_dead)
return (0);
#else
- KASSERT(p == curproc, ("%s: p != curproc", __func__));
+ KASSERT(td == curthread, ("%s: p != curthread", __func__));
if (witness_dead)
return (0);
#endif
- nheld = witness_list_locks(&p->p_sleeplocks);
+ nheld = witness_list_locks(&td->td_sleeplocks);
/*
- * We only handle spinlocks if p == curproc. This is somewhat broken
+ * We only handle spinlocks if td == curthread. This is somewhat broken
* if p is currently executing on some other CPU and holds spin locks
* as we won't display those locks. If we had a MI way of getting
* the per-cpu data for a given cpu then we could use p->p_oncpu to
* get the list of spinlocks for this process and "fix" this.
*/
- if (p == curproc) {
+ if (td == curthread) {
/*
* Preemption bad because we need PCPU_PTR(spinlocks) to not
* change.
@@ -1374,7 +1380,7 @@ witness_save(struct lock_object *lock, const char **filep, int *linep)
if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
panic("%s: lock (%s) %s is not a sleep lock", __func__,
lock->lo_class->lc_name, lock->lo_name);
- instance = find_instance(curproc->p_sleeplocks, lock);
+ instance = find_instance(curthread->td_sleeplocks, lock);
if (instance == NULL)
panic("%s: lock (%s) %s not locked", __func__,
lock->lo_class->lc_name, lock->lo_name);
@@ -1393,7 +1399,7 @@ witness_restore(struct lock_object *lock, const char *file, int line)
if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
panic("%s: lock (%s) %s is not a sleep lock", __func__,
lock->lo_class->lc_name, lock->lo_name);
- instance = find_instance(curproc->p_sleeplocks, lock);
+ instance = find_instance(curthread->td_sleeplocks, lock);
if (instance == NULL)
panic("%s: lock (%s) %s not locked", __func__,
lock->lo_class->lc_name, lock->lo_name);
@@ -1412,7 +1418,7 @@ witness_assert(struct lock_object *lock, int flags, const char *file, int line)
if (lock->lo_witness == NULL || witness_dead || panicstr != NULL)
return;
if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) != 0)
- instance = find_instance(curproc->p_sleeplocks, lock);
+ instance = find_instance(curthread->td_sleeplocks, lock);
else if ((lock->lo_class->lc_flags & LC_SPINLOCK) != 0)
instance = find_instance(PCPU_GET(spinlocks), lock);
else
@@ -1464,15 +1470,16 @@ witness_assert(struct lock_object *lock, int flags, const char *file, int line)
DB_SHOW_COMMAND(locks, db_witness_list)
{
- struct proc *p;
+ struct thread *td;
pid_t pid;
+ struct proc *p;
if (have_addr) {
pid = (addr % 16) + ((addr >> 4) % 16) * 10 +
((addr >> 8) % 16) * 100 + ((addr >> 12) % 16) * 1000 +
((addr >> 16) % 16) * 10000;
/* sx_slock(&allproc_lock); */
- LIST_FOREACH(p, &allproc, p_list) {
+ FOREACH_PROC_IN_SYSTEM(p) {
if (p->p_pid == pid)
break;
}
@@ -1481,9 +1488,11 @@ DB_SHOW_COMMAND(locks, db_witness_list)
db_printf("pid %d not found\n", pid);
return;
}
- } else
- p = curproc;
- witness_list(p);
+ td = &p->p_thread; /* XXXKSE */
+ } else {
+ td = curthread;
+ }
+ witness_list(td);
}
DB_SHOW_COMMAND(witness, db_witness_display)
diff --git a/sys/kern/subr_xxx.c b/sys/kern/subr_xxx.c
index f3fd2ed..094b67c 100644
--- a/sys/kern/subr_xxx.c
+++ b/sys/kern/subr_xxx.c
@@ -80,22 +80,22 @@ nullop()
*/
int
-noopen(dev, flags, fmt, p)
+noopen(dev, flags, fmt, td)
dev_t dev;
int flags;
int fmt;
- struct proc *p;
+ struct thread *td;
{
return (ENODEV);
}
int
-noclose(dev, flags, fmt, p)
+noclose(dev, flags, fmt, td)
dev_t dev;
int flags;
int fmt;
- struct proc *p;
+ struct thread *td;
{
return (ENODEV);
@@ -122,12 +122,12 @@ nowrite(dev, uio, ioflag)
}
int
-noioctl(dev, cmd, data, flags, p)
+noioctl(dev, cmd, data, flags, td)
dev_t dev;
u_long cmd;
caddr_t data;
int flags;
- struct proc *p;
+ struct thread *td;
{
return (ENODEV);
@@ -172,22 +172,22 @@ nodump(dev)
* minor number.
*/
int
-nullopen(dev, flags, fmt, p)
+nullopen(dev, flags, fmt, td)
dev_t dev;
int flags;
int fmt;
- struct proc *p;
+ struct thread *td;
{
return (0);
}
int
-nullclose(dev, flags, fmt, p)
+nullclose(dev, flags, fmt, td)
dev_t dev;
int flags;
int fmt;
- struct proc *p;
+ struct thread *td;
{
return (0);
diff --git a/sys/kern/sys_generic.c b/sys/kern/sys_generic.c
index 945530a..a897979 100644
--- a/sys/kern/sys_generic.c
+++ b/sys/kern/sys_generic.c
@@ -74,13 +74,13 @@ static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
MALLOC_DEFINE(M_IOV, "iov", "large iov's");
-static int pollscan __P((struct proc *, struct pollfd *, u_int));
-static int pollholddrop __P((struct proc *, struct pollfd *, u_int, int));
-static int selscan __P((struct proc *, fd_mask **, fd_mask **, int));
-static int selholddrop __P((struct proc *, fd_mask *, fd_mask *, int, int));
-static int dofileread __P((struct proc *, struct file *, int, void *,
+static int pollscan __P((struct thread *, struct pollfd *, u_int));
+static int pollholddrop __P((struct thread *, struct pollfd *, u_int, int));
+static int selscan __P((struct thread *, fd_mask **, fd_mask **, int));
+static int selholddrop __P((struct thread *, fd_mask *, fd_mask *, int, int));
+static int dofileread __P((struct thread *, struct file *, int, void *,
size_t, off_t, int));
-static int dofilewrite __P((struct proc *, struct file *, int,
+static int dofilewrite __P((struct thread *, struct file *, int,
const void *, size_t, off_t, int));
struct file*
@@ -113,18 +113,18 @@ struct read_args {
* MPSAFE
*/
int
-read(p, uap)
- struct proc *p;
+read(td, uap)
+ struct thread *td;
register struct read_args *uap;
{
register struct file *fp;
int error;
mtx_lock(&Giant);
- if ((fp = holdfp(p->p_fd, uap->fd, FREAD)) != NULL) {
- error = dofileread(p, fp, uap->fd, uap->buf,
+ if ((fp = holdfp(td->td_proc->p_fd, uap->fd, FREAD)) != NULL) {
+ error = dofileread(td, fp, uap->fd, uap->buf,
uap->nbyte, (off_t)-1, 0);
- fdrop(fp, p);
+ fdrop(fp, td);
} else {
error = EBADF;
}
@@ -148,23 +148,23 @@ struct pread_args {
* MPSAFE
*/
int
-pread(p, uap)
- struct proc *p;
+pread(td, uap)
+ struct thread *td;
register struct pread_args *uap;
{
register struct file *fp;
int error;
mtx_lock(&Giant);
- if ((fp = holdfp(p->p_fd, uap->fd, FREAD)) == NULL) {
+ if ((fp = holdfp(td->td_proc->p_fd, uap->fd, FREAD)) == NULL) {
error = EBADF;
} else if (fp->f_type != DTYPE_VNODE) {
error = ESPIPE;
- fdrop(fp, p);
+ fdrop(fp, td);
} else {
- error = dofileread(p, fp, uap->fd, uap->buf, uap->nbyte,
+ error = dofileread(td, fp, uap->fd, uap->buf, uap->nbyte,
uap->offset, FOF_OFFSET);
- fdrop(fp, p);
+ fdrop(fp, td);
}
mtx_unlock(&Giant);
return(error);
@@ -174,8 +174,8 @@ pread(p, uap)
* Code common for read and pread
*/
int
-dofileread(p, fp, fd, buf, nbyte, offset, flags)
- struct proc *p;
+dofileread(td, fp, fd, buf, nbyte, offset, flags)
+ struct thread *td;
struct file *fp;
int fd, flags;
void *buf;
@@ -201,12 +201,12 @@ dofileread(p, fp, fd, buf, nbyte, offset, flags)
auio.uio_resid = nbyte;
auio.uio_rw = UIO_READ;
auio.uio_segflg = UIO_USERSPACE;
- auio.uio_procp = p;
+ auio.uio_td = td;
#ifdef KTRACE
/*
* if tracing, save a copy of iovec
*/
- if (KTRPOINT(p, KTR_GENIO)) {
+ if (KTRPOINT(td->td_proc, KTR_GENIO)) {
ktriov = aiov;
ktruio = auio;
didktr = 1;
@@ -214,7 +214,7 @@ dofileread(p, fp, fd, buf, nbyte, offset, flags)
#endif
cnt = nbyte;
- if ((error = fo_read(fp, &auio, fp->f_cred, flags, p))) {
+ if ((error = fo_read(fp, &auio, fp->f_cred, flags, td))) {
if (auio.uio_resid != cnt && (error == ERESTART ||
error == EINTR || error == EWOULDBLOCK))
error = 0;
@@ -224,10 +224,10 @@ dofileread(p, fp, fd, buf, nbyte, offset, flags)
if (didktr && error == 0) {
ktruio.uio_iov = &ktriov;
ktruio.uio_resid = cnt;
- ktrgenio(p->p_tracep, fd, UIO_READ, &ktruio, error);
+ ktrgenio(td->td_proc->p_tracep, fd, UIO_READ, &ktruio, error);
}
#endif
- p->p_retval[0] = cnt;
+ td->td_retval[0] = cnt;
return (error);
}
@@ -245,8 +245,8 @@ struct readv_args {
* MPSAFE
*/
int
-readv(p, uap)
- struct proc *p;
+readv(td, uap)
+ struct thread *td;
register struct readv_args *uap;
{
register struct file *fp;
@@ -262,7 +262,7 @@ readv(p, uap)
struct uio ktruio;
#endif
mtx_lock(&Giant);
- fdp = p->p_fd;
+ fdp = td->td_proc->p_fd;
if ((fp = holdfp(fdp, uap->fd, FREAD)) == NULL) {
error = EBADF;
@@ -285,7 +285,7 @@ readv(p, uap)
auio.uio_iovcnt = uap->iovcnt;
auio.uio_rw = UIO_READ;
auio.uio_segflg = UIO_USERSPACE;
- auio.uio_procp = p;
+ auio.uio_td = td;
auio.uio_offset = -1;
if ((error = copyin((caddr_t)uap->iovp, (caddr_t)iov, iovlen)))
goto done;
@@ -302,14 +302,14 @@ readv(p, uap)
/*
* if tracing, save a copy of iovec
*/
- if (KTRPOINT(p, KTR_GENIO)) {
+ if (KTRPOINT(td->td_proc, KTR_GENIO)) {
MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
bcopy((caddr_t)auio.uio_iov, (caddr_t)ktriov, iovlen);
ktruio = auio;
}
#endif
cnt = auio.uio_resid;
- if ((error = fo_read(fp, &auio, fp->f_cred, 0, p))) {
+ if ((error = fo_read(fp, &auio, fp->f_cred, 0, td))) {
if (auio.uio_resid != cnt && (error == ERESTART ||
error == EINTR || error == EWOULDBLOCK))
error = 0;
@@ -320,15 +320,15 @@ readv(p, uap)
if (error == 0) {
ktruio.uio_iov = ktriov;
ktruio.uio_resid = cnt;
- ktrgenio(p->p_tracep, uap->fd, UIO_READ, &ktruio,
+ ktrgenio(td->td_proc->p_tracep, uap->fd, UIO_READ, &ktruio,
error);
}
FREE(ktriov, M_TEMP);
}
#endif
- p->p_retval[0] = cnt;
+ td->td_retval[0] = cnt;
done:
- fdrop(fp, p);
+ fdrop(fp, td);
if (needfree)
FREE(needfree, M_IOV);
done2:
@@ -350,18 +350,18 @@ struct write_args {
* MPSAFE
*/
int
-write(p, uap)
- struct proc *p;
+write(td, uap)
+ struct thread *td;
register struct write_args *uap;
{
register struct file *fp;
int error;
mtx_lock(&Giant);
- if ((fp = holdfp(p->p_fd, uap->fd, FWRITE)) != NULL) {
- error = dofilewrite(p, fp, uap->fd, uap->buf, uap->nbyte,
+ if ((fp = holdfp(td->td_proc->p_fd, uap->fd, FWRITE)) != NULL) {
+ error = dofilewrite(td, fp, uap->fd, uap->buf, uap->nbyte,
(off_t)-1, 0);
- fdrop(fp, p);
+ fdrop(fp, td);
} else {
error = EBADF;
}
@@ -385,31 +385,31 @@ struct pwrite_args {
* MPSAFE
*/
int
-pwrite(p, uap)
- struct proc *p;
+pwrite(td, uap)
+ struct thread *td;
register struct pwrite_args *uap;
{
register struct file *fp;
int error;
mtx_lock(&Giant);
- if ((fp = holdfp(p->p_fd, uap->fd, FWRITE)) == NULL) {
+ if ((fp = holdfp(td->td_proc->p_fd, uap->fd, FWRITE)) == NULL) {
error = EBADF;
} else if (fp->f_type != DTYPE_VNODE) {
error = ESPIPE;
- fdrop(fp, p);
+ fdrop(fp, td);
} else {
- error = dofilewrite(p, fp, uap->fd, uap->buf, uap->nbyte,
+ error = dofilewrite(td, fp, uap->fd, uap->buf, uap->nbyte,
uap->offset, FOF_OFFSET);
- fdrop(fp, p);
+ fdrop(fp, td);
}
mtx_unlock(&Giant);
return(error);
}
static int
-dofilewrite(p, fp, fd, buf, nbyte, offset, flags)
- struct proc *p;
+dofilewrite(td, fp, fd, buf, nbyte, offset, flags)
+ struct thread *td;
struct file *fp;
int fd, flags;
const void *buf;
@@ -435,12 +435,12 @@ dofilewrite(p, fp, fd, buf, nbyte, offset, flags)
auio.uio_resid = nbyte;
auio.uio_rw = UIO_WRITE;
auio.uio_segflg = UIO_USERSPACE;
- auio.uio_procp = p;
+ auio.uio_td = td;
#ifdef KTRACE
/*
* if tracing, save a copy of iovec and uio
*/
- if (KTRPOINT(p, KTR_GENIO)) {
+ if (KTRPOINT(td->td_proc, KTR_GENIO)) {
ktriov = aiov;
ktruio = auio;
didktr = 1;
@@ -449,14 +449,14 @@ dofilewrite(p, fp, fd, buf, nbyte, offset, flags)
cnt = nbyte;
if (fp->f_type == DTYPE_VNODE)
bwillwrite();
- if ((error = fo_write(fp, &auio, fp->f_cred, flags, p))) {
+ if ((error = fo_write(fp, &auio, fp->f_cred, flags, td))) {
if (auio.uio_resid != cnt && (error == ERESTART ||
error == EINTR || error == EWOULDBLOCK))
error = 0;
if (error == EPIPE) {
- PROC_LOCK(p);
- psignal(p, SIGPIPE);
- PROC_UNLOCK(p);
+ PROC_LOCK(td->td_proc);
+ psignal(td->td_proc, SIGPIPE);
+ PROC_UNLOCK(td->td_proc);
}
}
cnt -= auio.uio_resid;
@@ -464,10 +464,10 @@ dofilewrite(p, fp, fd, buf, nbyte, offset, flags)
if (didktr && error == 0) {
ktruio.uio_iov = &ktriov;
ktruio.uio_resid = cnt;
- ktrgenio(p->p_tracep, fd, UIO_WRITE, &ktruio, error);
+ ktrgenio(td->td_proc->p_tracep, fd, UIO_WRITE, &ktruio, error);
}
#endif
- p->p_retval[0] = cnt;
+ td->td_retval[0] = cnt;
return (error);
}
@@ -485,8 +485,8 @@ struct writev_args {
* MPSAFE
*/
int
-writev(p, uap)
- struct proc *p;
+writev(td, uap)
+ struct thread *td;
register struct writev_args *uap;
{
register struct file *fp;
@@ -503,7 +503,7 @@ writev(p, uap)
#endif
mtx_lock(&Giant);
- fdp = p->p_fd;
+ fdp = td->td_proc->p_fd;
if ((fp = holdfp(fdp, uap->fd, FWRITE)) == NULL) {
error = EBADF;
goto done2;
@@ -526,7 +526,7 @@ writev(p, uap)
auio.uio_iovcnt = uap->iovcnt;
auio.uio_rw = UIO_WRITE;
auio.uio_segflg = UIO_USERSPACE;
- auio.uio_procp = p;
+ auio.uio_td = td;
auio.uio_offset = -1;
if ((error = copyin((caddr_t)uap->iovp, (caddr_t)iov, iovlen)))
goto done;
@@ -543,7 +543,7 @@ writev(p, uap)
/*
* if tracing, save a copy of iovec and uio
*/
- if (KTRPOINT(p, KTR_GENIO)) {
+ if (KTRPOINT(td->td_proc, KTR_GENIO)) {
MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
bcopy((caddr_t)auio.uio_iov, (caddr_t)ktriov, iovlen);
ktruio = auio;
@@ -552,14 +552,14 @@ writev(p, uap)
cnt = auio.uio_resid;
if (fp->f_type == DTYPE_VNODE)
bwillwrite();
- if ((error = fo_write(fp, &auio, fp->f_cred, 0, p))) {
+ if ((error = fo_write(fp, &auio, fp->f_cred, 0, td))) {
if (auio.uio_resid != cnt && (error == ERESTART ||
error == EINTR || error == EWOULDBLOCK))
error = 0;
if (error == EPIPE) {
- PROC_LOCK(p);
- psignal(p, SIGPIPE);
- PROC_UNLOCK(p);
+ PROC_LOCK(td->td_proc);
+ psignal(td->td_proc, SIGPIPE);
+ PROC_UNLOCK(td->td_proc);
}
}
cnt -= auio.uio_resid;
@@ -568,15 +568,15 @@ writev(p, uap)
if (error == 0) {
ktruio.uio_iov = ktriov;
ktruio.uio_resid = cnt;
- ktrgenio(p->p_tracep, uap->fd, UIO_WRITE, &ktruio,
+ ktrgenio(td->td_proc->p_tracep, uap->fd, UIO_WRITE, &ktruio,
error);
}
FREE(ktriov, M_TEMP);
}
#endif
- p->p_retval[0] = cnt;
+ td->td_retval[0] = cnt;
done:
- fdrop(fp, p);
+ fdrop(fp, td);
if (needfree)
FREE(needfree, M_IOV);
done2:
@@ -599,8 +599,8 @@ struct ioctl_args {
*/
/* ARGSUSED */
int
-ioctl(p, uap)
- struct proc *p;
+ioctl(td, uap)
+ struct thread *td;
register struct ioctl_args *uap;
{
register struct file *fp;
@@ -617,7 +617,7 @@ ioctl(p, uap)
} ubuf;
mtx_lock(&Giant);
- fdp = p->p_fd;
+ fdp = td->td_proc->p_fd;
if ((u_int)uap->fd >= fdp->fd_nfiles ||
(fp = fdp->fd_ofiles[uap->fd]) == NULL) {
error = EBADF;
@@ -663,7 +663,7 @@ ioctl(p, uap)
if (error) {
if (memp)
free(memp, M_IOCTLOPS);
- fdrop(fp, p);
+ fdrop(fp, td);
goto done2;
}
} else {
@@ -686,7 +686,7 @@ ioctl(p, uap)
fp->f_flag |= FNONBLOCK;
else
fp->f_flag &= ~FNONBLOCK;
- error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, p);
+ error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, td);
break;
case FIOASYNC:
@@ -694,11 +694,11 @@ ioctl(p, uap)
fp->f_flag |= FASYNC;
else
fp->f_flag &= ~FASYNC;
- error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, p);
+ error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, td);
break;
default:
- error = fo_ioctl(fp, com, data, p);
+ error = fo_ioctl(fp, com, data, td);
/*
* Copy any data to user, size was
* already set and checked above.
@@ -709,7 +709,7 @@ ioctl(p, uap)
}
if (memp)
free(memp, M_IOCTLOPS);
- fdrop(fp, p);
+ fdrop(fp, td);
done2:
mtx_unlock(&Giant);
return (error);
@@ -733,8 +733,8 @@ struct select_args {
* MPSAFE
*/
int
-select(p, uap)
- register struct proc *p;
+select(td, uap)
+ register struct thread *td;
register struct select_args *uap;
{
/*
@@ -755,8 +755,8 @@ select(p, uap)
mtx_lock(&Giant);
- if (uap->nd > p->p_fd->fd_nfiles)
- uap->nd = p->p_fd->fd_nfiles; /* forgiving; slightly wrong */
+ if (uap->nd > td->td_proc->p_fd->fd_nfiles)
+ uap->nd = td->td_proc->p_fd->fd_nfiles; /* forgiving; slightly wrong */
/*
* Allocate just enough bits for the non-null fd_sets. Use the
@@ -828,16 +828,16 @@ select(p, uap)
atv.tv_sec = 0;
atv.tv_usec = 0;
}
- selholddrop(p, hibits, hobits, uap->nd, 1);
+ selholddrop(td, hibits, hobits, uap->nd, 1);
timo = 0;
- PROC_LOCK(p);
+ PROC_LOCK(td->td_proc);
retry:
ncoll = nselcoll;
- p->p_flag |= P_SELECT;
- PROC_UNLOCK(p);
- error = selscan(p, ibits, obits, uap->nd);
- PROC_LOCK(p);
- if (error || p->p_retval[0])
+ td->td_flags |= TDF_SELECT;
+ PROC_UNLOCK(td->td_proc);
+ error = selscan(td, ibits, obits, uap->nd);
+ PROC_LOCK(td->td_proc);
+ if (error || td->td_retval[0])
goto done;
if (atv.tv_sec || atv.tv_usec) {
getmicrouptime(&rtv);
@@ -845,15 +845,15 @@ retry:
/*
* An event of our interest may occur during locking a process.
* In order to avoid missing the event that occured during locking
- * the process, test P_SELECT and rescan file descriptors if
+ * the process, test TDF_SELECT and rescan file descriptors if
* necessary.
*/
- if ((p->p_flag & P_SELECT) == 0 || nselcoll != ncoll) {
+ if ((td->td_flags & TDF_SELECT) == 0 || nselcoll != ncoll) {
ncoll = nselcoll;
- p->p_flag |= P_SELECT;
- PROC_UNLOCK(p);
- error = selscan(p, ibits, obits, uap->nd);
- PROC_LOCK(p);
+ td->td_flags |= TDF_SELECT;
+ PROC_UNLOCK(td->td_proc);
+ error = selscan(td, ibits, obits, uap->nd);
+ PROC_LOCK(td->td_proc);
}
goto done;
}
@@ -862,20 +862,20 @@ retry:
timo = ttv.tv_sec > 24 * 60 * 60 ?
24 * 60 * 60 * hz : tvtohz(&ttv);
}
- p->p_flag &= ~P_SELECT;
+ td->td_flags &= ~TDF_SELECT;
if (timo > 0)
- error = cv_timedwait_sig(&selwait, &p->p_mtx, timo);
+ error = cv_timedwait_sig(&selwait, &td->td_proc->p_mtx, timo);
else
- error = cv_wait_sig(&selwait, &p->p_mtx);
+ error = cv_wait_sig(&selwait, &td->td_proc->p_mtx);
if (error == 0)
goto retry;
done:
- p->p_flag &= ~P_SELECT;
- PROC_UNLOCK(p);
- selholddrop(p, hibits, hobits, uap->nd, 0);
+ td->td_flags &= ~TDF_SELECT;
+ PROC_UNLOCK(td->td_proc);
+ selholddrop(td, hibits, hobits, uap->nd, 0);
done_noproclock:
/* select is not restarted after signals... */
if (error == ERESTART)
@@ -903,12 +903,12 @@ done_noproclock:
}
static int
-selholddrop(p, ibits, obits, nfd, hold)
- struct proc *p;
+selholddrop(td, ibits, obits, nfd, hold)
+ struct thread *td;
fd_mask *ibits, *obits;
int nfd, hold;
{
- struct filedesc *fdp = p->p_fd;
+ struct filedesc *fdp = td->td_proc->p_fd;
int i, fd;
fd_mask bits;
struct file *fp;
@@ -930,19 +930,19 @@ selholddrop(p, ibits, obits, nfd, hold)
obits[(fd)/NFDBITS] |=
((fd_mask)1 << ((fd) % NFDBITS));
} else
- fdrop(fp, p);
+ fdrop(fp, td);
}
}
return (0);
}
static int
-selscan(p, ibits, obits, nfd)
- struct proc *p;
+selscan(td, ibits, obits, nfd)
+ struct thread *td;
fd_mask **ibits, **obits;
int nfd;
{
- struct filedesc *fdp = p->p_fd;
+ struct filedesc *fdp = td->td_proc->p_fd;
int msk, i, fd;
fd_mask bits;
struct file *fp;
@@ -962,7 +962,7 @@ selscan(p, ibits, obits, nfd)
fp = fdp->fd_ofiles[fd];
if (fp == NULL)
return (EBADF);
- if (fo_poll(fp, flag[msk], fp->f_cred, p)) {
+ if (fo_poll(fp, flag[msk], fp->f_cred, td)) {
obits[msk][(fd)/NFDBITS] |=
((fd_mask)1 << ((fd) % NFDBITS));
n++;
@@ -970,7 +970,7 @@ selscan(p, ibits, obits, nfd)
}
}
}
- p->p_retval[0] = n;
+ td->td_retval[0] = n;
return (0);
}
@@ -988,8 +988,8 @@ struct poll_args {
* MPSAFE
*/
int
-poll(p, uap)
- struct proc *p;
+poll(td, uap)
+ struct thread *td;
struct poll_args *uap;
{
caddr_t bits;
@@ -1011,7 +1011,8 @@ poll(p, uap)
* least enough for the current limits. We want to be reasonably
* safe, but not overly restrictive.
*/
- if (nfds > p->p_rlimit[RLIMIT_NOFILE].rlim_cur && nfds > FD_SETSIZE) {
+ if ((nfds > td->td_proc->p_rlimit[RLIMIT_NOFILE].rlim_cur) &&
+ (nfds > FD_SETSIZE)) {
error = EINVAL;
goto done2;
}
@@ -1043,16 +1044,16 @@ poll(p, uap)
atv.tv_sec = 0;
atv.tv_usec = 0;
}
- pollholddrop(p, heldbits, nfds, 1);
+ pollholddrop(td, heldbits, nfds, 1);
timo = 0;
- PROC_LOCK(p);
+ PROC_LOCK(td->td_proc);
retry:
ncoll = nselcoll;
- p->p_flag |= P_SELECT;
- PROC_UNLOCK(p);
- error = pollscan(p, (struct pollfd *)bits, nfds);
- PROC_LOCK(p);
- if (error || p->p_retval[0])
+ td->td_flags |= TDF_SELECT;
+ PROC_UNLOCK(td->td_proc);
+ error = pollscan(td, (struct pollfd *)bits, nfds);
+ PROC_LOCK(td->td_proc);
+ if (error || td->td_retval[0])
goto done;
if (atv.tv_sec || atv.tv_usec) {
getmicrouptime(&rtv);
@@ -1060,15 +1061,15 @@ retry:
/*
* An event of our interest may occur during locking a process.
* In order to avoid missing the event that occured during locking
- * the process, test P_SELECT and rescan file descriptors if
+ * the process, test TDF_SELECT and rescan file descriptors if
* necessary.
*/
- if ((p->p_flag & P_SELECT) == 0 || nselcoll != ncoll) {
+ if ((td->td_flags & TDF_SELECT) == 0 || nselcoll != ncoll) {
ncoll = nselcoll;
- p->p_flag |= P_SELECT;
- PROC_UNLOCK(p);
- error = pollscan(p, (struct pollfd *)bits, nfds);
- PROC_LOCK(p);
+ td->td_flags |= TDF_SELECT;
+ PROC_UNLOCK(td->td_proc);
+ error = pollscan(td, (struct pollfd *)bits, nfds);
+ PROC_LOCK(td->td_proc);
}
goto done;
}
@@ -1077,18 +1078,18 @@ retry:
timo = ttv.tv_sec > 24 * 60 * 60 ?
24 * 60 * 60 * hz : tvtohz(&ttv);
}
- p->p_flag &= ~P_SELECT;
+ td->td_flags &= ~TDF_SELECT;
if (timo > 0)
- error = cv_timedwait_sig(&selwait, &p->p_mtx, timo);
+ error = cv_timedwait_sig(&selwait, &td->td_proc->p_mtx, timo);
else
- error = cv_wait_sig(&selwait, &p->p_mtx);
+ error = cv_wait_sig(&selwait, &td->td_proc->p_mtx);
if (error == 0)
goto retry;
done:
- p->p_flag &= ~P_SELECT;
- PROC_UNLOCK(p);
- pollholddrop(p, heldbits, nfds, 0);
+ td->td_flags &= ~TDF_SELECT;
+ PROC_UNLOCK(td->td_proc);
+ pollholddrop(td, heldbits, nfds, 0);
done_noproclock:
/* poll is not restarted after signals... */
if (error == ERESTART)
@@ -1111,13 +1112,13 @@ done2:
}
static int
-pollholddrop(p, fds, nfd, hold)
- struct proc *p;
+pollholddrop(td, fds, nfd, hold)
+ struct thread *td;
struct pollfd *fds;
u_int nfd;
int hold;
{
- register struct filedesc *fdp = p->p_fd;
+ register struct filedesc *fdp = td->td_proc->p_fd;
int i;
struct file *fp;
@@ -1131,19 +1132,19 @@ pollholddrop(p, fds, nfd, hold)
} else
fds->revents = 0;
} else if(fp != NULL && fds->revents)
- fdrop(fp, p);
+ fdrop(fp, td);
}
}
return (0);
}
static int
-pollscan(p, fds, nfd)
- struct proc *p;
+pollscan(td, fds, nfd)
+ struct thread *td;
struct pollfd *fds;
u_int nfd;
{
- register struct filedesc *fdp = p->p_fd;
+ register struct filedesc *fdp = td->td_proc->p_fd;
int i;
struct file *fp;
int n = 0;
@@ -1165,13 +1166,13 @@ pollscan(p, fds, nfd)
* POLLERR if appropriate.
*/
fds->revents = fo_poll(fp, fds->events,
- fp->f_cred, p);
+ fp->f_cred, td);
if (fds->revents != 0)
n++;
}
}
}
- p->p_retval[0] = n;
+ td->td_retval[0] = n;
return (0);
}
@@ -1190,41 +1191,57 @@ struct openbsd_poll_args {
* MPSAFE
*/
int
-openbsd_poll(p, uap)
- register struct proc *p;
+openbsd_poll(td, uap)
+ register struct thread *td;
register struct openbsd_poll_args *uap;
{
- return (poll(p, (struct poll_args *)uap));
+ return (poll(td, (struct poll_args *)uap));
}
/*ARGSUSED*/
int
-seltrue(dev, events, p)
+seltrue(dev, events, td)
dev_t dev;
int events;
- struct proc *p;
+ struct thread *td;
{
return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
+static int
+find_thread_in_proc(struct proc *p, struct thread *td)
+{
+ struct thread *td2;
+ FOREACH_THREAD_IN_PROC(p, td2) {
+ if (td2 == td) {
+ return (1);
+ }
+ }
+ return (0);
+}
+
/*
* Record a select request.
*/
void
selrecord(selector, sip)
- struct proc *selector;
+ struct thread *selector;
struct selinfo *sip;
{
struct proc *p;
pid_t mypid;
- mypid = selector->p_pid;
- if (sip->si_pid == mypid)
+ mypid = selector->td_proc->p_pid;
+ if ((sip->si_pid == mypid) &&
+ (sip->si_thread == selector)) { /* XXXKSE should be an ID? */
return;
- if (sip->si_pid && (p = pfind(sip->si_pid))) {
+ }
+ if (sip->si_pid &&
+ (p = pfind(sip->si_pid)) &&
+ (find_thread_in_proc(p, sip->si_thread))) {
mtx_lock_spin(&sched_lock);
- if (p->p_wchan == (caddr_t)&selwait) {
+ if (sip->si_thread->td_wchan == (caddr_t)&selwait) {
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
sip->si_flags |= SI_COLL;
@@ -1234,6 +1251,7 @@ selrecord(selector, sip)
PROC_UNLOCK(p);
}
sip->si_pid = mypid;
+ sip->si_thread = selector;
}
/*
@@ -1243,6 +1261,7 @@ void
selwakeup(sip)
register struct selinfo *sip;
{
+ struct thread *td;
register struct proc *p;
if (sip->si_pid == 0)
@@ -1254,17 +1273,22 @@ selwakeup(sip)
}
p = pfind(sip->si_pid);
sip->si_pid = 0;
+ td = sip->si_thread;
if (p != NULL) {
+ if (!find_thread_in_proc(p, td)) {
+ PROC_UNLOCK(p); /* lock is in pfind() */;
+ return;
+ }
mtx_lock_spin(&sched_lock);
- if (p->p_wchan == (caddr_t)&selwait) {
- if (p->p_stat == SSLEEP)
- setrunnable(p);
+ if (td->td_wchan == (caddr_t)&selwait) {
+ if (td->td_proc->p_stat == SSLEEP)
+ setrunnable(td);
else
- cv_waitq_remove(p);
+ cv_waitq_remove(td);
} else
- p->p_flag &= ~P_SELECT;
+ td->td_flags &= ~TDF_SELECT;
mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
+ PROC_UNLOCK(p); /* Lock is in pfind() */
}
}
diff --git a/sys/kern/sys_pipe.c b/sys/kern/sys_pipe.c
index dcc57d8..b543ddf 100644
--- a/sys/kern/sys_pipe.c
+++ b/sys/kern/sys_pipe.c
@@ -90,15 +90,15 @@
* interfaces to the outside world
*/
static int pipe_read __P((struct file *fp, struct uio *uio,
- struct ucred *cred, int flags, struct proc *p));
+ struct ucred *cred, int flags, struct thread *td));
static int pipe_write __P((struct file *fp, struct uio *uio,
- struct ucred *cred, int flags, struct proc *p));
-static int pipe_close __P((struct file *fp, struct proc *p));
+ struct ucred *cred, int flags, struct thread *td));
+static int pipe_close __P((struct file *fp, struct thread *td));
static int pipe_poll __P((struct file *fp, int events, struct ucred *cred,
- struct proc *p));
+ struct thread *td));
static int pipe_kqfilter __P((struct file *fp, struct knote *kn));
-static int pipe_stat __P((struct file *fp, struct stat *sb, struct proc *p));
-static int pipe_ioctl __P((struct file *fp, u_long cmd, caddr_t data, struct proc *p));
+static int pipe_stat __P((struct file *fp, struct stat *sb, struct thread *td));
+static int pipe_ioctl __P((struct file *fp, u_long cmd, caddr_t data, struct thread *td));
static struct fileops pipeops = {
pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
@@ -166,13 +166,13 @@ static vm_zone_t pipe_zone;
/* ARGSUSED */
int
-pipe(p, uap)
- struct proc *p;
+pipe(td, uap)
+ struct thread *td;
struct pipe_args /* {
int dummy;
} */ *uap;
{
- struct filedesc *fdp = p->p_fd;
+ struct filedesc *fdp = td->td_proc->p_fd;
struct file *rf, *wf;
struct pipe *rpipe, *wpipe;
int fd, error;
@@ -190,14 +190,14 @@ pipe(p, uap)
rpipe->pipe_state |= PIPE_DIRECTOK;
wpipe->pipe_state |= PIPE_DIRECTOK;
- error = falloc(p, &rf, &fd);
+ error = falloc(td, &rf, &fd);
if (error) {
pipeclose(rpipe);
pipeclose(wpipe);
return (error);
}
fhold(rf);
- p->p_retval[0] = fd;
+ td->td_retval[0] = fd;
/*
* Warning: once we've gotten past allocation of the fd for the
@@ -209,13 +209,13 @@ pipe(p, uap)
rf->f_type = DTYPE_PIPE;
rf->f_data = (caddr_t)rpipe;
rf->f_ops = &pipeops;
- error = falloc(p, &wf, &fd);
+ error = falloc(td, &wf, &fd);
if (error) {
- if (fdp->fd_ofiles[p->p_retval[0]] == rf) {
- fdp->fd_ofiles[p->p_retval[0]] = NULL;
- fdrop(rf, p);
+ if (fdp->fd_ofiles[td->td_retval[0]] == rf) {
+ fdp->fd_ofiles[td->td_retval[0]] = NULL;
+ fdrop(rf, td);
}
- fdrop(rf, p);
+ fdrop(rf, td);
/* rpipe has been closed by fdrop(). */
pipeclose(wpipe);
return (error);
@@ -224,11 +224,11 @@ pipe(p, uap)
wf->f_type = DTYPE_PIPE;
wf->f_data = (caddr_t)wpipe;
wf->f_ops = &pipeops;
- p->p_retval[1] = fd;
+ td->td_retval[1] = fd;
rpipe->pipe_peer = wpipe;
wpipe->pipe_peer = rpipe;
- fdrop(rf, p);
+ fdrop(rf, td);
return (0);
}
@@ -390,11 +390,11 @@ pipeselwakeup(cpipe)
/* ARGSUSED */
static int
-pipe_read(fp, uio, cred, flags, p)
+pipe_read(fp, uio, cred, flags, td)
struct file *fp;
struct uio *uio;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
int flags;
{
struct pipe *rpipe = (struct pipe *) fp->f_data;
@@ -755,11 +755,11 @@ error1:
#endif
static int
-pipe_write(fp, uio, cred, flags, p)
+pipe_write(fp, uio, cred, flags, td)
struct file *fp;
struct uio *uio;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
int flags;
{
int error = 0;
@@ -1029,11 +1029,11 @@ pipe_write(fp, uio, cred, flags, p)
* we implement a very minimal set of ioctls for compatibility with sockets.
*/
int
-pipe_ioctl(fp, cmd, data, p)
+pipe_ioctl(fp, cmd, data, td)
struct file *fp;
u_long cmd;
caddr_t data;
- struct proc *p;
+ struct thread *td;
{
struct pipe *mpipe = (struct pipe *)fp->f_data;
@@ -1078,11 +1078,11 @@ pipe_ioctl(fp, cmd, data, p)
}
int
-pipe_poll(fp, events, cred, p)
+pipe_poll(fp, events, cred, td)
struct file *fp;
int events;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
{
struct pipe *rpipe = (struct pipe *)fp->f_data;
struct pipe *wpipe;
@@ -1108,12 +1108,12 @@ pipe_poll(fp, events, cred, p)
if (revents == 0) {
if (events & (POLLIN | POLLRDNORM)) {
- selrecord(p, &rpipe->pipe_sel);
+ selrecord(curthread, &rpipe->pipe_sel);
rpipe->pipe_state |= PIPE_SEL;
}
if (events & (POLLOUT | POLLWRNORM)) {
- selrecord(p, &wpipe->pipe_sel);
+ selrecord(curthread, &wpipe->pipe_sel);
wpipe->pipe_state |= PIPE_SEL;
}
}
@@ -1122,10 +1122,10 @@ pipe_poll(fp, events, cred, p)
}
static int
-pipe_stat(fp, ub, p)
+pipe_stat(fp, ub, td)
struct file *fp;
struct stat *ub;
- struct proc *p;
+ struct thread *td;
{
struct pipe *pipe = (struct pipe *)fp->f_data;
@@ -1148,9 +1148,9 @@ pipe_stat(fp, ub, p)
/* ARGSUSED */
static int
-pipe_close(fp, p)
+pipe_close(fp, td)
struct file *fp;
- struct proc *p;
+ struct thread *td;
{
struct pipe *cpipe = (struct pipe *)fp->f_data;
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index a81a768..96b38a0 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -202,10 +202,11 @@ struct ptrace_args {
#endif
int
-ptrace(curp, uap)
- struct proc *curp;
+ptrace(td, uap)
+ struct thread *td;
struct ptrace_args *uap;
{
+ struct proc *curp = td->td_proc;
struct proc *p;
struct iovec iov;
struct uio uio;
@@ -314,14 +315,14 @@ ptrace(curp, uap)
/*
* Single step fixup ala procfs
*/
- FIX_SSTEP(p);
+ FIX_SSTEP(&p->p_thread); /* XXXKSE */
#endif
/*
* Actually do the requests
*/
- curp->p_retval[0] = 0;
+ td->td_retval[0] = 0;
switch (uap->req) {
case PT_TRACE_ME:
@@ -356,15 +357,15 @@ ptrace(curp, uap)
PHOLD(p);
if (uap->req == PT_STEP) {
- if ((error = ptrace_single_step (p))) {
+ if ((error = ptrace_single_step (td))) {
PRELE(p);
return error;
}
}
if (uap->addr != (caddr_t)1) {
- fill_kinfo_proc (p, &p->p_addr->u_kproc);
- if ((error = ptrace_set_pc (p,
+ fill_kinfo_proc (p, &p->p_uarea->u_kproc);
+ if ((error = ptrace_set_pc (td,
(u_long)(uintfptr_t)uap->addr))) {
PRELE(p);
return error;
@@ -403,7 +404,7 @@ ptrace(curp, uap)
mtx_lock_spin(&sched_lock);
if (p->p_stat == SSTOP) {
p->p_xstat = uap->data;
- setrunnable(p);
+ setrunnable(&p->p_thread); /* XXXKSE */
mtx_unlock_spin(&sched_lock);
} else {
mtx_unlock_spin(&sched_lock);
@@ -421,7 +422,7 @@ ptrace(curp, uap)
case PT_READ_I:
case PT_READ_D:
/* write = 0 set above */
- iov.iov_base = write ? (caddr_t)&uap->data : (caddr_t)curp->p_retval;
+ iov.iov_base = write ? (caddr_t)&uap->data : (caddr_t)td->td_retval;
iov.iov_len = sizeof(int);
uio.uio_iov = &iov;
uio.uio_iovcnt = 1;
@@ -429,7 +430,7 @@ ptrace(curp, uap)
uio.uio_resid = sizeof(int);
uio.uio_segflg = UIO_SYSSPACE; /* ie: the uap */
uio.uio_rw = write ? UIO_WRITE : UIO_READ;
- uio.uio_procp = p;
+ uio.uio_td = td;
error = procfs_domem(curp, p, NULL, &uio);
if (uio.uio_resid != 0) {
/*
@@ -460,7 +461,7 @@ ptrace(curp, uap)
/* write = 0 above */
#endif /* PT_SETREGS */
#if defined(PT_SETREGS) || defined(PT_GETREGS)
- if (!procfs_validregs(p)) /* no P_SYSTEM procs please */
+ if (!procfs_validregs(td)) /* no P_SYSTEM procs please */
return EINVAL;
else {
iov.iov_base = uap->addr;
@@ -471,7 +472,7 @@ ptrace(curp, uap)
uio.uio_resid = sizeof(struct reg);
uio.uio_segflg = UIO_USERSPACE;
uio.uio_rw = write ? UIO_WRITE : UIO_READ;
- uio.uio_procp = curp;
+ uio.uio_td = td;
return (procfs_doregs(curp, p, NULL, &uio));
}
#endif /* defined(PT_SETREGS) || defined(PT_GETREGS) */
@@ -486,7 +487,7 @@ ptrace(curp, uap)
/* write = 0 above */
#endif /* PT_SETFPREGS */
#if defined(PT_SETFPREGS) || defined(PT_GETFPREGS)
- if (!procfs_validfpregs(p)) /* no P_SYSTEM procs please */
+ if (!procfs_validfpregs(td)) /* no P_SYSTEM procs please */
return EINVAL;
else {
iov.iov_base = uap->addr;
@@ -497,7 +498,7 @@ ptrace(curp, uap)
uio.uio_resid = sizeof(struct fpreg);
uio.uio_segflg = UIO_USERSPACE;
uio.uio_rw = write ? UIO_WRITE : UIO_READ;
- uio.uio_procp = curp;
+ uio.uio_td = td;
return (procfs_dofpregs(curp, p, NULL, &uio));
}
#endif /* defined(PT_SETFPREGS) || defined(PT_GETFPREGS) */
@@ -512,7 +513,7 @@ ptrace(curp, uap)
/* write = 0 above */
#endif /* PT_SETDBREGS */
#if defined(PT_SETDBREGS) || defined(PT_GETDBREGS)
- if (!procfs_validdbregs(p)) /* no P_SYSTEM procs please */
+ if (!procfs_validdbregs(td)) /* no P_SYSTEM procs please */
return EINVAL;
else {
iov.iov_base = uap->addr;
@@ -523,7 +524,7 @@ ptrace(curp, uap)
uio.uio_resid = sizeof(struct dbreg);
uio.uio_segflg = UIO_USERSPACE;
uio.uio_rw = write ? UIO_WRITE : UIO_READ;
- uio.uio_procp = curp;
+ uio.uio_td = td;
return (procfs_dodbregs(curp, p, NULL, &uio));
}
#endif /* defined(PT_SETDBREGS) || defined(PT_GETDBREGS) */
diff --git a/sys/kern/sys_socket.c b/sys/kern/sys_socket.c
index dc72a8a..8822fcb 100644
--- a/sys/kern/sys_socket.c
+++ b/sys/kern/sys_socket.c
@@ -57,11 +57,11 @@ struct fileops socketops = {
/* ARGSUSED */
int
-soo_read(fp, uio, cred, flags, p)
+soo_read(fp, uio, cred, flags, td)
struct file *fp;
struct uio *uio;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
int flags;
{
struct socket *so = (struct socket *)fp->f_data;
@@ -70,24 +70,24 @@ soo_read(fp, uio, cred, flags, p)
/* ARGSUSED */
int
-soo_write(fp, uio, cred, flags, p)
+soo_write(fp, uio, cred, flags, td)
struct file *fp;
struct uio *uio;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
int flags;
{
struct socket *so = (struct socket *)fp->f_data;
return so->so_proto->pr_usrreqs->pru_sosend(so, 0, uio, 0, 0, 0,
- uio->uio_procp);
+ uio->uio_td);
}
int
-soo_ioctl(fp, cmd, data, p)
+soo_ioctl(fp, cmd, data, td)
struct file *fp;
u_long cmd;
register caddr_t data;
- struct proc *p;
+ struct thread *td;
{
register struct socket *so = (struct socket *)fp->f_data;
@@ -140,28 +140,28 @@ soo_ioctl(fp, cmd, data, p)
* different entry since a socket's unnecessary
*/
if (IOCGROUP(cmd) == 'i')
- return (ifioctl(so, cmd, data, p));
+ return (ifioctl(so, cmd, data, td));
if (IOCGROUP(cmd) == 'r')
return (rtioctl(cmd, data));
- return ((*so->so_proto->pr_usrreqs->pru_control)(so, cmd, data, 0, p));
+ return ((*so->so_proto->pr_usrreqs->pru_control)(so, cmd, data, 0, td));
}
int
-soo_poll(fp, events, cred, p)
+soo_poll(fp, events, cred, td)
struct file *fp;
int events;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
{
struct socket *so = (struct socket *)fp->f_data;
- return so->so_proto->pr_usrreqs->pru_sopoll(so, events, cred, p);
+ return so->so_proto->pr_usrreqs->pru_sopoll(so, events, cred, td);
}
int
-soo_stat(fp, ub, p)
+soo_stat(fp, ub, td)
struct file *fp;
struct stat *ub;
- struct proc *p;
+ struct thread *td;
{
struct socket *so = (struct socket *)fp->f_data;
@@ -184,9 +184,9 @@ soo_stat(fp, ub, p)
/* ARGSUSED */
int
-soo_close(fp, p)
+soo_close(fp, td)
struct file *fp;
- struct proc *p;
+ struct thread *td;
{
int error = 0;
diff --git a/sys/kern/sysv_ipc.c b/sys/kern/sysv_ipc.c
index f28d660..70e2f4b 100644
--- a/sys/kern/sysv_ipc.c
+++ b/sys/kern/sysv_ipc.c
@@ -83,11 +83,12 @@ shmexit(p)
*/
int
-ipcperm(p, perm, mode)
- struct proc *p;
+ipcperm(td, perm, mode)
+ struct thread *td;
struct ipc_perm *perm;
int mode;
{
+ struct proc *p = td->td_proc;
struct ucred *cred = p->p_ucred;
/* Check for user match. */
diff --git a/sys/kern/sysv_msg.c b/sys/kern/sysv_msg.c
index d6badbe..76bb22c 100644
--- a/sys/kern/sysv_msg.c
+++ b/sys/kern/sysv_msg.c
@@ -269,8 +269,8 @@ MODULE_VERSION(sysvmsg, 1);
* MPSAFE
*/
int
-msgsys(p, uap)
- struct proc *p;
+msgsys(td, uap)
+ struct thread *td;
/* XXX actually varargs. */
struct msgsys_args /* {
u_int which;
@@ -284,8 +284,7 @@ msgsys(p, uap)
int error;
mtx_lock(&Giant);
-
- if (!jail_sysvipc_allowed && jailed(p->p_ucred)) {
+ if (!jail_sysvipc_allowed && jailed(td->td_proc->p_ucred)) {
error = ENOSYS;
goto done2;
}
@@ -293,7 +292,7 @@ msgsys(p, uap)
error = EINVAL;
goto done2;
}
- error = (*msgcalls[uap->which])(p, &uap->a2);
+ error = (*msgcalls[uap->which])(td, &uap->a2);
done2:
mtx_unlock(&Giant);
return (error);
@@ -335,8 +334,8 @@ struct msgctl_args {
* MPSAFE
*/
int
-msgctl(p, uap)
- struct proc *p;
+msgctl(td, uap)
+ struct thread *td;
register struct msgctl_args *uap;
{
int msqid = uap->msqid;
@@ -350,8 +349,7 @@ msgctl(p, uap)
printf("call to msgctl(%d, %d, 0x%x)\n", msqid, cmd, user_msqptr);
#endif
mtx_lock(&Giant);
-
- if (!jail_sysvipc_allowed && jailed(p->p_ucred)) {
+ if (!jail_sysvipc_allowed && jailed(td->td_proc->p_ucred)) {
error = ENOSYS;
goto done2;
}
@@ -392,7 +390,7 @@ msgctl(p, uap)
case IPC_RMID:
{
struct msg *msghdr;
- if ((error = ipcperm(p, &msqptr->msg_perm, IPC_M)))
+ if ((error = ipcperm(td, &msqptr->msg_perm, IPC_M)))
goto done2;
/* Free the message headers */
msghdr = msqptr->msg_first;
@@ -420,12 +418,12 @@ msgctl(p, uap)
break;
case IPC_SET:
- if ((error = ipcperm(p, &msqptr->msg_perm, IPC_M)))
+ if ((error = ipcperm(td, &msqptr->msg_perm, IPC_M)))
goto done2;
if ((error = copyin(user_msqptr, &msqbuf, sizeof(msqbuf))) != 0)
goto done2;
if (msqbuf.msg_qbytes > msqptr->msg_qbytes) {
- error = suser(p);
+ error = suser_td(td);
if (error)
goto done2;
}
@@ -452,7 +450,7 @@ msgctl(p, uap)
break;
case IPC_STAT:
- if ((error = ipcperm(p, &msqptr->msg_perm, IPC_R))) {
+ if ((error = ipcperm(td, &msqptr->msg_perm, IPC_R))) {
#ifdef MSG_DEBUG_OK
printf("requester doesn't have read access\n");
#endif
@@ -471,7 +469,7 @@ msgctl(p, uap)
}
if (error == 0)
- p->p_retval[0] = rval;
+ td->td_retval[0] = rval;
done2:
mtx_unlock(&Giant);
return(error);
@@ -488,14 +486,14 @@ struct msgget_args {
* MPSAFE
*/
int
-msgget(p, uap)
- struct proc *p;
+msgget(td, uap)
+ struct thread *td;
register struct msgget_args *uap;
{
int msqid, error = 0;
int key = uap->key;
int msgflg = uap->msgflg;
- struct ucred *cred = p->p_ucred;
+ struct ucred *cred = td->td_proc->p_ucred;
register struct msqid_ds *msqptr = NULL;
#ifdef MSG_DEBUG_OK
@@ -503,8 +501,7 @@ msgget(p, uap)
#endif
mtx_lock(&Giant);
-
- if (!jail_sysvipc_allowed && jailed(p->p_ucred)) {
+ if (!jail_sysvipc_allowed && jailed(td->td_proc->p_ucred)) {
error = ENOSYS;
goto done2;
}
@@ -527,7 +524,7 @@ msgget(p, uap)
error = EEXIST;
goto done2;
}
- if ((error = ipcperm(p, &msqptr->msg_perm, msgflg & 0700 ))) {
+ if ((error = ipcperm(td, &msqptr->msg_perm, msgflg & 0700 ))) {
#ifdef MSG_DEBUG_OK
printf("requester doesn't have 0%o access\n",
msgflg & 0700);
@@ -592,7 +589,7 @@ msgget(p, uap)
found:
/* Construct the unique msqid */
- p->p_retval[0] = IXSEQ_TO_IPCID(msqid, msqptr->msg_perm);
+ td->td_retval[0] = IXSEQ_TO_IPCID(msqid, msqptr->msg_perm);
done2:
mtx_unlock(&Giant);
return (error);
@@ -611,8 +608,8 @@ struct msgsnd_args {
* MPSAFE
*/
int
-msgsnd(p, uap)
- struct proc *p;
+msgsnd(td, uap)
+ struct thread *td;
register struct msgsnd_args *uap;
{
int msqid = uap->msqid;
@@ -629,8 +626,7 @@ msgsnd(p, uap)
msgflg);
#endif
mtx_lock(&Giant);
-
- if (!jail_sysvipc_allowed && jailed(p->p_ucred)) {
+ if (!jail_sysvipc_allowed && jailed(td->td_proc->p_ucred)) {
error = ENOSYS;
goto done2;
}
@@ -662,7 +658,7 @@ msgsnd(p, uap)
goto done2;
}
- if ((error = ipcperm(p, &msqptr->msg_perm, IPC_W))) {
+ if ((error = ipcperm(td, &msqptr->msg_perm, IPC_W))) {
#ifdef MSG_DEBUG_OK
printf("requester doesn't have write access\n");
#endif
@@ -929,11 +925,11 @@ msgsnd(p, uap)
msqptr->msg_cbytes += msghdr->msg_ts;
msqptr->msg_qnum++;
- msqptr->msg_lspid = p->p_pid;
+ msqptr->msg_lspid = td->td_proc->p_pid;
msqptr->msg_stime = time_second;
wakeup((caddr_t)msqptr);
- p->p_retval[0] = 0;
+ td->td_retval[0] = 0;
done2:
mtx_unlock(&Giant);
return (error);
@@ -953,8 +949,8 @@ struct msgrcv_args {
* MPSAFE
*/
int
-msgrcv(p, uap)
- struct proc *p;
+msgrcv(td, uap)
+ struct thread *td;
register struct msgrcv_args *uap;
{
int msqid = uap->msqid;
@@ -974,8 +970,7 @@ msgrcv(p, uap)
#endif
mtx_lock(&Giant);
-
- if (!jail_sysvipc_allowed && jailed(p->p_ucred)) {
+ if (!jail_sysvipc_allowed && jailed(td->td_proc->p_ucred)) {
error = ENOSYS;
goto done2;
}
@@ -1007,7 +1002,7 @@ msgrcv(p, uap)
goto done2;
}
- if ((error = ipcperm(p, &msqptr->msg_perm, IPC_R))) {
+ if ((error = ipcperm(td, &msqptr->msg_perm, IPC_R))) {
#ifdef MSG_DEBUG_OK
printf("requester doesn't have read access\n");
#endif
@@ -1159,7 +1154,7 @@ msgrcv(p, uap)
msqptr->msg_cbytes -= msghdr->msg_ts;
msqptr->msg_qnum--;
- msqptr->msg_lrpid = p->p_pid;
+ msqptr->msg_lrpid = td->td_proc->p_pid;
msqptr->msg_rtime = time_second;
/*
@@ -1228,7 +1223,7 @@ msgrcv(p, uap)
msg_freehdr(msghdr);
wakeup((caddr_t)msqptr);
- p->p_retval[0] = msgsz;
+ td->td_retval[0] = msgsz;
done2:
mtx_unlock(&Giant);
return (error);
diff --git a/sys/kern/sysv_sem.c b/sys/kern/sysv_sem.c
index 0b9a0b6..b8bf079 100644
--- a/sys/kern/sysv_sem.c
+++ b/sys/kern/sysv_sem.c
@@ -34,15 +34,15 @@ static int sysctl_sema __P((SYSCTL_HANDLER_ARGS));
#ifndef _SYS_SYSPROTO_H_
struct __semctl_args;
-int __semctl __P((struct proc *p, struct __semctl_args *uap));
+int __semctl __P((struct thread *td, struct __semctl_args *uap));
struct semget_args;
-int semget __P((struct proc *p, struct semget_args *uap));
+int semget __P((struct thread *td, struct semget_args *uap));
struct semop_args;
-int semop __P((struct proc *p, struct semop_args *uap));
+int semop __P((struct thread *td, struct semop_args *uap));
#endif
-static struct sem_undo *semu_alloc __P((struct proc *p));
-static int semundo_adjust __P((struct proc *p, struct sem_undo **supptr,
+static struct sem_undo *semu_alloc __P((struct thread *td));
+static int semundo_adjust __P((struct thread *td, struct sem_undo **supptr,
int semid, int semnum, int adjval));
static void semundo_clear __P((int semid, int semnum));
@@ -250,8 +250,8 @@ MODULE_VERSION(sysvsem, 1);
* MPSAFE
*/
int
-semsys(p, uap)
- struct proc *p;
+semsys(td, uap)
+ struct thread *td;
/* XXX actually varargs. */
struct semsys_args /* {
u_int which;
@@ -264,17 +264,15 @@ semsys(p, uap)
int error;
mtx_lock(&Giant);
-
- if (!jail_sysvipc_allowed && jailed(p->p_ucred)) {
+ if (!jail_sysvipc_allowed && jailed(td->td_proc->p_ucred)) {
error = ENOSYS;
goto done2;
}
-
if (uap->which >= sizeof(semcalls)/sizeof(semcalls[0])) {
error = EINVAL;
goto done2;
}
- error = (*semcalls[uap->which])(p, &uap->a2);
+ error = (*semcalls[uap->which])(td, &uap->a2);
done2:
mtx_unlock(&Giant);
return (error);
@@ -286,8 +284,8 @@ done2:
*/
static struct sem_undo *
-semu_alloc(p)
- struct proc *p;
+semu_alloc(td)
+ struct thread *td;
{
register int i;
register struct sem_undo *suptr;
@@ -312,7 +310,7 @@ semu_alloc(p)
suptr->un_next = semu_list;
semu_list = suptr;
suptr->un_cnt = 0;
- suptr->un_proc = p;
+ suptr->un_proc = td->td_proc;
return(suptr);
}
}
@@ -356,12 +354,13 @@ semu_alloc(p)
*/
static int
-semundo_adjust(p, supptr, semid, semnum, adjval)
- register struct proc *p;
+semundo_adjust(td, supptr, semid, semnum, adjval)
+ register struct thread *td;
struct sem_undo **supptr;
int semid, semnum;
int adjval;
{
+ struct proc *p = td->td_proc;
register struct sem_undo *suptr;
register struct undo *sunptr;
int i;
@@ -381,7 +380,7 @@ semundo_adjust(p, supptr, semid, semnum, adjval)
if (suptr == NULL) {
if (adjval == 0)
return(0);
- suptr = semu_alloc(p);
+ suptr = semu_alloc(td);
if (suptr == NULL)
return(ENOSPC);
*supptr = suptr;
@@ -466,8 +465,8 @@ struct __semctl_args {
* MPSAFE
*/
int
-__semctl(p, uap)
- struct proc *p;
+__semctl(td, uap)
+ struct thread *td;
register struct __semctl_args *uap;
{
int semid = uap->semid;
@@ -475,7 +474,7 @@ __semctl(p, uap)
int cmd = uap->cmd;
union semun *arg = uap->arg;
union semun real_arg;
- struct ucred *cred = p->p_ucred;
+ struct ucred *cred = td->td_proc->p_ucred;
int i, rval, error;
struct semid_ds sbuf;
register struct semid_ds *semaptr;
@@ -484,8 +483,7 @@ __semctl(p, uap)
printf("call to semctl(%d, %d, %d, 0x%x)\n", semid, semnum, cmd, arg);
#endif
mtx_lock(&Giant);
-
- if (!jail_sysvipc_allowed && jailed(p->p_ucred)) {
+ if (!jail_sysvipc_allowed && jailed(td->td_proc->p_ucred)) {
error = ENOSYS;
goto done2;
}
@@ -508,7 +506,7 @@ __semctl(p, uap)
switch (cmd) {
case IPC_RMID:
- if ((error = ipcperm(p, &semaptr->sem_perm, IPC_M)))
+ if ((error = ipcperm(td, &semaptr->sem_perm, IPC_M)))
goto done2;
semaptr->sem_perm.cuid = cred->cr_uid;
semaptr->sem_perm.uid = cred->cr_uid;
@@ -526,7 +524,7 @@ __semctl(p, uap)
break;
case IPC_SET:
- if ((error = ipcperm(p, &semaptr->sem_perm, IPC_M)))
+ if ((error = ipcperm(td, &semaptr->sem_perm, IPC_M)))
goto done2;
if ((error = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
goto done2;
@@ -542,7 +540,7 @@ __semctl(p, uap)
break;
case IPC_STAT:
- if ((error = ipcperm(p, &semaptr->sem_perm, IPC_R)))
+ if ((error = ipcperm(td, &semaptr->sem_perm, IPC_R)))
goto done2;
if ((error = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
goto done2;
@@ -551,7 +549,7 @@ __semctl(p, uap)
break;
case GETNCNT:
- if ((error = ipcperm(p, &semaptr->sem_perm, IPC_R)))
+ if ((error = ipcperm(td, &semaptr->sem_perm, IPC_R)))
goto done2;
if (semnum < 0 || semnum >= semaptr->sem_nsems) {
error = EINVAL;
@@ -561,7 +559,7 @@ __semctl(p, uap)
break;
case GETPID:
- if ((error = ipcperm(p, &semaptr->sem_perm, IPC_R)))
+ if ((error = ipcperm(td, &semaptr->sem_perm, IPC_R)))
goto done2;
if (semnum < 0 || semnum >= semaptr->sem_nsems) {
error = EINVAL;
@@ -571,7 +569,7 @@ __semctl(p, uap)
break;
case GETVAL:
- if ((error = ipcperm(p, &semaptr->sem_perm, IPC_R)))
+ if ((error = ipcperm(td, &semaptr->sem_perm, IPC_R)))
goto done2;
if (semnum < 0 || semnum >= semaptr->sem_nsems) {
error = EINVAL;
@@ -581,7 +579,7 @@ __semctl(p, uap)
break;
case GETALL:
- if ((error = ipcperm(p, &semaptr->sem_perm, IPC_R)))
+ if ((error = ipcperm(td, &semaptr->sem_perm, IPC_R)))
goto done2;
if ((error = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
goto done2;
@@ -594,7 +592,7 @@ __semctl(p, uap)
break;
case GETZCNT:
- if ((error = ipcperm(p, &semaptr->sem_perm, IPC_R)))
+ if ((error = ipcperm(td, &semaptr->sem_perm, IPC_R)))
goto done2;
if (semnum < 0 || semnum >= semaptr->sem_nsems) {
error = EINVAL;
@@ -604,7 +602,7 @@ __semctl(p, uap)
break;
case SETVAL:
- if ((error = ipcperm(p, &semaptr->sem_perm, IPC_W)))
+ if ((error = ipcperm(td, &semaptr->sem_perm, IPC_W)))
goto done2;
if (semnum < 0 || semnum >= semaptr->sem_nsems) {
error = EINVAL;
@@ -618,7 +616,7 @@ __semctl(p, uap)
break;
case SETALL:
- if ((error = ipcperm(p, &semaptr->sem_perm, IPC_W)))
+ if ((error = ipcperm(td, &semaptr->sem_perm, IPC_W)))
goto done2;
if ((error = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
goto done2;
@@ -639,7 +637,7 @@ __semctl(p, uap)
}
if (error == 0)
- p->p_retval[0] = rval;
+ td->td_retval[0] = rval;
done2:
mtx_unlock(&Giant);
return(error);
@@ -657,22 +655,21 @@ struct semget_args {
* MPSAFE
*/
int
-semget(p, uap)
- struct proc *p;
+semget(td, uap)
+ struct thread *td;
register struct semget_args *uap;
{
int semid, error = 0;
int key = uap->key;
int nsems = uap->nsems;
int semflg = uap->semflg;
- struct ucred *cred = p->p_ucred;
+ struct ucred *cred = td->td_proc->p_ucred;
#ifdef SEM_DEBUG
printf("semget(0x%x, %d, 0%o)\n", key, nsems, semflg);
#endif
mtx_lock(&Giant);
-
- if (!jail_sysvipc_allowed && jailed(p->p_ucred)) {
+ if (!jail_sysvipc_allowed && jailed(td->td_proc->p_ucred)) {
error = ENOSYS;
goto done2;
}
@@ -687,7 +684,7 @@ semget(p, uap)
#ifdef SEM_DEBUG
printf("found public key\n");
#endif
- if ((error = ipcperm(p, &sema[semid].sem_perm,
+ if ((error = ipcperm(td, &sema[semid].sem_perm,
semflg & 0700))) {
goto done2;
}
@@ -771,7 +768,7 @@ semget(p, uap)
}
found:
- p->p_retval[0] = IXSEQ_TO_IPCID(semid, sema[semid].sem_perm);
+ td->td_retval[0] = IXSEQ_TO_IPCID(semid, sema[semid].sem_perm);
done2:
mtx_unlock(&Giant);
return (error);
@@ -789,8 +786,8 @@ struct semop_args {
* MPSAFE
*/
int
-semop(p, uap)
- struct proc *p;
+semop(td, uap)
+ struct thread *td;
register struct semop_args *uap;
{
int semid = uap->semid;
@@ -808,8 +805,7 @@ semop(p, uap)
#endif
mtx_lock(&Giant);
-
- if (!jail_sysvipc_allowed && jailed(p->p_ucred)) {
+ if (!jail_sysvipc_allowed && jailed(td->td_proc->p_ucred)) {
error = ENOSYS;
goto done2;
}
@@ -831,7 +827,7 @@ semop(p, uap)
goto done2;
}
- if ((error = ipcperm(p, &semaptr->sem_perm, IPC_W))) {
+ if ((error = ipcperm(td, &semaptr->sem_perm, IPC_W))) {
#ifdef SEM_DEBUG
printf("error = %d from ipcperm\n", error);
#endif
@@ -1000,7 +996,7 @@ done:
adjval = sops[i].sem_op;
if (adjval == 0)
continue;
- error = semundo_adjust(p, &suptr, semid,
+ error = semundo_adjust(td, &suptr, semid,
sops[i].sem_num, -adjval);
if (error == 0)
continue;
@@ -1020,7 +1016,7 @@ done:
adjval = sops[j].sem_op;
if (adjval == 0)
continue;
- if (semundo_adjust(p, &suptr, semid,
+ if (semundo_adjust(td, &suptr, semid,
sops[j].sem_num, adjval) != 0)
panic("semop - can't undo undos");
}
@@ -1040,7 +1036,7 @@ done:
for (i = 0; i < nsops; i++) {
sopptr = &sops[i];
semptr = &semaptr->sem_base[sopptr->sem_num];
- semptr->sempid = p->p_pid;
+ semptr->sempid = td->td_proc->p_pid;
}
/* Do a wakeup if any semaphore was up'd. */
@@ -1056,7 +1052,7 @@ done:
#ifdef SEM_DEBUG
printf("semop: done\n");
#endif
- p->p_retval[0] = 0;
+ td->td_retval[0] = 0;
done2:
mtx_unlock(&Giant);
return (error);
diff --git a/sys/kern/sysv_shm.c b/sys/kern/sysv_shm.c
index ce9574c..649308d 100644
--- a/sys/kern/sysv_shm.c
+++ b/sys/kern/sysv_shm.c
@@ -62,11 +62,11 @@
static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");
struct oshmctl_args;
-static int oshmctl __P((struct proc *p, struct oshmctl_args *uap));
+static int oshmctl __P((struct thread *td, struct oshmctl_args *uap));
-static int shmget_allocate_segment __P((struct proc *p,
+static int shmget_allocate_segment __P((struct thread *td,
struct shmget_args *uap, int mode));
-static int shmget_existing __P((struct proc *p, struct shmget_args *uap,
+static int shmget_existing __P((struct thread *td, struct shmget_args *uap,
int mode, int segnum));
/* XXX casting to (sy_call_t *) is bogus, as usual. */
@@ -97,7 +97,7 @@ struct shmmap_state {
static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
-static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));
+static int shm_delete_mapping __P((struct proc *p, struct shmmap_state *));
static void shmrealloc __P((void));
static void shminit __P((void));
static int sysvshm_modload __P((struct module *, int, void *));
@@ -237,21 +237,20 @@ struct shmdt_args {
* MPSAFE
*/
int
-shmdt(p, uap)
- struct proc *p;
+shmdt(td, uap)
+ struct thread *td;
struct shmdt_args *uap;
{
+ struct proc *p = td->td_proc;
struct shmmap_state *shmmap_s;
int i;
int error = 0;
mtx_lock(&Giant);
-
if (!jail_sysvipc_allowed && jailed(p->p_ucred)) {
error = ENOSYS;
goto done2;
}
-
shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
if (shmmap_s == NULL) {
error = EINVAL;
@@ -285,10 +284,11 @@ struct shmat_args {
* MPSAFE
*/
int
-shmat(p, uap)
- struct proc *p;
+shmat(td, uap)
+ struct thread *td;
struct shmat_args *uap;
{
+ struct proc *p = td->td_proc;
int i, flags;
struct shmid_ds *shmseg;
struct shmmap_state *shmmap_s = NULL;
@@ -300,12 +300,10 @@ shmat(p, uap)
int error = 0;
mtx_lock(&Giant);
-
if (!jail_sysvipc_allowed && jailed(p->p_ucred)) {
error = ENOSYS;
goto done2;
}
-
shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
if (shmmap_s == NULL) {
size = shminfo.shmseg * sizeof(struct shmmap_state);
@@ -319,7 +317,7 @@ shmat(p, uap)
error = EINVAL;
goto done2;
}
- error = ipcperm(p, &shmseg->shm_perm,
+ error = ipcperm(td, &shmseg->shm_perm,
(uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
if (error)
goto done2;
@@ -376,7 +374,7 @@ shmat(p, uap)
shmseg->shm_lpid = p->p_pid;
shmseg->shm_atime = time_second;
shmseg->shm_nattch++;
- p->p_retval[0] = attach_va;
+ td->td_retval[0] = attach_va;
done2:
mtx_unlock(&Giant);
return (error);
@@ -404,8 +402,8 @@ struct oshmctl_args {
* MPSAFE
*/
static int
-oshmctl(p, uap)
- struct proc *p;
+oshmctl(td, uap)
+ struct thread *td;
struct oshmctl_args *uap;
{
#ifdef COMPAT_43
@@ -414,12 +412,10 @@ oshmctl(p, uap)
struct oshmid_ds outbuf;
mtx_lock(&Giant);
-
- if (!jail_sysvipc_allowed && jailed(p->p_ucred)) {
+ if (!jail_sysvipc_allowed && jailed(td->td_proc->p_ucred)) {
error = ENOSYS;
goto done2;
}
-
shmseg = shm_find_segment_by_shmid(uap->shmid);
if (shmseg == NULL) {
error = EINVAL;
@@ -427,7 +423,7 @@ oshmctl(p, uap)
}
switch (uap->cmd) {
case IPC_STAT:
- error = ipcperm(p, &shmseg->shm_perm, IPC_R);
+ error = ipcperm(td, &shmseg->shm_perm, IPC_R);
if (error)
goto done2;
outbuf.shm_perm = shmseg->shm_perm;
@@ -445,7 +441,7 @@ oshmctl(p, uap)
break;
default:
/* XXX casting to (sy_call_t *) is bogus, as usual. */
- error = ((sy_call_t *)shmctl)(p, uap);
+ error = ((sy_call_t *)shmctl)(td, uap);
break;
}
done2:
@@ -468,8 +464,8 @@ struct shmctl_args {
* MPSAFE
*/
int
-shmctl(p, uap)
- struct proc *p;
+shmctl(td, uap)
+ struct thread *td;
struct shmctl_args *uap;
{
int error = 0;
@@ -477,12 +473,10 @@ shmctl(p, uap)
struct shmid_ds *shmseg;
mtx_lock(&Giant);
-
- if (!jail_sysvipc_allowed && jailed(p->p_ucred)) {
+ if (!jail_sysvipc_allowed && jailed(td->td_proc->p_ucred)) {
error = ENOSYS;
goto done2;
}
-
shmseg = shm_find_segment_by_shmid(uap->shmid);
if (shmseg == NULL) {
error = EINVAL;
@@ -490,7 +484,7 @@ shmctl(p, uap)
}
switch (uap->cmd) {
case IPC_STAT:
- error = ipcperm(p, &shmseg->shm_perm, IPC_R);
+ error = ipcperm(td, &shmseg->shm_perm, IPC_R);
if (error)
goto done2;
error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf));
@@ -498,7 +492,7 @@ shmctl(p, uap)
goto done2;
break;
case IPC_SET:
- error = ipcperm(p, &shmseg->shm_perm, IPC_M);
+ error = ipcperm(td, &shmseg->shm_perm, IPC_M);
if (error)
goto done2;
error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf));
@@ -512,7 +506,7 @@ shmctl(p, uap)
shmseg->shm_ctime = time_second;
break;
case IPC_RMID:
- error = ipcperm(p, &shmseg->shm_perm, IPC_M);
+ error = ipcperm(td, &shmseg->shm_perm, IPC_M);
if (error)
goto done2;
shmseg->shm_perm.key = IPC_PRIVATE;
@@ -544,8 +538,8 @@ struct shmget_args {
#endif
static int
-shmget_existing(p, uap, mode, segnum)
- struct proc *p;
+shmget_existing(td, uap, mode, segnum)
+ struct thread *td;
struct shmget_args *uap;
int mode;
int segnum;
@@ -568,23 +562,23 @@ shmget_existing(p, uap, mode, segnum)
}
if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
return EEXIST;
- error = ipcperm(p, &shmseg->shm_perm, mode);
+ error = ipcperm(td, &shmseg->shm_perm, mode);
if (error)
return error;
if (uap->size && uap->size > shmseg->shm_segsz)
return EINVAL;
- p->p_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
+ td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
return 0;
}
static int
-shmget_allocate_segment(p, uap, mode)
- struct proc *p;
+shmget_allocate_segment(td, uap, mode)
+ struct thread *td;
struct shmget_args *uap;
int mode;
{
int i, segnum, shmid, size;
- struct ucred *cred = p->p_ucred;
+ struct ucred *cred = td->td_proc->p_ucred;
struct shmid_ds *shmseg;
struct shm_handle *shm_handle;
@@ -641,7 +635,7 @@ shmget_allocate_segment(p, uap, mode)
shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
(mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
shmseg->shm_segsz = uap->size;
- shmseg->shm_cpid = p->p_pid;
+ shmseg->shm_cpid = td->td_proc->p_pid;
shmseg->shm_lpid = shmseg->shm_nattch = 0;
shmseg->shm_atime = shmseg->shm_dtime = 0;
shmseg->shm_ctime = time_second;
@@ -655,7 +649,7 @@ shmget_allocate_segment(p, uap, mode)
shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
wakeup((caddr_t)shmseg);
}
- p->p_retval[0] = shmid;
+ td->td_retval[0] = shmid;
return 0;
}
@@ -663,26 +657,24 @@ shmget_allocate_segment(p, uap, mode)
* MPSAFE
*/
int
-shmget(p, uap)
- struct proc *p;
+shmget(td, uap)
+ struct thread *td;
struct shmget_args *uap;
{
int segnum, mode;
int error;
mtx_lock(&Giant);
-
- if (!jail_sysvipc_allowed && jailed(p->p_ucred)) {
+ if (!jail_sysvipc_allowed && jailed(td->td_proc->p_ucred)) {
error = ENOSYS;
goto done2;
}
-
mode = uap->shmflg & ACCESSPERMS;
if (uap->key != IPC_PRIVATE) {
again:
segnum = shm_find_segment_by_key(uap->key);
if (segnum >= 0) {
- error = shmget_existing(p, uap, mode, segnum);
+ error = shmget_existing(td, uap, mode, segnum);
if (error == EAGAIN)
goto again;
goto done2;
@@ -692,7 +684,7 @@ shmget(p, uap)
goto done2;
}
}
- error = shmget_allocate_segment(p, uap, mode);
+ error = shmget_allocate_segment(td, uap, mode);
done2:
mtx_unlock(&Giant);
return (error);
@@ -702,8 +694,8 @@ done2:
* MPSAFE
*/
int
-shmsys(p, uap)
- struct proc *p;
+shmsys(td, uap)
+ struct thread *td;
/* XXX actually varargs. */
struct shmsys_args /* {
u_int which;
@@ -715,17 +707,15 @@ shmsys(p, uap)
int error;
mtx_lock(&Giant);
-
- if (!jail_sysvipc_allowed && jailed(p->p_ucred)) {
+ if (!jail_sysvipc_allowed && jailed(td->td_proc->p_ucred)) {
error = ENOSYS;
goto done2;
}
-
if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0])) {
error = EINVAL;
goto done2;
}
- error = (*shmcalls[uap->which])(p, &uap->a2);
+ error = (*shmcalls[uap->which])(td, &uap->a2);
done2:
mtx_unlock(&Giant);
return (error);
diff --git a/sys/kern/tty.c b/sys/kern/tty.c
index 1095864..a4d42fc 100644
--- a/sys/kern/tty.c
+++ b/sys/kern/tty.c
@@ -1065,10 +1065,10 @@ ttioctl(tp, cmd, data, flag)
}
int
-ttypoll(dev, events, p)
+ttypoll(dev, events, td)
dev_t dev;
int events;
- struct proc *p;
+ struct thread *td;
{
int s;
int revents = 0;
@@ -1084,7 +1084,7 @@ ttypoll(dev, events, p)
if (ttnread(tp) > 0 || ISSET(tp->t_state, TS_ZOMBIE))
revents |= events & (POLLIN | POLLRDNORM);
else
- selrecord(p, &tp->t_rsel);
+ selrecord(td, &tp->t_rsel);
}
if (events & (POLLOUT | POLLWRNORM)) {
if ((tp->t_outq.c_cc <= tp->t_olowat &&
@@ -1092,7 +1092,7 @@ ttypoll(dev, events, p)
|| ISSET(tp->t_state, TS_ZOMBIE))
revents |= events & (POLLOUT | POLLWRNORM);
else
- selrecord(p, &tp->t_wsel);
+ selrecord(td, &tp->t_wsel);
}
splx(s);
return (revents);
@@ -2337,8 +2337,8 @@ ttyinfo(tp)
if (proc_compare(pick, p))
pick = p;
- stmp = pick->p_stat == SRUN ? "running" :
- pick->p_wmesg ? pick->p_wmesg : "iowait";
+ stmp = pick->p_stat == SRUN ? "running" : /* XXXKSE */
+ pick->p_thread.td_wmesg ? pick->p_thread.td_wmesg : "iowait";
calcru(pick, &utime, &stime, NULL);
ltmp = pick->p_stat == SIDL || pick->p_stat == SWAIT ||
pick->p_stat == SZOMB ? 0 :
@@ -2386,6 +2386,8 @@ proc_compare(p1, p2)
register struct proc *p1, *p2;
{
+ int esta, estb;
+ struct ksegrp *kg;
mtx_assert(&sched_lock, MA_OWNED);
if (p1 == NULL)
return (1);
@@ -2402,9 +2404,16 @@ proc_compare(p1, p2)
/*
* tie - favor one with highest recent cpu utilization
*/
- if (p2->p_estcpu > p1->p_estcpu)
+ esta = estb = 0;
+ FOREACH_KSEGRP_IN_PROC(p1,kg) {
+ esta += kg->kg_estcpu;
+ }
+ FOREACH_KSEGRP_IN_PROC(p2,kg) {
+ estb += kg->kg_estcpu;
+ }
+ if (estb > esta)
return (1);
- if (p1->p_estcpu > p2->p_estcpu)
+ if (esta > estb)
return (0);
return (p2->p_pid > p1->p_pid); /* tie - return highest pid */
}
@@ -2420,6 +2429,7 @@ proc_compare(p1, p2)
return (p2->p_pid > p1->p_pid); /* tie - return highest pid */
}
+#if 0 /* XXXKSE */
/*
* pick the one with the smallest sleep time
*/
@@ -2434,6 +2444,7 @@ proc_compare(p1, p2)
return (1);
if (p2->p_sflag & PS_SINTR && (p1->p_sflag & PS_SINTR) == 0)
return (0);
+#endif
return (p2->p_pid > p1->p_pid); /* tie - return highest pid */
}
diff --git a/sys/kern/tty_conf.c b/sys/kern/tty_conf.c
index ba3d785..0609dc9 100644
--- a/sys/kern/tty_conf.c
+++ b/sys/kern/tty_conf.c
@@ -198,12 +198,12 @@ l_nostart(tp)
* discipline specific ioctl command.
*/
int
-l_nullioctl(tp, cmd, data, flags, p)
+l_nullioctl(tp, cmd, data, flags, td)
struct tty *tp;
u_long cmd;
char *data;
int flags;
- struct proc *p;
+ struct thread *td;
{
return (ENOIOCTL);
diff --git a/sys/kern/tty_cons.c b/sys/kern/tty_cons.c
index 478c7e9..70c7fee 100644
--- a/sys/kern/tty_cons.c
+++ b/sys/kern/tty_cons.c
@@ -219,9 +219,9 @@ sysctl_kern_consmute(SYSCTL_HANDLER_ARGS)
*/
cninit_finish();
if(cn_is_open)
- /* XXX curproc is not what we want really */
+ /* XXX curthread is not what we want really */
error = cnopen(cn_dev_t, openflag,
- openmode, curproc);
+ openmode, curthread);
/* if it failed, back it out */
if ( error != 0) cnuninit();
} else if (!ocn_mute && cn_mute) {
@@ -231,7 +231,7 @@ sysctl_kern_consmute(SYSCTL_HANDLER_ARGS)
*/
if(cn_is_open)
error = cnclose(cn_dev_t, openflag,
- openmode, curproc);
+ openmode, curthread);
if ( error == 0) cnuninit();
}
if (error != 0) {
@@ -248,10 +248,10 @@ SYSCTL_PROC(_kern, OID_AUTO, consmute, CTLTYPE_INT|CTLFLAG_RW,
0, sizeof cn_mute, sysctl_kern_consmute, "I", "");
static int
-cnopen(dev, flag, mode, p)
+cnopen(dev, flag, mode, td)
dev_t dev;
int flag, mode;
- struct proc *p;
+ struct thread *td;
{
dev_t cndev, physdev;
int retval = 0;
@@ -266,7 +266,7 @@ cnopen(dev, flag, mode, p)
* bypass this and go straight to the device.
*/
if(!cn_mute)
- retval = (*cn_phys_open)(physdev, flag, mode, p);
+ retval = (*cn_phys_open)(physdev, flag, mode, td);
if (retval == 0) {
/*
* check if we openned it via /dev/console or
@@ -285,10 +285,10 @@ cnopen(dev, flag, mode, p)
}
static int
-cnclose(dev, flag, mode, p)
+cnclose(dev, flag, mode, td)
dev_t dev;
int flag, mode;
- struct proc *p;
+ struct thread *td;
{
dev_t cndev;
struct tty *cn_tp;
@@ -323,7 +323,7 @@ cnclose(dev, flag, mode, p)
dev = cndev;
}
if(cn_phys_close)
- return ((*cn_phys_close)(dev, flag, mode, p));
+ return ((*cn_phys_close)(dev, flag, mode, td));
return (0);
}
@@ -360,12 +360,12 @@ cnwrite(dev, uio, flag)
}
static int
-cnioctl(dev, cmd, data, flag, p)
+cnioctl(dev, cmd, data, flag, td)
dev_t dev;
u_long cmd;
caddr_t data;
int flag;
- struct proc *p;
+ struct thread *td;
{
int error;
@@ -376,28 +376,28 @@ cnioctl(dev, cmd, data, flag, p)
* output from the "virtual" console.
*/
if (cmd == TIOCCONS && constty) {
- error = suser(p);
+ error = suser_td(td);
if (error)
return (error);
constty = NULL;
return (0);
}
dev = cn_tab->cn_dev;
- return ((*devsw(dev)->d_ioctl)(dev, cmd, data, flag, p));
+ return ((*devsw(dev)->d_ioctl)(dev, cmd, data, flag, td));
}
static int
-cnpoll(dev, events, p)
+cnpoll(dev, events, td)
dev_t dev;
int events;
- struct proc *p;
+ struct thread *td;
{
if ((cn_tab == NULL) || cn_mute)
return (1);
dev = cn_tab->cn_dev;
- return ((*devsw(dev)->d_poll)(dev, events, p));
+ return ((*devsw(dev)->d_poll)(dev, events, td));
}
static int
diff --git a/sys/kern/tty_pty.c b/sys/kern/tty_pty.c
index 8672077..68e93a9 100644
--- a/sys/kern/tty_pty.c
+++ b/sys/kern/tty_pty.c
@@ -164,11 +164,12 @@ ptyinit(dev_t devc)
/*ARGSUSED*/
static int
-ptsopen(dev, flag, devtype, p)
+ptsopen(dev, flag, devtype, td)
dev_t dev;
int flag, devtype;
- struct proc *p;
+ struct thread *td;
{
+ struct proc *p = td->td_proc;
register struct tty *tp;
int error;
struct pt_ioctl *pti;
@@ -206,10 +207,10 @@ ptsopen(dev, flag, devtype, p)
}
static int
-ptsclose(dev, flag, mode, p)
+ptsclose(dev, flag, mode, td)
dev_t dev;
int flag, mode;
- struct proc *p;
+ struct thread *td;
{
register struct tty *tp;
int err;
@@ -227,7 +228,8 @@ ptsread(dev, uio, flag)
struct uio *uio;
int flag;
{
- struct proc *p = curproc;
+ struct thread *td = curthread;
+ struct proc *p = td->td_proc;
register struct tty *tp = dev->si_tty;
register struct pt_ioctl *pti = dev->si_drv1;
int error = 0;
@@ -326,11 +328,12 @@ ptcwakeup(tp, flag)
}
static int
-ptcopen(dev, flag, devtype, p)
+ptcopen(dev, flag, devtype, td)
dev_t dev;
int flag, devtype;
- struct proc *p;
+ struct thread *td;
{
+ struct proc *p = td->td_proc;
register struct tty *tp;
struct pt_ioctl *pti;
@@ -355,11 +358,11 @@ ptcopen(dev, flag, devtype, p)
}
static int
-ptcclose(dev, flags, fmt, p)
+ptcclose(dev, flags, fmt, td)
dev_t dev;
int flags;
int fmt;
- struct proc *p;
+ struct thread *td;
{
register struct tty *tp;
@@ -471,10 +474,10 @@ ptsstop(tp, flush)
}
static int
-ptcpoll(dev, events, p)
+ptcpoll(dev, events, td)
dev_t dev;
int events;
- struct proc *p;
+ struct thread *td;
{
register struct tty *tp = dev->si_tty;
struct pt_ioctl *pti = dev->si_drv1;
@@ -482,7 +485,7 @@ ptcpoll(dev, events, p)
int s;
if ((tp->t_state & TS_CONNECTED) == 0)
- return (seltrue(dev, events, p) | POLLHUP);
+ return (seltrue(dev, events, td) | POLLHUP);
/*
* Need to block timeouts (ttrstart).
@@ -510,10 +513,10 @@ ptcpoll(dev, events, p)
if (revents == 0) {
if (events & (POLLIN | POLLRDNORM))
- selrecord(p, &pti->pt_selr);
+ selrecord(curthread, &pti->pt_selr);
if (events & (POLLOUT | POLLWRNORM))
- selrecord(p, &pti->pt_selw);
+ selrecord(curthread, &pti->pt_selw);
}
splx(s);
@@ -632,12 +635,12 @@ block:
/*ARGSUSED*/
static int
-ptyioctl(dev, cmd, data, flag, p)
+ptyioctl(dev, cmd, data, flag, td)
dev_t dev;
u_long cmd;
caddr_t data;
int flag;
- struct proc *p;
+ struct thread *td;
{
register struct tty *tp = dev->si_tty;
register struct pt_ioctl *pti = dev->si_drv1;
@@ -741,7 +744,7 @@ ptyioctl(dev, cmd, data, flag, p)
}
return(0);
}
- error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, p);
+ error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, td);
if (error == ENOIOCTL)
error = ttioctl(tp, cmd, data, flag);
if (error == ENOIOCTL) {
diff --git a/sys/kern/tty_tty.c b/sys/kern/tty_tty.c
index b175719..1c6635c 100644
--- a/sys/kern/tty_tty.c
+++ b/sys/kern/tty_tty.c
@@ -71,23 +71,23 @@ static struct cdevsw ctty_cdevsw = {
/* flags */ D_TTY,
};
-#define cttyvp(p) ((p)->p_flag & P_CONTROLT ? (p)->p_session->s_ttyvp : NULL)
+#define cttyvp(td) ((td)->td_proc->p_flag & P_CONTROLT ? (td)->td_proc->p_session->s_ttyvp : NULL)
/*ARGSUSED*/
static int
-cttyopen(dev, flag, mode, p)
+cttyopen(dev, flag, mode, td)
dev_t dev;
int flag, mode;
- struct proc *p;
+ struct thread *td;
{
- struct vnode *ttyvp = cttyvp(p);
+ struct vnode *ttyvp = cttyvp(td);
int error;
if (ttyvp == NULL)
return (ENXIO);
- vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY, p);
- error = VOP_OPEN(ttyvp, flag, NOCRED, p);
- VOP_UNLOCK(ttyvp, 0, p);
+ vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY, td);
+ error = VOP_OPEN(ttyvp, flag, NOCRED, td);
+ VOP_UNLOCK(ttyvp, 0, td);
return (error);
}
@@ -98,15 +98,15 @@ cttyread(dev, uio, flag)
struct uio *uio;
int flag;
{
- struct proc *p = uio->uio_procp;
- register struct vnode *ttyvp = cttyvp(p);
+ struct thread *td = uio->uio_td;
+ register struct vnode *ttyvp = cttyvp(td);
int error;
if (ttyvp == NULL)
return (EIO);
- vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY, td);
error = VOP_READ(ttyvp, uio, flag, NOCRED);
- VOP_UNLOCK(ttyvp, 0, p);
+ VOP_UNLOCK(ttyvp, 0, td);
return (error);
}
@@ -117,8 +117,8 @@ cttywrite(dev, uio, flag)
struct uio *uio;
int flag;
{
- struct proc *p = uio->uio_procp;
- struct vnode *ttyvp = cttyvp(uio->uio_procp);
+ struct thread *td = uio->uio_td;
+ struct vnode *ttyvp = cttyvp(uio->uio_td);
struct mount *mp;
int error;
@@ -128,51 +128,51 @@ cttywrite(dev, uio, flag)
if (ttyvp->v_type != VCHR &&
(error = vn_start_write(ttyvp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
- vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY, td);
error = VOP_WRITE(ttyvp, uio, flag, NOCRED);
- VOP_UNLOCK(ttyvp, 0, p);
+ VOP_UNLOCK(ttyvp, 0, td);
vn_finished_write(mp);
return (error);
}
/*ARGSUSED*/
static int
-cttyioctl(dev, cmd, addr, flag, p)
+cttyioctl(dev, cmd, addr, flag, td)
dev_t dev;
u_long cmd;
caddr_t addr;
int flag;
- struct proc *p;
+ struct thread *td;
{
- struct vnode *ttyvp = cttyvp(p);
+ struct vnode *ttyvp = cttyvp(td);
if (ttyvp == NULL)
return (EIO);
if (cmd == TIOCSCTTY) /* don't allow controlling tty to be set */
return EINVAL; /* to controlling tty -- infinite recursion */
if (cmd == TIOCNOTTY) {
- if (!SESS_LEADER(p)) {
- p->p_flag &= ~P_CONTROLT;
+ if (!SESS_LEADER(td->td_proc)) {
+ td->td_proc->p_flag &= ~P_CONTROLT;
return (0);
} else
return (EINVAL);
}
- return (VOP_IOCTL(ttyvp, cmd, addr, flag, NOCRED, p));
+ return (VOP_IOCTL(ttyvp, cmd, addr, flag, NOCRED, td));
}
/*ARGSUSED*/
static int
-cttypoll(dev, events, p)
+cttypoll(dev, events, td)
dev_t dev;
int events;
- struct proc *p;
+ struct thread *td;
{
- struct vnode *ttyvp = cttyvp(p);
+ struct vnode *ttyvp = cttyvp(td);
if (ttyvp == NULL)
/* try operation to get EOF/failure */
- return (seltrue(dev, events, p));
- return (VOP_POLL(ttyvp, events, p->p_ucred, p));
+ return (seltrue(dev, events, td));
+ return (VOP_POLL(ttyvp, events, td->td_proc->p_ucred, td));
}
static void ctty_clone __P((void *arg, char *name, int namelen, dev_t *dev));
@@ -188,7 +188,7 @@ ctty_clone(void *arg, char *name, int namelen, dev_t *dev)
return;
if (strcmp(name, "tty"))
return;
- vp = cttyvp(curproc);
+ vp = cttyvp(curthread);
if (vp == NULL) {
if (ctty)
*dev = ctty;
diff --git a/sys/kern/uipc_sockbuf.c b/sys/kern/uipc_sockbuf.c
index e352e36..b7042ff 100644
--- a/sys/kern/uipc_sockbuf.c
+++ b/sys/kern/uipc_sockbuf.c
@@ -221,10 +221,10 @@ sonewconn(head, connstatus)
}
struct socket *
-sonewconn3(head, connstatus, p)
+sonewconn3(head, connstatus, td)
register struct socket *head;
int connstatus;
- struct proc *p;
+ struct thread *td;
{
register struct socket *so;
@@ -240,7 +240,7 @@ sonewconn3(head, connstatus, p)
so->so_state = head->so_state | SS_NOFDREF;
so->so_proto = head->so_proto;
so->so_timeo = head->so_timeo;
- so->so_cred = p ? p->p_ucred : head->so_cred;
+ so->so_cred = td ? td->td_proc->p_ucred : head->so_cred;
crhold(so->so_cred);
if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) ||
(*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
@@ -391,11 +391,11 @@ soreserve(so, sndcc, rcvcc)
register struct socket *so;
u_long sndcc, rcvcc;
{
- struct proc *p = curproc;
+ struct thread *td = curthread;
- if (sbreserve(&so->so_snd, sndcc, so, p) == 0)
+ if (sbreserve(&so->so_snd, sndcc, so, td) == 0)
goto bad;
- if (sbreserve(&so->so_rcv, rcvcc, so, p) == 0)
+ if (sbreserve(&so->so_rcv, rcvcc, so, td) == 0)
goto bad2;
if (so->so_rcv.sb_lowat == 0)
so->so_rcv.sb_lowat = 1;
@@ -416,21 +416,21 @@ bad:
* if buffering efficiency is near the normal case.
*/
int
-sbreserve(sb, cc, so, p)
+sbreserve(sb, cc, so, td)
struct sockbuf *sb;
u_long cc;
struct socket *so;
- struct proc *p;
+ struct thread *td;
{
/*
- * p will only be NULL when we're in an interrupt
+ * td will only be NULL when we're in an interrupt
* (e.g. in tcp_input())
*/
if ((u_quad_t)cc > (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES))
return (0);
if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
- p ? p->p_rlimit[RLIMIT_SBSIZE].rlim_cur : RLIM_INFINITY)) {
+ td ? td->td_proc->p_rlimit[RLIMIT_SBSIZE].rlim_cur : RLIM_INFINITY)) {
return (0);
}
sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
@@ -876,7 +876,7 @@ pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
}
int
-pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct proc *p)
+pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
{
return EOPNOTSUPP;
}
@@ -889,13 +889,13 @@ pru_connect2_notsupp(struct socket *so1, struct socket *so2)
int
pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
- struct ifnet *ifp, struct proc *p)
+ struct ifnet *ifp, struct thread *td)
{
return EOPNOTSUPP;
}
int
-pru_listen_notsupp(struct socket *so, struct proc *p)
+pru_listen_notsupp(struct socket *so, struct thread *td)
{
return EOPNOTSUPP;
}
diff --git a/sys/kern/uipc_socket.c b/sys/kern/uipc_socket.c
index 9a2cb93..f4edaaf 100644
--- a/sys/kern/uipc_socket.c
+++ b/sys/kern/uipc_socket.c
@@ -125,12 +125,12 @@ soalloc(waitok)
}
int
-socreate(dom, aso, type, proto, p)
+socreate(dom, aso, type, proto, td)
int dom;
struct socket **aso;
register int type;
int proto;
- struct proc *p;
+ struct thread *td;
{
register struct protosw *prp;
register struct socket *so;
@@ -144,7 +144,7 @@ socreate(dom, aso, type, proto, p)
if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
return (EPROTONOSUPPORT);
- if (jailed(p->p_ucred) && jail_socket_unixiproute_only &&
+ if (jailed(td->td_proc->p_ucred) && jail_socket_unixiproute_only &&
prp->pr_domain->dom_family != PF_LOCAL &&
prp->pr_domain->dom_family != PF_INET &&
prp->pr_domain->dom_family != PF_ROUTE) {
@@ -153,17 +153,17 @@ socreate(dom, aso, type, proto, p)
if (prp->pr_type != type)
return (EPROTOTYPE);
- so = soalloc(p != 0);
+ so = soalloc(td != 0);
if (so == 0)
return (ENOBUFS);
TAILQ_INIT(&so->so_incomp);
TAILQ_INIT(&so->so_comp);
so->so_type = type;
- so->so_cred = p->p_ucred;
+ so->so_cred = td->td_proc->p_ucred;
crhold(so->so_cred);
so->so_proto = prp;
- error = (*prp->pr_usrreqs->pru_attach)(so, proto, p);
+ error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
if (error) {
so->so_state |= SS_NOFDREF;
sofree(so);
@@ -174,15 +174,15 @@ socreate(dom, aso, type, proto, p)
}
int
-sobind(so, nam, p)
+sobind(so, nam, td)
struct socket *so;
struct sockaddr *nam;
- struct proc *p;
+ struct thread *td;
{
int s = splnet();
int error;
- error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, p);
+ error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td);
splx(s);
return (error);
}
@@ -215,15 +215,15 @@ sodealloc(so)
}
int
-solisten(so, backlog, p)
+solisten(so, backlog, td)
register struct socket *so;
int backlog;
- struct proc *p;
+ struct thread *td;
{
int s, error;
s = splnet();
- error = (*so->so_proto->pr_usrreqs->pru_listen)(so, p);
+ error = (*so->so_proto->pr_usrreqs->pru_listen)(so, td);
if (error) {
splx(s);
return (error);
@@ -369,10 +369,10 @@ soaccept(so, nam)
}
int
-soconnect(so, nam, p)
+soconnect(so, nam, td)
register struct socket *so;
struct sockaddr *nam;
- struct proc *p;
+ struct thread *td;
{
int s;
int error;
@@ -391,7 +391,7 @@ soconnect(so, nam, p)
(error = sodisconnect(so))))
error = EISCONN;
else
- error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, p);
+ error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
splx(s);
return (error);
}
@@ -449,14 +449,14 @@ bad:
* Data and control buffers are freed on return.
*/
int
-sosend(so, addr, uio, top, control, flags, p)
+sosend(so, addr, uio, top, control, flags, td)
register struct socket *so;
struct sockaddr *addr;
struct uio *uio;
struct mbuf *top;
struct mbuf *control;
int flags;
- struct proc *p;
+ struct thread *td;
{
struct mbuf **mp;
register struct mbuf *m;
@@ -486,8 +486,8 @@ sosend(so, addr, uio, top, control, flags, p)
dontroute =
(flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
(so->so_proto->pr_flags & PR_ATOMIC);
- if (p)
- p->p_stats->p_ru.ru_msgsnd++;
+ if (td)
+ td->td_proc->p_stats->p_ru.ru_msgsnd++;
if (control)
clen = control->m_len;
#define snderr(errno) { error = errno; splx(s); goto release; }
@@ -624,7 +624,7 @@ nopages:
PRUS_EOF :
/* If there is more to send set PRUS_MORETOCOME */
(resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
- top, addr, control, p);
+ top, addr, control, td);
splx(s);
if (dontroute)
so->so_options &= ~SO_DONTROUTE;
@@ -774,8 +774,8 @@ restart:
goto restart;
}
dontblock:
- if (uio->uio_procp)
- uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
+ if (uio->uio_td)
+ uio->uio_td->td_proc->p_stats->p_ru.ru_msgrcv++;
nextrecord = m->m_nextpkt;
if (pr->pr_flags & PR_ADDR) {
KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
@@ -1103,7 +1103,7 @@ sooptcopyin(sopt, buf, len, minlen)
if (valsize > len)
sopt->sopt_valsize = valsize = len;
- if (sopt->sopt_p != 0)
+ if (sopt->sopt_td != 0)
return (copyin(sopt->sopt_val, buf, valsize));
bcopy(sopt->sopt_val, buf, valsize);
@@ -1189,7 +1189,7 @@ sosetopt(so, sopt)
case SO_RCVBUF:
if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
&so->so_snd : &so->so_rcv, (u_long)optval,
- so, curproc) == 0) {
+ so, curthread) == 0) {
error = ENOBUFS;
goto bad;
}
@@ -1279,7 +1279,7 @@ sooptcopyout(sopt, buf, len)
valsize = min(len, sopt->sopt_valsize);
sopt->sopt_valsize = valsize;
if (sopt->sopt_val != 0) {
- if (sopt->sopt_p != 0)
+ if (sopt->sopt_td != 0)
error = copyout(buf, sopt->sopt_val, valsize);
else
bcopy(buf, sopt->sopt_val, valsize);
@@ -1394,11 +1394,11 @@ soopt_getm(struct sockopt *sopt, struct mbuf **mp)
struct mbuf *m, *m_prev;
int sopt_size = sopt->sopt_valsize;
- MGET(m, sopt->sopt_p ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
+ MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
if (m == 0)
return ENOBUFS;
if (sopt_size > MLEN) {
- MCLGET(m, sopt->sopt_p ? M_TRYWAIT : M_DONTWAIT);
+ MCLGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT);
if ((m->m_flags & M_EXT) == 0) {
m_free(m);
return ENOBUFS;
@@ -1412,13 +1412,13 @@ soopt_getm(struct sockopt *sopt, struct mbuf **mp)
m_prev = m;
while (sopt_size) {
- MGET(m, sopt->sopt_p ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
+ MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
if (m == 0) {
m_freem(*mp);
return ENOBUFS;
}
if (sopt_size > MLEN) {
- MCLGET(m, sopt->sopt_p ? M_TRYWAIT : M_DONTWAIT);
+ MCLGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT);
if ((m->m_flags & M_EXT) == 0) {
m_freem(*mp);
return ENOBUFS;
@@ -1443,7 +1443,7 @@ soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
if (sopt->sopt_val == NULL)
return 0;
while (m != NULL && sopt->sopt_valsize >= m->m_len) {
- if (sopt->sopt_p != NULL) {
+ if (sopt->sopt_td != NULL) {
int error;
error = copyin(sopt->sopt_val, mtod(m, char *),
@@ -1473,7 +1473,7 @@ soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
if (sopt->sopt_val == NULL)
return 0;
while (m != NULL && sopt->sopt_valsize >= m->m_len) {
- if (sopt->sopt_p != NULL) {
+ if (sopt->sopt_td != NULL) {
int error;
error = copyout(mtod(m, char *), sopt->sopt_val,
@@ -1508,7 +1508,7 @@ sohasoutofband(so)
}
int
-sopoll(struct socket *so, int events, struct ucred *cred, struct proc *p)
+sopoll(struct socket *so, int events, struct ucred *cred, struct thread *td)
{
int revents = 0;
int s = splnet();
@@ -1527,12 +1527,12 @@ sopoll(struct socket *so, int events, struct ucred *cred, struct proc *p)
if (revents == 0) {
if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
- selrecord(p, &so->so_rcv.sb_sel);
+ selrecord(curthread, &so->so_rcv.sb_sel);
so->so_rcv.sb_flags |= SB_SEL;
}
if (events & (POLLOUT | POLLWRNORM)) {
- selrecord(p, &so->so_snd.sb_sel);
+ selrecord(curthread, &so->so_snd.sb_sel);
so->so_snd.sb_flags |= SB_SEL;
}
}
diff --git a/sys/kern/uipc_socket2.c b/sys/kern/uipc_socket2.c
index e352e36..b7042ff 100644
--- a/sys/kern/uipc_socket2.c
+++ b/sys/kern/uipc_socket2.c
@@ -221,10 +221,10 @@ sonewconn(head, connstatus)
}
struct socket *
-sonewconn3(head, connstatus, p)
+sonewconn3(head, connstatus, td)
register struct socket *head;
int connstatus;
- struct proc *p;
+ struct thread *td;
{
register struct socket *so;
@@ -240,7 +240,7 @@ sonewconn3(head, connstatus, p)
so->so_state = head->so_state | SS_NOFDREF;
so->so_proto = head->so_proto;
so->so_timeo = head->so_timeo;
- so->so_cred = p ? p->p_ucred : head->so_cred;
+ so->so_cred = td ? td->td_proc->p_ucred : head->so_cred;
crhold(so->so_cred);
if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) ||
(*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
@@ -391,11 +391,11 @@ soreserve(so, sndcc, rcvcc)
register struct socket *so;
u_long sndcc, rcvcc;
{
- struct proc *p = curproc;
+ struct thread *td = curthread;
- if (sbreserve(&so->so_snd, sndcc, so, p) == 0)
+ if (sbreserve(&so->so_snd, sndcc, so, td) == 0)
goto bad;
- if (sbreserve(&so->so_rcv, rcvcc, so, p) == 0)
+ if (sbreserve(&so->so_rcv, rcvcc, so, td) == 0)
goto bad2;
if (so->so_rcv.sb_lowat == 0)
so->so_rcv.sb_lowat = 1;
@@ -416,21 +416,21 @@ bad:
* if buffering efficiency is near the normal case.
*/
int
-sbreserve(sb, cc, so, p)
+sbreserve(sb, cc, so, td)
struct sockbuf *sb;
u_long cc;
struct socket *so;
- struct proc *p;
+ struct thread *td;
{
/*
- * p will only be NULL when we're in an interrupt
+ * td will only be NULL when we're in an interrupt
* (e.g. in tcp_input())
*/
if ((u_quad_t)cc > (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES))
return (0);
if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
- p ? p->p_rlimit[RLIMIT_SBSIZE].rlim_cur : RLIM_INFINITY)) {
+ td ? td->td_proc->p_rlimit[RLIMIT_SBSIZE].rlim_cur : RLIM_INFINITY)) {
return (0);
}
sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
@@ -876,7 +876,7 @@ pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
}
int
-pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct proc *p)
+pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
{
return EOPNOTSUPP;
}
@@ -889,13 +889,13 @@ pru_connect2_notsupp(struct socket *so1, struct socket *so2)
int
pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
- struct ifnet *ifp, struct proc *p)
+ struct ifnet *ifp, struct thread *td)
{
return EOPNOTSUPP;
}
int
-pru_listen_notsupp(struct socket *so, struct proc *p)
+pru_listen_notsupp(struct socket *so, struct thread *td)
{
return EOPNOTSUPP;
}
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index ecf4406..e09951f 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -77,14 +77,14 @@ SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)
static struct sf_buf *sf_buf_alloc(void);
static void sf_buf_free(caddr_t addr, void *args);
-static int sendit __P((struct proc *p, int s, struct msghdr *mp, int flags));
-static int recvit __P((struct proc *p, int s, struct msghdr *mp,
+static int sendit __P((struct thread *td, int s, struct msghdr *mp, int flags));
+static int recvit __P((struct thread *td, int s, struct msghdr *mp,
caddr_t namelenp));
-static int accept1 __P((struct proc *p, struct accept_args *uap, int compat));
-static int getsockname1 __P((struct proc *p, struct getsockname_args *uap,
+static int accept1 __P((struct thread *td, struct accept_args *uap, int compat));
+static int getsockname1 __P((struct thread *td, struct getsockname_args *uap,
int compat));
-static int getpeername1 __P((struct proc *p, struct getpeername_args *uap,
+static int getpeername1 __P((struct thread *td, struct getpeername_args *uap,
int compat));
/*
@@ -113,8 +113,8 @@ extern struct fileops socketops;
* MPSAFE
*/
int
-socket(p, uap)
- struct proc *p;
+socket(td, uap)
+ struct thread *td;
register struct socket_args /* {
int domain;
int type;
@@ -127,26 +127,25 @@ socket(p, uap)
int fd, error;
mtx_lock(&Giant);
-
- fdp = p->p_fd;
- error = falloc(p, &fp, &fd);
+ fdp = td->td_proc->p_fd;
+ error = falloc(td, &fp, &fd);
if (error)
goto done2;
fhold(fp);
- error = socreate(uap->domain, &so, uap->type, uap->protocol, p);
+ error = socreate(uap->domain, &so, uap->type, uap->protocol, td);
if (error) {
if (fdp->fd_ofiles[fd] == fp) {
fdp->fd_ofiles[fd] = NULL;
- fdrop(fp, p);
+ fdrop(fp, td);
}
} else {
fp->f_data = (caddr_t)so;
fp->f_flag = FREAD|FWRITE;
fp->f_ops = &socketops;
fp->f_type = DTYPE_SOCKET;
- p->p_retval[0] = fd;
+ td->td_retval[0] = fd;
}
- fdrop(fp, p);
+ fdrop(fp, td);
done2:
mtx_unlock(&Giant);
return (error);
@@ -157,8 +156,8 @@ done2:
*/
/* ARGSUSED */
int
-bind(p, uap)
- struct proc *p;
+bind(td, uap)
+ struct thread *td;
register struct bind_args /* {
int s;
caddr_t name;
@@ -170,18 +169,17 @@ bind(p, uap)
int error;
mtx_lock(&Giant);
-
- error = holdsock(p->p_fd, uap->s, &fp);
+ error = holdsock(td->td_proc->p_fd, uap->s, &fp);
if (error)
goto done2;
error = getsockaddr(&sa, uap->name, uap->namelen);
if (error) {
- fdrop(fp, p);
+ fdrop(fp, td);
goto done2;
}
- error = sobind((struct socket *)fp->f_data, sa, p);
+ error = sobind((struct socket *)fp->f_data, sa, td);
FREE(sa, M_SONAME);
- fdrop(fp, p);
+ fdrop(fp, td);
done2:
mtx_unlock(&Giant);
return (error);
@@ -192,8 +190,8 @@ done2:
*/
/* ARGSUSED */
int
-listen(p, uap)
- struct proc *p;
+listen(td, uap)
+ struct thread *td;
register struct listen_args /* {
int s;
int backlog;
@@ -203,10 +201,10 @@ listen(p, uap)
int error;
mtx_lock(&Giant);
- error = holdsock(p->p_fd, uap->s, &fp);
+ error = holdsock(td->td_proc->p_fd, uap->s, &fp);
if (error == 0) {
- error = solisten((struct socket *)fp->f_data, uap->backlog, p);
- fdrop(fp, p);
+ error = solisten((struct socket *)fp->f_data, uap->backlog, td);
+ fdrop(fp, td);
}
mtx_unlock(&Giant);
return(error);
@@ -217,8 +215,8 @@ listen(p, uap)
* MPSAFE
*/
static int
-accept1(p, uap, compat)
- struct proc *p;
+accept1(td, uap, compat)
+ struct thread *td;
register struct accept_args /* {
int s;
caddr_t name;
@@ -236,8 +234,7 @@ accept1(p, uap, compat)
short fflag; /* type must match fp->f_flag */
mtx_lock(&Giant);
- fdp = p->p_fd;
-
+ fdp = td->td_proc->p_fd;
if (uap->name) {
error = copyin((caddr_t)uap->anamelen, (caddr_t)&namelen,
sizeof (namelen));
@@ -290,7 +287,7 @@ accept1(p, uap, compat)
head->so_qlen--;
fflag = lfp->f_flag;
- error = falloc(p, &nfp, &fd);
+ error = falloc(td, &nfp, &fd);
if (error) {
/*
* Probably ran out of file descriptors. Put the
@@ -305,7 +302,7 @@ accept1(p, uap, compat)
goto done;
}
fhold(nfp);
- p->p_retval[0] = fd;
+ td->td_retval[0] = fd;
/* connection has been removed from the listen queue */
KNOTE(&head->so_rcv.sb_sel.si_note, 0);
@@ -367,7 +364,7 @@ noconnection:
if (error) {
if (fdp->fd_ofiles[fd] == nfp) {
fdp->fd_ofiles[fd] = NULL;
- fdrop(nfp, p);
+ fdrop(nfp, td);
}
}
splx(s);
@@ -377,8 +374,8 @@ noconnection:
*/
done:
if (nfp != NULL)
- fdrop(nfp, p);
- fdrop(lfp, p);
+ fdrop(nfp, td);
+ fdrop(lfp, td);
done2:
mtx_unlock(&Giant);
return (error);
@@ -388,11 +385,12 @@ done2:
* MPSAFE (accept1() is MPSAFE)
*/
int
-accept(p, uap)
- struct proc *p;
+accept(td, uap)
+ struct thread *td;
struct accept_args *uap;
{
- return (accept1(p, uap, 0));
+
+ return (accept1(td, uap, 0));
}
#ifdef COMPAT_OLDSOCK
@@ -400,11 +398,12 @@ accept(p, uap)
* MPSAFE (accept1() is MPSAFE)
*/
int
-oaccept(p, uap)
- struct proc *p;
+oaccept(td, uap)
+ struct thread *td;
struct accept_args *uap;
{
- return (accept1(p, uap, 1));
+
+ return (accept1(td, uap, 1));
}
#endif /* COMPAT_OLDSOCK */
@@ -413,8 +412,8 @@ oaccept(p, uap)
*/
/* ARGSUSED */
int
-connect(p, uap)
- struct proc *p;
+connect(td, uap)
+ struct thread *td;
register struct connect_args /* {
int s;
caddr_t name;
@@ -427,8 +426,7 @@ connect(p, uap)
int error, s;
mtx_lock(&Giant);
-
- error = holdsock(p->p_fd, uap->s, &fp);
+ error = holdsock(td->td_proc->p_fd, uap->s, &fp);
if (error)
goto done2;
so = (struct socket *)fp->f_data;
@@ -439,7 +437,7 @@ connect(p, uap)
error = getsockaddr(&sa, uap->name, uap->namelen);
if (error)
goto done;
- error = soconnect(so, sa, p);
+ error = soconnect(so, sa, td);
if (error)
goto bad;
if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
@@ -465,7 +463,7 @@ bad:
if (error == ERESTART)
error = EINTR;
done:
- fdrop(fp, p);
+ fdrop(fp, td);
done2:
mtx_unlock(&Giant);
return (error);
@@ -475,8 +473,8 @@ done2:
* MPSAFE
*/
int
-socketpair(p, uap)
- struct proc *p;
+socketpair(td, uap)
+ struct thread *td;
register struct socketpair_args /* {
int domain;
int type;
@@ -484,26 +482,25 @@ socketpair(p, uap)
int *rsv;
} */ *uap;
{
- register struct filedesc *fdp = p->p_fd;
+ register struct filedesc *fdp = td->td_proc->p_fd;
struct file *fp1, *fp2;
struct socket *so1, *so2;
int fd, error, sv[2];
mtx_lock(&Giant);
-
- error = socreate(uap->domain, &so1, uap->type, uap->protocol, p);
+ error = socreate(uap->domain, &so1, uap->type, uap->protocol, td);
if (error)
goto done2;
- error = socreate(uap->domain, &so2, uap->type, uap->protocol, p);
+ error = socreate(uap->domain, &so2, uap->type, uap->protocol, td);
if (error)
goto free1;
- error = falloc(p, &fp1, &fd);
+ error = falloc(td, &fp1, &fd);
if (error)
goto free2;
fhold(fp1);
sv[0] = fd;
fp1->f_data = (caddr_t)so1;
- error = falloc(p, &fp2, &fd);
+ error = falloc(td, &fp2, &fd);
if (error)
goto free3;
fhold(fp2);
@@ -524,21 +521,21 @@ socketpair(p, uap)
fp1->f_ops = fp2->f_ops = &socketops;
fp1->f_type = fp2->f_type = DTYPE_SOCKET;
error = copyout((caddr_t)sv, (caddr_t)uap->rsv, 2 * sizeof (int));
- fdrop(fp1, p);
- fdrop(fp2, p);
+ fdrop(fp1, td);
+ fdrop(fp2, td);
goto done2;
free4:
if (fdp->fd_ofiles[sv[1]] == fp2) {
fdp->fd_ofiles[sv[1]] = NULL;
- fdrop(fp2, p);
+ fdrop(fp2, td);
}
- fdrop(fp2, p);
+ fdrop(fp2, td);
free3:
if (fdp->fd_ofiles[sv[0]] == fp1) {
fdp->fd_ofiles[sv[0]] = NULL;
- fdrop(fp1, p);
+ fdrop(fp1, td);
}
- fdrop(fp1, p);
+ fdrop(fp1, td);
free2:
(void)soclose(so2);
free1:
@@ -549,8 +546,8 @@ done2:
}
static int
-sendit(p, s, mp, flags)
- register struct proc *p;
+sendit(td, s, mp, flags)
+ register struct thread *td;
int s;
register struct msghdr *mp;
int flags;
@@ -568,27 +565,27 @@ sendit(p, s, mp, flags)
struct uio ktruio;
#endif
- error = holdsock(p->p_fd, s, &fp);
+ error = holdsock(td->td_proc->p_fd, s, &fp);
if (error)
return (error);
auio.uio_iov = mp->msg_iov;
auio.uio_iovcnt = mp->msg_iovlen;
auio.uio_segflg = UIO_USERSPACE;
auio.uio_rw = UIO_WRITE;
- auio.uio_procp = p;
+ auio.uio_td = td;
auio.uio_offset = 0; /* XXX */
auio.uio_resid = 0;
iov = mp->msg_iov;
for (i = 0; i < mp->msg_iovlen; i++, iov++) {
if ((auio.uio_resid += iov->iov_len) < 0) {
- fdrop(fp, p);
+ fdrop(fp, td);
return (EINVAL);
}
}
if (mp->msg_name) {
error = getsockaddr(&to, mp->msg_name, mp->msg_namelen);
if (error) {
- fdrop(fp, p);
+ fdrop(fp, td);
return (error);
}
} else {
@@ -627,7 +624,7 @@ sendit(p, s, mp, flags)
control = 0;
}
#ifdef KTRACE
- if (KTRPOINT(p, KTR_GENIO)) {
+ if (KTRPOINT(td->td_proc, KTR_GENIO)) {
int iovlen = auio.uio_iovcnt * sizeof (struct iovec);
MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
@@ -638,31 +635,31 @@ sendit(p, s, mp, flags)
len = auio.uio_resid;
so = (struct socket *)fp->f_data;
error = so->so_proto->pr_usrreqs->pru_sosend(so, to, &auio, 0, control,
- flags, p);
+ flags, td);
if (error) {
if (auio.uio_resid != len && (error == ERESTART ||
error == EINTR || error == EWOULDBLOCK))
error = 0;
if (error == EPIPE) {
- PROC_LOCK(p);
- psignal(p, SIGPIPE);
- PROC_UNLOCK(p);
+ PROC_LOCK(td->td_proc);
+ psignal(td->td_proc, SIGPIPE);
+ PROC_UNLOCK(td->td_proc);
}
}
if (error == 0)
- p->p_retval[0] = len - auio.uio_resid;
+ td->td_retval[0] = len - auio.uio_resid;
#ifdef KTRACE
if (ktriov != NULL) {
if (error == 0) {
ktruio.uio_iov = ktriov;
- ktruio.uio_resid = p->p_retval[0];
- ktrgenio(p->p_tracep, s, UIO_WRITE, &ktruio, error);
+ ktruio.uio_resid = td->td_retval[0];
+ ktrgenio(td->td_proc->p_tracep, s, UIO_WRITE, &ktruio, error);
}
FREE(ktriov, M_TEMP);
}
#endif
bad:
- fdrop(fp, p);
+ fdrop(fp, td);
if (to)
FREE(to, M_SONAME);
return (error);
@@ -672,8 +669,8 @@ bad:
* MPSAFE
*/
int
-sendto(p, uap)
- struct proc *p;
+sendto(td, uap)
+ struct thread *td;
register struct sendto_args /* {
int s;
caddr_t buf;
@@ -698,7 +695,7 @@ sendto(p, uap)
aiov.iov_base = uap->buf;
aiov.iov_len = uap->len;
mtx_lock(&Giant);
- error = sendit(p, uap->s, &msg, uap->flags);
+ error = sendit(td, uap->s, &msg, uap->flags);
mtx_unlock(&Giant);
return (error);
}
@@ -708,8 +705,8 @@ sendto(p, uap)
* MPSAFE
*/
int
-osend(p, uap)
- struct proc *p;
+osend(td, uap)
+ struct thread *td;
register struct osend_args /* {
int s;
caddr_t buf;
@@ -730,7 +727,7 @@ osend(p, uap)
msg.msg_control = 0;
msg.msg_flags = 0;
mtx_lock(&Giant);
- error = sendit(p, uap->s, &msg, uap->flags);
+ error = sendit(td, uap->s, &msg, uap->flags);
mtx_unlock(&Giant);
return (error);
}
@@ -739,8 +736,8 @@ osend(p, uap)
* MPSAFE
*/
int
-osendmsg(p, uap)
- struct proc *p;
+osendmsg(td, uap)
+ struct thread *td;
register struct osendmsg_args /* {
int s;
caddr_t msg;
@@ -772,7 +769,7 @@ osendmsg(p, uap)
goto done;
msg.msg_flags = MSG_COMPAT;
msg.msg_iov = iov;
- error = sendit(p, uap->s, &msg, uap->flags);
+ error = sendit(td, uap->s, &msg, uap->flags);
done:
if (iov != aiov)
FREE(iov, M_IOV);
@@ -786,8 +783,8 @@ done2:
* MPSAFE
*/
int
-sendmsg(p, uap)
- struct proc *p;
+sendmsg(td, uap)
+ struct thread *td;
register struct sendmsg_args /* {
int s;
caddr_t msg;
@@ -799,7 +796,6 @@ sendmsg(p, uap)
int error;
mtx_lock(&Giant);
-
error = copyin(uap->msg, (caddr_t)&msg, sizeof (msg));
if (error)
goto done2;
@@ -822,7 +818,7 @@ sendmsg(p, uap)
#ifdef COMPAT_OLDSOCK
msg.msg_flags = 0;
#endif
- error = sendit(p, uap->s, &msg, uap->flags);
+ error = sendit(td, uap->s, &msg, uap->flags);
done:
if (iov != aiov)
FREE(iov, M_IOV);
@@ -832,8 +828,8 @@ done2:
}
static int
-recvit(p, s, mp, namelenp)
- register struct proc *p;
+recvit(td, s, mp, namelenp)
+ register struct thread *td;
int s;
register struct msghdr *mp;
caddr_t namelenp;
@@ -852,25 +848,25 @@ recvit(p, s, mp, namelenp)
struct uio ktruio;
#endif
- error = holdsock(p->p_fd, s, &fp);
+ error = holdsock(td->td_proc->p_fd, s, &fp);
if (error)
return (error);
auio.uio_iov = mp->msg_iov;
auio.uio_iovcnt = mp->msg_iovlen;
auio.uio_segflg = UIO_USERSPACE;
auio.uio_rw = UIO_READ;
- auio.uio_procp = p;
+ auio.uio_td = td;
auio.uio_offset = 0; /* XXX */
auio.uio_resid = 0;
iov = mp->msg_iov;
for (i = 0; i < mp->msg_iovlen; i++, iov++) {
if ((auio.uio_resid += iov->iov_len) < 0) {
- fdrop(fp, p);
+ fdrop(fp, td);
return (EINVAL);
}
}
#ifdef KTRACE
- if (KTRPOINT(p, KTR_GENIO)) {
+ if (KTRPOINT(td->td_proc, KTR_GENIO)) {
int iovlen = auio.uio_iovcnt * sizeof (struct iovec);
MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
@@ -893,14 +889,14 @@ recvit(p, s, mp, namelenp)
if (error == 0) {
ktruio.uio_iov = ktriov;
ktruio.uio_resid = len - auio.uio_resid;
- ktrgenio(p->p_tracep, s, UIO_READ, &ktruio, error);
+ ktrgenio(td->td_proc->p_tracep, s, UIO_READ, &ktruio, error);
}
FREE(ktriov, M_TEMP);
}
#endif
if (error)
goto out;
- p->p_retval[0] = len - auio.uio_resid;
+ td->td_retval[0] = len - auio.uio_resid;
if (mp->msg_name) {
len = mp->msg_namelen;
if (len <= 0 || fromsa == 0)
@@ -979,7 +975,7 @@ recvit(p, s, mp, namelenp)
mp->msg_controllen = ctlbuf - (caddr_t)mp->msg_control;
}
out:
- fdrop(fp, p);
+ fdrop(fp, td);
if (fromsa)
FREE(fromsa, M_SONAME);
if (control)
@@ -991,8 +987,8 @@ out:
* MPSAFE
*/
int
-recvfrom(p, uap)
- struct proc *p;
+recvfrom(td, uap)
+ struct thread *td;
register struct recvfrom_args /* {
int s;
caddr_t buf;
@@ -1007,7 +1003,6 @@ recvfrom(p, uap)
int error;
mtx_lock(&Giant);
-
if (uap->fromlenaddr) {
error = copyin((caddr_t)uap->fromlenaddr,
(caddr_t)&msg.msg_namelen, sizeof (msg.msg_namelen));
@@ -1023,7 +1018,7 @@ recvfrom(p, uap)
aiov.iov_len = uap->len;
msg.msg_control = 0;
msg.msg_flags = uap->flags;
- error = recvit(p, uap->s, &msg, (caddr_t)uap->fromlenaddr);
+ error = recvit(td, uap->s, &msg, (caddr_t)uap->fromlenaddr);
done2:
mtx_unlock(&Giant);
return(error);
@@ -1034,13 +1029,13 @@ done2:
* MPSAFE
*/
int
-orecvfrom(p, uap)
- struct proc *p;
+orecvfrom(td, uap)
+ struct thread *td;
struct recvfrom_args *uap;
{
uap->flags |= MSG_COMPAT;
- return (recvfrom(p, uap));
+ return (recvfrom(td, uap));
}
#endif
@@ -1050,8 +1045,8 @@ orecvfrom(p, uap)
* MPSAFE
*/
int
-orecv(p, uap)
- struct proc *p;
+orecv(td, uap)
+ struct thread *td;
register struct orecv_args /* {
int s;
caddr_t buf;
@@ -1072,7 +1067,7 @@ orecv(p, uap)
aiov.iov_len = uap->len;
msg.msg_control = 0;
msg.msg_flags = uap->flags;
- error = recvit(p, uap->s, &msg, (caddr_t)0);
+ error = recvit(td, uap->s, &msg, (caddr_t)0);
mtx_unlock(&Giant);
return (error);
}
@@ -1085,8 +1080,8 @@ orecv(p, uap)
* MPSAFE
*/
int
-orecvmsg(p, uap)
- struct proc *p;
+orecvmsg(td, uap)
+ struct thread *td;
register struct orecvmsg_args /* {
int s;
struct omsghdr *msg;
@@ -1120,7 +1115,7 @@ orecvmsg(p, uap)
if (error)
goto done;
msg.msg_iov = iov;
- error = recvit(p, uap->s, &msg, (caddr_t)&uap->msg->msg_namelen);
+ error = recvit(td, uap->s, &msg, (caddr_t)&uap->msg->msg_namelen);
if (msg.msg_controllen && error == 0)
error = copyout((caddr_t)&msg.msg_controllen,
@@ -1138,8 +1133,8 @@ done2:
* MPSAFE
*/
int
-recvmsg(p, uap)
- struct proc *p;
+recvmsg(td, uap)
+ struct thread *td;
register struct recvmsg_args /* {
int s;
struct msghdr *msg;
@@ -1151,7 +1146,6 @@ recvmsg(p, uap)
register int error;
mtx_lock(&Giant);
-
error = copyin((caddr_t)uap->msg, (caddr_t)&msg, sizeof (msg));
if (error)
goto done2;
@@ -1177,7 +1171,7 @@ recvmsg(p, uap)
(unsigned)(msg.msg_iovlen * sizeof (struct iovec)));
if (error)
goto done;
- error = recvit(p, uap->s, &msg, (caddr_t)0);
+ error = recvit(td, uap->s, &msg, (caddr_t)0);
if (!error) {
msg.msg_iov = uiov;
error = copyout((caddr_t)&msg, (caddr_t)uap->msg, sizeof(msg));
@@ -1195,8 +1189,8 @@ done2:
*/
/* ARGSUSED */
int
-shutdown(p, uap)
- struct proc *p;
+shutdown(td, uap)
+ struct thread *td;
register struct shutdown_args /* {
int s;
int how;
@@ -1206,11 +1200,10 @@ shutdown(p, uap)
int error;
mtx_lock(&Giant);
-
- error = holdsock(p->p_fd, uap->s, &fp);
+ error = holdsock(td->td_proc->p_fd, uap->s, &fp);
if (error == 0) {
error = soshutdown((struct socket *)fp->f_data, uap->how);
- fdrop(fp, p);
+ fdrop(fp, td);
}
mtx_unlock(&Giant);
return(error);
@@ -1221,8 +1214,8 @@ shutdown(p, uap)
*/
/* ARGSUSED */
int
-setsockopt(p, uap)
- struct proc *p;
+setsockopt(td, uap)
+ struct thread *td;
register struct setsockopt_args /* {
int s;
int level;
@@ -1241,16 +1234,16 @@ setsockopt(p, uap)
return (EINVAL);
mtx_lock(&Giant);
- error = holdsock(p->p_fd, uap->s, &fp);
+ error = holdsock(td->td_proc->p_fd, uap->s, &fp);
if (error == 0) {
sopt.sopt_dir = SOPT_SET;
sopt.sopt_level = uap->level;
sopt.sopt_name = uap->name;
sopt.sopt_val = uap->val;
sopt.sopt_valsize = uap->valsize;
- sopt.sopt_p = p;
+ sopt.sopt_td = td;
error = sosetopt((struct socket *)fp->f_data, &sopt);
- fdrop(fp, p);
+ fdrop(fp, td);
}
mtx_unlock(&Giant);
return(error);
@@ -1261,8 +1254,8 @@ setsockopt(p, uap)
*/
/* ARGSUSED */
int
-getsockopt(p, uap)
- struct proc *p;
+getsockopt(td, uap)
+ struct thread *td;
register struct getsockopt_args /* {
int s;
int level;
@@ -1276,19 +1269,18 @@ getsockopt(p, uap)
struct sockopt sopt;
mtx_lock(&Giant);
-
- error = holdsock(p->p_fd, uap->s, &fp);
+ error = holdsock(td->td_proc->p_fd, uap->s, &fp);
if (error)
goto done2;
if (uap->val) {
error = copyin((caddr_t)uap->avalsize, (caddr_t)&valsize,
sizeof (valsize));
if (error) {
- fdrop(fp, p);
+ fdrop(fp, td);
goto done2;
}
if (valsize < 0) {
- fdrop(fp, p);
+ fdrop(fp, td);
error = EINVAL;
goto done2;
}
@@ -1301,7 +1293,7 @@ getsockopt(p, uap)
sopt.sopt_name = uap->name;
sopt.sopt_val = uap->val;
sopt.sopt_valsize = (size_t)valsize; /* checked non-negative above */
- sopt.sopt_p = p;
+ sopt.sopt_td = td;
error = sogetopt((struct socket *)fp->f_data, &sopt);
if (error == 0) {
@@ -1309,7 +1301,7 @@ getsockopt(p, uap)
error = copyout((caddr_t)&valsize,
(caddr_t)uap->avalsize, sizeof (valsize));
}
- fdrop(fp, p);
+ fdrop(fp, td);
done2:
mtx_unlock(&Giant);
return (error);
@@ -1322,8 +1314,8 @@ done2:
*/
/* ARGSUSED */
static int
-getsockname1(p, uap, compat)
- struct proc *p;
+getsockname1(td, uap, compat)
+ struct thread *td;
register struct getsockname_args /* {
int fdes;
caddr_t asa;
@@ -1337,13 +1329,12 @@ getsockname1(p, uap, compat)
int len, error;
mtx_lock(&Giant);
-
- error = holdsock(p->p_fd, uap->fdes, &fp);
+ error = holdsock(td->td_proc->p_fd, uap->fdes, &fp);
if (error)
goto done2;
error = copyin((caddr_t)uap->alen, (caddr_t)&len, sizeof (len));
if (error) {
- fdrop(fp, p);
+ fdrop(fp, td);
goto done2;
}
so = (struct socket *)fp->f_data;
@@ -1369,7 +1360,7 @@ gotnothing:
bad:
if (sa)
FREE(sa, M_SONAME);
- fdrop(fp, p);
+ fdrop(fp, td);
done2:
mtx_unlock(&Giant);
return (error);
@@ -1379,11 +1370,12 @@ done2:
* MPSAFE
*/
int
-getsockname(p, uap)
- struct proc *p;
+getsockname(td, uap)
+ struct thread *td;
struct getsockname_args *uap;
{
- return (getsockname1(p, uap, 0));
+
+ return (getsockname1(td, uap, 0));
}
#ifdef COMPAT_OLDSOCK
@@ -1391,11 +1383,12 @@ getsockname(p, uap)
* MPSAFE
*/
int
-ogetsockname(p, uap)
- struct proc *p;
+ogetsockname(td, uap)
+ struct thread *td;
struct getsockname_args *uap;
{
- return (getsockname1(p, uap, 1));
+
+ return (getsockname1(td, uap, 1));
}
#endif /* COMPAT_OLDSOCK */
@@ -1406,8 +1399,8 @@ ogetsockname(p, uap)
*/
/* ARGSUSED */
static int
-getpeername1(p, uap, compat)
- struct proc *p;
+getpeername1(td, uap, compat)
+ struct thread *td;
register struct getpeername_args /* {
int fdes;
caddr_t asa;
@@ -1421,19 +1414,18 @@ getpeername1(p, uap, compat)
int len, error;
mtx_lock(&Giant);
-
- error = holdsock(p->p_fd, uap->fdes, &fp);
+ error = holdsock(td->td_proc->p_fd, uap->fdes, &fp);
if (error)
goto done2;
so = (struct socket *)fp->f_data;
if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
- fdrop(fp, p);
+ fdrop(fp, td);
error = ENOTCONN;
goto done2;
}
error = copyin((caddr_t)uap->alen, (caddr_t)&len, sizeof (len));
if (error) {
- fdrop(fp, p);
+ fdrop(fp, td);
goto done2;
}
sa = 0;
@@ -1458,7 +1450,7 @@ gotnothing:
bad:
if (sa)
FREE(sa, M_SONAME);
- fdrop(fp, p);
+ fdrop(fp, td);
done2:
mtx_unlock(&Giant);
return (error);
@@ -1468,11 +1460,12 @@ done2:
* MPSAFE
*/
int
-getpeername(p, uap)
- struct proc *p;
+getpeername(td, uap)
+ struct thread *td;
struct getpeername_args *uap;
{
- return (getpeername1(p, uap, 0));
+
+ return (getpeername1(td, uap, 0));
}
#ifdef COMPAT_OLDSOCK
@@ -1480,12 +1473,13 @@ getpeername(p, uap)
* MPSAFE
*/
int
-ogetpeername(p, uap)
- struct proc *p;
+ogetpeername(td, uap)
+ struct thread *td;
struct ogetpeername_args *uap;
{
+
/* XXX uap should have type `getpeername_args *' to begin with. */
- return (getpeername1(p, (struct getpeername_args *)uap, 1));
+ return (getpeername1(td, (struct getpeername_args *)uap, 1));
}
#endif /* COMPAT_OLDSOCK */
@@ -1682,10 +1676,10 @@ sf_buf_free(caddr_t addr, void *args)
*
*/
int
-sendfile(struct proc *p, struct sendfile_args *uap)
+sendfile(struct thread *td, struct sendfile_args *uap)
{
struct file *fp;
- struct filedesc *fdp = p->p_fd;
+ struct filedesc *fdp = td->td_proc->p_fd;
struct vnode *vp;
struct vm_object *obj;
struct socket *so;
@@ -1698,7 +1692,6 @@ sendfile(struct proc *p, struct sendfile_args *uap)
int error = 0, s;
mtx_lock(&Giant);
-
vp = NULL;
/*
* Do argument checking. Must be a regular file in, stream
@@ -1719,8 +1712,8 @@ sendfile(struct proc *p, struct sendfile_args *uap)
error = EINVAL;
goto done;
}
- fdrop(fp, p);
- error = holdsock(p->p_fd, uap->s, &fp);
+ fdrop(fp, td);
+ error = holdsock(td->td_proc->p_fd, uap->s, &fp);
if (error)
goto done;
so = (struct socket *)fp->f_data;
@@ -1752,10 +1745,10 @@ sendfile(struct proc *p, struct sendfile_args *uap)
nuap.fd = uap->s;
nuap.iovp = hdtr.headers;
nuap.iovcnt = hdtr.hdr_cnt;
- error = writev(p, &nuap);
+ error = writev(td, &nuap);
if (error)
goto done;
- sbytes += p->p_retval[0];
+ sbytes += td->td_retval[0];
}
}
@@ -1856,11 +1849,11 @@ retry_lookup:
auio.uio_offset = trunc_page(off);
auio.uio_segflg = UIO_NOCOPY;
auio.uio_rw = UIO_READ;
- auio.uio_procp = p;
- vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, p);
+ auio.uio_td = td;
+ vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
error = VOP_READ(vp, &auio, IO_VMIO | ((MAXBSIZE / bsize) << 16),
- p->p_ucred);
- VOP_UNLOCK(vp, 0, p);
+ td->td_proc->p_ucred);
+ VOP_UNLOCK(vp, 0, td);
vm_page_flag_clear(pg, PG_ZERO);
vm_page_io_finish(pg);
if (error) {
@@ -1972,7 +1965,7 @@ retry_space:
}
goto retry_space;
}
- error = (*so->so_proto->pr_usrreqs->pru_send)(so, 0, m, 0, 0, p);
+ error = (*so->so_proto->pr_usrreqs->pru_send)(so, 0, m, 0, 0, td);
splx(s);
if (error) {
sbunlock(&so->so_snd);
@@ -1988,19 +1981,19 @@ retry_space:
nuap.fd = uap->s;
nuap.iovp = hdtr.trailers;
nuap.iovcnt = hdtr.trl_cnt;
- error = writev(p, &nuap);
+ error = writev(td, &nuap);
if (error)
goto done;
- sbytes += p->p_retval[0];
+ sbytes += td->td_retval[0];
}
done:
/*
- * If there was no error we have to clear p->p_retval[0]
+ * If there was no error we have to clear td->td_retval[0]
* because it may have been set by writev.
*/
if (error == 0) {
- p->p_retval[0] = 0;
+ td->td_retval[0] = 0;
}
if (uap->sbytes != NULL) {
copyout(&sbytes, uap->sbytes, sizeof(off_t));
@@ -2008,8 +2001,7 @@ done:
if (vp)
vrele(vp);
if (fp)
- fdrop(fp, p);
+ fdrop(fp, td);
mtx_unlock(&Giant);
return (error);
}
-
diff --git a/sys/kern/uipc_usrreq.c b/sys/kern/uipc_usrreq.c
index 043bc6b..52c60cf 100644
--- a/sys/kern/uipc_usrreq.c
+++ b/sys/kern/uipc_usrreq.c
@@ -80,9 +80,9 @@ static ino_t unp_ino; /* prototype for fake inode numbers */
static int unp_attach __P((struct socket *));
static void unp_detach __P((struct unpcb *));
-static int unp_bind __P((struct unpcb *,struct sockaddr *, struct proc *));
+static int unp_bind __P((struct unpcb *,struct sockaddr *, struct thread *));
static int unp_connect __P((struct socket *,struct sockaddr *,
- struct proc *));
+ struct thread *));
static void unp_disconnect __P((struct unpcb *));
static void unp_shutdown __P((struct unpcb *));
static void unp_drop __P((struct unpcb *, int));
@@ -90,7 +90,7 @@ static void unp_gc __P((void));
static void unp_scan __P((struct mbuf *, void (*)(struct file *)));
static void unp_mark __P((struct file *));
static void unp_discard __P((struct file *));
-static int unp_internalize __P((struct mbuf *, struct proc *));
+static int unp_internalize __P((struct mbuf *, struct thread *));
static int unp_listen __P((struct unpcb *, struct proc *));
static int
@@ -127,7 +127,7 @@ uipc_accept(struct socket *so, struct sockaddr **nam)
}
static int
-uipc_attach(struct socket *so, int proto, struct proc *p)
+uipc_attach(struct socket *so, int proto, struct thread *td)
{
struct unpcb *unp = sotounpcb(so);
@@ -137,24 +137,24 @@ uipc_attach(struct socket *so, int proto, struct proc *p)
}
static int
-uipc_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
+uipc_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
struct unpcb *unp = sotounpcb(so);
if (unp == 0)
return EINVAL;
- return unp_bind(unp, nam, p);
+ return unp_bind(unp, nam, td);
}
static int
-uipc_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
+uipc_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
struct unpcb *unp = sotounpcb(so);
if (unp == 0)
return EINVAL;
- return unp_connect(so, nam, curproc);
+ return unp_connect(so, nam, curthread);
}
static int
@@ -194,13 +194,13 @@ uipc_disconnect(struct socket *so)
}
static int
-uipc_listen(struct socket *so, struct proc *p)
+uipc_listen(struct socket *so, struct thread *td)
{
struct unpcb *unp = sotounpcb(so);
if (unp == 0 || unp->unp_vnode == 0)
return EINVAL;
- return unp_listen(unp, p);
+ return unp_listen(unp, td->td_proc);
}
static int
@@ -258,7 +258,7 @@ uipc_rcvd(struct socket *so, int flags)
static int
uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
- struct mbuf *control, struct proc *p)
+ struct mbuf *control, struct thread *td)
{
int error = 0;
struct unpcb *unp = sotounpcb(so);
@@ -274,7 +274,7 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
goto release;
}
- if (control && (error = unp_internalize(control, p)))
+ if (control && (error = unp_internalize(control, td)))
goto release;
switch (so->so_type) {
@@ -287,7 +287,7 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
error = EISCONN;
break;
}
- error = unp_connect(so, nam, p);
+ error = unp_connect(so, nam, td);
if (error)
break;
} else {
@@ -320,7 +320,7 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
*/
if ((so->so_state & SS_ISCONNECTED) == 0) {
if (nam) {
- error = unp_connect(so, nam, p);
+ error = unp_connect(so, nam, td);
if (error)
break; /* XXX */
} else {
@@ -534,7 +534,7 @@ unp_attach(so)
unp_count++;
LIST_INIT(&unp->unp_refs);
unp->unp_socket = so;
- unp->unp_rvnode = curproc->p_fd->fd_rdir;
+ unp->unp_rvnode = curthread->td_proc->p_fd->fd_rdir;
LIST_INSERT_HEAD(so->so_type == SOCK_DGRAM ? &unp_dhead
: &unp_shead, unp, unp_link);
so->so_pcb = (caddr_t)unp;
@@ -576,10 +576,10 @@ unp_detach(unp)
}
static int
-unp_bind(unp, nam, p)
+unp_bind(unp, nam, td)
struct unpcb *unp;
struct sockaddr *nam;
- struct proc *p;
+ struct thread *td;
{
struct sockaddr_un *soun = (struct sockaddr_un *)nam;
struct vnode *vp;
@@ -599,7 +599,7 @@ unp_bind(unp, nam, p)
buf[namelen] = 0; /* null-terminate the string */
restart:
NDINIT(&nd, CREATE, NOFOLLOW | LOCKPARENT, UIO_SYSSPACE,
- buf, p);
+ buf, td);
/* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
error = namei(&nd);
if (error) {
@@ -627,8 +627,8 @@ restart:
}
VATTR_NULL(&vattr);
vattr.va_type = VSOCK;
- vattr.va_mode = (ACCESSPERMS & ~p->p_fd->fd_cmask);
- VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
+ vattr.va_mode = (ACCESSPERMS & ~td->td_proc->p_fd->fd_cmask);
+ VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
NDFREE(&nd, NDF_ONLY_PNBUF);
vput(nd.ni_dvp);
@@ -640,17 +640,17 @@ restart:
vp->v_socket = unp->unp_socket;
unp->unp_vnode = vp;
unp->unp_addr = (struct sockaddr_un *)dup_sockaddr(nam, 1);
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
free(buf, M_TEMP);
return (0);
}
static int
-unp_connect(so, nam, p)
+unp_connect(so, nam, td)
struct socket *so;
struct sockaddr *nam;
- struct proc *p;
+ struct thread *td;
{
register struct sockaddr_un *soun = (struct sockaddr_un *)nam;
register struct vnode *vp;
@@ -666,7 +666,7 @@ unp_connect(so, nam, p)
strncpy(buf, soun->sun_path, len);
buf[len] = 0;
- NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, buf, p);
+ NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, buf, td);
error = namei(&nd);
if (error)
return (error);
@@ -676,7 +676,7 @@ unp_connect(so, nam, p)
error = ENOTSOCK;
goto bad;
}
- error = VOP_ACCESS(vp, VWRITE, p->p_ucred, p);
+ error = VOP_ACCESS(vp, VWRITE, td->td_proc->p_ucred, td);
if (error)
goto bad;
so2 = vp->v_socket;
@@ -690,7 +690,7 @@ unp_connect(so, nam, p)
}
if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
if ((so2->so_options & SO_ACCEPTCONN) == 0 ||
- (so3 = sonewconn3(so2, 0, p)) == 0) {
+ (so3 = sonewconn3(so2, 0, td)) == 0) {
error = ECONNREFUSED;
goto bad;
}
@@ -710,9 +710,9 @@ unp_connect(so, nam, p)
* (which is now).
*/
memset(&unp3->unp_peercred, '\0', sizeof(unp3->unp_peercred));
- unp3->unp_peercred.cr_uid = p->p_ucred->cr_uid;
- unp3->unp_peercred.cr_ngroups = p->p_ucred->cr_ngroups;
- memcpy(unp3->unp_peercred.cr_groups, p->p_ucred->cr_groups,
+ unp3->unp_peercred.cr_uid = td->td_proc->p_ucred->cr_uid;
+ unp3->unp_peercred.cr_ngroups = td->td_proc->p_ucred->cr_ngroups;
+ memcpy(unp3->unp_peercred.cr_groups, td->td_proc->p_ucred->cr_groups,
sizeof(unp3->unp_peercred.cr_groups));
unp3->unp_flags |= UNP_HAVEPC;
/*
@@ -956,7 +956,7 @@ int
unp_externalize(rights)
struct mbuf *rights;
{
- struct proc *p = curproc; /* XXX */
+ struct thread *td = curthread; /* XXX */
register int i;
register struct cmsghdr *cm = mtod(rights, struct cmsghdr *);
register int *fdp;
@@ -969,7 +969,7 @@ unp_externalize(rights)
/*
* if the new FD's will not fit, then we free them all
*/
- if (!fdavail(p, newfds)) {
+ if (!fdavail(td, newfds)) {
rp = (struct file **)CMSG_DATA(cm);
for (i = 0; i < newfds; i++) {
fp = *rp;
@@ -997,10 +997,10 @@ unp_externalize(rights)
fdp = (int *)(cm + 1);
rp = (struct file **)CMSG_DATA(cm);
for (i = 0; i < newfds; i++) {
- if (fdalloc(p, 0, &f))
+ if (fdalloc(td, 0, &f))
panic("unp_externalize");
fp = *rp++;
- p->p_fd->fd_ofiles[f] = fp;
+ td->td_proc->p_fd->fd_ofiles[f] = fp;
fp->f_msgcount--;
unp_rights--;
*fdp++ = f;
@@ -1009,10 +1009,10 @@ unp_externalize(rights)
fdp = (int *)(cm + 1) + newfds - 1;
rp = (struct file **)CMSG_DATA(cm) + newfds - 1;
for (i = 0; i < newfds; i++) {
- if (fdalloc(p, 0, &f))
+ if (fdalloc(td, 0, &f))
panic("unp_externalize");
fp = *rp--;
- p->p_fd->fd_ofiles[f] = fp;
+ td->td_proc->p_fd->fd_ofiles[f] = fp;
fp->f_msgcount--;
unp_rights--;
*fdp-- = f;
@@ -1043,10 +1043,11 @@ unp_init(void)
#endif
static int
-unp_internalize(control, p)
+unp_internalize(control, td)
struct mbuf *control;
- struct proc *p;
+ struct thread *td;
{
+ struct proc *p = td->td_proc;
struct filedesc *fdescp = p->p_fd;
register struct cmsghdr *cm = mtod(control, struct cmsghdr *);
register struct file **rp;
@@ -1308,7 +1309,7 @@ unp_gc()
sorflush((struct socket *)(tfp->f_data));
}
for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp)
- closef(*fpp, (struct proc *) NULL);
+ closef(*fpp, (struct thread *) NULL);
free((caddr_t)extra_ref, M_FILE);
unp_gcing = 0;
}
@@ -1386,5 +1387,5 @@ unp_discard(fp)
fp->f_msgcount--;
unp_rights--;
- (void) closef(fp, (struct proc *)NULL);
+ (void) closef(fp, (struct thread *)NULL);
}
diff --git a/sys/kern/vfs_acl.c b/sys/kern/vfs_acl.c
index 69dbe85..045d1a8 100644
--- a/sys/kern/vfs_acl.c
+++ b/sys/kern/vfs_acl.c
@@ -48,11 +48,11 @@
MALLOC_DEFINE(M_ACL, "acl", "access control list");
-static int vacl_set_acl(struct proc *p, struct vnode *vp, acl_type_t type,
+static int vacl_set_acl(struct thread *td, struct vnode *vp, acl_type_t type,
struct acl *aclp);
-static int vacl_get_acl(struct proc *p, struct vnode *vp, acl_type_t type,
+static int vacl_get_acl(struct thread *td, struct vnode *vp, acl_type_t type,
struct acl *aclp);
-static int vacl_aclcheck(struct proc *p, struct vnode *vp,
+static int vacl_aclcheck(struct thread *td, struct vnode *vp,
acl_type_t type, struct acl *aclp);
/*
@@ -562,7 +562,7 @@ acl_posix1e_check(struct acl *acl)
* Given a vnode, set its ACL.
*/
static int
-vacl_set_acl(struct proc *p, struct vnode *vp, acl_type_t type,
+vacl_set_acl(struct thread *td, struct vnode *vp, acl_type_t type,
struct acl *aclp)
{
struct acl inkernacl;
@@ -571,10 +571,10 @@ vacl_set_acl(struct proc *p, struct vnode *vp, acl_type_t type,
error = copyin(aclp, &inkernacl, sizeof(struct acl));
if (error)
return(error);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- error = VOP_SETACL(vp, type, &inkernacl, p->p_ucred, p);
- VOP_UNLOCK(vp, 0, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ error = VOP_SETACL(vp, type, &inkernacl, td->td_proc->p_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
return(error);
}
@@ -582,16 +582,16 @@ vacl_set_acl(struct proc *p, struct vnode *vp, acl_type_t type,
* Given a vnode, get its ACL.
*/
static int
-vacl_get_acl(struct proc *p, struct vnode *vp, acl_type_t type,
+vacl_get_acl(struct thread *td, struct vnode *vp, acl_type_t type,
struct acl *aclp)
{
struct acl inkernelacl;
int error;
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- error = VOP_GETACL(vp, type, &inkernelacl, p->p_ucred, p);
- VOP_UNLOCK(vp, 0, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ error = VOP_GETACL(vp, type, &inkernelacl, td->td_proc->p_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
if (error == 0)
error = copyout(&inkernelacl, aclp, sizeof(struct acl));
return (error);
@@ -601,14 +601,14 @@ vacl_get_acl(struct proc *p, struct vnode *vp, acl_type_t type,
* Given a vnode, delete its ACL.
*/
static int
-vacl_delete(struct proc *p, struct vnode *vp, acl_type_t type)
+vacl_delete(struct thread *td, struct vnode *vp, acl_type_t type)
{
int error;
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- error = VOP_SETACL(vp, ACL_TYPE_DEFAULT, 0, p->p_ucred, p);
- VOP_UNLOCK(vp, 0, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ error = VOP_SETACL(vp, ACL_TYPE_DEFAULT, 0, td->td_proc->p_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
return (error);
}
@@ -616,7 +616,7 @@ vacl_delete(struct proc *p, struct vnode *vp, acl_type_t type)
* Given a vnode, check whether an ACL is appropriate for it
*/
static int
-vacl_aclcheck(struct proc *p, struct vnode *vp, acl_type_t type,
+vacl_aclcheck(struct thread *td, struct vnode *vp, acl_type_t type,
struct acl *aclp)
{
struct acl inkernelacl;
@@ -625,7 +625,7 @@ vacl_aclcheck(struct proc *p, struct vnode *vp, acl_type_t type,
error = copyin(aclp, &inkernelacl, sizeof(struct acl));
if (error)
return(error);
- error = VOP_ACLCHECK(vp, type, &inkernelacl, p->p_ucred, p);
+ error = VOP_ACLCHECK(vp, type, &inkernelacl, td->td_proc->p_ucred, td);
return (error);
}
@@ -641,17 +641,17 @@ vacl_aclcheck(struct proc *p, struct vnode *vp, acl_type_t type,
* MPSAFE
*/
int
-__acl_get_file(struct proc *p, struct __acl_get_file_args *uap)
+__acl_get_file(struct thread *td, struct __acl_get_file_args *uap)
{
struct nameidata nd;
int error;
mtx_lock(&Giant);
/* what flags are required here -- possible not LOCKLEAF? */
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
error = namei(&nd);
if (error == 0) {
- error = vacl_get_acl(p, nd.ni_vp, SCARG(uap, type),
+ error = vacl_get_acl(td, nd.ni_vp, SCARG(uap, type),
SCARG(uap, aclp));
NDFREE(&nd, 0);
}
@@ -665,16 +665,16 @@ __acl_get_file(struct proc *p, struct __acl_get_file_args *uap)
* MPSAFE
*/
int
-__acl_set_file(struct proc *p, struct __acl_set_file_args *uap)
+__acl_set_file(struct thread *td, struct __acl_set_file_args *uap)
{
struct nameidata nd;
int error;
mtx_lock(&Giant);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
error = namei(&nd);
if (error == 0) {
- error = vacl_set_acl(p, nd.ni_vp, SCARG(uap, type),
+ error = vacl_set_acl(td, nd.ni_vp, SCARG(uap, type),
SCARG(uap, aclp));
NDFREE(&nd, 0);
}
@@ -688,15 +688,15 @@ __acl_set_file(struct proc *p, struct __acl_set_file_args *uap)
* MPSAFE
*/
int
-__acl_get_fd(struct proc *p, struct __acl_get_fd_args *uap)
+__acl_get_fd(struct thread *td, struct __acl_get_fd_args *uap)
{
struct file *fp;
int error;
mtx_lock(&Giant);
- error = getvnode(p->p_fd, SCARG(uap, filedes), &fp);
+ error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
if (error == 0) {
- error = vacl_get_acl(p, (struct vnode *)fp->f_data,
+ error = vacl_get_acl(td, (struct vnode *)fp->f_data,
SCARG(uap, type), SCARG(uap, aclp));
}
mtx_unlock(&Giant);
@@ -709,15 +709,15 @@ __acl_get_fd(struct proc *p, struct __acl_get_fd_args *uap)
* MPSAFE
*/
int
-__acl_set_fd(struct proc *p, struct __acl_set_fd_args *uap)
+__acl_set_fd(struct thread *td, struct __acl_set_fd_args *uap)
{
struct file *fp;
int error;
mtx_lock(&Giant);
- error = getvnode(p->p_fd, SCARG(uap, filedes), &fp);
+ error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
if (error == 0) {
- error = vacl_set_acl(p, (struct vnode *)fp->f_data,
+ error = vacl_set_acl(td, (struct vnode *)fp->f_data,
SCARG(uap, type), SCARG(uap, aclp));
}
mtx_unlock(&Giant);
@@ -730,16 +730,16 @@ __acl_set_fd(struct proc *p, struct __acl_set_fd_args *uap)
* MPSAFE
*/
int
-__acl_delete_file(struct proc *p, struct __acl_delete_file_args *uap)
+__acl_delete_file(struct thread *td, struct __acl_delete_file_args *uap)
{
struct nameidata nd;
int error;
mtx_lock(&Giant);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
error = namei(&nd);
if (error == 0) {
- error = vacl_delete(p, nd.ni_vp, SCARG(uap, type));
+ error = vacl_delete(td, nd.ni_vp, SCARG(uap, type));
NDFREE(&nd, 0);
}
mtx_unlock(&Giant);
@@ -752,15 +752,15 @@ __acl_delete_file(struct proc *p, struct __acl_delete_file_args *uap)
* MPSAFE
*/
int
-__acl_delete_fd(struct proc *p, struct __acl_delete_fd_args *uap)
+__acl_delete_fd(struct thread *td, struct __acl_delete_fd_args *uap)
{
struct file *fp;
int error;
mtx_lock(&Giant);
- error = getvnode(p->p_fd, SCARG(uap, filedes), &fp);
+ error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
if (error == 0) {
- error = vacl_delete(p, (struct vnode *)fp->f_data,
+ error = vacl_delete(td, (struct vnode *)fp->f_data,
SCARG(uap, type));
}
mtx_unlock(&Giant);
@@ -773,16 +773,16 @@ __acl_delete_fd(struct proc *p, struct __acl_delete_fd_args *uap)
* MPSAFE
*/
int
-__acl_aclcheck_file(struct proc *p, struct __acl_aclcheck_file_args *uap)
+__acl_aclcheck_file( struct thread *td, struct __acl_aclcheck_file_args *uap)
{
struct nameidata nd;
int error;
mtx_lock(&Giant);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
error = namei(&nd);
if (error == 0) {
- error = vacl_aclcheck(p, nd.ni_vp, SCARG(uap, type),
+ error = vacl_aclcheck(td, nd.ni_vp, SCARG(uap, type),
SCARG(uap, aclp));
NDFREE(&nd, 0);
}
@@ -796,15 +796,15 @@ __acl_aclcheck_file(struct proc *p, struct __acl_aclcheck_file_args *uap)
* MPSAFE
*/
int
-__acl_aclcheck_fd(struct proc *p, struct __acl_aclcheck_fd_args *uap)
+__acl_aclcheck_fd( struct thread *td, struct __acl_aclcheck_fd_args *uap)
{
struct file *fp;
int error;
mtx_lock(&Giant);
- error = getvnode(p->p_fd, SCARG(uap, filedes), &fp);
+ error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
if (error == 0) {
- error = vacl_aclcheck(p, (struct vnode *)fp->f_data,
+ error = vacl_aclcheck(td, (struct vnode *)fp->f_data,
SCARG(uap, type), SCARG(uap, aclp));
}
mtx_unlock(&Giant);
diff --git a/sys/kern/vfs_aio.c b/sys/kern/vfs_aio.c
index a695e5d..89a0c65 100644
--- a/sys/kern/vfs_aio.c
+++ b/sys/kern/vfs_aio.c
@@ -153,10 +153,10 @@ SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout,
#define AIOP_FREE 0x1 /* proc on free queue */
#define AIOP_SCHED 0x2 /* proc explicitly scheduled */
-struct aioproclist {
- int aioprocflags; /* AIO proc flags */
- TAILQ_ENTRY(aioproclist) list; /* List of processes */
- struct proc *aioproc; /* The AIO thread */
+struct aiothreadlist {
+ int aiothreadflags; /* AIO proc flags */
+ TAILQ_ENTRY(aiothreadlist) list; /* List of processes */
+ struct thread *aiothread; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
@@ -201,7 +201,7 @@ struct kaioinfo {
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant event */
-static TAILQ_HEAD(,aioproclist) aio_freeproc, aio_activeproc;
+static TAILQ_HEAD(,aiothreadlist) aio_freeproc, aio_activeproc;
static TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
static TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
@@ -210,7 +210,7 @@ static void aio_onceonly(void *);
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void);
-static int aio_aqueue(struct proc *p, struct aiocb *job, int type);
+static int aio_aqueue(struct thread *td, struct aiocb *job, int type);
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
@@ -233,7 +233,7 @@ aio_onceonly(void *na)
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
- aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
+ aiop_zone = zinit("AIOP", sizeof (struct aiothreadlist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO", AIO_LISTIO_MAX * sizeof (struct
@@ -284,7 +284,7 @@ static int
aio_free_entry(struct aiocblist *aiocbe)
{
struct kaioinfo *ki;
- struct aioproclist *aiop;
+ struct aiothreadlist *aiop;
struct aio_liojob *lj;
struct proc *p;
int error;
@@ -335,7 +335,13 @@ aio_free_entry(struct aiocblist *aiocbe)
}
/* aiocbe is going away, we need to destroy any knotes */
- knote_remove(p, &aiocbe->klist);
+ knote_remove(&p->p_thread, &aiocbe->klist); /* XXXKSE */
+ /* XXXKSE Note the thread here is used to eventually find the
+ * owning process again, but it is also used to do a fo_close
+ * and that requires the thread. (but does it require the
+ * OWNING thread? (or maby the running thread?)
+ * There is a semantic problem here...
+ */
if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags & KAIO_RUNDOWN)
&& ((ki->kaio_buffer_count == 0) && (ki->kaio_queue_count == 0)))) {
@@ -352,7 +358,7 @@ aio_free_entry(struct aiocblist *aiocbe)
TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
splx(s);
} else if (aiocbe->jobstate == JOBST_JOBQPROC) {
- aiop = aiocbe->jobaioproc;
+ aiop = aiocbe->jobaiothread;
TAILQ_REMOVE(&aiop->jobtorun, aiocbe, list);
} else if (aiocbe->jobstate == JOBST_JOBQGLOBAL) {
TAILQ_REMOVE(&aio_jobs, aiocbe, list);
@@ -521,7 +527,7 @@ restart4:
* Select a job to run (called by an AIO daemon).
*/
static struct aiocblist *
-aio_selectjob(struct aioproclist *aiop)
+aio_selectjob(struct aiothreadlist *aiop)
{
int s;
struct aiocblist *aiocbe;
@@ -561,7 +567,9 @@ static void
aio_process(struct aiocblist *aiocbe)
{
struct filedesc *fdp;
- struct proc *userp, *mycp;
+ struct thread *td;
+ struct proc *userp;
+ struct proc *mycp;
struct aiocb *cb;
struct file *fp;
struct uio auio;
@@ -574,10 +582,10 @@ aio_process(struct aiocblist *aiocbe)
int inblock_st, inblock_end;
userp = aiocbe->userproc;
+ td = curthread;
+ mycp = td->td_proc;
cb = &aiocbe->uaiocb;
- mycp = curproc;
-
fdp = mycp->p_fd;
fd = cb->aio_fildes;
fp = fdp->fd_ofiles[fd];
@@ -588,7 +596,7 @@ aio_process(struct aiocblist *aiocbe)
return;
}
- aiov.iov_base = (void *)cb->aio_buf;
+ aiov.iov_base = cb->aio_buf;
aiov.iov_len = cb->aio_nbytes;
auio.uio_iov = &aiov;
@@ -597,7 +605,7 @@ aio_process(struct aiocblist *aiocbe)
auio.uio_resid = cb->aio_nbytes;
cnt = cb->aio_nbytes;
auio.uio_segflg = UIO_USERSPACE;
- auio.uio_procp = mycp;
+ auio.uio_td = td;
inblock_st = mycp->p_stats->p_ru.ru_inblock;
oublock_st = mycp->p_stats->p_ru.ru_oublock;
@@ -608,12 +616,12 @@ aio_process(struct aiocblist *aiocbe)
fhold(fp);
if (cb->aio_lio_opcode == LIO_READ) {
auio.uio_rw = UIO_READ;
- error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, mycp);
+ error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td);
} else {
auio.uio_rw = UIO_WRITE;
- error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, mycp);
+ error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td);
}
- fdrop(fp, mycp);
+ fdrop(fp, td);
inblock_end = mycp->p_stats->p_ru.ru_inblock;
oublock_end = mycp->p_stats->p_ru.ru_oublock;
@@ -646,16 +654,17 @@ aio_daemon(void *uproc)
struct aio_liojob *lj;
struct aiocb *cb;
struct aiocblist *aiocbe;
- struct aioproclist *aiop;
+ struct aiothreadlist *aiop;
struct kaioinfo *ki;
struct proc *curcp, *mycp, *userp;
struct vmspace *myvm, *tmpvm;
+ struct thread *td = curthread;
mtx_lock(&Giant);
/*
* Local copies of curproc (cp) and vmspace (myvm)
*/
- mycp = curproc;
+ mycp = td->td_proc;
myvm = mycp->p_vmspace;
if (mycp->p_textvp) {
@@ -668,8 +677,8 @@ aio_daemon(void *uproc)
* per daemon.
*/
aiop = zalloc(aiop_zone);
- aiop->aioproc = mycp;
- aiop->aioprocflags |= AIOP_FREE;
+ aiop->aiothread = td;
+ aiop->aiothreadflags |= AIOP_FREE;
TAILQ_INIT(&aiop->jobtorun);
s = splnet();
@@ -688,7 +697,7 @@ aio_daemon(void *uproc)
* filedescriptors, except as temporarily inherited from the client.
* Credentials are also cloned, and made equivalent to "root".
*/
- fdfree(mycp);
+ fdfree(td);
mycp->p_fd = NULL;
mycp->p_ucred = crcopy(mycp->p_ucred);
mycp->p_ucred->cr_uid = 0;
@@ -705,7 +714,7 @@ aio_daemon(void *uproc)
/*
* Wakeup parent process. (Parent sleeps to keep from blasting away
- * creating to many daemons.)
+ * and creating too many daemons.)
*/
wakeup(mycp);
@@ -719,14 +728,14 @@ aio_daemon(void *uproc)
/*
* Take daemon off of free queue
*/
- if (aiop->aioprocflags & AIOP_FREE) {
+ if (aiop->aiothreadflags & AIOP_FREE) {
s = splnet();
TAILQ_REMOVE(&aio_freeproc, aiop, list);
TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
- aiop->aioprocflags &= ~AIOP_FREE;
+ aiop->aiothreadflags &= ~AIOP_FREE;
splx(s);
}
- aiop->aioprocflags &= ~AIOP_SCHED;
+ aiop->aiothreadflags &= ~AIOP_SCHED;
/*
* Check for jobs.
@@ -755,7 +764,7 @@ aio_daemon(void *uproc)
mycp->p_vmspace->vm_refcnt++;
/* Activate the new mapping. */
- pmap_activate(mycp);
+ pmap_activate(&mycp->p_thread);
/*
* If the old address space wasn't the daemons
@@ -775,7 +784,7 @@ aio_daemon(void *uproc)
* because they were originally freed.
*/
if (mycp->p_fd)
- fdfree(mycp);
+ fdfree(td);
mycp->p_fd = fdshare(userp);
curcp = userp;
}
@@ -787,7 +796,7 @@ aio_daemon(void *uproc)
ki->kaio_active_count++;
/* Do the I/O function. */
- aiocbe->jobaioproc = aiop;
+ aiocbe->jobaiothread = aiop;
aio_process(aiocbe);
/* Decrement the active job count. */
@@ -866,7 +875,7 @@ aio_daemon(void *uproc)
mycp->p_vmspace = myvm;
/* Activate the daemon's address space. */
- pmap_activate(mycp);
+ pmap_activate(&mycp->p_thread);
#ifdef DIAGNOSTIC
if (tmpvm == myvm) {
printf("AIOD: vmspace problem -- %d\n",
@@ -881,7 +890,7 @@ aio_daemon(void *uproc)
* descriptors.
*/
if (mycp->p_fd)
- fdfree(mycp);
+ fdfree(td);
mycp->p_fd = NULL;
curcp = mycp;
}
@@ -895,19 +904,19 @@ aio_daemon(void *uproc)
if (TAILQ_EMPTY(&aio_freeproc))
wakeup(&aio_freeproc);
TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
- aiop->aioprocflags |= AIOP_FREE;
+ aiop->aiothreadflags |= AIOP_FREE;
splx(s);
/*
* If daemon is inactive for a long time, allow it to exit,
* thereby freeing resources.
*/
- if (((aiop->aioprocflags & AIOP_SCHED) == 0) && tsleep(mycp,
+ if (((aiop->aiothreadflags & AIOP_SCHED) == 0) && tsleep(mycp,
PRIBIO, "aiordy", aiod_lifetime)) {
s = splnet();
if ((TAILQ_FIRST(&aio_jobs) == NULL) &&
(TAILQ_FIRST(&aiop->jobtorun) == NULL)) {
- if ((aiop->aioprocflags & AIOP_FREE) &&
+ if ((aiop->aiothreadflags & AIOP_FREE) &&
(num_aio_procs > target_aio_procs)) {
TAILQ_REMOVE(&aio_freeproc, aiop, list);
splx(s);
@@ -1033,7 +1042,7 @@ aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
bp->b_flags = B_PHYS;
bp->b_iodone = aio_physwakeup;
bp->b_saveaddr = bp->b_data;
- bp->b_data = (void *)cb->aio_buf;
+ bp->b_data = cb->aio_buf;
bp->b_blkno = btodb(cb->aio_offset);
if (cb->aio_lio_opcode == LIO_WRITE) {
@@ -1163,7 +1172,7 @@ aio_swake(struct socket *so, struct sockbuf *sb)
struct proc *p;
struct kaioinfo *ki = NULL;
int opcode, wakecount = 0;
- struct aioproclist *aiop;
+ struct aiothreadlist *aiop;
if (sb == &so->so_snd) {
opcode = LIO_WRITE;
@@ -1192,8 +1201,8 @@ aio_swake(struct socket *so, struct sockbuf *sb)
if ((aiop = TAILQ_FIRST(&aio_freeproc)) != 0) {
TAILQ_REMOVE(&aio_freeproc, aiop, list);
TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
- aiop->aioprocflags &= ~AIOP_FREE;
- wakeup(aiop->aioproc);
+ aiop->aiothreadflags &= ~AIOP_FREE;
+ wakeup(aiop->aiothread);
}
}
#endif /* VFS_AIO */
@@ -1205,8 +1214,9 @@ aio_swake(struct socket *so, struct sockbuf *sb)
* technique is done in this code.
*/
static int
-_aio_aqueue(struct proc *p, struct aiocb *job, struct aio_liojob *lj, int type)
+_aio_aqueue(struct thread *td, struct aiocb *job, struct aio_liojob *lj, int type)
{
+ struct proc *p = td->td_proc;
struct filedesc *fdp;
struct file *fp;
unsigned int fd;
@@ -1215,7 +1225,7 @@ _aio_aqueue(struct proc *p, struct aiocb *job, struct aio_liojob *lj, int type)
int error;
int opcode;
struct aiocblist *aiocbe;
- struct aioproclist *aiop;
+ struct aiothreadlist *aiop;
struct kaioinfo *ki;
struct kevent kev;
struct kqueue *kq;
@@ -1347,7 +1357,7 @@ _aio_aqueue(struct proc *p, struct aiocb *job, struct aio_liojob *lj, int type)
kev.ident = (uintptr_t)aiocbe;
kev.filter = EVFILT_AIO;
kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
- error = kqueue_register(kq, &kev, p);
+ error = kqueue_register(kq, &kev, td);
aqueue_fail:
if (error) {
zfree(aiocb_zone, aiocbe);
@@ -1431,22 +1441,22 @@ retryproc:
if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
TAILQ_REMOVE(&aio_freeproc, aiop, list);
TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
- aiop->aioprocflags &= ~AIOP_FREE;
- wakeup(aiop->aioproc);
+ aiop->aiothreadflags &= ~AIOP_FREE;
+ wakeup(aiop->aiothread);
} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
((ki->kaio_active_count + num_aio_resv_start) <
ki->kaio_maxactive_count)) {
num_aio_resv_start++;
if ((error = aio_newproc()) == 0) {
num_aio_resv_start--;
- p->p_retval[0] = 0;
+ td->td_retval[0] = 0;
goto retryproc;
}
num_aio_resv_start--;
}
splx(s);
done:
- fdrop(fp, p);
+ fdrop(fp, td);
return error;
}
@@ -1454,8 +1464,9 @@ done:
* This routine queues an AIO request, checking for quotas.
*/
static int
-aio_aqueue(struct proc *p, struct aiocb *job, int type)
+aio_aqueue(struct thread *td, struct aiocb *job, int type)
{
+ struct proc *p = td->td_proc;
struct kaioinfo *ki;
if (p->p_aioinfo == NULL)
@@ -1468,7 +1479,7 @@ aio_aqueue(struct proc *p, struct aiocb *job, int type)
if (ki->kaio_queue_count >= ki->kaio_qallowed_count)
return EAGAIN;
- return _aio_aqueue(p, job, NULL, type);
+ return _aio_aqueue(td, job, NULL, type);
}
#endif /* VFS_AIO */
@@ -1477,11 +1488,12 @@ aio_aqueue(struct proc *p, struct aiocb *job, int type)
* released.
*/
int
-aio_return(struct proc *p, struct aio_return_args *uap)
+aio_return(struct thread *td, struct aio_return_args *uap)
{
#ifndef VFS_AIO
return ENOSYS;
#else
+ struct proc *p = td->td_proc;
int s;
int jobref;
struct aiocblist *cb, *ncb;
@@ -1505,10 +1517,10 @@ aio_return(struct proc *p, struct aio_return_args *uap)
jobref) {
splx(s);
if (ujob == cb->uuaiocb) {
- p->p_retval[0] =
+ td->td_retval[0] =
cb->uaiocb._aiocb_private.status;
} else
- p->p_retval[0] = EFAULT;
+ td->td_retval[0] = EFAULT;
if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
curproc->p_stats->p_ru.ru_oublock +=
cb->outputcharge;
@@ -1531,10 +1543,10 @@ aio_return(struct proc *p, struct aio_return_args *uap)
== jobref) {
splx(s);
if (ujob == cb->uuaiocb) {
- p->p_retval[0] =
+ td->td_retval[0] =
cb->uaiocb._aiocb_private.status;
} else
- p->p_retval[0] = EFAULT;
+ td->td_retval[0] = EFAULT;
aio_free_entry(cb);
return 0;
}
@@ -1549,11 +1561,12 @@ aio_return(struct proc *p, struct aio_return_args *uap)
* Allow a process to wakeup when any of the I/O requests are completed.
*/
int
-aio_suspend(struct proc *p, struct aio_suspend_args *uap)
+aio_suspend(struct thread *td, struct aio_suspend_args *uap)
{
#ifndef VFS_AIO
return ENOSYS;
#else
+ struct proc *p = td->td_proc;
struct timeval atv;
struct timespec ts;
struct aiocb *const *cbptr, *cbp;
@@ -1666,11 +1679,12 @@ aio_suspend(struct proc *p, struct aio_suspend_args *uap)
* progress.
*/
int
-aio_cancel(struct proc *p, struct aio_cancel_args *uap)
+aio_cancel(struct thread *td, struct aio_cancel_args *uap)
{
#ifndef VFS_AIO
return ENOSYS;
#else
+ struct proc *p = td->td_proc;
struct kaioinfo *ki;
struct aiocblist *cbe, *cbn;
struct file *fp;
@@ -1694,7 +1708,7 @@ aio_cancel(struct proc *p, struct aio_cancel_args *uap)
vp = (struct vnode *)fp->f_data;
if (vn_isdisk(vp,&error)) {
- p->p_retval[0] = AIO_NOTCANCELED;
+ td->td_retval[0] = AIO_NOTCANCELED;
return 0;
}
} else if (fp->f_type == DTYPE_SOCKET) {
@@ -1733,7 +1747,7 @@ aio_cancel(struct proc *p, struct aio_cancel_args *uap)
splx(s);
if ((cancelled) && (uap->aiocbp)) {
- p->p_retval[0] = AIO_CANCELED;
+ td->td_retval[0] = AIO_CANCELED;
return 0;
}
@@ -1777,16 +1791,16 @@ aio_cancel(struct proc *p, struct aio_cancel_args *uap)
if (notcancelled) {
- p->p_retval[0] = AIO_NOTCANCELED;
+ td->td_retval[0] = AIO_NOTCANCELED;
return 0;
}
if (cancelled) {
- p->p_retval[0] = AIO_CANCELED;
+ td->td_retval[0] = AIO_CANCELED;
return 0;
}
- p->p_retval[0] = AIO_ALLDONE;
+ td->td_retval[0] = AIO_ALLDONE;
return 0;
#endif /* VFS_AIO */
@@ -1798,11 +1812,12 @@ aio_cancel(struct proc *p, struct aio_cancel_args *uap)
* subroutine.
*/
int
-aio_error(struct proc *p, struct aio_error_args *uap)
+aio_error(struct thread *td, struct aio_error_args *uap)
{
#ifndef VFS_AIO
return ENOSYS;
#else
+ struct proc *p = td->td_proc;
int s;
struct aiocblist *cb;
struct kaioinfo *ki;
@@ -1820,7 +1835,7 @@ aio_error(struct proc *p, struct aio_error_args *uap)
plist)) {
if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
jobref) {
- p->p_retval[0] = cb->uaiocb._aiocb_private.error;
+ td->td_retval[0] = cb->uaiocb._aiocb_private.error;
return 0;
}
}
@@ -1831,7 +1846,7 @@ aio_error(struct proc *p, struct aio_error_args *uap)
plist)) {
if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
jobref) {
- p->p_retval[0] = EINPROGRESS;
+ td->td_retval[0] = EINPROGRESS;
splx(s);
return 0;
}
@@ -1841,7 +1856,7 @@ aio_error(struct proc *p, struct aio_error_args *uap)
plist)) {
if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
jobref) {
- p->p_retval[0] = EINPROGRESS;
+ td->td_retval[0] = EINPROGRESS;
splx(s);
return 0;
}
@@ -1853,7 +1868,7 @@ aio_error(struct proc *p, struct aio_error_args *uap)
plist)) {
if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
jobref) {
- p->p_retval[0] = cb->uaiocb._aiocb_private.error;
+ td->td_retval[0] = cb->uaiocb._aiocb_private.error;
splx(s);
return 0;
}
@@ -1863,7 +1878,7 @@ aio_error(struct proc *p, struct aio_error_args *uap)
plist)) {
if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
jobref) {
- p->p_retval[0] = EINPROGRESS;
+ td->td_retval[0] = EINPROGRESS;
splx(s);
return 0;
}
@@ -1883,31 +1898,32 @@ aio_error(struct proc *p, struct aio_error_args *uap)
}
int
-aio_read(struct proc *p, struct aio_read_args *uap)
+aio_read(struct thread *td, struct aio_read_args *uap)
{
#ifndef VFS_AIO
return ENOSYS;
#else
- return aio_aqueue(p, uap->aiocbp, LIO_READ);
+ return aio_aqueue(td, uap->aiocbp, LIO_READ);
#endif /* VFS_AIO */
}
int
-aio_write(struct proc *p, struct aio_write_args *uap)
+aio_write(struct thread *td, struct aio_write_args *uap)
{
#ifndef VFS_AIO
return ENOSYS;
#else
- return aio_aqueue(p, uap->aiocbp, LIO_WRITE);
+ return aio_aqueue(td, uap->aiocbp, LIO_WRITE);
#endif /* VFS_AIO */
}
int
-lio_listio(struct proc *p, struct lio_listio_args *uap)
+lio_listio(struct thread *td, struct lio_listio_args *uap)
{
#ifndef VFS_AIO
return ENOSYS;
#else
+ struct proc *p = td->td_proc;
int nent, nentqueued;
struct aiocb *iocb, * const *cbptr;
struct aiocblist *cb;
@@ -1975,7 +1991,7 @@ lio_listio(struct proc *p, struct lio_listio_args *uap)
for (i = 0; i < uap->nent; i++) {
iocb = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]);
if (((intptr_t)iocb != -1) && ((intptr_t)iocb != NULL)) {
- error = _aio_aqueue(p, iocb, lj, 0);
+ error = _aio_aqueue(td, iocb, lj, 0);
if (error == 0)
nentqueued++;
else
@@ -2173,11 +2189,12 @@ aio_physwakeup(struct buf *bp)
#endif /* VFS_AIO */
int
-aio_waitcomplete(struct proc *p, struct aio_waitcomplete_args *uap)
+aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
{
#ifndef VFS_AIO
return ENOSYS;
#else
+ struct proc *p = td->td_proc;
struct timeval atv;
struct timespec ts;
struct aiocb **cbptr;
@@ -2212,7 +2229,7 @@ aio_waitcomplete(struct proc *p, struct aio_waitcomplete_args *uap)
for (;;) {
if ((cb = TAILQ_FIRST(&ki->kaio_jobdone)) != 0) {
suword(uap->aiocbp, (int)cb->uuaiocb);
- p->p_retval[0] = cb->uaiocb._aiocb_private.status;
+ td->td_retval[0] = cb->uaiocb._aiocb_private.status;
if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
curproc->p_stats->p_ru.ru_oublock +=
cb->outputcharge;
@@ -2230,7 +2247,7 @@ aio_waitcomplete(struct proc *p, struct aio_waitcomplete_args *uap)
if ((cb = TAILQ_FIRST(&ki->kaio_bufdone)) != 0 ) {
splx(s);
suword(uap->aiocbp, (int)cb->uuaiocb);
- p->p_retval[0] = cb->uaiocb._aiocb_private.status;
+ td->td_retval[0] = cb->uaiocb._aiocb_private.status;
aio_free_entry(cb);
return cb->uaiocb._aiocb_private.error;
}
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index e66a93e..39e09b3 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -594,8 +594,8 @@ breadn(struct vnode * vp, daddr_t blkno, int size,
/* if not found in cache, do some I/O */
if ((bp->b_flags & B_CACHE) == 0) {
- if (curproc != PCPU_GET(idleproc))
- curproc->p_stats->p_ru.ru_inblock++;
+ if (curthread != PCPU_GET(idlethread))
+ curthread->td_proc->p_stats->p_ru.ru_inblock++;
bp->b_iocmd = BIO_READ;
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
@@ -615,8 +615,8 @@ breadn(struct vnode * vp, daddr_t blkno, int size,
rabp = getblk(vp, *rablkno, *rabsize, 0, 0);
if ((rabp->b_flags & B_CACHE) == 0) {
- if (curproc != PCPU_GET(idleproc))
- curproc->p_stats->p_ru.ru_inblock++;
+ if (curthread != PCPU_GET(idlethread))
+ curthread->td_proc->p_stats->p_ru.ru_inblock++;
rabp->b_flags |= B_ASYNC;
rabp->b_flags &= ~B_INVAL;
rabp->b_ioflags &= ~BIO_ERROR;
@@ -753,8 +753,8 @@ bwrite(struct buf * bp)
bp->b_runningbufspace = bp->b_bufsize;
runningbufspace += bp->b_runningbufspace;
- if (curproc != PCPU_GET(idleproc))
- curproc->p_stats->p_ru.ru_oublock++;
+ if (curthread != PCPU_GET(idlethread))
+ curthread->td_proc->p_stats->p_ru.ru_oublock++;
splx(s);
if (oldflags & B_ASYNC)
BUF_KERNPROC(bp);
@@ -2183,7 +2183,7 @@ loop:
* XXX remove if 0 sections (clean this up after its proven)
*/
if (numfreebuffers == 0) {
- if (curproc == PCPU_GET(idleproc))
+ if (curthread == PCPU_GET(idlethread))
return NULL;
needsbuffer |= VFS_BIO_NEED_ANY;
}
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index af3035a..9483b9b 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -575,7 +575,7 @@ vfs_cache_lookup(ap)
struct componentname *cnp = ap->a_cnp;
struct ucred *cred = cnp->cn_cred;
int flags = cnp->cn_flags;
- struct proc *p = cnp->cn_proc;
+ struct thread *td = cnp->cn_thread;
u_long vpid; /* capability number of vnode */
*vpp = NULL;
@@ -589,7 +589,7 @@ vfs_cache_lookup(ap)
(cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
return (EROFS);
- error = VOP_ACCESS(dvp, VEXEC, cred, p);
+ error = VOP_ACCESS(dvp, VEXEC, cred, td);
if (error)
return (error);
@@ -609,17 +609,17 @@ vfs_cache_lookup(ap)
VREF(vp);
error = 0;
} else if (flags & ISDOTDOT) {
- VOP_UNLOCK(dvp, 0, p);
+ VOP_UNLOCK(dvp, 0, td);
cnp->cn_flags |= PDIRUNLOCK;
- error = vget(vp, LK_EXCLUSIVE, p);
+ error = vget(vp, LK_EXCLUSIVE, td);
if (!error && lockparent && (flags & ISLASTCN)) {
- if ((error = vn_lock(dvp, LK_EXCLUSIVE, p)) == 0)
+ if ((error = vn_lock(dvp, LK_EXCLUSIVE, td)) == 0)
cnp->cn_flags &= ~PDIRUNLOCK;
}
} else {
- error = vget(vp, LK_EXCLUSIVE, p);
+ error = vget(vp, LK_EXCLUSIVE, td);
if (!lockparent || error || !(flags & ISLASTCN)) {
- VOP_UNLOCK(dvp, 0, p);
+ VOP_UNLOCK(dvp, 0, td);
cnp->cn_flags |= PDIRUNLOCK;
}
}
@@ -632,12 +632,12 @@ vfs_cache_lookup(ap)
return (0);
vput(vp);
if (lockparent && dvp != vp && (flags & ISLASTCN)) {
- VOP_UNLOCK(dvp, 0, p);
+ VOP_UNLOCK(dvp, 0, td);
cnp->cn_flags |= PDIRUNLOCK;
}
}
if (cnp->cn_flags & PDIRUNLOCK) {
- error = vn_lock(dvp, LK_EXCLUSIVE, p);
+ error = vn_lock(dvp, LK_EXCLUSIVE, td);
if (error)
return (error);
cnp->cn_flags &= ~PDIRUNLOCK;
@@ -663,8 +663,8 @@ static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);
int
-__getcwd(p, uap)
- struct proc *p;
+__getcwd(td, uap)
+ struct thread *td;
struct __getcwd_args *uap;
{
char *bp, *buf;
@@ -683,7 +683,7 @@ __getcwd(p, uap)
buf = bp = malloc(uap->buflen, M_TEMP, M_WAITOK);
bp += uap->buflen - 1;
*bp = '\0';
- fdp = p->p_fd;
+ fdp = td->td_proc->p_fd;
slash_prefixed = 0;
for (vp = fdp->fd_cdir; vp != fdp->fd_rdir && vp != rootvnode;) {
if (vp->v_flag & VROOT) {
diff --git a/sys/kern/vfs_conf.c b/sys/kern/vfs_conf.c
index e651ef0..98c3f13 100644
--- a/sys/kern/vfs_conf.c
+++ b/sys/kern/vfs_conf.c
@@ -227,7 +227,7 @@ vfs_mountroot_try(char *mountfrom)
*/
strncpy(mp->mnt_stat.f_mntonname, "/", MNAMELEN);
- error = VFS_MOUNT(mp, NULL, NULL, NULL, curproc);
+ error = VFS_MOUNT(mp, NULL, NULL, NULL, curthread);
done:
if (vfsname != NULL)
@@ -236,7 +236,7 @@ done:
free(path, M_MOUNT);
if (error != 0) {
if (mp != NULL) {
- vfs_unbusy(mp, curproc);
+ vfs_unbusy(mp, curthread);
free(mp, M_MOUNT);
}
printf("Root mount failed: %d\n", error);
@@ -249,7 +249,7 @@ done:
/* sanity check system clock against root filesystem timestamp */
inittodr(mp->mnt_time);
- vfs_unbusy(mp, curproc);
+ vfs_unbusy(mp, curthread);
}
return(error);
}
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index 1a1f55b..c5d1f02 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -243,16 +243,16 @@ vop_stdlock(ap)
struct vop_lock_args /* {
struct vnode *a_vp;
int a_flags;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
#ifndef DEBUG_LOCKS
- return (lockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock, ap->a_p));
+ return (lockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock, ap->a_td));
#else
return (debuglockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock,
- ap->a_p, "vop_stdlock", vp->filename, vp->line));
+ ap->a_td, "vop_stdlock", vp->filename, vp->line));
#endif
}
@@ -261,35 +261,35 @@ vop_stdunlock(ap)
struct vop_unlock_args /* {
struct vnode *a_vp;
int a_flags;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE, &vp->v_interlock,
- ap->a_p));
+ ap->a_td));
}
int
vop_stdislocked(ap)
struct vop_islocked_args /* {
struct vnode *a_vp;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
- return (lockstatus(&ap->a_vp->v_lock, ap->a_p));
+ return (lockstatus(&ap->a_vp->v_lock, ap->a_td));
}
int
vop_stdinactive(ap)
struct vop_inactive_args /* {
struct vnode *a_vp;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
- VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
+ VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
return (0);
}
@@ -302,7 +302,7 @@ vop_nopoll(ap)
struct vnode *a_vp;
int a_events;
struct ucred *a_cred;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
/*
@@ -327,11 +327,11 @@ vop_stdpoll(ap)
struct vnode *a_vp;
int a_events;
struct ucred *a_cred;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
if (ap->a_events & ~POLLSTANDARD)
- return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
+ return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
@@ -346,7 +346,7 @@ vop_sharedlock(ap)
struct vop_lock_args /* {
struct vnode *a_vp;
int a_flags;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
/*
@@ -393,9 +393,9 @@ vop_sharedlock(ap)
if (flags & LK_INTERLOCK)
vnflags |= LK_INTERLOCK;
#ifndef DEBUG_LOCKS
- return (lockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_p));
+ return (lockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_td));
#else
- return (debuglockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_p,
+ return (debuglockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_td,
"vop_sharedlock", vp->filename, vp->line));
#endif
}
@@ -411,7 +411,7 @@ vop_nolock(ap)
struct vop_lock_args /* {
struct vnode *a_vp;
int a_flags;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
#ifdef notyet
@@ -450,7 +450,7 @@ vop_nolock(ap)
}
if (flags & LK_INTERLOCK)
vnflags |= LK_INTERLOCK;
- return(lockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_p));
+ return(lockmgr(&vp->v_lock, vnflags, &vp->v_interlock, ap->a_td));
#else /* for now */
/*
* Since we are not using the lock manager, we must clear
@@ -470,7 +470,7 @@ vop_nounlock(ap)
struct vop_unlock_args /* {
struct vnode *a_vp;
int a_flags;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
@@ -490,7 +490,7 @@ int
vop_noislocked(ap)
struct vop_islocked_args /* {
struct vnode *a_vp;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
@@ -517,12 +517,12 @@ vop_stdcreatevobject(ap)
struct vop_createvobject_args /* {
struct vnode *vp;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
struct ucred *cred = ap->a_cred;
- struct proc *p = ap->a_p;
+ struct thread *td = ap->a_td;
struct vattr vat;
vm_object_t object;
int error = 0;
@@ -535,7 +535,7 @@ vop_stdcreatevobject(ap)
retry:
if ((object = vp->v_object) == NULL) {
if (vp->v_type == VREG || vp->v_type == VDIR) {
- if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
+ if ((error = VOP_GETATTR(vp, &vat, cred, td)) != 0)
goto retn;
object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
} else if (devsw(vp->v_rdev) != NULL) {
@@ -556,9 +556,9 @@ retry:
vp->v_usecount--;
} else {
if (object->flags & OBJ_DEAD) {
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
tsleep(object, PVM, "vodead", 0);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
goto retry;
}
}
@@ -677,21 +677,21 @@ vop_stdputpages(ap)
* used to fill the vfs fucntion table to get reasonable default return values.
*/
int
-vfs_stdmount (mp, path, data, ndp, p)
+vfs_stdmount (mp, path, data, ndp, td)
struct mount *mp;
char *path;
caddr_t data;
struct nameidata *ndp;
- struct proc *p;
+ struct thread *td;
{
return (0);
}
int
-vfs_stdunmount (mp, mntflags, p)
+vfs_stdunmount (mp, mntflags, td)
struct mount *mp;
int mntflags;
- struct proc *p;
+ struct thread *td;
{
return (0);
}
@@ -705,10 +705,10 @@ vfs_stdroot (mp, vpp)
}
int
-vfs_stdstatfs (mp, sbp, p)
+vfs_stdstatfs (mp, sbp, td)
struct mount *mp;
struct statfs *sbp;
- struct proc *p;
+ struct thread *td;
{
return (EOPNOTSUPP);
}
@@ -722,31 +722,31 @@ vfs_stdvptofh (vp, fhp)
}
int
-vfs_stdstart (mp, flags, p)
+vfs_stdstart (mp, flags, td)
struct mount *mp;
int flags;
- struct proc *p;
+ struct thread *td;
{
return (0);
}
int
-vfs_stdquotactl (mp, cmds, uid, arg, p)
+vfs_stdquotactl (mp, cmds, uid, arg, td)
struct mount *mp;
int cmds;
uid_t uid;
caddr_t arg;
- struct proc *p;
+ struct thread *td;
{
return (EOPNOTSUPP);
}
int
-vfs_stdsync (mp, waitfor, cred, p)
+vfs_stdsync (mp, waitfor, cred, td)
struct mount *mp;
int waitfor;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
{
return (0);
}
@@ -784,13 +784,13 @@ vfs_stduninit (vfsp)
}
int
-vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, p)
+vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
struct mount *mp;
int cmd;
struct vnode *filename_vp;
int attrnamespace;
const char *attrname;
- struct proc *p;
+ struct thread *td;
{
return(EOPNOTSUPP);
}
diff --git a/sys/kern/vfs_extattr.c b/sys/kern/vfs_extattr.c
index bb39292..4e05b8d 100644
--- a/sys/kern/vfs_extattr.c
+++ b/sys/kern/vfs_extattr.c
@@ -75,18 +75,18 @@
#include <vm/vm_zone.h>
#include <vm/vm_page.h>
-static int change_dir __P((struct nameidata *ndp, struct proc *p));
+static int change_dir __P((struct nameidata *ndp, struct thread *td));
static void checkdirs __P((struct vnode *olddp, struct vnode *newdp));
static int chroot_refuse_vdir_fds __P((struct filedesc *fdp));
static int getutimes __P((const struct timeval *, struct timespec *));
-static int setfown __P((struct proc *, struct vnode *, uid_t, gid_t));
-static int setfmode __P((struct proc *, struct vnode *, int));
-static int setfflags __P((struct proc *, struct vnode *, int));
-static int setutimes __P((struct proc *, struct vnode *,
+static int setfown __P((struct thread *td, struct vnode *, uid_t, gid_t));
+static int setfmode __P((struct thread *td, struct vnode *, int));
+static int setfflags __P((struct thread *td, struct vnode *, int));
+static int setutimes __P((struct thread *td, struct vnode *,
const struct timespec *, int));
static int usermount = 0; /* if 1, non-root can mount fs. */
-int (*union_dircheckp) __P((struct proc *, struct vnode **, struct file *));
+int (*union_dircheckp) __P((struct thread *td, struct vnode **, struct file *));
SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, "");
@@ -107,8 +107,8 @@ struct mount_args {
#endif
/* ARGSUSED */
int
-mount(p, uap)
- struct proc *p;
+mount(td, uap)
+ struct thread *td;
struct mount_args /* {
syscallarg(char *) type;
syscallarg(char *) path;
@@ -133,7 +133,7 @@ mount(p, uap)
error = copyinstr(SCARG(uap, path), fspath, MNAMELEN, NULL);
if (error)
goto finish;
- error = vfs_mount(p, fstype, fspath, SCARG(uap, flags),
+ error = vfs_mount(td, fstype, fspath, SCARG(uap, flags),
SCARG(uap, data));
finish:
free(fstype, M_TEMP);
@@ -150,8 +150,8 @@ finish:
* into userspace.
*/
int
-vfs_mount(p, fstype, fspath, fsflags, fsdata)
- struct proc *p;
+vfs_mount(td, fstype, fspath, fsflags, fsdata)
+ struct thread *td;
const char *fstype;
char *fspath;
int fsflags;
@@ -163,6 +163,7 @@ vfs_mount(p, fstype, fspath, fsflags, fsdata)
int error, flag = 0, flag2 = 0;
struct vattr va;
struct nameidata nd;
+ struct proc *p = td->td_proc;
/*
* Be ultra-paranoid about making sure the type and fspath
@@ -173,13 +174,13 @@ vfs_mount(p, fstype, fspath, fsflags, fsdata)
(strlen(fspath) >= MNAMELEN - 1))
return (ENAMETOOLONG);
- if (usermount == 0 && (error = suser(p)))
+ if (usermount == 0 && (error = suser_td(td)))
return (error);
/*
* Do not allow NFS export by non-root users.
*/
if (fsflags & MNT_EXPORTED) {
- error = suser(p);
+ error = suser_td(td);
if (error)
return (error);
}
@@ -191,7 +192,7 @@ vfs_mount(p, fstype, fspath, fsflags, fsdata)
/*
* Get vnode to be covered
*/
- NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspath, p);
+ NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspath, td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
@@ -218,11 +219,11 @@ vfs_mount(p, fstype, fspath, fsflags, fsdata)
* permitted to update it.
*/
if (mp->mnt_stat.f_owner != p->p_ucred->cr_uid &&
- (error = suser(p))) {
+ (error = suser_td(td))) {
vput(vp);
return (error);
}
- if (vfs_busy(mp, LK_NOWAIT, 0, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, 0, td)) {
vput(vp);
return (EBUSY);
}
@@ -230,7 +231,7 @@ vfs_mount(p, fstype, fspath, fsflags, fsdata)
if ((vp->v_flag & VMOUNT) != 0 ||
vp->v_mountedhere != NULL) {
mtx_unlock(&vp->v_interlock);
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
vput(vp);
return (EBUSY);
}
@@ -238,20 +239,20 @@ vfs_mount(p, fstype, fspath, fsflags, fsdata)
mtx_unlock(&vp->v_interlock);
mp->mnt_flag |= fsflags &
(MNT_RELOAD | MNT_FORCE | MNT_UPDATE | MNT_SNAPSHOT);
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
goto update;
}
/*
* If the user is not root, ensure that they own the directory
* onto which we are attempting to mount.
*/
- if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) ||
+ if ((error = VOP_GETATTR(vp, &va, p->p_ucred, td)) ||
(va.va_uid != p->p_ucred->cr_uid &&
- (error = suser(p)))) {
+ (error = suser_td(td)))) {
vput(vp);
return (error);
}
- if ((error = vinvalbuf(vp, V_SAVE, p->p_ucred, p, 0, 0)) != 0) {
+ if ((error = vinvalbuf(vp, V_SAVE, p->p_ucred, td, 0, 0)) != 0) {
vput(vp);
return (error);
}
@@ -266,7 +267,7 @@ vfs_mount(p, fstype, fspath, fsflags, fsdata)
linker_file_t lf;
/* Only load modules for root (very important!) */
- if ((error = suser(p)) != 0) {
+ if ((error = suser_td(td)) != 0) {
vput(vp);
return error;
}
@@ -304,7 +305,7 @@ vfs_mount(p, fstype, fspath, fsflags, fsdata)
*/
mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
- (void)vfs_busy(mp, LK_NOWAIT, 0, p);
+ (void)vfs_busy(mp, LK_NOWAIT, 0, td);
mp->mnt_op = vfsp->vfc_vfsops;
mp->mnt_vfc = vfsp;
vfsp->vfc_refcount++;
@@ -317,7 +318,7 @@ vfs_mount(p, fstype, fspath, fsflags, fsdata)
strncpy(mp->mnt_stat.f_mntonname, fspath, MNAMELEN);
mp->mnt_stat.f_mntonname[MNAMELEN - 1] = '\0';
mp->mnt_iosize_max = DFLTPHYS;
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
update:
/*
* Set the mount level flags.
@@ -339,7 +340,7 @@ update:
* XXX The final recipients of VFS_MOUNT just overwrite the ndp they
* get. No freeing of cn_pnbuf.
*/
- error = VFS_MOUNT(mp, fspath, fsdata, &nd, p);
+ error = VFS_MOUNT(mp, fspath, fsdata, &nd, td);
if (mp->mnt_flag & MNT_UPDATE) {
if (mp->mnt_kern_flag & MNTK_WANTRDWR)
mp->mnt_flag &= ~MNT_RDONLY;
@@ -358,14 +359,14 @@ update:
vrele(mp->mnt_syncer);
mp->mnt_syncer = NULL;
}
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
mtx_unlock(&vp->v_interlock);
vrele(vp);
return (error);
}
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
/*
* Put the new filesystem on the mount list after root.
*/
@@ -384,18 +385,18 @@ update:
panic("mount: lost mount");
checkdirs(vp, newdp);
vput(newdp);
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
if ((mp->mnt_flag & MNT_RDONLY) == 0)
error = vfs_allocate_syncvnode(mp);
- vfs_unbusy(mp, p);
- if ((error = VFS_START(mp, 0, p)) != 0)
+ vfs_unbusy(mp, td);
+ if ((error = VFS_START(mp, 0, td)) != 0)
vrele(vp);
} else {
mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
mtx_unlock(&vp->v_interlock);
mp->mnt_vfc->vfc_refcount--;
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
free((caddr_t)mp, M_MOUNT);
vput(vp);
}
@@ -454,8 +455,8 @@ struct unmount_args {
#endif
/* ARGSUSED */
int
-unmount(p, uap)
- struct proc *p;
+unmount(td, uap)
+ struct thread *td;
register struct unmount_args /* {
syscallarg(char *) path;
syscallarg(int) flags;
@@ -467,7 +468,7 @@ unmount(p, uap)
struct nameidata nd;
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
vp = nd.ni_vp;
@@ -478,8 +479,8 @@ unmount(p, uap)
* Only root, or the user that did the original mount is
* permitted to unmount this filesystem.
*/
- if ((mp->mnt_stat.f_owner != p->p_ucred->cr_uid) &&
- (error = suser(p))) {
+ if ((mp->mnt_stat.f_owner != td->td_proc->p_ucred->cr_uid) &&
+ (error = suser_td(td))) {
vput(vp);
return (error);
}
@@ -500,17 +501,17 @@ unmount(p, uap)
return (EINVAL);
}
vput(vp);
- return (dounmount(mp, SCARG(uap, flags), p));
+ return (dounmount(mp, SCARG(uap, flags), td));
}
/*
* Do the actual file system unmount.
*/
int
-dounmount(mp, flags, p)
+dounmount(mp, flags, td)
struct mount *mp;
int flags;
- struct proc *p;
+ struct thread *td;
{
struct vnode *coveredvp, *fsrootvp;
int error;
@@ -518,7 +519,7 @@ dounmount(mp, flags, p)
mtx_lock(&mountlist_mtx);
mp->mnt_kern_flag |= MNTK_UNMOUNT;
- lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_mtx, p);
+ lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_mtx, td);
vn_start_write(NULL, &mp, V_WAIT);
if (mp->mnt_flag & MNT_EXPUBLIC)
@@ -541,9 +542,9 @@ dounmount(mp, flags, p)
vput(fsrootvp);
}
if (((mp->mnt_flag & MNT_RDONLY) ||
- (error = VFS_SYNC(mp, MNT_WAIT, p->p_ucred, p)) == 0) ||
+ (error = VFS_SYNC(mp, MNT_WAIT, td->td_proc->p_ucred, td)) == 0) ||
(flags & MNT_FORCE)) {
- error = VFS_UNMOUNT(mp, flags, p);
+ error = VFS_UNMOUNT(mp, flags, td);
}
vn_finished_write(mp);
if (error) {
@@ -563,7 +564,7 @@ dounmount(mp, flags, p)
mp->mnt_kern_flag &= ~MNTK_UNMOUNT;
mp->mnt_flag |= async_flag;
lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK,
- &mountlist_mtx, p);
+ &mountlist_mtx, td);
if (mp->mnt_kern_flag & MNTK_MWAIT)
wakeup((caddr_t)mp);
return (error);
@@ -575,7 +576,7 @@ dounmount(mp, flags, p)
mp->mnt_vfc->vfc_refcount--;
if (!LIST_EMPTY(&mp->mnt_vnodelist))
panic("unmount: dangling vnode");
- lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_mtx, p);
+ lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_mtx, td);
lockdestroy(&mp->mnt_lock);
if (coveredvp != NULL)
vrele(coveredvp);
@@ -601,8 +602,8 @@ SYSCTL_INT(_debug, OID_AUTO, syncprt, CTLFLAG_RW, &syncprt, 0, "");
/* ARGSUSED */
int
-sync(p, uap)
- struct proc *p;
+sync(td, uap)
+ struct thread *td;
struct sync_args *uap;
{
struct mount *mp, *nmp;
@@ -610,7 +611,7 @@ sync(p, uap)
mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -620,13 +621,13 @@ sync(p, uap)
mp->mnt_flag &= ~MNT_ASYNC;
vfs_msync(mp, MNT_NOWAIT);
VFS_SYNC(mp, MNT_NOWAIT,
- ((p != NULL) ? p->p_ucred : NOCRED), p);
+ ((td != NULL) ? td->td_proc->p_ucred : NOCRED), td);
mp->mnt_flag |= asyncflag;
vn_finished_write(mp);
}
mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
}
mtx_unlock(&mountlist_mtx);
#if 0
@@ -661,8 +662,8 @@ struct quotactl_args {
#endif
/* ARGSUSED */
int
-quotactl(p, uap)
- struct proc *p;
+quotactl(td, uap)
+ struct thread *td;
register struct quotactl_args /* {
syscallarg(char *) path;
syscallarg(int) cmd;
@@ -674,9 +675,9 @@ quotactl(p, uap)
int error;
struct nameidata nd;
- if (jailed(p->p_ucred) && !prison_quotas)
+ if (jailed(td->td_proc->p_ucred) && !prison_quotas)
return (EPERM);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
@@ -685,7 +686,7 @@ quotactl(p, uap)
if (error)
return (error);
error = VFS_QUOTACTL(mp, SCARG(uap, cmd), SCARG(uap, uid),
- SCARG(uap, arg), p);
+ SCARG(uap, arg), td);
vn_finished_write(mp);
return (error);
}
@@ -701,8 +702,8 @@ struct statfs_args {
#endif
/* ARGSUSED */
int
-statfs(p, uap)
- struct proc *p;
+statfs(td, uap)
+ struct thread *td;
register struct statfs_args /* {
syscallarg(char *) path;
syscallarg(struct statfs *) buf;
@@ -714,18 +715,18 @@ statfs(p, uap)
struct nameidata nd;
struct statfs sb;
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
mp = nd.ni_vp->v_mount;
sp = &mp->mnt_stat;
NDFREE(&nd, NDF_ONLY_PNBUF);
vrele(nd.ni_vp);
- error = VFS_STATFS(mp, sp, p);
+ error = VFS_STATFS(mp, sp, td);
if (error)
return (error);
sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
- if (suser_xxx(p->p_ucred, 0, 0)) {
+ if (suser_xxx(td->td_proc->p_ucred, 0, 0)) {
bcopy((caddr_t)sp, (caddr_t)&sb, sizeof(sb));
sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
sp = &sb;
@@ -744,8 +745,8 @@ struct fstatfs_args {
#endif
/* ARGSUSED */
int
-fstatfs(p, uap)
- struct proc *p;
+fstatfs(td, uap)
+ struct thread *td;
register struct fstatfs_args /* {
syscallarg(int) fd;
syscallarg(struct statfs *) buf;
@@ -757,15 +758,15 @@ fstatfs(p, uap)
int error;
struct statfs sb;
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
mp = ((struct vnode *)fp->f_data)->v_mount;
sp = &mp->mnt_stat;
- error = VFS_STATFS(mp, sp, p);
+ error = VFS_STATFS(mp, sp, td);
if (error)
return (error);
sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
- if (suser_xxx(p->p_ucred, 0, 0)) {
+ if (suser_xxx(td->td_proc->p_ucred, 0, 0)) {
bcopy((caddr_t)sp, (caddr_t)&sb, sizeof(sb));
sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
sp = &sb;
@@ -784,8 +785,8 @@ struct getfsstat_args {
};
#endif
int
-getfsstat(p, uap)
- struct proc *p;
+getfsstat(td, uap)
+ struct thread *td;
register struct getfsstat_args /* {
syscallarg(struct statfs *) buf;
syscallarg(long) bufsize;
@@ -802,7 +803,7 @@ getfsstat(p, uap)
count = 0;
mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -815,16 +816,16 @@ getfsstat(p, uap)
*/
if (((SCARG(uap, flags) & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
(SCARG(uap, flags) & MNT_WAIT)) &&
- (error = VFS_STATFS(mp, sp, p))) {
+ (error = VFS_STATFS(mp, sp, td))) {
mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
continue;
}
sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
error = copyout((caddr_t)sp, sfsp, sizeof(*sp));
if (error) {
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
return (error);
}
sfsp += sizeof(*sp);
@@ -832,13 +833,13 @@ getfsstat(p, uap)
count++;
mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
}
mtx_unlock(&mountlist_mtx);
if (sfsp && count > maxcount)
- p->p_retval[0] = maxcount;
+ td->td_retval[0] = maxcount;
else
- p->p_retval[0] = count;
+ td->td_retval[0] = count;
return (0);
}
@@ -852,13 +853,13 @@ struct fchdir_args {
#endif
/* ARGSUSED */
int
-fchdir(p, uap)
- struct proc *p;
+fchdir(td, uap)
+ struct thread *td;
struct fchdir_args /* {
syscallarg(int) fd;
} */ *uap;
{
- register struct filedesc *fdp = p->p_fd;
+ register struct filedesc *fdp = td->td_proc->p_fd;
struct vnode *vp, *tdp;
struct mount *mp;
struct file *fp;
@@ -868,16 +869,16 @@ fchdir(p, uap)
return (error);
vp = (struct vnode *)fp->f_data;
VREF(vp);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
if (vp->v_type != VDIR)
error = ENOTDIR;
else
- error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p);
+ error = VOP_ACCESS(vp, VEXEC, td->td_proc->p_ucred, td);
while (!error && (mp = vp->v_mountedhere) != NULL) {
- if (vfs_busy(mp, 0, 0, p))
+ if (vfs_busy(mp, 0, 0, td))
continue;
error = VFS_ROOT(mp, &tdp);
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
if (error)
break;
vput(vp);
@@ -887,7 +888,7 @@ fchdir(p, uap)
vput(vp);
return (error);
}
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
vrele(fdp->fd_cdir);
fdp->fd_cdir = vp;
return (0);
@@ -903,19 +904,19 @@ struct chdir_args {
#endif
/* ARGSUSED */
int
-chdir(p, uap)
- struct proc *p;
+chdir(td, uap)
+ struct thread *td;
struct chdir_args /* {
syscallarg(char *) path;
} */ *uap;
{
- register struct filedesc *fdp = p->p_fd;
+ register struct filedesc *fdp = td->td_proc->p_fd;
int error;
struct nameidata nd;
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
- SCARG(uap, path), p);
- if ((error = change_dir(&nd, p)) != 0)
+ SCARG(uap, path), td);
+ if ((error = change_dir(&nd, td)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
vrele(fdp->fd_cdir);
@@ -971,17 +972,17 @@ struct chroot_args {
#endif
/* ARGSUSED */
int
-chroot(p, uap)
- struct proc *p;
+chroot(td, uap)
+ struct thread *td;
struct chroot_args /* {
syscallarg(char *) path;
} */ *uap;
{
- register struct filedesc *fdp = p->p_fd;
+ register struct filedesc *fdp = td->td_proc->p_fd;
int error;
struct nameidata nd;
- error = suser_xxx(0, p, PRISON_ROOT);
+ error = suser_xxx(0, td->td_proc, PRISON_ROOT);
if (error)
return (error);
if (chroot_allow_open_directories == 0 ||
@@ -990,8 +991,8 @@ chroot(p, uap)
if (error)
return (error);
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
- SCARG(uap, path), p);
- if ((error = change_dir(&nd, p)) != 0)
+ SCARG(uap, path), td);
+ if ((error = change_dir(&nd, td)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
vrele(fdp->fd_rdir);
@@ -1007,9 +1008,9 @@ chroot(p, uap)
* Common routine for chroot and chdir.
*/
static int
-change_dir(ndp, p)
+change_dir(ndp, td)
register struct nameidata *ndp;
- struct proc *p;
+ struct thread *td;
{
struct vnode *vp;
int error;
@@ -1021,11 +1022,11 @@ change_dir(ndp, p)
if (vp->v_type != VDIR)
error = ENOTDIR;
else
- error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p);
+ error = VOP_ACCESS(vp, VEXEC, td->td_proc->p_ucred, td);
if (error)
vput(vp);
else
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
return (error);
}
@@ -1041,14 +1042,15 @@ struct open_args {
};
#endif
int
-open(p, uap)
- struct proc *p;
+open(td, uap)
+ struct thread *td;
register struct open_args /* {
syscallarg(char *) path;
syscallarg(int) flags;
syscallarg(int) mode;
} */ *uap;
{
+ struct proc *p = td->td_proc;
struct filedesc *fdp = p->p_fd;
struct file *fp;
struct vnode *vp;
@@ -1064,13 +1066,13 @@ open(p, uap)
if ((oflags & O_ACCMODE) == O_ACCMODE)
return (EINVAL);
flags = FFLAGS(oflags);
- error = falloc(p, &nfp, &indx);
+ error = falloc(td, &nfp, &indx);
if (error)
return (error);
fp = nfp;
cmode = ((SCARG(uap, mode) &~ fdp->fd_cmask) & ALLPERMS) &~ S_ISTXT;
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
- p->p_dupfd = -indx - 1; /* XXX check for fdopen */
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
+ td->td_dupfd = -indx - 1; /* XXX check for fdopen */
/*
* Bump the ref count to prevent another process from closing
* the descriptor while we are blocked in vn_open()
@@ -1081,7 +1083,7 @@ open(p, uap)
/*
* release our own reference
*/
- fdrop(fp, p);
+ fdrop(fp, td);
/*
* handle special fdopen() case. bleh. dupfdopen() is
@@ -1089,10 +1091,10 @@ open(p, uap)
* if it succeeds.
*/
if ((error == ENODEV || error == ENXIO) &&
- p->p_dupfd >= 0 && /* XXX from fdopen */
+ td->td_dupfd >= 0 && /* XXX from fdopen */
(error =
- dupfdopen(p, fdp, indx, p->p_dupfd, flags, error)) == 0) {
- p->p_retval[0] = indx;
+ dupfdopen(td, fdp, indx, td->td_dupfd, flags, error)) == 0) {
+ td->td_retval[0] = indx;
return (0);
}
/*
@@ -1101,14 +1103,14 @@ open(p, uap)
*/
if (fdp->fd_ofiles[indx] == fp) {
fdp->fd_ofiles[indx] = NULL;
- fdrop(fp, p);
+ fdrop(fp, td);
}
if (error == ERESTART)
error = EINTR;
return (error);
}
- p->p_dupfd = 0;
+ td->td_dupfd = 0;
NDFREE(&nd, NDF_ONLY_PNBUF);
vp = nd.ni_vp;
@@ -1123,10 +1125,10 @@ open(p, uap)
if (fp->f_count == 1) {
KASSERT(fdp->fd_ofiles[indx] != fp,
("Open file descriptor lost all refs"));
- VOP_UNLOCK(vp, 0, p);
- vn_close(vp, flags & FMASK, fp->f_cred, p);
- fdrop(fp, p);
- p->p_retval[0] = indx;
+ VOP_UNLOCK(vp, 0, td);
+ vn_close(vp, flags & FMASK, fp->f_cred, td);
+ fdrop(fp, td);
+ td->td_retval[0] = indx;
return 0;
}
@@ -1134,7 +1136,7 @@ open(p, uap)
fp->f_flag = flags & FMASK;
fp->f_ops = &vnops;
fp->f_type = (vp->v_type == VFIFO ? DTYPE_FIFO : DTYPE_VNODE);
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
if (flags & (O_EXLOCK | O_SHLOCK)) {
lf.l_whence = SEEK_SET;
lf.l_start = 0;
@@ -1153,12 +1155,12 @@ open(p, uap)
if (flags & O_TRUNC) {
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
goto bad;
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
+ VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
VATTR_NULL(&vat);
vat.va_size = 0;
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- error = VOP_SETATTR(vp, &vat, p->p_ucred, p);
- VOP_UNLOCK(vp, 0, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ error = VOP_SETATTR(vp, &vat, p->p_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
if (error)
goto bad;
@@ -1170,15 +1172,15 @@ open(p, uap)
* Release our private reference, leaving the one associated with
* the descriptor table intact.
*/
- fdrop(fp, p);
- p->p_retval[0] = indx;
+ fdrop(fp, td);
+ td->td_retval[0] = indx;
return (0);
bad:
if (fdp->fd_ofiles[indx] == fp) {
fdp->fd_ofiles[indx] = NULL;
- fdrop(fp, p);
+ fdrop(fp, td);
}
- fdrop(fp, p);
+ fdrop(fp, td);
return (error);
}
@@ -1193,8 +1195,8 @@ struct ocreat_args {
};
#endif
int
-ocreat(p, uap)
- struct proc *p;
+ocreat(td, uap)
+ struct thread *td;
register struct ocreat_args /* {
syscallarg(char *) path;
syscallarg(int) mode;
@@ -1209,7 +1211,7 @@ ocreat(p, uap)
SCARG(&nuap, path) = SCARG(uap, path);
SCARG(&nuap, mode) = SCARG(uap, mode);
SCARG(&nuap, flags) = O_WRONLY | O_CREAT | O_TRUNC;
- return (open(p, &nuap));
+ return (open(td, &nuap));
}
#endif /* COMPAT_43 */
@@ -1225,8 +1227,8 @@ struct mknod_args {
#endif
/* ARGSUSED */
int
-mknod(p, uap)
- struct proc *p;
+mknod(td, uap)
+ struct thread *td;
register struct mknod_args /* {
syscallarg(char *) path;
syscallarg(int) mode;
@@ -1243,17 +1245,17 @@ mknod(p, uap)
switch (SCARG(uap, mode) & S_IFMT) {
case S_IFCHR:
case S_IFBLK:
- error = suser(p);
+ error = suser_td(td);
break;
default:
- error = suser_xxx(0, p, PRISON_ROOT);
+ error = suser_xxx(0, td->td_proc, PRISON_ROOT);
break;
}
if (error)
return (error);
restart:
bwillwrite();
- NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
vp = nd.ni_vp;
@@ -1262,7 +1264,7 @@ restart:
error = EEXIST;
} else {
VATTR_NULL(&vattr);
- vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ p->p_fd->fd_cmask;
+ vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ td->td_proc->p_fd->fd_cmask;
vattr.va_rdev = SCARG(uap, dev);
whiteout = 0;
@@ -1292,7 +1294,7 @@ restart:
goto restart;
}
if (!error) {
- VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
+ VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
if (whiteout)
error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, CREATE);
else {
@@ -1321,8 +1323,8 @@ struct mkfifo_args {
#endif
/* ARGSUSED */
int
-mkfifo(p, uap)
- struct proc *p;
+mkfifo(td, uap)
+ struct thread *td;
register struct mkfifo_args /* {
syscallarg(char *) path;
syscallarg(int) mode;
@@ -1335,7 +1337,7 @@ mkfifo(p, uap)
restart:
bwillwrite();
- NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
if (nd.ni_vp != NULL) {
@@ -1353,8 +1355,8 @@ restart:
}
VATTR_NULL(&vattr);
vattr.va_type = VFIFO;
- vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ p->p_fd->fd_cmask;
- VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
+ vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ td->td_proc->p_fd->fd_cmask;
+ VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
if (error == 0)
vput(nd.ni_vp);
@@ -1375,8 +1377,8 @@ struct link_args {
#endif
/* ARGSUSED */
int
-link(p, uap)
- struct proc *p;
+link(td, uap)
+ struct thread *td;
register struct link_args /* {
syscallarg(char *) path;
syscallarg(char *) link;
@@ -1388,7 +1390,7 @@ link(p, uap)
int error;
bwillwrite();
- NDINIT(&nd, LOOKUP, FOLLOW|NOOBJ, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW|NOOBJ, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
@@ -1401,14 +1403,14 @@ link(p, uap)
vrele(vp);
return (error);
}
- NDINIT(&nd, CREATE, LOCKPARENT|NOOBJ, UIO_USERSPACE, SCARG(uap, link), p);
+ NDINIT(&nd, CREATE, LOCKPARENT|NOOBJ, UIO_USERSPACE, SCARG(uap, link), td);
if ((error = namei(&nd)) == 0) {
if (nd.ni_vp != NULL) {
vrele(nd.ni_vp);
error = EEXIST;
} else {
- VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
+ VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
error = VOP_LINK(nd.ni_dvp, vp, &nd.ni_cnd);
}
NDFREE(&nd, NDF_ONLY_PNBUF);
@@ -1432,8 +1434,8 @@ struct symlink_args {
#endif
/* ARGSUSED */
int
-symlink(p, uap)
- struct proc *p;
+symlink(td, uap)
+ struct thread *td;
register struct symlink_args /* {
syscallarg(char *) path;
syscallarg(char *) link;
@@ -1450,7 +1452,7 @@ symlink(p, uap)
goto out;
restart:
bwillwrite();
- NDINIT(&nd, CREATE, LOCKPARENT|NOOBJ, UIO_USERSPACE, SCARG(uap, link), p);
+ NDINIT(&nd, CREATE, LOCKPARENT|NOOBJ, UIO_USERSPACE, SCARG(uap, link), td);
if ((error = namei(&nd)) != 0)
goto out;
if (nd.ni_vp) {
@@ -1468,8 +1470,8 @@ restart:
goto restart;
}
VATTR_NULL(&vattr);
- vattr.va_mode = ACCESSPERMS &~ p->p_fd->fd_cmask;
- VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
+ vattr.va_mode = ACCESSPERMS &~ td->td_proc->p_fd->fd_cmask;
+ VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
error = VOP_SYMLINK(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr, path);
NDFREE(&nd, NDF_ONLY_PNBUF);
if (error == 0)
@@ -1488,8 +1490,8 @@ out:
*/
/* ARGSUSED */
int
-undelete(p, uap)
- struct proc *p;
+undelete(td, uap)
+ struct thread *td;
register struct undelete_args /* {
syscallarg(char *) path;
} */ *uap;
@@ -1501,7 +1503,7 @@ undelete(p, uap)
restart:
bwillwrite();
NDINIT(&nd, DELETE, LOCKPARENT|DOWHITEOUT, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
error = namei(&nd);
if (error)
return (error);
@@ -1520,7 +1522,7 @@ restart:
return (error);
goto restart;
}
- VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
+ VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, DELETE);
NDFREE(&nd, NDF_ONLY_PNBUF);
vput(nd.ni_dvp);
@@ -1540,8 +1542,8 @@ struct unlink_args {
#endif
/* ARGSUSED */
int
-unlink(p, uap)
- struct proc *p;
+unlink(td, uap)
+ struct thread *td;
struct unlink_args /* {
syscallarg(char *) path;
} */ *uap;
@@ -1553,7 +1555,7 @@ unlink(p, uap)
restart:
bwillwrite();
- NDINIT(&nd, DELETE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, DELETE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
vp = nd.ni_vp;
@@ -1576,10 +1578,10 @@ restart:
return (error);
goto restart;
}
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
if (!error) {
- VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
+ VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
error = VOP_REMOVE(nd.ni_dvp, vp, &nd.ni_cnd);
}
NDFREE(&nd, NDF_ONLY_PNBUF);
@@ -1603,8 +1605,8 @@ struct lseek_args {
};
#endif
int
-lseek(p, uap)
- struct proc *p;
+lseek(td, uap)
+ struct thread *td;
register struct lseek_args /* {
syscallarg(int) fd;
syscallarg(int) pad;
@@ -1612,8 +1614,8 @@ lseek(p, uap)
syscallarg(int) whence;
} */ *uap;
{
- struct ucred *cred = p->p_ucred;
- register struct filedesc *fdp = p->p_fd;
+ struct ucred *cred = td->td_proc->p_ucred;
+ register struct filedesc *fdp = td->td_proc->p_fd;
register struct file *fp;
struct vattr vattr;
struct vnode *vp;
@@ -1637,7 +1639,7 @@ lseek(p, uap)
offset += fp->f_offset;
break;
case L_XTND:
- error = VOP_GETATTR(vp, &vattr, cred, p);
+ error = VOP_GETATTR(vp, &vattr, cred, td);
if (error)
return (error);
if (noneg &&
@@ -1654,7 +1656,7 @@ lseek(p, uap)
if (noneg && offset < 0)
return (EINVAL);
fp->f_offset = offset;
- *(off_t *)(p->p_retval) = fp->f_offset;
+ *(off_t *)(td->td_retval) = fp->f_offset;
return (0);
}
@@ -1670,8 +1672,8 @@ struct olseek_args {
};
#endif
int
-olseek(p, uap)
- struct proc *p;
+olseek(td, uap)
+ struct thread *td;
register struct olseek_args /* {
syscallarg(int) fd;
syscallarg(long) offset;
@@ -1689,7 +1691,7 @@ olseek(p, uap)
SCARG(&nuap, fd) = SCARG(uap, fd);
SCARG(&nuap, offset) = SCARG(uap, offset);
SCARG(&nuap, whence) = SCARG(uap, whence);
- error = lseek(p, &nuap);
+ error = lseek(td, &nuap);
return (error);
}
#endif /* COMPAT_43 */
@@ -1704,8 +1706,8 @@ struct access_args {
};
#endif
int
-access(p, uap)
- struct proc *p;
+access(td, uap)
+ struct thread *td;
register struct access_args /* {
syscallarg(char *) path;
syscallarg(int) flags;
@@ -1716,7 +1718,7 @@ access(p, uap)
int error, flags;
struct nameidata nd;
- cred = p->p_ucred;
+ cred = td->td_proc->p_ucred;
/*
* Create and modify a temporary credential instead of one that
* is potentially shared. This could also mess up socket
@@ -1729,9 +1731,9 @@ access(p, uap)
tmpcred = crdup(cred);
tmpcred->cr_uid = cred->cr_ruid;
tmpcred->cr_groups[0] = cred->cr_rgid;
- p->p_ucred = tmpcred;
+ td->td_proc->p_ucred = tmpcred;
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
goto out1;
vp = nd.ni_vp;
@@ -1746,12 +1748,12 @@ access(p, uap)
if (SCARG(uap, flags) & X_OK)
flags |= VEXEC;
if ((flags & VWRITE) == 0 || (error = vn_writechk(vp)) == 0)
- error = VOP_ACCESS(vp, flags, tmpcred, p);
+ error = VOP_ACCESS(vp, flags, tmpcred, td);
}
NDFREE(&nd, NDF_ONLY_PNBUF);
vput(vp);
out1:
- p->p_ucred = cred;
+ td->td_proc->p_ucred = cred;
crfree(tmpcred);
return (error);
}
@@ -1768,8 +1770,8 @@ struct ostat_args {
#endif
/* ARGSUSED */
int
-ostat(p, uap)
- struct proc *p;
+ostat(td, uap)
+ struct thread *td;
register struct ostat_args /* {
syscallarg(char *) path;
syscallarg(struct ostat *) ub;
@@ -1781,11 +1783,11 @@ ostat(p, uap)
struct nameidata nd;
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = vn_stat(nd.ni_vp, &sb, p);
+ error = vn_stat(nd.ni_vp, &sb, td);
vput(nd.ni_vp);
if (error)
return (error);
@@ -1805,8 +1807,8 @@ struct olstat_args {
#endif
/* ARGSUSED */
int
-olstat(p, uap)
- struct proc *p;
+olstat(td, uap)
+ struct thread *td;
register struct olstat_args /* {
syscallarg(char *) path;
syscallarg(struct ostat *) ub;
@@ -1819,11 +1821,11 @@ olstat(p, uap)
struct nameidata nd;
NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
vp = nd.ni_vp;
- error = vn_stat(vp, &sb, p);
+ error = vn_stat(vp, &sb, td);
NDFREE(&nd, NDF_ONLY_PNBUF);
vput(vp);
if (error)
@@ -1874,8 +1876,8 @@ struct stat_args {
#endif
/* ARGSUSED */
int
-stat(p, uap)
- struct proc *p;
+stat(td, uap)
+ struct thread *td;
register struct stat_args /* {
syscallarg(char *) path;
syscallarg(struct stat *) ub;
@@ -1886,10 +1888,10 @@ stat(p, uap)
struct nameidata nd;
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
- error = vn_stat(nd.ni_vp, &sb, p);
+ error = vn_stat(nd.ni_vp, &sb, td);
NDFREE(&nd, NDF_ONLY_PNBUF);
vput(nd.ni_vp);
if (error)
@@ -1909,8 +1911,8 @@ struct lstat_args {
#endif
/* ARGSUSED */
int
-lstat(p, uap)
- struct proc *p;
+lstat(td, uap)
+ struct thread *td;
register struct lstat_args /* {
syscallarg(char *) path;
syscallarg(struct stat *) ub;
@@ -1922,11 +1924,11 @@ lstat(p, uap)
struct nameidata nd;
NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
vp = nd.ni_vp;
- error = vn_stat(vp, &sb, p);
+ error = vn_stat(vp, &sb, td);
NDFREE(&nd, NDF_ONLY_PNBUF);
vput(vp);
if (error)
@@ -1974,8 +1976,8 @@ struct nstat_args {
#endif
/* ARGSUSED */
int
-nstat(p, uap)
- struct proc *p;
+nstat(td, uap)
+ struct thread *td;
register struct nstat_args /* {
syscallarg(char *) path;
syscallarg(struct nstat *) ub;
@@ -1987,11 +1989,11 @@ nstat(p, uap)
struct nameidata nd;
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = vn_stat(nd.ni_vp, &sb, p);
+ error = vn_stat(nd.ni_vp, &sb, td);
vput(nd.ni_vp);
if (error)
return (error);
@@ -2011,8 +2013,8 @@ struct lstat_args {
#endif
/* ARGSUSED */
int
-nlstat(p, uap)
- struct proc *p;
+nlstat(td, uap)
+ struct thread *td;
register struct nlstat_args /* {
syscallarg(char *) path;
syscallarg(struct nstat *) ub;
@@ -2025,12 +2027,12 @@ nlstat(p, uap)
struct nameidata nd;
NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
vp = nd.ni_vp;
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = vn_stat(vp, &sb, p);
+ error = vn_stat(vp, &sb, td);
vput(vp);
if (error)
return (error);
@@ -2050,8 +2052,8 @@ struct pathconf_args {
#endif
/* ARGSUSED */
int
-pathconf(p, uap)
- struct proc *p;
+pathconf(td, uap)
+ struct thread *td;
register struct pathconf_args /* {
syscallarg(char *) path;
syscallarg(int) name;
@@ -2061,11 +2063,11 @@ pathconf(p, uap)
struct nameidata nd;
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = VOP_PATHCONF(nd.ni_vp, SCARG(uap, name), p->p_retval);
+ error = VOP_PATHCONF(nd.ni_vp, SCARG(uap, name), td->td_retval);
vput(nd.ni_vp);
return (error);
}
@@ -2082,8 +2084,8 @@ struct readlink_args {
#endif
/* ARGSUSED */
int
-readlink(p, uap)
- struct proc *p;
+readlink(td, uap)
+ struct thread *td;
register struct readlink_args /* {
syscallarg(char *) path;
syscallarg(char *) buf;
@@ -2097,7 +2099,7 @@ readlink(p, uap)
struct nameidata nd;
NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
@@ -2112,12 +2114,12 @@ readlink(p, uap)
auio.uio_offset = 0;
auio.uio_rw = UIO_READ;
auio.uio_segflg = UIO_USERSPACE;
- auio.uio_procp = p;
+ auio.uio_td = td;
auio.uio_resid = SCARG(uap, count);
- error = VOP_READLINK(vp, &auio, p->p_ucred);
+ error = VOP_READLINK(vp, &auio, td->td_proc->p_ucred);
}
vput(vp);
- p->p_retval[0] = SCARG(uap, count) - auio.uio_resid;
+ td->td_retval[0] = SCARG(uap, count) - auio.uio_resid;
return (error);
}
@@ -2125,8 +2127,8 @@ readlink(p, uap)
* Common implementation code for chflags() and fchflags().
*/
static int
-setfflags(p, vp, flags)
- struct proc *p;
+setfflags(td, vp, flags)
+ struct thread *td;
struct vnode *vp;
int flags;
{
@@ -2141,17 +2143,17 @@ setfflags(p, vp, flags)
* chown can't fail when done as root.
*/
if ((vp->v_type == VCHR || vp->v_type == VBLK) &&
- ((error = suser_xxx(p->p_ucred, p, PRISON_ROOT)) != 0))
+ ((error = suser_xxx(td->td_proc->p_ucred, td->td_proc, PRISON_ROOT)) != 0))
return (error);
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
VATTR_NULL(&vattr);
vattr.va_flags = flags;
- error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
- VOP_UNLOCK(vp, 0, p);
+ error = VOP_SETATTR(vp, &vattr, td->td_proc->p_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
return (error);
}
@@ -2167,8 +2169,8 @@ struct chflags_args {
#endif
/* ARGSUSED */
int
-chflags(p, uap)
- struct proc *p;
+chflags(td, uap)
+ struct thread *td;
register struct chflags_args /* {
syscallarg(char *) path;
syscallarg(int) flags;
@@ -2177,11 +2179,11 @@ chflags(p, uap)
int error;
struct nameidata nd;
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = setfflags(p, nd.ni_vp, SCARG(uap, flags));
+ error = setfflags(td, nd.ni_vp, SCARG(uap, flags));
vrele(nd.ni_vp);
return error;
}
@@ -2197,8 +2199,8 @@ struct fchflags_args {
#endif
/* ARGSUSED */
int
-fchflags(p, uap)
- struct proc *p;
+fchflags(td, uap)
+ struct thread *td;
register struct fchflags_args /* {
syscallarg(int) fd;
syscallarg(int) flags;
@@ -2207,17 +2209,17 @@ fchflags(p, uap)
struct file *fp;
int error;
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
- return setfflags(p, (struct vnode *) fp->f_data, SCARG(uap, flags));
+ return setfflags(td, (struct vnode *) fp->f_data, SCARG(uap, flags));
}
/*
* Common implementation code for chmod(), lchmod() and fchmod().
*/
static int
-setfmode(p, vp, mode)
- struct proc *p;
+setfmode(td, vp, mode)
+ struct thread *td;
struct vnode *vp;
int mode;
{
@@ -2227,12 +2229,12 @@ setfmode(p, vp, mode)
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
VATTR_NULL(&vattr);
vattr.va_mode = mode & ALLPERMS;
- error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
- VOP_UNLOCK(vp, 0, p);
+ error = VOP_SETATTR(vp, &vattr, td->td_proc->p_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
return error;
}
@@ -2248,8 +2250,8 @@ struct chmod_args {
#endif
/* ARGSUSED */
int
-chmod(p, uap)
- struct proc *p;
+chmod(td, uap)
+ struct thread *td;
register struct chmod_args /* {
syscallarg(char *) path;
syscallarg(int) mode;
@@ -2258,11 +2260,11 @@ chmod(p, uap)
int error;
struct nameidata nd;
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = setfmode(p, nd.ni_vp, SCARG(uap, mode));
+ error = setfmode(td, nd.ni_vp, SCARG(uap, mode));
vrele(nd.ni_vp);
return error;
}
@@ -2278,8 +2280,8 @@ struct lchmod_args {
#endif
/* ARGSUSED */
int
-lchmod(p, uap)
- struct proc *p;
+lchmod(td, uap)
+ struct thread *td;
register struct lchmod_args /* {
syscallarg(char *) path;
syscallarg(int) mode;
@@ -2288,11 +2290,11 @@ lchmod(p, uap)
int error;
struct nameidata nd;
- NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = setfmode(p, nd.ni_vp, SCARG(uap, mode));
+ error = setfmode(td, nd.ni_vp, SCARG(uap, mode));
vrele(nd.ni_vp);
return error;
}
@@ -2308,8 +2310,8 @@ struct fchmod_args {
#endif
/* ARGSUSED */
int
-fchmod(p, uap)
- struct proc *p;
+fchmod(td, uap)
+ struct thread *td;
register struct fchmod_args /* {
syscallarg(int) fd;
syscallarg(int) mode;
@@ -2318,17 +2320,17 @@ fchmod(p, uap)
struct file *fp;
int error;
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
- return setfmode(p, (struct vnode *)fp->f_data, SCARG(uap, mode));
+ return setfmode(td, (struct vnode *)fp->f_data, SCARG(uap, mode));
}
/*
* Common implementation for chown(), lchown(), and fchown()
*/
static int
-setfown(p, vp, uid, gid)
- struct proc *p;
+setfown(td, vp, uid, gid)
+ struct thread *td;
struct vnode *vp;
uid_t uid;
gid_t gid;
@@ -2339,13 +2341,13 @@ setfown(p, vp, uid, gid)
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
VATTR_NULL(&vattr);
vattr.va_uid = uid;
vattr.va_gid = gid;
- error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
- VOP_UNLOCK(vp, 0, p);
+ error = VOP_SETATTR(vp, &vattr, td->td_proc->p_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
return error;
}
@@ -2362,8 +2364,8 @@ struct chown_args {
#endif
/* ARGSUSED */
int
-chown(p, uap)
- struct proc *p;
+chown(td, uap)
+ struct thread *td;
register struct chown_args /* {
syscallarg(char *) path;
syscallarg(int) uid;
@@ -2373,11 +2375,11 @@ chown(p, uap)
int error;
struct nameidata nd;
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = setfown(p, nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid));
+ error = setfown(td, nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid));
vrele(nd.ni_vp);
return (error);
}
@@ -2394,8 +2396,8 @@ struct lchown_args {
#endif
/* ARGSUSED */
int
-lchown(p, uap)
- struct proc *p;
+lchown(td, uap)
+ struct thread *td;
register struct lchown_args /* {
syscallarg(char *) path;
syscallarg(int) uid;
@@ -2405,11 +2407,11 @@ lchown(p, uap)
int error;
struct nameidata nd;
- NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = setfown(p, nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid));
+ error = setfown(td, nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid));
vrele(nd.ni_vp);
return (error);
}
@@ -2426,8 +2428,8 @@ struct fchown_args {
#endif
/* ARGSUSED */
int
-fchown(p, uap)
- struct proc *p;
+fchown(td, uap)
+ struct thread *td;
register struct fchown_args /* {
syscallarg(int) fd;
syscallarg(int) uid;
@@ -2437,9 +2439,9 @@ fchown(p, uap)
struct file *fp;
int error;
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
- return setfown(p, (struct vnode *)fp->f_data,
+ return setfown(td, (struct vnode *)fp->f_data,
SCARG(uap, uid), SCARG(uap, gid));
}
@@ -2471,8 +2473,8 @@ getutimes(usrtvp, tsp)
* Common implementation code for utimes(), lutimes(), and futimes().
*/
static int
-setutimes(p, vp, ts, nullflag)
- struct proc *p;
+setutimes(td, vp, ts, nullflag)
+ struct thread *td;
struct vnode *vp;
const struct timespec *ts;
int nullflag;
@@ -2483,15 +2485,15 @@ setutimes(p, vp, ts, nullflag)
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
VATTR_NULL(&vattr);
vattr.va_atime = ts[0];
vattr.va_mtime = ts[1];
if (nullflag)
vattr.va_vaflags |= VA_UTIMES_NULL;
- error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
- VOP_UNLOCK(vp, 0, p);
+ error = VOP_SETATTR(vp, &vattr, td->td_proc->p_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
return error;
}
@@ -2507,8 +2509,8 @@ struct utimes_args {
#endif
/* ARGSUSED */
int
-utimes(p, uap)
- struct proc *p;
+utimes(td, uap)
+ struct thread *td;
register struct utimes_args /* {
syscallarg(char *) path;
syscallarg(struct timeval *) tptr;
@@ -2522,11 +2524,11 @@ utimes(p, uap)
usrtvp = SCARG(uap, tptr);
if ((error = getutimes(usrtvp, ts)) != 0)
return (error);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = setutimes(p, nd.ni_vp, ts, usrtvp == NULL);
+ error = setutimes(td, nd.ni_vp, ts, usrtvp == NULL);
vrele(nd.ni_vp);
return (error);
}
@@ -2542,8 +2544,8 @@ struct lutimes_args {
#endif
/* ARGSUSED */
int
-lutimes(p, uap)
- struct proc *p;
+lutimes(td, uap)
+ struct thread *td;
register struct lutimes_args /* {
syscallarg(char *) path;
syscallarg(struct timeval *) tptr;
@@ -2557,11 +2559,11 @@ lutimes(p, uap)
usrtvp = SCARG(uap, tptr);
if ((error = getutimes(usrtvp, ts)) != 0)
return (error);
- NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = setutimes(p, nd.ni_vp, ts, usrtvp == NULL);
+ error = setutimes(td, nd.ni_vp, ts, usrtvp == NULL);
vrele(nd.ni_vp);
return (error);
}
@@ -2577,8 +2579,8 @@ struct futimes_args {
#endif
/* ARGSUSED */
int
-futimes(p, uap)
- struct proc *p;
+futimes(td, uap)
+ struct thread *td;
register struct futimes_args /* {
syscallarg(int ) fd;
syscallarg(struct timeval *) tptr;
@@ -2592,9 +2594,9 @@ futimes(p, uap)
usrtvp = SCARG(uap, tptr);
if ((error = getutimes(usrtvp, ts)) != 0)
return (error);
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
- return setutimes(p, (struct vnode *)fp->f_data, ts, usrtvp == NULL);
+ return setutimes(td, (struct vnode *)fp->f_data, ts, usrtvp == NULL);
}
/*
@@ -2609,8 +2611,8 @@ struct truncate_args {
#endif
/* ARGSUSED */
int
-truncate(p, uap)
- struct proc *p;
+truncate(td, uap)
+ struct thread *td;
register struct truncate_args /* {
syscallarg(char *) path;
syscallarg(int) pad;
@@ -2625,7 +2627,7 @@ truncate(p, uap)
if (uap->length < 0)
return(EINVAL);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
vp = nd.ni_vp;
@@ -2634,15 +2636,15 @@ truncate(p, uap)
return (error);
}
NDFREE(&nd, NDF_ONLY_PNBUF);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
if (vp->v_type == VDIR)
error = EISDIR;
else if ((error = vn_writechk(vp)) == 0 &&
- (error = VOP_ACCESS(vp, VWRITE, p->p_ucred, p)) == 0) {
+ (error = VOP_ACCESS(vp, VWRITE, td->td_proc->p_ucred, td)) == 0) {
VATTR_NULL(&vattr);
vattr.va_size = SCARG(uap, length);
- error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
+ error = VOP_SETATTR(vp, &vattr, td->td_proc->p_ucred, td);
}
vput(vp);
vn_finished_write(mp);
@@ -2661,8 +2663,8 @@ struct ftruncate_args {
#endif
/* ARGSUSED */
int
-ftruncate(p, uap)
- struct proc *p;
+ftruncate(td, uap)
+ struct thread *td;
register struct ftruncate_args /* {
syscallarg(int) fd;
syscallarg(int) pad;
@@ -2677,23 +2679,23 @@ ftruncate(p, uap)
if (uap->length < 0)
return(EINVAL);
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
if ((fp->f_flag & FWRITE) == 0)
return (EINVAL);
vp = (struct vnode *)fp->f_data;
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
if (vp->v_type == VDIR)
error = EISDIR;
else if ((error = vn_writechk(vp)) == 0) {
VATTR_NULL(&vattr);
vattr.va_size = SCARG(uap, length);
- error = VOP_SETATTR(vp, &vattr, fp->f_cred, p);
+ error = VOP_SETATTR(vp, &vattr, fp->f_cred, td);
}
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
return (error);
}
@@ -2710,8 +2712,8 @@ struct otruncate_args {
#endif
/* ARGSUSED */
int
-otruncate(p, uap)
- struct proc *p;
+otruncate(td, uap)
+ struct thread *td;
register struct otruncate_args /* {
syscallarg(char *) path;
syscallarg(long) length;
@@ -2725,7 +2727,7 @@ otruncate(p, uap)
SCARG(&nuap, path) = SCARG(uap, path);
SCARG(&nuap, length) = SCARG(uap, length);
- return (truncate(p, &nuap));
+ return (truncate(td, &nuap));
}
/*
@@ -2739,8 +2741,8 @@ struct oftruncate_args {
#endif
/* ARGSUSED */
int
-oftruncate(p, uap)
- struct proc *p;
+oftruncate(td, uap)
+ struct thread *td;
register struct oftruncate_args /* {
syscallarg(int) fd;
syscallarg(long) length;
@@ -2754,7 +2756,7 @@ oftruncate(p, uap)
SCARG(&nuap, fd) = SCARG(uap, fd);
SCARG(&nuap, length) = SCARG(uap, length);
- return (ftruncate(p, &nuap));
+ return (ftruncate(td, &nuap));
}
#endif /* COMPAT_43 || COMPAT_SUNOS */
@@ -2768,8 +2770,8 @@ struct fsync_args {
#endif
/* ARGSUSED */
int
-fsync(p, uap)
- struct proc *p;
+fsync(td, uap)
+ struct thread *td;
struct fsync_args /* {
syscallarg(int) fd;
} */ *uap;
@@ -2782,22 +2784,22 @@ fsync(p, uap)
GIANT_REQUIRED;
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
vp = (struct vnode *)fp->f_data;
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
if (VOP_GETVOBJECT(vp, &obj) == 0) {
vm_object_page_clean(obj, 0, 0, 0);
}
- error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p);
+ error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, td);
#ifdef SOFTUPDATES
if (error == 0 && vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
error = softdep_fsync(vp);
#endif
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
return (error);
}
@@ -2814,8 +2816,8 @@ struct rename_args {
#endif
/* ARGSUSED */
int
-rename(p, uap)
- struct proc *p;
+rename(td, uap)
+ struct thread *td;
register struct rename_args /* {
syscallarg(char *) from;
syscallarg(char *) to;
@@ -2828,7 +2830,7 @@ rename(p, uap)
bwillwrite();
NDINIT(&fromnd, DELETE, WANTPARENT | SAVESTART, UIO_USERSPACE,
- SCARG(uap, from), p);
+ SCARG(uap, from), td);
if ((error = namei(&fromnd)) != 0)
return (error);
fvp = fromnd.ni_vp;
@@ -2839,7 +2841,7 @@ rename(p, uap)
goto out1;
}
NDINIT(&tond, RENAME, LOCKPARENT | LOCKLEAF | NOCACHE | SAVESTART | NOOBJ,
- UIO_USERSPACE, SCARG(uap, to), p);
+ UIO_USERSPACE, SCARG(uap, to), td);
if (fromnd.ni_vp->v_type == VDIR)
tond.ni_cnd.cn_flags |= WILLBEDIR;
if ((error = namei(&tond)) != 0) {
@@ -2876,12 +2878,12 @@ rename(p, uap)
error = -1;
out:
if (!error) {
- VOP_LEASE(tdvp, p, p->p_ucred, LEASE_WRITE);
+ VOP_LEASE(tdvp, td, td->td_proc->p_ucred, LEASE_WRITE);
if (fromnd.ni_dvp != tdvp) {
- VOP_LEASE(fromnd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
+ VOP_LEASE(fromnd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
}
if (tvp) {
- VOP_LEASE(tvp, p, p->p_ucred, LEASE_WRITE);
+ VOP_LEASE(tvp, td, td->td_proc->p_ucred, LEASE_WRITE);
}
error = VOP_RENAME(fromnd.ni_dvp, fromnd.ni_vp, &fromnd.ni_cnd,
tond.ni_dvp, tond.ni_vp, &tond.ni_cnd);
@@ -2924,13 +2926,24 @@ struct mkdir_args {
#endif
/* ARGSUSED */
int
-mkdir(p, uap)
- struct proc *p;
+mkdir(td, uap)
+ struct thread *td;
register struct mkdir_args /* {
syscallarg(char *) path;
syscallarg(int) mode;
} */ *uap;
{
+
+ return vn_mkdir(uap->path, uap->mode, UIO_USERSPACE, td);
+}
+
+int
+vn_mkdir(path, mode, segflg, td)
+ char *path;
+ int mode;
+ enum uio_seg segflg;
+ struct thread *td;
+{
struct mount *mp;
struct vnode *vp;
struct vattr vattr;
@@ -2939,7 +2952,7 @@ mkdir(p, uap)
restart:
bwillwrite();
- NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, CREATE, LOCKPARENT, segflg, path, td);
nd.ni_cnd.cn_flags |= WILLBEDIR;
if ((error = namei(&nd)) != 0)
return (error);
@@ -2959,8 +2972,8 @@ restart:
}
VATTR_NULL(&vattr);
vattr.va_type = VDIR;
- vattr.va_mode = (SCARG(uap, mode) & ACCESSPERMS) &~ p->p_fd->fd_cmask;
- VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
+ vattr.va_mode = (mode & ACCESSPERMS) &~ td->td_proc->p_fd->fd_cmask;
+ VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
error = VOP_MKDIR(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
NDFREE(&nd, NDF_ONLY_PNBUF);
vput(nd.ni_dvp);
@@ -2982,8 +2995,8 @@ struct rmdir_args {
#endif
/* ARGSUSED */
int
-rmdir(p, uap)
- struct proc *p;
+rmdir(td, uap)
+ struct thread *td;
struct rmdir_args /* {
syscallarg(char *) path;
} */ *uap;
@@ -2996,7 +3009,7 @@ rmdir(p, uap)
restart:
bwillwrite();
NDINIT(&nd, DELETE, LOCKPARENT | LOCKLEAF, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
vp = nd.ni_vp;
@@ -3029,8 +3042,8 @@ restart:
return (error);
goto restart;
}
- VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
+ VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
error = VOP_RMDIR(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd);
vn_finished_write(mp);
out:
@@ -3058,8 +3071,8 @@ struct ogetdirentries_args {
};
#endif
int
-ogetdirentries(p, uap)
- struct proc *p;
+ogetdirentries(td, uap)
+ struct thread *td;
register struct ogetdirentries_args /* {
syscallarg(int) fd;
syscallarg(char *) buf;
@@ -3079,7 +3092,7 @@ ogetdirentries(p, uap)
/* XXX arbitrary sanity limit on `count'. */
if (SCARG(uap, count) > 64 * 1024)
return (EINVAL);
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
if ((fp->f_flag & FREAD) == 0)
return (EBADF);
@@ -3093,9 +3106,9 @@ unionread:
auio.uio_iovcnt = 1;
auio.uio_rw = UIO_READ;
auio.uio_segflg = UIO_USERSPACE;
- auio.uio_procp = p;
+ auio.uio_td = td;
auio.uio_resid = SCARG(uap, count);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
loff = auio.uio_offset = fp->f_offset;
# if (BYTE_ORDER != LITTLE_ENDIAN)
if (vp->v_mount->mnt_maxsymlinklen <= 0) {
@@ -3148,12 +3161,12 @@ unionread:
}
FREE(dirbuf, M_TEMP);
}
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
if (error)
return (error);
if (SCARG(uap, count) == auio.uio_resid) {
if (union_dircheckp) {
- error = union_dircheckp(p, &vp, fp);
+ error = union_dircheckp(td, &vp, fp);
if (error == -1)
goto unionread;
if (error)
@@ -3172,7 +3185,7 @@ unionread:
}
error = copyout((caddr_t)&loff, (caddr_t)SCARG(uap, basep),
sizeof(long));
- p->p_retval[0] = SCARG(uap, count) - auio.uio_resid;
+ td->td_retval[0] = SCARG(uap, count) - auio.uio_resid;
return (error);
}
#endif /* COMPAT_43 */
@@ -3189,8 +3202,8 @@ struct getdirentries_args {
};
#endif
int
-getdirentries(p, uap)
- struct proc *p;
+getdirentries(td, uap)
+ struct thread *td;
register struct getdirentries_args /* {
syscallarg(int) fd;
syscallarg(char *) buf;
@@ -3205,7 +3218,7 @@ getdirentries(p, uap)
long loff;
int error, eofflag;
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
if ((fp->f_flag & FREAD) == 0)
return (EBADF);
@@ -3219,19 +3232,19 @@ unionread:
auio.uio_iovcnt = 1;
auio.uio_rw = UIO_READ;
auio.uio_segflg = UIO_USERSPACE;
- auio.uio_procp = p;
+ auio.uio_td = td;
auio.uio_resid = SCARG(uap, count);
- /* vn_lock(vp, LK_SHARED | LK_RETRY, p); */
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ /* vn_lock(vp, LK_SHARED | LK_RETRY, td); */
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
loff = auio.uio_offset = fp->f_offset;
error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, NULL, NULL);
fp->f_offset = auio.uio_offset;
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
if (error)
return (error);
if (SCARG(uap, count) == auio.uio_resid) {
if (union_dircheckp) {
- error = union_dircheckp(p, &vp, fp);
+ error = union_dircheckp(td, &vp, fp);
if (error == -1)
goto unionread;
if (error)
@@ -3252,7 +3265,7 @@ unionread:
error = copyout((caddr_t)&loff, (caddr_t)SCARG(uap, basep),
sizeof(long));
}
- p->p_retval[0] = SCARG(uap, count) - auio.uio_resid;
+ td->td_retval[0] = SCARG(uap, count) - auio.uio_resid;
return (error);
}
#ifndef _SYS_SYSPROTO_H_
@@ -3263,8 +3276,8 @@ struct getdents_args {
};
#endif
int
-getdents(p, uap)
- struct proc *p;
+getdents(td, uap)
+ struct thread *td;
register struct getdents_args /* {
syscallarg(int) fd;
syscallarg(char *) buf;
@@ -3276,7 +3289,7 @@ getdents(p, uap)
ap.buf = uap->buf;
ap.count = uap->count;
ap.basep = NULL;
- return getdirentries(p, &ap);
+ return getdirentries(td, &ap);
}
/*
@@ -3290,16 +3303,16 @@ struct umask_args {
};
#endif
int
-umask(p, uap)
- struct proc *p;
+umask(td, uap)
+ struct thread *td;
struct umask_args /* {
syscallarg(int) newmask;
} */ *uap;
{
register struct filedesc *fdp;
- fdp = p->p_fd;
- p->p_retval[0] = fdp->fd_cmask;
+ fdp = td->td_proc->p_fd;
+ td->td_retval[0] = fdp->fd_cmask;
fdp->fd_cmask = SCARG(uap, newmask) & ALLPERMS;
return (0);
}
@@ -3315,8 +3328,8 @@ struct revoke_args {
#endif
/* ARGSUSED */
int
-revoke(p, uap)
- struct proc *p;
+revoke(td, uap)
+ struct thread *td;
register struct revoke_args /* {
syscallarg(char *) path;
} */ *uap;
@@ -3327,7 +3340,7 @@ revoke(p, uap)
int error;
struct nameidata nd;
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
vp = nd.ni_vp;
@@ -3336,10 +3349,10 @@ revoke(p, uap)
error = EINVAL;
goto out;
}
- if ((error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) != 0)
+ if ((error = VOP_GETATTR(vp, &vattr, td->td_proc->p_ucred, td)) != 0)
goto out;
- if (p->p_ucred->cr_uid != vattr.va_uid &&
- (error = suser_xxx(0, p, PRISON_ROOT)))
+ if (td->td_proc->p_ucred->cr_uid != vattr.va_uid &&
+ (error = suser_xxx(0, td->td_proc, PRISON_ROOT)))
goto out;
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
goto out;
@@ -3380,8 +3393,8 @@ struct getfh_args {
};
#endif
int
-getfh(p, uap)
- struct proc *p;
+getfh(td, uap)
+ struct thread *td;
register struct getfh_args *uap;
{
struct nameidata nd;
@@ -3392,10 +3405,10 @@ getfh(p, uap)
/*
* Must be super user
*/
- error = suser(p);
+ error = suser_td(td);
if (error)
return (error);
- NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, uap->fname, p);
+ NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, uap->fname, td);
error = namei(&nd);
if (error)
return (error);
@@ -3425,13 +3438,14 @@ struct fhopen_args {
};
#endif
int
-fhopen(p, uap)
- struct proc *p;
+fhopen(td, uap)
+ struct thread *td;
struct fhopen_args /* {
syscallarg(const struct fhandle *) u_fhp;
syscallarg(int) flags;
} */ *uap;
{
+ struct proc *p = td->td_proc;
struct mount *mp;
struct vnode *vp;
struct fhandle fhp;
@@ -3447,7 +3461,7 @@ fhopen(p, uap)
/*
* Must be super user
*/
- error = suser(p);
+ error = suser_td(td);
if (error)
return (error);
@@ -3498,33 +3512,33 @@ fhopen(p, uap)
if (fmode & FREAD)
mode |= VREAD;
if (mode) {
- error = VOP_ACCESS(vp, mode, p->p_ucred, p);
+ error = VOP_ACCESS(vp, mode, p->p_ucred, td);
if (error)
goto bad;
}
if (fmode & O_TRUNC) {
- VOP_UNLOCK(vp, 0, p); /* XXX */
+ VOP_UNLOCK(vp, 0, td); /* XXX */
if ((error = vn_start_write(NULL, &mp, V_WAIT | PCATCH)) != 0) {
vrele(vp);
return (error);
}
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); /* XXX */
+ VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); /* XXX */
VATTR_NULL(vap);
vap->va_size = 0;
- error = VOP_SETATTR(vp, vap, p->p_ucred, p);
+ error = VOP_SETATTR(vp, vap, p->p_ucred, td);
vn_finished_write(mp);
if (error)
goto bad;
}
- error = VOP_OPEN(vp, fmode, p->p_ucred, p);
+ error = VOP_OPEN(vp, fmode, p->p_ucred, td);
if (error)
goto bad;
/*
* Make sure that a VM object is created for VMIO support.
*/
if (vn_canvmio(vp) == TRUE) {
- if ((error = vfs_object_create(vp, p, p->p_ucred)) != 0)
+ if ((error = vfs_object_create(vp, td, p->p_ucred)) != 0)
goto bad;
}
if (fmode & FWRITE)
@@ -3534,7 +3548,7 @@ fhopen(p, uap)
* end of vn_open code
*/
- if ((error = falloc(p, &nfp, &indx)) != 0)
+ if ((error = falloc(td, &nfp, &indx)) != 0)
goto bad;
fp = nfp;
@@ -3558,7 +3572,7 @@ fhopen(p, uap)
type = F_FLOCK;
if ((fmode & FNONBLOCK) == 0)
type |= F_WAIT;
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
/*
* The lock request failed. Normally close the
@@ -3567,23 +3581,23 @@ fhopen(p, uap)
*/
if (fdp->fd_ofiles[indx] == fp) {
fdp->fd_ofiles[indx] = NULL;
- fdrop(fp, p);
+ fdrop(fp, td);
}
/*
* release our private reference
*/
- fdrop(fp, p);
+ fdrop(fp, td);
return(error);
}
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
fp->f_flag |= FHASLOCK;
}
if ((vp->v_type == VREG) && (VOP_GETVOBJECT(vp, NULL) != 0))
- vfs_object_create(vp, p, p->p_ucred);
+ vfs_object_create(vp, td, p->p_ucred);
- VOP_UNLOCK(vp, 0, p);
- fdrop(fp, p);
- p->p_retval[0] = indx;
+ VOP_UNLOCK(vp, 0, td);
+ fdrop(fp, td);
+ td->td_retval[0] = indx;
return (0);
bad:
@@ -3601,8 +3615,8 @@ struct fhstat_args {
};
#endif
int
-fhstat(p, uap)
- struct proc *p;
+fhstat(td, uap)
+ struct thread *td;
register struct fhstat_args /* {
syscallarg(struct fhandle *) u_fhp;
syscallarg(struct stat *) sb;
@@ -3617,7 +3631,7 @@ fhstat(p, uap)
/*
* Must be super user
*/
- error = suser(p);
+ error = suser_td(td);
if (error)
return (error);
@@ -3629,7 +3643,7 @@ fhstat(p, uap)
return (ESTALE);
if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
return (error);
- error = vn_stat(vp, &sb, p);
+ error = vn_stat(vp, &sb, td);
vput(vp);
if (error)
return (error);
@@ -3647,8 +3661,8 @@ struct fhstatfs_args {
};
#endif
int
-fhstatfs(p, uap)
- struct proc *p;
+fhstatfs(td, uap)
+ struct thread *td;
struct fhstatfs_args /* {
syscallarg(struct fhandle) *u_fhp;
syscallarg(struct statfs) *buf;
@@ -3664,7 +3678,7 @@ fhstatfs(p, uap)
/*
* Must be super user
*/
- if ((error = suser(p)))
+ if ((error = suser_td(td)))
return (error);
if ((error = copyin(SCARG(uap, u_fhp), &fh, sizeof(fhandle_t))) != 0)
@@ -3677,10 +3691,10 @@ fhstatfs(p, uap)
mp = vp->v_mount;
sp = &mp->mnt_stat;
vput(vp);
- if ((error = VFS_STATFS(mp, sp, p)) != 0)
+ if ((error = VFS_STATFS(mp, sp, td)) != 0)
return (error);
sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
- if (suser_xxx(p->p_ucred, 0, 0)) {
+ if (suser_xxx(td->td_proc->p_ucred, 0, 0)) {
bcopy((caddr_t)sp, (caddr_t)&sb, sizeof(sb));
sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
sp = &sb;
@@ -3700,8 +3714,8 @@ fhstatfs(p, uap)
* Currently this is used only by UFS Extended Attributes.
*/
int
-extattrctl(p, uap)
- struct proc *p;
+extattrctl(td, uap)
+ struct thread *td;
struct extattrctl_args *uap;
{
struct vnode *filename_vp;
@@ -3728,7 +3742,7 @@ extattrctl(p, uap)
filename_vp = NULL;
if (SCARG(uap, filename) != NULL) {
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
- SCARG(uap, filename), p);
+ SCARG(uap, filename), td);
if ((error = namei(&nd)) != 0)
return (error);
filename_vp = nd.ni_vp;
@@ -3736,7 +3750,7 @@ extattrctl(p, uap)
}
/* SCARG(uap, path) always defined. */
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
error = vn_start_write(nd.ni_vp, &mp, V_WAIT | PCATCH);
@@ -3749,10 +3763,10 @@ extattrctl(p, uap)
if (SCARG(uap, attrname) != NULL) {
error = VFS_EXTATTRCTL(mp, SCARG(uap, cmd), filename_vp,
- SCARG(uap, attrnamespace), attrname, p);
+ SCARG(uap, attrnamespace), attrname, td);
} else {
error = VFS_EXTATTRCTL(mp, SCARG(uap, cmd), filename_vp,
- SCARG(uap, attrnamespace), NULL, p);
+ SCARG(uap, attrnamespace), NULL, td);
}
vn_finished_write(mp);
@@ -3779,7 +3793,7 @@ extattrctl(p, uap)
*/
static int
extattr_set_vp(struct vnode *vp, int attrnamespace, const char *attrname,
- struct iovec *iovp, unsigned iovcnt, struct proc *p)
+ struct iovec *iovp, unsigned iovcnt, struct thread *td)
{
struct mount *mp;
struct uio auio;
@@ -3789,8 +3803,8 @@ extattr_set_vp(struct vnode *vp, int attrnamespace, const char *attrname,
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
iovlen = iovcnt * sizeof(struct iovec);
if (iovcnt > UIO_SMALLIOV) {
@@ -3806,7 +3820,7 @@ extattr_set_vp(struct vnode *vp, int attrnamespace, const char *attrname,
auio.uio_iovcnt = iovcnt;
auio.uio_rw = UIO_WRITE;
auio.uio_segflg = UIO_USERSPACE;
- auio.uio_procp = p;
+ auio.uio_td = td;
auio.uio_offset = 0;
if ((error = copyin((caddr_t)iovp, (caddr_t)iov, iovlen)))
goto done;
@@ -3821,20 +3835,20 @@ extattr_set_vp(struct vnode *vp, int attrnamespace, const char *attrname,
}
cnt = auio.uio_resid;
error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio,
- p->p_ucred, p);
+ td->td_proc->p_ucred, td);
cnt -= auio.uio_resid;
- p->p_retval[0] = cnt;
+ td->td_retval[0] = cnt;
done:
if (needfree)
FREE(needfree, M_IOV);
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
return (error);
}
int
-extattr_set_file(p, uap)
- struct proc *p;
+extattr_set_file(td, uap)
+ struct thread *td;
struct extattr_set_file_args *uap;
{
struct nameidata nd;
@@ -3846,21 +3860,21 @@ extattr_set_file(p, uap)
if (error)
return (error);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
error = extattr_set_vp(nd.ni_vp, SCARG(uap, attrnamespace), attrname,
- SCARG(uap, iovp), SCARG(uap, iovcnt), p);
+ SCARG(uap, iovp), SCARG(uap, iovcnt), td);
vrele(nd.ni_vp);
return (error);
}
int
-extattr_set_fd(p, uap)
- struct proc *p;
+extattr_set_fd(td, uap)
+ struct thread *td;
struct extattr_set_fd_args *uap;
{
struct file *fp;
@@ -3872,12 +3886,12 @@ extattr_set_fd(p, uap)
if (error)
return (error);
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
error = extattr_set_vp((struct vnode *)fp->f_data,
SCARG(uap, attrnamespace), attrname, SCARG(uap, iovp),
- SCARG(uap, iovcnt), p);
+ SCARG(uap, iovcnt), td);
return (error);
}
@@ -3895,15 +3909,15 @@ extattr_set_fd(p, uap)
*/
static int
extattr_get_vp(struct vnode *vp, int attrnamespace, const char *attrname,
- struct iovec *iovp, unsigned iovcnt, struct proc *p)
+ struct iovec *iovp, unsigned iovcnt, struct thread *td)
{
struct uio auio;
struct iovec *iov, *needfree = NULL, aiov[UIO_SMALLIOV];
u_int iovlen, cnt;
int error, i;
- VOP_LEASE(vp, p, p->p_ucred, LEASE_READ);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_READ);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
iovlen = iovcnt * sizeof (struct iovec);
if (iovcnt > UIO_SMALLIOV) {
@@ -3919,7 +3933,7 @@ extattr_get_vp(struct vnode *vp, int attrnamespace, const char *attrname,
auio.uio_iovcnt = iovcnt;
auio.uio_rw = UIO_READ;
auio.uio_segflg = UIO_USERSPACE;
- auio.uio_procp = p;
+ auio.uio_td = td;
auio.uio_offset = 0;
if ((error = copyin((caddr_t)iovp, (caddr_t)iov, iovlen)))
goto done;
@@ -3934,19 +3948,19 @@ extattr_get_vp(struct vnode *vp, int attrnamespace, const char *attrname,
}
cnt = auio.uio_resid;
error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio,
- p->p_ucred, p);
+ td->td_proc->p_ucred, td);
cnt -= auio.uio_resid;
- p->p_retval[0] = cnt;
+ td->td_retval[0] = cnt;
done:
if (needfree)
FREE(needfree, M_IOV);
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
return (error);
}
int
-extattr_get_file(p, uap)
- struct proc *p;
+extattr_get_file(td, uap)
+ struct thread *td;
struct extattr_get_file_args *uap;
{
struct nameidata nd;
@@ -3958,21 +3972,21 @@ extattr_get_file(p, uap)
if (error)
return (error);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
error = extattr_get_vp(nd.ni_vp, SCARG(uap, attrnamespace), attrname,
- SCARG(uap, iovp), SCARG(uap, iovcnt), p);
+ SCARG(uap, iovp), SCARG(uap, iovcnt), td);
vrele(nd.ni_vp);
return (error);
}
int
-extattr_get_fd(p, uap)
- struct proc *p;
+extattr_get_fd(td, uap)
+ struct thread *td;
struct extattr_get_fd_args *uap;
{
struct file *fp;
@@ -3984,12 +3998,12 @@ extattr_get_fd(p, uap)
if (error)
return (error);
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
error = extattr_get_vp((struct vnode *)fp->f_data,
SCARG(uap, attrnamespace), attrname, SCARG(uap, iovp),
- SCARG(uap, iovcnt), p);
+ SCARG(uap, iovcnt), td);
return (error);
}
@@ -4006,27 +4020,27 @@ extattr_get_fd(p, uap)
*/
static int
extattr_delete_vp(struct vnode *vp, int attrnamespace, const char *attrname,
- struct proc *p)
+ struct thread *td)
{
struct mount *mp;
int error;
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
- p->p_ucred, p);
+ td->td_proc->p_ucred, td);
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
return (error);
}
int
-extattr_delete_file(p, uap)
- struct proc *p;
+extattr_delete_file(td, uap)
+ struct thread *td;
struct extattr_delete_file_args *uap;
{
struct nameidata nd;
@@ -4038,21 +4052,21 @@ extattr_delete_file(p, uap)
if (error)
return(error);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return(error);
NDFREE(&nd, NDF_ONLY_PNBUF);
error = extattr_delete_vp(nd.ni_vp, SCARG(uap, attrnamespace),
- attrname, p);
+ attrname, td);
vrele(nd.ni_vp);
return(error);
}
int
-extattr_delete_fd(p, uap)
- struct proc *p;
+extattr_delete_fd(td, uap)
+ struct thread *td;
struct extattr_delete_fd_args *uap;
{
struct file *fp;
@@ -4064,11 +4078,11 @@ extattr_delete_fd(p, uap)
if (error)
return (error);
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
error = extattr_delete_vp((struct vnode *)fp->f_data,
- SCARG(uap, attrnamespace), attrname, p);
+ SCARG(uap, attrnamespace), attrname, td);
return (error);
}
diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c
index 4875533..470abba 100644
--- a/sys/kern/vfs_lookup.c
+++ b/sys/kern/vfs_lookup.c
@@ -100,15 +100,16 @@ namei(ndp)
struct uio auio;
int error, linklen;
struct componentname *cnp = &ndp->ni_cnd;
- struct proc *p = cnp->cn_proc;
+ struct thread *td = cnp->cn_thread;
+ struct proc *p = td->td_proc;
- ndp->ni_cnd.cn_cred = ndp->ni_cnd.cn_proc->p_ucred;
- KASSERT(cnp->cn_cred && cnp->cn_proc, ("namei: bad cred/proc"));
+ ndp->ni_cnd.cn_cred = ndp->ni_cnd.cn_thread->td_proc->p_ucred;
+ KASSERT(cnp->cn_cred && p, ("namei: bad cred/proc"));
KASSERT((cnp->cn_nameiop & (~OPMASK)) == 0,
("namei: nameiop contaminated with flags"));
KASSERT((cnp->cn_flags & OPMASK) == 0,
("namei: flags contaminated with nameiops"));
- fdp = cnp->cn_proc->p_fd;
+ fdp = p->p_fd;
/*
* Get a buffer for the name to be translated, and copy the
@@ -136,8 +137,8 @@ namei(ndp)
}
ndp->ni_loopcnt = 0;
#ifdef KTRACE
- if (KTRPOINT(cnp->cn_proc, KTR_NAMEI))
- ktrnamei(cnp->cn_proc->p_tracep, cnp->cn_pnbuf);
+ if (KTRPOINT(p, KTR_NAMEI))
+ ktrnamei(p->p_tracep, cnp->cn_pnbuf);
#endif
/*
@@ -182,14 +183,13 @@ namei(ndp)
(cnp->cn_nameiop != DELETE) &&
((cnp->cn_flags & (NOOBJ|LOCKLEAF)) ==
LOCKLEAF))
- vfs_object_create(ndp->ni_vp,
- ndp->ni_cnd.cn_proc,
+ vfs_object_create(ndp->ni_vp, td,
ndp->ni_cnd.cn_cred);
return (0);
}
if ((cnp->cn_flags & LOCKPARENT) && ndp->ni_pathlen == 1)
- VOP_UNLOCK(ndp->ni_dvp, 0, p);
+ VOP_UNLOCK(ndp->ni_dvp, 0, td);
if (ndp->ni_loopcnt++ >= MAXSYMLINKS) {
error = ELOOP;
break;
@@ -205,7 +205,7 @@ namei(ndp)
auio.uio_offset = 0;
auio.uio_rw = UIO_READ;
auio.uio_segflg = UIO_SYSSPACE;
- auio.uio_procp = (struct proc *)0;
+ auio.uio_td = (struct thread *)0;
auio.uio_resid = MAXPATHLEN;
error = VOP_READLINK(ndp->ni_vp, &auio, cnp->cn_cred);
if (error) {
@@ -296,7 +296,7 @@ lookup(ndp)
int error = 0;
int dpunlocked = 0; /* dp has already been unlocked */
struct componentname *cnp = &ndp->ni_cnd;
- struct proc *p = cnp->cn_proc;
+ struct thread *td = cnp->cn_thread;
/*
* Setup: break out flag bits into variables.
@@ -312,7 +312,7 @@ lookup(ndp)
cnp->cn_flags &= ~ISSYMLINK;
dp = ndp->ni_startdir;
ndp->ni_startdir = NULLVP;
- vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, td);
dirloop:
/*
@@ -392,7 +392,7 @@ dirloop:
}
ndp->ni_vp = dp;
if (!(cnp->cn_flags & (LOCKPARENT | LOCKLEAF)))
- VOP_UNLOCK(dp, 0, p);
+ VOP_UNLOCK(dp, 0, td);
/* XXX This should probably move to the top of function. */
if (cnp->cn_flags & SAVESTART)
panic("lookup: SAVESTART");
@@ -432,7 +432,7 @@ dirloop:
dp = dp->v_mount->mnt_vnodecovered;
vput(tdp);
VREF(dp);
- vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, td);
}
}
@@ -459,7 +459,7 @@ unionlookup:
else
vput(tdp);
VREF(dp);
- vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, td);
goto unionlookup;
}
@@ -514,11 +514,11 @@ unionlookup:
*/
while (dp->v_type == VDIR && (mp = dp->v_mountedhere) &&
(cnp->cn_flags & NOCROSSMOUNT) == 0) {
- if (vfs_busy(mp, 0, 0, p))
+ if (vfs_busy(mp, 0, 0, td))
continue;
- VOP_UNLOCK(dp, 0, p);
+ VOP_UNLOCK(dp, 0, td);
error = VFS_ROOT(mp, &tdp);
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
if (error) {
dpunlocked = 1;
goto bad2;
@@ -587,13 +587,13 @@ nextname:
vrele(ndp->ni_dvp);
if ((cnp->cn_flags & LOCKLEAF) == 0)
- VOP_UNLOCK(dp, 0, p);
+ VOP_UNLOCK(dp, 0, td);
return (0);
bad2:
if ((cnp->cn_flags & (LOCKPARENT | PDIRUNLOCK)) == LOCKPARENT &&
*ndp->ni_next == '\0')
- VOP_UNLOCK(ndp->ni_dvp, 0, p);
+ VOP_UNLOCK(ndp->ni_dvp, 0, td);
vrele(ndp->ni_dvp);
bad:
if (dpunlocked)
@@ -613,7 +613,7 @@ relookup(dvp, vpp, cnp)
struct vnode *dvp, **vpp;
struct componentname *cnp;
{
- struct proc *p = cnp->cn_proc;
+ struct thread *td = cnp->cn_thread;
struct vnode *dp = 0; /* the directory we are searching */
int docache; /* == 0 do not cache last component */
int wantparent; /* 1 => wantparent or lockparent flag */
@@ -635,7 +635,7 @@ relookup(dvp, vpp, cnp)
rdonly = cnp->cn_flags & RDONLY;
cnp->cn_flags &= ~ISSYMLINK;
dp = dvp;
- vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, td);
/* dirloop: */
/*
@@ -669,7 +669,7 @@ relookup(dvp, vpp, cnp)
goto bad;
}
if (!(cnp->cn_flags & LOCKLEAF))
- VOP_UNLOCK(dp, 0, p);
+ VOP_UNLOCK(dp, 0, td);
*vpp = dp;
/* XXX This should probably move to the top of function. */
if (cnp->cn_flags & SAVESTART)
@@ -730,15 +730,15 @@ relookup(dvp, vpp, cnp)
if (vn_canvmio(dp) == TRUE &&
((cnp->cn_flags & (NOOBJ|LOCKLEAF)) == LOCKLEAF))
- vfs_object_create(dp, cnp->cn_proc, cnp->cn_cred);
+ vfs_object_create(dp, td, cnp->cn_cred);
if ((cnp->cn_flags & LOCKLEAF) == 0)
- VOP_UNLOCK(dp, 0, p);
+ VOP_UNLOCK(dp, 0, td);
return (0);
bad2:
if ((cnp->cn_flags & LOCKPARENT) && (cnp->cn_flags & ISLASTCN))
- VOP_UNLOCK(dvp, 0, p);
+ VOP_UNLOCK(dvp, 0, td);
vrele(dvp);
bad:
vput(dp);
diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c
index e651ef0..98c3f13 100644
--- a/sys/kern/vfs_mount.c
+++ b/sys/kern/vfs_mount.c
@@ -227,7 +227,7 @@ vfs_mountroot_try(char *mountfrom)
*/
strncpy(mp->mnt_stat.f_mntonname, "/", MNAMELEN);
- error = VFS_MOUNT(mp, NULL, NULL, NULL, curproc);
+ error = VFS_MOUNT(mp, NULL, NULL, NULL, curthread);
done:
if (vfsname != NULL)
@@ -236,7 +236,7 @@ done:
free(path, M_MOUNT);
if (error != 0) {
if (mp != NULL) {
- vfs_unbusy(mp, curproc);
+ vfs_unbusy(mp, curthread);
free(mp, M_MOUNT);
}
printf("Root mount failed: %d\n", error);
@@ -249,7 +249,7 @@ done:
/* sanity check system clock against root filesystem timestamp */
inittodr(mp->mnt_time);
- vfs_unbusy(mp, curproc);
+ vfs_unbusy(mp, curthread);
}
return(error);
}
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index b38bc16..8c67662 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -74,7 +74,7 @@ static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
static void addalias __P((struct vnode *vp, dev_t nvp_rdev));
static void insmntque __P((struct vnode *vp, struct mount *mp));
-static void vclean __P((struct vnode *vp, int flags, struct proc *p));
+static void vclean __P((struct vnode *vp, int flags, struct thread *td));
/*
* Number of vnodes in existence. Increased whenever getnewvnode()
@@ -269,11 +269,11 @@ SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)
* unmounting. Interlock is not released on failure.
*/
int
-vfs_busy(mp, flags, interlkp, p)
+vfs_busy(mp, flags, interlkp, td)
struct mount *mp;
int flags;
struct mtx *interlkp;
- struct proc *p;
+ struct thread *td;
{
int lkflags;
@@ -293,7 +293,7 @@ vfs_busy(mp, flags, interlkp, p)
lkflags = LK_SHARED | LK_NOPAUSE;
if (interlkp)
lkflags |= LK_INTERLOCK;
- if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p))
+ if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
panic("vfs_busy: unexpected lock failure");
return (0);
}
@@ -302,12 +302,12 @@ vfs_busy(mp, flags, interlkp, p)
* Free a busy filesystem.
*/
void
-vfs_unbusy(mp, p)
+vfs_unbusy(mp, td)
struct mount *mp;
- struct proc *p;
+ struct thread *td;
{
- lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, p);
+ lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
}
/*
@@ -322,7 +322,7 @@ vfs_rootmountalloc(fstypename, devname, mpp)
char *devname;
struct mount **mpp;
{
- struct proc *p = curproc; /* XXX */
+ struct thread *td = curthread; /* XXX */
struct vfsconf *vfsp;
struct mount *mp;
@@ -335,7 +335,7 @@ vfs_rootmountalloc(fstypename, devname, mpp)
return (ENODEV);
mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
- (void)vfs_busy(mp, LK_NOWAIT, 0, p);
+ (void)vfs_busy(mp, LK_NOWAIT, 0, td);
LIST_INIT(&mp->mnt_vnodelist);
mp->mnt_vfc = vfsp;
mp->mnt_op = vfsp->vfc_vfsops;
@@ -524,7 +524,7 @@ getnewvnode(tag, mp, vops, vpp)
struct vnode **vpp;
{
int s, count;
- struct proc *p = curproc; /* XXX */
+ struct thread *td = curthread; /* XXX */
struct vnode *vp = NULL;
struct mount *vnmp;
vm_object_t object;
@@ -582,7 +582,7 @@ getnewvnode(tag, mp, vops, vpp)
cache_purge(vp);
vp->v_lease = NULL;
if (vp->v_type != VBAD) {
- vgonel(vp, p);
+ vgonel(vp, td);
} else {
mtx_unlock(&vp->v_interlock);
}
@@ -634,7 +634,7 @@ getnewvnode(tag, mp, vops, vpp)
splx(s);
- vfs_object_create(vp, p, p->p_ucred);
+ vfs_object_create(vp, td, td->td_proc->p_ucred);
vnodeallocs++;
if (vnodeallocs % vnoderecycleperiod == 0 &&
@@ -699,11 +699,11 @@ vwakeup(bp)
* Called with the underlying object locked.
*/
int
-vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
+vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
register struct vnode *vp;
int flags;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
int slpflag, slptimeo;
{
register struct buf *bp;
@@ -726,7 +726,7 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
}
if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
splx(s);
- if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
+ if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0)
return (error);
s = splbio();
if (vp->v_numoutput > 0 ||
@@ -815,10 +815,10 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
* sync activity.
*/
int
-vtruncbuf(vp, cred, p, length, blksize)
+vtruncbuf(vp, cred, td, length, blksize)
register struct vnode *vp;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
off_t length;
int blksize;
{
@@ -1020,15 +1020,15 @@ sched_sync(void)
struct mount *mp;
long starttime;
int s;
- struct proc *p = updateproc;
+ struct thread *td = &updateproc->p_thread; /* XXXKSE */
mtx_lock(&Giant);
- EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
+ EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, td->td_proc,
SHUTDOWN_PRI_LAST);
for (;;) {
- kthread_suspend_check(p);
+ kthread_suspend_check(td->td_proc);
starttime = time_second;
@@ -1046,9 +1046,9 @@ sched_sync(void)
while ((vp = LIST_FIRST(slp)) != NULL) {
if (VOP_ISLOCKED(vp, NULL) == 0 &&
vn_start_write(vp, &mp, V_NOWAIT) == 0) {
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- (void) VOP_FSYNC(vp, p->p_ucred, MNT_LAZY, p);
- VOP_UNLOCK(vp, 0, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ (void) VOP_FSYNC(vp, td->td_proc->p_ucred, MNT_LAZY, td);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
}
s = splbio();
@@ -1111,14 +1111,15 @@ sched_sync(void)
* Request the syncer daemon to speed up its work.
* We never push it to speed up more than half of its
* normal turn time, otherwise it could take over the cpu.
+ * XXXKSE only one update?
*/
int
speedup_syncer()
{
mtx_lock_spin(&sched_lock);
- if (updateproc->p_wchan == &lbolt)
- setrunnable(updateproc);
+ if (updateproc->p_thread.td_wchan == &lbolt) /* XXXKSE */
+ setrunnable(&updateproc->p_thread);
mtx_unlock_spin(&sched_lock);
if (rushjob < syncdelay / 2) {
rushjob += 1;
@@ -1398,9 +1399,9 @@ addaliasu(nvp, nvp_rdev)
ovp->v_vnlock = &ovp->v_lock;
ops = ovp->v_op;
ovp->v_op = nvp->v_op;
- if (VOP_ISLOCKED(nvp, curproc)) {
- VOP_UNLOCK(nvp, 0, curproc);
- vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curproc);
+ if (VOP_ISLOCKED(nvp, curthread)) {
+ VOP_UNLOCK(nvp, 0, curthread);
+ vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curthread);
}
nvp->v_op = ops;
insmntque(ovp, nvp->v_mount);
@@ -1433,10 +1434,10 @@ addalias(nvp, dev)
* been changed to a new file system type).
*/
int
-vget(vp, flags, p)
+vget(vp, flags, td)
register struct vnode *vp;
int flags;
- struct proc *p;
+ struct thread *td;
{
int error;
@@ -1449,7 +1450,7 @@ vget(vp, flags, p)
if ((flags & LK_INTERLOCK) == 0)
mtx_lock(&vp->v_interlock);
if (vp->v_flag & VXLOCK) {
- if (vp->v_vxproc == curproc) {
+ if (vp->v_vxproc == curthread) {
printf("VXLOCK interlock avoided\n");
} else {
vp->v_flag |= VXWANT;
@@ -1464,7 +1465,7 @@ vget(vp, flags, p)
if (VSHOULDBUSY(vp))
vbusy(vp);
if (flags & LK_TYPE_MASK) {
- if ((error = vn_lock(vp, flags | LK_INTERLOCK, p)) != 0) {
+ if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
/*
* must expand vrele here because we do not want
* to call VOP_INACTIVE if the reference count
@@ -1504,7 +1505,7 @@ void
vrele(vp)
struct vnode *vp;
{
- struct proc *p = curproc; /* XXX */
+ struct thread *td = curthread; /* XXX */
KASSERT(vp != NULL, ("vrele: null vp"));
@@ -1532,8 +1533,8 @@ vrele(vp)
* call VOP_INACTIVE with the node locked. So, in the case of
* vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
*/
- if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) {
- VOP_INACTIVE(vp, p);
+ if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) {
+ VOP_INACTIVE(vp, td);
}
} else {
@@ -1554,7 +1555,7 @@ void
vput(vp)
struct vnode *vp;
{
- struct proc *p = curproc; /* XXX */
+ struct thread *td = curthread; /* XXX */
GIANT_REQUIRED;
@@ -1567,7 +1568,7 @@ vput(vp)
if (vp->v_usecount > 1) {
vp->v_usecount--;
- VOP_UNLOCK(vp, LK_INTERLOCK, p);
+ VOP_UNLOCK(vp, LK_INTERLOCK, td);
return;
}
@@ -1583,7 +1584,7 @@ vput(vp)
* vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
*/
mtx_unlock(&vp->v_interlock);
- VOP_INACTIVE(vp, p);
+ VOP_INACTIVE(vp, td);
} else {
#ifdef DIAGNOSTIC
@@ -1659,7 +1660,7 @@ vflush(mp, rootrefs, flags)
int rootrefs;
int flags;
{
- struct proc *p = curproc; /* XXX */
+ struct thread *td = curthread; /* XXX */
struct vnode *vp, *nvp, *rootvp = NULL;
int busy = 0, error;
@@ -1711,7 +1712,7 @@ loop:
* vnode data structures and we are done.
*/
if (vp->v_usecount == 0) {
- vgonel(vp, p);
+ vgonel(vp, td);
mtx_lock(&mntvnode_mtx);
continue;
}
@@ -1723,9 +1724,9 @@ loop:
*/
if (flags & FORCECLOSE) {
if (vp->v_type != VCHR) {
- vgonel(vp, p);
+ vgonel(vp, td);
} else {
- vclean(vp, 0, p);
+ vclean(vp, 0, td);
vp->v_op = spec_vnodeop_p;
insmntque(vp, (struct mount *) 0);
}
@@ -1750,7 +1751,7 @@ loop:
KASSERT(busy > 0, ("vflush: not busy"));
KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs"));
if (busy == 1 && rootvp->v_usecount == rootrefs) {
- vgonel(rootvp, p);
+ vgonel(rootvp, td);
busy = 0;
} else
mtx_unlock(&rootvp->v_interlock);
@@ -1766,10 +1767,10 @@ loop:
* Disassociate the underlying file system from a vnode.
*/
static void
-vclean(vp, flags, p)
+vclean(vp, flags, td)
struct vnode *vp;
int flags;
- struct proc *p;
+ struct thread *td;
{
int active;
@@ -1788,7 +1789,7 @@ vclean(vp, flags, p)
if (vp->v_flag & VXLOCK)
panic("vclean: deadlock");
vp->v_flag |= VXLOCK;
- vp->v_vxproc = curproc;
+ vp->v_vxproc = curthread;
/*
* Even if the count is zero, the VOP_INACTIVE routine may still
* have the object locked while it cleans it out. The VOP_LOCK
@@ -1796,7 +1797,7 @@ vclean(vp, flags, p)
* For active vnodes, it ensures that no other activity can
* occur while the underlying object is being cleaned out.
*/
- VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p);
+ VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
/*
* Clean out any buffers associated with the vnode.
@@ -1805,8 +1806,8 @@ vclean(vp, flags, p)
if (flags & DOCLOSE) {
if (TAILQ_FIRST(&vp->v_dirtyblkhd) != NULL)
(void) vn_write_suspend_wait(vp, NULL, V_WAIT);
- if (vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0) != 0)
- vinvalbuf(vp, 0, NOCRED, p, 0, 0);
+ if (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0) != 0)
+ vinvalbuf(vp, 0, NOCRED, td, 0, 0);
}
VOP_DESTROYVOBJECT(vp);
@@ -1818,19 +1819,19 @@ vclean(vp, flags, p)
*/
if (active) {
if (flags & DOCLOSE)
- VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
- VOP_INACTIVE(vp, p);
+ VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
+ VOP_INACTIVE(vp, td);
} else {
/*
* Any other processes trying to obtain this lock must first
* wait for VXLOCK to clear, then call the new lock operation.
*/
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
}
/*
* Reclaim the vnode.
*/
- if (VOP_RECLAIM(vp, p))
+ if (VOP_RECLAIM(vp, td))
panic("vclean: cannot reclaim");
if (active) {
@@ -1916,10 +1917,10 @@ vop_revoke(ap)
* Release the passed interlock if the vnode will be recycled.
*/
int
-vrecycle(vp, inter_lkp, p)
+vrecycle(vp, inter_lkp, td)
struct vnode *vp;
struct mtx *inter_lkp;
- struct proc *p;
+ struct thread *td;
{
mtx_lock(&vp->v_interlock);
@@ -1927,7 +1928,7 @@ vrecycle(vp, inter_lkp, p)
if (inter_lkp) {
mtx_unlock(inter_lkp);
}
- vgonel(vp, p);
+ vgonel(vp, td);
return (1);
}
mtx_unlock(&vp->v_interlock);
@@ -1942,19 +1943,19 @@ void
vgone(vp)
register struct vnode *vp;
{
- struct proc *p = curproc; /* XXX */
+ struct thread *td = curthread; /* XXX */
mtx_lock(&vp->v_interlock);
- vgonel(vp, p);
+ vgonel(vp, td);
}
/*
* vgone, with the vp interlock held.
*/
void
-vgonel(vp, p)
+vgonel(vp, td)
struct vnode *vp;
- struct proc *p;
+ struct thread *td;
{
int s;
@@ -1972,7 +1973,7 @@ vgonel(vp, p)
/*
* Clean out the filesystem specific data.
*/
- vclean(vp, DOCLOSE, p);
+ vclean(vp, DOCLOSE, td);
mtx_lock(&vp->v_interlock);
/*
@@ -2132,14 +2133,14 @@ vprint(label, vp)
*/
DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
{
- struct proc *p = curproc; /* XXX */
+ struct thread *td = curthread; /* XXX */
struct mount *mp, *nmp;
struct vnode *vp;
printf("Locked vnodes\n");
mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -2151,7 +2152,7 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
mtx_unlock(&mntvnode_mtx);
mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
}
mtx_unlock(&mountlist_mtx);
}
@@ -2187,7 +2188,7 @@ vfs_sysctl(SYSCTL_HANDLER_ARGS)
if (vfsp == NULL)
return (EOPNOTSUPP);
return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
- oldp, oldlenp, newp, newlen, p));
+ oldp, oldlenp, newp, newlen, td));
}
#endif
switch (name[1]) {
@@ -2245,7 +2246,7 @@ sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
static int
sysctl_vnode(SYSCTL_HANDLER_ARGS)
{
- struct proc *p = curproc; /* XXX */
+ struct thread *td = curthread; /* XXX */
struct mount *mp, *nmp;
struct vnode *nvp, *vp;
int error;
@@ -2260,7 +2261,7 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -2286,7 +2287,7 @@ again:
mtx_unlock(&mntvnode_mtx);
mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
}
mtx_unlock(&mountlist_mtx);
@@ -2323,19 +2324,19 @@ void
vfs_unmountall()
{
struct mount *mp;
- struct proc *p;
+ struct thread *td;
int error;
- if (curproc != NULL)
- p = curproc;
+ if (curthread != NULL)
+ td = curthread;
else
- p = initproc; /* XXX XXX should this be proc0? */
+ td = &initproc->p_thread; /* XXX XXX should this be proc0? */
/*
* Since this only runs when rebooting, it is not interlocked.
*/
while(!TAILQ_EMPTY(&mountlist)) {
mp = TAILQ_LAST(&mountlist, mntlist);
- error = dounmount(mp, MNT_FORCE, p);
+ error = dounmount(mp, MNT_FORCE, td);
if (error) {
TAILQ_REMOVE(&mountlist, mp, mnt_list);
printf("unmount of %s failed (",
@@ -2394,7 +2395,7 @@ loop:
if (VOP_GETVOBJECT(vp, &obj) == 0 &&
(obj->flags & OBJ_MIGHTBEDIRTY)) {
if (!vget(vp,
- LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ, curproc)) {
+ LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ, curthread)) {
if (VOP_GETVOBJECT(vp, &obj) == 0) {
vm_object_page_clean(obj, 0, 0,
flags == MNT_WAIT ?
@@ -2422,13 +2423,13 @@ loop:
* vp must be locked when vfs_object_create is called.
*/
int
-vfs_object_create(vp, p, cred)
+vfs_object_create(vp, td, cred)
struct vnode *vp;
- struct proc *p;
+ struct thread *td;
struct ucred *cred;
{
GIANT_REQUIRED;
- return (VOP_CREATEVOBJECT(vp, cred, p));
+ return (VOP_CREATEVOBJECT(vp, cred, td));
}
/*
@@ -2483,9 +2484,9 @@ vbusy(vp)
* to avoid race conditions.)
*/
int
-vn_pollrecord(vp, p, events)
+vn_pollrecord(vp, td, events)
struct vnode *vp;
- struct proc *p;
+ struct thread *td;
short events;
{
mtx_lock(&vp->v_pollinfo.vpi_lock);
@@ -2504,7 +2505,7 @@ vn_pollrecord(vp, p, events)
return events;
}
vp->v_pollinfo.vpi_events |= events;
- selrecord(p, &vp->v_pollinfo.vpi_selinfo);
+ selrecord(td, &vp->v_pollinfo.vpi_selinfo);
mtx_unlock(&vp->v_pollinfo.vpi_lock);
return 0;
}
@@ -2640,12 +2641,12 @@ sync_fsync(ap)
struct vnode *a_vp;
struct ucred *a_cred;
int a_waitfor;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
struct vnode *syncvp = ap->a_vp;
struct mount *mp = syncvp->v_mount;
- struct proc *p = ap->a_p;
+ struct thread *td = ap->a_td;
int asyncflag;
/*
@@ -2664,22 +2665,22 @@ sync_fsync(ap)
* not already on the sync list.
*/
mtx_lock(&mountlist_mtx);
- if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, p) != 0) {
+ if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
mtx_unlock(&mountlist_mtx);
return (0);
}
if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
return (0);
}
asyncflag = mp->mnt_flag & MNT_ASYNC;
mp->mnt_flag &= ~MNT_ASYNC;
vfs_msync(mp, MNT_NOWAIT);
- VFS_SYNC(mp, MNT_LAZY, ap->a_cred, p);
+ VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td);
if (asyncflag)
mp->mnt_flag |= MNT_ASYNC;
vn_finished_write(mp);
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
return (0);
}
@@ -2690,7 +2691,7 @@ static int
sync_inactive(ap)
struct vop_inactive_args /* {
struct vnode *a_vp;
- struct proc *a_p;
+ struct thread *a_td;
} */ *ap;
{
@@ -2805,7 +2806,7 @@ NDFREE(ndp, flags)
if (!(flags & NDF_NO_DVP_UNLOCK) &&
(ndp->ni_cnd.cn_flags & LOCKPARENT) &&
ndp->ni_dvp != ndp->ni_vp)
- VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_proc);
+ VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_thread);
if (!(flags & NDF_NO_DVP_RELE) &&
(ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) {
vrele(ndp->ni_dvp);
@@ -2813,7 +2814,7 @@ NDFREE(ndp, flags)
}
if (!(flags & NDF_NO_VP_UNLOCK) &&
(ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp)
- VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_proc);
+ VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_thread);
if (!(flags & NDF_NO_VP_RELE) &&
ndp->ni_vp) {
vrele(ndp->ni_vp);
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index bb39292..4e05b8d 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -75,18 +75,18 @@
#include <vm/vm_zone.h>
#include <vm/vm_page.h>
-static int change_dir __P((struct nameidata *ndp, struct proc *p));
+static int change_dir __P((struct nameidata *ndp, struct thread *td));
static void checkdirs __P((struct vnode *olddp, struct vnode *newdp));
static int chroot_refuse_vdir_fds __P((struct filedesc *fdp));
static int getutimes __P((const struct timeval *, struct timespec *));
-static int setfown __P((struct proc *, struct vnode *, uid_t, gid_t));
-static int setfmode __P((struct proc *, struct vnode *, int));
-static int setfflags __P((struct proc *, struct vnode *, int));
-static int setutimes __P((struct proc *, struct vnode *,
+static int setfown __P((struct thread *td, struct vnode *, uid_t, gid_t));
+static int setfmode __P((struct thread *td, struct vnode *, int));
+static int setfflags __P((struct thread *td, struct vnode *, int));
+static int setutimes __P((struct thread *td, struct vnode *,
const struct timespec *, int));
static int usermount = 0; /* if 1, non-root can mount fs. */
-int (*union_dircheckp) __P((struct proc *, struct vnode **, struct file *));
+int (*union_dircheckp) __P((struct thread *td, struct vnode **, struct file *));
SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, "");
@@ -107,8 +107,8 @@ struct mount_args {
#endif
/* ARGSUSED */
int
-mount(p, uap)
- struct proc *p;
+mount(td, uap)
+ struct thread *td;
struct mount_args /* {
syscallarg(char *) type;
syscallarg(char *) path;
@@ -133,7 +133,7 @@ mount(p, uap)
error = copyinstr(SCARG(uap, path), fspath, MNAMELEN, NULL);
if (error)
goto finish;
- error = vfs_mount(p, fstype, fspath, SCARG(uap, flags),
+ error = vfs_mount(td, fstype, fspath, SCARG(uap, flags),
SCARG(uap, data));
finish:
free(fstype, M_TEMP);
@@ -150,8 +150,8 @@ finish:
* into userspace.
*/
int
-vfs_mount(p, fstype, fspath, fsflags, fsdata)
- struct proc *p;
+vfs_mount(td, fstype, fspath, fsflags, fsdata)
+ struct thread *td;
const char *fstype;
char *fspath;
int fsflags;
@@ -163,6 +163,7 @@ vfs_mount(p, fstype, fspath, fsflags, fsdata)
int error, flag = 0, flag2 = 0;
struct vattr va;
struct nameidata nd;
+ struct proc *p = td->td_proc;
/*
* Be ultra-paranoid about making sure the type and fspath
@@ -173,13 +174,13 @@ vfs_mount(p, fstype, fspath, fsflags, fsdata)
(strlen(fspath) >= MNAMELEN - 1))
return (ENAMETOOLONG);
- if (usermount == 0 && (error = suser(p)))
+ if (usermount == 0 && (error = suser_td(td)))
return (error);
/*
* Do not allow NFS export by non-root users.
*/
if (fsflags & MNT_EXPORTED) {
- error = suser(p);
+ error = suser_td(td);
if (error)
return (error);
}
@@ -191,7 +192,7 @@ vfs_mount(p, fstype, fspath, fsflags, fsdata)
/*
* Get vnode to be covered
*/
- NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspath, p);
+ NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspath, td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
@@ -218,11 +219,11 @@ vfs_mount(p, fstype, fspath, fsflags, fsdata)
* permitted to update it.
*/
if (mp->mnt_stat.f_owner != p->p_ucred->cr_uid &&
- (error = suser(p))) {
+ (error = suser_td(td))) {
vput(vp);
return (error);
}
- if (vfs_busy(mp, LK_NOWAIT, 0, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, 0, td)) {
vput(vp);
return (EBUSY);
}
@@ -230,7 +231,7 @@ vfs_mount(p, fstype, fspath, fsflags, fsdata)
if ((vp->v_flag & VMOUNT) != 0 ||
vp->v_mountedhere != NULL) {
mtx_unlock(&vp->v_interlock);
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
vput(vp);
return (EBUSY);
}
@@ -238,20 +239,20 @@ vfs_mount(p, fstype, fspath, fsflags, fsdata)
mtx_unlock(&vp->v_interlock);
mp->mnt_flag |= fsflags &
(MNT_RELOAD | MNT_FORCE | MNT_UPDATE | MNT_SNAPSHOT);
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
goto update;
}
/*
* If the user is not root, ensure that they own the directory
* onto which we are attempting to mount.
*/
- if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) ||
+ if ((error = VOP_GETATTR(vp, &va, p->p_ucred, td)) ||
(va.va_uid != p->p_ucred->cr_uid &&
- (error = suser(p)))) {
+ (error = suser_td(td)))) {
vput(vp);
return (error);
}
- if ((error = vinvalbuf(vp, V_SAVE, p->p_ucred, p, 0, 0)) != 0) {
+ if ((error = vinvalbuf(vp, V_SAVE, p->p_ucred, td, 0, 0)) != 0) {
vput(vp);
return (error);
}
@@ -266,7 +267,7 @@ vfs_mount(p, fstype, fspath, fsflags, fsdata)
linker_file_t lf;
/* Only load modules for root (very important!) */
- if ((error = suser(p)) != 0) {
+ if ((error = suser_td(td)) != 0) {
vput(vp);
return error;
}
@@ -304,7 +305,7 @@ vfs_mount(p, fstype, fspath, fsflags, fsdata)
*/
mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
- (void)vfs_busy(mp, LK_NOWAIT, 0, p);
+ (void)vfs_busy(mp, LK_NOWAIT, 0, td);
mp->mnt_op = vfsp->vfc_vfsops;
mp->mnt_vfc = vfsp;
vfsp->vfc_refcount++;
@@ -317,7 +318,7 @@ vfs_mount(p, fstype, fspath, fsflags, fsdata)
strncpy(mp->mnt_stat.f_mntonname, fspath, MNAMELEN);
mp->mnt_stat.f_mntonname[MNAMELEN - 1] = '\0';
mp->mnt_iosize_max = DFLTPHYS;
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
update:
/*
* Set the mount level flags.
@@ -339,7 +340,7 @@ update:
* XXX The final recipients of VFS_MOUNT just overwrite the ndp they
* get. No freeing of cn_pnbuf.
*/
- error = VFS_MOUNT(mp, fspath, fsdata, &nd, p);
+ error = VFS_MOUNT(mp, fspath, fsdata, &nd, td);
if (mp->mnt_flag & MNT_UPDATE) {
if (mp->mnt_kern_flag & MNTK_WANTRDWR)
mp->mnt_flag &= ~MNT_RDONLY;
@@ -358,14 +359,14 @@ update:
vrele(mp->mnt_syncer);
mp->mnt_syncer = NULL;
}
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
mtx_unlock(&vp->v_interlock);
vrele(vp);
return (error);
}
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
/*
* Put the new filesystem on the mount list after root.
*/
@@ -384,18 +385,18 @@ update:
panic("mount: lost mount");
checkdirs(vp, newdp);
vput(newdp);
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
if ((mp->mnt_flag & MNT_RDONLY) == 0)
error = vfs_allocate_syncvnode(mp);
- vfs_unbusy(mp, p);
- if ((error = VFS_START(mp, 0, p)) != 0)
+ vfs_unbusy(mp, td);
+ if ((error = VFS_START(mp, 0, td)) != 0)
vrele(vp);
} else {
mtx_lock(&vp->v_interlock);
vp->v_flag &= ~VMOUNT;
mtx_unlock(&vp->v_interlock);
mp->mnt_vfc->vfc_refcount--;
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
free((caddr_t)mp, M_MOUNT);
vput(vp);
}
@@ -454,8 +455,8 @@ struct unmount_args {
#endif
/* ARGSUSED */
int
-unmount(p, uap)
- struct proc *p;
+unmount(td, uap)
+ struct thread *td;
register struct unmount_args /* {
syscallarg(char *) path;
syscallarg(int) flags;
@@ -467,7 +468,7 @@ unmount(p, uap)
struct nameidata nd;
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
vp = nd.ni_vp;
@@ -478,8 +479,8 @@ unmount(p, uap)
* Only root, or the user that did the original mount is
* permitted to unmount this filesystem.
*/
- if ((mp->mnt_stat.f_owner != p->p_ucred->cr_uid) &&
- (error = suser(p))) {
+ if ((mp->mnt_stat.f_owner != td->td_proc->p_ucred->cr_uid) &&
+ (error = suser_td(td))) {
vput(vp);
return (error);
}
@@ -500,17 +501,17 @@ unmount(p, uap)
return (EINVAL);
}
vput(vp);
- return (dounmount(mp, SCARG(uap, flags), p));
+ return (dounmount(mp, SCARG(uap, flags), td));
}
/*
* Do the actual file system unmount.
*/
int
-dounmount(mp, flags, p)
+dounmount(mp, flags, td)
struct mount *mp;
int flags;
- struct proc *p;
+ struct thread *td;
{
struct vnode *coveredvp, *fsrootvp;
int error;
@@ -518,7 +519,7 @@ dounmount(mp, flags, p)
mtx_lock(&mountlist_mtx);
mp->mnt_kern_flag |= MNTK_UNMOUNT;
- lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_mtx, p);
+ lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_mtx, td);
vn_start_write(NULL, &mp, V_WAIT);
if (mp->mnt_flag & MNT_EXPUBLIC)
@@ -541,9 +542,9 @@ dounmount(mp, flags, p)
vput(fsrootvp);
}
if (((mp->mnt_flag & MNT_RDONLY) ||
- (error = VFS_SYNC(mp, MNT_WAIT, p->p_ucred, p)) == 0) ||
+ (error = VFS_SYNC(mp, MNT_WAIT, td->td_proc->p_ucred, td)) == 0) ||
(flags & MNT_FORCE)) {
- error = VFS_UNMOUNT(mp, flags, p);
+ error = VFS_UNMOUNT(mp, flags, td);
}
vn_finished_write(mp);
if (error) {
@@ -563,7 +564,7 @@ dounmount(mp, flags, p)
mp->mnt_kern_flag &= ~MNTK_UNMOUNT;
mp->mnt_flag |= async_flag;
lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK,
- &mountlist_mtx, p);
+ &mountlist_mtx, td);
if (mp->mnt_kern_flag & MNTK_MWAIT)
wakeup((caddr_t)mp);
return (error);
@@ -575,7 +576,7 @@ dounmount(mp, flags, p)
mp->mnt_vfc->vfc_refcount--;
if (!LIST_EMPTY(&mp->mnt_vnodelist))
panic("unmount: dangling vnode");
- lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_mtx, p);
+ lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_mtx, td);
lockdestroy(&mp->mnt_lock);
if (coveredvp != NULL)
vrele(coveredvp);
@@ -601,8 +602,8 @@ SYSCTL_INT(_debug, OID_AUTO, syncprt, CTLFLAG_RW, &syncprt, 0, "");
/* ARGSUSED */
int
-sync(p, uap)
- struct proc *p;
+sync(td, uap)
+ struct thread *td;
struct sync_args *uap;
{
struct mount *mp, *nmp;
@@ -610,7 +611,7 @@ sync(p, uap)
mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -620,13 +621,13 @@ sync(p, uap)
mp->mnt_flag &= ~MNT_ASYNC;
vfs_msync(mp, MNT_NOWAIT);
VFS_SYNC(mp, MNT_NOWAIT,
- ((p != NULL) ? p->p_ucred : NOCRED), p);
+ ((td != NULL) ? td->td_proc->p_ucred : NOCRED), td);
mp->mnt_flag |= asyncflag;
vn_finished_write(mp);
}
mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
}
mtx_unlock(&mountlist_mtx);
#if 0
@@ -661,8 +662,8 @@ struct quotactl_args {
#endif
/* ARGSUSED */
int
-quotactl(p, uap)
- struct proc *p;
+quotactl(td, uap)
+ struct thread *td;
register struct quotactl_args /* {
syscallarg(char *) path;
syscallarg(int) cmd;
@@ -674,9 +675,9 @@ quotactl(p, uap)
int error;
struct nameidata nd;
- if (jailed(p->p_ucred) && !prison_quotas)
+ if (jailed(td->td_proc->p_ucred) && !prison_quotas)
return (EPERM);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
@@ -685,7 +686,7 @@ quotactl(p, uap)
if (error)
return (error);
error = VFS_QUOTACTL(mp, SCARG(uap, cmd), SCARG(uap, uid),
- SCARG(uap, arg), p);
+ SCARG(uap, arg), td);
vn_finished_write(mp);
return (error);
}
@@ -701,8 +702,8 @@ struct statfs_args {
#endif
/* ARGSUSED */
int
-statfs(p, uap)
- struct proc *p;
+statfs(td, uap)
+ struct thread *td;
register struct statfs_args /* {
syscallarg(char *) path;
syscallarg(struct statfs *) buf;
@@ -714,18 +715,18 @@ statfs(p, uap)
struct nameidata nd;
struct statfs sb;
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
mp = nd.ni_vp->v_mount;
sp = &mp->mnt_stat;
NDFREE(&nd, NDF_ONLY_PNBUF);
vrele(nd.ni_vp);
- error = VFS_STATFS(mp, sp, p);
+ error = VFS_STATFS(mp, sp, td);
if (error)
return (error);
sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
- if (suser_xxx(p->p_ucred, 0, 0)) {
+ if (suser_xxx(td->td_proc->p_ucred, 0, 0)) {
bcopy((caddr_t)sp, (caddr_t)&sb, sizeof(sb));
sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
sp = &sb;
@@ -744,8 +745,8 @@ struct fstatfs_args {
#endif
/* ARGSUSED */
int
-fstatfs(p, uap)
- struct proc *p;
+fstatfs(td, uap)
+ struct thread *td;
register struct fstatfs_args /* {
syscallarg(int) fd;
syscallarg(struct statfs *) buf;
@@ -757,15 +758,15 @@ fstatfs(p, uap)
int error;
struct statfs sb;
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
mp = ((struct vnode *)fp->f_data)->v_mount;
sp = &mp->mnt_stat;
- error = VFS_STATFS(mp, sp, p);
+ error = VFS_STATFS(mp, sp, td);
if (error)
return (error);
sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
- if (suser_xxx(p->p_ucred, 0, 0)) {
+ if (suser_xxx(td->td_proc->p_ucred, 0, 0)) {
bcopy((caddr_t)sp, (caddr_t)&sb, sizeof(sb));
sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
sp = &sb;
@@ -784,8 +785,8 @@ struct getfsstat_args {
};
#endif
int
-getfsstat(p, uap)
- struct proc *p;
+getfsstat(td, uap)
+ struct thread *td;
register struct getfsstat_args /* {
syscallarg(struct statfs *) buf;
syscallarg(long) bufsize;
@@ -802,7 +803,7 @@ getfsstat(p, uap)
count = 0;
mtx_lock(&mountlist_mtx);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
- if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
@@ -815,16 +816,16 @@ getfsstat(p, uap)
*/
if (((SCARG(uap, flags) & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
(SCARG(uap, flags) & MNT_WAIT)) &&
- (error = VFS_STATFS(mp, sp, p))) {
+ (error = VFS_STATFS(mp, sp, td))) {
mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
continue;
}
sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
error = copyout((caddr_t)sp, sfsp, sizeof(*sp));
if (error) {
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
return (error);
}
sfsp += sizeof(*sp);
@@ -832,13 +833,13 @@ getfsstat(p, uap)
count++;
mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
}
mtx_unlock(&mountlist_mtx);
if (sfsp && count > maxcount)
- p->p_retval[0] = maxcount;
+ td->td_retval[0] = maxcount;
else
- p->p_retval[0] = count;
+ td->td_retval[0] = count;
return (0);
}
@@ -852,13 +853,13 @@ struct fchdir_args {
#endif
/* ARGSUSED */
int
-fchdir(p, uap)
- struct proc *p;
+fchdir(td, uap)
+ struct thread *td;
struct fchdir_args /* {
syscallarg(int) fd;
} */ *uap;
{
- register struct filedesc *fdp = p->p_fd;
+ register struct filedesc *fdp = td->td_proc->p_fd;
struct vnode *vp, *tdp;
struct mount *mp;
struct file *fp;
@@ -868,16 +869,16 @@ fchdir(p, uap)
return (error);
vp = (struct vnode *)fp->f_data;
VREF(vp);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
if (vp->v_type != VDIR)
error = ENOTDIR;
else
- error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p);
+ error = VOP_ACCESS(vp, VEXEC, td->td_proc->p_ucred, td);
while (!error && (mp = vp->v_mountedhere) != NULL) {
- if (vfs_busy(mp, 0, 0, p))
+ if (vfs_busy(mp, 0, 0, td))
continue;
error = VFS_ROOT(mp, &tdp);
- vfs_unbusy(mp, p);
+ vfs_unbusy(mp, td);
if (error)
break;
vput(vp);
@@ -887,7 +888,7 @@ fchdir(p, uap)
vput(vp);
return (error);
}
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
vrele(fdp->fd_cdir);
fdp->fd_cdir = vp;
return (0);
@@ -903,19 +904,19 @@ struct chdir_args {
#endif
/* ARGSUSED */
int
-chdir(p, uap)
- struct proc *p;
+chdir(td, uap)
+ struct thread *td;
struct chdir_args /* {
syscallarg(char *) path;
} */ *uap;
{
- register struct filedesc *fdp = p->p_fd;
+ register struct filedesc *fdp = td->td_proc->p_fd;
int error;
struct nameidata nd;
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
- SCARG(uap, path), p);
- if ((error = change_dir(&nd, p)) != 0)
+ SCARG(uap, path), td);
+ if ((error = change_dir(&nd, td)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
vrele(fdp->fd_cdir);
@@ -971,17 +972,17 @@ struct chroot_args {
#endif
/* ARGSUSED */
int
-chroot(p, uap)
- struct proc *p;
+chroot(td, uap)
+ struct thread *td;
struct chroot_args /* {
syscallarg(char *) path;
} */ *uap;
{
- register struct filedesc *fdp = p->p_fd;
+ register struct filedesc *fdp = td->td_proc->p_fd;
int error;
struct nameidata nd;
- error = suser_xxx(0, p, PRISON_ROOT);
+ error = suser_xxx(0, td->td_proc, PRISON_ROOT);
if (error)
return (error);
if (chroot_allow_open_directories == 0 ||
@@ -990,8 +991,8 @@ chroot(p, uap)
if (error)
return (error);
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
- SCARG(uap, path), p);
- if ((error = change_dir(&nd, p)) != 0)
+ SCARG(uap, path), td);
+ if ((error = change_dir(&nd, td)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
vrele(fdp->fd_rdir);
@@ -1007,9 +1008,9 @@ chroot(p, uap)
* Common routine for chroot and chdir.
*/
static int
-change_dir(ndp, p)
+change_dir(ndp, td)
register struct nameidata *ndp;
- struct proc *p;
+ struct thread *td;
{
struct vnode *vp;
int error;
@@ -1021,11 +1022,11 @@ change_dir(ndp, p)
if (vp->v_type != VDIR)
error = ENOTDIR;
else
- error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p);
+ error = VOP_ACCESS(vp, VEXEC, td->td_proc->p_ucred, td);
if (error)
vput(vp);
else
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
return (error);
}
@@ -1041,14 +1042,15 @@ struct open_args {
};
#endif
int
-open(p, uap)
- struct proc *p;
+open(td, uap)
+ struct thread *td;
register struct open_args /* {
syscallarg(char *) path;
syscallarg(int) flags;
syscallarg(int) mode;
} */ *uap;
{
+ struct proc *p = td->td_proc;
struct filedesc *fdp = p->p_fd;
struct file *fp;
struct vnode *vp;
@@ -1064,13 +1066,13 @@ open(p, uap)
if ((oflags & O_ACCMODE) == O_ACCMODE)
return (EINVAL);
flags = FFLAGS(oflags);
- error = falloc(p, &nfp, &indx);
+ error = falloc(td, &nfp, &indx);
if (error)
return (error);
fp = nfp;
cmode = ((SCARG(uap, mode) &~ fdp->fd_cmask) & ALLPERMS) &~ S_ISTXT;
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
- p->p_dupfd = -indx - 1; /* XXX check for fdopen */
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
+ td->td_dupfd = -indx - 1; /* XXX check for fdopen */
/*
* Bump the ref count to prevent another process from closing
* the descriptor while we are blocked in vn_open()
@@ -1081,7 +1083,7 @@ open(p, uap)
/*
* release our own reference
*/
- fdrop(fp, p);
+ fdrop(fp, td);
/*
* handle special fdopen() case. bleh. dupfdopen() is
@@ -1089,10 +1091,10 @@ open(p, uap)
* if it succeeds.
*/
if ((error == ENODEV || error == ENXIO) &&
- p->p_dupfd >= 0 && /* XXX from fdopen */
+ td->td_dupfd >= 0 && /* XXX from fdopen */
(error =
- dupfdopen(p, fdp, indx, p->p_dupfd, flags, error)) == 0) {
- p->p_retval[0] = indx;
+ dupfdopen(td, fdp, indx, td->td_dupfd, flags, error)) == 0) {
+ td->td_retval[0] = indx;
return (0);
}
/*
@@ -1101,14 +1103,14 @@ open(p, uap)
*/
if (fdp->fd_ofiles[indx] == fp) {
fdp->fd_ofiles[indx] = NULL;
- fdrop(fp, p);
+ fdrop(fp, td);
}
if (error == ERESTART)
error = EINTR;
return (error);
}
- p->p_dupfd = 0;
+ td->td_dupfd = 0;
NDFREE(&nd, NDF_ONLY_PNBUF);
vp = nd.ni_vp;
@@ -1123,10 +1125,10 @@ open(p, uap)
if (fp->f_count == 1) {
KASSERT(fdp->fd_ofiles[indx] != fp,
("Open file descriptor lost all refs"));
- VOP_UNLOCK(vp, 0, p);
- vn_close(vp, flags & FMASK, fp->f_cred, p);
- fdrop(fp, p);
- p->p_retval[0] = indx;
+ VOP_UNLOCK(vp, 0, td);
+ vn_close(vp, flags & FMASK, fp->f_cred, td);
+ fdrop(fp, td);
+ td->td_retval[0] = indx;
return 0;
}
@@ -1134,7 +1136,7 @@ open(p, uap)
fp->f_flag = flags & FMASK;
fp->f_ops = &vnops;
fp->f_type = (vp->v_type == VFIFO ? DTYPE_FIFO : DTYPE_VNODE);
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
if (flags & (O_EXLOCK | O_SHLOCK)) {
lf.l_whence = SEEK_SET;
lf.l_start = 0;
@@ -1153,12 +1155,12 @@ open(p, uap)
if (flags & O_TRUNC) {
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
goto bad;
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
+ VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
VATTR_NULL(&vat);
vat.va_size = 0;
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- error = VOP_SETATTR(vp, &vat, p->p_ucred, p);
- VOP_UNLOCK(vp, 0, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ error = VOP_SETATTR(vp, &vat, p->p_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
if (error)
goto bad;
@@ -1170,15 +1172,15 @@ open(p, uap)
* Release our private reference, leaving the one associated with
* the descriptor table intact.
*/
- fdrop(fp, p);
- p->p_retval[0] = indx;
+ fdrop(fp, td);
+ td->td_retval[0] = indx;
return (0);
bad:
if (fdp->fd_ofiles[indx] == fp) {
fdp->fd_ofiles[indx] = NULL;
- fdrop(fp, p);
+ fdrop(fp, td);
}
- fdrop(fp, p);
+ fdrop(fp, td);
return (error);
}
@@ -1193,8 +1195,8 @@ struct ocreat_args {
};
#endif
int
-ocreat(p, uap)
- struct proc *p;
+ocreat(td, uap)
+ struct thread *td;
register struct ocreat_args /* {
syscallarg(char *) path;
syscallarg(int) mode;
@@ -1209,7 +1211,7 @@ ocreat(p, uap)
SCARG(&nuap, path) = SCARG(uap, path);
SCARG(&nuap, mode) = SCARG(uap, mode);
SCARG(&nuap, flags) = O_WRONLY | O_CREAT | O_TRUNC;
- return (open(p, &nuap));
+ return (open(td, &nuap));
}
#endif /* COMPAT_43 */
@@ -1225,8 +1227,8 @@ struct mknod_args {
#endif
/* ARGSUSED */
int
-mknod(p, uap)
- struct proc *p;
+mknod(td, uap)
+ struct thread *td;
register struct mknod_args /* {
syscallarg(char *) path;
syscallarg(int) mode;
@@ -1243,17 +1245,17 @@ mknod(p, uap)
switch (SCARG(uap, mode) & S_IFMT) {
case S_IFCHR:
case S_IFBLK:
- error = suser(p);
+ error = suser_td(td);
break;
default:
- error = suser_xxx(0, p, PRISON_ROOT);
+ error = suser_xxx(0, td->td_proc, PRISON_ROOT);
break;
}
if (error)
return (error);
restart:
bwillwrite();
- NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
vp = nd.ni_vp;
@@ -1262,7 +1264,7 @@ restart:
error = EEXIST;
} else {
VATTR_NULL(&vattr);
- vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ p->p_fd->fd_cmask;
+ vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ td->td_proc->p_fd->fd_cmask;
vattr.va_rdev = SCARG(uap, dev);
whiteout = 0;
@@ -1292,7 +1294,7 @@ restart:
goto restart;
}
if (!error) {
- VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
+ VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
if (whiteout)
error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, CREATE);
else {
@@ -1321,8 +1323,8 @@ struct mkfifo_args {
#endif
/* ARGSUSED */
int
-mkfifo(p, uap)
- struct proc *p;
+mkfifo(td, uap)
+ struct thread *td;
register struct mkfifo_args /* {
syscallarg(char *) path;
syscallarg(int) mode;
@@ -1335,7 +1337,7 @@ mkfifo(p, uap)
restart:
bwillwrite();
- NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
if (nd.ni_vp != NULL) {
@@ -1353,8 +1355,8 @@ restart:
}
VATTR_NULL(&vattr);
vattr.va_type = VFIFO;
- vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ p->p_fd->fd_cmask;
- VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
+ vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ td->td_proc->p_fd->fd_cmask;
+ VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
if (error == 0)
vput(nd.ni_vp);
@@ -1375,8 +1377,8 @@ struct link_args {
#endif
/* ARGSUSED */
int
-link(p, uap)
- struct proc *p;
+link(td, uap)
+ struct thread *td;
register struct link_args /* {
syscallarg(char *) path;
syscallarg(char *) link;
@@ -1388,7 +1390,7 @@ link(p, uap)
int error;
bwillwrite();
- NDINIT(&nd, LOOKUP, FOLLOW|NOOBJ, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW|NOOBJ, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
@@ -1401,14 +1403,14 @@ link(p, uap)
vrele(vp);
return (error);
}
- NDINIT(&nd, CREATE, LOCKPARENT|NOOBJ, UIO_USERSPACE, SCARG(uap, link), p);
+ NDINIT(&nd, CREATE, LOCKPARENT|NOOBJ, UIO_USERSPACE, SCARG(uap, link), td);
if ((error = namei(&nd)) == 0) {
if (nd.ni_vp != NULL) {
vrele(nd.ni_vp);
error = EEXIST;
} else {
- VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
+ VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
error = VOP_LINK(nd.ni_dvp, vp, &nd.ni_cnd);
}
NDFREE(&nd, NDF_ONLY_PNBUF);
@@ -1432,8 +1434,8 @@ struct symlink_args {
#endif
/* ARGSUSED */
int
-symlink(p, uap)
- struct proc *p;
+symlink(td, uap)
+ struct thread *td;
register struct symlink_args /* {
syscallarg(char *) path;
syscallarg(char *) link;
@@ -1450,7 +1452,7 @@ symlink(p, uap)
goto out;
restart:
bwillwrite();
- NDINIT(&nd, CREATE, LOCKPARENT|NOOBJ, UIO_USERSPACE, SCARG(uap, link), p);
+ NDINIT(&nd, CREATE, LOCKPARENT|NOOBJ, UIO_USERSPACE, SCARG(uap, link), td);
if ((error = namei(&nd)) != 0)
goto out;
if (nd.ni_vp) {
@@ -1468,8 +1470,8 @@ restart:
goto restart;
}
VATTR_NULL(&vattr);
- vattr.va_mode = ACCESSPERMS &~ p->p_fd->fd_cmask;
- VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
+ vattr.va_mode = ACCESSPERMS &~ td->td_proc->p_fd->fd_cmask;
+ VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
error = VOP_SYMLINK(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr, path);
NDFREE(&nd, NDF_ONLY_PNBUF);
if (error == 0)
@@ -1488,8 +1490,8 @@ out:
*/
/* ARGSUSED */
int
-undelete(p, uap)
- struct proc *p;
+undelete(td, uap)
+ struct thread *td;
register struct undelete_args /* {
syscallarg(char *) path;
} */ *uap;
@@ -1501,7 +1503,7 @@ undelete(p, uap)
restart:
bwillwrite();
NDINIT(&nd, DELETE, LOCKPARENT|DOWHITEOUT, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
error = namei(&nd);
if (error)
return (error);
@@ -1520,7 +1522,7 @@ restart:
return (error);
goto restart;
}
- VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
+ VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, DELETE);
NDFREE(&nd, NDF_ONLY_PNBUF);
vput(nd.ni_dvp);
@@ -1540,8 +1542,8 @@ struct unlink_args {
#endif
/* ARGSUSED */
int
-unlink(p, uap)
- struct proc *p;
+unlink(td, uap)
+ struct thread *td;
struct unlink_args /* {
syscallarg(char *) path;
} */ *uap;
@@ -1553,7 +1555,7 @@ unlink(p, uap)
restart:
bwillwrite();
- NDINIT(&nd, DELETE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, DELETE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
vp = nd.ni_vp;
@@ -1576,10 +1578,10 @@ restart:
return (error);
goto restart;
}
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
if (!error) {
- VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
+ VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
error = VOP_REMOVE(nd.ni_dvp, vp, &nd.ni_cnd);
}
NDFREE(&nd, NDF_ONLY_PNBUF);
@@ -1603,8 +1605,8 @@ struct lseek_args {
};
#endif
int
-lseek(p, uap)
- struct proc *p;
+lseek(td, uap)
+ struct thread *td;
register struct lseek_args /* {
syscallarg(int) fd;
syscallarg(int) pad;
@@ -1612,8 +1614,8 @@ lseek(p, uap)
syscallarg(int) whence;
} */ *uap;
{
- struct ucred *cred = p->p_ucred;
- register struct filedesc *fdp = p->p_fd;
+ struct ucred *cred = td->td_proc->p_ucred;
+ register struct filedesc *fdp = td->td_proc->p_fd;
register struct file *fp;
struct vattr vattr;
struct vnode *vp;
@@ -1637,7 +1639,7 @@ lseek(p, uap)
offset += fp->f_offset;
break;
case L_XTND:
- error = VOP_GETATTR(vp, &vattr, cred, p);
+ error = VOP_GETATTR(vp, &vattr, cred, td);
if (error)
return (error);
if (noneg &&
@@ -1654,7 +1656,7 @@ lseek(p, uap)
if (noneg && offset < 0)
return (EINVAL);
fp->f_offset = offset;
- *(off_t *)(p->p_retval) = fp->f_offset;
+ *(off_t *)(td->td_retval) = fp->f_offset;
return (0);
}
@@ -1670,8 +1672,8 @@ struct olseek_args {
};
#endif
int
-olseek(p, uap)
- struct proc *p;
+olseek(td, uap)
+ struct thread *td;
register struct olseek_args /* {
syscallarg(int) fd;
syscallarg(long) offset;
@@ -1689,7 +1691,7 @@ olseek(p, uap)
SCARG(&nuap, fd) = SCARG(uap, fd);
SCARG(&nuap, offset) = SCARG(uap, offset);
SCARG(&nuap, whence) = SCARG(uap, whence);
- error = lseek(p, &nuap);
+ error = lseek(td, &nuap);
return (error);
}
#endif /* COMPAT_43 */
@@ -1704,8 +1706,8 @@ struct access_args {
};
#endif
int
-access(p, uap)
- struct proc *p;
+access(td, uap)
+ struct thread *td;
register struct access_args /* {
syscallarg(char *) path;
syscallarg(int) flags;
@@ -1716,7 +1718,7 @@ access(p, uap)
int error, flags;
struct nameidata nd;
- cred = p->p_ucred;
+ cred = td->td_proc->p_ucred;
/*
* Create and modify a temporary credential instead of one that
* is potentially shared. This could also mess up socket
@@ -1729,9 +1731,9 @@ access(p, uap)
tmpcred = crdup(cred);
tmpcred->cr_uid = cred->cr_ruid;
tmpcred->cr_groups[0] = cred->cr_rgid;
- p->p_ucred = tmpcred;
+ td->td_proc->p_ucred = tmpcred;
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
goto out1;
vp = nd.ni_vp;
@@ -1746,12 +1748,12 @@ access(p, uap)
if (SCARG(uap, flags) & X_OK)
flags |= VEXEC;
if ((flags & VWRITE) == 0 || (error = vn_writechk(vp)) == 0)
- error = VOP_ACCESS(vp, flags, tmpcred, p);
+ error = VOP_ACCESS(vp, flags, tmpcred, td);
}
NDFREE(&nd, NDF_ONLY_PNBUF);
vput(vp);
out1:
- p->p_ucred = cred;
+ td->td_proc->p_ucred = cred;
crfree(tmpcred);
return (error);
}
@@ -1768,8 +1770,8 @@ struct ostat_args {
#endif
/* ARGSUSED */
int
-ostat(p, uap)
- struct proc *p;
+ostat(td, uap)
+ struct thread *td;
register struct ostat_args /* {
syscallarg(char *) path;
syscallarg(struct ostat *) ub;
@@ -1781,11 +1783,11 @@ ostat(p, uap)
struct nameidata nd;
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = vn_stat(nd.ni_vp, &sb, p);
+ error = vn_stat(nd.ni_vp, &sb, td);
vput(nd.ni_vp);
if (error)
return (error);
@@ -1805,8 +1807,8 @@ struct olstat_args {
#endif
/* ARGSUSED */
int
-olstat(p, uap)
- struct proc *p;
+olstat(td, uap)
+ struct thread *td;
register struct olstat_args /* {
syscallarg(char *) path;
syscallarg(struct ostat *) ub;
@@ -1819,11 +1821,11 @@ olstat(p, uap)
struct nameidata nd;
NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
vp = nd.ni_vp;
- error = vn_stat(vp, &sb, p);
+ error = vn_stat(vp, &sb, td);
NDFREE(&nd, NDF_ONLY_PNBUF);
vput(vp);
if (error)
@@ -1874,8 +1876,8 @@ struct stat_args {
#endif
/* ARGSUSED */
int
-stat(p, uap)
- struct proc *p;
+stat(td, uap)
+ struct thread *td;
register struct stat_args /* {
syscallarg(char *) path;
syscallarg(struct stat *) ub;
@@ -1886,10 +1888,10 @@ stat(p, uap)
struct nameidata nd;
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
- error = vn_stat(nd.ni_vp, &sb, p);
+ error = vn_stat(nd.ni_vp, &sb, td);
NDFREE(&nd, NDF_ONLY_PNBUF);
vput(nd.ni_vp);
if (error)
@@ -1909,8 +1911,8 @@ struct lstat_args {
#endif
/* ARGSUSED */
int
-lstat(p, uap)
- struct proc *p;
+lstat(td, uap)
+ struct thread *td;
register struct lstat_args /* {
syscallarg(char *) path;
syscallarg(struct stat *) ub;
@@ -1922,11 +1924,11 @@ lstat(p, uap)
struct nameidata nd;
NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
vp = nd.ni_vp;
- error = vn_stat(vp, &sb, p);
+ error = vn_stat(vp, &sb, td);
NDFREE(&nd, NDF_ONLY_PNBUF);
vput(vp);
if (error)
@@ -1974,8 +1976,8 @@ struct nstat_args {
#endif
/* ARGSUSED */
int
-nstat(p, uap)
- struct proc *p;
+nstat(td, uap)
+ struct thread *td;
register struct nstat_args /* {
syscallarg(char *) path;
syscallarg(struct nstat *) ub;
@@ -1987,11 +1989,11 @@ nstat(p, uap)
struct nameidata nd;
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = vn_stat(nd.ni_vp, &sb, p);
+ error = vn_stat(nd.ni_vp, &sb, td);
vput(nd.ni_vp);
if (error)
return (error);
@@ -2011,8 +2013,8 @@ struct lstat_args {
#endif
/* ARGSUSED */
int
-nlstat(p, uap)
- struct proc *p;
+nlstat(td, uap)
+ struct thread *td;
register struct nlstat_args /* {
syscallarg(char *) path;
syscallarg(struct nstat *) ub;
@@ -2025,12 +2027,12 @@ nlstat(p, uap)
struct nameidata nd;
NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
vp = nd.ni_vp;
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = vn_stat(vp, &sb, p);
+ error = vn_stat(vp, &sb, td);
vput(vp);
if (error)
return (error);
@@ -2050,8 +2052,8 @@ struct pathconf_args {
#endif
/* ARGSUSED */
int
-pathconf(p, uap)
- struct proc *p;
+pathconf(td, uap)
+ struct thread *td;
register struct pathconf_args /* {
syscallarg(char *) path;
syscallarg(int) name;
@@ -2061,11 +2063,11 @@ pathconf(p, uap)
struct nameidata nd;
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = VOP_PATHCONF(nd.ni_vp, SCARG(uap, name), p->p_retval);
+ error = VOP_PATHCONF(nd.ni_vp, SCARG(uap, name), td->td_retval);
vput(nd.ni_vp);
return (error);
}
@@ -2082,8 +2084,8 @@ struct readlink_args {
#endif
/* ARGSUSED */
int
-readlink(p, uap)
- struct proc *p;
+readlink(td, uap)
+ struct thread *td;
register struct readlink_args /* {
syscallarg(char *) path;
syscallarg(char *) buf;
@@ -2097,7 +2099,7 @@ readlink(p, uap)
struct nameidata nd;
NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
@@ -2112,12 +2114,12 @@ readlink(p, uap)
auio.uio_offset = 0;
auio.uio_rw = UIO_READ;
auio.uio_segflg = UIO_USERSPACE;
- auio.uio_procp = p;
+ auio.uio_td = td;
auio.uio_resid = SCARG(uap, count);
- error = VOP_READLINK(vp, &auio, p->p_ucred);
+ error = VOP_READLINK(vp, &auio, td->td_proc->p_ucred);
}
vput(vp);
- p->p_retval[0] = SCARG(uap, count) - auio.uio_resid;
+ td->td_retval[0] = SCARG(uap, count) - auio.uio_resid;
return (error);
}
@@ -2125,8 +2127,8 @@ readlink(p, uap)
* Common implementation code for chflags() and fchflags().
*/
static int
-setfflags(p, vp, flags)
- struct proc *p;
+setfflags(td, vp, flags)
+ struct thread *td;
struct vnode *vp;
int flags;
{
@@ -2141,17 +2143,17 @@ setfflags(p, vp, flags)
* chown can't fail when done as root.
*/
if ((vp->v_type == VCHR || vp->v_type == VBLK) &&
- ((error = suser_xxx(p->p_ucred, p, PRISON_ROOT)) != 0))
+ ((error = suser_xxx(td->td_proc->p_ucred, td->td_proc, PRISON_ROOT)) != 0))
return (error);
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
VATTR_NULL(&vattr);
vattr.va_flags = flags;
- error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
- VOP_UNLOCK(vp, 0, p);
+ error = VOP_SETATTR(vp, &vattr, td->td_proc->p_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
return (error);
}
@@ -2167,8 +2169,8 @@ struct chflags_args {
#endif
/* ARGSUSED */
int
-chflags(p, uap)
- struct proc *p;
+chflags(td, uap)
+ struct thread *td;
register struct chflags_args /* {
syscallarg(char *) path;
syscallarg(int) flags;
@@ -2177,11 +2179,11 @@ chflags(p, uap)
int error;
struct nameidata nd;
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = setfflags(p, nd.ni_vp, SCARG(uap, flags));
+ error = setfflags(td, nd.ni_vp, SCARG(uap, flags));
vrele(nd.ni_vp);
return error;
}
@@ -2197,8 +2199,8 @@ struct fchflags_args {
#endif
/* ARGSUSED */
int
-fchflags(p, uap)
- struct proc *p;
+fchflags(td, uap)
+ struct thread *td;
register struct fchflags_args /* {
syscallarg(int) fd;
syscallarg(int) flags;
@@ -2207,17 +2209,17 @@ fchflags(p, uap)
struct file *fp;
int error;
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
- return setfflags(p, (struct vnode *) fp->f_data, SCARG(uap, flags));
+ return setfflags(td, (struct vnode *) fp->f_data, SCARG(uap, flags));
}
/*
* Common implementation code for chmod(), lchmod() and fchmod().
*/
static int
-setfmode(p, vp, mode)
- struct proc *p;
+setfmode(td, vp, mode)
+ struct thread *td;
struct vnode *vp;
int mode;
{
@@ -2227,12 +2229,12 @@ setfmode(p, vp, mode)
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
VATTR_NULL(&vattr);
vattr.va_mode = mode & ALLPERMS;
- error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
- VOP_UNLOCK(vp, 0, p);
+ error = VOP_SETATTR(vp, &vattr, td->td_proc->p_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
return error;
}
@@ -2248,8 +2250,8 @@ struct chmod_args {
#endif
/* ARGSUSED */
int
-chmod(p, uap)
- struct proc *p;
+chmod(td, uap)
+ struct thread *td;
register struct chmod_args /* {
syscallarg(char *) path;
syscallarg(int) mode;
@@ -2258,11 +2260,11 @@ chmod(p, uap)
int error;
struct nameidata nd;
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = setfmode(p, nd.ni_vp, SCARG(uap, mode));
+ error = setfmode(td, nd.ni_vp, SCARG(uap, mode));
vrele(nd.ni_vp);
return error;
}
@@ -2278,8 +2280,8 @@ struct lchmod_args {
#endif
/* ARGSUSED */
int
-lchmod(p, uap)
- struct proc *p;
+lchmod(td, uap)
+ struct thread *td;
register struct lchmod_args /* {
syscallarg(char *) path;
syscallarg(int) mode;
@@ -2288,11 +2290,11 @@ lchmod(p, uap)
int error;
struct nameidata nd;
- NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = setfmode(p, nd.ni_vp, SCARG(uap, mode));
+ error = setfmode(td, nd.ni_vp, SCARG(uap, mode));
vrele(nd.ni_vp);
return error;
}
@@ -2308,8 +2310,8 @@ struct fchmod_args {
#endif
/* ARGSUSED */
int
-fchmod(p, uap)
- struct proc *p;
+fchmod(td, uap)
+ struct thread *td;
register struct fchmod_args /* {
syscallarg(int) fd;
syscallarg(int) mode;
@@ -2318,17 +2320,17 @@ fchmod(p, uap)
struct file *fp;
int error;
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
- return setfmode(p, (struct vnode *)fp->f_data, SCARG(uap, mode));
+ return setfmode(td, (struct vnode *)fp->f_data, SCARG(uap, mode));
}
/*
* Common implementation for chown(), lchown(), and fchown()
*/
static int
-setfown(p, vp, uid, gid)
- struct proc *p;
+setfown(td, vp, uid, gid)
+ struct thread *td;
struct vnode *vp;
uid_t uid;
gid_t gid;
@@ -2339,13 +2341,13 @@ setfown(p, vp, uid, gid)
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
VATTR_NULL(&vattr);
vattr.va_uid = uid;
vattr.va_gid = gid;
- error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
- VOP_UNLOCK(vp, 0, p);
+ error = VOP_SETATTR(vp, &vattr, td->td_proc->p_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
return error;
}
@@ -2362,8 +2364,8 @@ struct chown_args {
#endif
/* ARGSUSED */
int
-chown(p, uap)
- struct proc *p;
+chown(td, uap)
+ struct thread *td;
register struct chown_args /* {
syscallarg(char *) path;
syscallarg(int) uid;
@@ -2373,11 +2375,11 @@ chown(p, uap)
int error;
struct nameidata nd;
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = setfown(p, nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid));
+ error = setfown(td, nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid));
vrele(nd.ni_vp);
return (error);
}
@@ -2394,8 +2396,8 @@ struct lchown_args {
#endif
/* ARGSUSED */
int
-lchown(p, uap)
- struct proc *p;
+lchown(td, uap)
+ struct thread *td;
register struct lchown_args /* {
syscallarg(char *) path;
syscallarg(int) uid;
@@ -2405,11 +2407,11 @@ lchown(p, uap)
int error;
struct nameidata nd;
- NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = setfown(p, nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid));
+ error = setfown(td, nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid));
vrele(nd.ni_vp);
return (error);
}
@@ -2426,8 +2428,8 @@ struct fchown_args {
#endif
/* ARGSUSED */
int
-fchown(p, uap)
- struct proc *p;
+fchown(td, uap)
+ struct thread *td;
register struct fchown_args /* {
syscallarg(int) fd;
syscallarg(int) uid;
@@ -2437,9 +2439,9 @@ fchown(p, uap)
struct file *fp;
int error;
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
- return setfown(p, (struct vnode *)fp->f_data,
+ return setfown(td, (struct vnode *)fp->f_data,
SCARG(uap, uid), SCARG(uap, gid));
}
@@ -2471,8 +2473,8 @@ getutimes(usrtvp, tsp)
* Common implementation code for utimes(), lutimes(), and futimes().
*/
static int
-setutimes(p, vp, ts, nullflag)
- struct proc *p;
+setutimes(td, vp, ts, nullflag)
+ struct thread *td;
struct vnode *vp;
const struct timespec *ts;
int nullflag;
@@ -2483,15 +2485,15 @@ setutimes(p, vp, ts, nullflag)
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
VATTR_NULL(&vattr);
vattr.va_atime = ts[0];
vattr.va_mtime = ts[1];
if (nullflag)
vattr.va_vaflags |= VA_UTIMES_NULL;
- error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
- VOP_UNLOCK(vp, 0, p);
+ error = VOP_SETATTR(vp, &vattr, td->td_proc->p_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
return error;
}
@@ -2507,8 +2509,8 @@ struct utimes_args {
#endif
/* ARGSUSED */
int
-utimes(p, uap)
- struct proc *p;
+utimes(td, uap)
+ struct thread *td;
register struct utimes_args /* {
syscallarg(char *) path;
syscallarg(struct timeval *) tptr;
@@ -2522,11 +2524,11 @@ utimes(p, uap)
usrtvp = SCARG(uap, tptr);
if ((error = getutimes(usrtvp, ts)) != 0)
return (error);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = setutimes(p, nd.ni_vp, ts, usrtvp == NULL);
+ error = setutimes(td, nd.ni_vp, ts, usrtvp == NULL);
vrele(nd.ni_vp);
return (error);
}
@@ -2542,8 +2544,8 @@ struct lutimes_args {
#endif
/* ARGSUSED */
int
-lutimes(p, uap)
- struct proc *p;
+lutimes(td, uap)
+ struct thread *td;
register struct lutimes_args /* {
syscallarg(char *) path;
syscallarg(struct timeval *) tptr;
@@ -2557,11 +2559,11 @@ lutimes(p, uap)
usrtvp = SCARG(uap, tptr);
if ((error = getutimes(usrtvp, ts)) != 0)
return (error);
- NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
- error = setutimes(p, nd.ni_vp, ts, usrtvp == NULL);
+ error = setutimes(td, nd.ni_vp, ts, usrtvp == NULL);
vrele(nd.ni_vp);
return (error);
}
@@ -2577,8 +2579,8 @@ struct futimes_args {
#endif
/* ARGSUSED */
int
-futimes(p, uap)
- struct proc *p;
+futimes(td, uap)
+ struct thread *td;
register struct futimes_args /* {
syscallarg(int ) fd;
syscallarg(struct timeval *) tptr;
@@ -2592,9 +2594,9 @@ futimes(p, uap)
usrtvp = SCARG(uap, tptr);
if ((error = getutimes(usrtvp, ts)) != 0)
return (error);
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
- return setutimes(p, (struct vnode *)fp->f_data, ts, usrtvp == NULL);
+ return setutimes(td, (struct vnode *)fp->f_data, ts, usrtvp == NULL);
}
/*
@@ -2609,8 +2611,8 @@ struct truncate_args {
#endif
/* ARGSUSED */
int
-truncate(p, uap)
- struct proc *p;
+truncate(td, uap)
+ struct thread *td;
register struct truncate_args /* {
syscallarg(char *) path;
syscallarg(int) pad;
@@ -2625,7 +2627,7 @@ truncate(p, uap)
if (uap->length < 0)
return(EINVAL);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
vp = nd.ni_vp;
@@ -2634,15 +2636,15 @@ truncate(p, uap)
return (error);
}
NDFREE(&nd, NDF_ONLY_PNBUF);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
if (vp->v_type == VDIR)
error = EISDIR;
else if ((error = vn_writechk(vp)) == 0 &&
- (error = VOP_ACCESS(vp, VWRITE, p->p_ucred, p)) == 0) {
+ (error = VOP_ACCESS(vp, VWRITE, td->td_proc->p_ucred, td)) == 0) {
VATTR_NULL(&vattr);
vattr.va_size = SCARG(uap, length);
- error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
+ error = VOP_SETATTR(vp, &vattr, td->td_proc->p_ucred, td);
}
vput(vp);
vn_finished_write(mp);
@@ -2661,8 +2663,8 @@ struct ftruncate_args {
#endif
/* ARGSUSED */
int
-ftruncate(p, uap)
- struct proc *p;
+ftruncate(td, uap)
+ struct thread *td;
register struct ftruncate_args /* {
syscallarg(int) fd;
syscallarg(int) pad;
@@ -2677,23 +2679,23 @@ ftruncate(p, uap)
if (uap->length < 0)
return(EINVAL);
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
if ((fp->f_flag & FWRITE) == 0)
return (EINVAL);
vp = (struct vnode *)fp->f_data;
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
if (vp->v_type == VDIR)
error = EISDIR;
else if ((error = vn_writechk(vp)) == 0) {
VATTR_NULL(&vattr);
vattr.va_size = SCARG(uap, length);
- error = VOP_SETATTR(vp, &vattr, fp->f_cred, p);
+ error = VOP_SETATTR(vp, &vattr, fp->f_cred, td);
}
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
return (error);
}
@@ -2710,8 +2712,8 @@ struct otruncate_args {
#endif
/* ARGSUSED */
int
-otruncate(p, uap)
- struct proc *p;
+otruncate(td, uap)
+ struct thread *td;
register struct otruncate_args /* {
syscallarg(char *) path;
syscallarg(long) length;
@@ -2725,7 +2727,7 @@ otruncate(p, uap)
SCARG(&nuap, path) = SCARG(uap, path);
SCARG(&nuap, length) = SCARG(uap, length);
- return (truncate(p, &nuap));
+ return (truncate(td, &nuap));
}
/*
@@ -2739,8 +2741,8 @@ struct oftruncate_args {
#endif
/* ARGSUSED */
int
-oftruncate(p, uap)
- struct proc *p;
+oftruncate(td, uap)
+ struct thread *td;
register struct oftruncate_args /* {
syscallarg(int) fd;
syscallarg(long) length;
@@ -2754,7 +2756,7 @@ oftruncate(p, uap)
SCARG(&nuap, fd) = SCARG(uap, fd);
SCARG(&nuap, length) = SCARG(uap, length);
- return (ftruncate(p, &nuap));
+ return (ftruncate(td, &nuap));
}
#endif /* COMPAT_43 || COMPAT_SUNOS */
@@ -2768,8 +2770,8 @@ struct fsync_args {
#endif
/* ARGSUSED */
int
-fsync(p, uap)
- struct proc *p;
+fsync(td, uap)
+ struct thread *td;
struct fsync_args /* {
syscallarg(int) fd;
} */ *uap;
@@ -2782,22 +2784,22 @@ fsync(p, uap)
GIANT_REQUIRED;
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
vp = (struct vnode *)fp->f_data;
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
if (VOP_GETVOBJECT(vp, &obj) == 0) {
vm_object_page_clean(obj, 0, 0, 0);
}
- error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p);
+ error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, td);
#ifdef SOFTUPDATES
if (error == 0 && vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
error = softdep_fsync(vp);
#endif
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
return (error);
}
@@ -2814,8 +2816,8 @@ struct rename_args {
#endif
/* ARGSUSED */
int
-rename(p, uap)
- struct proc *p;
+rename(td, uap)
+ struct thread *td;
register struct rename_args /* {
syscallarg(char *) from;
syscallarg(char *) to;
@@ -2828,7 +2830,7 @@ rename(p, uap)
bwillwrite();
NDINIT(&fromnd, DELETE, WANTPARENT | SAVESTART, UIO_USERSPACE,
- SCARG(uap, from), p);
+ SCARG(uap, from), td);
if ((error = namei(&fromnd)) != 0)
return (error);
fvp = fromnd.ni_vp;
@@ -2839,7 +2841,7 @@ rename(p, uap)
goto out1;
}
NDINIT(&tond, RENAME, LOCKPARENT | LOCKLEAF | NOCACHE | SAVESTART | NOOBJ,
- UIO_USERSPACE, SCARG(uap, to), p);
+ UIO_USERSPACE, SCARG(uap, to), td);
if (fromnd.ni_vp->v_type == VDIR)
tond.ni_cnd.cn_flags |= WILLBEDIR;
if ((error = namei(&tond)) != 0) {
@@ -2876,12 +2878,12 @@ rename(p, uap)
error = -1;
out:
if (!error) {
- VOP_LEASE(tdvp, p, p->p_ucred, LEASE_WRITE);
+ VOP_LEASE(tdvp, td, td->td_proc->p_ucred, LEASE_WRITE);
if (fromnd.ni_dvp != tdvp) {
- VOP_LEASE(fromnd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
+ VOP_LEASE(fromnd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
}
if (tvp) {
- VOP_LEASE(tvp, p, p->p_ucred, LEASE_WRITE);
+ VOP_LEASE(tvp, td, td->td_proc->p_ucred, LEASE_WRITE);
}
error = VOP_RENAME(fromnd.ni_dvp, fromnd.ni_vp, &fromnd.ni_cnd,
tond.ni_dvp, tond.ni_vp, &tond.ni_cnd);
@@ -2924,13 +2926,24 @@ struct mkdir_args {
#endif
/* ARGSUSED */
int
-mkdir(p, uap)
- struct proc *p;
+mkdir(td, uap)
+ struct thread *td;
register struct mkdir_args /* {
syscallarg(char *) path;
syscallarg(int) mode;
} */ *uap;
{
+
+ return vn_mkdir(uap->path, uap->mode, UIO_USERSPACE, td);
+}
+
+int
+vn_mkdir(path, mode, segflg, td)
+ char *path;
+ int mode;
+ enum uio_seg segflg;
+ struct thread *td;
+{
struct mount *mp;
struct vnode *vp;
struct vattr vattr;
@@ -2939,7 +2952,7 @@ mkdir(p, uap)
restart:
bwillwrite();
- NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, CREATE, LOCKPARENT, segflg, path, td);
nd.ni_cnd.cn_flags |= WILLBEDIR;
if ((error = namei(&nd)) != 0)
return (error);
@@ -2959,8 +2972,8 @@ restart:
}
VATTR_NULL(&vattr);
vattr.va_type = VDIR;
- vattr.va_mode = (SCARG(uap, mode) & ACCESSPERMS) &~ p->p_fd->fd_cmask;
- VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
+ vattr.va_mode = (mode & ACCESSPERMS) &~ td->td_proc->p_fd->fd_cmask;
+ VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
error = VOP_MKDIR(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
NDFREE(&nd, NDF_ONLY_PNBUF);
vput(nd.ni_dvp);
@@ -2982,8 +2995,8 @@ struct rmdir_args {
#endif
/* ARGSUSED */
int
-rmdir(p, uap)
- struct proc *p;
+rmdir(td, uap)
+ struct thread *td;
struct rmdir_args /* {
syscallarg(char *) path;
} */ *uap;
@@ -2996,7 +3009,7 @@ rmdir(p, uap)
restart:
bwillwrite();
NDINIT(&nd, DELETE, LOCKPARENT | LOCKLEAF, UIO_USERSPACE,
- SCARG(uap, path), p);
+ SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
vp = nd.ni_vp;
@@ -3029,8 +3042,8 @@ restart:
return (error);
goto restart;
}
- VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
+ VOP_LEASE(nd.ni_dvp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
error = VOP_RMDIR(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd);
vn_finished_write(mp);
out:
@@ -3058,8 +3071,8 @@ struct ogetdirentries_args {
};
#endif
int
-ogetdirentries(p, uap)
- struct proc *p;
+ogetdirentries(td, uap)
+ struct thread *td;
register struct ogetdirentries_args /* {
syscallarg(int) fd;
syscallarg(char *) buf;
@@ -3079,7 +3092,7 @@ ogetdirentries(p, uap)
/* XXX arbitrary sanity limit on `count'. */
if (SCARG(uap, count) > 64 * 1024)
return (EINVAL);
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
if ((fp->f_flag & FREAD) == 0)
return (EBADF);
@@ -3093,9 +3106,9 @@ unionread:
auio.uio_iovcnt = 1;
auio.uio_rw = UIO_READ;
auio.uio_segflg = UIO_USERSPACE;
- auio.uio_procp = p;
+ auio.uio_td = td;
auio.uio_resid = SCARG(uap, count);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
loff = auio.uio_offset = fp->f_offset;
# if (BYTE_ORDER != LITTLE_ENDIAN)
if (vp->v_mount->mnt_maxsymlinklen <= 0) {
@@ -3148,12 +3161,12 @@ unionread:
}
FREE(dirbuf, M_TEMP);
}
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
if (error)
return (error);
if (SCARG(uap, count) == auio.uio_resid) {
if (union_dircheckp) {
- error = union_dircheckp(p, &vp, fp);
+ error = union_dircheckp(td, &vp, fp);
if (error == -1)
goto unionread;
if (error)
@@ -3172,7 +3185,7 @@ unionread:
}
error = copyout((caddr_t)&loff, (caddr_t)SCARG(uap, basep),
sizeof(long));
- p->p_retval[0] = SCARG(uap, count) - auio.uio_resid;
+ td->td_retval[0] = SCARG(uap, count) - auio.uio_resid;
return (error);
}
#endif /* COMPAT_43 */
@@ -3189,8 +3202,8 @@ struct getdirentries_args {
};
#endif
int
-getdirentries(p, uap)
- struct proc *p;
+getdirentries(td, uap)
+ struct thread *td;
register struct getdirentries_args /* {
syscallarg(int) fd;
syscallarg(char *) buf;
@@ -3205,7 +3218,7 @@ getdirentries(p, uap)
long loff;
int error, eofflag;
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
if ((fp->f_flag & FREAD) == 0)
return (EBADF);
@@ -3219,19 +3232,19 @@ unionread:
auio.uio_iovcnt = 1;
auio.uio_rw = UIO_READ;
auio.uio_segflg = UIO_USERSPACE;
- auio.uio_procp = p;
+ auio.uio_td = td;
auio.uio_resid = SCARG(uap, count);
- /* vn_lock(vp, LK_SHARED | LK_RETRY, p); */
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ /* vn_lock(vp, LK_SHARED | LK_RETRY, td); */
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
loff = auio.uio_offset = fp->f_offset;
error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, NULL, NULL);
fp->f_offset = auio.uio_offset;
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
if (error)
return (error);
if (SCARG(uap, count) == auio.uio_resid) {
if (union_dircheckp) {
- error = union_dircheckp(p, &vp, fp);
+ error = union_dircheckp(td, &vp, fp);
if (error == -1)
goto unionread;
if (error)
@@ -3252,7 +3265,7 @@ unionread:
error = copyout((caddr_t)&loff, (caddr_t)SCARG(uap, basep),
sizeof(long));
}
- p->p_retval[0] = SCARG(uap, count) - auio.uio_resid;
+ td->td_retval[0] = SCARG(uap, count) - auio.uio_resid;
return (error);
}
#ifndef _SYS_SYSPROTO_H_
@@ -3263,8 +3276,8 @@ struct getdents_args {
};
#endif
int
-getdents(p, uap)
- struct proc *p;
+getdents(td, uap)
+ struct thread *td;
register struct getdents_args /* {
syscallarg(int) fd;
syscallarg(char *) buf;
@@ -3276,7 +3289,7 @@ getdents(p, uap)
ap.buf = uap->buf;
ap.count = uap->count;
ap.basep = NULL;
- return getdirentries(p, &ap);
+ return getdirentries(td, &ap);
}
/*
@@ -3290,16 +3303,16 @@ struct umask_args {
};
#endif
int
-umask(p, uap)
- struct proc *p;
+umask(td, uap)
+ struct thread *td;
struct umask_args /* {
syscallarg(int) newmask;
} */ *uap;
{
register struct filedesc *fdp;
- fdp = p->p_fd;
- p->p_retval[0] = fdp->fd_cmask;
+ fdp = td->td_proc->p_fd;
+ td->td_retval[0] = fdp->fd_cmask;
fdp->fd_cmask = SCARG(uap, newmask) & ALLPERMS;
return (0);
}
@@ -3315,8 +3328,8 @@ struct revoke_args {
#endif
/* ARGSUSED */
int
-revoke(p, uap)
- struct proc *p;
+revoke(td, uap)
+ struct thread *td;
register struct revoke_args /* {
syscallarg(char *) path;
} */ *uap;
@@ -3327,7 +3340,7 @@ revoke(p, uap)
int error;
struct nameidata nd;
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
vp = nd.ni_vp;
@@ -3336,10 +3349,10 @@ revoke(p, uap)
error = EINVAL;
goto out;
}
- if ((error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) != 0)
+ if ((error = VOP_GETATTR(vp, &vattr, td->td_proc->p_ucred, td)) != 0)
goto out;
- if (p->p_ucred->cr_uid != vattr.va_uid &&
- (error = suser_xxx(0, p, PRISON_ROOT)))
+ if (td->td_proc->p_ucred->cr_uid != vattr.va_uid &&
+ (error = suser_xxx(0, td->td_proc, PRISON_ROOT)))
goto out;
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
goto out;
@@ -3380,8 +3393,8 @@ struct getfh_args {
};
#endif
int
-getfh(p, uap)
- struct proc *p;
+getfh(td, uap)
+ struct thread *td;
register struct getfh_args *uap;
{
struct nameidata nd;
@@ -3392,10 +3405,10 @@ getfh(p, uap)
/*
* Must be super user
*/
- error = suser(p);
+ error = suser_td(td);
if (error)
return (error);
- NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, uap->fname, p);
+ NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, uap->fname, td);
error = namei(&nd);
if (error)
return (error);
@@ -3425,13 +3438,14 @@ struct fhopen_args {
};
#endif
int
-fhopen(p, uap)
- struct proc *p;
+fhopen(td, uap)
+ struct thread *td;
struct fhopen_args /* {
syscallarg(const struct fhandle *) u_fhp;
syscallarg(int) flags;
} */ *uap;
{
+ struct proc *p = td->td_proc;
struct mount *mp;
struct vnode *vp;
struct fhandle fhp;
@@ -3447,7 +3461,7 @@ fhopen(p, uap)
/*
* Must be super user
*/
- error = suser(p);
+ error = suser_td(td);
if (error)
return (error);
@@ -3498,33 +3512,33 @@ fhopen(p, uap)
if (fmode & FREAD)
mode |= VREAD;
if (mode) {
- error = VOP_ACCESS(vp, mode, p->p_ucred, p);
+ error = VOP_ACCESS(vp, mode, p->p_ucred, td);
if (error)
goto bad;
}
if (fmode & O_TRUNC) {
- VOP_UNLOCK(vp, 0, p); /* XXX */
+ VOP_UNLOCK(vp, 0, td); /* XXX */
if ((error = vn_start_write(NULL, &mp, V_WAIT | PCATCH)) != 0) {
vrele(vp);
return (error);
}
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); /* XXX */
+ VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); /* XXX */
VATTR_NULL(vap);
vap->va_size = 0;
- error = VOP_SETATTR(vp, vap, p->p_ucred, p);
+ error = VOP_SETATTR(vp, vap, p->p_ucred, td);
vn_finished_write(mp);
if (error)
goto bad;
}
- error = VOP_OPEN(vp, fmode, p->p_ucred, p);
+ error = VOP_OPEN(vp, fmode, p->p_ucred, td);
if (error)
goto bad;
/*
* Make sure that a VM object is created for VMIO support.
*/
if (vn_canvmio(vp) == TRUE) {
- if ((error = vfs_object_create(vp, p, p->p_ucred)) != 0)
+ if ((error = vfs_object_create(vp, td, p->p_ucred)) != 0)
goto bad;
}
if (fmode & FWRITE)
@@ -3534,7 +3548,7 @@ fhopen(p, uap)
* end of vn_open code
*/
- if ((error = falloc(p, &nfp, &indx)) != 0)
+ if ((error = falloc(td, &nfp, &indx)) != 0)
goto bad;
fp = nfp;
@@ -3558,7 +3572,7 @@ fhopen(p, uap)
type = F_FLOCK;
if ((fmode & FNONBLOCK) == 0)
type |= F_WAIT;
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
/*
* The lock request failed. Normally close the
@@ -3567,23 +3581,23 @@ fhopen(p, uap)
*/
if (fdp->fd_ofiles[indx] == fp) {
fdp->fd_ofiles[indx] = NULL;
- fdrop(fp, p);
+ fdrop(fp, td);
}
/*
* release our private reference
*/
- fdrop(fp, p);
+ fdrop(fp, td);
return(error);
}
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
fp->f_flag |= FHASLOCK;
}
if ((vp->v_type == VREG) && (VOP_GETVOBJECT(vp, NULL) != 0))
- vfs_object_create(vp, p, p->p_ucred);
+ vfs_object_create(vp, td, p->p_ucred);
- VOP_UNLOCK(vp, 0, p);
- fdrop(fp, p);
- p->p_retval[0] = indx;
+ VOP_UNLOCK(vp, 0, td);
+ fdrop(fp, td);
+ td->td_retval[0] = indx;
return (0);
bad:
@@ -3601,8 +3615,8 @@ struct fhstat_args {
};
#endif
int
-fhstat(p, uap)
- struct proc *p;
+fhstat(td, uap)
+ struct thread *td;
register struct fhstat_args /* {
syscallarg(struct fhandle *) u_fhp;
syscallarg(struct stat *) sb;
@@ -3617,7 +3631,7 @@ fhstat(p, uap)
/*
* Must be super user
*/
- error = suser(p);
+ error = suser_td(td);
if (error)
return (error);
@@ -3629,7 +3643,7 @@ fhstat(p, uap)
return (ESTALE);
if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
return (error);
- error = vn_stat(vp, &sb, p);
+ error = vn_stat(vp, &sb, td);
vput(vp);
if (error)
return (error);
@@ -3647,8 +3661,8 @@ struct fhstatfs_args {
};
#endif
int
-fhstatfs(p, uap)
- struct proc *p;
+fhstatfs(td, uap)
+ struct thread *td;
struct fhstatfs_args /* {
syscallarg(struct fhandle) *u_fhp;
syscallarg(struct statfs) *buf;
@@ -3664,7 +3678,7 @@ fhstatfs(p, uap)
/*
* Must be super user
*/
- if ((error = suser(p)))
+ if ((error = suser_td(td)))
return (error);
if ((error = copyin(SCARG(uap, u_fhp), &fh, sizeof(fhandle_t))) != 0)
@@ -3677,10 +3691,10 @@ fhstatfs(p, uap)
mp = vp->v_mount;
sp = &mp->mnt_stat;
vput(vp);
- if ((error = VFS_STATFS(mp, sp, p)) != 0)
+ if ((error = VFS_STATFS(mp, sp, td)) != 0)
return (error);
sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
- if (suser_xxx(p->p_ucred, 0, 0)) {
+ if (suser_xxx(td->td_proc->p_ucred, 0, 0)) {
bcopy((caddr_t)sp, (caddr_t)&sb, sizeof(sb));
sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
sp = &sb;
@@ -3700,8 +3714,8 @@ fhstatfs(p, uap)
* Currently this is used only by UFS Extended Attributes.
*/
int
-extattrctl(p, uap)
- struct proc *p;
+extattrctl(td, uap)
+ struct thread *td;
struct extattrctl_args *uap;
{
struct vnode *filename_vp;
@@ -3728,7 +3742,7 @@ extattrctl(p, uap)
filename_vp = NULL;
if (SCARG(uap, filename) != NULL) {
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
- SCARG(uap, filename), p);
+ SCARG(uap, filename), td);
if ((error = namei(&nd)) != 0)
return (error);
filename_vp = nd.ni_vp;
@@ -3736,7 +3750,7 @@ extattrctl(p, uap)
}
/* SCARG(uap, path) always defined. */
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
error = vn_start_write(nd.ni_vp, &mp, V_WAIT | PCATCH);
@@ -3749,10 +3763,10 @@ extattrctl(p, uap)
if (SCARG(uap, attrname) != NULL) {
error = VFS_EXTATTRCTL(mp, SCARG(uap, cmd), filename_vp,
- SCARG(uap, attrnamespace), attrname, p);
+ SCARG(uap, attrnamespace), attrname, td);
} else {
error = VFS_EXTATTRCTL(mp, SCARG(uap, cmd), filename_vp,
- SCARG(uap, attrnamespace), NULL, p);
+ SCARG(uap, attrnamespace), NULL, td);
}
vn_finished_write(mp);
@@ -3779,7 +3793,7 @@ extattrctl(p, uap)
*/
static int
extattr_set_vp(struct vnode *vp, int attrnamespace, const char *attrname,
- struct iovec *iovp, unsigned iovcnt, struct proc *p)
+ struct iovec *iovp, unsigned iovcnt, struct thread *td)
{
struct mount *mp;
struct uio auio;
@@ -3789,8 +3803,8 @@ extattr_set_vp(struct vnode *vp, int attrnamespace, const char *attrname,
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
iovlen = iovcnt * sizeof(struct iovec);
if (iovcnt > UIO_SMALLIOV) {
@@ -3806,7 +3820,7 @@ extattr_set_vp(struct vnode *vp, int attrnamespace, const char *attrname,
auio.uio_iovcnt = iovcnt;
auio.uio_rw = UIO_WRITE;
auio.uio_segflg = UIO_USERSPACE;
- auio.uio_procp = p;
+ auio.uio_td = td;
auio.uio_offset = 0;
if ((error = copyin((caddr_t)iovp, (caddr_t)iov, iovlen)))
goto done;
@@ -3821,20 +3835,20 @@ extattr_set_vp(struct vnode *vp, int attrnamespace, const char *attrname,
}
cnt = auio.uio_resid;
error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio,
- p->p_ucred, p);
+ td->td_proc->p_ucred, td);
cnt -= auio.uio_resid;
- p->p_retval[0] = cnt;
+ td->td_retval[0] = cnt;
done:
if (needfree)
FREE(needfree, M_IOV);
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
return (error);
}
int
-extattr_set_file(p, uap)
- struct proc *p;
+extattr_set_file(td, uap)
+ struct thread *td;
struct extattr_set_file_args *uap;
{
struct nameidata nd;
@@ -3846,21 +3860,21 @@ extattr_set_file(p, uap)
if (error)
return (error);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
error = extattr_set_vp(nd.ni_vp, SCARG(uap, attrnamespace), attrname,
- SCARG(uap, iovp), SCARG(uap, iovcnt), p);
+ SCARG(uap, iovp), SCARG(uap, iovcnt), td);
vrele(nd.ni_vp);
return (error);
}
int
-extattr_set_fd(p, uap)
- struct proc *p;
+extattr_set_fd(td, uap)
+ struct thread *td;
struct extattr_set_fd_args *uap;
{
struct file *fp;
@@ -3872,12 +3886,12 @@ extattr_set_fd(p, uap)
if (error)
return (error);
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
error = extattr_set_vp((struct vnode *)fp->f_data,
SCARG(uap, attrnamespace), attrname, SCARG(uap, iovp),
- SCARG(uap, iovcnt), p);
+ SCARG(uap, iovcnt), td);
return (error);
}
@@ -3895,15 +3909,15 @@ extattr_set_fd(p, uap)
*/
static int
extattr_get_vp(struct vnode *vp, int attrnamespace, const char *attrname,
- struct iovec *iovp, unsigned iovcnt, struct proc *p)
+ struct iovec *iovp, unsigned iovcnt, struct thread *td)
{
struct uio auio;
struct iovec *iov, *needfree = NULL, aiov[UIO_SMALLIOV];
u_int iovlen, cnt;
int error, i;
- VOP_LEASE(vp, p, p->p_ucred, LEASE_READ);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_READ);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
iovlen = iovcnt * sizeof (struct iovec);
if (iovcnt > UIO_SMALLIOV) {
@@ -3919,7 +3933,7 @@ extattr_get_vp(struct vnode *vp, int attrnamespace, const char *attrname,
auio.uio_iovcnt = iovcnt;
auio.uio_rw = UIO_READ;
auio.uio_segflg = UIO_USERSPACE;
- auio.uio_procp = p;
+ auio.uio_td = td;
auio.uio_offset = 0;
if ((error = copyin((caddr_t)iovp, (caddr_t)iov, iovlen)))
goto done;
@@ -3934,19 +3948,19 @@ extattr_get_vp(struct vnode *vp, int attrnamespace, const char *attrname,
}
cnt = auio.uio_resid;
error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio,
- p->p_ucred, p);
+ td->td_proc->p_ucred, td);
cnt -= auio.uio_resid;
- p->p_retval[0] = cnt;
+ td->td_retval[0] = cnt;
done:
if (needfree)
FREE(needfree, M_IOV);
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
return (error);
}
int
-extattr_get_file(p, uap)
- struct proc *p;
+extattr_get_file(td, uap)
+ struct thread *td;
struct extattr_get_file_args *uap;
{
struct nameidata nd;
@@ -3958,21 +3972,21 @@ extattr_get_file(p, uap)
if (error)
return (error);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return (error);
NDFREE(&nd, NDF_ONLY_PNBUF);
error = extattr_get_vp(nd.ni_vp, SCARG(uap, attrnamespace), attrname,
- SCARG(uap, iovp), SCARG(uap, iovcnt), p);
+ SCARG(uap, iovp), SCARG(uap, iovcnt), td);
vrele(nd.ni_vp);
return (error);
}
int
-extattr_get_fd(p, uap)
- struct proc *p;
+extattr_get_fd(td, uap)
+ struct thread *td;
struct extattr_get_fd_args *uap;
{
struct file *fp;
@@ -3984,12 +3998,12 @@ extattr_get_fd(p, uap)
if (error)
return (error);
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
error = extattr_get_vp((struct vnode *)fp->f_data,
SCARG(uap, attrnamespace), attrname, SCARG(uap, iovp),
- SCARG(uap, iovcnt), p);
+ SCARG(uap, iovcnt), td);
return (error);
}
@@ -4006,27 +4020,27 @@ extattr_get_fd(p, uap)
*/
static int
extattr_delete_vp(struct vnode *vp, int attrnamespace, const char *attrname,
- struct proc *p)
+ struct thread *td)
{
struct mount *mp;
int error;
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
- VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
- p->p_ucred, p);
+ td->td_proc->p_ucred, td);
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
return (error);
}
int
-extattr_delete_file(p, uap)
- struct proc *p;
+extattr_delete_file(td, uap)
+ struct thread *td;
struct extattr_delete_file_args *uap;
{
struct nameidata nd;
@@ -4038,21 +4052,21 @@ extattr_delete_file(p, uap)
if (error)
return(error);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
if ((error = namei(&nd)) != 0)
return(error);
NDFREE(&nd, NDF_ONLY_PNBUF);
error = extattr_delete_vp(nd.ni_vp, SCARG(uap, attrnamespace),
- attrname, p);
+ attrname, td);
vrele(nd.ni_vp);
return(error);
}
int
-extattr_delete_fd(p, uap)
- struct proc *p;
+extattr_delete_fd(td, uap)
+ struct thread *td;
struct extattr_delete_fd_args *uap;
{
struct file *fp;
@@ -4064,11 +4078,11 @@ extattr_delete_fd(p, uap)
if (error)
return (error);
- if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
+ if ((error = getvnode(td->td_proc->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
error = extattr_delete_vp((struct vnode *)fp->f_data,
- SCARG(uap, attrnamespace), attrname, p);
+ SCARG(uap, attrnamespace), attrname, td);
return (error);
}
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index 9a6f87a..b3160b1 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -57,17 +57,17 @@
#include <machine/limits.h>
-static int vn_closefile __P((struct file *fp, struct proc *p));
+static int vn_closefile __P((struct file *fp, struct thread *td));
static int vn_ioctl __P((struct file *fp, u_long com, caddr_t data,
- struct proc *p));
+ struct thread *td));
static int vn_read __P((struct file *fp, struct uio *uio,
- struct ucred *cred, int flags, struct proc *p));
+ struct ucred *cred, int flags, struct thread *td));
static int vn_poll __P((struct file *fp, int events, struct ucred *cred,
- struct proc *p));
+ struct thread *td));
static int vn_kqfilter __P((struct file *fp, struct knote *kn));
-static int vn_statfile __P((struct file *fp, struct stat *sb, struct proc *p));
+static int vn_statfile __P((struct file *fp, struct stat *sb, struct thread *td));
static int vn_write __P((struct file *fp, struct uio *uio,
- struct ucred *cred, int flags, struct proc *p));
+ struct ucred *cred, int flags, struct thread *td));
struct fileops vnops = {
vn_read, vn_write, vn_ioctl, vn_poll, vn_kqfilter,
@@ -88,8 +88,8 @@ vn_open(ndp, flagp, cmode)
{
struct vnode *vp;
struct mount *mp;
- struct proc *p = ndp->ni_cnd.cn_proc;
- struct ucred *cred = p->p_ucred;
+ struct thread *td = ndp->ni_cnd.cn_thread;
+ struct ucred *cred = td->td_proc->p_ucred;
struct vattr vat;
struct vattr *vap = &vat;
int mode, fmode, error;
@@ -118,7 +118,7 @@ restart:
return (error);
goto restart;
}
- VOP_LEASE(ndp->ni_dvp, p, cred, LEASE_WRITE);
+ VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
&ndp->ni_cnd, vap);
vput(ndp->ni_dvp);
@@ -175,18 +175,18 @@ restart:
if (fmode & FREAD)
mode |= VREAD;
if (mode) {
- error = VOP_ACCESS(vp, mode, cred, p);
+ error = VOP_ACCESS(vp, mode, cred, td);
if (error)
goto bad;
}
}
- if ((error = VOP_OPEN(vp, fmode, cred, p)) != 0)
+ if ((error = VOP_OPEN(vp, fmode, cred, td)) != 0)
goto bad;
/*
* Make sure that a VM object is created for VMIO support.
*/
if (vn_canvmio(vp) == TRUE) {
- if ((error = vfs_object_create(vp, p, cred)) != 0)
+ if ((error = vfs_object_create(vp, td, cred)) != 0)
goto bad;
}
@@ -224,17 +224,17 @@ vn_writechk(vp)
* Vnode close call
*/
int
-vn_close(vp, flags, cred, p)
+vn_close(vp, flags, cred, td)
register struct vnode *vp;
int flags;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
{
int error;
if (flags & FWRITE)
vp->v_writecount--;
- error = VOP_CLOSE(vp, flags, cred, p);
+ error = VOP_CLOSE(vp, flags, cred, td);
/*
* XXX - In certain instances VOP_CLOSE has to do the vrele
* itself. If the vrele has been done, it will return EAGAIN
@@ -283,7 +283,7 @@ sequential_heuristic(struct uio *uio, struct file *fp)
* Package up an I/O request on a vnode into a uio and do it.
*/
int
-vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
+vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
enum uio_rw rw;
struct vnode *vp;
caddr_t base;
@@ -293,7 +293,7 @@ vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
int ioflg;
struct ucred *cred;
int *aresid;
- struct proc *p;
+ struct thread *td;
{
struct uio auio;
struct iovec aiov;
@@ -306,7 +306,7 @@ vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
vp->v_type != VCHR &&
(error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
}
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
@@ -316,7 +316,7 @@ vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
auio.uio_offset = offset;
auio.uio_segflg = segflg;
auio.uio_rw = rw;
- auio.uio_procp = p;
+ auio.uio_td = td;
if (rw == UIO_READ) {
error = VOP_READ(vp, &auio, ioflg, cred);
} else {
@@ -329,7 +329,7 @@ vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
error = EIO;
if ((ioflg & IO_NODELOCKED) == 0) {
vn_finished_write(mp);
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
}
return (error);
}
@@ -341,7 +341,7 @@ vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
* check bwillwrite() before calling vn_rdwr()
*/
int
-vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
+vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
enum uio_rw rw;
struct vnode *vp;
caddr_t base;
@@ -351,7 +351,7 @@ vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
int ioflg;
struct ucred *cred;
int *aresid;
- struct proc *p;
+ struct thread *td;
{
int error = 0;
@@ -361,7 +361,7 @@ vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
if (rw != UIO_READ && vp->v_type == VREG)
bwillwrite();
error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
- ioflg, cred, aresid, p);
+ ioflg, cred, aresid, td);
len -= chunk; /* aresid calc already includes length */
if (error)
break;
@@ -377,26 +377,26 @@ vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
* File table vnode read routine.
*/
static int
-vn_read(fp, uio, cred, flags, p)
+vn_read(fp, uio, cred, flags, td)
struct file *fp;
struct uio *uio;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
int flags;
{
struct vnode *vp;
int error, ioflag;
- KASSERT(uio->uio_procp == p, ("uio_procp %p is not p %p",
- uio->uio_procp, p));
+ KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
+ uio->uio_td, td));
vp = (struct vnode *)fp->f_data;
ioflag = 0;
if (fp->f_flag & FNONBLOCK)
ioflag |= IO_NDELAY;
if (fp->f_flag & O_DIRECT)
ioflag |= IO_DIRECT;
- VOP_LEASE(vp, p, cred, LEASE_READ);
- vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, p);
+ VOP_LEASE(vp, td, cred, LEASE_READ);
+ vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
if ((flags & FOF_OFFSET) == 0)
uio->uio_offset = fp->f_offset;
@@ -406,7 +406,7 @@ vn_read(fp, uio, cred, flags, p)
if ((flags & FOF_OFFSET) == 0)
fp->f_offset = uio->uio_offset;
fp->f_nextoff = uio->uio_offset;
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
return (error);
}
@@ -414,19 +414,19 @@ vn_read(fp, uio, cred, flags, p)
* File table vnode write routine.
*/
static int
-vn_write(fp, uio, cred, flags, p)
+vn_write(fp, uio, cred, flags, td)
struct file *fp;
struct uio *uio;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
int flags;
{
struct vnode *vp;
struct mount *mp;
int error, ioflag;
- KASSERT(uio->uio_procp == p, ("uio_procp %p is not p %p",
- uio->uio_procp, p));
+ KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
+ uio->uio_td, td));
vp = (struct vnode *)fp->f_data;
if (vp->v_type == VREG)
bwillwrite();
@@ -445,8 +445,8 @@ vn_write(fp, uio, cred, flags, p)
if (vp->v_type != VCHR &&
(error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
return (error);
- VOP_LEASE(vp, p, cred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ VOP_LEASE(vp, td, cred, LEASE_WRITE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
if ((flags & FOF_OFFSET) == 0)
uio->uio_offset = fp->f_offset;
ioflag |= sequential_heuristic(uio, fp);
@@ -454,7 +454,7 @@ vn_write(fp, uio, cred, flags, p)
if ((flags & FOF_OFFSET) == 0)
fp->f_offset = uio->uio_offset;
fp->f_nextoff = uio->uio_offset;
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
vn_finished_write(mp);
return (error);
}
@@ -463,21 +463,21 @@ vn_write(fp, uio, cred, flags, p)
* File table vnode stat routine.
*/
static int
-vn_statfile(fp, sb, p)
+vn_statfile(fp, sb, td)
struct file *fp;
struct stat *sb;
- struct proc *p;
+ struct thread *td;
{
struct vnode *vp = (struct vnode *)fp->f_data;
- return vn_stat(vp, sb, p);
+ return vn_stat(vp, sb, td);
}
int
-vn_stat(vp, sb, p)
+vn_stat(vp, sb, td)
struct vnode *vp;
register struct stat *sb;
- struct proc *p;
+ struct thread *td;
{
struct vattr vattr;
register struct vattr *vap;
@@ -485,7 +485,7 @@ vn_stat(vp, sb, p)
u_short mode;
vap = &vattr;
- error = VOP_GETATTR(vp, vap, p->p_ucred, p);
+ error = VOP_GETATTR(vp, vap, td->td_proc->p_ucred, td);
if (error)
return (error);
@@ -568,7 +568,7 @@ vn_stat(vp, sb, p)
}
sb->st_flags = vap->va_flags;
- if (suser_xxx(p->p_ucred, 0, 0))
+ if (suser_xxx(td->td_proc->p_ucred, 0, 0))
sb->st_gen = 0;
else
sb->st_gen = vap->va_gen;
@@ -586,11 +586,11 @@ vn_stat(vp, sb, p)
* File table vnode ioctl routine.
*/
static int
-vn_ioctl(fp, com, data, p)
+vn_ioctl(fp, com, data, td)
struct file *fp;
u_long com;
caddr_t data;
- struct proc *p;
+ struct thread *td;
{
register struct vnode *vp = ((struct vnode *)fp->f_data);
struct vattr vattr;
@@ -601,7 +601,7 @@ vn_ioctl(fp, com, data, p)
case VREG:
case VDIR:
if (com == FIONREAD) {
- error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
+ error = VOP_GETATTR(vp, &vattr, td->td_proc->p_ucred, td);
if (error)
return (error);
*(int *)data = vattr.va_size - fp->f_offset;
@@ -624,18 +624,18 @@ vn_ioctl(fp, com, data, p)
*(int *)data = devsw(vp->v_rdev)->d_flags & D_TYPEMASK;
return (0);
}
- error = VOP_IOCTL(vp, com, data, fp->f_flag, p->p_ucred, p);
+ error = VOP_IOCTL(vp, com, data, fp->f_flag, td->td_proc->p_ucred, td);
if (error == 0 && com == TIOCSCTTY) {
/* Do nothing if reassigning same control tty */
- if (p->p_session->s_ttyvp == vp)
+ if (td->td_proc->p_session->s_ttyvp == vp)
return (0);
/* Get rid of reference to old control tty */
- if (p->p_session->s_ttyvp)
- vrele(p->p_session->s_ttyvp);
+ if (td->td_proc->p_session->s_ttyvp)
+ vrele(td->td_proc->p_session->s_ttyvp);
- p->p_session->s_ttyvp = vp;
+ td->td_proc->p_session->s_ttyvp = vp;
VREF(vp);
}
return (error);
@@ -646,14 +646,14 @@ vn_ioctl(fp, com, data, p)
* File table vnode poll routine.
*/
static int
-vn_poll(fp, events, cred, p)
+vn_poll(fp, events, cred, td)
struct file *fp;
int events;
struct ucred *cred;
- struct proc *p;
+ struct thread *td;
{
- return (VOP_POLL(((struct vnode *)fp->f_data), events, cred, p));
+ return (VOP_POLL(((struct vnode *)fp->f_data), events, cred, td));
}
/*
@@ -662,13 +662,13 @@ vn_poll(fp, events, cred, p)
*/
int
#ifndef DEBUG_LOCKS
-vn_lock(vp, flags, p)
+vn_lock(vp, flags, td)
#else
-debug_vn_lock(vp, flags, p, filename, line)
+debug_vn_lock(vp, flags, td, filename, line)
#endif
struct vnode *vp;
int flags;
- struct proc *p;
+ struct thread *td;
#ifdef DEBUG_LOCKS
const char *filename;
int line;
@@ -679,7 +679,7 @@ debug_vn_lock(vp, flags, p, filename, line)
do {
if ((flags & LK_INTERLOCK) == 0)
mtx_lock(&vp->v_interlock);
- if ((vp->v_flag & VXLOCK) && vp->v_vxproc != curproc) {
+ if ((vp->v_flag & VXLOCK) && vp->v_vxproc != curthread) {
vp->v_flag |= VXWANT;
msleep(vp, &vp->v_interlock, PINOD | PDROP,
"vn_lock", 0);
@@ -692,7 +692,7 @@ debug_vn_lock(vp, flags, p, filename, line)
vp->line = line;
#endif
error = VOP_LOCK(vp,
- flags | LK_NOPAUSE | LK_INTERLOCK, p);
+ flags | LK_NOPAUSE | LK_INTERLOCK, td);
if (error == 0)
return (error);
}
@@ -705,14 +705,14 @@ debug_vn_lock(vp, flags, p, filename, line)
* File table vnode close routine.
*/
static int
-vn_closefile(fp, p)
+vn_closefile(fp, td)
struct file *fp;
- struct proc *p;
+ struct thread *td;
{
fp->f_ops = &badfileops;
return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
- fp->f_cred, p));
+ fp->f_cred, td));
}
/*
@@ -825,14 +825,14 @@ void
vfs_write_suspend(mp)
struct mount *mp;
{
- struct proc *p = curproc;
+ struct thread *td = curthread;
if (mp->mnt_kern_flag & MNTK_SUSPEND)
return;
mp->mnt_kern_flag |= MNTK_SUSPEND;
if (mp->mnt_writeopcount > 0)
(void) tsleep(&mp->mnt_writeopcount, PUSER - 1, "suspwt", 0);
- VFS_SYNC(mp, MNT_WAIT, p->p_ucred, p);
+ VFS_SYNC(mp, MNT_WAIT, td->td_proc->p_ucred, td);
mp->mnt_kern_flag |= MNTK_SUSPENDED;
}
@@ -865,7 +865,7 @@ vn_kqfilter(struct file *fp, struct knote *kn)
*/
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
- const char *attrname, int *buflen, char *buf, struct proc *p)
+ const char *attrname, int *buflen, char *buf, struct thread *td)
{
struct uio auio;
struct iovec iov;
@@ -878,18 +878,18 @@ vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
auio.uio_iovcnt = 1;
auio.uio_rw = UIO_READ;
auio.uio_segflg = UIO_SYSSPACE;
- auio.uio_procp = p;
+ auio.uio_td = td;
auio.uio_offset = 0;
auio.uio_resid = *buflen;
if ((ioflg & IO_NODELOCKED) == 0)
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
/* authorize attribute retrieval as kernel */
- error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, p);
+ error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);
if ((ioflg & IO_NODELOCKED) == 0)
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
if (error == 0) {
*buflen = *buflen - auio.uio_resid;
@@ -903,7 +903,7 @@ vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
*/
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
- const char *attrname, int buflen, char *buf, struct proc *p)
+ const char *attrname, int buflen, char *buf, struct thread *td)
{
struct uio auio;
struct iovec iov;
@@ -917,22 +917,22 @@ vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
auio.uio_iovcnt = 1;
auio.uio_rw = UIO_WRITE;
auio.uio_segflg = UIO_SYSSPACE;
- auio.uio_procp = p;
+ auio.uio_td = td;
auio.uio_offset = 0;
auio.uio_resid = buflen;
if ((ioflg & IO_NODELOCKED) == 0) {
if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
return (error);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
}
/* authorize attribute setting as kernel */
- error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, p);
+ error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);
if ((ioflg & IO_NODELOCKED) == 0) {
vn_finished_write(mp);
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
}
return (error);
@@ -940,7 +940,7 @@ vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
- const char *attrname, struct proc *p)
+ const char *attrname, struct thread *td)
{
struct mount *mp;
int error;
@@ -948,15 +948,15 @@ vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
if ((ioflg & IO_NODELOCKED) == 0) {
if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
return (error);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
}
/* authorize attribute removal as kernel */
- error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL, NULL, p);
+ error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL, NULL, td);
if ((ioflg & IO_NODELOCKED) == 0) {
vn_finished_write(mp);
- VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0, td);
}
return (error);
diff --git a/sys/kern/vnode_if.pl b/sys/kern/vnode_if.pl
index 53236e5..32eabfe 100644
--- a/sys/kern/vnode_if.pl
+++ b/sys/kern/vnode_if.pl
@@ -322,8 +322,8 @@ line: while (<SRC>) {
printf CFILE "\t%s,\n", &find_arg_with_type('struct vnode **');
# cred (if any)
printf CFILE "\t%s,\n", &find_arg_with_type('struct ucred *');
- # proc (if any)
- printf CFILE "\t%s,\n", &find_arg_with_type('struct proc *');
+ # thread (if any)
+ printf CFILE "\t%s,\n", &find_arg_with_type('struct thread *');
# componentname
printf CFILE "\t%s,\n", &find_arg_with_type('struct componentname *');
# transport layer information
diff --git a/sys/kern/vnode_if.src b/sys/kern/vnode_if.src
index e275617..43ff6b5 100644
--- a/sys/kern/vnode_if.src
+++ b/sys/kern/vnode_if.src
@@ -59,7 +59,7 @@
#
vop_islocked {
IN struct vnode *vp;
- IN struct proc *p;
+ IN struct thread *td;
};
#
@@ -126,7 +126,7 @@ vop_open {
IN struct vnode *vp;
IN int mode;
IN struct ucred *cred;
- IN struct proc *p;
+ IN struct thread *td;
};
#
@@ -136,7 +136,7 @@ vop_close {
IN struct vnode *vp;
IN int fflag;
IN struct ucred *cred;
- IN struct proc *p;
+ IN struct thread *td;
};
#
@@ -146,7 +146,7 @@ vop_access {
IN struct vnode *vp;
IN int mode;
IN struct ucred *cred;
- IN struct proc *p;
+ IN struct thread *td;
};
#
@@ -156,7 +156,7 @@ vop_getattr {
IN struct vnode *vp;
OUT struct vattr *vap;
IN struct ucred *cred;
- IN struct proc *p;
+ IN struct thread *td;
};
#
@@ -166,7 +166,7 @@ vop_setattr {
IN struct vnode *vp;
IN struct vattr *vap;
IN struct ucred *cred;
- IN struct proc *p;
+ IN struct thread *td;
};
#
@@ -194,7 +194,7 @@ vop_write {
#
vop_lease {
IN struct vnode *vp;
- IN struct proc *p;
+ IN struct thread *td;
IN struct ucred *cred;
IN int flag;
};
@@ -208,7 +208,7 @@ vop_ioctl {
IN caddr_t data;
IN int fflag;
IN struct ucred *cred;
- IN struct proc *p;
+ IN struct thread *td;
};
#
@@ -218,7 +218,7 @@ vop_poll {
IN struct vnode *vp;
IN int events;
IN struct ucred *cred;
- IN struct proc *p;
+ IN struct thread *td;
};
#
@@ -244,7 +244,7 @@ vop_fsync {
IN struct vnode *vp;
IN struct ucred *cred;
IN int waitfor;
- IN struct proc *p;
+ IN struct thread *td;
};
#
@@ -341,7 +341,7 @@ vop_readlink {
#
vop_inactive {
IN struct vnode *vp;
- IN struct proc *p;
+ IN struct thread *td;
};
#
@@ -349,7 +349,7 @@ vop_inactive {
#
vop_reclaim {
IN struct vnode *vp;
- IN struct proc *p;
+ IN struct thread *td;
};
#
@@ -358,7 +358,7 @@ vop_reclaim {
vop_lock {
IN struct vnode *vp;
IN int flags;
- IN struct proc *p;
+ IN struct thread *td;
};
#
@@ -367,7 +367,7 @@ vop_lock {
vop_unlock {
IN struct vnode *vp;
IN int flags;
- IN struct proc *p;
+ IN struct thread *td;
};
#
@@ -479,7 +479,7 @@ vop_getacl {
IN acl_type_t type;
OUT struct acl *aclp;
IN struct ucred *cred;
- IN struct proc *p;
+ IN struct thread *td;
};
#
@@ -490,7 +490,7 @@ vop_setacl {
IN acl_type_t type;
IN struct acl *aclp;
IN struct ucred *cred;
- IN struct proc *p;
+ IN struct thread *td;
};
#
@@ -501,7 +501,7 @@ vop_aclcheck {
IN acl_type_t type;
IN struct acl *aclp;
IN struct ucred *cred;
- IN struct proc *p;
+ IN struct thread *td;
};
#
@@ -513,7 +513,7 @@ vop_getextattr {
IN const char *name;
INOUT struct uio *uio;
IN struct ucred *cred;
- IN struct proc *p;
+ IN struct thread *td;
};
#
@@ -525,7 +525,7 @@ vop_setextattr {
IN const char *name;
INOUT struct uio *uio;
IN struct ucred *cred;
- IN struct proc *p;
+ IN struct thread *td;
};
#
@@ -534,7 +534,7 @@ vop_setextattr {
vop_createvobject {
IN struct vnode *vp;
IN struct ucred *cred;
- IN struct proc *p;
+ IN struct thread *td;
};
#
OpenPOWER on IntegriCloud