author    | Luiz Otavio O Souza <luiz@netgate.com> | 2016-06-30 13:24:42 -0500
committer | Luiz Otavio O Souza <luiz@netgate.com> | 2016-06-30 13:24:42 -0500
commit    | 9d5ffb47ff56597309eb2939cc97b1df4d616797 (patch)
tree      | b34fd92dce8092bb4cb58c875caabd93e1fece39 /sys/kern
parent    | 1fc6b0207cc2f3cce33817706603caa41a9de24d (diff)
parent    | 13295f52fb5936b237a994e75311fe18612c73c4 (diff)
Merge remote-tracking branch 'origin/stable/10' into devel
Diffstat (limited to 'sys/kern')
53 files changed, 382 insertions, 346 deletions
diff --git a/sys/kern/bus_if.m b/sys/kern/bus_if.m index af39036..f1fd64b 100644 --- a/sys/kern/bus_if.m +++ b/sys/kern/bus_if.m @@ -121,7 +121,7 @@ METHOD void probe_nomatch { * @param _child the child device whose instance variable is * being read * @param _index the instance variable to read - * @param _result a loction to recieve the instance variable + * @param _result a location to receive the instance variable * value * * @retval 0 success @@ -374,7 +374,7 @@ METHOD int release_resource { * triggers * @param _arg a value to use as the single argument in calls * to @p _intr - * @param _cookiep a pointer to a location to recieve a cookie + * @param _cookiep a pointer to a location to receive a cookie * value that may be used to remove the interrupt * handler */ @@ -445,9 +445,9 @@ METHOD int set_resource { * @param _child the device which owns the resource * @param _type the type of resource * @param _rid the resource identifier - * @param _start the address of a location to recieve the start + * @param _start the address of a location to receive the start * index of the resource range - * @param _count the address of a location to recieve the size + * @param _count the address of a location to receive the size * of the resource range */ METHOD int get_resource { diff --git a/sys/kern/imgact_binmisc.c b/sys/kern/imgact_binmisc.c index 5712838..0fceb13 100644 --- a/sys/kern/imgact_binmisc.c +++ b/sys/kern/imgact_binmisc.c @@ -708,7 +708,7 @@ imgact_binmisc_exec(struct image_params *imgp) break; case ' ': - /* Replace space with NUL to seperate arguments. */ + /* Replace space with NUL to separate arguments. */ *d++ = '\0'; break; diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c index e3c08de..1e237c2 100644 --- a/sys/kern/imgact_elf.c +++ b/sys/kern/imgact_elf.c @@ -1325,10 +1325,6 @@ __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags) * and write it out following the notes. */ hdr = malloc(hdrsize, M_TEMP, M_WAITOK); - if (hdr == NULL) { - error = EINVAL; - goto done; - } error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize, ¬elst, notesz, gzfile); diff --git a/sys/kern/inflate.c b/sys/kern/inflate.c index 383ebc4..8fde5cb 100644 --- a/sys/kern/inflate.c +++ b/sys/kern/inflate.c @@ -206,7 +206,7 @@ extern void kzipfree (void*); end-of-block. Note however that the static length tree defines 288 codes just to fill out the Huffman codes. Codes 286 and 287 cannot be used though, since there is no length base or extra bits - defined for them. Similarily, there are up to 30 distance codes. + defined for them. Similarly, there are up to 30 distance codes. However, static trees define 32 codes (all 5 bits) to fill out the Huffman codes, but the last two had better not show up in the data. 7. Unzip can check dynamic Huffman blocks for complete code sets. @@ -335,7 +335,7 @@ static const ush mask[] = { where NEEDBITS makes sure that b has at least j bits in it, and DUMPBITS removes the bits from b. The macros use the variable k for the number of bits in b. Normally, b and k are register - variables for speed, and are initialized at the begining of a + variables for speed, and are initialized at the beginning of a routine that uses these macros from a global bit buffer and count. 
In order to not ask for more bits than there are in the compressed diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c index ae6bd3a..03a3d9e 100644 --- a/sys/kern/init_main.c +++ b/sys/kern/init_main.c @@ -437,6 +437,7 @@ proc0_init(void *dummy __unused) { struct proc *p; struct thread *td; + struct ucred *newcred; vm_paddr_t pageablemem; int i; @@ -513,19 +514,20 @@ proc0_init(void *dummy __unused) callout_init(&td->td_slpcallout, CALLOUT_MPSAFE); /* Create credentials. */ - p->p_ucred = crget(); - p->p_ucred->cr_ngroups = 1; /* group 0 */ - p->p_ucred->cr_uidinfo = uifind(0); - p->p_ucred->cr_ruidinfo = uifind(0); - p->p_ucred->cr_prison = &prison0; - p->p_ucred->cr_loginclass = loginclass_find("default"); + newcred = crget(); + newcred->cr_ngroups = 1; /* group 0 */ + newcred->cr_uidinfo = uifind(0); + newcred->cr_ruidinfo = uifind(0); + newcred->cr_prison = &prison0; + newcred->cr_loginclass = loginclass_find("default"); + proc_set_cred(p, newcred); #ifdef AUDIT - audit_cred_kproc0(p->p_ucred); + audit_cred_kproc0(newcred); #endif #ifdef MAC - mac_cred_create_swapper(p->p_ucred); + mac_cred_create_swapper(newcred); #endif - td->td_ucred = crhold(p->p_ucred); + td->td_ucred = crhold(newcred); /* Create sigacts. */ p->p_sigacts = sigacts_alloc(); @@ -844,7 +846,7 @@ create_init(const void *udata __unused) #ifdef AUDIT audit_cred_proc1(newcred); #endif - initproc->p_ucred = newcred; + proc_set_cred(initproc, newcred); PROC_UNLOCK(initproc); sx_xunlock(&proctree_lock); crfree(oldcred); diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c index 95a6d09..60895c9 100644 --- a/sys/kern/kern_condvar.c +++ b/sys/kern/kern_condvar.c @@ -163,7 +163,7 @@ _cv_wait(struct cv *cvp, struct lock_object *lock) /* * Wait on a condition variable. This function differs from cv_wait by - * not aquiring the mutex after condition variable was signaled. + * not acquiring the mutex after condition variable was signaled. */ void _cv_wait_unlock(struct cv *cvp, struct lock_object *lock) diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c index 7561368..24381a9 100644 --- a/sys/kern/kern_descrip.c +++ b/sys/kern/kern_descrip.c @@ -1525,7 +1525,7 @@ fdgrowtable_exp(struct filedesc *fdp, int nfd) } /* - * Grow the file table to accomodate (at least) nfd descriptors. + * Grow the file table to accommodate (at least) nfd descriptors. */ static void fdgrowtable(struct filedesc *fdp, int nfd) @@ -1730,7 +1730,7 @@ fdavail(struct thread *td, int n) } /* - * Create a new open file structure and allocate a file decriptor for the + * Create a new open file structure and allocate a file descriptor for the * process that refers to it. We add one reference to the file for the * descriptor table and one reference for resultfp. This is to prevent us * being preempted and the entry in the descriptor table closed after we @@ -2448,7 +2448,7 @@ fget_unlocked(struct filedesc *fdp, int fd, cap_rights_t *needrightsp, * * File's rights will be checked against the capability rights mask. * - * If an error occured the non-zero error is returned and *fpp is set to + * If an error occurred the non-zero error is returned and *fpp is set to * NULL. Otherwise *fpp is held and set and zero is returned. Caller is * responsible for fdrop(). 
*/ diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c index 47ea9b0..eeb8369 100644 --- a/sys/kern/kern_exec.c +++ b/sys/kern/kern_exec.c @@ -96,9 +96,9 @@ dtrace_execexit_func_t dtrace_fasttrap_exec; #endif SDT_PROVIDER_DECLARE(proc); -SDT_PROBE_DEFINE1(proc, kernel, , exec, "char *"); -SDT_PROBE_DEFINE1(proc, kernel, , exec__failure, "int"); -SDT_PROBE_DEFINE1(proc, kernel, , exec__success, "char *"); +SDT_PROBE_DEFINE1(proc, , , exec, "char *"); +SDT_PROBE_DEFINE1(proc, , , exec__failure, "int"); +SDT_PROBE_DEFINE1(proc, , , exec__success, "char *"); MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments"); @@ -359,7 +359,7 @@ do_execve(td, args, mac_p) { struct proc *p = td->td_proc; struct nameidata nd; - struct ucred *newcred = NULL, *oldcred; + struct ucred *oldcred; struct uidinfo *euip = NULL; register_t *stack_base; int error, i; @@ -367,12 +367,12 @@ do_execve(td, args, mac_p) struct vattr attr; int (*img_first)(struct image_params *); struct pargs *oldargs = NULL, *newargs = NULL; - struct sigacts *oldsigacts, *newsigacts; + struct sigacts *oldsigacts = NULL, *newsigacts = NULL; #ifdef KTRACE struct vnode *tracevp = NULL; struct ucred *tracecred = NULL; #endif - struct vnode *textvp = NULL, *binvp = NULL; + struct vnode *oldtextvp = NULL, *newtextvp; cap_rights_t rights; int credential_changing; int textset; @@ -407,6 +407,7 @@ do_execve(td, args, mac_p) imgp->proc = p; imgp->attr = &attr; imgp->args = args; + oldcred = p->p_ucred; #ifdef MAC error = mac_execve_enter(imgp, mac_p); @@ -416,7 +417,7 @@ do_execve(td, args, mac_p) /* * Translate the file name. namei() returns a vnode pointer - * in ni_vp amoung other things. + * in ni_vp among other things. * * XXXAUDIT: It would be desirable to also audit the name of the * interpreter if this is an interpreted binary. @@ -426,7 +427,7 @@ do_execve(td, args, mac_p) | AUDITVNODE1, UIO_SYSSPACE, args->fname, td); } - SDT_PROBE1(proc, kernel, , exec, args->fname); + SDT_PROBE1(proc, , , exec, args->fname); interpret: if (args->fname != NULL) { @@ -446,20 +447,20 @@ interpret: if (error) goto exec_fail; - binvp = nd.ni_vp; - imgp->vp = binvp; + newtextvp = nd.ni_vp; + imgp->vp = newtextvp; } else { AUDIT_ARG_FD(args->fd); /* * Descriptors opened only with O_EXEC or O_RDONLY are allowed. */ error = fgetvp_exec(td, args->fd, - cap_rights_init(&rights, CAP_FEXECVE), &binvp); + cap_rights_init(&rights, CAP_FEXECVE), &newtextvp); if (error) goto exec_fail; - vn_lock(binvp, LK_EXCLUSIVE | LK_RETRY); - AUDIT_ARG_VNODE1(binvp); - imgp->vp = binvp; + vn_lock(newtextvp, LK_EXCLUSIVE | LK_RETRY); + AUDIT_ARG_VNODE1(newtextvp); + imgp->vp = newtextvp; } /* @@ -488,6 +489,100 @@ interpret: goto exec_fail_dealloc; imgp->proc->p_osrel = 0; + + /* + * Implement image setuid/setgid. + * + * Determine new credentials before attempting image activators + * so that it can be used by process_exec handlers to determine + * credential/setid changes. + * + * Don't honor setuid/setgid if the filesystem prohibits it or if + * the process is being traced. + * + * We disable setuid/setgid/etc in capability mode on the basis + * that most setugid applications are not written with that + * environment in mind, and will therefore almost certainly operate + * incorrectly. In principle there's no reason that setugid + * applications might not be useful in capability mode, so we may want + * to reconsider this conservative design choice in the future. + * + * XXXMAC: For the time being, use NOSUID to also prohibit + * transitions on the file system. 
+ */ + credential_changing = 0; + credential_changing |= (attr.va_mode & S_ISUID) && + oldcred->cr_uid != attr.va_uid; + credential_changing |= (attr.va_mode & S_ISGID) && + oldcred->cr_gid != attr.va_gid; +#ifdef MAC + will_transition = mac_vnode_execve_will_transition(oldcred, imgp->vp, + interpvplabel, imgp); + credential_changing |= will_transition; +#endif + + if (credential_changing && +#ifdef CAPABILITY_MODE + ((oldcred->cr_flags & CRED_FLAG_CAPMODE) == 0) && +#endif + (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 && + (p->p_flag & P_TRACED) == 0) { + imgp->credential_setid = true; + VOP_UNLOCK(imgp->vp, 0); + imgp->newcred = crdup(oldcred); + if (attr.va_mode & S_ISUID) { + euip = uifind(attr.va_uid); + change_euid(imgp->newcred, euip); + } + vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY); + if (attr.va_mode & S_ISGID) + change_egid(imgp->newcred, attr.va_gid); + /* + * Implement correct POSIX saved-id behavior. + * + * XXXMAC: Note that the current logic will save the + * uid and gid if a MAC domain transition occurs, even + * though maybe it shouldn't. + */ + change_svuid(imgp->newcred, imgp->newcred->cr_uid); + change_svgid(imgp->newcred, imgp->newcred->cr_gid); + } else { + /* + * Implement correct POSIX saved-id behavior. + * + * XXX: It's not clear that the existing behavior is + * POSIX-compliant. A number of sources indicate that the + * saved uid/gid should only be updated if the new ruid is + * not equal to the old ruid, or the new euid is not equal + * to the old euid and the new euid is not equal to the old + * ruid. The FreeBSD code always updates the saved uid/gid. + * Also, this code uses the new (replaced) euid and egid as + * the source, which may or may not be the right ones to use. + */ + if (oldcred->cr_svuid != oldcred->cr_uid || + oldcred->cr_svgid != oldcred->cr_gid) { + VOP_UNLOCK(imgp->vp, 0); + imgp->newcred = crdup(oldcred); + vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY); + change_svuid(imgp->newcred, imgp->newcred->cr_uid); + change_svgid(imgp->newcred, imgp->newcred->cr_gid); + } + } + /* The new credentials are installed into the process later. */ + + /* + * Do the best to calculate the full path to the image file. + */ + if (args->fname != NULL && args->fname[0] == '/') + imgp->execpath = args->fname; + else { + VOP_UNLOCK(imgp->vp, 0); + if (vn_fullpath(td, imgp->vp, &imgp->execpath, + &imgp->freepath) != 0) + imgp->execpath = args->fname; + vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY); + } + /* * If the current process has a special image activator it * wants to try first, call it. For example, emulating shell @@ -536,15 +631,23 @@ interpret: if (args->fname != NULL) NDFREE(&nd, NDF_ONLY_PNBUF); #ifdef MAC - mac_execve_interpreter_enter(binvp, &interpvplabel); + mac_execve_interpreter_enter(newtextvp, &interpvplabel); #endif if (imgp->opened) { - VOP_CLOSE(binvp, FREAD, td->td_ucred, td); + VOP_CLOSE(newtextvp, FREAD, td->td_ucred, td); imgp->opened = 0; } - vput(binvp); + vput(newtextvp); vm_object_deallocate(imgp->object); imgp->object = NULL; + imgp->credential_setid = false; + if (imgp->newcred != NULL) { + crfree(imgp->newcred); + imgp->newcred = NULL; + } + imgp->execpath = NULL; + free(imgp->freepath, M_TEMP); + imgp->freepath = NULL; /* set new name to that of the interpreter */ NDINIT(&nd, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME, UIO_SYSSPACE, imgp->interpreter_name, td); @@ -558,14 +661,6 @@ interpret: */ VOP_UNLOCK(imgp->vp, 0); - /* - * Do the best to calculate the full path to the image file. 
- */ - if (imgp->auxargs != NULL && - ((args->fname != NULL && args->fname[0] == '/') || - vn_fullpath(td, imgp->vp, &imgp->execpath, &imgp->freepath) != 0)) - imgp->execpath = args->fname; - if (disallow_high_osrel && P_OSREL_MAJOR(p->p_osrel) > P_OSREL_MAJOR(__FreeBSD_version)) { error = ENOEXEC; @@ -611,11 +706,6 @@ interpret: bcopy(imgp->args->begin_argv, newargs->ar_args, i); } - vn_lock(imgp->vp, LK_SHARED | LK_RETRY); - - /* Get a reference to the vnode prior to locking the proc */ - VREF(binvp); - /* * For security and other reasons, signal handlers cannot * be shared after an exec. The new process gets a copy of the old @@ -626,15 +716,13 @@ interpret: oldsigacts = p->p_sigacts; newsigacts = sigacts_alloc(); sigacts_copy(newsigacts, oldsigacts); - } else { - oldsigacts = NULL; - newsigacts = NULL; /* satisfy gcc */ } + vn_lock(imgp->vp, LK_SHARED | LK_RETRY); + PROC_LOCK(p); if (oldsigacts) p->p_sigacts = newsigacts; - oldcred = p->p_ucred; /* Stop profiling */ stopprofclock(p); @@ -646,7 +734,7 @@ interpret: if (args->fname) bcopy(nd.ni_cnd.cn_nameptr, p->p_comm, min(nd.ni_cnd.cn_namelen, MAXCOMLEN)); - else if (vn_commname(binvp, p->p_comm, sizeof(p->p_comm)) != 0) + else if (vn_commname(newtextvp, p->p_comm, sizeof(p->p_comm)) != 0) bcopy(fexecv_proc_title, p->p_comm, sizeof(fexecv_proc_title)); bcopy(p->p_comm, td->td_name, sizeof(td->td_name)); #ifdef KTR @@ -666,38 +754,9 @@ interpret: } /* - * Implement image setuid/setgid. - * - * Don't honor setuid/setgid if the filesystem prohibits it or if - * the process is being traced. - * - * We disable setuid/setgid/etc in compatibility mode on the basis - * that most setugid applications are not written with that - * environment in mind, and will therefore almost certainly operate - * incorrectly. In principle there's no reason that setugid - * applications might not be useful in capability mode, so we may want - * to reconsider this conservative design choice in the future. - * - * XXXMAC: For the time being, use NOSUID to also prohibit - * transitions on the file system. + * Implement image setuid/setgid installation. */ - credential_changing = 0; - credential_changing |= (attr.va_mode & S_ISUID) && oldcred->cr_uid != - attr.va_uid; - credential_changing |= (attr.va_mode & S_ISGID) && oldcred->cr_gid != - attr.va_gid; -#ifdef MAC - will_transition = mac_vnode_execve_will_transition(oldcred, imgp->vp, - interpvplabel, imgp); - credential_changing |= will_transition; -#endif - - if (credential_changing && -#ifdef CAPABILITY_MODE - ((oldcred->cr_flags & CRED_FLAG_CAPMODE) == 0) && -#endif - (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 && - (p->p_flag & P_TRACED) == 0) { + if (imgp->credential_setid) { /* * Turn off syscall tracing for set-id programs, except for * root. Record any set-id flags first to make sure that @@ -723,70 +782,36 @@ interpret: VOP_UNLOCK(imgp->vp, 0); setugidsafety(td); error = fdcheckstd(td); - if (error != 0) - goto done1; - newcred = crdup(oldcred); - euip = uifind(attr.va_uid); vn_lock(imgp->vp, LK_SHARED | LK_RETRY); + if (error != 0) + goto exec_fail_dealloc; PROC_LOCK(p); - /* - * Set the new credentials. - */ - if (attr.va_mode & S_ISUID) - change_euid(newcred, euip); - if (attr.va_mode & S_ISGID) - change_egid(newcred, attr.va_gid); #ifdef MAC if (will_transition) { - mac_vnode_execve_transition(oldcred, newcred, imgp->vp, - interpvplabel, imgp); + mac_vnode_execve_transition(oldcred, imgp->newcred, + imgp->vp, interpvplabel, imgp); } #endif - /* - * Implement correct POSIX saved-id behavior. 
- * - * XXXMAC: Note that the current logic will save the - * uid and gid if a MAC domain transition occurs, even - * though maybe it shouldn't. - */ - change_svuid(newcred, newcred->cr_uid); - change_svgid(newcred, newcred->cr_gid); - p->p_ucred = newcred; } else { if (oldcred->cr_uid == oldcred->cr_ruid && oldcred->cr_gid == oldcred->cr_rgid) p->p_flag &= ~P_SUGID; - /* - * Implement correct POSIX saved-id behavior. - * - * XXX: It's not clear that the existing behavior is - * POSIX-compliant. A number of sources indicate that the - * saved uid/gid should only be updated if the new ruid is - * not equal to the old ruid, or the new euid is not equal - * to the old euid and the new euid is not equal to the old - * ruid. The FreeBSD code always updates the saved uid/gid. - * Also, this code uses the new (replaced) euid and egid as - * the source, which may or may not be the right ones to use. - */ - if (oldcred->cr_svuid != oldcred->cr_uid || - oldcred->cr_svgid != oldcred->cr_gid) { - PROC_UNLOCK(p); - VOP_UNLOCK(imgp->vp, 0); - newcred = crdup(oldcred); - vn_lock(imgp->vp, LK_SHARED | LK_RETRY); - PROC_LOCK(p); - change_svuid(newcred, newcred->cr_uid); - change_svgid(newcred, newcred->cr_gid); - p->p_ucred = newcred; - } + } + /* + * Set the new credentials. + */ + if (imgp->newcred != NULL) { + proc_set_cred(p, imgp->newcred); + crfree(oldcred); + oldcred = NULL; } /* - * Store the vp for use in procfs. This vnode was referenced prior - * to locking the proc lock. + * Store the vp for use in procfs. This vnode was referenced by namei + * or fgetvp_exec. */ - textvp = p->p_textvp; - p->p_textvp = binvp; + oldtextvp = p->p_textvp; + p->p_textvp = newtextvp; #ifdef KDTRACE_HOOKS /* @@ -848,42 +873,9 @@ interpret: vfs_mark_atime(imgp->vp, td->td_ucred); - SDT_PROBE1(proc, kernel, , exec__success, args->fname); - - VOP_UNLOCK(imgp->vp, 0); -done1: - /* - * Free any resources malloc'd earlier that we didn't use. - */ - if (euip != NULL) - uifree(euip); - if (newcred != NULL) - crfree(oldcred); - - /* - * Handle deferred decrement of ref counts. - */ - if (textvp != NULL) - vrele(textvp); - if (binvp && error != 0) - vrele(binvp); -#ifdef KTRACE - if (tracevp != NULL) - vrele(tracevp); - if (tracecred != NULL) - crfree(tracecred); -#endif - vn_lock(imgp->vp, LK_SHARED | LK_RETRY); - pargs_drop(oldargs); - pargs_drop(newargs); - if (oldsigacts != NULL) - sigacts_free(oldsigacts); + SDT_PROBE1(proc, , , exec__success, args->fname); exec_fail_dealloc: - - /* - * free various allocated resources - */ if (imgp->firstpage != NULL) exec_unmap_first_page(imgp); @@ -892,7 +884,10 @@ exec_fail_dealloc: NDFREE(&nd, NDF_ONLY_PNBUF); if (imgp->opened) VOP_CLOSE(imgp->vp, FREAD, td->td_ucred, td); - vput(imgp->vp); + if (error != 0) + vput(imgp->vp); + else + VOP_UNLOCK(imgp->vp, 0); } if (imgp->object != NULL) @@ -910,24 +905,43 @@ exec_fail_dealloc: * the S_EXEC bit set. */ STOPEVENT(p, S_EXEC, 0); - goto done2; - } - + } else { exec_fail: - /* we're done here, clear P_INEXEC */ - PROC_LOCK(p); - p->p_flag &= ~P_INEXEC; - PROC_UNLOCK(p); + /* we're done here, clear P_INEXEC */ + PROC_LOCK(p); + p->p_flag &= ~P_INEXEC; + PROC_UNLOCK(p); - SDT_PROBE1(proc, kernel, , exec__failure, error); + SDT_PROBE1(proc, , , exec__failure, error); + } + + if (imgp->newcred != NULL && oldcred != NULL) + crfree(imgp->newcred); -done2: #ifdef MAC mac_execve_exit(imgp); mac_execve_interpreter_exit(interpvplabel); #endif exec_free_args(args); + /* + * Handle deferred decrement of ref counts. 
+ */ + if (oldtextvp != NULL) + vrele(oldtextvp); +#ifdef KTRACE + if (tracevp != NULL) + vrele(tracevp); + if (tracecred != NULL) + crfree(tracecred); +#endif + pargs_drop(oldargs); + pargs_drop(newargs); + if (oldsigacts != NULL) + sigacts_free(oldsigacts); + if (euip != NULL) + uifree(euip); + if (error && imgp->vmspace_destroyed) { /* sorry, no more process anymore. exit gracefully */ exit1(td, W_EXITCODE(0, SIGABRT)); @@ -1500,8 +1514,6 @@ exec_register(execsw_arg) for (es = execsw; *es; es++) count++; newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK); - if (newexecsw == NULL) - return (ENOMEM); xs = newexecsw; if (execsw) for (es = execsw; *es; es++) @@ -1534,8 +1546,6 @@ exec_unregister(execsw_arg) if (*es != execsw_arg) count++; newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK); - if (newexecsw == NULL) - return (ENOMEM); xs = newexecsw; for (es = execsw; *es; es++) if (*es != execsw_arg) diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c index 76b4427..f9244af 100644 --- a/sys/kern/kern_exit.c +++ b/sys/kern/kern_exit.c @@ -95,7 +95,7 @@ dtrace_execexit_func_t dtrace_fasttrap_exit; #endif SDT_PROVIDER_DECLARE(proc); -SDT_PROBE_DEFINE1(proc, kernel, , exit, "int"); +SDT_PROBE_DEFINE1(proc, , , exit, "int"); /* Hook for NFS teardown procedure. */ void (*nlminfo_release_p)(struct proc *p); @@ -564,7 +564,7 @@ exit1(struct thread *td, int rv) reason = CLD_DUMPED; else if (WIFSIGNALED(rv)) reason = CLD_KILLED; - SDT_PROBE1(proc, kernel, , exit, reason); + SDT_PROBE1(proc, , , exit, reason); #endif /* @@ -937,7 +937,7 @@ proc_reap(struct thread *td, struct proc *p, int *status, int options) * Free credentials, arguments, and sigacts. */ crfree(p->p_ucred); - p->p_ucred = NULL; + proc_set_cred(p, NULL); pargs_drop(p->p_args); p->p_args = NULL; sigacts_free(p->p_sigacts); diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c index f24ba20..d50db75 100644 --- a/sys/kern/kern_fork.c +++ b/sys/kern/kern_fork.c @@ -90,8 +90,7 @@ dtrace_fork_func_t dtrace_fasttrap_fork; #endif SDT_PROVIDER_DECLARE(proc); -SDT_PROBE_DEFINE3(proc, kernel, , create, "struct proc *", - "struct proc *", "int"); +SDT_PROBE_DEFINE3(proc, , , create, "struct proc *", "struct proc *", "int"); #ifndef _SYS_SYSPROTO_H_ struct fork_args { @@ -411,8 +410,10 @@ do_fork(struct thread *td, int flags, struct proc *p2, struct thread *td2, bzero(&p2->p_startzero, __rangeof(struct proc, p_startzero, p_endzero)); p2->p_treeflag = 0; + p2->p_filemon = NULL; - p2->p_ucred = crhold(td->td_ucred); + crhold(td->td_ucred); + proc_set_cred(p2, td->td_ucred); /* Tell the prison that we exist. */ prison_proc_hold(p2->p_ucred->cr_prison); @@ -753,7 +754,7 @@ do_fork(struct thread *td, int flags, struct proc *p2, struct thread *td2, * Tell any interested parties about the new process. */ knote_fork(&p1->p_klist, p2->p_pid); - SDT_PROBE3(proc, kernel, , create, p2, p1, flags); + SDT_PROBE3(proc, , , create, p2, p1, flags); /* * Wait until debugger is attached to child. @@ -898,7 +899,7 @@ fork1(struct thread *td, int flags, int pages, struct proc **procp, /* * The swap reservation failed. The accounting * from the entries of the copied vm2 will be - * substracted in vmspace_free(), so force the + * subtracted in vmspace_free(), so force the * reservation there. */ swap_reserve_force(mem_charged); @@ -912,7 +913,7 @@ fork1(struct thread *td, int flags, int pages, struct proc **procp, * XXX: This is ugly; when we copy resource usage, we need to bump * per-cred resource counters. 
*/ - newproc->p_ucred = p1->p_ucred; + proc_set_cred(newproc, p1->p_ucred); /* * Initialize resource accounting for the child process. diff --git a/sys/kern/kern_jail.c b/sys/kern/kern_jail.c index a0e4c61..f0a046e 100644 --- a/sys/kern/kern_jail.c +++ b/sys/kern/kern_jail.c @@ -2476,7 +2476,7 @@ do_jail_attach(struct thread *td, struct prison *pr) PROC_LOCK(p); oldcred = crcopysafe(p, newcred); newcred->cr_prison = pr; - p->p_ucred = newcred; + proc_set_cred(p, newcred); setsugid(p); PROC_UNLOCK(p); #ifdef RACCT @@ -4072,7 +4072,7 @@ prison_priv_check(struct ucred *cred, int priv) return (0); /* - * Allow jailed root to set certian IPv4/6 (option) headers. + * Allow jailed root to set certain IPv4/6 (option) headers. */ case PRIV_NETINET_SETHDROPTS: return (0); @@ -4313,7 +4313,7 @@ SYSCTL_UINT(_security_jail, OID_AUTO, jail_max_af_ips, CTLFLAG_RW, #endif /* - * Default parameters for jail(2) compatability. For historical reasons, + * Default parameters for jail(2) compatibility. For historical reasons, * the sysctl names have varying similarity to the parameter names. Prisons * just see their own parameters, and can't change them. */ diff --git a/sys/kern/kern_linker.c b/sys/kern/kern_linker.c index 78eab87..ca8c10a 100644 --- a/sys/kern/kern_linker.c +++ b/sys/kern/kern_linker.c @@ -948,7 +948,7 @@ linker_debug_search_symbol_name(caddr_t value, char *buf, u_int buflen, * * Note that we do not obey list locking protocols here. We really don't need * DDB to hang because somebody's got the lock held. We'll take the chance - * that the files list is inconsistant instead. + * that the files list is inconsistent instead. */ #ifdef DDB int @@ -1762,8 +1762,6 @@ linker_hints_lookup(const char *path, int pathlen, const char *modname, goto bad; } hints = malloc(vattr.va_size, M_TEMP, M_WAITOK); - if (hints == NULL) - goto bad; error = vn_rdwr(UIO_READ, nd.ni_vp, (caddr_t)hints, vattr.va_size, 0, UIO_SYSSPACE, IO_NODELOCKED, cred, NOCRED, &reclen, td); if (error) @@ -2038,7 +2036,7 @@ linker_load_dependencies(linker_file_t lf) int ver, error = 0, count; /* - * All files are dependant on /kernel. + * All files are dependent on /kernel. */ sx_assert(&kld_sx, SA_XLOCKED); if (linker_kernel_file) { diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c index ca2a359..e3e946e 100644 --- a/sys/kern/kern_lock.c +++ b/sys/kern/kern_lock.c @@ -286,7 +286,7 @@ wakeupshlk(struct lock *lk, const char *file, int line) * exclusive waiters bit anyway. * Please note that lk_exslpfail count may be lying about * the real number of waiters with the LK_SLEEPFAIL flag on - * because they may be used in conjuction with interruptible + * because they may be used in conjunction with interruptible * sleeps so lk_exslpfail might be considered an 'upper limit' * bound, including the edge cases. */ @@ -1063,7 +1063,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk, * Please note that lk_exslpfail count may be lying * about the real number of waiters with the * LK_SLEEPFAIL flag on because they may be used in - * conjuction with interruptible sleeps so + * conjunction with interruptible sleeps so * lk_exslpfail might be considered an 'upper limit' * bound, including the edge cases. 
*/ @@ -1176,7 +1176,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk, * Please note that lk_exslpfail count may be * lying about the real number of waiters with * the LK_SLEEPFAIL flag on because they may - * be used in conjuction with interruptible + * be used in conjunction with interruptible * sleeps so lk_exslpfail might be considered * an 'upper limit' bound, including the edge * cases. diff --git a/sys/kern/kern_lockf.c b/sys/kern/kern_lockf.c index 91eae09..a0a3789 100644 --- a/sys/kern/kern_lockf.c +++ b/sys/kern/kern_lockf.c @@ -362,7 +362,7 @@ lf_free_lock(struct lockf_entry *lock) struct lock_owner *lo = lock->lf_owner; if (lo) { KASSERT(LIST_EMPTY(&lock->lf_outedges), - ("freeing lock with dependancies")); + ("freeing lock with dependencies")); KASSERT(LIST_EMPTY(&lock->lf_inedges), ("freeing lock with dependants")); sx_xlock(&lf_lock_owners_lock); @@ -827,7 +827,7 @@ lf_purgelocks(struct vnode *vp, struct lockf **statep) /* * We can just free all the active locks since they - * will have no dependancies (we removed them all + * will have no dependencies (we removed them all * above). We don't need to bother locking since we * are the last thread using this state structure. */ @@ -1112,7 +1112,7 @@ lf_insert_lock(struct lockf *state, struct lockf_entry *lock) /* * Wake up a sleeping lock and remove it from the pending list now - * that all its dependancies have been resolved. The caller should + * that all its dependencies have been resolved. The caller should * arrange for the lock to be added to the active list, adjusting any * existing locks for the same owner as needed. */ @@ -1137,9 +1137,9 @@ lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock) } /* - * Re-check all dependant locks and remove edges to locks that we no + * Re-check all dependent locks and remove edges to locks that we no * longer block. If 'all' is non-zero, the lock has been removed and - * we must remove all the dependancies, otherwise it has simply been + * we must remove all the dependencies, otherwise it has simply been * reduced but remains active. Any pending locks which have been been * unblocked are added to 'granted' */ @@ -1165,7 +1165,7 @@ lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all, } /* - * Set the start of an existing active lock, updating dependancies and + * Set the start of an existing active lock, updating dependencies and * adding any newly woken locks to 'granted'. */ static void @@ -1181,7 +1181,7 @@ lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start, } /* - * Set the end of an existing active lock, updating dependancies and + * Set the end of an existing active lock, updating dependencies and * adding any newly woken locks to 'granted'. */ static void @@ -1204,7 +1204,7 @@ lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end, * pending locks as a result of downgrading/unlocking. We simply * activate the newly granted locks by looping. * - * Since the new lock already has its dependancies set up, we always + * Since the new lock already has its dependencies set up, we always * add it to the list (unless its an unlock request). This may * fragment the lock list in some pathological cases but its probably * not a real problem. @@ -1332,7 +1332,7 @@ lf_cancel_lock(struct lockf *state, struct lockf_entry *lock) * may allow some other pending lock to become * active. 
Consider this case: * - * Owner Action Result Dependancies + * Owner Action Result Dependencies * * A: lock [0..0] succeeds * B: lock [2..2] succeeds @@ -1840,7 +1840,7 @@ lf_split(struct lockf *state, struct lockf_entry *lock1, /* * This cannot cause a deadlock since any edges we would add * to splitlock already exist in lock1. We must be sure to add - * necessary dependancies to splitlock before we reduce lock1 + * necessary dependencies to splitlock before we reduce lock1 * otherwise we may accidentally grant a pending lock that * was blocked by the tail end of lock1. */ diff --git a/sys/kern/kern_loginclass.c b/sys/kern/kern_loginclass.c index b20f60b..04f6809 100644 --- a/sys/kern/kern_loginclass.c +++ b/sys/kern/kern_loginclass.c @@ -205,7 +205,7 @@ sys_setloginclass(struct thread *td, struct setloginclass_args *uap) PROC_LOCK(p); oldcred = crcopysafe(p, newcred); newcred->cr_loginclass = newlc; - p->p_ucred = newcred; + proc_set_cred(p, newcred); PROC_UNLOCK(p); #ifdef RACCT racct_proc_ucred_changed(p, oldcred, newcred); diff --git a/sys/kern/kern_mbuf.c b/sys/kern/kern_mbuf.c index 7f9f666..c610f54 100644 --- a/sys/kern/kern_mbuf.c +++ b/sys/kern/kern_mbuf.c @@ -92,7 +92,7 @@ __FBSDID("$FreeBSD$"); * * Whenever an object is allocated from the underlying global * memory pool it gets pre-initialized with the _zinit_ functions. - * When the Keg's are overfull objects get decomissioned with + * When the Keg's are overfull objects get decommissioned with * _zfini_ functions and free'd back to the global memory pool. * */ diff --git a/sys/kern/kern_mtxpool.c b/sys/kern/kern_mtxpool.c index 23b41bb..6bfe611 100644 --- a/sys/kern/kern_mtxpool.c +++ b/sys/kern/kern_mtxpool.c @@ -39,7 +39,7 @@ * * Disadvantages: * - should generally only be used as leaf mutexes. - * - pool/pool dependancy ordering cannot be depended on. + * - pool/pool dependency ordering cannot be depended on. * - possible L1 cache mastersip contention between cpus. 
*/ diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c index ac375a4..0f60553 100644 --- a/sys/kern/kern_proc.c +++ b/sys/kern/kern_proc.c @@ -92,18 +92,15 @@ __FBSDID("$FreeBSD$"); #endif SDT_PROVIDER_DEFINE(proc); -SDT_PROBE_DEFINE4(proc, kernel, ctor, entry, "struct proc *", "int", - "void *", "int"); -SDT_PROBE_DEFINE4(proc, kernel, ctor, return, "struct proc *", "int", - "void *", "int"); -SDT_PROBE_DEFINE4(proc, kernel, dtor, entry, "struct proc *", "int", - "void *", "struct thread *"); -SDT_PROBE_DEFINE3(proc, kernel, dtor, return, "struct proc *", "int", - "void *"); -SDT_PROBE_DEFINE3(proc, kernel, init, entry, "struct proc *", "int", +SDT_PROBE_DEFINE4(proc, , ctor, entry, "struct proc *", "int", "void *", "int"); -SDT_PROBE_DEFINE3(proc, kernel, init, return, "struct proc *", "int", +SDT_PROBE_DEFINE4(proc, , ctor, return, "struct proc *", "int", "void *", "int"); +SDT_PROBE_DEFINE4(proc, , dtor, entry, "struct proc *", "int", "void *", + "struct thread *"); +SDT_PROBE_DEFINE3(proc, , dtor, return, "struct proc *", "int", "void *"); +SDT_PROBE_DEFINE3(proc, , init, entry, "struct proc *", "int", "int"); +SDT_PROBE_DEFINE3(proc, , init, return, "struct proc *", "int", "int"); MALLOC_DEFINE(M_PGRP, "pgrp", "process group header"); MALLOC_DEFINE(M_SESSION, "session", "session header"); @@ -196,9 +193,9 @@ proc_ctor(void *mem, int size, void *arg, int flags) struct proc *p; p = (struct proc *)mem; - SDT_PROBE4(proc, kernel, ctor , entry, p, size, arg, flags); + SDT_PROBE4(proc, , ctor , entry, p, size, arg, flags); EVENTHANDLER_INVOKE(process_ctor, p); - SDT_PROBE4(proc, kernel, ctor , return, p, size, arg, flags); + SDT_PROBE4(proc, , ctor , return, p, size, arg, flags); return (0); } @@ -214,7 +211,7 @@ proc_dtor(void *mem, int size, void *arg) /* INVARIANTS checks go here */ p = (struct proc *)mem; td = FIRST_THREAD_IN_PROC(p); - SDT_PROBE4(proc, kernel, dtor, entry, p, size, arg, td); + SDT_PROBE4(proc, , dtor, entry, p, size, arg, td); if (td != NULL) { #ifdef INVARIANTS KASSERT((p->p_numthreads == 1), @@ -227,7 +224,7 @@ proc_dtor(void *mem, int size, void *arg) EVENTHANDLER_INVOKE(process_dtor, p); if (p->p_ksi != NULL) KASSERT(! KSI_ONQ(p->p_ksi), ("SIGCHLD queue")); - SDT_PROBE3(proc, kernel, dtor, return, p, size, arg); + SDT_PROBE3(proc, , dtor, return, p, size, arg); } /* @@ -239,7 +236,7 @@ proc_init(void *mem, int size, int flags) struct proc *p; p = (struct proc *)mem; - SDT_PROBE3(proc, kernel, init, entry, p, size, flags); + SDT_PROBE3(proc, , init, entry, p, size, flags); p->p_sched = (struct p_sched *)&p[1]; bzero(&p->p_mtx, sizeof(struct mtx)); mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK); @@ -250,7 +247,7 @@ proc_init(void *mem, int size, int flags) EVENTHANDLER_INVOKE(process_init, p); p->p_stats = pstats_alloc(); p->p_pgrp = NULL; - SDT_PROBE3(proc, kernel, init, return, p, size, flags); + SDT_PROBE3(proc, , init, return, p, size, flags); return (0); } diff --git a/sys/kern/kern_prot.c b/sys/kern/kern_prot.c index 03b9001..8235a1a 100644 --- a/sys/kern/kern_prot.c +++ b/sys/kern/kern_prot.c @@ -147,7 +147,7 @@ sys_getpgrp(struct thread *td, struct getpgrp_args *uap) return (0); } -/* Get an arbitary pid's process group id */ +/* Get an arbitrary pid's process group id */ #ifndef _SYS_SYSPROTO_H_ struct getpgid_args { pid_t pid; @@ -178,7 +178,7 @@ sys_getpgid(struct thread *td, struct getpgid_args *uap) } /* - * Get an arbitary pid's session id. + * Get an arbitrary pid's session id. 
*/ #ifndef _SYS_SYSPROTO_H_ struct getsid_args { @@ -582,7 +582,7 @@ sys_setuid(struct thread *td, struct setuid_args *uap) change_euid(newcred, uip); setsugid(p); } - p->p_ucred = newcred; + proc_set_cred(p, newcred); PROC_UNLOCK(p); #ifdef RACCT racct_proc_ucred_changed(p, oldcred, newcred); @@ -641,7 +641,7 @@ sys_seteuid(struct thread *td, struct seteuid_args *uap) change_euid(newcred, euip); setsugid(p); } - p->p_ucred = newcred; + proc_set_cred(p, newcred); PROC_UNLOCK(p); uifree(euip); crfree(oldcred); @@ -741,7 +741,7 @@ sys_setgid(struct thread *td, struct setgid_args *uap) change_egid(newcred, gid); setsugid(p); } - p->p_ucred = newcred; + proc_set_cred(p, newcred); PROC_UNLOCK(p); crfree(oldcred); return (0); @@ -787,7 +787,7 @@ sys_setegid(struct thread *td, struct setegid_args *uap) change_egid(newcred, egid); setsugid(p); } - p->p_ucred = newcred; + proc_set_cred(p, newcred); PROC_UNLOCK(p); crfree(oldcred); return (0); @@ -860,7 +860,7 @@ kern_setgroups(struct thread *td, u_int ngrp, gid_t *groups) crsetgroups_locked(newcred, ngrp, groups); } setsugid(p); - p->p_ucred = newcred; + proc_set_cred(p, newcred); PROC_UNLOCK(p); crfree(oldcred); return (0); @@ -923,7 +923,7 @@ sys_setreuid(register struct thread *td, struct setreuid_args *uap) change_svuid(newcred, newcred->cr_uid); setsugid(p); } - p->p_ucred = newcred; + proc_set_cred(p, newcred); PROC_UNLOCK(p); #ifdef RACCT racct_proc_ucred_changed(p, oldcred, newcred); @@ -990,7 +990,7 @@ sys_setregid(register struct thread *td, struct setregid_args *uap) change_svgid(newcred, newcred->cr_groups[0]); setsugid(p); } - p->p_ucred = newcred; + proc_set_cred(p, newcred); PROC_UNLOCK(p); crfree(oldcred); return (0); @@ -1064,7 +1064,7 @@ sys_setresuid(register struct thread *td, struct setresuid_args *uap) change_svuid(newcred, suid); setsugid(p); } - p->p_ucred = newcred; + proc_set_cred(p, newcred); PROC_UNLOCK(p); #ifdef RACCT racct_proc_ucred_changed(p, oldcred, newcred); @@ -1143,7 +1143,7 @@ sys_setresgid(register struct thread *td, struct setresgid_args *uap) change_svgid(newcred, sgid); setsugid(p); } - p->p_ucred = newcred; + proc_set_cred(p, newcred); PROC_UNLOCK(p); crfree(oldcred); return (0); @@ -1956,6 +1956,31 @@ cred_update_thread(struct thread *td) crfree(cred); } +/* + * Change process credentials. + * Callers are responsible for providing the reference for current credentials + * and for freeing old ones. + * + * Process has to be locked except when it does not have credentials (as it + * should not be visible just yet) or when newcred is NULL (as this can be + * only used when the process is about to be freed, at which point it should + * not be visible anymore). 
+ */ +struct ucred * +proc_set_cred(struct proc *p, struct ucred *newcred) +{ + struct ucred *oldcred; + + if (newcred == NULL) + MPASS(p->p_state == PRS_ZOMBIE); + else if (p->p_ucred != NULL) + PROC_LOCK_ASSERT(p, MA_OWNED); + + oldcred = p->p_ucred; + p->p_ucred = newcred; + return (oldcred); +} + struct ucred * crcopysafe(struct proc *p, struct ucred *cr) { diff --git a/sys/kern/kern_racct.c b/sys/kern/kern_racct.c index 9e67034..eaf93fc 100644 --- a/sys/kern/kern_racct.c +++ b/sys/kern/kern_racct.c @@ -104,30 +104,32 @@ static void racct_add_cred_locked(struct ucred *cred, int resource, uint64_t amount); SDT_PROVIDER_DEFINE(racct); -SDT_PROBE_DEFINE3(racct, kernel, rusage, add, "struct proc *", "int", - "uint64_t"); -SDT_PROBE_DEFINE3(racct, kernel, rusage, add__failure, +SDT_PROBE_DEFINE3(racct, , rusage, add, "struct proc *", "int", "uint64_t"); -SDT_PROBE_DEFINE3(racct, kernel, rusage, add__cred, "struct ucred *", - "int", "uint64_t"); -SDT_PROBE_DEFINE3(racct, kernel, rusage, add__force, "struct proc *", - "int", "uint64_t"); -SDT_PROBE_DEFINE3(racct, kernel, rusage, set, "struct proc *", "int", - "uint64_t"); -SDT_PROBE_DEFINE3(racct, kernel, rusage, set__failure, +SDT_PROBE_DEFINE3(racct, , rusage, add__failure, "struct proc *", "int", "uint64_t"); -SDT_PROBE_DEFINE3(racct, kernel, rusage, sub, "struct proc *", "int", - "uint64_t"); -SDT_PROBE_DEFINE3(racct, kernel, rusage, sub__cred, "struct ucred *", - "int", "uint64_t"); -SDT_PROBE_DEFINE1(racct, kernel, racct, create, "struct racct *"); -SDT_PROBE_DEFINE1(racct, kernel, racct, destroy, "struct racct *"); -SDT_PROBE_DEFINE2(racct, kernel, racct, join, "struct racct *", +SDT_PROBE_DEFINE3(racct, , rusage, add__cred, + "struct ucred *", "int", "uint64_t"); +SDT_PROBE_DEFINE3(racct, , rusage, add__force, + "struct proc *", "int", "uint64_t"); +SDT_PROBE_DEFINE3(racct, , rusage, set, + "struct proc *", "int", "uint64_t"); +SDT_PROBE_DEFINE3(racct, , rusage, set__failure, + "struct proc *", "int", "uint64_t"); +SDT_PROBE_DEFINE3(racct, , rusage, sub, + "struct proc *", "int", "uint64_t"); +SDT_PROBE_DEFINE3(racct, , rusage, sub__cred, + "struct ucred *", "int", "uint64_t"); +SDT_PROBE_DEFINE1(racct, , racct, create, "struct racct *"); -SDT_PROBE_DEFINE2(racct, kernel, racct, join__failure, - "struct racct *", "struct racct *"); -SDT_PROBE_DEFINE2(racct, kernel, racct, leave, "struct racct *", +SDT_PROBE_DEFINE1(racct, , racct, destroy, "struct racct *"); +SDT_PROBE_DEFINE2(racct, , racct, join, + "struct racct *", "struct racct *"); +SDT_PROBE_DEFINE2(racct, , racct, join__failure, + "struct racct *", "struct racct *"); +SDT_PROBE_DEFINE2(racct, , racct, leave, + "struct racct *", "struct racct *"); int racct_types[] = { [RACCT_CPU] = @@ -447,7 +449,7 @@ racct_create(struct racct **racctp) if (!racct_enable) return; - SDT_PROBE1(racct, kernel, racct, create, racctp); + SDT_PROBE1(racct, , racct, create, racctp); KASSERT(*racctp == NULL, ("racct already allocated")); @@ -462,7 +464,7 @@ racct_destroy_locked(struct racct **racctp) ASSERT_RACCT_ENABLED(); - SDT_PROBE1(racct, kernel, racct, destroy, racctp); + SDT_PROBE1(racct, , racct, destroy, racctp); mtx_assert(&racct_lock, MA_OWNED); KASSERT(racctp != NULL, ("NULL racctp")); @@ -540,7 +542,7 @@ racct_add_locked(struct proc *p, int resource, uint64_t amount) ASSERT_RACCT_ENABLED(); - SDT_PROBE3(racct, kernel, rusage, add, p, resource, amount); + SDT_PROBE3(racct, , rusage, add, p, resource, amount); /* * We need proc lock to dereference p->p_ucred. 
@@ -550,8 +552,7 @@ racct_add_locked(struct proc *p, int resource, uint64_t amount) #ifdef RCTL error = rctl_enforce(p, resource, amount); if (error && RACCT_IS_DENIABLE(resource)) { - SDT_PROBE3(racct, kernel, rusage, add__failure, p, resource, - amount); + SDT_PROBE3(racct, , rusage, add__failure, p, resource, amount); return (error); } #endif @@ -586,7 +587,7 @@ racct_add_cred_locked(struct ucred *cred, int resource, uint64_t amount) ASSERT_RACCT_ENABLED(); - SDT_PROBE3(racct, kernel, rusage, add__cred, cred, resource, amount); + SDT_PROBE3(racct, , rusage, add__cred, cred, resource, amount); racct_adjust_resource(cred->cr_ruidinfo->ui_racct, resource, amount); for (pr = cred->cr_prison; pr != NULL; pr = pr->pr_parent) @@ -624,7 +625,7 @@ racct_add_force(struct proc *p, int resource, uint64_t amount) if (!racct_enable) return; - SDT_PROBE3(racct, kernel, rusage, add__force, p, resource, amount); + SDT_PROBE3(racct, , rusage, add__force, p, resource, amount); /* * We need proc lock to dereference p->p_ucred. @@ -648,7 +649,7 @@ racct_set_locked(struct proc *p, int resource, uint64_t amount) ASSERT_RACCT_ENABLED(); - SDT_PROBE3(racct, kernel, rusage, set, p, resource, amount); + SDT_PROBE3(racct, , rusage, set, p, resource, amount); /* * We need proc lock to dereference p->p_ucred. @@ -680,8 +681,8 @@ racct_set_locked(struct proc *p, int resource, uint64_t amount) if (diff_proc > 0) { error = rctl_enforce(p, resource, diff_proc); if (error && RACCT_IS_DENIABLE(resource)) { - SDT_PROBE3(racct, kernel, rusage, set__failure, p, - resource, amount); + SDT_PROBE3(racct, , rusage, set__failure, p, resource, + amount); return (error); } } @@ -724,7 +725,7 @@ racct_set_force_locked(struct proc *p, int resource, uint64_t amount) ASSERT_RACCT_ENABLED(); - SDT_PROBE3(racct, kernel, rusage, set, p, resource, amount); + SDT_PROBE3(racct, , rusage, set, p, resource, amount); /* * We need proc lock to dereference p->p_ucred. @@ -835,7 +836,7 @@ racct_sub(struct proc *p, int resource, uint64_t amount) if (!racct_enable) return; - SDT_PROBE3(racct, kernel, rusage, sub, p, resource, amount); + SDT_PROBE3(racct, , rusage, sub, p, resource, amount); /* * We need proc lock to dereference p->p_ucred. @@ -862,7 +863,7 @@ racct_sub_cred_locked(struct ucred *cred, int resource, uint64_t amount) ASSERT_RACCT_ENABLED(); - SDT_PROBE3(racct, kernel, rusage, sub__cred, cred, resource, amount); + SDT_PROBE3(racct, , rusage, sub__cred, cred, resource, amount); #ifdef notyet KASSERT(RACCT_CAN_DROP(resource), diff --git a/sys/kern/kern_rctl.c b/sys/kern/kern_rctl.c index c43b83d..2aa55e2 100644 --- a/sys/kern/kern_rctl.c +++ b/sys/kern/kern_rctl.c @@ -310,7 +310,7 @@ rctl_pcpu_available(const struct proc *p) { /* * Return slightly less than actual value of the available - * %cpu resource. This makes %cpu throttling more agressive + * %cpu resource. This makes %cpu throttling more aggressive * and lets us act sooner than the limits are already exceeded. */ if (limit != 0) { diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c index 194c603..aa8d4b0 100644 --- a/sys/kern/kern_rmlock.c +++ b/sys/kern/kern_rmlock.c @@ -370,7 +370,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock) } /* - * We allow readers to aquire a lock even if a writer is blocked if + * We allow readers to acquire a lock even if a writer is blocked if * the lock is recursive and the reader already holds the lock. 
*/ if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) { diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c index 841fc49..29783f8 100644 --- a/sys/kern/kern_sig.c +++ b/sys/kern/kern_sig.c @@ -94,11 +94,11 @@ __FBSDID("$FreeBSD$"); #define ONSIG 32 /* NSIG for osig* syscalls. XXX. */ SDT_PROVIDER_DECLARE(proc); -SDT_PROBE_DEFINE3(proc, kernel, , signal__send, "struct thread *", - "struct proc *", "int"); -SDT_PROBE_DEFINE2(proc, kernel, , signal__clear, "int", - "ksiginfo_t *"); -SDT_PROBE_DEFINE3(proc, kernel, , signal__discard, +SDT_PROBE_DEFINE3(proc, , , signal__send, + "struct thread *", "struct proc *", "int"); +SDT_PROBE_DEFINE2(proc, , , signal__clear, + "int", "ksiginfo_t *"); +SDT_PROBE_DEFINE3(proc, , , signal__discard, "struct thread *", "struct proc *", "int"); static int coredump(struct thread *); @@ -1291,7 +1291,7 @@ kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi, reschedule_signals(p, new_block, 0); if (error == 0) { - SDT_PROBE2(proc, kernel, , signal__clear, sig, ksi); + SDT_PROBE2(proc, , , signal__clear, sig, ksi); if (ksi->ksi_code == SI_TIMER) itimer_accept(p, ksi->ksi_timerid, ksi); @@ -2108,7 +2108,7 @@ tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi) } else sigqueue = &td->td_sigqueue; - SDT_PROBE3(proc, kernel, , signal__send, td, p, sig); + SDT_PROBE3(proc, , , signal__send, td, p, sig); /* * If the signal is being ignored, @@ -2119,7 +2119,7 @@ tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi) */ mtx_lock(&ps->ps_mtx); if (SIGISMEMBER(ps->ps_sigignore, sig)) { - SDT_PROBE3(proc, kernel, , signal__discard, td, p, sig); + SDT_PROBE3(proc, , , signal__discard, td, p, sig); mtx_unlock(&ps->ps_mtx); if (ksi && (ksi->ksi_flags & KSI_INS)) diff --git a/sys/kern/kern_sysctl.c b/sys/kern/kern_sysctl.c index ddc3369..af7ac77 100644 --- a/sys/kern/kern_sysctl.c +++ b/sys/kern/kern_sysctl.c @@ -172,7 +172,7 @@ sysctl_register_oid(struct sysctl_oid *oidp) * * NOTE: DO NOT change the starting value here, change it in * <sys/sysctl.h>, and make sure it is at least 256 to - * accomodate e.g. net.inet.raw as a static sysctl node. + * accommodate e.g. net.inet.raw as a static sysctl node. */ if (oid_number < 0) { static int newoid; @@ -295,7 +295,7 @@ sysctl_ctx_free(struct sysctl_ctx_list *clist) } /* * Restore deregistered entries, either from the end, - * or from the place where error occured. + * or from the place where error occurred. * e contains the entry that was not unregistered */ if (error) diff --git a/sys/kern/kern_tc.c b/sys/kern/kern_tc.c index e9240ce..eabdd54 100644 --- a/sys/kern/kern_tc.c +++ b/sys/kern/kern_tc.c @@ -1855,7 +1855,7 @@ inittimecounter(void *dummy) * Set the initial timeout to * max(1, <approx. number of hardclock ticks in a millisecond>). * People should probably not use the sysctl to set the timeout - * to smaller than its inital value, since that value is the + * to smaller than its initial value, since that value is the * smallest reasonable one. If they want better timestamps they * should use the non-"get"* functions. */ diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c index c85813b..2f8382c 100644 --- a/sys/kern/kern_thread.c +++ b/sys/kern/kern_thread.c @@ -424,7 +424,7 @@ thread_exit(void) * architecture specific resources that * would not be on a new untouched process. 
*/ - cpu_thread_exit(td); /* XXXSMP */ + cpu_thread_exit(td); /* * The last thread is left attached to the process @@ -613,11 +613,6 @@ weed_inhib(int mode, struct thread *td2, struct proc *p) wakeup_swapper |= sleepq_abort(td2, EINTR); break; case SINGLE_BOUNDARY: - if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0) - wakeup_swapper |= thread_unsuspend_one(td2, p, false); - if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) - wakeup_swapper |= sleepq_abort(td2, ERESTART); - break; case SINGLE_NO_EXIT: if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0) wakeup_swapper |= thread_unsuspend_one(td2, p, false); @@ -856,8 +851,8 @@ thread_suspend_check(int return_instead) /* * The only suspension in action is a * single-threading. Single threader need not stop. - * XXX Should be safe to access unlocked - * as it can only be set to be true by us. + * It is safe to access p->p_singlethread unlocked + * because it can only be set to our address by us. */ if (p->p_singlethread == td) return (0); /* Exempt from stopping. */ diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c index a3402f8..9aa11ba 100644 --- a/sys/kern/kern_timeout.c +++ b/sys/kern/kern_timeout.c @@ -69,10 +69,8 @@ DPCPU_DECLARE(sbintime_t, hardclocktime); #endif SDT_PROVIDER_DEFINE(callout_execute); -SDT_PROBE_DEFINE1(callout_execute, kernel, , callout__start, - "struct callout *"); -SDT_PROBE_DEFINE1(callout_execute, kernel, , callout__end, - "struct callout *"); +SDT_PROBE_DEFINE1(callout_execute, , , callout__start, "struct callout *"); +SDT_PROBE_DEFINE1(callout_execute, , , callout__end, "struct callout *"); #ifdef CALLOUT_PROFILING static int avg_depth; @@ -681,9 +679,9 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc, sbt1 = sbinuptime(); #endif THREAD_NO_SLEEPING(); - SDT_PROBE1(callout_execute, kernel, , callout__start, c); + SDT_PROBE1(callout_execute, , , callout__start, c); c_func(c_arg); - SDT_PROBE1(callout_execute, kernel, , callout__end, c); + SDT_PROBE1(callout_execute, , , callout__end, c); THREAD_SLEEPING_OK(); #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) sbt2 = sbinuptime(); @@ -1397,7 +1395,7 @@ _callout_init_lock(c, lock, flags) * which set the timer can do the maintanence the timer was for as close * as possible to the originally intended time. Testing this code for a * week showed that resuming from a suspend resulted in 22 to 25 timers - * firing, which seemed independant on whether the suspend was 2 hours or + * firing, which seemed independent on whether the suspend was 2 hours or * 2 days. Your milage may vary. - Ken Key <key@cs.utk.edu> */ void diff --git a/sys/kern/link_elf_obj.c b/sys/kern/link_elf_obj.c index a578049..b7610b7 100644 --- a/sys/kern/link_elf_obj.c +++ b/sys/kern/link_elf_obj.c @@ -1099,7 +1099,7 @@ relocate_file(elf_file_t ef) } /* - * Only clean SHN_FBSD_CACHED for successfull return. If we + * Only clean SHN_FBSD_CACHED for successful return. If we * modified symbol table for the object but found an * unresolved symbol, there is no reason to roll back. */ diff --git a/sys/kern/linker_if.m b/sys/kern/linker_if.m index 3df592c..a583a03 100644 --- a/sys/kern/linker_if.m +++ b/sys/kern/linker_if.m @@ -89,7 +89,7 @@ METHOD int lookup_set { }; # -# Unload a file, releasing dependancies and freeing storage. +# Unload a file, releasing dependencies and freeing storage. 
# METHOD void unload { linker_file_t file; diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c index 6498ae7..676bd35 100644 --- a/sys/kern/sched_4bsd.c +++ b/sys/kern/sched_4bsd.c @@ -1393,7 +1393,7 @@ sched_add(struct thread *td, int flags) * or kicking off another CPU as it won't help and may hinder. * In the YIEDLING case, we are about to run whoever is being * put in the queue anyhow, and in the OURSELF case, we are - * puting ourself on the run queue which also only happens + * putting ourself on the run queue which also only happens * when we are about to yield. */ if ((flags & SRQ_YIELDING) == 0) { diff --git a/sys/kern/subr_blist.c b/sys/kern/subr_blist.c index 5c45b81..fb3526b 100644 --- a/sys/kern/subr_blist.c +++ b/sys/kern/subr_blist.c @@ -57,8 +57,8 @@ * The non-blocking features of the blist code are used in the swap code * (vm/swap_pager.c). * - * LAYOUT: The radix tree is layed out recursively using a - * linear array. Each meta node is immediately followed (layed out + * LAYOUT: The radix tree is laid out recursively using a + * linear array. Each meta node is immediately followed (laid out * sequentially in memory) by BLIST_META_RADIX lower level nodes. This * is a recursive structure but one that can be easily scanned through * a very simple 'skip' calculation. In order to support large radixes, diff --git a/sys/kern/subr_bus.c b/sys/kern/subr_bus.c index faf4aa0..aba4364 100644 --- a/sys/kern/subr_bus.c +++ b/sys/kern/subr_bus.c @@ -4564,7 +4564,7 @@ root_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, } /* - * If we get here, assume that the device is permanant and really is + * If we get here, assume that the device is permanent and really is * present in the system. Removable bus drivers are expected to intercept * this call long before it gets here. We return -1 so that drivers that * really care can check vs -1 or some ERRNO returned higher in the food diff --git a/sys/kern/subr_devstat.c b/sys/kern/subr_devstat.c index 84696c2..84305db 100644 --- a/sys/kern/subr_devstat.c +++ b/sys/kern/subr_devstat.c @@ -391,7 +391,7 @@ sysctl_devstat(SYSCTL_HANDLER_ARGS) * XXX devstat_generation should really be "volatile" but that * XXX freaks out the sysctl macro below. The places where we * XXX change it and inspect it are bracketed in the mutex which - * XXX guarantees us proper write barriers. I don't belive the + * XXX guarantees us proper write barriers. I don't believe the * XXX compiler is allowed to optimize mygen away across calls * XXX to other functions, so the following is belived to be safe. */ diff --git a/sys/kern/subr_mbpool.c b/sys/kern/subr_mbpool.c index f0b44d2..acc73ef 100644 --- a/sys/kern/subr_mbpool.c +++ b/sys/kern/subr_mbpool.c @@ -289,7 +289,7 @@ mbp_ext_free(struct mbuf *m, void *buf, void *arg) } /* - * Free all buffers that are marked as beeing on the card + * Free all buffers that are marked as being on the card */ void mbp_card_free(struct mbpool *p) diff --git a/sys/kern/subr_mchain.c b/sys/kern/subr_mchain.c index e9d7d22..204a27c 100644 --- a/sys/kern/subr_mchain.c +++ b/sys/kern/subr_mchain.c @@ -101,7 +101,7 @@ mb_fixhdr(struct mbchain *mbp) /* * Check if object of size 'size' fit to the current position and * allocate new mbuf if not. Advance pointers and increase length of mbuf(s). - * Return pointer to the object placeholder or NULL if any error occured. + * Return pointer to the object placeholder or NULL if any error occurred. 
* Note: size should be <= MLEN
*/
caddr_t
diff --git a/sys/kern/subr_msgbuf.c b/sys/kern/subr_msgbuf.c
index ecdbe72..10096c9 100644
--- a/sys/kern/subr_msgbuf.c
+++ b/sys/kern/subr_msgbuf.c
@@ -50,7 +50,7 @@ static u_int msgbuf_cksum(struct msgbuf *mbp);
/*
* Timestamps in msgbuf are useful when trying to diagnose when core dumps
- * or other actions occured.
+ * or other actions occurred.
*/
static int msgbuf_show_timestamp = 0;
SYSCTL_INT(_kern, OID_AUTO, msgbuf_show_timestamp, CTLFLAG_RW | CTLFLAG_TUN,
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
index 3d4e86a..dc2b91b 100644
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -269,7 +269,7 @@ kmstartup(dummy)
* without much risk of reducing the profiling times below what they
* would be when profiling is not configured. Abbreviate:
* ab = minimum time between MC1 and MC3
- * a = minumum time between MC1 and MC2
+ * a = minimum time between MC1 and MC2
* b = minimum time between MC2 and MC3
* cd = minimum time between ME1 and ME3
* c = minimum time between ME1 and ME2
diff --git a/sys/kern/subr_scanf.c b/sys/kern/subr_scanf.c
index 824e392..806ca4f 100644
--- a/sys/kern/subr_scanf.c
+++ b/sys/kern/subr_scanf.c
@@ -603,7 +603,7 @@ doswitch:
* z', but treats `a-a' as `the letter a, the
* character -, and the letter a'.
*
- * For compatibility, the `-' is not considerd
+ * For compatibility, the `-' is not considered
* to define a range if the character following
* it is either a close bracket (required by ANSI)
* or is not numerically greater than the character
diff --git a/sys/kern/subr_uio.c b/sys/kern/subr_uio.c
index 3712f92..8141958 100644
--- a/sys/kern/subr_uio.c
+++ b/sys/kern/subr_uio.c
@@ -468,7 +468,7 @@ copyout_map(struct thread *td, vm_offset_t *addr, size_t sz)
lim_max(td->td_proc, RLIMIT_DATA));
PROC_UNLOCK(td->td_proc);
- /* round size up to page boundry */
+ /* round size up to page boundary */
size = (vm_size_t)round_page(sz);
error = vm_mmap(&vms->vm_map, addr, size, PROT_READ | PROT_WRITE,
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index bd54f57..b767103 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -2898,7 +2898,7 @@ witness_lock_order_add(struct witness *parent, struct witness *child)
return (1);
}
-/* Call this whenver the structure of the witness graph changes. */
+/* Call this whenever the structure of the witness graph changes. */
static void
witness_increment_graph_generation(void)
{
diff --git a/sys/kern/sys_capability.c b/sys/kern/sys_capability.c
index b309905..b3dc1b5 100644
--- a/sys/kern/sys_capability.c
+++ b/sys/kern/sys_capability.c
@@ -105,7 +105,7 @@ sys_cap_enter(struct thread *td, struct cap_enter_args *uap)
oldcred = p->p_ucred;
crcopy(newcred, oldcred);
newcred->cr_flags |= CRED_FLAG_CAPMODE;
- p->p_ucred = newcred;
+ proc_set_cred(p, newcred);
PROC_UNLOCK(p);
crfree(oldcred);
return (0);
diff --git a/sys/kern/sysv_sem.c b/sys/kern/sysv_sem.c
index c39d93d..7d27b48 100644
--- a/sys/kern/sysv_sem.c
+++ b/sys/kern/sysv_sem.c
@@ -1153,7 +1153,7 @@ sys_semop(struct thread *td, struct semop_args *uap)
if ((error = sem_prison_cansee(rpr, semakptr)) != 0)
goto done2;
/*
- * Initial pass thru sops to see what permissions are needed.
+ * Initial pass through sops to see what permissions are needed.
* Also perform any checks that don't need repeating on each
* attempt to satisfy the request vector.
*/
diff --git a/sys/kern/tty.c b/sys/kern/tty.c
index 0e352a6..e977150 100644
--- a/sys/kern/tty.c
+++ b/sys/kern/tty.c
@@ -397,7 +397,7 @@ tty_wait_background(struct tty *tp, struct thread *td, int sig)
PROC_LOCK(p);
/*
* The process should only sleep, when:
- * - This terminal is the controling terminal
+ * - This terminal is the controlling terminal
* - Its process group is not the foreground process
* group
* - The parent process isn't waiting for the child to
diff --git a/sys/kern/tty_pts.c b/sys/kern/tty_pts.c
index 3d2d745..5de9f5e 100644
--- a/sys/kern/tty_pts.c
+++ b/sys/kern/tty_pts.c
@@ -123,7 +123,7 @@ ptsdev_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
/*
* Implement packet mode. When packet mode is turned on,
* the first byte contains a bitmask of events that
- * occured (start, stop, flush, window size, etc).
+ * occurred (start, stop, flush, window size, etc).
*/
if (psc->pts_flags & PTS_PKT && psc->pts_pkt) {
pkt = psc->pts_pkt;
diff --git a/sys/kern/uipc_mbuf2.c b/sys/kern/uipc_mbuf2.c
index e32e2a1..00472d3 100644
--- a/sys/kern/uipc_mbuf2.c
+++ b/sys/kern/uipc_mbuf2.c
@@ -141,7 +141,7 @@ m_pulldown(struct mbuf *m, int off, int len, int *offp)
* Ideally, the requirement should only be (iii).
*
* If we're writable, we're sure we're writable, because the ref. count
- * cannot increase from 1, as that would require posession of mbuf
+ * cannot increase from 1, as that would require possession of mbuf
* n by someone else (which is impossible). However, if we're _not_
* writable, we may eventually become writable )if the ref. count drops
* to 1), but we'll fail to notice it unless we re-evaluate
diff --git a/sys/kern/uipc_socket.c b/sys/kern/uipc_socket.c
index 141e268..23acdf7 100644
--- a/sys/kern/uipc_socket.c
+++ b/sys/kern/uipc_socket.c
@@ -186,7 +186,7 @@ MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
/*
* Limit on the number of connections in the listen queue waiting
* for accept(2).
- * NB: The orginal sysctl somaxconn is still available but hidden
+ * NB: The original sysctl somaxconn is still available but hidden
* to prevent confusion about the actual purpose of this number.
*/
static int somaxconn = SOMAXCONN;
@@ -1091,7 +1091,7 @@ sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
}
/*
* XXX all the SBS_CANTSENDMORE checks previously done could be out
- * of date. We could have recieved a reset packet in an interrupt or
+ * of date. We could have received a reset packet in an interrupt or
* maybe we slept while doing page faults in uiomove() etc. We could
* probably recheck again inside the locking protection here, but
* there are probably other places that this also happens. We must
@@ -1271,7 +1271,7 @@ restart:
}
/*
* XXX all the SBS_CANTSENDMORE checks previously
- * done could be out of date. We could have recieved
+ * done could be out of date. We could have received
* a reset packet in an interrupt or maybe we slept
* while doing page faults in uiomove() etc. We
* could probably recheck again inside the locking
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index 311271b..4c123fd 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -150,7 +150,7 @@ struct namecache_ts {
*/
/*
- * Structures associated with name cacheing.
+ * Structures associated with name caching.
*/
#define NCHHASH(hash) \
(&nchashtbl[(hash) & nchash])
@@ -418,7 +418,6 @@ cache_zap(ncp)
rw_assert(&cache_lock, RA_WLOCKED);
CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp, ncp->nc_vp);
-#ifdef KDTRACE_HOOKS
if (ncp->nc_vp != NULL) {
SDT_PROBE3(vfs, namecache, zap, done, ncp->nc_dvp,
nc_get_name(ncp), ncp->nc_vp);
@@ -426,7 +425,6 @@ cache_zap(ncp)
SDT_PROBE2(vfs, namecache, zap_negative, done, ncp->nc_dvp,
nc_get_name(ncp));
}
-#endif
vp = NULL;
LIST_REMOVE(ncp, nc_hash);
if (ncp->nc_flag & NCF_ISDOTDOT) {
@@ -460,7 +458,7 @@ cache_zap(ncp)
* cnp pointing to the name of the entry being sought. If the lookup
* succeeds, the vnode is returned in *vpp, and a status of -1 is
* returned. If the lookup determines that the name does not exist
- * (negative cacheing), a status of ENOENT is returned. If the lookup
+ * (negative caching), a status of ENOENT is returned. If the lookup
* fails, a status of zero is returned. If the directory vnode is
* recycled out from under us due to a forced unmount, a status of
* ENOENT is returned.
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
index 362ebb7b..fe3d5db 100644
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -540,7 +540,7 @@ cluster_callback(bp)
int error = 0;
/*
- * Must propogate errors to all the components.
+ * Must propagate errors to all the components.
*/
if (bp->b_ioflags & BIO_ERROR)
error = bp->b_error;
diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c
index 8236f32..123ef54 100644
--- a/sys/kern/vfs_lookup.c
+++ b/sys/kern/vfs_lookup.c
@@ -1127,7 +1127,7 @@ NDFREE(struct nameidata *ndp, const u_int flags)
* Determine if there is a suitable alternate filename under the specified
* prefix for the specified path. If the create flag is set, then the
* alternate prefix will be used so long as the parent directory exists.
- * This is used by the various compatiblity ABIs so that Linux binaries prefer
+ * This is used by the various compatibility ABIs so that Linux binaries prefer
* files under /compat/linux for example. The chosen path (whether under
* the prefix or under /) is returned in a kernel malloc'd buffer pointed
* to by pathbuf. The caller is responsible for free'ing the buffer from
diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c
index 3ca995f..f5f522e 100644
--- a/sys/kern/vfs_mount.c
+++ b/sys/kern/vfs_mount.c
@@ -1222,7 +1222,6 @@ dounmount(struct mount *mp, int flags, struct thread *td)
VI_LOCK(coveredvp);
vholdl(coveredvp);
vn_lock(coveredvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
- vdrop(coveredvp);
/*
* Check for mp being unmounted while waiting for the
* covered vnode lock.
@@ -1230,18 +1229,22 @@ dounmount(struct mount *mp, int flags, struct thread *td)
if (coveredvp->v_mountedhere != mp ||
coveredvp->v_mountedhere->mnt_gen != mnt_gen_r) {
VOP_UNLOCK(coveredvp, 0);
+ vdrop(coveredvp);
vfs_rel(mp);
return (EBUSY);
}
}
+
/*
* Only privileged root, or (if MNT_USER is set) the user that did the
* original mount is permitted to unmount this filesystem.
*/
error = vfs_suser(mp, td);
if (error != 0) {
- if (coveredvp)
+ if (coveredvp != NULL) {
VOP_UNLOCK(coveredvp, 0);
+ vdrop(coveredvp);
+ }
vfs_rel(mp);
return (error);
}
@@ -1251,8 +1254,10 @@ dounmount(struct mount *mp, int flags, struct thread *td)
if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 ||
!TAILQ_EMPTY(&mp->mnt_uppers)) {
MNT_IUNLOCK(mp);
- if (coveredvp)
+ if (coveredvp != NULL) {
VOP_UNLOCK(coveredvp, 0);
+ vdrop(coveredvp);
+ }
vn_finished_write(mp);
return (EBUSY);
}
@@ -1285,6 +1290,16 @@ dounmount(struct mount *mp, int flags, struct thread *td)
if (mp->mnt_flag & MNT_EXPUBLIC)
vfs_setpublicfs(NULL, NULL, NULL);
+ /*
+ * From now, we can claim that the use reference on the
+ * coveredvp is ours, and the ref can be released only by
+ * successfull unmount by us, or left for later unmount
+ * attempt. The previously acquired hold reference is no
+ * longer needed to protect the vnode from reuse.
+ */
+ if (coveredvp != NULL)
+ vdrop(coveredvp);
+
vfs_msync(mp, MNT_WAIT);
MNT_ILOCK(mp);
async_flag = mp->mnt_flag & MNT_ASYNC;
diff --git a/sys/kern/vfs_mountroot.c b/sys/kern/vfs_mountroot.c
index 184976a..12cddff 100644
--- a/sys/kern/vfs_mountroot.c
+++ b/sys/kern/vfs_mountroot.c
@@ -79,7 +79,7 @@ __FBSDID("$FreeBSD$");
*
* If the environment variable vfs.root.mountfrom is a space separated list,
* each list element is tried in turn and the root filesystem will be mounted
- * from the first one that suceeds.
+ * from the first one that succeeds.
*
* The environment variable vfs.root.mountfrom.options is a comma delimited
* set of string mount options. These mount options must be parseable
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index a137217..e0e7205 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -501,7 +501,7 @@ vfs_busy(struct mount *mp, int flags)
MNT_ILOCK(mp);
MNT_REF(mp);
/*
- * If mount point is currenly being unmounted, sleep until the
+ * If mount point is currently being unmounted, sleep until the
* mount point fate is decided. If thread doing the unmounting fails,
* it will clear MNTK_UNMOUNT flag before waking us up, indicating
* that this mount point has survived the unmount attempt and vfs_busy
@@ -797,7 +797,7 @@ vattr_null(struct vattr *vap)
* the buffer cache may have references on the vnode, a directory
* vnode may still have references due to the namei cache representing
* underlying files, or the vnode may be in active use. It is not
- * desireable to reuse such vnodes. These conditions may cause the
+ * desirable to reuse such vnodes. These conditions may cause the
* number of vnodes to reach some minimum value regardless of what
* you set kern.maxvnodes to. Do not set kern.maxvnodes too low.
*/
@@ -3669,7 +3669,7 @@ destroy_vpollinfo(struct vpollinfo *vi)
}
/*
- * Initalize per-vnode helper structure to hold poll-related state.
+ * Initialize per-vnode helper structure to hold poll-related state.
*/
void
v_addpollinfo(struct vnode *vp)
@@ -4080,7 +4080,7 @@ extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred,
#ifdef DEBUG_VFS_LOCKS
/*
- * This only exists to supress warnings from unlocked specfs accesses. It is
+ * This only exists to suppress warnings from unlocked specfs accesses. It is
* no longer ok to have an unlocked VFS.
*/
#define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \
diff --git a/sys/kern/vnode_if.src b/sys/kern/vnode_if.src
index fbffffe..e571a5f 100644
--- a/sys/kern/vnode_if.src
+++ b/sys/kern/vnode_if.src
@@ -35,7 +35,7 @@
# is a specification of the locking protocol used by each vop call.
# The first column is the name of the variable, the remaining three
# columns are in, out and error respectively. The "in" column defines
-# the lock state on input, the "out" column defines the state on succesful
+# the lock state on input, the "out" column defines the state on successful
# return, and the "error" column defines the locking state on error exit.
#
# The locking value can take the following values: