author    | Renato Botelho <renato@netgate.com> | 2016-08-01 12:53:16 -0300
committer | Renato Botelho <renato@netgate.com> | 2016-08-01 12:53:16 -0300
commit    | 80c20d0bef69a2c543d1bc2dddd2bc34198fec9b (patch)
tree      | fc6009a2d49da5ab043d4c847a288f71ec6aa731 /sys
parent    | f235fecdc77c17505022bc5202d74f3d36b33359 (diff)
parent    | eed7d9e93aec04a3f6a7d157c4cac7452a6c1727 (diff)
Merge remote-tracking branch 'origin/stable/11' into devel-11
Diffstat (limited to 'sys')
-rw-r--r-- | sys/conf/newvers.sh         |  2
-rw-r--r-- | sys/dev/pty/pty.c           | 13
-rw-r--r-- | sys/dev/urtwn/if_urtwn.c    |  4
-rw-r--r-- | sys/dev/urtwn/if_urtwnreg.h |  2
-rw-r--r-- | sys/kern/kern_exec.c        |  2
-rw-r--r-- | sys/kern/kern_mutex.c       | 28
-rw-r--r-- | sys/kern/subr_prf.c         | 21
-rw-r--r-- | sys/kern/subr_trap.c        | 55
-rw-r--r-- | sys/kern/vfs_aio.c          | 23
-rw-r--r-- | sys/sys/mutex.h             | 44
-rw-r--r-- | sys/sys/proc.h              |  2
-rw-r--r-- | sys/sys/systm.h             |  2
-rw-r--r-- | sys/ufs/ufs/ufs_lookup.c    |  5
-rw-r--r-- | sys/ufs/ufs/ufs_vnops.c     |  2
14 files changed, 181 insertions, 24 deletions
diff --git a/sys/conf/newvers.sh b/sys/conf/newvers.sh
index 84dde7a..c3662e2 100644
--- a/sys/conf/newvers.sh
+++ b/sys/conf/newvers.sh
@@ -32,7 +32,7 @@

 TYPE="FreeBSD"
 REVISION="11.0"
-BRANCH="BETA2"
+BRANCH="BETA3"
 if [ -n "${BRANCH_OVERRIDE}" ]; then
         BRANCH=${BRANCH_OVERRIDE}
 fi
diff --git a/sys/dev/pty/pty.c b/sys/dev/pty/pty.c
index 5036cb2..ad34e11 100644
--- a/sys/dev/pty/pty.c
+++ b/sys/dev/pty/pty.c
@@ -52,10 +52,10 @@ __FBSDID("$FreeBSD$");
  * binary emulation.
  */

-static unsigned int pty_warningcnt = 1;
+static unsigned pty_warningcnt = 1;
 SYSCTL_UINT(_kern, OID_AUTO, tty_pty_warningcnt, CTLFLAG_RW,
-    &pty_warningcnt, 0,
-    "Warnings that will be triggered upon legacy PTY allocation");
+    &pty_warningcnt, 0,
+    "Warnings that will be triggered upon legacy PTY allocation");

 static int
 ptydev_fdopen(struct cdev *dev, int fflags, struct thread *td, struct file *fp)
@@ -77,12 +77,7 @@ ptydev_fdopen(struct cdev *dev, int fflags, struct thread *td, struct file *fp)
         }

         /* Raise a warning when a legacy PTY has been allocated. */
-        if (pty_warningcnt > 0) {
-                pty_warningcnt--;
-                log(LOG_INFO, "pid %d (%s) is using legacy pty devices%s\n",
-                    td->td_proc->p_pid, td->td_name,
-                    pty_warningcnt ? "" : " - not logging anymore");
-        }
+        counted_warning(&pty_warningcnt, "is using legacy pty devices");

         return (0);
 }
diff --git a/sys/dev/urtwn/if_urtwn.c b/sys/dev/urtwn/if_urtwn.c
index f5d3818..72f3e67 100644
--- a/sys/dev/urtwn/if_urtwn.c
+++ b/sys/dev/urtwn/if_urtwn.c
@@ -2318,6 +2318,10 @@ urtwn_key_set_cb(struct urtwn_softc *sc, union sec_param *data)
             k->wk_cipher->ic_cipher, algo, k->wk_flags, k->wk_keylen,
             ether_sprintf(k->wk_macaddr));

+        /* Clear high bits. */
+        urtwn_cam_write(sc, R92C_CAM_CTL6(k->wk_keyix), 0);
+        urtwn_cam_write(sc, R92C_CAM_CTL7(k->wk_keyix), 0);
+
         /* Write key. */
         for (i = 0; i < 4; i++) {
                 error = urtwn_cam_write(sc, R92C_CAM_KEY(k->wk_keyix, i),
diff --git a/sys/dev/urtwn/if_urtwnreg.h b/sys/dev/urtwn/if_urtwnreg.h
index e80fd07..aff9b13 100644
--- a/sys/dev/urtwn/if_urtwnreg.h
+++ b/sys/dev/urtwn/if_urtwnreg.h
@@ -871,6 +871,8 @@
 #define R92C_CAM_CTL0(entry)    ((entry) * 8 + 0)
 #define R92C_CAM_CTL1(entry)    ((entry) * 8 + 1)
 #define R92C_CAM_KEY(entry, i)  ((entry) * 8 + 2 + (i))
+#define R92C_CAM_CTL6(entry)    ((entry) * 8 + 6)
+#define R92C_CAM_CTL7(entry)    ((entry) * 8 + 7)

 /* Bits for R92C_CAM_CTL0(i). */
 #define R92C_CAM_KEYID_M        0x00000003
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 844f1ed..6466a3e 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -759,6 +759,8 @@ interpret:
         if (p->p_flag & P_PPWAIT) {
                 p->p_flag &= ~(P_PPWAIT | P_PPTRACE);
                 cv_broadcast(&p->p_pwait);
+                /* STOPs are no longer ignored, arrange for AST */
+                signotify(td);
         }

         /*
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 012cf7c..453add4 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -281,6 +281,34 @@ __mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
         WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
 }

+int
+__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
+    int line)
+{
+        struct mtx *m;
+
+        if (SCHEDULER_STOPPED())
+                return (1);
+
+        m = mtxlock2mtx(c);
+
+        KASSERT(m->mtx_lock != MTX_DESTROYED,
+            ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
+        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
+            ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
+            m->lock_object.lo_name, file, line));
+        KASSERT((opts & MTX_RECURSE) == 0,
+            ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
+            m->lock_object.lo_name, file, line));
+        if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
+                LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
+                WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
+                return (1);
+        }
+        LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
+        return (0);
+}
+
 void
 __mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
     int line)
diff --git a/sys/kern/subr_prf.c b/sys/kern/subr_prf.c
index 5bcf39b..3784ffd 100644
--- a/sys/kern/subr_prf.c
+++ b/sys/kern/subr_prf.c
@@ -1196,3 +1196,24 @@ sbuf_hexdump(struct sbuf *sb, const void *ptr, int length, const char *hdr,
         }
 }

+#ifdef _KERNEL
+void
+counted_warning(unsigned *counter, const char *msg)
+{
+        struct thread *td;
+        unsigned c;
+
+        for (;;) {
+                c = *counter;
+                if (c == 0)
+                        break;
+                if (atomic_cmpset_int(counter, c, c - 1)) {
+                        td = curthread;
+                        log(LOG_INFO, "pid %d (%s) %s%s\n",
+                            td->td_proc->p_pid, td->td_name, msg,
+                            c > 1 ? "" : " - not logging anymore");
+                        break;
+                }
+        }
+}
+#endif
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index eb44087..763ba0d 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -101,17 +101,29 @@ userret(struct thread *td, struct trapframe *frame)
             td->td_name);
         KASSERT((p->p_flag & P_WEXIT) == 0,
             ("Exiting process returns to usermode"));
-#if 0
 #ifdef DIAGNOSTIC
-        /* Check that we called signotify() enough. */
-        PROC_LOCK(p);
-        thread_lock(td);
-        if (SIGPENDING(td) && ((td->td_flags & TDF_NEEDSIGCHK) == 0 ||
-            (td->td_flags & TDF_ASTPENDING) == 0))
-                printf("failed to set signal flags properly for ast()\n");
-        thread_unlock(td);
-        PROC_UNLOCK(p);
-#endif
+        /*
+         * Check that we called signotify() enough.  For
+         * multi-threaded processes, where signal distribution might
+         * change due to other threads changing sigmask, the check is
+         * racy and cannot be performed reliably.
+         * If current process is vfork child, indicated by P_PPWAIT, then
+         * issignal() ignores stops, so we block the check to avoid
+         * classifying pending signals.
+         */
+        if (p->p_numthreads == 1) {
+                PROC_LOCK(p);
+                thread_lock(td);
+                if ((p->p_flag & P_PPWAIT) == 0) {
+                        KASSERT(!SIGPENDING(td) || (td->td_flags &
+                            (TDF_NEEDSIGCHK | TDF_ASTPENDING)) ==
+                            (TDF_NEEDSIGCHK | TDF_ASTPENDING),
+                            ("failed to set signal flags for ast p %p "
+                            "td %p fl %x", p, td, td->td_flags));
+                }
+                thread_unlock(td);
+                PROC_UNLOCK(p);
+        }
 #endif
 #ifdef KTRACE
         KTRUSERRET(td);
@@ -265,6 +277,29 @@ ast(struct trapframe *framep)
 #endif
         }

+#ifdef DIAGNOSTIC
+        if (p->p_numthreads == 1 && (flags & TDF_NEEDSIGCHK) == 0) {
+                PROC_LOCK(p);
+                thread_lock(td);
+                /*
+                 * Note that TDF_NEEDSIGCHK should be re-read from
+                 * td_flags, since signal might have been delivered
+                 * after we cleared td_flags above.  This is one of
+                 * the reason for looping check for AST condition.
+                 * See comment in userret() about P_PPWAIT.
+                 */
+                if ((p->p_flag & P_PPWAIT) == 0) {
+                        KASSERT(!SIGPENDING(td) || (td->td_flags &
+                            (TDF_NEEDSIGCHK | TDF_ASTPENDING)) ==
+                            (TDF_NEEDSIGCHK | TDF_ASTPENDING),
+                            ("failed2 to set signal flags for ast p %p td %p "
+                            "fl %x %x", p, td, flags, td->td_flags));
+                }
+                thread_unlock(td);
+                PROC_UNLOCK(p);
+        }
+#endif
+
         /*
          * Check for signals.  Unlocked reads of p_pendingcnt or
          * p_siglist might cause process-directed signal to be handled
diff --git a/sys/kern/vfs_aio.c b/sys/kern/vfs_aio.c
index 3c9aa44..ead5e7c 100644
--- a/sys/kern/vfs_aio.c
+++ b/sys/kern/vfs_aio.c
@@ -53,6 +53,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/syscall.h>
 #include <sys/sysent.h>
 #include <sys/sysctl.h>
+#include <sys/syslog.h>
 #include <sys/sx.h>
 #include <sys/taskqueue.h>
 #include <sys/vnode.h>
@@ -110,6 +111,11 @@ static int enable_aio_unsafe = 0;
 SYSCTL_INT(_vfs_aio, OID_AUTO, enable_unsafe, CTLFLAG_RW, &enable_aio_unsafe, 0,
     "Permit asynchronous IO on all file types, not just known-safe types");

+static unsigned int unsafe_warningcnt = 1;
+SYSCTL_UINT(_vfs_aio, OID_AUTO, unsafe_warningcnt, CTLFLAG_RW,
+    &unsafe_warningcnt, 0,
+    "Warnings that will be triggered upon failed IO requests on unsafe files");
+
 static int max_aio_procs = MAX_AIO_PROCS;
 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs, CTLFLAG_RW, &max_aio_procs, 0,
     "Maximum number of kernel processes to use for handling async IO ");
@@ -1664,7 +1670,10 @@ aio_queue_file(struct file *fp, struct kaiocb *job)
         struct aioliojob *lj;
         struct kaioinfo *ki;
         struct kaiocb *job2;
+        struct vnode *vp;
+        struct mount *mp;
         int error, opcode;
+        bool safe;

         lj = job->lio;
         ki = job->userproc->p_aioinfo;
@@ -1685,8 +1694,20 @@ aio_queue_file(struct file *fp, struct kaiocb *job)
                 goto done;
 #endif
 queueit:
-        if (!enable_aio_unsafe)
+        safe = false;
+        if (fp->f_type == DTYPE_VNODE) {
+                vp = fp->f_vnode;
+                if (vp->v_type == VREG || vp->v_type == VDIR) {
+                        mp = fp->f_vnode->v_mount;
+                        if (mp == NULL || (mp->mnt_flag & MNT_LOCAL) != 0)
+                                safe = true;
+                }
+        }
+        if (!(safe || enable_aio_unsafe)) {
+                counted_warning(&unsafe_warningcnt,
+                    "is attempting to use unsafe AIO requests");
                 return (EOPNOTSUPP);
+        }

         if (opcode == LIO_SYNC) {
                 AIO_LOCK(ki);
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index 0443922..374aaab 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -112,6 +112,8 @@ void	__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file,
             int line);
 void    __mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
             int line);
+int     __mtx_trylock_spin_flags(volatile uintptr_t *c, int opts,
+            const char *file, int line);
 void    __mtx_unlock_spin_flags(volatile uintptr_t *c, int opts,
             const char *file, int line);
 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
@@ -152,6 +154,8 @@ void	thread_lock_flags_(struct thread *, int, const char *, int);
         __mtx_unlock_flags(&(m)->mtx_lock, o, f, l)
 #define _mtx_lock_spin_flags(m, o, f, l)                                \
         __mtx_lock_spin_flags(&(m)->mtx_lock, o, f, l)
+#define _mtx_trylock_spin_flags(m, o, f, l)                             \
+        __mtx_trylock_spin_flags(&(m)->mtx_lock, o, f, l)
 #define _mtx_unlock_spin_flags(m, o, f, l)                              \
         __mtx_unlock_spin_flags(&(m)->mtx_lock, o, f, l)
 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
@@ -212,6 +216,21 @@ void	thread_lock_flags_(struct thread *, int, const char *, int);
                 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,     \
                     mp, 0, 0, file, line);                              \
 } while (0)
+#define __mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({ \
+        uintptr_t _tid = (uintptr_t)(tid);                              \
+        int _ret;                                                       \
+                                                                        \
+        spinlock_enter();                                               \
+        if (((mp)->mtx_lock != MTX_UNOWNED || !_mtx_obtain_lock((mp), _tid))) {\
+                spinlock_exit();                                        \
+                _ret = 0;                                               \
+        } else {                                                        \
+                LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,     \
+                    mp, 0, 0, file, line);                              \
+                _ret = 1;                                               \
+        }                                                               \
+        _ret;                                                           \
+})
 #else /* SMP */
 #define __mtx_lock_spin(mp, tid, opts, file, line) do {                 \
         uintptr_t _tid = (uintptr_t)(tid);                              \
@@ -224,6 +243,20 @@ void	thread_lock_flags_(struct thread *, int, const char *, int);
                 (mp)->mtx_lock = _tid;                                  \
         }                                                               \
 } while (0)
+#define __mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({ \
+        uintptr_t _tid = (uintptr_t)(tid);                              \
+        int _ret;                                                       \
+                                                                        \
+        spinlock_enter();                                               \
+        if ((mp)->mtx_lock != MTX_UNOWNED) {                            \
+                spinlock_exit();                                        \
+                _ret = 0;                                               \
+        } else {                                                        \
+                (mp)->mtx_lock = _tid;                                  \
+                _ret = 1;                                               \
+        }                                                               \
+        _ret;                                                           \
+})
 #endif /* SMP */

 /* Unlock a normal mutex. */
@@ -293,6 +326,10 @@ void	thread_lock_flags_(struct thread *, int, const char *, int);
  * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but accepts
  * relevant option flags `opts.'
  *
+ * mtx_trylock_spin(m) attempts to acquire MTX_SPIN mutex `m' but doesn't
+ * spin if it cannot.  Rather, it returns 0 on failure and non-zero on
+ * success.  It always returns failure for recursed lock attempts.
+ *
  * mtx_initialized(m) returns non-zero if the lock `m' has been initialized.
  *
  * mtx_owned(m) returns non-zero if the current thread owns the lock `m'
@@ -302,6 +339,7 @@ void	thread_lock_flags_(struct thread *, int, const char *, int);
 #define mtx_lock(m)             mtx_lock_flags((m), 0)
 #define mtx_lock_spin(m)        mtx_lock_spin_flags((m), 0)
 #define mtx_trylock(m)          mtx_trylock_flags((m), 0)
+#define mtx_trylock_spin(m)     mtx_trylock_spin_flags((m), 0)
 #define mtx_unlock(m)           mtx_unlock_flags((m), 0)
 #define mtx_unlock_spin(m)      mtx_unlock_spin_flags((m), 0)

@@ -335,6 +373,8 @@ extern struct mtx_pool *mtxpool_sleep;
         _mtx_unlock_flags((m), (opts), (file), (line))
 #define mtx_lock_spin_flags_(m, opts, file, line)                       \
         _mtx_lock_spin_flags((m), (opts), (file), (line))
+#define mtx_trylock_spin_flags_(m, opts, file, line)                    \
+        _mtx_trylock_spin_flags((m), (opts), (file), (line))
 #define mtx_unlock_spin_flags_(m, opts, file, line)                     \
         _mtx_unlock_spin_flags((m), (opts), (file), (line))
 #else   /* LOCK_DEBUG == 0 && !MUTEX_NOINLINE */
@@ -344,6 +384,8 @@ extern struct mtx_pool *mtxpool_sleep;
         __mtx_unlock((m), curthread, (opts), (file), (line))
 #define mtx_lock_spin_flags_(m, opts, file, line)                       \
         __mtx_lock_spin((m), curthread, (opts), (file), (line))
+#define mtx_trylock_spin_flags_(m, opts, file, line)                    \
+        __mtx_trylock_spin((m), curthread, (opts), (file), (line))
 #define mtx_unlock_spin_flags_(m, opts, file, line)                     \
         __mtx_unlock_spin((m))
 #endif  /* LOCK_DEBUG > 0 || MUTEX_NOINLINE */
@@ -369,6 +411,8 @@ extern struct mtx_pool *mtxpool_sleep;
         mtx_unlock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
 #define mtx_trylock_flags(m, opts)                                      \
         mtx_trylock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
+#define mtx_trylock_spin_flags(m, opts)                                 \
+        mtx_trylock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
 #define mtx_assert(m, what)                                             \
         mtx_assert_((m), (what), __FILE__, __LINE__)

diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index f533db6..1550742 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -739,7 +739,7 @@ struct proc {
 #define SW_TYPE_MASK            0xff    /* First 8 bits are switch type */
 #define SWT_NONE                0       /* Unspecified switch. */
 #define SWT_PREEMPT             1       /* Switching due to preemption. */
-#define SWT_OWEPREEMPT          2       /* Switching due to opepreempt. */
+#define SWT_OWEPREEMPT          2       /* Switching due to owepreempt. */
 #define SWT_TURNSTILE           3       /* Turnstile contention. */
 #define SWT_SLEEPQ              4       /* Sleepq wait. */
 #define SWT_SLEEPQTIMO          5       /* Sleepq timeout wait. */
diff --git a/sys/sys/systm.h b/sys/sys/systm.h
index dae6adc..f47ba2d 100644
--- a/sys/sys/systm.h
+++ b/sys/sys/systm.h
@@ -447,4 +447,6 @@ void	intr_prof_stack_use(struct thread *td, struct trapframe *frame);

 extern void (*softdep_ast_cleanup)(void);

+void counted_warning(unsigned *counter, const char *msg);
+
 #endif /* !_SYS_SYSTM_H_ */
diff --git a/sys/ufs/ufs/ufs_lookup.c b/sys/ufs/ufs/ufs_lookup.c
index 53536ff..29d96a0 100644
--- a/sys/ufs/ufs/ufs_lookup.c
+++ b/sys/ufs/ufs/ufs_lookup.c
@@ -881,6 +881,7 @@ ufs_direnter(dvp, tvp, dirp, cnp, newdirbp, isrename)
         struct buf *bp;
         u_int dsize;
         struct direct *ep, *nep;
+        u_int64_t old_isize;
         int error, ret, blkoff, loc, spacefree, flags, namlen;
         char *dirbuf;

@@ -909,16 +910,18 @@ ufs_direnter(dvp, tvp, dirp, cnp, newdirbp, isrename)
                         return (error);
                 }
 #endif
+                old_isize = dp->i_size;
+                vnode_pager_setsize(dvp, (u_long)dp->i_offset + DIRBLKSIZ);
                 if ((error = UFS_BALLOC(dvp, (off_t)dp->i_offset, DIRBLKSIZ,
                     cr, flags, &bp)) != 0) {
                         if (DOINGSOFTDEP(dvp) && newdirbp != NULL)
                                 bdwrite(newdirbp);
+                        vnode_pager_setsize(dvp, (u_long)old_isize);
                         return (error);
                 }
                 dp->i_size = dp->i_offset + DIRBLKSIZ;
                 DIP_SET(dp, i_size, dp->i_size);
                 dp->i_flag |= IN_CHANGE | IN_UPDATE;
-                vnode_pager_setsize(dvp, (u_long)dp->i_size);
                 dirp->d_reclen = DIRBLKSIZ;
                 blkoff = dp->i_offset &
                     (VFSTOUFS(dvp->v_mount)->um_mountp->mnt_stat.f_iosize - 1);
diff --git a/sys/ufs/ufs/ufs_vnops.c b/sys/ufs/ufs/ufs_vnops.c
index 83df347..217ca90 100644
--- a/sys/ufs/ufs/ufs_vnops.c
+++ b/sys/ufs/ufs/ufs_vnops.c
@@ -1913,13 +1913,13 @@ ufs_mkdir(ap)
         dirtemplate = *dtp;
         dirtemplate.dot_ino = ip->i_number;
         dirtemplate.dotdot_ino = dp->i_number;
+        vnode_pager_setsize(tvp, DIRBLKSIZ);
         if ((error = UFS_BALLOC(tvp, (off_t)0, DIRBLKSIZ, cnp->cn_cred,
             BA_CLRBUF, &bp)) != 0)
                 goto bad;
         ip->i_size = DIRBLKSIZ;
         DIP_SET(ip, i_size, DIRBLKSIZ);
         ip->i_flag |= IN_CHANGE | IN_UPDATE;
-        vnode_pager_setsize(tvp, (u_long)ip->i_size);
         bcopy((caddr_t)&dirtemplate, (caddr_t)bp->b_data, sizeof dirtemplate);
         if (DOINGSOFTDEP(tvp)) {
                 /*
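
Note: this merge brings in mtx_trylock_spin() (the kern_mutex.c and sys/mutex.h hunks above) but adds no in-tree caller in this changeset. The following is a minimal sketch of how a consumer might use the new primitive; the names example_lock, example_counter, and example_poll are hypothetical and are not part of the commit.

```c
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/* Hypothetical spin mutex protecting a simple counter. */
static struct mtx example_lock;
MTX_SYSINIT(example_lock, &example_lock, "example spin lock", MTX_SPIN);

static int example_counter;

static void
example_poll(void)
{
	/*
	 * Unlike mtx_lock_spin(), mtx_trylock_spin() never busy-waits:
	 * it returns non-zero on success and 0 if the lock is already
	 * held (or would recurse), so callers that must not spin can
	 * simply skip the work and retry later.
	 */
	if (!mtx_trylock_spin(&example_lock))
		return;		/* lock busy; try again on the next pass */
	example_counter++;
	mtx_unlock_spin(&example_lock);
}
```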