summary | refs | log | tree | commit | diff | stats
path: root/sys/kern/kern_event.c
diff options
context:
space:
mode:
author: alfred <alfred@FreeBSD.org> 2002-01-13 11:58:06 +0000
committer: alfred <alfred@FreeBSD.org> 2002-01-13 11:58:06 +0000
commit: 844237b3960bfbf49070d6371a84f67f9e3366f6 (patch)
tree: 598e20df363e602313c7ad93de8f8c4b4240d61d /sys/kern/kern_event.c
parent: 8cd61193307ff459ae72eb7aa6a734eb5e3b427e (diff)
download: FreeBSD-src-844237b3960bfbf49070d6371a84f67f9e3366f6.zip
download: FreeBSD-src-844237b3960bfbf49070d6371a84f67f9e3366f6.tar.gz
SMP Lock struct file, filedesc and the global file list.
Seigo Tanimura (tanimura) posted the initial delta. I've polished it quite a bit reducing the need for locking and adapting it for KSE. Locks: 1 mutex in each filedesc protects all the fields. protects "struct file" initialization, while a struct file is being changed from &badfileops -> &pipeops or something the filedesc should be locked. 1 mutex in each struct file protects the refcount fields. doesn't protect anything else. the flags used for garbage collection have been moved to f_gcflag which was the FILLER short, this doesn't need locking because the garbage collection is a single threaded container. could likely be made to use a pool mutex. 1 sx lock for the global filelist. struct file * fhold(struct file *fp); /* increments reference count on a file */ struct file * fhold_locked(struct file *fp); /* like fhold but expects file to be locked */ struct file * ffind_hold(struct thread *, int fd); /* finds the struct file in thread, adds one reference and returns it unlocked */ struct file * ffind_lock(struct thread *, int fd); /* ffind_hold, but returns file locked */ I still have to smp-safe the fget cruft, I'll get to that asap.
Diffstat (limited to 'sys/kern/kern_event.c')
-rw-r--r--  sys/kern/kern_event.c | 92
1 file changed, 71 insertions(+), 21 deletions(-)
diff --git a/sys/kern/kern_event.c b/sys/kern/kern_event.c
index 6bec056..038b233 100644
--- a/sys/kern/kern_event.c
+++ b/sys/kern/kern_event.c
@@ -372,15 +372,20 @@ kqueue(struct thread *td, struct kqueue_args *uap)
error = falloc(td, &fp, &fd);
if (error)
goto done2;
+ kq = malloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
+ TAILQ_INIT(&kq->kq_head);
+ FILE_LOCK(fp);
fp->f_flag = FREAD | FWRITE;
fp->f_type = DTYPE_KQUEUE;
fp->f_ops = &kqueueops;
- kq = malloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
TAILQ_INIT(&kq->kq_head);
fp->f_data = (caddr_t)kq;
+ FILE_UNLOCK(fp);
+ FILEDESC_LOCK(fdp);
td->td_retval[0] = fd;
if (fdp->fd_knlistsize < 0)
fdp->fd_knlistsize = 0; /* this process has a kq */
+ FILEDESC_UNLOCK(fdp);
kq->kq_fdp = fdp;
done2:
mtx_unlock(&Giant);
@@ -409,19 +414,19 @@ kevent(struct thread *td, struct kevent_args *uap)
struct timespec ts;
int i, n, nerrors, error;
- mtx_lock(&Giant);
- if ((error = fget(td, uap->fd, &fp)) != 0)
- goto done;
- if (fp->f_type != DTYPE_KQUEUE) {
- error = EBADF;
- goto done;
+ fp = ffind_hold(td, uap->fd);
+ if (fp == NULL || fp->f_type != DTYPE_KQUEUE) {
+ if (fp != NULL)
+ fdrop(fp, td);
+ return (EBADF);
}
if (uap->timeout != NULL) {
error = copyin(uap->timeout, &ts, sizeof(ts));
if (error)
- goto done;
+ goto done_nogiant;
uap->timeout = &ts;
}
+ mtx_lock(&Giant);
kq = (struct kqueue *)fp->f_data;
nerrors = 0;
@@ -462,9 +467,10 @@ kevent(struct thread *td, struct kevent_args *uap)
error = kqueue_scan(fp, uap->nevents, uap->eventlist, uap->timeout, td);
done:
+ mtx_unlock(&Giant);
+done_nogiant:
if (fp != NULL)
fdrop(fp, td);
- mtx_unlock(&Giant);
return (error);
}
@@ -521,11 +527,14 @@ kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td)
return (EINVAL);
}
+ FILEDESC_LOCK(fdp);
if (fops->f_isfd) {
/* validate descriptor */
if ((u_int)kev->ident >= fdp->fd_nfiles ||
- (fp = fdp->fd_ofiles[kev->ident]) == NULL)
+ (fp = fdp->fd_ofiles[kev->ident]) == NULL) {
+ FILEDESC_UNLOCK(fdp);
return (EBADF);
+ }
fhold(fp);
if (kev->ident < fdp->fd_knlistsize) {
@@ -547,6 +556,7 @@ kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td)
break;
}
}
+ FILEDESC_UNLOCK(fdp);
if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
error = ENOENT;
@@ -633,12 +643,15 @@ static int
kqueue_scan(struct file *fp, int maxevents, struct kevent *ulistp,
const struct timespec *tsp, struct thread *td)
{
- struct kqueue *kq = (struct kqueue *)fp->f_data;
+ struct kqueue *kq;
struct kevent *kevp;
struct timeval atv, rtv, ttv;
struct knote *kn, marker;
int s, count, timeout, nkev = 0, error = 0;
+ FILE_LOCK_ASSERT(fp, MA_NOTOWNED);
+
+ kq = (struct kqueue *)fp->f_data;
count = maxevents;
if (count == 0)
goto done;
@@ -788,10 +801,11 @@ kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct thread *td)
static int
kqueue_poll(struct file *fp, int events, struct ucred *cred, struct thread *td)
{
- struct kqueue *kq = (struct kqueue *)fp->f_data;
+ struct kqueue *kq;
int revents = 0;
int s = splnet();
+ kq = (struct kqueue *)fp->f_data;
if (events & (POLLIN | POLLRDNORM)) {
if (kq->kq_count) {
revents |= events & (POLLIN | POLLRDNORM);
@@ -808,8 +822,9 @@ kqueue_poll(struct file *fp, int events, struct ucred *cred, struct thread *td)
static int
kqueue_stat(struct file *fp, struct stat *st, struct thread *td)
{
- struct kqueue *kq = (struct kqueue *)fp->f_data;
+ struct kqueue *kq;
+ kq = (struct kqueue *)fp->f_data;
bzero((void *)st, sizeof(*st));
st->st_size = kq->kq_count;
st->st_blksize = sizeof(struct kevent);
@@ -826,6 +841,7 @@ kqueue_close(struct file *fp, struct thread *td)
struct knote **knp, *kn, *kn0;
int i;
+ FILEDESC_LOCK(fdp);
for (i = 0; i < fdp->fd_knlistsize; i++) {
knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
kn = *knp;
@@ -833,9 +849,12 @@ kqueue_close(struct file *fp, struct thread *td)
kn0 = SLIST_NEXT(kn, kn_link);
if (kq == kn->kn_kq) {
kn->kn_fop->f_detach(kn);
- fdrop(kn->kn_fp, td);
- knote_free(kn);
*knp = kn0;
+ FILE_LOCK(kn->kn_fp);
+ FILEDESC_UNLOCK(fdp);
+ fdrop_locked(kn->kn_fp, td);
+ knote_free(kn);
+ FILEDESC_LOCK(fdp);
} else {
knp = &SLIST_NEXT(kn, kn_link);
}
@@ -850,9 +869,11 @@ kqueue_close(struct file *fp, struct thread *td)
kn0 = SLIST_NEXT(kn, kn_link);
if (kq == kn->kn_kq) {
kn->kn_fop->f_detach(kn);
+ *knp = kn0;
/* XXX non-fd release of kn->kn_ptr */
+ FILEDESC_UNLOCK(fdp);
knote_free(kn);
- *knp = kn0;
+ FILEDESC_LOCK(fdp);
} else {
knp = &SLIST_NEXT(kn, kn_link);
}
@@ -860,6 +881,7 @@ kqueue_close(struct file *fp, struct thread *td)
}
}
}
+ FILEDESC_UNLOCK(fdp);
free(kq, M_KQUEUE);
fp->f_data = NULL;
@@ -915,16 +937,21 @@ void
knote_fdclose(struct thread *td, int fd)
{
struct filedesc *fdp = td->td_proc->p_fd;
- struct klist *list = &fdp->fd_knlist[fd];
+ struct klist *list;
+ FILEDESC_LOCK(fdp);
+ list = &fdp->fd_knlist[fd];
+ FILEDESC_UNLOCK(fdp);
knote_remove(td, list);
}
static void
knote_attach(struct knote *kn, struct filedesc *fdp)
{
- struct klist *list;
- int size;
+ struct klist *list, *oldlist;
+ int size, newsize;
+
+ FILEDESC_LOCK(fdp);
if (! kn->kn_fop->f_isfd) {
if (fdp->fd_knhashmask == 0)
@@ -935,23 +962,42 @@ knote_attach(struct knote *kn, struct filedesc *fdp)
}
if (fdp->fd_knlistsize <= kn->kn_id) {
+retry:
size = fdp->fd_knlistsize;
while (size <= kn->kn_id)
size += KQEXTENT;
+ FILEDESC_UNLOCK(fdp);
MALLOC(list, struct klist *,
size * sizeof(struct klist *), M_KQUEUE, M_WAITOK);
+ FILEDESC_LOCK(fdp);
+ newsize = fdp->fd_knlistsize;
+ while (newsize <= kn->kn_id)
+ newsize += KQEXTENT;
+ if (newsize != size) {
+ FILEDESC_UNLOCK(fdp);
+ free(list, M_TEMP);
+ FILEDESC_LOCK(fdp);
+ goto retry;
+ }
bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
fdp->fd_knlistsize * sizeof(struct klist *));
bzero((caddr_t)list +
fdp->fd_knlistsize * sizeof(struct klist *),
(size - fdp->fd_knlistsize) * sizeof(struct klist *));
if (fdp->fd_knlist != NULL)
- FREE(fdp->fd_knlist, M_KQUEUE);
+ oldlist = fdp->fd_knlist;
+ else
+ oldlist = NULL;
fdp->fd_knlistsize = size;
fdp->fd_knlist = list;
+ FILEDESC_UNLOCK(fdp);
+ if (oldlist != NULL)
+ FREE(oldlist, M_KQUEUE);
+ FILEDESC_LOCK(fdp);
}
list = &fdp->fd_knlist[kn->kn_id];
done:
+ FILEDESC_UNLOCK(fdp);
SLIST_INSERT_HEAD(list, kn, kn_link);
kn->kn_status = 0;
}
@@ -966,16 +1012,20 @@ knote_drop(struct knote *kn, struct thread *td)
struct filedesc *fdp = td->td_proc->p_fd;
struct klist *list;
+ FILEDESC_LOCK(fdp);
if (kn->kn_fop->f_isfd)
list = &fdp->fd_knlist[kn->kn_id];
else
list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
+ if (kn->kn_fop->f_isfd)
+ FILE_LOCK(kn->kn_fp);
+ FILEDESC_UNLOCK(fdp);
SLIST_REMOVE(list, kn, knote, kn_link);
if (kn->kn_status & KN_QUEUED)
knote_dequeue(kn);
if (kn->kn_fop->f_isfd)
- fdrop(kn->kn_fp, td);
+ fdrop_locked(kn->kn_fp, td);
knote_free(kn);
}
OpenPOWER on IntegriCloud