author		kib <kib@FreeBSD.org>	2009-06-25 18:54:56 +0000
committer	kib <kib@FreeBSD.org>	2009-06-25 18:54:56 +0000
commit		4ce38c42837137d49ef614d0c032e423b9d0d9f5 (patch)
tree		92e04075d212f2ab2ac88ad513f7d8675d71b079
parent		a7a5954511a4b948ae8218744bc2069641a3ab19 (diff)
In lf_iteratelocks_vnode, increment state->ls_threads while iterating
over the vnode's advisory lock list. This prevents deallocation of the
state while inside the loop.

Reported and tested by:	pho
MFC after:	2 weeks
 sys/kern/kern_lockf.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/sys/kern/kern_lockf.c b/sys/kern/kern_lockf.c
index bac7ac8..ddf6846 100644
--- a/sys/kern/kern_lockf.c
+++ b/sys/kern/kern_lockf.c
@@ -1937,9 +1937,14 @@ lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg)
 	 * make sure it doesn't go away before we are finished.
 	 */
 	STAILQ_INIT(&locks);
+	VI_LOCK(vp);
 	ls = vp->v_lockf;
-	if (!ls)
+	if (!ls) {
+		VI_UNLOCK(vp);
 		return (0);
+	}
+	ls->ls_threads++;
+	VI_UNLOCK(vp);
 
 	sx_xlock(&ls->ls_lock);
 	LIST_FOREACH(lf, &ls->ls_active, lf_link) {
@@ -1960,6 +1965,10 @@ lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg)
 		STAILQ_INSERT_TAIL(&locks, ldesc, link);
 	}
 	sx_xunlock(&ls->ls_lock);
+	VI_LOCK(vp);
+	ls->ls_threads--;
+	wakeup(ls);
+	VI_UNLOCK(vp);
 
 	/*
 	 * Call the iterator function for each lock in turn. If the
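
The guard this patch adds is an active-reference count: an iterator bumps
ls->ls_threads under the vnode interlock so that teardown will not free the
per-vnode lockf state until the count drains back to zero, and wakeup(ls)
notifies any thread waiting for that drain. The sketch below is a minimal,
hypothetical userland analogy of that pattern, not code from kern_lockf.c:
pthread mutex/condvar stand in for VI_LOCK()/VI_UNLOCK() and msleep()/wakeup(),
and the names lock_state, iterate_locks and purge_locks are invented for
illustration.

/*
 * Userland sketch (assumed names) of the ls_threads-style guard: iterators
 * take a reference under a mutex, teardown waits until the reference count
 * drains to zero before freeing the state.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct lock_state {
	pthread_mutex_t	interlock;	/* stands in for the vnode interlock */
	pthread_cond_t	drained;	/* stands in for wakeup(ls)/msleep() */
	int		threads;	/* stands in for ls->ls_threads */
	int		nlocks;		/* dummy payload to "iterate" over */
};

static void
iterate_locks(struct lock_state *ls)
{
	pthread_mutex_lock(&ls->interlock);
	ls->threads++;			/* pin the state while we iterate */
	pthread_mutex_unlock(&ls->interlock);

	/* ... walk the lock list here, as lf_iteratelocks_vnode() does ... */
	printf("iterating over %d locks\n", ls->nlocks);

	pthread_mutex_lock(&ls->interlock);
	ls->threads--;
	pthread_cond_broadcast(&ls->drained);	/* analogous to wakeup(ls) */
	pthread_mutex_unlock(&ls->interlock);
}

static void
purge_locks(struct lock_state *ls)
{
	pthread_mutex_lock(&ls->interlock);
	while (ls->threads > 0)			/* wait for iterators to finish */
		pthread_cond_wait(&ls->drained, &ls->interlock);
	pthread_mutex_unlock(&ls->interlock);
	pthread_cond_destroy(&ls->drained);
	pthread_mutex_destroy(&ls->interlock);
	free(ls);				/* now safe to tear the state down */
}

int
main(void)
{
	struct lock_state *ls = calloc(1, sizeof(*ls));

	pthread_mutex_init(&ls->interlock, NULL);
	pthread_cond_init(&ls->drained, NULL);
	ls->nlocks = 3;

	iterate_locks(ls);	/* single-threaded demo of the two sides */
	purge_locks(ls);
	return (0);
}

In the kernel, the draining side lives in the existing teardown path
(lf_purgelocks()), which waits for ls_threads to reach zero before freeing
the state; bumping the counter in lf_iteratelocks_vnode() is what keeps that
teardown from running out from under the iteration.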