author    jeff <jeff@FreeBSD.org>  2003-10-04 18:03:53 +0000
committer jeff <jeff@FreeBSD.org>  2003-10-04 18:03:53 +0000
commit    55547647ecb82bcf7ff95e5f63008eba907da828 (patch)
tree      83dd168181d7b6cf3bbcbb1dba6facd3c15d429a /sys/kern
parent    daf04438572f986b4e7158eb0be7b2f17ba95a78 (diff)
- In sched_sync() test our preconditions prior to dropping the sync_mtx.
  This is so that we may grab the interlock while still holding the
  sync_mtx.  We have to VI_TRYLOCK() because in all other cases the lock
  order runs the other way (see the sketch below).
- If we don't meet any of the preconditions, reinsert the vp into the
  list for the next second.
- We don't need to panic if we fail to sync here because each FSYNC
  function handles this case.  Removing this redundant code also
  simplifies locking.
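The first two points describe a classic trylock pattern: when the established
lock order takes the per-vnode interlock before sync_mtx, a thread that is
already holding sync_mtx may only VI_TRYLOCK() the vnode, and on failure it
defers the vnode to the next second's worklist rather than blocking.  Below is
a minimal userland sketch of the same pattern using pthreads; the names
(struct item, list_mtx, slot_cur, slot_next) are hypothetical illustrations,
not taken from the FreeBSD source.

    #include <pthread.h>

    /* Hypothetical work item standing in for a vnode. */
    struct item {
            pthread_mutex_t  mtx;   /* per-item lock (the "interlock") */
            struct item     *next;  /* linkage within a slot's list */
            int              dirty;
    };

    /*
     * list_mtx protects both slot lists.  The established lock order is
     * item->mtx before list_mtx, so while holding list_mtx we may only
     * trylock an item, never block on it.
     */
    static pthread_mutex_t list_mtx = PTHREAD_MUTEX_INITIALIZER;
    static struct item *slot_cur;   /* this pass's worklist */
    static struct item *slot_next;  /* next pass's worklist */

    static void
    process_slot(void)
    {
            struct item *ip;

            pthread_mutex_lock(&list_mtx);
            while ((ip = slot_cur) != NULL) {
                    if (pthread_mutex_trylock(&ip->mtx) != 0) {
                            /*
                             * Precondition failed: the lock order runs
                             * the other way, so do not block.  Reinsert
                             * into the next pass's list, as sched_sync()
                             * reinserts the vp for the next second.
                             */
                            slot_cur = ip->next;
                            ip->next = slot_next;
                            slot_next = ip;
                            continue;
                    }
                    /*
                     * Preconditions checked while still holding list_mtx;
                     * now it is safe to drop list_mtx and do the work.
                     */
                    slot_cur = ip->next;
                    pthread_mutex_unlock(&list_mtx);
                    if (ip->dirty)
                            ip->dirty = 0;  /* stand-in for VOP_FSYNC() */
                    pthread_mutex_unlock(&ip->mtx);
                    pthread_mutex_lock(&list_mtx);
            }
            pthread_mutex_unlock(&list_mtx);
    }

Blocking on ip->mtx while holding list_mtx could deadlock against a thread
that holds ip->mtx and is waiting for list_mtx; failing fast and requeueing
keeps the worker making forward progress, which is exactly why the commit
uses VI_TRYLOCK() before dropping sync_mtx.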
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/vfs_subr.c | 40
1 file changed, 18 insertions, 22 deletions
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 5457d84..aba67d7 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1681,6 +1681,7 @@ SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
static void
sched_sync(void)
{
+ struct synclist *next;
struct synclist *slp;
struct vnode *vp;
struct mount *mp;
@@ -1706,35 +1707,30 @@ sched_sync(void)
syncer_delayno += 1;
if (syncer_delayno == syncer_maxdelay)
syncer_delayno = 0;
+ next = &syncer_workitem_pending[syncer_delayno];
while ((vp = LIST_FIRST(slp)) != NULL) {
- /*
- * XXX we have no guarantees about this vnodes
- * identity due to a lack of interlock.
- */
- mtx_unlock(&sync_mtx);
- if (VOP_ISLOCKED(vp, NULL) == 0 &&
- vn_start_write(vp, &mp, V_NOWAIT) == 0) {
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
- (void) VOP_FSYNC(vp, td->td_ucred, MNT_LAZY, td);
- VOP_UNLOCK(vp, 0, td);
+ if (VOP_ISLOCKED(vp, NULL) != 0 ||
+ vn_start_write(vp, &mp, V_NOWAIT) != 0) {
+ LIST_REMOVE(vp, v_synclist);
+ LIST_INSERT_HEAD(next, vp, v_synclist);
+ continue;
+ }
+ if (VI_TRYLOCK(vp) == 0) {
+ LIST_REMOVE(vp, v_synclist);
+ LIST_INSERT_HEAD(next, vp, v_synclist);
vn_finished_write(mp);
+ continue;
}
+ mtx_unlock(&sync_mtx);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, td);
+ (void) VOP_FSYNC(vp, td->td_ucred, MNT_LAZY, td);
+ VOP_UNLOCK(vp, 0, td);
+ vn_finished_write(mp);
mtx_lock(&sync_mtx);
if (LIST_FIRST(slp) == vp) {
- mtx_unlock(&sync_mtx);
- /*
- * Note: VFS vnodes can remain on the
- * worklist too with no dirty blocks, but
- * since sync_fsync() moves it to a different
- * slot we are safe.
- */
VI_LOCK(vp);
- if (TAILQ_EMPTY(&vp->v_dirtyblkhd) &&
- !vn_isdisk(vp, NULL)) {
- panic("sched_sync: fsync failed "
- "vp %p tag %s", vp, vp->v_tag);
- }
+ mtx_unlock(&sync_mtx);
/*
* Put us back on the worklist. The worklist
* routine will remove us from our current