author    mdf <mdf@FreeBSD.org>    2011-02-08 00:16:36 +0000
committer mdf <mdf@FreeBSD.org>    2011-02-08 00:16:36 +0000
commit    33ee365b5548dd6130fd6a2707e2169369e1fab6 (patch)
tree      3696b04084e29d2a5b666d5e3854af878bb7879b /sys
parent    93acd8b57328416a75419b8798763cecaa1bbb29 (diff)
Based on discussions on the svn-src mailing list, rework r218195:
- entirely eliminate some calls to uio_yield() as being unnecessary, such as in a sysctl handler.
- move should_yield() and maybe_yield() to kern_synch.c and move the prototypes from sys/uio.h to sys/proc.h.
- add a slightly more generic kern_yield() that can replace the functionality of uio_yield().
- replace source uses of uio_yield() with the functional equivalent, or in some cases do not change the thread priority when switching.
- fix a logic inversion bug in vlrureclaim(), pointed out by bde@.
- instead of using the per-cpu last switched ticks, use a per-thread variable for should_yield(). With PREEMPTION, the only reasonable use of this is to determine if a lock has been held a long time and relinquish it. Without PREEMPTION, this is essentially the same as the per-cpu variable.
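
Taken together, the new primitives give long-running kernel loops one uniform way to relinquish the CPU. Below is a minimal caller-side sketch (kernel context; scan_one_item() is a hypothetical stand-in for per-iteration work such as visiting one vnode):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

/* Hypothetical per-iteration work, e.g. visiting one vnode. */
static int	scan_one_item(void);

static void
long_running_scan(void)
{

	while (scan_one_item() != 0) {
		/*
		 * should_yield() is now per-thread: it compares ticks
		 * against td_swvoltick, the time of this thread's last
		 * voluntary (SW_VOL) context switch.
		 */
		if (should_yield())
			kern_yield(-1);		/* don't touch priority */
	}
}

maybe_yield() bundles the same check with a switch at td_user_pri, which is what the old uio_yield() callers effectively got.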
Diffstat (limited to 'sys')
-rw-r--r--  sys/dev/sio/sio.c          |  1
-rw-r--r--  sys/kern/kern_synch.c      | 35
-rw-r--r--  sys/kern/kern_sysctl.c     |  2
-rw-r--r--  sys/kern/subr_uio.c        | 26
-rw-r--r--  sys/kern/vfs_bio.c         |  2
-rw-r--r--  sys/kern/vfs_mount.c       |  2
-rw-r--r--  sys/kern/vfs_subr.c        |  8
-rw-r--r--  sys/kern/vfs_vnops.c       |  4
-rw-r--r--  sys/pc98/cbus/sio.c        |  1
-rw-r--r--  sys/sys/proc.h             |  4
-rw-r--r--  sys/sys/uio.h              |  2
-rw-r--r--  sys/ufs/ffs/ffs_softdep.c  |  2

12 files changed, 49 insertions, 40 deletions
diff --git a/sys/dev/sio/sio.c b/sys/dev/sio/sio.c
index 2eb2ad1..f1348c4 100644
--- a/sys/dev/sio/sio.c
+++ b/sys/dev/sio/sio.c
@@ -1466,7 +1466,6 @@ sysctl_siots(SYSCTL_HANDLER_ARGS)
error = SYSCTL_OUT(req, buf, len);
if (error != 0)
return (error);
- uio_yield();
}
return (0);
}
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index ddc2186..a3e920d 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -413,9 +413,10 @@ mi_switch(int flags, struct thread *newtd)
*/
if (kdb_active)
kdb_switch();
- if (flags & SW_VOL)
+ if (flags & SW_VOL) {
td->td_ru.ru_nvcsw++;
- else
+ td->td_swvoltick = ticks;
+ } else
td->td_ru.ru_nivcsw++;
#ifdef SCHED_STATS
SCHED_STAT_INC(sched_switch_stats[flags & SW_TYPE_MASK]);
@@ -538,6 +539,36 @@ synch_setup(void *dummy)
loadav(NULL);
}
+int
+should_yield(void)
+{
+
+ return (ticks - curthread->td_swvoltick >= hogticks);
+}
+
+void
+maybe_yield(void)
+{
+
+ if (should_yield())
+ kern_yield(curthread->td_user_pri);
+}
+
+void
+kern_yield(int prio)
+{
+ struct thread *td;
+
+ td = curthread;
+ DROP_GIANT();
+ thread_lock(td);
+ if (prio >= 0)
+ sched_prio(td, prio);
+ mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
+ thread_unlock(td);
+ PICKUP_GIANT();
+}
+
/*
* General purpose yield system call.
*/
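
The priority argument is what makes kern_yield() more general than uio_yield(): a non-negative value is applied with sched_prio() before the voluntary switch, while a negative value leaves td_priority alone. A sketch of the two call styles that appear in the hunks below:

#include <sys/param.h>
#include <sys/proc.h>

static void
yield_call_styles(void)
{

	/* Old uio_yield() behavior: demote to user priority first. */
	kern_yield(curthread->td_user_pri);

	/*
	 * Relinquish the CPU without changing priority; the hunks
	 * below use this for kernel daemons such as buf_daemon()
	 * and vnlru_proc(), where demoting the thread would be wrong.
	 */
	kern_yield(-1);
}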
diff --git a/sys/kern/kern_sysctl.c b/sys/kern/kern_sysctl.c
index cde8a0c..c061a47 100644
--- a/sys/kern/kern_sysctl.c
+++ b/sys/kern/kern_sysctl.c
@@ -1568,7 +1568,7 @@ userland_sysctl(struct thread *td, int *name, u_int namelen, void *old,
SYSCTL_XUNLOCK();
if (error != EAGAIN)
break;
- uio_yield();
+ kern_yield(curthread->td_user_pri);
}
CURVNET_RESTORE();
diff --git a/sys/kern/subr_uio.c b/sys/kern/subr_uio.c
index 96f9331..934a533 100644
--- a/sys/kern/subr_uio.c
+++ b/sys/kern/subr_uio.c
@@ -352,33 +352,11 @@ again:
return (0);
}
-int
-should_yield(void)
-{
-
- return (ticks - PCPU_GET(switchticks) >= hogticks);
-}
-
-void
-maybe_yield(void)
-{
-
- if (should_yield())
- uio_yield();
-}
-
void
uio_yield(void)
{
- struct thread *td;
-
- td = curthread;
- DROP_GIANT();
- thread_lock(td);
- sched_prio(td, td->td_user_pri);
- mi_switch(SW_INVOL | SWT_RELINQUISH, NULL);
- thread_unlock(td);
- PICKUP_GIANT();
+
+ kern_yield(curthread->td_user_pri);
}
int
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index b81e2f8..5d0fd78 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -2234,7 +2234,7 @@ buf_daemon()
while (numdirtybuffers > lodirtybuffers) {
if (buf_do_flush(NULL) == 0)
break;
- uio_yield();
+ kern_yield(-1);
}
lodirtybuffers = lodirtysave;
diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c
index 4ca514b..cc8321a 100644
--- a/sys/kern/vfs_mount.c
+++ b/sys/kern/vfs_mount.c
@@ -1661,7 +1661,7 @@ __mnt_vnode_next(struct vnode **mvp, struct mount *mp)
KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
if (should_yield()) {
MNT_IUNLOCK(mp);
- uio_yield();
+ kern_yield(-1);
MNT_ILOCK(mp);
}
vp = TAILQ_NEXT(*mvp, v_nmntvnodes);
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index b4db0a1..f4f2215 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -707,15 +707,15 @@ vlrureclaim(struct mount *mp)
vdropl(vp);
done++;
next_iter_mntunlocked:
- if (should_yield())
+ if (!should_yield())
goto relock_mnt;
goto yield;
next_iter:
- if (should_yield())
+ if (!should_yield())
continue;
MNT_IUNLOCK(mp);
yield:
- uio_yield();
+ kern_yield(-1);
relock_mnt:
MNT_ILOCK(mp);
}
@@ -828,7 +828,7 @@ vnlru_proc(void)
vnlru_nowhere++;
tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
} else
- uio_yield();
+ kern_yield(-1);
}
}
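
The vlrureclaim() hunk above is the logic-inversion fix credited to bde@ in the commit message: the old test skipped the yield precisely when should_yield() said it was time to yield. A sketch of the corrected step (scan_yield_point() is a hypothetical wrapper for the label-based flow in the real function):

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/proc.h>

static void
scan_yield_point(struct mount *mp)
{

	if (!should_yield())
		return;			/* still within our timeslice */
	MNT_IUNLOCK(mp);		/* never switch holding the interlock */
	kern_yield(-1);			/* keep the daemon's priority */
	MNT_ILOCK(mp);
}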
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index 42abf6e..7b5cad1 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -444,7 +444,7 @@ vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
* Package up an I/O request on a vnode into a uio and do it. The I/O
* request is split up into smaller chunks and we try to avoid saturating
* the buffer cache while potentially holding a vnode locked, so we
- * check bwillwrite() before calling vn_rdwr(). We also call uio_yield()
+ * check bwillwrite() before calling vn_rdwr(). We also call kern_yield()
* to give other processes a chance to lock the vnode (either other processes
* core'ing the same binary, or unrelated processes scanning the directory).
*/
@@ -491,7 +491,7 @@ vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
break;
offset += chunk;
base = (char *)base + chunk;
- uio_yield();
+ kern_yield(curthread->td_user_pri);
} while (len);
if (aresid)
*aresid = len + iaresid;
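
The comment block above explains why vn_rdwr_inchunks() yields between chunks: each voluntary switch opens a window for another process to take the vnode lock. A self-contained sketch of that pattern (copy_in_chunks() and do_chunk() are hypothetical; the real function drives vn_rdwr()):

#include <sys/param.h>
#include <sys/proc.h>

static int
copy_in_chunks(char *base, size_t len, int (*do_chunk)(char *, size_t))
{
	size_t chunk;
	int error;

	while (len > 0) {
		/* Bound each pass so the vnode lock is held briefly. */
		chunk = len > MAXBSIZE ? MAXBSIZE : len;
		error = do_chunk(base, chunk);
		if (error != 0)
			return (error);
		base += chunk;
		len -= chunk;
		/* Yield at user priority, as the hunk above does. */
		kern_yield(curthread->td_user_pri);
	}
	return (0);
}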
diff --git a/sys/pc98/cbus/sio.c b/sys/pc98/cbus/sio.c
index 0bd4bd8..01747d7 100644
--- a/sys/pc98/cbus/sio.c
+++ b/sys/pc98/cbus/sio.c
@@ -2250,7 +2250,6 @@ sysctl_siots(SYSCTL_HANDLER_ARGS)
error = SYSCTL_OUT(req, buf, len);
if (error != 0)
return (error);
- uio_yield();
}
return (0);
}
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 6811d50..2015d66 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -242,6 +242,7 @@ struct thread {
u_int td_estcpu; /* (t) estimated cpu utilization */
int td_slptick; /* (t) Time at sleep. */
int td_blktick; /* (t) Time spent blocked. */
+ int td_swvoltick; /* (t) Time at last SW_VOL switch. */
struct rusage td_ru; /* (t) rusage information. */
struct rusage_ext td_rux; /* (t) Internal rusage information. */
uint64_t td_incruntime; /* (t) Cpu ticks to transfer to proc. */
@@ -822,9 +823,11 @@ void fork_exit(void (*)(void *, struct trapframe *), void *,
struct trapframe *);
void fork_return(struct thread *, struct trapframe *);
int inferior(struct proc *p);
+void kern_yield(int);
void kick_proc0(void);
int leavepgrp(struct proc *p);
int maybe_preempt(struct thread *td);
+void maybe_yield(void);
void mi_switch(int flags, struct thread *newtd);
int p_candebug(struct thread *td, struct proc *p);
int p_cansee(struct thread *td, struct proc *p);
@@ -847,6 +850,7 @@ void sess_hold(struct session *);
void sess_release(struct session *);
int setrunnable(struct thread *);
void setsugid(struct proc *p);
+int should_yield(void);
int sigonstack(size_t sp);
void sleepinit(void);
void stopevent(struct proc *, u_int, u_int);
diff --git a/sys/sys/uio.h b/sys/sys/uio.h
index 60af2b7..1de8880 100644
--- a/sys/sys/uio.h
+++ b/sys/sys/uio.h
@@ -95,8 +95,6 @@ int copyinstrfrom(const void * __restrict src, void * __restrict dst,
size_t len, size_t * __restrict copied, int seg);
int copyinuio(struct iovec *iovp, u_int iovcnt, struct uio **uiop);
void uio_yield(void);
-void maybe_yield(void);
-int should_yield(void);
int uiomove(void *cp, int n, struct uio *uio);
int uiomove_frombuf(void *buf, int buflen, struct uio *uio);
int uiomove_fromphys(struct vm_page *ma[], vm_offset_t offset, int n,
diff --git a/sys/ufs/ffs/ffs_softdep.c b/sys/ufs/ffs/ffs_softdep.c
index 9bc21d3..0e790d6 100644
--- a/sys/ufs/ffs/ffs_softdep.c
+++ b/sys/ufs/ffs/ffs_softdep.c
@@ -1380,7 +1380,7 @@ softdep_process_worklist(mp, full)
*/
if (should_yield()) {
FREE_LOCK(&lk);
- uio_yield();
+ kern_yield(-1);
bwillwrite();
ACQUIRE_LOCK(&lk);
}