summaryrefslogtreecommitdiffstats
path: root/sys/kern/vfs_subr.c
diff options
context:
space:
mode:
Diffstat (limited to 'sys/kern/vfs_subr.c')
-rw-r--r--sys/kern/vfs_subr.c82
1 files changed, 74 insertions, 8 deletions
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 3b1f854..ae297f0 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -59,6 +59,7 @@
#include <sys/namei.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
+#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
@@ -532,13 +533,15 @@ vattr_null(vap)
* underlying files, or the vnode may be in active use. It is not
* desireable to reuse such vnodes. These conditions may cause the
* number of vnodes to reach some minimum value regardless of what
- * you set kern.maxvnodes to. Do not set kernl.maxvnodes too low.
+ * you set kern.maxvnodes to. Do not set kern.maxvnodes too low.
*/
-static void
+static int
vlrureclaim(struct mount *mp, int count)
{
struct vnode *vp;
+ int done;
+ done = 0;
mtx_lock(&mntvnode_mtx);
while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
@@ -552,6 +555,7 @@ vlrureclaim(struct mount *mp, int count)
mtx_unlock(&mntvnode_mtx);
if (VMIGHTFREE(vp)) {
vgonel(vp, curthread);
+ done++;
} else {
mtx_unlock(&vp->v_interlock);
}
@@ -560,9 +564,69 @@ vlrureclaim(struct mount *mp, int count)
--count;
}
mtx_unlock(&mntvnode_mtx);
+ return done;
}
/*
+ * Attempt to recycle vnodes in a context that is always safe to block.
+ * Calling vlrureclaim() from the bowels of file system code has some
+ * interesting deadlock problems.
+ */
+static struct proc *vnlruproc;
+static int vnlruproc_sig;
+
+static void
+vnlru_proc(void)
+{
+ struct mount *mp, *nmp;
+ int s;
+ int done;
+ struct proc *p = vnlruproc;
+ struct thread *td = &p->p_thread; /* XXXKSE */
+
+ mtx_lock(&Giant);
+
+ EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
+ SHUTDOWN_PRI_FIRST);
+
+ s = splbio();
+ for (;;) {
+ kthread_suspend_check(p);
+ if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
+ vnlruproc_sig = 0;
+ tsleep(&vnlruproc, PVFS, "vlruwt", hz);
+ continue;
+ }
+ done = 0;
+ mtx_lock(&mountlist_mtx);
+ for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
+ nmp = TAILQ_NEXT(mp, mnt_list);
+ continue;
+ }
+ done += vlrureclaim(mp, 10);
+ mtx_lock(&mountlist_mtx);
+ nmp = TAILQ_NEXT(mp, mnt_list);
+ vfs_unbusy(mp, td);
+ }
+ mtx_unlock(&mountlist_mtx);
+ if (done == 0) {
+ printf("vnlru process getting nowhere, pausing..\n");
+ tsleep(&vnlruproc, PPAUSE, "vlrup", hz * 3);
+ }
+ }
+ splx(s);
+}
+
+static struct kproc_desc vnlru_kp = {
+ "vnlru",
+ vnlru_proc,
+ &vnlruproc
+};
+SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
+
+
+/*
* Routines having to do with the management of the vnode table.
*/
@@ -585,12 +649,14 @@ getnewvnode(tag, mp, vops, vpp)
s = splbio();
/*
* Try to reuse vnodes if we hit the max. This situation only
- * occurs in certain large-memory (2G+) situations. For the
- * algorithm to be stable we have to try to reuse at least 2.
- * No hysteresis should be necessary.
+ * occurs in certain large-memory (2G+) situations. We cannot
+ * attempt to directly reclaim vnodes due to nasty recursion
+ * problems.
*/
- if (mp && numvnodes - freevnodes > desiredvnodes)
- vlrureclaim(mp, 2);
+ if (vnlruproc_sig == 0 && numvnodes - freevnodes > desiredvnodes) {
+ vnlruproc_sig = 1; /* avoid unnecessary wakeups */
+ wakeup(&vnlruproc);
+ }
/*
* Attempt to reuse a vnode already on the free list, allocating
@@ -1555,7 +1621,7 @@ vget(vp, flags, td)
mtx_lock(&vp->v_interlock);
if (vp->v_flag & VXLOCK) {
if (vp->v_vxproc == curthread) {
- printf("VXLOCK interlock avoided\n");
+ log(LOG_INFO, "VXLOCK interlock avoided\n");
} else {
vp->v_flag |= VXWANT;
msleep((caddr_t)vp, &vp->v_interlock, PINOD | PDROP,
OpenPOWER on IntegriCloud