summary | refs | log | tree | commit | diff | stats
path: root/sys/kern/vfs_subr.c
diff options
context:
space:
mode:
author: phk <phk@FreeBSD.org> 2002-12-29 10:39:05 +0000
committer: phk <phk@FreeBSD.org> 2002-12-29 10:39:05 +0000
commit 90510abb6e701df9b6cdee3cd888c20b2e81daba (patch)
tree 23be39d9a68204a14a1b1c5516eea8649e3764ce /sys/kern/vfs_subr.c
parent 816919ad3911bdfe3e18835353fe58ce6ba5a436 (diff)
download: FreeBSD-src-90510abb6e701df9b6cdee3cd888c20b2e81daba.zip
download: FreeBSD-src-90510abb6e701df9b6cdee3cd888c20b2e81daba.tar.gz
Vnodes pull in 800-900 bytes these days, all things counted, so we need
to treat desiredvnodes much more like a limit than as a vague concept. On a 2GB RAM machine where desired vnodes is 130k, we run out of kmem_map space when we hit about 190k vnodes. If we wake up the vnode washer in getnewvnode(), sleep until it is done, so that it has a chance to offer us a washed vnode. If we don't sleep here we'll just race ahead and allocate yet a vnode which will never get freed. In the vnodewasher, instead of doing 10 vnodes per mountpoint per rotation, do 10% of the vnodes distributed evenly across the mountpoints.
Diffstat (limited to 'sys/kern/vfs_subr.c')
-rw-r--r--  sys/kern/vfs_subr.c | 20
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 0d5c8d7..5584666 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -736,7 +736,7 @@ vnlru_proc(void)
{
struct mount *mp, *nmp;
int s;
- int done;
+ int done, take;
struct proc *p = vnlruproc;
struct thread *td = FIRST_THREAD_IN_PROC(p); /* XXXKSE */
@@ -752,18 +752,23 @@ vnlru_proc(void)
if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
mtx_unlock(&vnode_free_list_mtx);
vnlruproc_sig = 0;
+ wakeup(&vnlruproc_sig);
tsleep(vnlruproc, PVFS, "vlruwt", 0);
continue;
}
mtx_unlock(&vnode_free_list_mtx);
done = 0;
mtx_lock(&mountlist_mtx);
+ take = 0;
+ TAILQ_FOREACH(mp, &mountlist, mnt_list)
+ take++;
+ take = desiredvnodes / (take * 10);
for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
nmp = TAILQ_NEXT(mp, mnt_list);
continue;
}
- done += vlrureclaim(mp, 10);
+ done += vlrureclaim(mp, take);
mtx_lock(&mountlist_mtx);
nmp = TAILQ_NEXT(mp, mnt_list);
vfs_unbusy(mp, td);
@@ -897,9 +902,14 @@ getnewvnode(tag, mp, vops, vpp)
* attempt to directly reclaim vnodes due to nasty recursion
* problems.
*/
- if (vnlruproc_sig == 0 && numvnodes - freevnodes > desiredvnodes) {
- vnlruproc_sig = 1; /* avoid unnecessary wakeups */
- wakeup(vnlruproc);
+ while (numvnodes - freevnodes > desiredvnodes) {
+ if (vnlruproc_sig == 0) {
+ vnlruproc_sig = 1; /* avoid unnecessary wakeups */
+ wakeup(vnlruproc);
+ }
+ mtx_unlock(&vnode_free_list_mtx);
+ tsleep(&vnlruproc_sig, PVFS, "vlruwk", hz);
+ mtx_lock(&vnode_free_list_mtx);
}
/*
OpenPOWER on IntegriCloud