author     Glauber Costa <glommer@openvz.org>    2013-08-28 10:17:53 +1000
committer  Al Viro <viro@zeniv.linux.org.uk>     2013-09-10 18:56:29 -0400
commit     55f841ce9395a72c6285fbcc4c403c0c786e1c74 (patch)
tree       d64933e4976ca3fe5a83e619ba6bdc96c5690438 /fs
parent     3942c07ccf98e66b8893f396dca98f5b076f905f (diff)
super: fix calculation of shrinkable objects for small numbers
The sysctl knob sysctl_vfs_cache_pressure is used to determine what percentage of the shrinkable objects in our cache we should actively try to shrink. It works well when we have many objects (at least more than 100), because the approximation errors are negligible. But when that is not the case, especially when total_objects < 100, we may end up concluding that we have no objects at all (total / 100 = 0 if total < 100). This is certainly not the biggest killer in the world, but it may matter in very low kernel memory situations.

Signed-off-by: Glauber Costa <glommer@openvz.org>
Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
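The converted call sites below rely on a vfs_pressure_ratio() helper that is not part of this fs-only diffstat; it presumably lives in a shared header such as include/linux/dcache.h. A minimal sketch, assuming that location and that it is built on the kernel's existing mult_frac() macro, which multiplies before dividing to avoid the premature truncation described above:

/*
 * Sketch only: the real definition sits outside this diff.
 * mult_frac(x, n, d) evaluates (x * n) / d without first truncating
 * x / d, so small object counts no longer collapse to zero.
 */
static inline unsigned long vfs_pressure_ratio(unsigned long val)
{
	return mult_frac(val, sysctl_vfs_cache_pressure, 100);
}

For example, with the default sysctl_vfs_cache_pressure of 100 and 50 shrinkable objects, the old expression (50 / 100) * 100 returned 0, while mult_frac(50, 100, 100) returns 50.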
Diffstat (limited to 'fs')
-rw-r--r--   fs/gfs2/glock.c     2
-rw-r--r--   fs/gfs2/quota.c     2
-rw-r--r--   fs/mbcache.c        2
-rw-r--r--   fs/nfs/dir.c        2
-rw-r--r--   fs/quota/dquot.c    5
-rw-r--r--   fs/super.c         14
-rw-r--r--   fs/xfs/xfs_qm.c     2
7 files changed, 14 insertions(+), 15 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 722329c..b782bb5 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1462,7 +1462,7 @@ static int gfs2_shrink_glock_memory(struct shrinker *shrink,
gfs2_scan_glock_lru(sc->nr_to_scan);
}
- return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
+ return vfs_pressure_ratio(atomic_read(&lru_count));
}
static struct shrinker glock_shrinker = {
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 3768c2f..d550a5d 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -114,7 +114,7 @@ int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
spin_unlock(&qd_lru_lock);
out:
- return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
+ return vfs_pressure_ratio(atomic_read(&qd_lru_count));
}
static u64 qd2index(struct gfs2_quota_data *qd)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 8c32ef3..5eb0476 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -189,7 +189,7 @@ mb_cache_shrink_fn(struct shrinker *shrink, struct shrink_control *sc)
list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
__mb_cache_entry_forget(entry, gfp_mask);
}
- return (count / 100) * sysctl_vfs_cache_pressure;
+ return vfs_pressure_ratio(count);
}
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index e79bc6c..813ef25 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2046,7 +2046,7 @@ remove_lru_entry:
}
spin_unlock(&nfs_access_lru_lock);
nfs_access_free_list(&head);
- return (atomic_long_read(&nfs_access_nr_entries) / 100) * sysctl_vfs_cache_pressure;
+ return vfs_pressure_ratio(atomic_long_read(&nfs_access_nr_entries));
}
static void __nfs_access_zap_cache(struct nfs_inode *nfsi, struct list_head *head)
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 9a702e19..13eee84 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -719,9 +719,8 @@ static int shrink_dqcache_memory(struct shrinker *shrink,
prune_dqcache(nr);
spin_unlock(&dq_list_lock);
}
- return ((unsigned)
- percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS])
- /100) * sysctl_vfs_cache_pressure;
+ return vfs_pressure_ratio(
+ percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
}
static struct shrinker dqcache_shrinker = {
diff --git a/fs/super.c b/fs/super.c
index f6961ea..63b6863 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -82,13 +82,13 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
int inodes;
/* proportion the scan between the caches */
- dentries = (sc->nr_to_scan * sb->s_nr_dentry_unused) /
- total_objects;
- inodes = (sc->nr_to_scan * sb->s_nr_inodes_unused) /
- total_objects;
+ dentries = mult_frac(sc->nr_to_scan, sb->s_nr_dentry_unused,
+ total_objects);
+ inodes = mult_frac(sc->nr_to_scan, sb->s_nr_inodes_unused,
+ total_objects);
if (fs_objects)
- fs_objects = (sc->nr_to_scan * fs_objects) /
- total_objects;
+ fs_objects = mult_frac(sc->nr_to_scan, fs_objects,
+ total_objects);
/*
* prune the dcache first as the icache is pinned by it, then
* prune the icache, followed by the filesystem specific caches
@@ -104,7 +104,7 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
sb->s_nr_inodes_unused + fs_objects;
}
- total_objects = (total_objects / 100) * sysctl_vfs_cache_pressure;
+ total_objects = vfs_pressure_ratio(total_objects);
drop_super(sb);
return total_objects;
}
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 6218a0a..956da2e 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -1722,7 +1722,7 @@ xfs_qm_shake(
}
out:
- return (qi->qi_lru_count / 100) * sysctl_vfs_cache_pressure;
+ return vfs_pressure_ratio(qi->qi_lru_count);
}
/*