author    Dave Chinner <dchinner@redhat.com>    2011-07-08 14:14:42 +1000
committer Al Viro <viro@zeniv.linux.org.uk>    2011-07-20 20:47:10 -0400
commit    b0d40c92adafde7c2d81203ce7c1c69275f41140 (patch)
tree      f75a19dcd1a37aff23dc43323b58f014b1297c6b /fs/dcache.c
parent    12ad3ab66103e6582ca69c0c9de18b13487eaaef (diff)
superblock: introduce per-sb cache shrinker infrastructure
With context-based shrinkers, we can implement a per-superblock shrinker that shrinks the caches attached to the superblock. We currently have global shrinkers for the inode and dentry caches that split up into per-superblock operations via a coarse proportioning method that does not batch well.

The global shrinkers also have a dependency - dentries pin inodes - so we have to be very careful about how we register the global shrinkers so that the implicit call order is always correct. With a per-sb shrinker callout, we can encode this dependency directly into the per-sb shrinker, hence avoiding the need for strictly ordering shrinker registrations. We also have no need for any proportioning code, because the shrinker subsystem already provides this functionality across all shrinkers.

Allowing the shrinker to operate on a single superblock at a time means that we do fewer superblock list traversals and less locking, and reclaim should batch more effectively. This should result in less CPU overhead for reclaim and potentially faster reclaim of items from each filesystem.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--  fs/dcache.c  121
1 file changed, 12 insertions(+), 109 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 41e2085..2762804 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -743,13 +743,11 @@ static void shrink_dentry_list(struct list_head *list)
*
* If flags contains DCACHE_REFERENCED, referenced dentries will not be pruned.
*/
-static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
+static void __shrink_dcache_sb(struct super_block *sb, int count, int flags)
{
- /* called from prune_dcache() and shrink_dcache_parent() */
struct dentry *dentry;
LIST_HEAD(referenced);
LIST_HEAD(tmp);
- int cnt = *count;
relock:
spin_lock(&dcache_lru_lock);
@@ -777,7 +775,7 @@ relock:
} else {
list_move_tail(&dentry->d_lru, &tmp);
spin_unlock(&dentry->d_lock);
- if (!--cnt)
+ if (!--count)
break;
}
cond_resched_lock(&dcache_lru_lock);
@@ -787,83 +785,22 @@ relock:
spin_unlock(&dcache_lru_lock);
shrink_dentry_list(&tmp);
-
- *count = cnt;
}
/**
- * prune_dcache - shrink the dcache
- * @count: number of entries to try to free
+ * prune_dcache_sb - shrink the dcache
+ * @nr_to_scan: number of entries to try to free
*
- * Shrink the dcache. This is done when we need more memory, or simply when we
- * need to unmount something (at which point we need to unuse all dentries).
+ * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
+ * done when we need more memory and is called from the superblock shrinker
+ * function.
*
- * This function may fail to free any resources if all the dentries are in use.
+ * This function may fail to free any resources if all the dentries are in
+ * use.
*/
-static void prune_dcache(int count)
+void prune_dcache_sb(struct super_block *sb, int nr_to_scan)
{
- struct super_block *sb, *p = NULL;
- int w_count;
- int unused = dentry_stat.nr_unused;
- int prune_ratio;
- int pruned;
-
- if (unused == 0 || count == 0)
- return;
- if (count >= unused)
- prune_ratio = 1;
- else
- prune_ratio = unused / count;
- spin_lock(&sb_lock);
- list_for_each_entry(sb, &super_blocks, s_list) {
- if (list_empty(&sb->s_instances))
- continue;
- if (sb->s_nr_dentry_unused == 0)
- continue;
- sb->s_count++;
- * Now, we reclaim unused dentries with fairness.
- * We reclaim them same percentage from each superblock.
- * We calculate number of dentries to scan on this sb
- * as follows, but the implementation is arranged to avoid
- * overflows:
- * number of dentries to scan on this sb =
- * count * (number of dentries on this sb /
- * number of dentries in the machine)
- */
- spin_unlock(&sb_lock);
- if (prune_ratio != 1)
- w_count = (sb->s_nr_dentry_unused / prune_ratio) + 1;
- else
- w_count = sb->s_nr_dentry_unused;
- pruned = w_count;
- /*
- * We need to be sure this filesystem isn't being unmounted,
- * otherwise we could race with generic_shutdown_super(), and
- * end up holding a reference to an inode while the filesystem
- * is unmounted. So we try to get s_umount, and make sure
- * s_root isn't NULL.
- */
- if (down_read_trylock(&sb->s_umount)) {
- if ((sb->s_root != NULL) &&
- (!list_empty(&sb->s_dentry_lru))) {
- __shrink_dcache_sb(sb, &w_count,
- DCACHE_REFERENCED);
- pruned -= w_count;
- }
- up_read(&sb->s_umount);
- }
- spin_lock(&sb_lock);
- if (p)
- __put_super(p);
- count -= pruned;
- p = sb;
- /* more work left to do? */
- if (count <= 0)
- break;
- }
- if (p)
- __put_super(p);
- spin_unlock(&sb_lock);
+ __shrink_dcache_sb(sb, nr_to_scan, DCACHE_REFERENCED);
}
/**
@@ -1238,42 +1175,10 @@ void shrink_dcache_parent(struct dentry * parent)
int found;
while ((found = select_parent(parent)) != 0)
- __shrink_dcache_sb(sb, &found, 0);
+ __shrink_dcache_sb(sb, found, 0);
}
EXPORT_SYMBOL(shrink_dcache_parent);
-/*
- * Scan `sc->nr_to_scan' dentries and return the number which remain.
- *
- * We need to avoid reentering the filesystem if the caller is performing a
- * GFP_NOFS allocation attempt. One example deadlock is:
- *
- * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache->
- * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->put_inode->
- * ext2_discard_prealloc->ext2_free_blocks->lock_super->DEADLOCK.
- *
- * In this case we return -1 to tell the caller that we bailed.
- */
-static int shrink_dcache_memory(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- int nr = sc->nr_to_scan;
- gfp_t gfp_mask = sc->gfp_mask;
-
- if (nr) {
- if (!(gfp_mask & __GFP_FS))
- return -1;
- prune_dcache(nr);
- }
-
- return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
-}
-
-static struct shrinker dcache_shrinker = {
- .shrink = shrink_dcache_memory,
- .seeks = DEFAULT_SEEKS,
-};
-
/**
* __d_alloc - allocate a dcache entry
* @sb: filesystem it will belong to
@@ -3083,8 +2988,6 @@ static void __init dcache_init(void)
*/
dentry_cache = KMEM_CACHE(dentry,
SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
-
- register_shrinker(&dcache_shrinker);
/* Hash may have been set up in dcache_init_early */
if (!hashdist)