author     Linus Torvalds <torvalds@linux-foundation.org>  2013-09-12 15:01:38 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-09-12 15:01:38 -0700
commit     26935fb06ee88f1188789807687c03041f3c70d9
tree       381c487716540b52348d78bee6555f8fa61d77ef /drivers
parent     3cc69b638e11bfda5d013c2b75b60934aa0e88a1
parent     bf2ba3bc185269eca274b458aac46ba1ad7c1121
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull vfs pile 4 from Al Viro:
 "list_lru pile, mostly"

This came out of Andrew's pile, Al ended up doing the merge work so that
Andrew didn't have to.

Additionally, a few fixes.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (42 commits)
  super: fix for destroy lrus
  list_lru: dynamically adjust node arrays
  shrinker: Kill old ->shrink API.
  shrinker: convert remaining shrinkers to count/scan API
  staging/lustre/libcfs: cleanup linux-mem.h
  staging/lustre/ptlrpc: convert to new shrinker API
  staging/lustre/obdclass: convert lu_object shrinker to count/scan API
  staging/lustre/ldlm: convert to shrinkers to count/scan API
  hugepage: convert huge zero page shrinker to new shrinker API
  i915: bail out earlier when shrinker cannot acquire mutex
  drivers: convert shrinkers to new count/scan API
  fs: convert fs shrinkers to new scan/count API
  xfs: fix dquot isolation hang
  xfs-convert-dquot-cache-lru-to-list_lru-fix
  xfs: convert dquot cache lru to list_lru
  xfs: rework buffer dispose list tracking
  xfs-convert-buftarg-lru-to-generic-code-fix
  xfs: convert buftarg LRU to generic code
  fs: convert inode and dentry shrinking to be node aware
  vmscan: per-node deferred work
  ...
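For readers following the conversions below, here is a minimal sketch of a shrinker written against the new count/scan API that this pile introduces. It is illustrative only: the my_cache_* names and the atomic counter are hypothetical stand-ins for a driver's own cache accounting, while struct shrinker, struct shrink_control, SHRINK_STOP and DEFAULT_SEEKS come from <linux/shrinker.h>.

#include <linux/atomic.h>
#include <linux/gfp.h>
#include <linux/shrinker.h>

/* Hypothetical cache-size counter standing in for real driver accounting. */
static atomic_long_t my_cache_objects = ATOMIC_LONG_INIT(0);

/* count_objects: cheap estimate of how many objects could be freed. */
static unsigned long my_cache_shrink_count(struct shrinker *shrink,
					   struct shrink_control *sc)
{
	return atomic_long_read(&my_cache_objects);
}

/* scan_objects: free up to sc->nr_to_scan objects, report how many went. */
static unsigned long my_cache_shrink_scan(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	unsigned long freed = 0;

	/* SHRINK_STOP tells vmscan that no progress is possible right now. */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	while (freed < sc->nr_to_scan &&
	       atomic_long_add_unless(&my_cache_objects, -1, 0))
		freed++;

	return freed;
}

static struct shrinker my_cache_shrinker = {
	.count_objects	= my_cache_shrink_count,
	.scan_objects	= my_cache_shrink_scan,
	.seeks		= DEFAULT_SEEKS,
};

/* register_shrinker(&my_cache_shrinker) at init,
 * unregister_shrinker(&my_cache_shrinker) at teardown. */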
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c                               |   4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c                               |  82
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c                          |  44
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c                      |  51
-rw-r--r--  drivers/md/bcache/btree.c                                     |  43
-rw-r--r--  drivers/md/bcache/sysfs.c                                     |   2
-rw-r--r--  drivers/md/dm-bufio.c                                         |  64
-rw-r--r--  drivers/staging/android/ashmem.c                              |  44
-rw-r--r--  drivers/staging/android/lowmemorykiller.c                     |  43
-rw-r--r--  drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h |  38
-rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_pool.c                | 148
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/lu_object.c            |  98
-rw-r--r--  drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c               |  76
13 files changed, 413 insertions, 324 deletions
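Each hunk that follows repeats the same pattern: the single ->shrink() callback is split into the count_objects/scan_objects pair sketched above. For contrast, this is roughly what the old-style callback being deleted looked like (again using the hypothetical my_cache_objects counter; the old contract is reconstructed from the surrounding hunks, not copied from any one driver).

/*
 * Old shrinker contract: nr_to_scan == 0 means "just report how many objects
 * you could free"; a non-zero nr_to_scan asks for actual reclaim.  The return
 * value is the number of freeable objects remaining, or -1 when nothing can
 * be done safely in the current allocation context.
 */
static int my_cache_old_shrink(struct shrinker *shrink,
			       struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;

	if (nr) {
		if (!(sc->gfp_mask & __GFP_FS))
			return -1;
		while (nr-- && atomic_long_add_unless(&my_cache_objects, -1, 0))
			;	/* drop one cached object per iteration */
	}

	return atomic_long_read(&my_cache_objects);
}

/* Registered via:  my_shrinker.shrink = my_cache_old_shrink; */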
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9b265a4..c27a210 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1676,7 +1676,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
out_gem_unload:
- if (dev_priv->mm.inactive_shrinker.shrink)
+ if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
if (dev->pdev->msi_enabled)
@@ -1715,7 +1715,7 @@ int i915_driver_unload(struct drm_device *dev)
i915_teardown_sysfs(dev);
- if (dev_priv->mm.inactive_shrinker.shrink)
+ if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d9e337f..8507c6d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -57,10 +57,12 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable);
-static int i915_gem_inactive_shrink(struct shrinker *shrinker,
- struct shrink_control *sc);
+static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
+ struct shrink_control *sc);
+static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
+ struct shrink_control *sc);
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
-static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -1769,16 +1771,21 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
return __i915_gem_shrink(dev_priv, target, true);
}
-static void
+static long
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj, *next;
+ long freed = 0;
i915_gem_evict_everything(dev_priv->dev);
list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
- global_list)
+ global_list) {
+ if (obj->pages_pin_count == 0)
+ freed += obj->base.size >> PAGE_SHIFT;
i915_gem_object_put_pages(obj);
+ }
+ return freed;
}
static int
@@ -4558,7 +4565,8 @@ i915_gem_load(struct drm_device *dev)
dev_priv->mm.interruptible = true;
- dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
+ dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
+ dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&dev_priv->mm.inactive_shrinker);
}
@@ -4781,8 +4789,8 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
#endif
}
-static int
-i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+static unsigned long
+i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
container_of(shrinker,
@@ -4790,45 +4798,35 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
- int nr_to_scan = sc->nr_to_scan;
bool unlock = true;
- int cnt;
+ unsigned long count;
if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return 0;
+ return SHRINK_STOP;
if (dev_priv->mm.shrinker_no_lock_stealing)
- return 0;
+ return SHRINK_STOP;
unlock = false;
}
- if (nr_to_scan) {
- nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
- if (nr_to_scan > 0)
- nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
- false);
- if (nr_to_scan > 0)
- i915_gem_shrink_all(dev_priv);
- }
-
- cnt = 0;
+ count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0)
- cnt += obj->base.size >> PAGE_SHIFT;
+ count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (obj->active)
continue;
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
- cnt += obj->base.size >> PAGE_SHIFT;
+ count += obj->base.size >> PAGE_SHIFT;
}
if (unlock)
mutex_unlock(&dev->struct_mutex);
- return cnt;
+ return count;
}
/* All the new VM stuff */
@@ -4892,6 +4890,40 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
return 0;
}
+static unsigned long
+i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(shrinker,
+ struct drm_i915_private,
+ mm.inactive_shrinker);
+ struct drm_device *dev = dev_priv->dev;
+ int nr_to_scan = sc->nr_to_scan;
+ unsigned long freed;
+ bool unlock = true;
+
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return 0;
+
+ if (dev_priv->mm.shrinker_no_lock_stealing)
+ return 0;
+
+ unlock = false;
+ }
+
+ freed = i915_gem_purge(dev_priv, nr_to_scan);
+ if (freed < nr_to_scan)
+ freed += __i915_gem_shrink(dev_priv, nr_to_scan,
+ false);
+ if (freed < nr_to_scan)
+ freed += i915_gem_shrink_all(dev_priv);
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+ return freed;
+}
+
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index bd2a3b4..863bef9 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -377,28 +377,26 @@ out:
return nr_free;
}
-/* Get good estimation how many pages are free in pools */
-static int ttm_pool_get_num_unused_pages(void)
-{
- unsigned i;
- int total = 0;
- for (i = 0; i < NUM_POOLS; ++i)
- total += _manager->pools[i].npages;
-
- return total;
-}
-
/**
* Callback for mm to request pool to reduce number of page held.
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+ * ttm_page_pool_free() does memory allocation using GFP_KERNEL. That means
+ * this can deadlock when called with a sc->gfp_mask that is not equal to
+ * GFP_KERNEL.
+ *
+ * This code is crying out for a shrinker per pool....
*/
-static int ttm_pool_mm_shrink(struct shrinker *shrink,
- struct shrink_control *sc)
+static unsigned long
+ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned i;
unsigned pool_offset = atomic_add_return(1, &start_pool);
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;
+ unsigned long freed = 0;
pool_offset = pool_offset % NUM_POOLS;
/* select start pool in round robin fashion */
@@ -408,14 +406,28 @@ static int ttm_pool_mm_shrink(struct shrinker *shrink,
break;
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
shrink_pages = ttm_page_pool_free(pool, nr_free);
+ freed += nr_free - shrink_pages;
}
- /* return estimated number of unused pages in pool */
- return ttm_pool_get_num_unused_pages();
+ return freed;
+}
+
+
+static unsigned long
+ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned i;
+ unsigned long count = 0;
+
+ for (i = 0; i < NUM_POOLS; ++i)
+ count += _manager->pools[i].npages;
+
+ return count;
}
static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
- manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+ manager->mm_shrink.count_objects = ttm_pool_shrink_count;
+ manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index b8b3943..7957bee 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -918,19 +918,6 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
-/* Get good estimation how many pages are free in pools */
-static int ttm_dma_pool_get_num_unused_pages(void)
-{
- struct device_pools *p;
- unsigned total = 0;
-
- mutex_lock(&_manager->lock);
- list_for_each_entry(p, &_manager->pools, pools)
- total += p->pool->npages_free;
- mutex_unlock(&_manager->lock);
- return total;
-}
-
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
@@ -1002,18 +989,29 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
/**
* Callback for mm to request pool to reduce number of page held.
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+ * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
+ * needs to be paid to sc->gfp_mask to determine if this can be done or not.
+ * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context would be
+ * really bad.
+ *
+ * I'm getting sadder as I hear more pathetic whimpers about needing per-pool
+ * shrinkers.
*/
-static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
- struct shrink_control *sc)
+static unsigned long
+ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned idx = 0;
unsigned pool_offset = atomic_add_return(1, &start_pool);
unsigned shrink_pages = sc->nr_to_scan;
struct device_pools *p;
+ unsigned long freed = 0;
if (list_empty(&_manager->pools))
- return 0;
+ return SHRINK_STOP;
mutex_lock(&_manager->lock);
pool_offset = pool_offset % _manager->npools;
@@ -1029,18 +1027,33 @@ static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
continue;
nr_free = shrink_pages;
shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+ freed += nr_free - shrink_pages;
+
pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
p->pool->dev_name, p->pool->name, current->pid,
nr_free, shrink_pages);
}
mutex_unlock(&_manager->lock);
- /* return estimated number of unused pages in pool */
- return ttm_dma_pool_get_num_unused_pages();
+ return freed;
+}
+
+static unsigned long
+ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct device_pools *p;
+ unsigned long count = 0;
+
+ mutex_lock(&_manager->lock);
+ list_for_each_entry(p, &_manager->pools, pools)
+ count += p->pool->npages_free;
+ mutex_unlock(&_manager->lock);
+ return count;
}
static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
- manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
+ manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
+ manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index ee37288..f9764e6 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -597,24 +597,19 @@ static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order)
return 0;
}
-static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long bch_mca_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
{
struct cache_set *c = container_of(shrink, struct cache_set, shrink);
struct btree *b, *t;
unsigned long i, nr = sc->nr_to_scan;
+ unsigned long freed = 0;
if (c->shrinker_disabled)
- return 0;
+ return SHRINK_STOP;
if (c->try_harder)
- return 0;
-
- /*
- * If nr == 0, we're supposed to return the number of items we have
- * cached. Not allowed to return -1.
- */
- if (!nr)
- return mca_can_free(c) * c->btree_pages;
+ return SHRINK_STOP;
/* Return -1 if we can't do anything right now */
if (sc->gfp_mask & __GFP_WAIT)
@@ -634,14 +629,14 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
i = 0;
list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
- if (!nr)
+ if (freed >= nr)
break;
if (++i > 3 &&
!mca_reap(b, NULL, 0)) {
mca_data_free(b);
rw_unlock(true, b);
- --nr;
+ freed++;
}
}
@@ -652,7 +647,7 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
if (list_empty(&c->btree_cache))
goto out;
- for (i = 0; nr && i < c->bucket_cache_used; i++) {
+ for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {
b = list_first_entry(&c->btree_cache, struct btree, list);
list_rotate_left(&c->btree_cache);
@@ -661,14 +656,27 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
mca_bucket_free(b);
mca_data_free(b);
rw_unlock(true, b);
- --nr;
+ freed++;
} else
b->accessed = 0;
}
out:
- nr = mca_can_free(c) * c->btree_pages;
mutex_unlock(&c->bucket_lock);
- return nr;
+ return freed;
+}
+
+static unsigned long bch_mca_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct cache_set *c = container_of(shrink, struct cache_set, shrink);
+
+ if (c->shrinker_disabled)
+ return 0;
+
+ if (c->try_harder)
+ return 0;
+
+ return mca_can_free(c) * c->btree_pages;
}
void bch_btree_cache_free(struct cache_set *c)
@@ -737,7 +745,8 @@ int bch_btree_cache_alloc(struct cache_set *c)
c->verify_data = NULL;
#endif
- c->shrink.shrink = bch_mca_shrink;
+ c->shrink.count_objects = bch_mca_count;
+ c->shrink.scan_objects = bch_mca_scan;
c->shrink.seeks = 4;
c->shrink.batch = c->btree_pages * 2;
register_shrinker(&c->shrink);
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 12a2c28..4fe6ab2 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -556,7 +556,7 @@ STORE(__bch_cache_set)
struct shrink_control sc;
sc.gfp_mask = GFP_KERNEL;
sc.nr_to_scan = strtoul_or_return(buf);
- c->shrink.shrink(&c->shrink, &sc);
+ c->shrink.scan_objects(&c->shrink, &sc);
}
sysfs_strtoul(congested_read_threshold_us,
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 5227e07..173cbb2 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1425,62 +1425,75 @@ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
unsigned long max_jiffies)
{
if (jiffies - b->last_accessed < max_jiffies)
- return 1;
+ return 0;
if (!(gfp & __GFP_IO)) {
if (test_bit(B_READING, &b->state) ||
test_bit(B_WRITING, &b->state) ||
test_bit(B_DIRTY, &b->state))
- return 1;
+ return 0;
}
if (b->hold_count)
- return 1;
+ return 0;
__make_buffer_clean(b);
__unlink_buffer(b);
__free_buffer_wake(b);
- return 0;
+ return 1;
}
-static void __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
- struct shrink_control *sc)
+static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
+ gfp_t gfp_mask)
{
int l;
struct dm_buffer *b, *tmp;
+ long freed = 0;
for (l = 0; l < LIST_SIZE; l++) {
- list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list)
- if (!__cleanup_old_buffer(b, sc->gfp_mask, 0) &&
- !--nr_to_scan)
- return;
+ list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
+ freed += __cleanup_old_buffer(b, gfp_mask, 0);
+ if (!--nr_to_scan)
+ break;
+ }
dm_bufio_cond_resched();
}
+ return freed;
}
-static int shrink(struct shrinker *shrinker, struct shrink_control *sc)
+static unsigned long
+dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- struct dm_bufio_client *c =
- container_of(shrinker, struct dm_bufio_client, shrinker);
- unsigned long r;
- unsigned long nr_to_scan = sc->nr_to_scan;
+ struct dm_bufio_client *c;
+ unsigned long freed;
+ c = container_of(shrink, struct dm_bufio_client, shrinker);
if (sc->gfp_mask & __GFP_IO)
dm_bufio_lock(c);
else if (!dm_bufio_trylock(c))
- return !nr_to_scan ? 0 : -1;
+ return SHRINK_STOP;
- if (nr_to_scan)
- __scan(c, nr_to_scan, sc);
+ freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
+ dm_bufio_unlock(c);
+ return freed;
+}
- r = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
- if (r > INT_MAX)
- r = INT_MAX;
+static unsigned long
+dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct dm_bufio_client *c;
+ unsigned long count;
- dm_bufio_unlock(c);
+ c = container_of(shrink, struct dm_bufio_client, shrinker);
+ if (sc->gfp_mask & __GFP_IO)
+ dm_bufio_lock(c);
+ else if (!dm_bufio_trylock(c))
+ return 0;
- return r;
+ count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
+ dm_bufio_unlock(c);
+ return count;
}
/*
@@ -1582,7 +1595,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
__cache_size_refresh();
mutex_unlock(&dm_bufio_clients_lock);
- c->shrinker.shrink = shrink;
+ c->shrinker.count_objects = dm_bufio_shrink_count;
+ c->shrinker.scan_objects = dm_bufio_shrink_scan;
c->shrinker.seeks = 1;
c->shrinker.batch = 0;
register_shrinker(&c->shrinker);
@@ -1669,7 +1683,7 @@ static void cleanup_old_buffers(void)
struct dm_buffer *b;
b = list_entry(c->lru[LIST_CLEAN].prev,
struct dm_buffer, lru_list);
- if (__cleanup_old_buffer(b, 0, max_age * HZ))
+ if (!__cleanup_old_buffer(b, 0, max_age * HZ))
break;
dm_bufio_cond_resched();
}
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 21a3f72..8e76ddc 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -341,27 +341,26 @@ out:
/*
* ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
*
- * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
- * many objects (pages) we have in total.
+ * 'nr_to_scan' is the number of objects to scan for freeing.
*
* 'gfp_mask' is the mask of the allocation that got us into this mess.
*
- * Return value is the number of objects (pages) remaining, or -1 if we cannot
+ * Return value is the number of objects freed or -1 if we cannot
* proceed without risk of deadlock (due to gfp_mask).
*
* We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
* chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
* pages freed.
*/
-static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
+static unsigned long
+ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
struct ashmem_range *range, *next;
+ unsigned long freed = 0;
/* We might recurse into filesystem code, so bail out if necessary */
- if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
- return -1;
- if (!sc->nr_to_scan)
- return lru_count;
+ if (!(sc->gfp_mask & __GFP_FS))
+ return SHRINK_STOP;
mutex_lock(&ashmem_mutex);
list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
@@ -374,17 +373,32 @@ static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
range->purged = ASHMEM_WAS_PURGED;
lru_del(range);
- sc->nr_to_scan -= range_size(range);
- if (sc->nr_to_scan <= 0)
+ freed += range_size(range);
+ if (--sc->nr_to_scan <= 0)
break;
}
mutex_unlock(&ashmem_mutex);
+ return freed;
+}
+static unsigned long
+ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ /*
+ * note that lru_count is a count of pages on the lru, not a count of
+ * objects on the list. This means the scan function needs to return the
+ * number of pages freed, not the number of objects scanned.
+ */
return lru_count;
}
static struct shrinker ashmem_shrinker = {
- .shrink = ashmem_shrink,
+ .count_objects = ashmem_shrink_count,
+ .scan_objects = ashmem_shrink_scan,
+ /*
+ * XXX (dchinner): I wish people would comment on why they need such
+ * significant changes to the default value here
+ */
.seeks = DEFAULT_SEEKS * 4,
};
@@ -690,11 +704,11 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (capable(CAP_SYS_ADMIN)) {
struct shrink_control sc = {
.gfp_mask = GFP_KERNEL,
- .nr_to_scan = 0,
+ .nr_to_scan = LONG_MAX,
};
- ret = ashmem_shrink(&ashmem_shrinker, &sc);
- sc.nr_to_scan = ret;
- ashmem_shrink(&ashmem_shrinker, &sc);
+
+ nodes_setall(sc.nodes_to_scan);
+ ashmem_shrink_scan(&ashmem_shrinker, &sc);
}
break;
}
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index fe74494..6f094b3 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -66,11 +66,20 @@ static unsigned long lowmem_deathpending_timeout;
pr_info(x); \
} while (0)
-static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
+static unsigned long lowmem_count(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ return global_page_state(NR_ACTIVE_ANON) +
+ global_page_state(NR_ACTIVE_FILE) +
+ global_page_state(NR_INACTIVE_ANON) +
+ global_page_state(NR_INACTIVE_FILE);
+}
+
+static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
{
struct task_struct *tsk;
struct task_struct *selected = NULL;
- int rem = 0;
+ unsigned long rem = 0;
int tasksize;
int i;
short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
@@ -92,19 +101,17 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
break;
}
}
- if (sc->nr_to_scan > 0)
- lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %hd\n",
- sc->nr_to_scan, sc->gfp_mask, other_free,
- other_file, min_score_adj);
- rem = global_page_state(NR_ACTIVE_ANON) +
- global_page_state(NR_ACTIVE_FILE) +
- global_page_state(NR_INACTIVE_ANON) +
- global_page_state(NR_INACTIVE_FILE);
- if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
- lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
- sc->nr_to_scan, sc->gfp_mask, rem);
- return rem;
+
+ lowmem_print(3, "lowmem_scan %lu, %x, ofree %d %d, ma %hd\n",
+ sc->nr_to_scan, sc->gfp_mask, other_free,
+ other_file, min_score_adj);
+
+ if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
+ lowmem_print(5, "lowmem_scan %lu, %x, return 0\n",
+ sc->nr_to_scan, sc->gfp_mask);
+ return 0;
}
+
selected_oom_score_adj = min_score_adj;
rcu_read_lock();
@@ -154,16 +161,18 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
lowmem_deathpending_timeout = jiffies + HZ;
send_sig(SIGKILL, selected, 0);
set_tsk_thread_flag(selected, TIF_MEMDIE);
- rem -= selected_tasksize;
+ rem += selected_tasksize;
}
- lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
+
+ lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n",
sc->nr_to_scan, sc->gfp_mask, rem);
rcu_read_unlock();
return rem;
}
static struct shrinker lowmem_shrinker = {
- .shrink = lowmem_shrink,
+ .scan_objects = lowmem_scan,
+ .count_objects = lowmem_count,
.seeks = DEFAULT_SEEKS * 16
};
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
index 63efb7b..2af15d4 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
@@ -79,42 +79,4 @@
do { __oldfs = get_fs(); set_fs(get_ds());} while(0)
#define MMSPACE_CLOSE set_fs(__oldfs)
-/*
- * Shrinker
- */
-
-# define SHRINKER_ARGS(sc, nr_to_scan, gfp_mask) \
- struct shrinker *shrinker, \
- struct shrink_control *sc
-# define shrink_param(sc, var) ((sc)->var)
-
-typedef int (*shrinker_t)(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask));
-
-static inline
-struct shrinker *set_shrinker(int seek, shrinker_t func)
-{
- struct shrinker *s;
-
- s = kmalloc(sizeof(*s), GFP_KERNEL);
- if (s == NULL)
- return (NULL);
-
- s->shrink = func;
- s->seeks = seek;
-
- register_shrinker(s);
-
- return s;
-}
-
-static inline
-void remove_shrinker(struct shrinker *shrinker)
-{
- if (shrinker == NULL)
- return;
-
- unregister_shrinker(shrinker);
- kfree(shrinker);
-}
-
#endif /* __LINUX_CFS_MEM_H__ */
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 454027d..0025ee6 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -521,7 +521,7 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
int nr, unsigned int gfp_mask)
{
struct ldlm_namespace *ns;
- int canceled = 0, unused;
+ int unused;
ns = ldlm_pl2ns(pl);
@@ -540,14 +540,10 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
unused = ns->ns_nr_unused;
spin_unlock(&ns->ns_lock);
- if (nr) {
- canceled = ldlm_cancel_lru(ns, nr, LCF_ASYNC,
- LDLM_CANCEL_SHRINK);
- }
- /*
- * Return the number of potentially reclaimable locks.
- */
- return ((unused - canceled) / 100) * sysctl_vfs_cache_pressure;
+ if (nr == 0)
+ return (unused / 100) * sysctl_vfs_cache_pressure;
+ else
+ return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
}
struct ldlm_pool_ops ldlm_srv_pool_ops = {
@@ -601,9 +597,10 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
return recalc_interval_sec;
}
-/**
+/*
* Pool shrink wrapper. Will call either client or server pool recalc callback
- * depending what pool \a pl is used.
+ * depending what pool pl is used. When nr == 0, just return the number of
+ * freeable locks. Otherwise, return the number of canceled locks.
*/
int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
unsigned int gfp_mask)
@@ -1017,29 +1014,24 @@ static int ldlm_pool_granted(struct ldlm_pool *pl)
}
static struct ptlrpc_thread *ldlm_pools_thread;
-static struct shrinker *ldlm_pools_srv_shrinker;
-static struct shrinker *ldlm_pools_cli_shrinker;
static struct completion ldlm_pools_comp;
/*
- * Cancel \a nr locks from all namespaces (if possible). Returns number of
- * cached locks after shrink is finished. All namespaces are asked to
- * cancel approximately equal amount of locks to keep balancing.
+ * count locks from all namespaces (if possible). Returns number of
+ * cached locks.
*/
-static int ldlm_pools_shrink(ldlm_side_t client, int nr,
- unsigned int gfp_mask)
+static unsigned long ldlm_pools_count(ldlm_side_t client, unsigned int gfp_mask)
{
- int total = 0, cached = 0, nr_ns;
+ int total = 0, nr_ns;
struct ldlm_namespace *ns;
struct ldlm_namespace *ns_old = NULL; /* loop detection */
void *cookie;
- if (client == LDLM_NAMESPACE_CLIENT && nr != 0 &&
- !(gfp_mask & __GFP_FS))
- return -1;
+ if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
+ return 0;
- CDEBUG(D_DLMTRACE, "Request to shrink %d %s locks from all pools\n",
- nr, client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
+ CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
+ client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
cookie = cl_env_reenter();
@@ -1047,8 +1039,7 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
* Find out how many resources we may release.
*/
for (nr_ns = ldlm_namespace_nr_read(client);
- nr_ns > 0; nr_ns--)
- {
+ nr_ns > 0; nr_ns--) {
mutex_lock(ldlm_namespace_lock(client));
if (list_empty(ldlm_namespace_list(client))) {
mutex_unlock(ldlm_namespace_lock(client));
@@ -1078,17 +1069,27 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
ldlm_namespace_put(ns);
}
- if (nr == 0 || total == 0) {
- cl_env_reexit(cookie);
- return total;
- }
+ cl_env_reexit(cookie);
+ return total;
+}
+
+static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, unsigned int gfp_mask)
+{
+ unsigned long freed = 0;
+ int tmp, nr_ns;
+ struct ldlm_namespace *ns;
+ void *cookie;
+
+ if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
+ return -1;
+
+ cookie = cl_env_reenter();
/*
- * Shrink at least ldlm_namespace_nr(client) namespaces.
+ * Shrink at least ldlm_namespace_nr_read(client) namespaces.
*/
- for (nr_ns = ldlm_namespace_nr_read(client) - nr_ns;
- nr_ns > 0; nr_ns--)
- {
+ for (tmp = nr_ns = ldlm_namespace_nr_read(client);
+ tmp > 0; tmp--) {
int cancel, nr_locks;
/*
@@ -1097,12 +1098,6 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
mutex_lock(ldlm_namespace_lock(client));
if (list_empty(ldlm_namespace_list(client))) {
mutex_unlock(ldlm_namespace_lock(client));
- /*
- * If list is empty, we can't return any @cached > 0,
- * that probably would cause needless shrinker
- * call.
- */
- cached = 0;
break;
}
ns = ldlm_namespace_first_locked(client);
@@ -1111,29 +1106,42 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
mutex_unlock(ldlm_namespace_lock(client));
nr_locks = ldlm_pool_granted(&ns->ns_pool);
- cancel = 1 + nr_locks * nr / total;
- ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
- cached += ldlm_pool_granted(&ns->ns_pool);
+ /*
+ * We used to shrink proportionally, but with the new shrinker API
+ * we lost the total number of freeable locks.
+ */
+ cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
+ freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
ldlm_namespace_put(ns);
}
cl_env_reexit(cookie);
- /* we only decrease the SLV in server pools shrinker, return -1 to
- * kernel to avoid needless loop. LU-1128 */
- return (client == LDLM_NAMESPACE_SERVER) ? -1 : cached;
+ /*
+ * we only decrease the SLV in server pools shrinker, return
+ * SHRINK_STOP to kernel to avoid needless loop. LU-1128
+ */
+ return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed;
+}
+
+static unsigned long ldlm_pools_srv_count(struct shrinker *s, struct shrink_control *sc)
+{
+ return ldlm_pools_count(LDLM_NAMESPACE_SERVER, sc->gfp_mask);
}
-static int ldlm_pools_srv_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static unsigned long ldlm_pools_srv_scan(struct shrinker *s, struct shrink_control *sc)
{
- return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER,
- shrink_param(sc, nr_to_scan),
- shrink_param(sc, gfp_mask));
+ return ldlm_pools_scan(LDLM_NAMESPACE_SERVER, sc->nr_to_scan,
+ sc->gfp_mask);
}
-static int ldlm_pools_cli_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static unsigned long ldlm_pools_cli_count(struct shrinker *s, struct shrink_control *sc)
{
- return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT,
- shrink_param(sc, nr_to_scan),
- shrink_param(sc, gfp_mask));
+ return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
+}
+
+static unsigned long ldlm_pools_cli_scan(struct shrinker *s, struct shrink_control *sc)
+{
+ return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
+ sc->gfp_mask);
}
int ldlm_pools_recalc(ldlm_side_t client)
@@ -1216,7 +1224,7 @@ int ldlm_pools_recalc(ldlm_side_t client)
}
/*
- * Recalc at least ldlm_namespace_nr(client) namespaces.
+ * Recalc at least ldlm_namespace_nr_read(client) namespaces.
*/
for (nr = ldlm_namespace_nr_read(client); nr > 0; nr--) {
int skip;
@@ -1383,18 +1391,26 @@ static void ldlm_pools_thread_stop(void)
ldlm_pools_thread = NULL;
}
+static struct shrinker ldlm_pools_srv_shrinker = {
+ .count_objects = ldlm_pools_srv_count,
+ .scan_objects = ldlm_pools_srv_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+static struct shrinker ldlm_pools_cli_shrinker = {
+ .count_objects = ldlm_pools_cli_count,
+ .scan_objects = ldlm_pools_cli_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
int ldlm_pools_init(void)
{
int rc;
rc = ldlm_pools_thread_start();
if (rc == 0) {
- ldlm_pools_srv_shrinker =
- set_shrinker(DEFAULT_SEEKS,
- ldlm_pools_srv_shrink);
- ldlm_pools_cli_shrinker =
- set_shrinker(DEFAULT_SEEKS,
- ldlm_pools_cli_shrink);
+ register_shrinker(&ldlm_pools_srv_shrinker);
+ register_shrinker(&ldlm_pools_cli_shrinker);
}
return rc;
}
@@ -1402,14 +1418,8 @@ EXPORT_SYMBOL(ldlm_pools_init);
void ldlm_pools_fini(void)
{
- if (ldlm_pools_srv_shrinker != NULL) {
- remove_shrinker(ldlm_pools_srv_shrinker);
- ldlm_pools_srv_shrinker = NULL;
- }
- if (ldlm_pools_cli_shrinker != NULL) {
- remove_shrinker(ldlm_pools_cli_shrinker);
- ldlm_pools_cli_shrinker = NULL;
- }
+ unregister_shrinker(&ldlm_pools_srv_shrinker);
+ unregister_shrinker(&ldlm_pools_cli_shrinker);
ldlm_pools_thread_stop();
}
EXPORT_SYMBOL(ldlm_pools_fini);
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index c29ac1c..3a3d5bc 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -1779,7 +1779,6 @@ int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
}
EXPORT_SYMBOL(lu_env_refill_by_tags);
-static struct shrinker *lu_site_shrinker = NULL;
typedef struct lu_site_stats{
unsigned lss_populated;
@@ -1835,61 +1834,68 @@ static void lu_site_stats_get(cfs_hash_t *hs,
* objects without taking the lu_sites_guard lock, but this is not
* possible in the current implementation.
*/
-static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static unsigned long lu_cache_shrink_count(struct shrinker *sk,
+ struct shrink_control *sc)
{
lu_site_stats_t stats;
struct lu_site *s;
struct lu_site *tmp;
- int cached = 0;
- int remain = shrink_param(sc, nr_to_scan);
- LIST_HEAD(splice);
-
- if (!(shrink_param(sc, gfp_mask) & __GFP_FS)) {
- if (remain != 0)
- return -1;
- else
- /* We must not take the lu_sites_guard lock when
- * __GFP_FS is *not* set because of the deadlock
- * possibility detailed above. Additionally,
- * since we cannot determine the number of
- * objects in the cache without taking this
- * lock, we're in a particularly tough spot. As
- * a result, we'll just lie and say our cache is
- * empty. This _should_ be ok, as we can't
- * reclaim objects when __GFP_FS is *not* set
- * anyways.
- */
- return 0;
- }
+ unsigned long cached = 0;
- CDEBUG(D_INODE, "Shrink %d objects\n", remain);
+ if (!(sc->gfp_mask & __GFP_FS))
+ return 0;
mutex_lock(&lu_sites_guard);
list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
- if (shrink_param(sc, nr_to_scan) != 0) {
- remain = lu_site_purge(&lu_shrink_env, s, remain);
- /*
- * Move just shrunk site to the tail of site list to
- * assure shrinking fairness.
- */
- list_move_tail(&s->ls_linkage, &splice);
- }
-
memset(&stats, 0, sizeof(stats));
lu_site_stats_get(s->ls_obj_hash, &stats, 0);
cached += stats.lss_total - stats.lss_busy;
- if (shrink_param(sc, nr_to_scan) && remain <= 0)
- break;
}
- list_splice(&splice, lu_sites.prev);
mutex_unlock(&lu_sites_guard);
cached = (cached / 100) * sysctl_vfs_cache_pressure;
- if (shrink_param(sc, nr_to_scan) == 0)
- CDEBUG(D_INODE, "%d objects cached\n", cached);
+ CDEBUG(D_INODE, "%ld objects cached\n", cached);
return cached;
}
+static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
+ struct shrink_control *sc)
+{
+ struct lu_site *s;
+ struct lu_site *tmp;
+ unsigned long remain = sc->nr_to_scan, freed = 0;
+ LIST_HEAD(splice);
+
+ if (!(sc->gfp_mask & __GFP_FS))
+ /* We must not take the lu_sites_guard lock when
+ * __GFP_FS is *not* set because of the deadlock
+ * possibility detailed above. Additionally,
+ * since we cannot determine the number of
+ * objects in the cache without taking this
+ * lock, we're in a particularly tough spot. As
+ * a result, we'll just lie and say our cache is
+ * empty. This _should_ be ok, as we can't
+ * reclaim objects when __GFP_FS is *not* set
+ * anyways.
+ */
+ return SHRINK_STOP;
+
+ mutex_lock(&lu_sites_guard);
+ list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
+ freed = lu_site_purge(&lu_shrink_env, s, remain);
+ remain -= freed;
+ /*
+ * Move just shrunk site to the tail of site list to
+ * assure shrinking fairness.
+ */
+ list_move_tail(&s->ls_linkage, &splice);
+ }
+ list_splice(&splice, lu_sites.prev);
+ mutex_unlock(&lu_sites_guard);
+
+ return sc->nr_to_scan - remain;
+}
+
/*
* Debugging stuff.
*/
@@ -1913,6 +1919,12 @@ int lu_printk_printer(const struct lu_env *env,
return 0;
}
+static struct shrinker lu_site_shrinker = {
+ .count_objects = lu_cache_shrink_count,
+ .scan_objects = lu_cache_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
/**
* Initialization of global lu_* data.
*/
@@ -1947,9 +1959,7 @@ int lu_global_init(void)
* inode, one for ea. Unfortunately setting this high value results in
* lu_object/inode cache consuming all the memory.
*/
- lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink);
- if (lu_site_shrinker == NULL)
- return -ENOMEM;
+ register_shrinker(&lu_site_shrinker);
return result;
}
@@ -1959,11 +1969,7 @@ int lu_global_init(void)
*/
void lu_global_fini(void)
{
- if (lu_site_shrinker != NULL) {
- remove_shrinker(lu_site_shrinker);
- lu_site_shrinker = NULL;
- }
-
+ unregister_shrinker(&lu_site_shrinker);
lu_context_key_degister(&lu_global_key);
/*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 9013745..e90c8fb 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -121,13 +121,6 @@ static struct ptlrpc_enc_page_pool {
} page_pools;
/*
- * memory shrinker
- */
-const int pools_shrinker_seeks = DEFAULT_SEEKS;
-static struct shrinker *pools_shrinker = NULL;
-
-
-/*
* /proc/fs/lustre/sptlrpc/encrypt_page_pools
*/
int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
@@ -226,30 +219,46 @@ static void enc_pools_release_free_pages(long npages)
}
/*
- * could be called frequently for query (@nr_to_scan == 0).
* we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
*/
-static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static unsigned long enc_pools_shrink_count(struct shrinker *s,
+ struct shrink_control *sc)
{
- if (unlikely(shrink_param(sc, nr_to_scan) != 0)) {
+ /*
+ * if no pool access for a long time, we consider it's fully idle.
+ * a little race here is fine.
+ */
+ if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
+ CACHE_QUIESCENT_PERIOD)) {
spin_lock(&page_pools.epp_lock);
- shrink_param(sc, nr_to_scan) = min_t(unsigned long,
- shrink_param(sc, nr_to_scan),
- page_pools.epp_free_pages -
- PTLRPC_MAX_BRW_PAGES);
- if (shrink_param(sc, nr_to_scan) > 0) {
- enc_pools_release_free_pages(shrink_param(sc,
- nr_to_scan));
- CDEBUG(D_SEC, "released %ld pages, %ld left\n",
- (long)shrink_param(sc, nr_to_scan),
- page_pools.epp_free_pages);
-
- page_pools.epp_st_shrinks++;
- page_pools.epp_last_shrink = cfs_time_current_sec();
- }
+ page_pools.epp_idle_idx = IDLE_IDX_MAX;
spin_unlock(&page_pools.epp_lock);
}
+ LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
+ return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
+ (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
+}
+
+/*
+ * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
+ */
+static unsigned long enc_pools_shrink_scan(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ spin_lock(&page_pools.epp_lock);
+ sc->nr_to_scan = min_t(unsigned long, sc->nr_to_scan,
+ page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES);
+ if (sc->nr_to_scan > 0) {
+ enc_pools_release_free_pages(sc->nr_to_scan);
+ CDEBUG(D_SEC, "released %ld pages, %ld left\n",
+ (long)sc->nr_to_scan, page_pools.epp_free_pages);
+
+ page_pools.epp_st_shrinks++;
+ page_pools.epp_last_shrink = cfs_time_current_sec();
+ }
+ spin_unlock(&page_pools.epp_lock);
+
/*
* if no pool access for a long time, we consider it's fully idle.
* a little race here is fine.
@@ -262,8 +271,7 @@ static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
}
LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
- return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
- (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
+ return sc->nr_to_scan;
}
static inline
@@ -699,6 +707,12 @@ static inline void enc_pools_free(void)
sizeof(*page_pools.epp_pools));
}
+static struct shrinker pools_shrinker = {
+ .count_objects = enc_pools_shrink_count,
+ .scan_objects = enc_pools_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
int sptlrpc_enc_pool_init(void)
{
/*
@@ -736,12 +750,7 @@ int sptlrpc_enc_pool_init(void)
if (page_pools.epp_pools == NULL)
return -ENOMEM;
- pools_shrinker = set_shrinker(pools_shrinker_seeks,
- enc_pools_shrink);
- if (pools_shrinker == NULL) {
- enc_pools_free();
- return -ENOMEM;
- }
+ register_shrinker(&pools_shrinker);
return 0;
}
@@ -750,11 +759,10 @@ void sptlrpc_enc_pool_fini(void)
{
unsigned long cleaned, npools;
- LASSERT(pools_shrinker);
LASSERT(page_pools.epp_pools);
LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);
- remove_shrinker(pools_shrinker);
+ unregister_shrinker(&pools_shrinker);
npools = npages_to_npools(page_pools.epp_total_pages);
cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);