path: root/fs/mbcache.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-14 09:17:42 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-14 09:17:42 -0800
commit	5084fdf081739b7455c7aeecda6d7b83ec59c85f (patch)
tree	7e99b684979d0020213efa095c73b67810a740ca /fs/mbcache.c
parent	09cb6464fe5e7fcd5177911429badd139c4481b7 (diff)
parent	a551d7c8deefb6d9fb45a1de03a617dd064e0419 (diff)
Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
Pull ext4 updates from Ted Ts'o:
 "This merge request includes the dax-4.0-iomap-pmd branch which is
  needed for both ext4 and xfs dax changes to use iomap for DAX. It
  also includes the fscrypt branch which is needed for ubifs encryption
  work as well as ext4 encryption and fscrypt cleanups.

  Lots of cleanups and bug fixes, especially making sure ext4 is robust
  against maliciously corrupted file systems --- especially maliciously
  corrupted xattr blocks and a maliciously corrupted superblock. Also
  fix ext4 support for 64k block sizes so it works well on ppcle.

  Fixed mbcache so we don't miss some common xattr blocks that can be
  merged"

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (86 commits)
  dax: Fix sleep in atomic contex in grab_mapping_entry()
  fscrypt: Rename FS_WRITE_PATH_FL to FS_CTX_HAS_BOUNCE_BUFFER_FL
  fscrypt: Delay bounce page pool allocation until needed
  fscrypt: Cleanup page locking requirements for fscrypt_{decrypt,encrypt}_page()
  fscrypt: Cleanup fscrypt_{decrypt,encrypt}_page()
  fscrypt: Never allocate fscrypt_ctx on in-place encryption
  fscrypt: Use correct index in decrypt path.
  fscrypt: move the policy flags and encryption mode definitions to uapi header
  fscrypt: move non-public structures and constants to fscrypt_private.h
  fscrypt: unexport fscrypt_initialize()
  fscrypt: rename get_crypt_info() to fscrypt_get_crypt_info()
  fscrypto: move ioctl processing more fully into common code
  fscrypto: remove unneeded Kconfig dependencies
  MAINTAINERS: fscrypto: recommend linux-fsdevel for fscrypto patches
  ext4: do not perform data journaling when data is encrypted
  ext4: return -ENOMEM instead of success
  ext4: reject inodes with negative size
  ext4: remove another test in ext4_alloc_file_blocks()
  Documentation: fix description of ext4's block_validity mount option
  ext4: fix checks for data=ordered and journal_async_commit options
  ...
Diffstat (limited to 'fs/mbcache.c')
-rw-r--r--	fs/mbcache.c	41
1 file changed, 18 insertions(+), 23 deletions(-)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index c5bd19f..b19be429d 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -29,7 +29,7 @@ struct mb_cache {
/* log2 of hash table size */
int c_bucket_bits;
/* Maximum entries in cache to avoid degrading hash too much */
- int c_max_entries;
+ unsigned long c_max_entries;
/* Protects c_list, c_entry_count */
spinlock_t c_list_lock;
struct list_head c_list;
@@ -43,7 +43,7 @@ struct mb_cache {
static struct kmem_cache *mb_entry_cache;
static unsigned long mb_cache_shrink(struct mb_cache *cache,
- unsigned int nr_to_scan);
+ unsigned long nr_to_scan);
static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
u32 key)
@@ -155,12 +155,12 @@ out:
}
/*
- * mb_cache_entry_find_first - find the first entry in cache with given key
+ * mb_cache_entry_find_first - find the first reusable entry with the given key
* @cache: cache where we should search
* @key: key to look for
*
- * Search in @cache for entry with key @key. Grabs reference to the first
- * entry found and returns the entry.
+ * Search in @cache for a reusable entry with key @key. Grabs reference to the
+ * first reusable entry found and returns the entry.
*/
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
u32 key)
@@ -170,14 +170,14 @@ struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
EXPORT_SYMBOL(mb_cache_entry_find_first);
/*
- * mb_cache_entry_find_next - find next entry in cache with the same
+ * mb_cache_entry_find_next - find next reusable entry with the same key
* @cache: cache where we should search
* @entry: entry to start search from
*
- * Finds next entry in the hash chain which has the same key as @entry.
- * If @entry is unhashed (which can happen when deletion of entry races
- * with the search), finds the first entry in the hash chain. The function
- * drops reference to @entry and returns with a reference to the found entry.
+ * Finds next reusable entry in the hash chain which has the same key as @entry.
+ * If @entry is unhashed (which can happen when deletion of entry races with the
+ * search), finds the first reusable entry in the hash chain. The function drops
+ * reference to @entry and returns with a reference to the found entry.
*/
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
struct mb_cache_entry *entry)
@@ -274,11 +274,11 @@ static unsigned long mb_cache_count(struct shrinker *shrink,
/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
- unsigned int nr_to_scan)
+ unsigned long nr_to_scan)
{
struct mb_cache_entry *entry;
struct hlist_bl_head *head;
- unsigned int shrunk = 0;
+ unsigned long shrunk = 0;
spin_lock(&cache->c_list_lock);
while (nr_to_scan-- && !list_empty(&cache->c_list)) {
@@ -286,7 +286,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
struct mb_cache_entry, e_list);
if (entry->e_referenced) {
entry->e_referenced = 0;
- list_move_tail(&cache->c_list, &entry->e_list);
+ list_move_tail(&entry->e_list, &cache->c_list);
continue;
}
list_del_init(&entry->e_list);
@@ -316,10 +316,9 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
static unsigned long mb_cache_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
- int nr_to_scan = sc->nr_to_scan;
struct mb_cache *cache = container_of(shrink, struct mb_cache,
c_shrink);
- return mb_cache_shrink(cache, nr_to_scan);
+ return mb_cache_shrink(cache, sc->nr_to_scan);
}
/* We shrink 1/X of the cache when we have too many entries in it */
@@ -341,11 +340,8 @@ static void mb_cache_shrink_worker(struct work_struct *work)
struct mb_cache *mb_cache_create(int bucket_bits)
{
struct mb_cache *cache;
- int bucket_count = 1 << bucket_bits;
- int i;
-
- if (!try_module_get(THIS_MODULE))
- return NULL;
+ unsigned long bucket_count = 1UL << bucket_bits;
+ unsigned long i;
cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
if (!cache)
@@ -377,7 +373,6 @@ struct mb_cache *mb_cache_create(int bucket_bits)
return cache;
err_out:
- module_put(THIS_MODULE);
return NULL;
}
EXPORT_SYMBOL(mb_cache_create);
@@ -411,7 +406,6 @@ void mb_cache_destroy(struct mb_cache *cache)
}
kfree(cache->c_hash);
kfree(cache);
- module_put(THIS_MODULE);
}
EXPORT_SYMBOL(mb_cache_destroy);
@@ -420,7 +414,8 @@ static int __init mbcache_init(void)
mb_entry_cache = kmem_cache_create("mbcache",
sizeof(struct mb_cache_entry), 0,
SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
- BUG_ON(!mb_entry_cache);
+ if (!mb_entry_cache)
+ return -ENOMEM;
return 0;
}
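
The doc-comment hunks above spell out that mb_cache_entry_find_first() and mb_cache_entry_find_next() only return reusable entries, and that each call hands back a referenced entry while find_next drops the reference on the entry it was given. As a rough illustration of how a caller walks such a hash chain, here is a minimal sketch; the find_matching_entry() helper, its parameters, and the e_block field are illustrative assumptions based on the mbcache API of this era, not part of this patch.

/*
 * Illustrative sketch only (not part of this commit): walk every
 * reusable cache entry hashed under @key and return the one that
 * caches @wanted_block, or NULL if none matches.
 *
 * mb_cache_entry_find_first()/find_next() each return a referenced
 * entry; find_next() drops the reference on the entry it was passed,
 * so only the entry we return still needs mb_cache_entry_put().
 * The helper name and the e_block field are assumptions here.
 */
static struct mb_cache_entry *find_matching_entry(struct mb_cache *cache,
						  u32 key, u64 wanted_block)
{
	struct mb_cache_entry *entry;

	for (entry = mb_cache_entry_find_first(cache, key); entry;
	     entry = mb_cache_entry_find_next(cache, entry)) {
		if (entry->e_block == wanted_block)
			return entry;	/* caller must mb_cache_entry_put() */
	}
	return NULL;	/* no reusable entry for this key */
}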