From e21ab36a80c2e011a14149fecdd654b8b47d3e8c Mon Sep 17 00:00:00 2001
From: Alexei Starovoitov
Date: Tue, 28 Oct 2014 15:11:43 -0700
Subject: test: bpf: add a testcase reduced from nmap

nmap generates classic BPF programs to filter ARP packets with a given
target MAC, which triggered a bug in the eBPF x64 JIT. The bug was fixed
in commit e0ee9c12157d ("x86: bpf_jit: fix two bugs in eBPF JIT compiler").

This patch adds a testcase in eBPF instructions (those that were
generated by the classic->eBPF converter) to be processed by the JIT.
The test primarily targets the JIT compiler.

Signed-off-by: Alexei Starovoitov
Signed-off-by: David S. Miller
---
 lib/test_bpf.c | 43 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 43 insertions(+)

diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 23e070b..3f167d2 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1756,6 +1756,49 @@ static struct bpf_test tests[] = {
 		{ },
 		{ { 0, 1 } }
 	},
+	{
+		"nmap reduced",
+		.u.insns_int = {
+			BPF_MOV64_REG(R6, R1),
+			BPF_LD_ABS(BPF_H, 12),
+			BPF_JMP_IMM(BPF_JNE, R0, 0x806, 28),
+			BPF_LD_ABS(BPF_H, 12),
+			BPF_JMP_IMM(BPF_JNE, R0, 0x806, 26),
+			BPF_MOV32_IMM(R0, 18),
+			BPF_STX_MEM(BPF_W, R10, R0, -64),
+			BPF_LDX_MEM(BPF_W, R7, R10, -64),
+			BPF_LD_IND(BPF_W, R7, 14),
+			BPF_STX_MEM(BPF_W, R10, R0, -60),
+			BPF_MOV32_IMM(R0, 280971478),
+			BPF_STX_MEM(BPF_W, R10, R0, -56),
+			BPF_LDX_MEM(BPF_W, R7, R10, -56),
+			BPF_LDX_MEM(BPF_W, R0, R10, -60),
+			BPF_ALU32_REG(BPF_SUB, R0, R7),
+			BPF_JMP_IMM(BPF_JNE, R0, 0, 15),
+			BPF_LD_ABS(BPF_H, 12),
+			BPF_JMP_IMM(BPF_JNE, R0, 0x806, 13),
+			BPF_MOV32_IMM(R0, 22),
+			BPF_STX_MEM(BPF_W, R10, R0, -56),
+			BPF_LDX_MEM(BPF_W, R7, R10, -56),
+			BPF_LD_IND(BPF_H, R7, 14),
+			BPF_STX_MEM(BPF_W, R10, R0, -52),
+			BPF_MOV32_IMM(R0, 17366),
+			BPF_STX_MEM(BPF_W, R10, R0, -48),
+			BPF_LDX_MEM(BPF_W, R7, R10, -48),
+			BPF_LDX_MEM(BPF_W, R0, R10, -52),
+			BPF_ALU32_REG(BPF_SUB, R0, R7),
+			BPF_JMP_IMM(BPF_JNE, R0, 0, 2),
+			BPF_MOV32_IMM(R0, 256),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0,
+		  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		  0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6 },
+		{ { 38, 256 } }
+	},
 };
 
 static struct net_device dev;
-- cgit v1.1
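For readers unfamiliar with the classic BPF side of this: the kind of filter
nmap generates can be built and attached from userspace. A minimal sketch
that only matches the ARP EtherType (nmap's real filter also compares the
target MAC via BPF_LD|BPF_IND loads, which is exactly what the reduced
testcase above exercises); requires CAP_NET_RAW:

#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/filter.h>

int main(void)
{
	/* accept ARP frames (EtherType 0x0806 at offset 12), drop the rest */
	struct sock_filter insns[] = {
		BPF_STMT(BPF_LD  | BPF_H   | BPF_ABS, 12),           /* A = EtherType */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_ARP, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),                   /* accept */
		BPF_STMT(BPF_RET | BPF_K, 0),                        /* drop */
	};
	struct sock_fprog prog = {
		.len    = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
				 &prog, sizeof(prog)) < 0)
		perror("socket/SO_ATTACH_FILTER");
	return 0;
}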
From e5a2c899957659cd1a9f789bc462f9c0b35f5150 Mon Sep 17 00:00:00 2001
From: Hannes Frederic Sowa
Date: Wed, 5 Nov 2014 00:23:04 +0100
Subject: fast_hash: avoid indirect function calls

By default the arch_fast_hash hashing function pointers are initialized
to jhash(2). If during boot-up a CPU with SSE4.2 is detected, they get
updated to the CRC32 ones. This dispatching scheme incurs a function
pointer lookup and an indirect call for every hashing operation.
rhashtable, as a user of arch_fast_hash, e.g. stores pointers to hashing
functions in its structure, too, causing two indirect branches per
hashing operation.

Using alternative_call we can get away with one of those indirect
branches.

Acked-by: Daniel Borkmann
Cc: Thomas Graf
Signed-off-by: Hannes Frederic Sowa
Signed-off-by: David S. Miller
---
 lib/Makefile |  2 +-
 lib/hash.c   | 39 ---------------------------------------
 2 files changed, 1 insertion(+), 40 deletions(-)
 delete mode 100644 lib/hash.c

diff --git a/lib/Makefile b/lib/Makefile
index 7512dc9..04e53dd 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
 	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
-	 percpu-refcount.o percpu_ida.o hash.o rhashtable.o
+	 percpu-refcount.o percpu_ida.o rhashtable.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += kstrtox.o
diff --git a/lib/hash.c b/lib/hash.c
deleted file mode 100644
index fea973f..0000000
--- a/lib/hash.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/* General purpose hashing library
- *
- * That's a start of a kernel hashing library, which can be extended
- * with further algorithms in future. arch_fast_hash{2,}() will
- * eventually resolve to an architecture optimized implementation.
- *
- * Copyright 2013 Francesco Fusco
- * Copyright 2013 Daniel Borkmann
- * Copyright 2013 Thomas Graf
- * Licensed under the GNU General Public License, version 2.0 (GPLv2)
- */
-
-#include <linux/jhash.h>
-#include <linux/hash.h>
-#include <linux/cache.h>
-
-static struct fast_hash_ops arch_hash_ops __read_mostly = {
-	.hash  = jhash,
-	.hash2 = jhash2,
-};
-
-u32 arch_fast_hash(const void *data, u32 len, u32 seed)
-{
-	return arch_hash_ops.hash(data, len, seed);
-}
-EXPORT_SYMBOL_GPL(arch_fast_hash);
-
-u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
-{
-	return arch_hash_ops.hash2(data, len, seed);
-}
-EXPORT_SYMBOL_GPL(arch_fast_hash2);
-
-static int __init hashlib_init(void)
-{
-	setup_arch_fast_hash(&arch_hash_ops);
-	return 0;
-}
-early_initcall(hashlib_init);
-- cgit v1.1
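The dispatch scheme being optimized away looks roughly like the following
userspace sketch (the stand-in hash bodies and names are hypothetical; the
point is that even though the pointer is fixed once at init time, every call
still pays for a load plus an indirect branch, which is what patching the
call site with alternative_call avoids). Assumes GCC/Clang on x86:

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins for __jhash2 and the CRC32-based hash */
static uint32_t hash_sw(const void *data, uint32_t len, uint32_t seed)
{
	const uint8_t *p = data;
	uint32_t h = seed;

	while (len--)
		h = h * 31 + *p++;
	return h;
}

static uint32_t hash_hw(const void *data, uint32_t len, uint32_t seed)
{
	return hash_sw(data, len, seed ^ 0xdeadbeef);	/* placeholder */
}

/* the arch_fast_hash pattern: a pointer selected once at startup... */
static uint32_t (*fast_hash)(const void *, uint32_t, uint32_t) = hash_sw;

__attribute__((constructor))
static void fast_hash_init(void)
{
	if (__builtin_cpu_supports("sse4.2"))
		fast_hash = hash_hw;
	/* ...but every hashing operation below still goes through an
	 * indirect call */
}

int main(void)
{
	printf("%u\n", fast_hash("abc", 3, 0));
	return 0;
}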
From 1b2f309d70daf04b6a97b3753e375654532f6207 Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Thu, 13 Nov 2014 18:11:20 +0800
Subject: rhashtable: Move mutex_is_held under PROVE_LOCKING

The rhashtable function mutex_is_held is only used when PROVE_LOCKING
is enabled. This patch makes the mutex_is_held field in rhashtable
optional depending on PROVE_LOCKING.

Signed-off-by: Herbert Xu
Signed-off-by: David S. Miller
---
 lib/rhashtable.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 081be3b..c7654b6 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -532,7 +532,9 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
  *	.key_offset = offsetof(struct test_obj, key),
  *	.key_len = sizeof(int),
  *	.hashfn = arch_fast_hash,
+ * #ifdef CONFIG_PROVE_LOCKING
  *	.mutex_is_held = &my_mutex_is_held,
+ * #endif
  * };
  *
  * Configuration Example 2: Variable length keys
@@ -552,7 +554,9 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
  *	.head_offset = offsetof(struct test_obj, node),
  *	.hashfn = arch_fast_hash,
  *	.obj_hashfn = my_hash_fn,
+ * #ifdef CONFIG_PROVE_LOCKING
  *	.mutex_is_held = &my_mutex_is_held,
+ * #endif
  * };
  */
 int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
@@ -613,10 +617,12 @@ EXPORT_SYMBOL_GPL(rhashtable_destroy);
 #define TEST_PTR	((void *) 0xdeadbeef)
 #define TEST_NEXPANDS	4
 
+#ifdef CONFIG_PROVE_LOCKING
 static int test_mutex_is_held(void)
 {
 	return 1;
 }
+#endif
 
 struct test_obj {
 	void		*ptr;
@@ -767,7 +773,9 @@ static int __init test_rht_init(void)
 		.key_offset = offsetof(struct test_obj, value),
 		.key_len = sizeof(int),
 		.hashfn = arch_fast_hash,
+#ifdef CONFIG_PROVE_LOCKING
 		.mutex_is_held = &test_mutex_is_held,
+#endif
 		.grow_decision = rht_grow_above_75,
 		.shrink_decision = rht_shrink_below_30,
 	};
-- cgit v1.1
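On the caller side this means both the helper and the field assignment get
guarded, so the helper is not flagged as unused when lockdep is off. A
minimal sketch, assuming a global my_mutex and the struct test_obj layout
from the documentation examples above (the helper name is illustrative):

static DEFINE_MUTEX(my_mutex);

#ifdef CONFIG_PROVE_LOCKING
/* only referenced by lockdep; compiled out otherwise */
static int my_mutex_is_held(void)
{
	return lockdep_is_held(&my_mutex);
}
#endif

static struct rhashtable_params params = {
	.head_offset	= offsetof(struct test_obj, node),
	.key_offset	= offsetof(struct test_obj, key),
	.key_len	= sizeof(int),
	.hashfn		= arch_fast_hash,
#ifdef CONFIG_PROVE_LOCKING
	.mutex_is_held	= &my_mutex_is_held,
#endif
};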
From 7b4ce2353467fdab6e003be7a3129fb09b09deac Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Thu, 13 Nov 2014 18:11:22 +0800
Subject: rhashtable: Add parent argument to mutex_is_held

Currently mutex_is_held can only test locks that are global, since it
takes no arguments. This prevents rhashtable from being used in places
where locks are local, e.g., per-namespace locks.

This patch adds a parent field to mutex_is_held and rhashtable_params
so that local locks can be used (and tested).

Signed-off-by: Herbert Xu
Signed-off-by: David S. Miller
---
 lib/rhashtable.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index c7654b6..4b4b53b 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -32,7 +32,7 @@
 #ifdef CONFIG_PROVE_LOCKING
 int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
 {
-	return ht->p.mutex_is_held();
+	return ht->p.mutex_is_held(ht->p.parent);
 }
 EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
 #endif
@@ -618,7 +618,7 @@ EXPORT_SYMBOL_GPL(rhashtable_destroy);
 #define TEST_NEXPANDS	4
 
 #ifdef CONFIG_PROVE_LOCKING
-static int test_mutex_is_held(void)
+static int test_mutex_is_held(void *parent)
 {
 	return 1;
 }
-- cgit v1.1
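With the parent pointer threaded through, a per-namespace user can check its
own lock. A sketch assuming a hypothetical struct my_net that carries both
the table and its mutex (field placement relative to the PROVE_LOCKING
guard is an assumption here):

struct my_net {
	struct mutex		lock;
	struct rhashtable	ht;
};

#ifdef CONFIG_PROVE_LOCKING
static int my_net_mutex_is_held(void *parent)
{
	struct my_net *net = parent;	/* the .parent set up below */

	return lockdep_is_held(&net->lock);
}
#endif

static int my_net_init(struct my_net *net)
{
	struct rhashtable_params params = {
		/* ... key/offset setup as in the examples above ... */
#ifdef CONFIG_PROVE_LOCKING
		.mutex_is_held	= &my_net_mutex_is_held,
		.parent		= net,
#endif
	};

	return rhashtable_init(&net->ht, &params);
}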
From 6eba82248ef47fd478f940a418429e3ec95cb3db Mon Sep 17 00:00:00 2001
From: Thomas Graf
Date: Thu, 13 Nov 2014 13:45:46 +0100
Subject: rhashtable: Drop gfp_flags arg in insert/remove functions

Reallocation is only required for shrinking and expanding, both rely on
a mutex for synchronization, and callers of rhashtable_init() are in
non-atomic context. Therefore, there is no reason to continue passing
allocation hints through the API.

Instead, use GFP_KERNEL and add __GFP_NOWARN | __GFP_NORETRY to allow
for a silent fallback to vzalloc() without the OOM killer jumping in,
as pointed out by Eric Dumazet and Eric W. Biederman.

Signed-off-by: Thomas Graf
Signed-off-by: David S. Miller
---
 lib/rhashtable.c | 41 +++++++++++++++++------------------------
 1 file changed, 17 insertions(+), 24 deletions(-)

diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 4b4b53b..25e4c21 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -107,13 +107,13 @@ static u32 head_hashfn(const struct rhashtable *ht,
 	return obj_hashfn(ht, rht_obj(ht, he), hsize);
 }
 
-static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags)
+static struct bucket_table *bucket_table_alloc(size_t nbuckets)
 {
 	struct bucket_table *tbl;
 	size_t size;
 
 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
-	tbl = kzalloc(size, flags);
+	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
 	if (tbl == NULL)
 		tbl = vzalloc(size);
@@ -200,7 +200,6 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
 /**
  * rhashtable_expand - Expand hash table while allowing concurrent lookups
  * @ht:		the hash table to expand
- * @flags:	allocation flags
  *
  * A secondary bucket array is allocated and the hash entries are migrated
  * while keeping them on both lists until the end of the RCU grace period.
@@ -211,7 +210,7 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
  * The caller must ensure that no concurrent table mutations take place.
  * It is however valid to have concurrent lookups if they are RCU protected.
  */
-int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
+int rhashtable_expand(struct rhashtable *ht)
 {
 	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
 	struct rhash_head *he;
@@ -223,7 +222,7 @@ int rhashtable_expand(struct rhashtable *ht)
 	if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
 		return 0;
 
-	new_tbl = bucket_table_alloc(old_tbl->size * 2, flags);
+	new_tbl = bucket_table_alloc(old_tbl->size * 2);
 	if (new_tbl == NULL)
 		return -ENOMEM;
@@ -281,7 +280,6 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
 /**
  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
  * @ht:		the hash table to shrink
- * @flags:	allocation flags
  *
  * This function may only be called in a context where it is safe to call
  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
@@ -289,7 +287,7 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
  * The caller must ensure that no concurrent table mutations take place.
  * It is however valid to have concurrent lookups if they are RCU protected.
  */
-int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
+int rhashtable_shrink(struct rhashtable *ht)
 {
 	struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
 	struct rhash_head __rcu **pprev;
@@ -300,7 +298,7 @@ int rhashtable_shrink(struct rhashtable *ht)
 	if (ht->shift <= ht->p.min_shift)
 		return 0;
 
-	ntbl = bucket_table_alloc(tbl->size / 2, flags);
+	ntbl = bucket_table_alloc(tbl->size / 2);
 	if (ntbl == NULL)
 		return -ENOMEM;
@@ -341,7 +339,6 @@ EXPORT_SYMBOL_GPL(rhashtable_shrink);
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
- * @flags:	allocation flags (table expansion)
 *
 * Will automatically grow the table via rhashtable_expand() if the
 * grow_decision function specified at rhashtable_init() returns true.
@@ -349,8 +346,7 @@ EXPORT_SYMBOL_GPL(rhashtable_shrink);
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
-		       gfp_t flags)
+void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 {
 	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
 	u32 hash;
@@ -363,7 +359,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 	ht->nelems++;
 
 	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
-		rhashtable_expand(ht, flags);
+		rhashtable_expand(ht);
 }
 EXPORT_SYMBOL_GPL(rhashtable_insert);
@@ -372,14 +368,13 @@ EXPORT_SYMBOL_GPL(rhashtable_insert);
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @pprev:	pointer to previous element
- * @flags:	allocation flags (table expansion)
 *
 * Identical to rhashtable_remove() but caller is already aware of the element
 * in front of the element to be deleted. This is in particular useful for
 * deletion when combined with walking or lookup.
 */
 void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
-			     struct rhash_head __rcu **pprev, gfp_t flags)
+			     struct rhash_head __rcu **pprev)
 {
 	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
@@ -390,7 +385,7 @@ void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
 
 	if (ht->p.shrink_decision &&
 	    ht->p.shrink_decision(ht, tbl->size))
-		rhashtable_shrink(ht, flags);
+		rhashtable_shrink(ht);
 }
 EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
@@ -398,7 +393,6 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
- * @flags:	allocation flags (table expansion)
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
@@ -410,8 +404,7 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
-bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
-		       gfp_t flags)
+bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
 {
 	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
 	struct rhash_head __rcu **pprev;
@@ -429,7 +422,7 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
 			continue;
 		}
 
-		rhashtable_remove_pprev(ht, he, pprev, flags);
+		rhashtable_remove_pprev(ht, he, pprev);
 		return true;
 	}
@@ -576,7 +569,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 	if (params->nelem_hint)
 		size = rounded_hashtable_size(params);
 
-	tbl = bucket_table_alloc(size, GFP_KERNEL);
+	tbl = bucket_table_alloc(size);
 	if (tbl == NULL)
 		return -ENOMEM;
@@ -713,7 +706,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
 		obj->ptr = TEST_PTR;
 		obj->value = i * 2;
 
-		rhashtable_insert(ht, &obj->node, GFP_KERNEL);
+		rhashtable_insert(ht, &obj->node);
 	}
 
 	rcu_read_lock();
@@ -724,7 +717,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
 	for (i = 0; i < TEST_NEXPANDS; i++) {
 		pr_info("  Table expansion iteration %u...\n", i);
-		rhashtable_expand(ht, GFP_KERNEL);
+		rhashtable_expand(ht);
 
 		rcu_read_lock();
 		pr_info("  Verifying lookups...\n");
@@ -734,7 +727,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
 	for (i = 0; i < TEST_NEXPANDS; i++) {
 		pr_info("  Table shrinkage iteration %u...\n", i);
-		rhashtable_shrink(ht, GFP_KERNEL);
+		rhashtable_shrink(ht);
 
 		rcu_read_lock();
 		pr_info("  Verifying lookups...\n");
@@ -749,7 +742,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
 		obj = rhashtable_lookup(ht, &key);
 		BUG_ON(!obj);
 
-		rhashtable_remove(ht, &obj->node, GFP_KERNEL);
+		rhashtable_remove(ht, &obj->node);
 		kfree(obj);
 	}
-- cgit v1.1
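Dropping the flag argument works because the table mutex guarantees process
context, so allocation can always try kmalloc first and quietly fall back to
vmalloc, with a single free helper covering both cases. A condensed sketch
of the pair (later kernels expose the same idea directly as
kvzalloc()/kvfree()):

static struct bucket_table *bucket_table_alloc(size_t nbuckets)
{
	struct bucket_table *tbl;
	size_t size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);

	/* __GFP_NOWARN: a failed kzalloc() is expected for large tables
	 * and handled by the vzalloc() fallback, so don't log it */
	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (tbl == NULL)
		tbl = vzalloc(size);

	return tbl;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	kvfree(tbl);	/* frees either a kmalloc'ed or vmalloc'ed table */
}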
From a77f9c5dcdf8480a93332792c336fa2bf9d31229 Mon Sep 17 00:00:00 2001
From: Jay Vosburgh
Date: Fri, 14 Nov 2014 11:05:06 -0800
Subject: Revert "fast_hash: avoid indirect function calls"

This reverts commit e5a2c899957659cd1a9f789bc462f9c0b35f5150.

Commit e5a2c899 introduced an alternative_call, arch_fast_hash2, that
selects between __jhash2 and __intel_crc4_2_hash based on the
X86_FEATURE_XMM4_2.

Unfortunately, the alternative_call system does not appear to be
suitable for use with C functions, as register usage is not handled
properly for the called functions. The __jhash2 function in particular
clobbers registers that are not preserved when called via
alternative_call, resulting in a panic for direct callers of
arch_fast_hash2 on older CPUs lacking sse4_2. It is possible that
__intel_crc4_2_hash works merely by chance because it uses fewer
registers.

This commit was suggested as the source of the problem by Jesse Gross.

Signed-off-by: Jay Vosburgh
Signed-off-by: David S. Miller
---
 lib/Makefile |  2 +-
 lib/hash.c   | 39 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 40 insertions(+), 1 deletion(-)
 create mode 100644 lib/hash.c

diff --git a/lib/Makefile b/lib/Makefile
index 04e53dd..7512dc9 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
 	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
-	 percpu-refcount.o percpu_ida.o rhashtable.o
+	 percpu-refcount.o percpu_ida.o hash.o rhashtable.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += kstrtox.o
diff --git a/lib/hash.c b/lib/hash.c
new file mode 100644
index 0000000..fea973f
--- /dev/null
+++ b/lib/hash.c
@@ -0,0 +1,39 @@
+/* General purpose hashing library
+ *
+ * That's a start of a kernel hashing library, which can be extended
+ * with further algorithms in future. arch_fast_hash{2,}() will
+ * eventually resolve to an architecture optimized implementation.
+ *
+ * Copyright 2013 Francesco Fusco
+ * Copyright 2013 Daniel Borkmann
+ * Copyright 2013 Thomas Graf
+ * Licensed under the GNU General Public License, version 2.0 (GPLv2)
+ */
+
+#include <linux/jhash.h>
+#include <linux/hash.h>
+#include <linux/cache.h>
+
+static struct fast_hash_ops arch_hash_ops __read_mostly = {
+	.hash  = jhash,
+	.hash2 = jhash2,
+};
+
+u32 arch_fast_hash(const void *data, u32 len, u32 seed)
+{
+	return arch_hash_ops.hash(data, len, seed);
+}
+EXPORT_SYMBOL_GPL(arch_fast_hash);
+
+u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
+{
+	return arch_hash_ops.hash2(data, len, seed);
+}
+EXPORT_SYMBOL_GPL(arch_fast_hash2);
+
+static int __init hashlib_init(void)
+{
+	setup_arch_fast_hash(&arch_hash_ops);
+	return 0;
+}
+early_initcall(hashlib_init);
-- cgit v1.1
From 3e7b2ec4fe8ef4b05b33db3e84d1b1fbccde250e Mon Sep 17 00:00:00 2001
From: Thomas Graf
Date: Mon, 24 Nov 2014 12:37:58 +0100
Subject: rhashtable: Check for count mismatch while iterating in selftest

Verify whether both the lock and RCU protected iterators see all test
entries before and after expanding and shrinking has been performed.
Also verify whether the number of entries in the hashtable remains
stable during expansion and shrinking.

Signed-off-by: Thomas Graf
Signed-off-by: David S. Miller
---
 lib/rhashtable.c | 27 ++++++++++++++++++++-------
 1 file changed, 20 insertions(+), 7 deletions(-)

diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index e5f5e69..c7e987a 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -653,15 +653,15 @@ static int __init test_rht_lookup(struct rhashtable *ht)
 	return 0;
 }
 
-static void test_bucket_stats(struct rhashtable *ht,
-			      struct bucket_table *tbl,
-			      bool quiet)
+static void test_bucket_stats(struct rhashtable *ht, bool quiet)
 {
-	unsigned int cnt, i, total = 0;
+	unsigned int cnt, rcu_cnt, i, total = 0;
 	struct test_obj *obj;
+	struct bucket_table *tbl;
 
+	tbl = rht_dereference_rcu(ht->tbl, ht);
 	for (i = 0; i < tbl->size; i++) {
-		cnt = 0;
+		rcu_cnt = cnt = 0;
 
 		if (!quiet)
 			pr_info(" [%#4x/%zu]", i, tbl->size);
@@ -673,6 +673,13 @@ static void test_bucket_stats(struct rhashtable *ht, bool quiet)
 				pr_cont(" [%p],", obj);
 		}
 
+		rht_for_each_entry_rcu(obj, tbl->buckets[i], node)
+			rcu_cnt++;
+
+		if (rcu_cnt != cnt)
+			pr_warn("Test failed: Chain count mismatch %d != %d",
+				cnt, rcu_cnt);
+
 		if (!quiet)
 			pr_cont("\n  [%#x] first element: %p, chain length: %u\n",
 				i, tbl->buckets[i], cnt);
@@ -680,6 +687,9 @@ static void test_bucket_stats(struct rhashtable *ht, bool quiet)
 
 	pr_info("  Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
 		total, ht->nelems, TEST_ENTRIES);
+
+	if (total != ht->nelems || total != TEST_ENTRIES)
+		pr_warn("Test failed: Total count mismatch ^^^");
 }
 
 static int __init test_rhashtable(struct rhashtable *ht)
@@ -710,8 +720,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
 	}
 
 	rcu_read_lock();
-	tbl = rht_dereference_rcu(ht->tbl, ht);
-	test_bucket_stats(ht, tbl, true);
+	test_bucket_stats(ht, true);
 	test_rht_lookup(ht);
 	rcu_read_unlock();
@@ -735,6 +744,10 @@ static int __init test_rhashtable(struct rhashtable *ht)
 		rcu_read_unlock();
 	}
 
+	rcu_read_lock();
+	test_bucket_stats(ht, true);
+	rcu_read_unlock();
+
 	pr_info("  Deleting %d keys\n", TEST_ENTRIES);
 	for (i = 0; i < TEST_ENTRIES; i++) {
 		u32 key = i * 2;
-- cgit v1.1
From 6867b17b26d80cfd419e491141feb75082915979 Mon Sep 17 00:00:00 2001
From: Denis Kirjanov
Date: Mon, 1 Dec 2014 13:12:25 +0300
Subject: test: bpf: expand DIV_KX to DIV_MOD_KX

Expand DIV_KX to use the BPF_MOD operation in the DIV_KX bpf 'classic'
test.

CC: Alexei Starovoitov
Signed-off-by: Denis Kirjanov
Signed-off-by: David S. Miller
---
 lib/test_bpf.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 3f167d2..80d78c5 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -124,7 +124,7 @@ static struct bpf_test tests[] = {
 		{ { 0, 0xfffffffd } }
 	},
 	{
-		"DIV_KX",
+		"DIV_MOD_KX",
 		.u.insns = {
 			BPF_STMT(BPF_LD | BPF_IMM, 8),
 			BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2),
@@ -134,12 +134,18 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_MISC | BPF_TAX, 0),
 			BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
 			BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
+			BPF_STMT(BPF_ALU | BPF_MOD | BPF_X, 0),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
+			BPF_STMT(BPF_ALU | BPF_MOD | BPF_K, 0x70000000),
 			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
 		CLASSIC | FLAG_NO_DATA,
 		{ },
-		{ { 0, 0x40000001 } }
+		{ { 0, 0x20000000 } }
 	},
 	{
 		"AND_OR_LSH_K",
-- cgit v1.1
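The new expected value can be verified by tracing the extended program with
unsigned 32-bit arithmetic (A is the accumulator, X the index register):

/* A = 8 / 2                    = 4;           X = A
 * A = 0xffffffff / X           = 0x3fffffff;  X = A
 * A = 0xffffffff / 0x70000000  = 2;           X = A
 * A = 0xffffffff % X           = 1;           X = A   (new MOD|X step)
 * A = 0xffffffff % 0x70000000  = 0x1fffffff           (new MOD|K step)
 * A = A + X                    = 0x20000000   -> returned value
 */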
From 218321e7a0838c2be974539f0a5341b398d4432b Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Mon, 24 Nov 2014 19:45:05 -0500
Subject: bury memcpy_toiovec()

no users left

Signed-off-by: Al Viro
---
 lib/iovec.c | 25 -------------------------
 1 file changed, 25 deletions(-)

diff --git a/lib/iovec.c b/lib/iovec.c
index df3abd1..2d99cb4 100644
--- a/lib/iovec.c
+++ b/lib/iovec.c
@@ -29,31 +29,6 @@ EXPORT_SYMBOL(memcpy_fromiovec);
 
 /*
  *	Copy kernel to iovec. Returns -EFAULT on error.
- *
- *	Note: this modifies the original iovec.
- */
-
-int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
-{
-	while (len > 0) {
-		if (iov->iov_len) {
-			int copy = min_t(unsigned int, iov->iov_len, len);
-			if (copy_to_user(iov->iov_base, kdata, copy))
-				return -EFAULT;
-			kdata += copy;
-			len -= copy;
-			iov->iov_len -= copy;
-			iov->iov_base += copy;
-		}
-		iov++;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(memcpy_toiovec);
-
-/*
- *	Copy kernel to iovec. Returns -EFAULT on error.
 */
 
 int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
-- cgit v1.1
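Kernel-to-user copies of this kind go through the iov_iter API instead,
which is why no users remain. A rough sketch of the modern equivalent of
what memcpy_toiovec() used to do, assuming the iov_iter interface of this
era (iov_iter_init()/copy_to_iter(); the helper name is illustrative):

#include <linux/uio.h>

/* copy len bytes of kernel data to a user iovec via an iov_iter;
 * unlike memcpy_toiovec(), the iovec array itself is left untouched
 * and the iterator tracks progress instead */
static int copy_to_user_iovec(const struct iovec *iov, unsigned long nr_segs,
			      const void *kdata, size_t len)
{
	struct iov_iter iter;

	iov_iter_init(&iter, READ, iov, nr_segs, len);
	if (copy_to_iter(kdata, len, &iter) != len)
		return -EFAULT;
	return 0;
}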
From 87545899b52f9c8b1621be4347f443890c0cb196 Mon Sep 17 00:00:00 2001
From: Daniel Borkmann
Date: Wed, 10 Dec 2014 16:33:11 +0100
Subject: net: replace remaining users of arch_fast_hash with jhash

This patch effectively reverts commit 500f80872645 ("net: ovs: use CRC32
accelerated flow hash if available"), and other remaining arch_fast_hash()
users such as from nfsd via commit 6282cd565553 ("NFSD: Don't hand out
delegations for 30 seconds after recalling them.") where it has been used
as a hash function for bloom filtering.

While we think that these users are actually not much of a concern, it
has been requested to remove the arch_fast_hash() library bits that
arose from [1] entirely as per recent discussion [2]. The main argument
is that using it as a hash may introduce bias due to its linearity (see
avalanche criterion) and thus makes it less clear (though we tried to
document that) when this security/performance trade-off is actually
acceptable for a general-purpose library function.

Let's therefore avoid any further confusion on this matter and remove it
to prevent any future accidental misuse of it. For the time being, this
is going to make hashing of flow keys a bit more expensive in the ovs
case, but future work could reevaluate a different hashing discipline.

[1] https://patchwork.ozlabs.org/patch/299369/
[2] https://patchwork.ozlabs.org/patch/418756/

Cc: Neil Brown
Cc: Francesco Fusco
Cc: Jesse Gross
Cc: Thomas Graf
Signed-off-by: Daniel Borkmann
Signed-off-by: David S. Miller
---
 lib/rhashtable.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index c7e987a..6c3c723 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -20,7 +20,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
-#include <linux/hash.h>
+#include <linux/jhash.h>
 #include <linux/random.h>
 #include <linux/rhashtable.h>
@@ -524,7 +524,7 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
  *	.head_offset = offsetof(struct test_obj, node),
  *	.key_offset = offsetof(struct test_obj, key),
  *	.key_len = sizeof(int),
- *	.hashfn = arch_fast_hash,
+ *	.hashfn = jhash,
  * #ifdef CONFIG_PROVE_LOCKING
  *	.mutex_is_held = &my_mutex_is_held,
  * #endif
@@ -545,7 +545,7 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
- *	.hashfn = arch_fast_hash,
+ *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * #ifdef CONFIG_PROVE_LOCKING
 *	.mutex_is_held = &my_mutex_is_held,
@@ -778,7 +778,7 @@ static int __init test_rht_init(void)
 		.head_offset = offsetof(struct test_obj, node),
 		.key_offset = offsetof(struct test_obj, value),
 		.key_len = sizeof(int),
-		.hashfn = arch_fast_hash,
+		.hashfn = jhash,
 #ifdef CONFIG_PROVE_LOCKING
 		.mutex_is_held = &test_mutex_is_held,
 #endif
-- cgit v1.1
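The linearity concern is straightforward to demonstrate: CRC32C is a linear
code over GF(2), so with a zero seed equal-length inputs satisfy
crc(a ^ b) = crc(a) ^ crc(b), meaning structured key sets collide in
structured ways (poor avalanche behavior). A quick userspace check of this
property, assuming GCC/Clang on an SSE4.2-capable x86 (compile with
-msse4.2); jhash, by design, has no such relation:

#include <stdint.h>
#include <stdio.h>
#include <nmmintrin.h>	/* SSE4.2 CRC32C intrinsics */

int main(void)
{
	uint32_t a = 0xdeadbeef, b = 0x8badf00d;

	/* raw CRC32C update with zero seed (no pre/post inversion) */
	uint32_t ca  = _mm_crc32_u32(0, a);
	uint32_t cb  = _mm_crc32_u32(0, b);
	uint32_t cab = _mm_crc32_u32(0, a ^ b);

	printf("crc(a)^crc(b) = %#x, crc(a^b) = %#x -> %s\n",
	       ca ^ cb, cab, (ca ^ cb) == cab ? "linear" : "not linear");
	return 0;
}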
From 0cb6c969ed9de43687abdfc63714b6fe4385d2fc Mon Sep 17 00:00:00 2001
From: Daniel Borkmann
Date: Wed, 10 Dec 2014 16:33:12 +0100
Subject: net, lib: kill arch_fast_hash library bits

As there are now no remaining users of arch_fast_hash(), let's kill it
entirely.

This basically reverts commit 71ae8aac3e19 ("lib: introduce arch
optimized hash library") and follow-up work, that is, e.g., commit
237217546d44 ("lib: hash: follow-up fixups for arch hash"), commit
e3fec2f74f7f ("lib: Add missing arch generic-y entries for
asm-generic/hash.h") and last but not least commit 6a02652df511 ("perf
tools: Fix include for non x86 architectures").

Cc: Francesco Fusco
Cc: Thomas Graf
Cc: Arnaldo Carvalho de Melo
Signed-off-by: Daniel Borkmann
Signed-off-by: David S. Miller
---
 lib/Makefile |  2 +-
 lib/hash.c   | 39 ---------------------------------------
 2 files changed, 1 insertion(+), 40 deletions(-)
 delete mode 100644 lib/hash.c

diff --git a/lib/Makefile b/lib/Makefile
index 0211d2b..4b9baa4 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
 	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
-	 percpu-refcount.o percpu_ida.o hash.o rhashtable.o reciprocal_div.o
+	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += kstrtox.o
diff --git a/lib/hash.c b/lib/hash.c
deleted file mode 100644
index fea973f..0000000
--- a/lib/hash.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/* General purpose hashing library
- *
- * That's a start of a kernel hashing library, which can be extended
- * with further algorithms in future. arch_fast_hash{2,}() will
- * eventually resolve to an architecture optimized implementation.
- *
- * Copyright 2013 Francesco Fusco
- * Copyright 2013 Daniel Borkmann
- * Copyright 2013 Thomas Graf
- * Licensed under the GNU General Public License, version 2.0 (GPLv2)
- */
-
-#include <linux/jhash.h>
-#include <linux/hash.h>
-#include <linux/cache.h>
-
-static struct fast_hash_ops arch_hash_ops __read_mostly = {
-	.hash  = jhash,
-	.hash2 = jhash2,
-};
-
-u32 arch_fast_hash(const void *data, u32 len, u32 seed)
-{
-	return arch_hash_ops.hash(data, len, seed);
-}
-EXPORT_SYMBOL_GPL(arch_fast_hash);
-
-u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
-{
-	return arch_hash_ops.hash2(data, len, seed);
-}
-EXPORT_SYMBOL_GPL(arch_fast_hash2);
-
-static int __init hashlib_init(void)
-{
-	setup_arch_fast_hash(&arch_hash_ops);
-	return 0;
-}
-early_initcall(hashlib_init);
-- cgit v1.1