author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2018-03-28 13:33:37 +0200
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2018-03-28 13:33:37 +0200
commit     a0306db6e5758b0488c79e739de3c0ffe47ea62f
tree       86d17769496f96554f89bfc3be02b645a420889f
parent     4bc07aa4f030d4d3e551ce8d96ca15b6424a5641
parent     3eb2ce825ea1ad89d20f7a3b5780df850e4be274
Merge 4.16-rc7 into staging-next
We want the IIO and staging driver fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'lib')
-rw-r--r--  lib/ioremap.c            6
-rw-r--r--  lib/percpu-refcount.c    2
-rw-r--r--  lib/rhashtable.c         4
-rw-r--r--  lib/test_bpf.c           2
-rw-r--r--  lib/test_rhashtable.c  134
5 files changed, 144 insertions, 4 deletions
diff --git a/lib/ioremap.c b/lib/ioremap.c
index b808a39..54e5bba 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -91,7 +91,8 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
if (ioremap_pmd_enabled() &&
((next - addr) == PMD_SIZE) &&
- IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
+ IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
+ pmd_free_pte_page(pmd)) {
if (pmd_set_huge(pmd, phys_addr + addr, prot))
continue;
}
@@ -117,7 +118,8 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
if (ioremap_pud_enabled() &&
((next - addr) == PUD_SIZE) &&
- IS_ALIGNED(phys_addr + addr, PUD_SIZE)) {
+ IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
+ pud_free_pmd_page(pud)) {
if (pud_set_huge(pud, phys_addr + addr, prot))
continue;
}
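
The two hunks above share one rationale: a huge mapping must not be written over a pmd/pud entry that still points at a lower-level page table left behind by an earlier small-page mapping, or that table is orphaned. The new pmd_free_pte_page()/pud_free_pmd_page() calls free any such stale table and return nonzero only when installing the huge entry is safe. For reference, the fixed pmd-level check with each condition annotated (a sketch reconstructed from the hunk, not a verbatim quote of the file):

	if (ioremap_pmd_enabled() &&			/* arch/config allow huge PMDs */
	    ((next - addr) == PMD_SIZE) &&		/* range spans one full PMD */
	    IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&	/* physical side aligned too */
	    pmd_free_pte_page(pmd)) {			/* new: stale PTE table (if any)
							 * was freed, so a huge entry
							 * will not orphan it */
		if (pmd_set_huge(pmd, phys_addr + addr, prot))
			continue;			/* mapped; skip the PTE loop */
	}

The pud-level hunk applies the identical pattern one level up.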
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 30e7dd8..9f96fa7 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -322,6 +322,8 @@ EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);
* This function normally doesn't block and can be called from any context
* but it may block if @confirm_kill is specified and @ref is in the
* process of switching to atomic mode by percpu_ref_switch_to_atomic().
+ *
+ * There are no implied RCU grace periods between kill and release.
*/
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill)
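
The added sentence documents a subtle contract: percpu_ref provides no RCU grace period between kill and the release callback, so a caller that frees RCU-protected data must arrange the grace period itself. A minimal usage sketch under that assumption; my_obj, my_obj_release and my_obj_dying are illustrative names, not from this commit:

	#include <linux/percpu-refcount.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct my_obj {
		struct percpu_ref ref;
		struct rcu_head rcu;
	};

	static void my_obj_free_rcu(struct rcu_head *rcu)
	{
		kfree(container_of(rcu, struct my_obj, rcu));
	}

	static void my_obj_release(struct percpu_ref *ref)
	{
		struct my_obj *obj = container_of(ref, struct my_obj, ref);

		/* Per the comment added above, no RCU grace period is
		 * implied between kill and release, so defer the free
		 * explicitly while RCU readers may still hold @obj. */
		call_rcu(&obj->rcu, my_obj_free_rcu);
	}

	static void my_obj_dying(struct percpu_ref *ref)
	{
		/* Runs once the ref has switched to atomic mode; from
		 * here on percpu_ref_tryget_live() fails for new users. */
	}

	static struct my_obj *my_obj_create(void)
	{
		struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (obj && percpu_ref_init(&obj->ref, my_obj_release,
					   0, GFP_KERNEL)) {
			kfree(obj);
			obj = NULL;
		}
		return obj;
	}

	static void my_obj_shutdown(struct my_obj *obj)
	{
		/* Drops the initial reference; my_obj_release() fires
		 * when the last remaining reference goes away. */
		percpu_ref_kill_and_confirm(&obj->ref, my_obj_dying);
	}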
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 3825c30..47de025 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -506,8 +506,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
if (!key ||
(ht->p.obj_cmpfn ?
ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
- rhashtable_compare(&arg, rht_obj(ht, head))))
+ rhashtable_compare(&arg, rht_obj(ht, head)))) {
+ pprev = &head->next;
continue;
+ }
if (!ht->rhlist)
return rht_obj(ht, head);
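
The one-line fix advances pprev past every non-matching element; before it, pprev stayed at the bucket head, so a later write through *pprev (when splicing a duplicate into an rhlist) could detach the intervening elements. The same pprev idiom, shown with deletion in a standalone user-space C program (an illustration of the pattern, not the rhashtable code):

	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		int key;
		struct node *next;
	};

	/* Remove the first node carrying @key; returns 1 if one was
	 * removed.  pprev always points at the link that leads to the
	 * current node, so unlinking is a single store. */
	static int list_remove(struct node **head, int key)
	{
		struct node **pprev = head;
		struct node *pos;

		for (pos = *head; pos; pos = pos->next) {
			if (pos->key != key) {
				pprev = &pos->next;	/* the line the fix adds */
				continue;
			}
			*pprev = pos->next;	/* without the advance above this
						 * would clobber the list head and
						 * leak every skipped node */
			free(pos);
			return 1;
		}
		return 0;
	}

	int main(void)
	{
		struct node *head = NULL, *n;
		int keys[] = { 3, 2, 1 };	/* pushing at head builds 1->2->3 */
		size_t i;

		for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
			n = malloc(sizeof(*n));
			n->key = keys[i];
			n->next = head;
			head = n;
		}

		list_remove(&head, 3);
		for (n = head; n; n = n->next)
			printf("%d ", n->key);	/* prints: 1 2 */
		printf("\n");
		return 0;
	}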
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 2efb213..3e93354 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -5467,7 +5467,7 @@ static struct bpf_test tests[] = {
{
"BPF_MAXINSNS: Jump, gap, jump, ...",
{ },
-#ifdef CONFIG_BPF_JIT_ALWAYS_ON
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_X86)
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
#else
CLASSIC | FLAG_NO_DATA,
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 76d3667..f4000c1 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -79,6 +79,21 @@ struct thread_data {
struct test_obj *objs;
};
+static u32 my_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct test_obj_rhl *obj = data;
+
+ return (obj->value.id % 10) << RHT_HASH_RESERVED_SPACE;
+}
+
+static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
+{
+ const struct test_obj_rhl *test_obj = obj;
+ const struct test_obj_val *val = arg->key;
+
+ return test_obj->value.id - val->id;
+}
+
static struct rhashtable_params test_rht_params = {
.head_offset = offsetof(struct test_obj, node),
.key_offset = offsetof(struct test_obj, value),
@@ -87,6 +102,17 @@ static struct rhashtable_params test_rht_params = {
.nulls_base = (3U << RHT_BASE_SHIFT),
};
+static struct rhashtable_params test_rht_params_dup = {
+ .head_offset = offsetof(struct test_obj_rhl, list_node),
+ .key_offset = offsetof(struct test_obj_rhl, value),
+ .key_len = sizeof(struct test_obj_val),
+ .hashfn = jhash,
+ .obj_hashfn = my_hashfn,
+ .obj_cmpfn = my_cmpfn,
+ .nelem_hint = 128,
+ .automatic_shrinking = false,
+};
+
static struct semaphore prestart_sem;
static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
@@ -465,6 +491,112 @@ static int __init test_rhashtable_max(struct test_obj *array,
return err;
}
+static unsigned int __init print_ht(struct rhltable *rhlt)
+{
+ struct rhashtable *ht;
+ const struct bucket_table *tbl;
+ char buff[512] = "";
+ unsigned int i, cnt = 0;
+
+ ht = &rhlt->ht;
+ tbl = rht_dereference(ht->tbl, ht);
+ for (i = 0; i < tbl->size; i++) {
+ struct rhash_head *pos, *next;
+ struct test_obj_rhl *p;
+
+ pos = rht_dereference(tbl->buckets[i], ht);
+ next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL;
+
+ if (!rht_is_a_nulls(pos)) {
+ sprintf(buff, "%s\nbucket[%d] -> ", buff, i);
+ }
+
+ while (!rht_is_a_nulls(pos)) {
+ struct rhlist_head *list = container_of(pos, struct rhlist_head, rhead);
+ sprintf(buff, "%s[[", buff);
+ do {
+ pos = &list->rhead;
+ list = rht_dereference(list->next, ht);
+ p = rht_obj(ht, pos);
+
+ sprintf(buff, "%s val %d (tid=%d)%s", buff, p->value.id, p->value.tid,
+ list? ", " : " ");
+ cnt++;
+ } while (list);
+
+ pos = next,
+ next = !rht_is_a_nulls(pos) ?
+ rht_dereference(pos->next, ht) : NULL;
+
+ sprintf(buff, "%s]]%s", buff, !rht_is_a_nulls(pos) ? " -> " : "");
+ }
+ }
+ printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
+
+ return cnt;
+}
+
+static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
+ int cnt, bool slow)
+{
+ struct rhltable rhlt;
+ unsigned int i, ret;
+ const char *key;
+ int err = 0;
+
+ err = rhltable_init(&rhlt, &test_rht_params_dup);
+ if (WARN_ON(err))
+ return err;
+
+ for (i = 0; i < cnt; i++) {
+ rhl_test_objects[i].value.tid = i;
+ key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead);
+ key += test_rht_params_dup.key_offset;
+
+ if (slow) {
+ err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key,
+ &rhl_test_objects[i].list_node.rhead));
+ if (err == -EAGAIN)
+ err = 0;
+ } else
+ err = rhltable_insert(&rhlt,
+ &rhl_test_objects[i].list_node,
+ test_rht_params_dup);
+ if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
+ goto skip_print;
+ }
+
+ ret = print_ht(&rhlt);
+ WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");
+
+skip_print:
+ rhltable_destroy(&rhlt);
+
+ return 0;
+}
+
+static int __init test_insert_duplicates_run(void)
+{
+ struct test_obj_rhl rhl_test_objects[3] = {};
+
+ pr_info("test inserting duplicates\n");
+
+ /* two different values that map to same bucket */
+ rhl_test_objects[0].value.id = 1;
+ rhl_test_objects[1].value.id = 21;
+
+ /* and another duplicate with same as [0] value
+ * which will be second on the bucket list */
+ rhl_test_objects[2].value.id = rhl_test_objects[0].value.id;
+
+ test_insert_dup(rhl_test_objects, 2, false);
+ test_insert_dup(rhl_test_objects, 3, false);
+ test_insert_dup(rhl_test_objects, 2, true);
+ test_insert_dup(rhl_test_objects, 3, true);
+
+ return 0;
+}
+
static int thread_lookup_test(struct thread_data *tdata)
{
unsigned int entries = tdata->entries;
@@ -613,6 +745,8 @@ static int __init test_rht_init(void)
do_div(total_time, runs);
pr_info("Average test time: %llu\n", total_time);
+ test_insert_duplicates_run();
+
if (!tcount)
return 0;
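
Taken together, the test_rhashtable.c additions force collisions on purpose: my_hashfn buckets objects by value.id % 10, so ids 1 and 21 land in the same bucket while the third object duplicates id 1 and must chain as an rhlist duplicate. print_ht() walks the bucket table, prints each bucket's chains, and returns a count that test_insert_dup() checks against the number inserted. Possible output for the three-element case (illustrative only; bucket index and chain order depend on insertion order and on the slow/fast path):

	---- ht: ----
	bucket[1] -> [[ val 21 (tid=1) ]] -> [[ val 1 (tid=2), val 1 (tid=0) ]]
	-------------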