author     Renato Botelho <renato@netgate.com>    2017-02-23 06:28:41 -0300
committer  Renato Botelho <renato@netgate.com>    2017-02-23 06:28:41 -0300
commit     82ceeb2ea625cd9bff60f2863b9a0830f55b7905 (patch)
tree       263ca9347bf664a4489743f9302e699ce14de1df /contrib/netbsd-tests/lib/libc/db
parent     4a05f5440acda223e6a0ec5157bc32ecc0f09ff9 (diff)
parent     d20dd8b36e7a565be7bfbb22aade51c8ffd753e9 (diff)
Merge remote-tracking branch 'origin/stable/10' into devel
Diffstat (limited to 'contrib/netbsd-tests/lib/libc/db')
-rw-r--r--  contrib/netbsd-tests/lib/libc/db/h_db.c          |  77
-rw-r--r--  contrib/netbsd-tests/lib/libc/db/h_lfsr.c        | 179
-rwxr-xr-x  contrib/netbsd-tests/lib/libc/db/t_db.sh         | 442
-rw-r--r--  contrib/netbsd-tests/lib/libc/db/t_db_hash_seq.c | 343
4 files changed, 998 insertions, 43 deletions
diff --git a/contrib/netbsd-tests/lib/libc/db/h_db.c b/contrib/netbsd-tests/lib/libc/db/h_db.c
index dfb1385..dd19a03 100644
--- a/contrib/netbsd-tests/lib/libc/db/h_db.c
+++ b/contrib/netbsd-tests/lib/libc/db/h_db.c
@@ -1,4 +1,4 @@
-/* $NetBSD: h_db.c,v 1.1 2011/01/07 15:05:58 pgoyette Exp $ */
+/* $NetBSD: h_db.c,v 1.3 2016/09/24 21:18:22 christos Exp $ */
/*-
* Copyright (c) 1992, 1993, 1994
@@ -39,7 +39,7 @@ __COPYRIGHT("@(#) Copyright (c) 1992, 1993, 1994\
#if 0
static char sccsid[] = "@(#)dbtest.c 8.17 (Berkeley) 9/1/94";
#else
-__RCSID("$NetBSD: h_db.c,v 1.1 2011/01/07 15:05:58 pgoyette Exp $");
+__RCSID("$NetBSD: h_db.c,v 1.3 2016/09/24 21:18:22 christos Exp $");
#endif
#endif /* not lint */
@@ -57,12 +57,13 @@ __RCSID("$NetBSD: h_db.c,v 1.1 2011/01/07 15:05:58 pgoyette Exp $");
#include <unistd.h>
#include <err.h>
#include <db.h>
+#include "btree.h"
enum S { COMMAND, COMPARE, GET, PUT, REMOVE, SEQ, SEQFLAG, KEY, DATA };
static void compare(DBT *, DBT *);
static DBTYPE dbtype(const char *);
-static void dump(DB *, int);
+static void dump(DB *, int, int);
static void get(DB *, DBT *);
static void getdata(DB *, DBT *, DBT *);
static void put(DB *, DBT *, DBT *);
@@ -73,6 +74,9 @@ static void *rfile(char *, size_t *);
static void seq(DB *, DBT *);
static u_int setflags(char *);
static void *setinfo(DBTYPE, char *);
+#ifdef __NetBSD__
+static void unlinkpg(DB *);
+#endif
static void usage(void) __attribute__((__noreturn__));
static void *xcopy(void *, size_t);
static void chkcmd(enum S);
@@ -82,6 +86,9 @@ static void chkkey(enum S);
#ifdef STATISTICS
extern void __bt_stat(DB *);
#endif
+#ifdef __NetBSD__
+extern int __bt_relink(BTREE *, PAGE *);
+#endif
static DBTYPE type; /* Database type. */
static void *infop; /* Iflags. */
@@ -315,8 +322,16 @@ lkey: switch (command) {
}
break;
case 'o':
- dump(dbp, p[1] == 'r');
+ dump(dbp, p[1] == 'r', 0);
+ break;
+#ifdef __NetBSD__
+ case 'O':
+ dump(dbp, p[1] == 'r', 1);
break;
+ case 'u':
+ unlinkpg(dbp);
+ break;
+#endif
default:
errx(1, "line %zu: %s: unknown command character",
lineno, p);
@@ -483,17 +498,25 @@ seq(DB *dbp, DBT *kp)
}
static void
-dump(DB *dbp, int rev)
+dump(DB *dbp, int rev, int recurse)
{
DBT key, data;
int xflags, nflags;
if (rev) {
xflags = R_LAST;
+#ifdef __NetBSD__
+ nflags = recurse ? R_RPREV : R_PREV;
+#else
nflags = R_PREV;
+#endif
} else {
xflags = R_FIRST;
+#ifdef __NetBSD__
+ nflags = recurse ? R_RNEXT : R_NEXT;
+#else
nflags = R_NEXT;
+#endif
}
for (;; xflags = nflags)
switch (dbp->seq(dbp, &key, &data, xflags)) {
@@ -511,6 +534,42 @@ dump(DB *dbp, int rev)
done: return;
}
+#ifdef __NetBSD__
+void
+unlinkpg(DB *dbp)
+{
+ BTREE *t = dbp->internal;
+ PAGE *h = NULL;
+ pgno_t pg;
+
+ for (pg = P_ROOT; pg < t->bt_mp->npages;
+ mpool_put(t->bt_mp, h, 0), pg++) {
+ if ((h = mpool_get(t->bt_mp, pg, 0)) == NULL)
+ break;
+ /* Look for a nonempty leaf page that has both left
+ * and right siblings. */
+ if (h->prevpg == P_INVALID || h->nextpg == P_INVALID)
+ continue;
+ if (NEXTINDEX(h) == 0)
+ continue;
+ if ((h->flags & (P_BLEAF | P_RLEAF)))
+ break;
+ }
+ if (h == NULL || pg == t->bt_mp->npages) {
+ errx(1, "%s: no appropriate page found", __func__);
+ return;
+ }
+ if (__bt_relink(t, h) != 0) {
+ perror("unlinkpg");
+ goto cleanup;
+ }
+ h->prevpg = P_INVALID;
+ h->nextpg = P_INVALID;
+cleanup:
+ mpool_put(t->bt_mp, h, MPOOL_DIRTY);
+}
+#endif
+
static u_int
setflags(char *s)
{
@@ -725,7 +784,11 @@ static void
usage(void)
{
(void)fprintf(stderr,
- "Usage: %s [-l] [-f file] [-i info] [-o file] type script\n",
- getprogname());
+#ifdef __NetBSD__
+ "Usage: %s [-lu] [-f file] [-i info] [-o file] [-O file] "
+#else
+ "Usage: %s [-l] [-f file] [-i info] [-o file] "
+#endif
+ "type script\n", getprogname());
exit(1);
}
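
The h_db.c changes above add two NetBSD-only script commands to the test driver: 'u', which finds a non-empty leaf page that has both a left and a right sibling and unlinks it from the sibling chain via __bt_relink(), and 'O', which dumps the database with the recursive R_RNEXT/R_RPREV sequence flags so such orphaned pages are still reached. A minimal sketch of a script that drives them directly (hypothetical standalone invocation; the btree_recursive_traversal case added to t_db.sh below does the same thing under ATF):

awk 'BEGIN {
	# enough records that the btree has leaf pages with both siblings
	for (i = 0; i < 20000; i++)
		printf("p\nkAA%05d\nd%05d\n", i, i);
	print "u";	# unlink one leaf page from its siblings
	print "O";	# recursive dump still reaches its records
}' /dev/null > in
./h_db -o out -i psize=512 btree in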
diff --git a/contrib/netbsd-tests/lib/libc/db/h_lfsr.c b/contrib/netbsd-tests/lib/libc/db/h_lfsr.c
new file mode 100644
index 0000000..3f3d712
--- /dev/null
+++ b/contrib/netbsd-tests/lib/libc/db/h_lfsr.c
@@ -0,0 +1,179 @@
+/*-
+ * Copyright (c) 2015 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Christos Zoulas.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <sys/cdefs.h>
+__RCSID("$NetBSD: h_lfsr.c,v 1.1 2015/11/18 18:35:35 christos Exp $");
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <err.h>
+#include <string.h>
+#include <unistd.h>
+#include <db.h>
+
+#define MAXKEY 0xffff
+#ifdef DEBUG
+#define DPRINTF(...) printf(__VA_ARGS__)
+#else
+#define DPRINTF(...)
+#endif
+
+static uint16_t
+next(uint16_t *cur)
+{
+ uint16_t lsb = *cur & 1;
+ *cur >>= 1;
+ *cur ^= (-lsb) & 0xB400u;
+ return *cur;
+}
+
+int
+main(int argc, char *argv[])
+{
+ char buf[65536];
+ char kb[256];
+ DBT key, val;
+ DB *db;
+ HASHINFO hi;
+ uint8_t c;
+ uint16_t len;
+ uint32_t pagesize = atoi(argv[1]);
+
+ memset(&hi, 0, sizeof(hi));
+ memset(buf, 'a', sizeof(buf));
+ hi.bsize = pagesize;
+ hi.nelem = 65536;
+ hi.ffactor = 128;
+
+ key.data = kb;
+ val.data = buf;
+
+ db = dbopen(NULL, O_CREAT|O_TRUNC|O_RDWR, 0, DB_HASH, &hi);
+ if (db == NULL)
+ err(EXIT_FAILURE, "dbopen");
+
+ len = 0xaec1;
+ for (size_t i = 0; i < MAXKEY; i++) {
+ key.size = (len & 0xff) + 1;
+ c = len >> 8;
+ memset(kb, c, key.size);
+ val.size = (next(&len) & 0xff) + 1;
+ switch ((*db->put)(db, &key, &val, R_NOOVERWRITE)) {
+ case 0:
+ DPRINTF("put %zu %zu %#x\n",
+ key.size, val.size, c);
+ break;
+ case -1:
+ err(EXIT_FAILURE, "put error %zu %zu %#x",
+ key.size, val.size, c);
+ case 1:
+ errx(EXIT_FAILURE, "put overwrite %zu %zu %#x",
+ key.size, val.size, c);
+ default:
+ abort();
+ }
+ }
+
+ len = 0xaec1;
+ for (size_t i = 0; i < MAXKEY; i++) {
+ key.size = (len & 0xff) + 1;
+ c = len >> 8;
+ memset(kb, c, key.size);
+ next(&len);
+ switch ((*db->get)(db, &key, &val, 0)) {
+ case 0:
+ DPRINTF("get %zu %zu %#x\n",
+ key.size, val.size, c);
+ break;
+ case -1:
+ err(EXIT_FAILURE, "get %zu %zu %#x",
+ key.size, val.size, c);
+ case 1:
+ errx(EXIT_FAILURE, "get not found %zu %zu %#x",
+ key.size, val.size, c);
+ default:
+ abort();
+ }
+ if (memcmp(key.data, kb, key.size) != 0)
+ errx(EXIT_FAILURE, "get badkey %zu %zu %#x",
+ key.size, val.size, c);
+ if (val.size != (len & 0xff) + 1U)
+ errx(EXIT_FAILURE, "get badvallen %zu %zu %#x",
+ key.size, val.size, c);
+ if (memcmp(val.data, buf, val.size) != 0)
+ errx(EXIT_FAILURE, "get badval %zu %zu %#x",
+ key.size, val.size, c);
+ }
+
+ len = 0xaec1;
+ for (size_t i = 0; i < MAXKEY; i++) {
+ key.size = (len & 0xff) + 1;
+ c = len >> 8;
+ memset(kb, c, key.size);
+ next(&len);
+ switch ((*db->del)(db, &key, 0)) {
+ case 0:
+ DPRINTF("del %zu %zu %#x\n",
+ key.size, val.size, c);
+ break;
+ case -1:
+ err(EXIT_FAILURE, "del %zu %zu %#x", key.size,
+ val.size, c);
+ case 1:
+ errx(EXIT_FAILURE, "del not found %zu %zu %#x",
+ key.size, val.size, c);
+ default:
+ abort();
+ }
+ }
+
+ len = 0xaec1;
+ for (size_t i = 0; i < MAXKEY; i++) {
+ key.size = (len & 0xff) + 1;
+ c = len >> 8;
+ memset(kb, c, key.size);
+ next(&len);
+ switch ((*db->get)(db, &key, &val, 0)) {
+ case 0:
+ errx(EXIT_FAILURE, "get2 found %zu %zu %#x",
+ key.size, val.size, c);
+ break;
+ case -1:
+ err(EXIT_FAILURE, "get2 %zu %zu %#x",
+ key.size, val.size, c);
+ case 1:
+ DPRINTF("get2 %zu %zu %#x\n",
+ key.size, val.size, c);
+ break;
+ default:
+ abort();
+ }
+ }
+ return 0;
+}
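
The key/size generator in h_lfsr.c is a 16-bit Galois LFSR (taps 0xB400) seeded with 0xaec1; it is expected to be maximal-length, so the MAXKEY loop walks every nonzero 16-bit state exactly once, every derived key is distinct, and an R_NOOVERWRITE put can never legitimately report an overwrite. A standalone check of that period assumption (illustration only, not part of the patch):

/*
 * Sketch: measure the period of the 16-bit Galois LFSR used by h_lfsr.c.
 * With taps 0xB400 the sequence should be maximal-length (period 65535).
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t
next(uint16_t *cur)
{
	uint16_t lsb = *cur & 1;
	*cur >>= 1;
	*cur ^= (-lsb) & 0xB400u;
	return *cur;
}

int
main(void)
{
	uint16_t state = 0xaec1;	/* same seed as h_lfsr.c */
	uint32_t period = 0;

	do {
		next(&state);
		period++;
	} while (state != 0xaec1);
	printf("period = %u\n", period);	/* expected: 65535 */
	return 0;
}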
diff --git a/contrib/netbsd-tests/lib/libc/db/t_db.sh b/contrib/netbsd-tests/lib/libc/db/t_db.sh
index d256508..6858e36 100755
--- a/contrib/netbsd-tests/lib/libc/db/t_db.sh
+++ b/contrib/netbsd-tests/lib/libc/db/t_db.sh
@@ -1,4 +1,4 @@
-# $NetBSD: t_db.sh,v 1.4 2013/07/29 10:43:15 skrll Exp $
+# $NetBSD: t_db.sh,v 1.7 2016/09/24 20:12:33 christos Exp $
#
# Copyright (c) 2008 The NetBSD Foundation, Inc.
# All rights reserved.
@@ -25,11 +25,16 @@
# POSSIBILITY OF SUCH DAMAGE.
#
-prog()
+prog_db()
{
echo $(atf_get_srcdir)/h_db
}
+prog_lfsr()
+{
+ echo $(atf_get_srcdir)/h_lfsr
+}
+
dict()
{
if [ -f /usr/share/dict/words ]; then
@@ -37,6 +42,7 @@ dict()
elif [ -f /usr/dict/words ]; then
echo /usr/dict/words
else
+ echo ""
atf_fail "no dictionary found"
fi
}
@@ -44,12 +50,7 @@ dict()
# Begin FreeBSD
dict()
{
- if [ -f /usr/share/dict/words ]; then
- echo /usr/share/dict/words
- else
- echo /nonexistent
- atf_skip "Test requires dict/words"
- fi
+ echo /usr/share/dict/words
}
# End FreeBSD
@@ -62,6 +63,9 @@ small_btree_head()
"Checks btree database using small keys and small data" \
"pairs: takes the first hundred entries in the dictionary," \
"and makes them be key/data pairs."
+ # Begin FreeBSD
+ atf_set "require.files" /usr/share/dict/words
+ # End FreeBSD
}
small_btree_body()
{
@@ -78,7 +82,7 @@ small_btree_body()
echo k$i
done >in
- atf_check -o file:exp "$(prog)" btree in
+ atf_check -o file:exp "$(prog_db)" btree in
}
atf_test_case small_hash
@@ -88,6 +92,9 @@ small_hash_head()
"Checks hash database using small keys and small data" \
"pairs: takes the first hundred entries in the dictionary," \
"and makes them be key/data pairs."
+ # Begin FreeBSD
+ atf_set "require.files" /usr/share/dict/words
+ # End FreeBSD
}
small_hash_body()
{
@@ -104,7 +111,7 @@ small_hash_body()
echo k$i
done >in
- atf_check -o file:exp "$(prog)" hash in
+ atf_check -o file:exp "$(prog_db)" hash in
}
atf_test_case small_recno
@@ -114,6 +121,9 @@ small_recno_head()
"Checks recno database using small keys and small data" \
"pairs: takes the first hundred entries in the dictionary," \
"and makes them be key/data pairs."
+ # Begin FreeBSD
+ atf_set "require.files" /usr/share/dict/words
+ # End FreeBSD
}
small_recno_body()
{
@@ -128,7 +138,7 @@ small_recno_body()
printf("p\nk%d\nd%s\ng\nk%d\n", i, $0, i);
}' >in
- atf_check -o file:exp "$(prog)" recno in
+ atf_check -o file:exp "$(prog_db)" recno in
}
atf_test_case medium_btree
@@ -138,6 +148,9 @@ medium_btree_head()
"Checks btree database using small keys and medium" \
"data pairs: takes the first 200 entries in the" \
"dictionary, and gives them each a medium size data entry."
+ # Begin FreeBSD
+ atf_set "require.files" /usr/share/dict/words
+ # End FreeBSD
}
medium_btree_body()
{
@@ -156,7 +169,7 @@ medium_btree_body()
echo k$i
done >in
- atf_check -o file:exp "$(prog)" btree in
+ atf_check -o file:exp "$(prog_db)" btree in
}
atf_test_case medium_hash
@@ -166,6 +179,9 @@ medium_hash_head()
"Checks hash database using small keys and medium" \
"data pairs: takes the first 200 entries in the" \
"dictionary, and gives them each a medium size data entry."
+ # Begin FreeBSD
+ atf_set "require.files" /usr/share/dict/words
+ # End FreeBSD
}
medium_hash_body()
{
@@ -184,7 +200,7 @@ medium_hash_body()
echo k$i
done >in
- atf_check -o file:exp "$(prog)" hash in
+ atf_check -o file:exp "$(prog_db)" hash in
}
atf_test_case medium_recno
@@ -209,7 +225,7 @@ medium_recno_body()
printf("p\nk%d\nd%s\ng\nk%d\n", i, $0, i);
}' >in
- atf_check -o file:exp "$(prog)" recno in
+ atf_check -o file:exp "$(prog_db)" recno in
}
atf_test_case big_btree
@@ -238,7 +254,7 @@ big_btree_body()
echo k$i
done >in
- atf_check "$(prog)" -o out btree in
+ atf_check "$(prog_db)" -o out btree in
cmp -s exp out || atf_fail "test failed for page size: $psize"
done
}
@@ -266,7 +282,7 @@ big_hash_body()
echo k$i
done >in
- atf_check "$(prog)" -o out hash in
+ atf_check "$(prog_db)" -o out hash in
cmp -s exp out || atf_fail "test failed"
}
@@ -294,7 +310,7 @@ big_recno_body()
for psize in 512 16384 65536; do
echo "checking page size: $psize"
- atf_check "$(prog)" -o out recno in
+ atf_check "$(prog_db)" -o out recno in
cmp -s exp out || atf_fail "test failed for page size: $psize"
done
}
@@ -362,7 +378,7 @@ random_recno_body()
printf("g\nk%d\n", i);
}' >in
- atf_check -o file:exp "$(prog)" recno in
+ atf_check -o file:exp "$(prog_db)" recno in
}
atf_test_case reverse_recno
@@ -400,7 +416,7 @@ reverse_recno_body()
printf("g\nk%d\n", i);
}' >in
- atf_check -o file:exp "$(prog)" recno in
+ atf_check -o file:exp "$(prog_db)" recno in
}
atf_test_case alternate_recno
@@ -452,7 +468,7 @@ alternate_recno_body()
printf("g\nk%d\n", i);
}' >in
- atf_check "$(prog)" -o out recno in
+ atf_check "$(prog_db)" -o out recno in
sort -o exp exp
sort -o out out
@@ -521,7 +537,7 @@ h_delete()
}' >> exp
fi
- atf_check "$(prog)" -o out $type in
+ atf_check "$(prog_db)" -o out $type in
atf_check -o file:exp cat out
}
@@ -547,6 +563,7 @@ delete_recno_body()
h_repeated()
{
+ local type="$1"
TMPDIR="$(pwd)/db_dir"; export TMPDIR
mkdir ${TMPDIR}
@@ -565,7 +582,7 @@ h_repeated()
}
}' >in
- $(prog) btree in
+ $(prog_db) $type in
}
atf_test_case repeated_btree
@@ -620,16 +637,15 @@ duplicate_btree_body()
printf("o\n");
}' >in
- atf_check -o file:exp -x "$(prog) -iflags=1 btree in | sort"
+ atf_check -o file:exp -x "$(prog_db) -iflags=1 btree in | sort"
}
h_cursor_flags()
{
+ local type=$1
TMPDIR="$(pwd)/db_dir"; export TMPDIR
mkdir ${TMPDIR}
- type=$1
-
echo $SEVEN_SEVEN |
awk '{
for (i = 1; i <= 20; ++i)
@@ -649,7 +665,7 @@ h_cursor_flags()
printf("eR_CURSOR SHOULD HAVE FAILED\n");
}' >in
- atf_check -o ignore -e ignore -s ne:0 "$(prog)" -o out $type in
+ atf_check -o ignore -e ignore -s ne:0 "$(prog_db)" -o out $type in
atf_check -s ne:0 test -s out
cat exp |
@@ -663,7 +679,7 @@ h_cursor_flags()
printf("eR_CURSOR SHOULD HAVE FAILED\n");
}' >in
- atf_check -o ignore -e ignore -s ne:0 "$(prog)" -o out $type in
+ atf_check -o ignore -e ignore -s ne:0 "$(prog_db)" -o out $type in
atf_check -s ne:0 test -s out
}
@@ -719,7 +735,7 @@ reverse_order_recno_body()
printf("or\n");
}' >in
- atf_check -o file:exp "$(prog)" recno in
+ atf_check -o file:exp "$(prog_db)" recno in
}
atf_test_case small_page_btree
@@ -731,6 +747,9 @@ small_page_btree_head()
"reverses them, and gives them each a small size data" \
"entry. Uses a small page size to make sure the btree" \
"split code gets hammered."
+ # Begin FreeBSD
+ atf_set "require.files" /usr/share/dict/words
+ # End FreeBSD
}
small_page_btree_body()
{
@@ -749,7 +768,7 @@ small_page_btree_body()
echo k$i
done >in
- atf_check -o file:exp "$(prog)" -i psize=512 btree in
+ atf_check -o file:exp "$(prog_db)" -i psize=512 btree in
}
h_byte_orders()
@@ -765,18 +784,19 @@ h_byte_orders()
echo p
echo k$i
echo d$i
+ echo S
echo g
echo k$i
done >in
- atf_check -o file:exp "$(prog)" -ilorder=$order -f byte.file $type in
+ atf_check -o file:exp "$(prog_db)" -ilorder=$order -f byte.file $type in
for i in `sed 50q $(dict)`; do
echo g
echo k$i
done >in
- atf_check -o file:exp "$(prog)" -s -ilorder=$order -f byte.file $type in
+ atf_check -o file:exp "$(prog_db)" -s -ilorder=$order -f byte.file $type in
done
}
@@ -784,6 +804,9 @@ atf_test_case byte_orders_btree
byte_orders_btree_head()
{
atf_set "descr" "Checks btree database using differing byte orders"
+ # Begin FreeBSD
+ atf_set "require.files" /usr/share/dict/words
+ # End FreeBSD
}
byte_orders_btree_body()
{
@@ -806,16 +829,19 @@ h_bsize_ffactor()
ffactor=$2
echo "bucketsize $bsize, fill factor $ffactor"
- atf_check -o file:exp "$(prog)" "-ibsize=$bsize,\
+ atf_check -o file:exp "$(prog_db)" "-ibsize=$bsize,\
ffactor=$ffactor,nelem=25000,cachesize=65536" hash in
}
atf_test_case bsize_ffactor
bsize_ffactor_head()
{
- atf_set "timeout" "480"
+ atf_set "timeout" "1800"
atf_set "descr" "Checks hash database with various" \
"bucketsizes and fill factors"
+ # Begin FreeBSD
+ atf_set "require.files" /usr/share/dict/words
+ # End FreeBSD
}
bsize_ffactor_body()
{
@@ -876,9 +902,27 @@ bsize_ffactor_body()
h_bsize_ffactor 8192 341
h_bsize_ffactor 8192 455
h_bsize_ffactor 8192 683
+
+ h_bsize_ffactor 16384 341
+ h_bsize_ffactor 16384 455
+ h_bsize_ffactor 16384 683
+
+ h_bsize_ffactor 32768 341
+ h_bsize_ffactor 32768 455
+ h_bsize_ffactor 32768 683
+
+ # Begin FreeBSD
+ if false; then
+ # End FreeBSD
+ h_bsize_ffactor 65536 341
+ h_bsize_ffactor 65536 455
+ h_bsize_ffactor 65536 683
+ # Begin FreeBSD
+ fi
+ # End FreeBSD
}
-# FIXME: what does it test?
+# This tests 64K block size addition/removal
atf_test_case four_char_hash
four_char_hash_head()
{
@@ -901,15 +945,328 @@ EOF
# Begin FreeBSD
if true; then
- atf_check "$(prog)" -i bsize=32768 hash in
+ atf_check "$(prog_db)" -i bsize=32768 hash in
else
# End FreeBSD
- atf_check "$(prog)" -i bsize=65536 hash in
+ atf_check "$(prog_db)" -i bsize=65536 hash in
# Begin FreeBSD
fi
# End FreeBSD
}
+
+atf_test_case bsize_torture
+bsize_torture_head()
+{
+ atf_set "timeout" "36000"
+ atf_set "descr" "Checks hash database with various bucket sizes"
+}
+bsize_torture_body()
+{
+ TMPDIR="$(pwd)/db_dir"; export TMPDIR
+ mkdir ${TMPDIR}
+ # Begin FreeBSD
+ #
+ # db(3) doesn't support 64kB bucket sizes
+ for i in 2048 4096 8192 16384 32768 # 65536
+ # End FreeBSD
+ do
+ atf_check "$(prog_lfsr)" $i
+ done
+}
+
+atf_test_case btree_weird_page_split
+btree_weird_page_split_head()
+{
+ atf_set "descr" \
+ "Test for a weird page split condition where an insertion " \
+ "into index 0 of a page that would cause the new item to " \
+ "be the only item on the left page results in index 0 of " \
+ "the right page being erroneously skipped; this only " \
+ "happens with one particular key+data length for each page size."
+}
+btree_weird_page_split_body()
+{
+ for psize in 512 1024 2048 4096 8192; do
+ echo " page size $psize"
+ kdsizes=`awk 'BEGIN {
+ psize = '$psize'; hsize = int(psize/2);
+ for (kdsize = hsize-40; kdsize <= hsize; kdsize++) {
+ print kdsize;
+ }
+ }' /dev/null`
+
+ # Use a series of keylen+datalen values in the right
+ # neighborhood to find the one that triggers the bug.
+ # We could compute the exact size that triggers the
+ # bug but this additional fuzz may be useful.
+
+ # Insert keys in reverse order to maximize the chances
+ # for a split on index 0.
+
+ for kdsize in $kdsizes; do
+ awk 'BEGIN {
+ kdsize = '$kdsize';
+ for (i = 8; i-- > 0; ) {
+ s = sprintf("a%03d:%09d", i, kdsize);
+ for (j = 0; j < kdsize-20; j++) {
+ s = s "x";
+ }
+ printf("p\nka%03d\nd%s\n", i, s);
+ }
+ print "o";
+ }' /dev/null > in
+ sed -n 's/^d//p' in | sort > exp
+ atf_check -o file:exp \
+ "$(prog_db)" -i psize=$psize btree in
+ done
+ done
+}
+
+# Extremely tricky test attempting to replicate some unusual database
+# corruption seen in the field: pieces of the database becoming
+# inaccessible to random access, sequential access, or both. The
+# hypothesis is that at least some of these are triggered by the bug
+# in page splits on index 0 with a particular exact keylen+datalen.
+# (See Test 40.) For psize=4096, this size is exactly 2024.
+
+# The order of operations here relies on very specific knowledge of
+# the internals of the btree access method in order to place records
+# at specific offsets in a page and to create certain keys on internal
+# pages. The to-be-split page immediately prior to the bug-triggering
+# split has the following properties:
+#
+# * is not the leftmost leaf page
+# * key on the parent page is compares less than the key of the item
+# on index 0
+# * triggering record's key also compares greater than the key on the
+# parent page
+
+# Additionally, we prime the mpool LRU chain so that the head page on
+# the chain has the following properties:
+#
+# * record at index 0 is located where it will not get overwritten by
+# items written to the right-hand page during the split
+# * key of the record at index 0 compares less than the key of the
+# bug-triggering record
+
+# If the page-split bug exists, this test appears to create a database
+# where some records are inaccessible to a search, but still remain in
+# the file and are accessible by sequential traversal. At least one
+# record gets duplicated out of sequence.
+
+atf_test_case btree_tricky_page_split
+btree_tricky_page_split_head()
+{
+ atf_set "descr" \
+ "btree: no unsearchables due to page split on index 0"
+}
+btree_tricky_page_split_body()
+{
+ list=`(for i in a b c d; do
+ for j in 990 998 999; do
+ echo g ${i}${j} 1024
+ done
+ done;
+ echo g y997 2014
+ for i in y z; do
+ for j in 998 999; do
+ echo g ${i}${j} 1024
+ done
+ done)`
+ # Exact number for trigger condition accounts for newlines
+ # retained by dbtest with -ofile but not without; we use
+ # -ofile, so count newlines. keylen=5,datalen=5+2014 for
+ # psize=4096 here.
+ (cat - <<EOF
+p z999 1024
+p z998 1024
+p y999 1024
+p y990 1024
+p d999 1024
+p d990 1024
+p c999 1024
+p c990 1024
+p b999 1024
+p b990 1024
+p a999 1024
+p a990 1024
+p y998 1024
+r y990
+p d998 1024
+p d990 1024
+p c998 1024
+p c990 1024
+p b998 1024
+p b990 1024
+p a998 1024
+p a990 1024
+p y997 2014
+S
+o
+EOF
+ echo "$list") |
+ # awk script input:
+ # {p|g|r} key [datasize]
+ awk '/^[pgr]/{
+ printf("%s\nk%s\n", $1, $2);
+ }
+ /^p/{
+ s = $2;
+ for (i = 0; i < $3; i++) {
+ s = s "x";
+ }
+ printf("d%s\n", s);
+ }
+ !/^[pgr]/{
+ print $0;
+ }' > in
+ (echo "$list"; echo "$list") | awk '{
+ s = $2;
+ for (i = 0; i < $3; i++) {
+ s = s "x";
+ }
+ print s;
+ }' > exp
+ atf_check -o file:exp \
+ "$(prog_db)" -i psize=4096 btree in
+}
+
+# Begin FreeBSD
+if false; then
+# End FreeBSD
+atf_test_case btree_recursive_traversal
+btree_recursive_traversal_head()
+{
+ atf_set "descr" \
+ "btree: Test for recursive traversal successfully " \
+ "retrieving records that are inaccessible to normal " \
+ "sequential 'sibling-link' traversal. This works by " \
+ "unlinking a few leaf pages but leaving their parent " \
+ "links intact. To verify that the unlink actually makes " \
+ "records inaccessible, the test first uses 'o' to do a " \
+ "normal sequential traversal, followed by 'O' to do a " \
+ "recursive traversal."
+}
+btree_recursive_traversal_body()
+{
+ fill="abcdefghijklmnopqrstuvwxyzy"
+ script='{
+ for (i = 0; i < 20000; i++) {
+ printf("p\nkAA%05d\nd%05d%s\n", i, i, $0);
+ }
+ print "u";
+ print "u";
+ print "u";
+ print "u";
+ }'
+ (echo $fill | awk "$script"; echo o) > in1
+ echo $fill |
+ awk '{
+ for (i = 0; i < 20000; i++) {
+ if (i >= 5 && i <= 40)
+ continue;
+ printf("%05d%s\n", i, $0);
+ }
+ }' > exp1
+ atf_check -o file:exp1 \
+ "$(prog_db)" -i psize=512 btree in1
+ echo $fill |
+ awk '{
+ for (i = 0; i < 20000; i++) {
+ printf("%05d%s\n", i, $0);
+ }
+ }' > exp2
+ (echo $fill | awk "$script"; echo O) > in2
+ atf_check -o file:exp2 \
+ "$(prog_db)" -i psize=512 btree in2
+}
+# Begin FreeBSD
+fi
+# End FreeBSD
+
+atf_test_case btree_byteswap_unaligned_access_bksd
+btree_byteswap_unaligned_access_bksd_head()
+{
+ atf_set "descr" \
+ "btree: big key, small data, byteswap unaligned access"
+}
+btree_byteswap_unaligned_access_bksd_body()
+{
+ (echo foo; echo bar) |
+ awk '{
+ s = $0
+ for (i = 0; i < 488; i++) {
+ s = s "x";
+ }
+ printf("p\nk%s\ndx\n", s);
+ }' > in
+ for order in 1234 4321; do
+ atf_check \
+ "$(prog_db)" -o out -i psize=512,lorder=$order btree in
+ done
+}
+
+atf_test_case btree_byteswap_unaligned_access_skbd
+btree_byteswap_unaligned_access_skbd_head()
+{
+ atf_set "descr" \
+ "btree: small key, big data, byteswap unaligned access"
+}
+btree_byteswap_unaligned_access_skbd_body()
+{
+ # 484 = 512 - 20 (header) - 7 ("foo1234") - 1 (newline)
+ (echo foo1234; echo bar1234) |
+ awk '{
+ s = $0
+ for (i = 0; i < 484; i++) {
+ s = s "x";
+ }
+ printf("p\nk%s\nd%s\n", $0, s);
+ }' > in
+ for order in 1234 4321; do
+ atf_check \
+ "$(prog_db)" -o out -i psize=512,lorder=$order btree in
+ done
+}
+
+atf_test_case btree_known_byte_order
+btree_known_byte_order_head()
+{
+ atf_set "descr" \
+ "btree: small key, big data, known byte order"
+}
+btree_known_byte_order_body()
+{
+ local a="-i psize=512,lorder="
+
+ (echo foo1234; echo bar1234) |
+ awk '{
+ s = $0
+ for (i = 0; i < 484; i++) {
+ s = s "x";
+ }
+ printf("%s\n", s);
+ }' > exp
+ (echo foo1234; echo bar1234) |
+ awk '{
+ s = $0
+ for (i = 0; i < 484; i++) {
+ s = s "x";
+ }
+ printf("p\nk%s\nd%s\n", $0, s);
+ }' > in1
+ for order in 1234 4321; do
+ atf_check \
+ "$(prog_db)" -f out.$order $a$order btree in1
+ done
+ (echo g; echo kfoo1234; echo g; echo kbar1234) > in2
+ for order in 1234 4321; do
+ atf_check -o file:exp \
+ "$(prog_db)" -s -f out.$order $a$order btree in2
+ done
+}
+
atf_init_test_cases()
{
atf_add_test_case small_btree
@@ -937,4 +1294,17 @@ atf_init_test_cases()
atf_add_test_case byte_orders_hash
atf_add_test_case bsize_ffactor
atf_add_test_case four_char_hash
+ atf_add_test_case bsize_torture
+ atf_add_test_case btree_weird_page_split
+ atf_add_test_case btree_tricky_page_split
+ # Begin FreeBSD
+ if false; then
+ # End FreeBSD
+ atf_add_test_case btree_recursive_traversal
+ # Begin FreeBSD
+ fi
+ # End FreeBSD
+ atf_add_test_case btree_byteswap_unaligned_access_bksd
+ atf_add_test_case btree_byteswap_unaligned_access_skbd
+ atf_add_test_case btree_known_byte_order
}
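
Of the new t_db.sh cases, the two page-split tests target records whose combined key+data length sits near half the page size, so an insertion at index 0 leaves the new item as the only item on the left page after the split; the comment above btree_tricky_page_split pins this down to exactly 2024 bytes for psize=4096. The candidate sizes btree_weird_page_split sweeps can be summarized with the same arithmetic its awk block uses (sketch only):

for psize in 512 1024 2048 4096 8192; do
	awk 'BEGIN {
		psize = '$psize'; hsize = int(psize / 2);
		printf("psize %5d: kdsize %d..%d\n", psize, hsize - 40, hsize);
	}' /dev/null
done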
diff --git a/contrib/netbsd-tests/lib/libc/db/t_db_hash_seq.c b/contrib/netbsd-tests/lib/libc/db/t_db_hash_seq.c
new file mode 100644
index 0000000..6e19e22
--- /dev/null
+++ b/contrib/netbsd-tests/lib/libc/db/t_db_hash_seq.c
@@ -0,0 +1,343 @@
+/* $NetBSD: t_db_hash_seq.c,v 1.2 2015/06/22 22:35:51 christos Exp $ */
+
+/*-
+ * Copyright (c) 2015 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Christos Zoulas.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <sys/cdefs.h>
+__RCSID("$NetBSD: t_db_hash_seq.c,v 1.2 2015/06/22 22:35:51 christos Exp $");
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <db.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <syslog.h>
+#include <netinet/in.h>
+
+#define ATF
+
+struct conf {
+ struct sockaddr_storage c_ss;
+ int c_lmask;
+ int c_port;
+ int c_proto;
+ int c_family;
+ int c_uid;
+ int c_nfail;
+ char c_name[128];
+ int c_rmask;
+ int c_duration;
+};
+
+struct dbinfo {
+ int count;
+ time_t last;
+ char id[64];
+};
+
+#ifdef ATF
+#include <atf-c.h>
+
+#define DO_ERR(msg, ...) ATF_REQUIRE_MSG(0, msg, __VA_ARGS__)
+#define DO_WARNX(msg, ...) ATF_REQUIRE_MSG(0, msg, __VA_ARGS__)
+#else
+#include <err.h>
+
+#define DO_ERR(fmt, ...) err(EXIT_FAILURE, fmt, __VA_ARGS__)
+#define DO_WARNX(fmt, ...) warnx(fmt, __VA_ARGS__)
+#endif
+
+#define DO_DEBUG(fmt, ...) fprintf(stderr, fmt, __VA_ARGS__)
+
+static HASHINFO openinfo = {
+ 4096, /* bsize */
+ 32, /* ffactor */
+ 256, /* nelem */
+ 8 * 1024 * 1024,/* cachesize */
+ NULL, /* hash() */
+ 0 /* lorder */
+};
+
+static int debug = 0;
+
+static int
+state_close(DB *db)
+{
+ if (db == NULL)
+ return -1;
+ if ((*db->close)(db) == -1)
+ DO_ERR("%s: can't close db", __func__);
+ return 0;
+}
+
+static DB *
+state_open(const char *dbname, int flags, mode_t perm)
+{
+ DB *db;
+
+ db = dbopen(dbname, flags, perm, DB_HASH, &openinfo);
+ if (db == NULL) {
+ if (errno == ENOENT && (flags & O_CREAT) == 0)
+ return NULL;
+ DO_ERR("%s: can't open `%s'", __func__, dbname);
+ }
+ return db;
+}
+
+static int
+state_sizecheck(const DBT *t)
+{
+ if (sizeof(struct conf) == t->size)
+ return 0;
+ DO_WARNX("Key size mismatch %zu != %zu", sizeof(struct conf), t->size);
+ return 0;
+}
+
+static int
+state_del(DB *db, const struct conf *c)
+{
+ int rv;
+ DBT k;
+
+ if (db == NULL)
+ return -1;
+
+ k.data = __UNCONST(c);
+ k.size = sizeof(*c);
+
+ switch (rv = (*db->del)(db, &k, 1)) {
+ case 0:
+ case 1:
+ if (debug > 1) {
+ DO_DEBUG("%s: returns %d", __func__, rv);
+ (*db->sync)(db, 0);
+ }
+ return 0;
+ default:
+ DO_ERR("%s: failed", __func__);
+ return -1;
+ }
+}
+
+#if 0
+static int
+state_get(DB *db, const struct conf *c, struct dbinfo *dbi)
+{
+ int rv;
+ DBT k, v;
+
+ if (db == NULL)
+ return -1;
+
+ k.data = __UNCONST(c);
+ k.size = sizeof(*c);
+
+ switch (rv = (*db->get)(db, &k, &v, 0)) {
+ case 0:
+ case 1:
+ if (rv)
+ memset(dbi, 0, sizeof(*dbi));
+ else
+ memcpy(dbi, v.data, sizeof(*dbi));
+ if (debug > 1)
+ DO_DEBUG("%s: returns %d", __func__, rv);
+ return 0;
+ default:
+ DO_ERR("%s: failed", __func__);
+ return -1;
+ }
+}
+#endif
+
+static int
+state_put(DB *db, const struct conf *c, const struct dbinfo *dbi)
+{
+ int rv;
+ DBT k, v;
+
+ if (db == NULL)
+ return -1;
+
+ k.data = __UNCONST(c);
+ k.size = sizeof(*c);
+ v.data = __UNCONST(dbi);
+ v.size = sizeof(*dbi);
+
+ switch (rv = (*db->put)(db, &k, &v, 0)) {
+ case 0:
+ if (debug > 1) {
+ DO_DEBUG("%s: returns %d", __func__, rv);
+ (*db->sync)(db, 0);
+ }
+ return 0;
+ case 1:
+ errno = EEXIST;
+ /*FALLTHROUGH*/
+ default:
+ DO_ERR("%s: failed", __func__);
+ }
+}
+
+static int
+state_iterate(DB *db, struct conf *c, struct dbinfo *dbi, unsigned int first)
+{
+ int rv;
+ DBT k, v;
+
+ if (db == NULL)
+ return -1;
+
+ first = first ? R_FIRST : R_NEXT;
+
+ switch (rv = (*db->seq)(db, &k, &v, first)) {
+ case 0:
+ if (state_sizecheck(&k) == -1)
+ return -1;
+ memcpy(c, k.data, sizeof(*c));
+ memcpy(dbi, v.data, sizeof(*dbi));
+ if (debug > 1)
+ DO_DEBUG("%s: returns %d", __func__, rv);
+ return 1;
+ case 1:
+ if (debug > 1)
+ DO_DEBUG("%s: returns %d", __func__, rv);
+ return 0;
+ default:
+ DO_ERR("%s: failed", __func__);
+ return -1;
+ }
+}
+
+#define MAXB 100
+
+static int
+testdb(int skip)
+{
+ size_t i;
+ int f;
+ char flag[MAXB];
+ DB *db;
+ struct conf c;
+ struct dbinfo d;
+
+ db = state_open(NULL, O_RDWR|O_CREAT|O_TRUNC, 0600);
+ if (db == NULL)
+ DO_ERR("%s: cannot open `%s'", __func__, "foo");
+
+ memset(&c, 0, sizeof(c));
+ memset(&d, 0, sizeof(d));
+ memset(flag, 0, sizeof(flag));
+
+ for (i = 0; i < __arraycount(flag); i++) {
+ c.c_port = i;
+ state_put(db, &c, &d);
+ }
+
+ for (f = 1, i = 0; state_iterate(db, &c, &d, f) == 1; f = 0, i++) {
+ if (debug > 1)
+ DO_DEBUG("%zu %d\n", i, c.c_port);
+ if (flag[c.c_port])
+ DO_WARNX("Already visited %d", c.c_port);
+ flag[c.c_port] = 1;
+ if (skip == 0 || c.c_port % skip != 0)
+ continue;
+ state_del(db, &c);
+ }
+ state_close(db);
+ for (i = 0; i < __arraycount(flag); i++) {
+ if (flag[i] == 0)
+ DO_WARNX("Not visited %zu", i);
+ }
+ return 0;
+}
+
+#ifndef ATF
+int
+main(int argc, char *argv[])
+{
+ return testdb(6);
+}
+#else
+
+ATF_TC(test_hash_del_none);
+ATF_TC_HEAD(test_hash_del_none, tc)
+{
+ atf_tc_set_md_var(tc, "descr", "Check sequential scan of hash tables deleting none");
+}
+
+ATF_TC_BODY(test_hash_del_none, tc)
+{
+ testdb(0);
+}
+
+ATF_TC(test_hash_del_all);
+ATF_TC_HEAD(test_hash_del_all, tc)
+{
+ atf_tc_set_md_var(tc, "descr", "Check sequential scan of hash tables deleting all");
+}
+
+ATF_TC_BODY(test_hash_del_all, tc)
+{
+ testdb(1);
+}
+
+ATF_TC(test_hash_del_alt);
+ATF_TC_HEAD(test_hash_del_alt, tc)
+{
+ atf_tc_set_md_var(tc, "descr", "Check sequential scan of hash tables alternating deletets");
+}
+
+ATF_TC_BODY(test_hash_del_alt, tc)
+{
+ testdb(2);
+}
+
+ATF_TC(test_hash_del_every_7);
+ATF_TC_HEAD(test_hash_del_every_7, tc)
+{
+ atf_tc_set_md_var(tc, "descr", "Check sequential scan of hash tables deleting every 7 elements");
+}
+
+ATF_TC_BODY(test_hash_del_every_7, tc)
+{
+ testdb(7);
+}
+
+ATF_TP_ADD_TCS(tp)
+{
+ ATF_TP_ADD_TC(tp, test_hash_del_none);
+ ATF_TP_ADD_TC(tp, test_hash_del_all);
+ ATF_TP_ADD_TC(tp, test_hash_del_alt);
+ ATF_TP_ADD_TC(tp, test_hash_del_every_7);
+
+ return 0;
+}
+#endif
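
The new t_db_hash_seq.c cases check that a sequential scan of a hash database stays consistent while records are deleted from under the cursor (none, all, every 2nd, every 7th). A minimal standalone sketch of that access pattern, using plain db(3) calls instead of the test's state_* wrappers (assumed illustration, not part of the patch):

#include <db.h>
#include <err.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	DB *db;
	DBT k, v;
	int i, key, seen[100];

	memset(seen, 0, sizeof(seen));
	if ((db = dbopen(NULL, O_RDWR | O_CREAT | O_TRUNC, 0600,
	    DB_HASH, NULL)) == NULL)
		err(EXIT_FAILURE, "dbopen");
	/* Insert 100 integer-keyed records. */
	for (i = 0; i < 100; i++) {
		k.data = &i;
		k.size = sizeof(i);
		v.data = &i;
		v.size = sizeof(i);
		if ((*db->put)(db, &k, &v, 0) != 0)
			err(EXIT_FAILURE, "put");
	}
	/* Scan sequentially, deleting every 7th record mid-scan. */
	for (i = (*db->seq)(db, &k, &v, R_FIRST); i == 0;
	    i = (*db->seq)(db, &k, &v, R_NEXT)) {
		memcpy(&key, k.data, sizeof(key));
		seen[key]++;
		if (key % 7 == 0 && (*db->del)(db, &k, 0) != 0)
			err(EXIT_FAILURE, "del");
	}
	/* Every record should have been visited exactly once. */
	for (i = 0; i < 100; i++)
		if (seen[i] != 1)
			warnx("key %d visited %d times", i, seen[i]);
	return (*db->close)(db);
}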