-rw-r--r--  sys/conf/files.amd64              6
-rw-r--r--  sys/conf/files.i386               1
-rw-r--r--  sys/libkern/crc32.c              11
-rw-r--r--  sys/libkern/x86/crc32_sse42.c   379
-rw-r--r--  sys/sys/libkern.h                 5
-rw-r--r--  tests/sys/kern/Makefile           8
-rw-r--r--  tests/sys/kern/libkern_crc32.c  132
7 files changed, 538 insertions, 4 deletions
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index 4a03c0e..2271e1c 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -552,6 +552,9 @@ isa/syscons_isa.c optional sc
isa/vga_isa.c optional vga
kern/kern_clocksource.c standard
kern/link_elf_obj.c standard
+libkern/x86/crc32_sse42.c standard
+libkern/memmove.c standard
+libkern/memset.c standard
#
# IA32 binary support
#
@@ -609,9 +612,6 @@ compat/ndis/subr_pe.c optional ndisapi pci
compat/ndis/subr_usbd.c optional ndisapi pci
compat/ndis/winx64_wrap.S optional ndisapi pci
#
-libkern/memmove.c standard
-libkern/memset.c standard
-#
# x86 real mode BIOS emulator, required by dpms/pci/vesa
#
compat/x86bios/x86bios.c optional x86bios | dpms | pci | vesa
diff --git a/sys/conf/files.i386 b/sys/conf/files.i386
index d00b376..c25a989 100644
--- a/sys/conf/files.i386
+++ b/sys/conf/files.i386
@@ -564,6 +564,7 @@ libkern/qdivrem.c standard
libkern/ucmpdi2.c standard
libkern/udivdi3.c standard
libkern/umoddi3.c standard
+libkern/x86/crc32_sse42.c standard
i386/xbox/xbox.c optional xbox
i386/xbox/xboxfb.c optional xboxfb
dev/fb/boot_font.c optional xboxfb
diff --git a/sys/libkern/crc32.c b/sys/libkern/crc32.c
index 65331ce..6bc469e 100644
--- a/sys/libkern/crc32.c
+++ b/sys/libkern/crc32.c
@@ -46,8 +46,14 @@
__FBSDID("$FreeBSD$");
#include <sys/param.h>
+#include <sys/libkern.h>
#include <sys/systm.h>
+#if defined(__amd64__) || defined(__i386__)
+#include <machine/md_var.h>
+#include <machine/specialreg.h>
+#endif
+
const uint32_t crc32_tab[] = {
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
@@ -749,6 +755,11 @@ calculate_crc32c(uint32_t crc32c,
const unsigned char *buffer,
unsigned int length)
{
+#if defined(__amd64__) || defined(__i386__)
+ if ((cpu_feature2 & CPUID2_SSE42) != 0) {
+ return (sse42_crc32c(crc32c, buffer, length));
+ } else
+#endif
if (length < 4) {
return (singletable_crc32c(crc32c, buffer, length));
} else {
diff --git a/sys/libkern/x86/crc32_sse42.c b/sys/libkern/x86/crc32_sse42.c
new file mode 100644
index 0000000..f56a10e
--- /dev/null
+++ b/sys/libkern/x86/crc32_sse42.c
@@ -0,0 +1,379 @@
+/*
+ * Derived from crc32c.c version 1.1 by Mark Adler.
+ *
+ * Copyright (C) 2013 Mark Adler
+ *
+ * This software is provided 'as-is', without any express or implied warranty.
+ * In no event will the author be held liable for any damages arising from the
+ * use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ *
+ * Mark Adler
+ * madler@alumni.caltech.edu
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * This file is compiled in userspace in order to run ATF unit tests.
+ */
+#ifdef USERSPACE_TESTING
+#include <stdint.h>
+#include <stdlib.h>
+#else
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#endif
+
+static __inline uint32_t
+_mm_crc32_u8(uint32_t x, uint8_t y)
+{
+ /*
+ * clang (at least 3.9.[0-1]) pessimizes "rm" (y) and "m" (y)
+ * significantly and "r" (y) a lot by copying y to a different
+ * local variable (on the stack or in a register), so only use
+ * the latter. This costs a register and an instruction but
+ * not a uop.
+ */
+ __asm("crc32b %1,%0" : "+r" (x) : "r" (y));
+ return (x);
+}
+
+static __inline uint32_t
+_mm_crc32_u32(uint32_t x, uint32_t y)
+{
+ __asm("crc32l %1,%0" : "+r" (x) : "r" (y));
+ return (x);
+}
+
+static __inline uint64_t
+_mm_crc32_u64(uint64_t x, uint64_t y)
+{
+ __asm("crc32q %1,%0" : "+r" (x) : "r" (y));
+ return (x);
+}
+
+/* CRC-32C (iSCSI) polynomial in reversed bit order. */
+#define POLY 0x82f63b78
+
+/*
+ * Block sizes for three-way parallel crc computation. LONG and SHORT must
+ * both be powers of two.
+ */
+#define LONG 128
+#define SHORT 64
+
+/*
+ * Tables for updating a crc for LONG, 2 * LONG, SHORT and 2 * SHORT bytes
+ * of value 0 later in the input stream, in the same way that the hardware
+ * would, but in software without calculating intermediate steps.
+ */
+static uint32_t crc32c_long[4][256];
+static uint32_t crc32c_2long[4][256];
+static uint32_t crc32c_short[4][256];
+static uint32_t crc32c_2short[4][256];
+
+/*
+ * Multiply a matrix times a vector over the Galois field of two elements,
+ * GF(2). Each element is a bit in an unsigned integer. mat must have at
+ * least as many entries as the power of two for most significant one bit in
+ * vec.
+ */
+static inline uint32_t
+gf2_matrix_times(uint32_t *mat, uint32_t vec)
+{
+ uint32_t sum;
+
+ sum = 0;
+ while (vec) {
+ if (vec & 1)
+ sum ^= *mat;
+ vec >>= 1;
+ mat++;
+ }
+ return (sum);
+}
+
+/*
+ * Multiply a matrix by itself over GF(2). Both mat and square must have 32
+ * rows.
+ */
+static inline void
+gf2_matrix_square(uint32_t *square, uint32_t *mat)
+{
+ int n;
+
+ for (n = 0; n < 32; n++)
+ square[n] = gf2_matrix_times(mat, mat[n]);
+}
+
+/*
+ * Construct an operator to apply len zeros to a crc. len must be a power of
+ * two. If len is not a power of two, then the result is the same as for the
+ * largest power of two less than len. The result for len == 0 is the same as
+ * for len == 1. A version of this routine could be easily written for any
+ * len, but that is not needed for this application.
+ */
+static void
+crc32c_zeros_op(uint32_t *even, size_t len)
+{
+ uint32_t odd[32]; /* odd-power-of-two zeros operator */
+ uint32_t row;
+ int n;
+
+ /* put operator for one zero bit in odd */
+ odd[0] = POLY; /* CRC-32C polynomial */
+ row = 1;
+ for (n = 1; n < 32; n++) {
+ odd[n] = row;
+ row <<= 1;
+ }
+
+ /* put operator for two zero bits in even */
+ gf2_matrix_square(even, odd);
+
+ /* put operator for four zero bits in odd */
+ gf2_matrix_square(odd, even);
+
+ /*
+ * first square will put the operator for one zero byte (eight zero
+ * bits), in even -- next square puts operator for two zero bytes in
+ * odd, and so on, until len has been rotated down to zero
+ */
+ do {
+ gf2_matrix_square(even, odd);
+ len >>= 1;
+ if (len == 0)
+ return;
+ gf2_matrix_square(odd, even);
+ len >>= 1;
+ } while (len);
+
+ /* answer ended up in odd -- copy to even */
+ for (n = 0; n < 32; n++)
+ even[n] = odd[n];
+}
+
+/*
+ * Take a length and build four lookup tables for applying the zeros operator
+ * for that length, byte-by-byte on the operand.
+ */
+static void
+crc32c_zeros(uint32_t zeros[][256], size_t len)
+{
+ uint32_t op[32];
+ uint32_t n;
+
+ crc32c_zeros_op(op, len);
+ for (n = 0; n < 256; n++) {
+ zeros[0][n] = gf2_matrix_times(op, n);
+ zeros[1][n] = gf2_matrix_times(op, n << 8);
+ zeros[2][n] = gf2_matrix_times(op, n << 16);
+ zeros[3][n] = gf2_matrix_times(op, n << 24);
+ }
+}
+
+/* Apply the zeros operator table to crc. */
+static inline uint32_t
+crc32c_shift(uint32_t zeros[][256], uint32_t crc)
+{
+
+ return (zeros[0][crc & 0xff] ^ zeros[1][(crc >> 8) & 0xff] ^
+ zeros[2][(crc >> 16) & 0xff] ^ zeros[3][crc >> 24]);
+}
+
+/* Initialize tables for shifting crcs. */
+static void
+#ifdef USERSPACE_TESTING
+__attribute__((__constructor__))
+#endif
+crc32c_init_hw(void)
+{
+ crc32c_zeros(crc32c_long, LONG);
+ crc32c_zeros(crc32c_2long, 2 * LONG);
+ crc32c_zeros(crc32c_short, SHORT);
+ crc32c_zeros(crc32c_2short, 2 * SHORT);
+}
+#ifdef _KERNEL
+SYSINIT(crc32c_sse42, SI_SUB_LOCK, SI_ORDER_ANY, crc32c_init_hw, NULL);
+#endif
+
+/* Compute CRC-32C using the Intel hardware instruction. */
+#ifdef USERSPACE_TESTING
+uint32_t sse42_crc32c(uint32_t, const unsigned char *, unsigned);
+#endif
+uint32_t
+sse42_crc32c(uint32_t crc, const unsigned char *buf, unsigned len)
+{
+#ifdef __amd64__
+ const size_t align = 8;
+#else
+ const size_t align = 4;
+#endif
+ const unsigned char *next, *end;
+#ifdef __amd64__
+ uint64_t crc0, crc1, crc2;
+#else
+ uint32_t crc0, crc1, crc2;
+#endif
+
+ next = buf;
+ crc0 = crc;
+
+ /* Compute the crc to bring the data pointer to an aligned boundary. */
+ while (len && ((uintptr_t)next & (align - 1)) != 0) {
+ crc0 = _mm_crc32_u8(crc0, *next);
+ next++;
+ len--;
+ }
+
+#if LONG > SHORT
+ /*
+ * Compute the crc on sets of LONG*3 bytes, executing three independent
+ * crc instructions, each on LONG bytes -- this is optimized for the
+ * Nehalem, Westmere, Sandy Bridge, and Ivy Bridge architectures, which
+ * have a throughput of one crc per cycle, but a latency of three
+ * cycles.
+ */
+ crc = 0;
+ while (len >= LONG * 3) {
+ crc1 = 0;
+ crc2 = 0;
+ end = next + LONG;
+ do {
+#ifdef __amd64__
+ crc0 = _mm_crc32_u64(crc0, *(const uint64_t *)next);
+ crc1 = _mm_crc32_u64(crc1,
+ *(const uint64_t *)(next + LONG));
+ crc2 = _mm_crc32_u64(crc2,
+ *(const uint64_t *)(next + (LONG * 2)));
+#else
+ crc0 = _mm_crc32_u32(crc0, *(const uint32_t *)next);
+ crc1 = _mm_crc32_u32(crc1,
+ *(const uint32_t *)(next + LONG));
+ crc2 = _mm_crc32_u32(crc2,
+ *(const uint32_t *)(next + (LONG * 2)));
+#endif
+ next += align;
+ } while (next < end);
+ /*-
+ * Update the crc. Try to do it in parallel with the inner
+ * loop. 'crc' is used to accumulate crc0 and crc1
+ * produced by the inner loop so that the next iteration
+ * of the loop doesn't depend on anything except crc2.
+ *
+ * The full expression for the update is:
+ * crc = S*S*S*crc + S*S*crc0 + S*crc1
+ * where the terms are polynomials modulo the CRC polynomial.
+ * We regroup this subtly as:
+ * crc = S*S * (S*crc + crc0) + S*crc1.
+ * This has an extra dependency which reduces possible
+ * parallelism for the expression, but it turns out to be
+ * best to intentionally delay evaluation of this expression
+ * so that it competes less with the inner loop.
+ *
+	 * We also intentionally reduce parallelism by feeding back
+ * crc2 to the inner loop as crc0 instead of accumulating
+ * it in crc. This synchronizes the loop with crc update.
+ * CPU and/or compiler schedulers produced bad order without
+ * this.
+ *
+ * Shifts take about 12 cycles each, so 3 here with 2
+ * parallelizable take about 24 cycles and the crc update
+ * takes slightly longer. 8 dependent crc32 instructions
+ * can run in 24 cycles, so the 3-way blocking is worse
+ * than useless for sizes less than 8 * <word size> = 64
+ * on amd64. In practice, SHORT = 32 confirms these
+ * timing calculations by giving a small improvement
+ * starting at size 96. Then the inner loop takes about
+ * 12 cycles and the crc update about 24, but these are
+ * partly in parallel so the total time is less than the
+ * 36 cycles that 12 dependent crc32 instructions would
+ * take.
+ *
+ * To have a chance of completely hiding the overhead for
+ * the crc update, the inner loop must take considerably
+ * longer than 24 cycles. LONG = 64 makes the inner loop
+ * take about 24 cycles, so is not quite large enough.
+ * LONG = 128 works OK. Unhideable overheads are about
+ * 12 cycles per inner loop. All assuming timing like
+ * Haswell.
+ */
+ crc = crc32c_shift(crc32c_long, crc) ^ crc0;
+ crc1 = crc32c_shift(crc32c_long, crc1);
+ crc = crc32c_shift(crc32c_2long, crc) ^ crc1;
+ crc0 = crc2;
+ next += LONG * 2;
+ len -= LONG * 3;
+ }
+ crc0 ^= crc;
+#endif /* LONG > SHORT */
+
+ /*
+ * Do the same thing, but now on SHORT*3 blocks for the remaining data
+ * less than a LONG*3 block
+ */
+ crc = 0;
+ while (len >= SHORT * 3) {
+ crc1 = 0;
+ crc2 = 0;
+ end = next + SHORT;
+ do {
+#ifdef __amd64__
+ crc0 = _mm_crc32_u64(crc0, *(const uint64_t *)next);
+ crc1 = _mm_crc32_u64(crc1,
+ *(const uint64_t *)(next + SHORT));
+ crc2 = _mm_crc32_u64(crc2,
+ *(const uint64_t *)(next + (SHORT * 2)));
+#else
+ crc0 = _mm_crc32_u32(crc0, *(const uint32_t *)next);
+ crc1 = _mm_crc32_u32(crc1,
+ *(const uint32_t *)(next + SHORT));
+ crc2 = _mm_crc32_u32(crc2,
+ *(const uint32_t *)(next + (SHORT * 2)));
+#endif
+ next += align;
+ } while (next < end);
+ crc = crc32c_shift(crc32c_short, crc) ^ crc0;
+ crc1 = crc32c_shift(crc32c_short, crc1);
+ crc = crc32c_shift(crc32c_2short, crc) ^ crc1;
+ crc0 = crc2;
+ next += SHORT * 2;
+ len -= SHORT * 3;
+ }
+ crc0 ^= crc;
+
+ /* Compute the crc on the remaining bytes at native word size. */
+ end = next + (len - (len & (align - 1)));
+ while (next < end) {
+#ifdef __amd64__
+ crc0 = _mm_crc32_u64(crc0, *(const uint64_t *)next);
+#else
+ crc0 = _mm_crc32_u32(crc0, *(const uint32_t *)next);
+#endif
+ next += align;
+ }
+ len &= (align - 1);
+
+ /* Compute the crc for any trailing bytes. */
+ while (len) {
+ crc0 = _mm_crc32_u8(crc0, *next);
+ next++;
+ len--;
+ }
+
+ return ((uint32_t)crc0);
+}
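
The crc32c_shift() tables above apply, in constant time, the linear operator that running LONG (or SHORT, or twice either) zero bytes through the CRC register would apply; that is what lets the three interleaved lanes be recombined as crc = S*S*(S*crc ^ crc0) ^ S*crc1, with crc2 fed back as the next crc0. The identity being exploited is crc(A||B, seed) == S_len(B)(crc(A, seed)) ^ crc(B, 0). Below is a standalone sketch, not part of the patch, that checks the identity with the compiler's SSE4.2 intrinsics, applying the zeros operator the slow way, one byte at a time; hwcrc() and the buffer contents are illustrative only:

/*
 * Standalone sketch: running n zero bytes through the CRC register is a
 * linear operator S_n over GF(2), and for any split A||B,
 *	crc(A||B, seed) == S_len(B)(crc(A, seed)) ^ crc(B, 0).
 * Assumes an SSE4.2-capable host and compilation with -msse4.2.
 */
#include <assert.h>
#include <nmmintrin.h>
#include <stdint.h>
#include <string.h>

static uint32_t
hwcrc(uint32_t crc, const unsigned char *p, size_t len)
{

	while (len--)
		crc = _mm_crc32_u8(crc, *p++);
	return (crc);
}

int
main(void)
{
	unsigned char a[13], b[64], ab[13 + 64];
	const unsigned char zeros[64] = { 0 };
	uint32_t whole, left, right, shifted;
	size_t i;

	for (i = 0; i < sizeof(a); i++)
		a[i] = i + 1;
	memset(b, 0xa5, sizeof(b));
	memcpy(ab, a, sizeof(a));
	memcpy(ab + sizeof(a), b, sizeof(b));

	whole = hwcrc(~0U, ab, sizeof(ab));		/* crc(A||B, ~0) */
	left = hwcrc(~0U, a, sizeof(a));		/* crc(A, ~0) */
	right = hwcrc(0, b, sizeof(b));			/* crc(B, 0) */
	shifted = hwcrc(left, zeros, sizeof(zeros));	/* S_64 done slowly */

	assert((shifted ^ right) == whole);
	return (0);
}

The precomputed tables turn each S_n application into four table lookups instead of n byte steps, which is why LONG and SHORT must be fixed sizes known at table-build time.
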
diff --git a/sys/sys/libkern.h b/sys/sys/libkern.h
index 28ab3df..a960df8 100644
--- a/sys/sys/libkern.h
+++ b/sys/sys/libkern.h
@@ -178,6 +178,11 @@ crc32(const void *buf, size_t size)
uint32_t
calculate_crc32c(uint32_t crc32c, const unsigned char *buffer,
unsigned int length);
+#ifdef _KERNEL
+#if defined(__amd64__) || defined(__i386__)
+uint32_t sse42_crc32c(uint32_t, const unsigned char *, unsigned);
+#endif
+#endif
LIBKERN_INLINE void *memset(void *, int, size_t);
diff --git a/tests/sys/kern/Makefile b/tests/sys/kern/Makefile
index e2b2d4a..d78785c 100644
--- a/tests/sys/kern/Makefile
+++ b/tests/sys/kern/Makefile
@@ -25,13 +25,19 @@ NETBSD_ATF_TESTS_C+= mqueue_test
CFLAGS.mqueue_test+= -I${SRCTOP}/tests
LIBADD.mqueue_test+= rt
+.if ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "i386"
+ATF_TESTS_C+= libkern_crc32
+CFLAGS.libkern_crc32+= -msse4 -DUSERSPACE_TESTING
+LDADD.libkern_crc32+= ${SRCTOP}/sys/libkern/x86/crc32_sse42.c
+.endif
+
# subr_unit.c contains functions whose prototypes lie in headers that cannot be
# included in userland. But as far as subr_unit_test goes, they're effectively
# static. So it's ok to disable -Wmissing-prototypes for this program.
CFLAGS.subr_unit.c+= -Wno-missing-prototypes
SRCS.subr_unit_test+= subr_unit.c
-WARNS?= 5
+WARNS?= 3
TESTS_SUBDIRS+= acct
TESTS_SUBDIRS+= execve
diff --git a/tests/sys/kern/libkern_crc32.c b/tests/sys/kern/libkern_crc32.c
new file mode 100644
index 0000000..5eef045
--- /dev/null
+++ b/tests/sys/kern/libkern_crc32.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+
+#include <stdint.h>
+
+#include <atf-c.h>
+
+extern uint32_t sse42_crc32c(uint32_t, const unsigned char *, unsigned);
+
+ATF_TC_WITHOUT_HEAD(crc32c_basic_correctness);
+ATF_TC_BODY(crc32c_basic_correctness, tc)
+{
+ const uint64_t inputs[] = {
+ 0xf408c634b3a9142,
+ 0x80539e8c7c352e2b,
+ 0x62e9121db6e4d649,
+ 0x899345850ed0a286,
+ 0x2302df11b4a43b15,
+ 0xe943de7b3d35d70,
+ 0xdf1ff2bf41abf56b,
+ 0x9bc138abae315de2,
+ 0x31cc82e56234f0ff,
+ 0xce63c0cd6988e847,
+ 0x3e42f6b78ee352fa,
+ 0xfa4085436078cfa6,
+ 0x53349558bf670a4b,
+ 0x2714e10e7d722c61,
+ 0xc0d3261addfc6908,
+ 0xd1567c3181d3a1bf,
+ };
+ const uint32_t results[] = {
+ 0x2ce33ede,
+ 0xc49cc573,
+ 0xb8683c96,
+ 0x6918660d,
+ 0xa904e522,
+ 0x52dbc42c,
+ 0x98863c22,
+ 0x894d5d2c,
+ 0xb003745d,
+ 0xfc496dbd,
+ 0x97d2fbb5,
+ 0x3c062ef1,
+ 0xcc2eff18,
+ 0x6a9b09f6,
+ 0x420242c1,
+ 0xfd562dc3,
+ };
+ size_t i;
+ uint32_t act;
+
+ ATF_REQUIRE(nitems(inputs) == nitems(results));
+
+ for (i = 0; i < nitems(inputs); i++) {
+ act = sse42_crc32c(~0, (const void *)&inputs[i],
+ sizeof(inputs[0]));
+ ATF_REQUIRE_MSG(act == results[i],
+ "crc32c(0x%jx) = 0x%08x, got 0x%08x", (uintmax_t)inputs[i],
+ results[i], act);
+ }
+}
+
+ATF_TC_WITHOUT_HEAD(crc32c_alignment);
+ATF_TC_BODY(crc32c_alignment, tc)
+{
+ const uint64_t input = 0xf408c634b3a9142;
+ const uint32_t result = 0x2ce33ede;
+ unsigned char buf[15];
+ size_t i;
+ uint32_t act;
+
+
+ for (i = 1; i < 8; i++) {
+ memcpy(&buf[i], &input, sizeof(input));
+
+ act = sse42_crc32c(~0, (const void *)&buf[i], sizeof(input));
+ ATF_REQUIRE_MSG(act == result,
+ "crc32c(0x%jx) = 0x%08x, got 0x%08x", (uintmax_t)input,
+ result, act);
+ }
+}
+
+ATF_TC_WITHOUT_HEAD(crc32c_trailing_bytes);
+ATF_TC_BODY(crc32c_trailing_bytes, tc)
+{
+ const unsigned char input[] = {
+ 0x87, 0x54, 0x74, 0xd2, 0xb, 0x9b, 0xdd, 0xf6, 0x68, 0x37,
+ 0xd4, 0x4, 0x5e, 0xa9, 0xb3
+ };
+ const uint32_t result = 0xec638d62;
+ uint32_t act;
+
+ act = sse42_crc32c(~0, input, sizeof(input));
+ ATF_REQUIRE_MSG(act == result, "expected 0x%08x, got 0x%08x", result,
+ act);
+}
+
+ATF_TP_ADD_TCS(tp)
+{
+
+ ATF_TP_ADD_TC(tp, crc32c_basic_correctness);
+ ATF_TP_ADD_TC(tp, crc32c_alignment);
+ ATF_TP_ADD_TC(tp, crc32c_trailing_bytes);
+ return (atf_no_error());
+}
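
The vectors above seed the CRC register with ~0 and compare against the raw, non-inverted register value, matching what calculate_crc32c() returns. On amd64 an aligned 8-byte input never reaches the LONG/SHORT blocking, so the first vector reduces to a single crc32q and can be cross-checked in isolation. A standalone sketch, not part of the test, assuming an SSE4.2-capable amd64 host built with -msse4.2:

#include <assert.h>
#include <nmmintrin.h>
#include <stdint.h>

int
main(void)
{
	uint32_t crc;

	/*
	 * Equivalent to sse42_crc32c(~0, buf, 8) for an aligned 8-byte
	 * little-endian buffer: one crc32q seeded with 0xffffffff.
	 */
	crc = (uint32_t)_mm_crc32_u64(0xffffffffULL, 0xf408c634b3a9142ULL);
	assert(crc == 0x2ce33ede);
	return (0);
}
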