Diffstat (limited to 'tools/regression/usr.bin/make/common.sh')
-rw-r--r--  tools/regression/usr.bin/make/common.sh  |  493
1 file changed, 369 insertions, 124 deletions
diff --git a/tools/regression/usr.bin/make/common.sh b/tools/regression/usr.bin/make/common.sh
index 4bd9f7f..3f7f1e1 100644
--- a/tools/regression/usr.bin/make/common.sh
+++ b/tools/regression/usr.bin/make/common.sh
@@ -5,102 +5,286 @@
# $FreeBSD$
#
-# Output usage messsage.
+# Output a message and exit with an error.
#
-print_usage()
+fatal()
{
- echo "Usage: $0 command"
- echo " clean - remove temp files (get initial state)"
- echo " compare - compare result of test to expected"
- echo " desc - print description of test"
- echo " diff - print diffs between results and expected"
- echo " harness - produce output suiteable for Test::Harness"
- echo " run - run the {test, compare, clean}"
- echo " test - run test case"
- echo " update - update the expected with current results"
+ echo "fatal: $*" >/dev/stderr
+ exit 1
}
#
-# Check if the test result is the same as the expected result.
+# Check whether the working directory exists - it must.
#
-# $1 Input file
+ensure_workdir()
+{
+ if [ ! -d ${WORK_DIR} ] ; then
+ fatal "working directory ${WORK_DIR} does not exist."
+ fi
+}
+
+#
+# Make sure all tests have been run
#
-hack_cmp()
+ensure_run()
{
- local EXPECTED RESULT
- EXPECTED="expected.$1"
- RESULT=${WORK_DIR}/$1
+ if [ -z "${TEST_N}" ] ; then
+ TEST_N=1
+ fi
- if [ -f $EXPECTED ]; then
- diff -q $EXPECTED $RESULT 1> /dev/null 2> /dev/null
- return $?
- else
- return 1 # FAIL
+ FAIL=
+ N=1
+ while [ ${N} -le ${TEST_N} ] ; do
+ eval skip=\${TEST_${N}_SKIP}
+ if [ -z "${skip}" ] ; then
+ if [ ! -f ${OUTPUT_DIR}/status.${N} -o \
+ ! -f ${OUTPUT_DIR}/stdout.${N} -o \
+ ! -f ${OUTPUT_DIR}/stderr.${N} ] ; then
+			echo "Test ${SUBDIR}/${N} not yet run"
+ FAIL=yes
+ fi
+ fi
+ N=$((N + 1))
+ done
+
+ if [ ! -z "${FAIL}" ] ; then
+ exit 1
fi
}
#
-# Check if the test result is the same as the expected result.
-#
-# $1 Input file
+# Output usage message.
#
-hack_diff()
+print_usage()
{
- local EXPECTED RESULT
- EXPECTED="expected.$1"
- RESULT=${WORK_DIR}/$1
+ echo "Usage: sh -v -m <path> -w <dir> $0 command(s)"
+ echo " setup - setup working directory"
+ echo " run - run the tests"
+ echo " show - show test results"
+ echo " compare - compare actual and expected results"
+ echo " diff - diff actual and expected results"
+ echo " reset - reset the test to its initial state"
+ echo " clean - delete working and output directory"
+ echo " test - setup + run + compare"
+ echo " prove - setup + run + compare + clean"
+ echo " desc - print short description"
+ echo " update - update the expected results with the current results"
+ echo " help - show this information"
+}
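+
+# Typical invocations, for illustration only (the script name test.t is
+# hypothetical; the commands are those listed above):
+#
+#	sh ./test.t test	# setup + run + compare
+#	sh ./test.t update	# accept the current output as expected
+#	sh ./test.t clean	# remove the working and output directories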
- echo diff -u $EXPECTED $RESULT
- if [ -f $EXPECTED ]; then
- diff -u $EXPECTED $RESULT
- return $?
- else
- return 1 # FAIL
+#
+# Common function for setup and reset.
+#
+common_setup()
+{
+ #
+ # If a Makefile exists in the source directory - copy it over
+ #
+ if [ -e Makefile -a ! -e ${WORK_DIR}/Makefile ] ; then
+ cp Makefile ${WORK_DIR}/Makefile
fi
+
+ #
+ # If the TEST_MAKE_DIRS variable is set, create those directories
+ #
+ set -- ${TEST_MAKE_DIRS}
+ while [ $# -ne 0 ] ; do
+ if [ ! -d ${WORK_DIR}/${1} ] ; then
+ mkdir -p -m ${2} ${WORK_DIR}/${1}
+ else
+ chmod ${2} ${WORK_DIR}/${1}
+ fi
+ shift ; shift
+ done
+
+ #
+ # If the TEST_COPY_FILES variable is set, copy those files over to
+ # the working directory. The value is assumed to be pairs of
+ # filenames and modes.
+ #
+ set -- ${TEST_COPY_FILES}
+ while [ $# -ne 0 ] ; do
+ if [ ! -e ${WORK_DIR}/${1} ] ; then
+ cp ${1} ${WORK_DIR}/${1}
+ fi
+ chmod ${2} ${WORK_DIR}/${1}
+ shift ; shift
+ done
+
+ #
+ # If the TEST_TOUCH variable is set, it is taken to be a list
+ # of pairs of filenames and arguments to touch(1). The arguments
+	# to touch must be surrounded by single quotes if there is more
+	# than one argument.
+ #
+ eval set -- ${TEST_TOUCH}
+ while [ $# -ne 0 ] ; do
+ eval touch ${2} ${WORK_DIR}/${1}
+ shift ; shift
+ done
+
+ #
+ # Now create links
+ #
+ eval set -- ${TEST_LINKS}
+ while [ $# -ne 0 ] ; do
+ eval ln ${WORK_DIR}/${1} ${WORK_DIR}/${2}
+ shift ; shift
+ done
}
#
-# Default setup_test() function.
+# Set up the test. This creates the working and output directories and
+# populates the working directory with files. If there is a setup_test()
+# function, call it.
#
-# The default function just does nothing.
+eval_setup()
+{
+ #
+	# Check whether the working directory exists. If it does, exit
+	# fatally so that we don't clobber a test the user is working on.
+ #
+ if [ -d ${WORK_DIR} ] ; then
+ fatal "working directory ${WORK_DIR} already exists."
+ fi
+
+ #
+ # Now create it and the output directory
+ #
+ mkdir -p ${WORK_DIR}
+ rm -rf ${OUTPUT_DIR}
+ mkdir -p ${OUTPUT_DIR}
+
+ #
+ # Common stuff
+ #
+ common_setup
+
+ #
+	# Finally, execute the test's own setup function (a no-op by default).
+ #
+ setup_test
+}
+
#
-# Both the variables SRC_BASE WORK_BASE are available.
+# The default setup_test() function does nothing. It may be overridden
+# by the test.
#
setup_test()
{
}
#
-# Default run_test() function. It can be replace by the
-# user specified regression test.
+# Reset the test. Here we need to rely on information from the test.
+# We execute the same steps as in the setup, but try not to clobber
+# existing files.
+# All files and directories listed in the TEST_CLEAN_FILES variable are
+# removed. Then the TEST_TOUCH list is processed and finally the
+# reset_test() function is called if it exists.
#
-# Both the variables SRC_BASE WORK_BASE are available.
+eval_reset()
+{
+ ensure_workdir
+
+ #
+ # Clean the output directory
+ #
+ rm -rf ${OUTPUT_DIR}/*
+
+ #
+ # Common stuff
+ #
+ common_setup
+
+ #
+ # Remove files.
+ #
+ for f in ${TEST_CLEAN_FILES} ; do
+ rm -rf ${WORK_DIR}/${f}
+ done
+
+ #
+ # Execute test's function
+ #
+ reset_test
+}
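+
+# Hypothetical example: a test whose runs leave build products behind
+# could have them removed on reset with
+#
+#	TEST_CLEAN_FILES="prog.o prog"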
+
#
-# Note: this function executes from a subshell.
+# The default reset_test() function does nothing. It may be overridden
+# by the test.
#
-run_test()
+reset_test()
{
- cd ${WORK_DIR}
- $MAKE_PROG 1> stdout 2> stderr
- echo $? > status
}
#
-# Default clean routine
+# Clean the test. This simply removes the working and output directories.
#
-clean_test()
+eval_clean()
{
+ rm -rf ${WORK_DIR}
+ rm -rf ${OUTPUT_DIR}
}
#
-# Clean working directory
+# Run the test.
#
-eval_clean()
+eval_run()
+{
+ ensure_workdir
+
+ if [ -z "${TEST_N}" ] ; then
+ TEST_N=1
+ fi
+
+ N=1
+ while [ ${N} -le ${TEST_N} ] ; do
+ eval skip=\${TEST_${N}_SKIP}
+ if [ -z "${skip}" ] ; then
+ ( cd ${WORK_DIR} ;
+ exec 1>${OUTPUT_DIR}/stdout.${N} 2>${OUTPUT_DIR}/stderr.${N}
+ run_test ${N}
+ echo $? >${OUTPUT_DIR}/status.${N}
+ )
+ fi
+ N=$((N + 1))
+ done
+}
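+
+# After a run ${OUTPUT_DIR} holds one triple of files per test number N,
+# which the compare, diff, show and update commands consume:
+#
+#	stdout.N  stderr.N  status.N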
+
+#
+# Default run_test() function. It can be replaced by the
+# user-specified regression test. The argument to this function is
+# the test number.
+#
+run_test()
{
- rm -f ${WORK_DIR}/stdout
- rm -f ${WORK_DIR}/stderr
- rm -f ${WORK_DIR}/status
- clean_test
+ eval args=\${TEST_${1}-test${1}}
+ ${MAKE_PROG} $args
+}
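+
+# Illustrative only: by default test N runs "${MAKE_PROG} testN". A test
+# script may define per-test variables instead, e.g.:
+#
+#	TEST_N=2			# number of tests in this directory
+#	TEST_1="-f Makefile.main"	# make arguments for test 1
+#	TEST_2_SKIP="requires root"	# skip test 2, giving the reason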
+
+#
+# Show test results.
+#
+eval_show()
+{
+ ensure_workdir
+
+ if [ -z "${TEST_N}" ] ; then
+ TEST_N=1
+ fi
+
+ N=1
+ while [ ${N} -le ${TEST_N} ] ; do
+ eval skip=\${TEST_${N}_SKIP}
+ if [ -z "${skip}" ] ; then
+ echo "=== Test ${N} Status =================="
+ cat ${OUTPUT_DIR}/status.${N}
+ echo ".......... Stdout .................."
+ cat ${OUTPUT_DIR}/stdout.${N}
+ echo ".......... Stderr .................."
+ cat ${OUTPUT_DIR}/stderr.${N}
+ fi
+ N=$((N + 1))
+ done
}
#
@@ -108,122 +292,183 @@ eval_clean()
#
eval_compare()
{
- hack_cmp stdout || FAIL="stdout $FAIL"
- hack_cmp stderr || FAIL="stderr $FAIL"
- hack_cmp status || FAIL="status $FAIL"
+ ensure_workdir
+ ensure_run
- if [ ! -z "$FAIL" ]; then
- FAIL=`echo $FAIL`
- echo "$SUBDIR: Test failed {$FAIL}"
+ if [ -z "${TEST_N}" ] ; then
+ TEST_N=1
fi
+
+ echo "1..${TEST_N}"
+ N=1
+ while [ ${N} -le ${TEST_N} ] ; do
+ eval skip=\${TEST_${N}_SKIP}
+ if [ -z "${skip}" ] ; then
+ FAIL=
+ do_compare stdout ${N} || FAIL="${FAIL}stdout "
+ do_compare stderr ${N} || FAIL="${FAIL}stderr "
+ do_compare status ${N} || FAIL="${FAIL}status "
+ if [ ! -z "$FAIL" ]; then
+ echo "not ok ${N} ${SUBDIR}/${N} # reason: ${FAIL}"
+ else
+ echo "ok ${N} ${SUBDIR}/${N}"
+ fi
+ else
+ echo "ok ${N} ${SUBDIR}/${N} # skip: ${skip}"
+ fi
+ N=$((N + 1))
+ done
}
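+
+# The compare output follows the TAP protocol understood by prove(1).
+# For a hypothetical directory "t1" with three tests it might read:
+#
+#	1..3
+#	ok 1 t1/1
+#	not ok 2 t1/2 # reason: stdout status
+#	ok 3 t1/3 # skip: not yet implemented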
#
-# Compare results with expected results for prove(1)
+# Check if the test result is the same as the expected result.
#
-eval_hcompare()
+# $1 Input file
+# $2 Test number
+#
+do_compare()
{
- FAIL=
- hack_cmp stdout || FAIL="stdout $FAIL"
- hack_cmp stderr || FAIL="stderr $FAIL"
- hack_cmp status || FAIL="status $FAIL"
+ local EXPECTED RESULT
+ EXPECTED="expected.$1.$2"
+ RESULT="${OUTPUT_DIR}/$1.$2"
- if [ ! -z "$FAIL" ]; then
- FAIL=`echo $FAIL`
- echo "not ok 1 $SUBDIR # reason: {$FAIL}"
+ if [ -f $EXPECTED ]; then
+ diff -q $EXPECTED $RESULT 1>/dev/null 2>/dev/null
+ return $?
else
- echo "ok 1 $SUBDIR"
+ return 1 # FAIL
fi
}
#
-# Print description
+# Diff current and expected results
#
-eval_desc()
+eval_diff()
{
- echo -n "$SUBDIR: "
- desc_test
+ ensure_workdir
+ ensure_run
+
+ if [ -z "${TEST_N}" ] ; then
+ TEST_N=1
+ fi
+
+ N=1
+ while [ ${N} -le ${TEST_N} ] ; do
+ eval skip=\${TEST_${N}_SKIP}
+ if [ -z "${skip}" ] ; then
+ FAIL=
+ do_diff stdout ${N}
+ do_diff stderr ${N}
+ do_diff status ${N}
+ fi
+ N=$((N + 1))
+ done
}
#
-# Prepare and run the test
+# Check if the test result is the same as the expected result.
#
-eval_test()
+# $1 Input file
+# $2 Test number
+#
+do_diff()
{
- [ -d ${WORK_DIR} ] || mkdir -p ${WORK_DIR}
- if [ -f Makefile ] ; then
- cp Makefile ${WORK_DIR}
+ local EXPECTED RESULT
+ EXPECTED="expected.$1.$2"
+ RESULT="${OUTPUT_DIR}/$1.$2"
+
+ echo diff -u $EXPECTED $RESULT
+ if [ -f $EXPECTED ]; then
+ diff -u $EXPECTED $RESULT
+ else
+ echo "${EXPECTED} does not exist"
fi
- setup_test
- ( run_test )
}
#
-# Diff current and expected results
+# Update expected results
#
-eval_diff()
+eval_update()
{
- eval_test
- echo "------------------------"
- echo "- $SUBDIR"
- echo "------------------------"
- hack_diff stdout
- hack_diff stderr
- hack_diff status
+ ensure_workdir
+ ensure_run
+
+ if [ -z "${TEST_N}" ] ; then
+ TEST_N=1
+ fi
+
+ FAIL=
+ N=1
+ while [ ${N} -le ${TEST_N} ] ; do
+ eval skip=\${TEST_${N}_SKIP}
+ if [ -z "${skip}" ] ; then
+ cp ${OUTPUT_DIR}/stdout.${N} expected.stdout.${N}
+ cp ${OUTPUT_DIR}/stderr.${N} expected.stderr.${N}
+ cp ${OUTPUT_DIR}/status.${N} expected.status.${N}
+ fi
+ N=$((N + 1))
+ done
}
#
-# Run the test for prove(1)
+# Print description
#
-eval_harness()
+eval_desc()
{
- echo 1..1
- eval_test
- eval_hcompare
- eval_clean
+ echo "${SUBDIR}: ${DESC}"
}
#
# Run the test
#
-eval_run()
+eval_test()
{
- eval_test
+ eval_setup
+ eval_run
eval_compare
- eval_clean
}
#
-# Update expected results
+# Run the test for prove(1)
#
-eval_update()
+eval_prove()
{
- eval_test
- cat ${WORK_DIR}/stdout > expected.stdout
- cat ${WORK_DIR}/stderr > expected.stderr
- cat ${WORK_DIR}/status > expected.status
+ eval_setup
+ eval_run
+ eval_compare
+ eval_clean
}
#
-# Note: Uses global variable $DIR which might be assigned by
-# the script which sourced this file.
+# Main function. Execute the command(s) on the command line.
#
eval_cmd()
{
if [ $# -eq 0 ] ; then
- set -- harness
+		# if no arguments are given, default to 'prove'
+ set -- prove
fi
- case $1 in
- clean|compare|hcompare|desc|diff|harness|run|test|update)
- eval eval_$1
- ;;
- *)
- print_usage
- ;;
- esac
+ for i
+ do
+ case $i in
+
+ setup | run | compare | diff | clean | reset | show | \
+ test | prove | desc | update)
+ eval eval_$i
+ ;;
+	help | *)
+ print_usage
+ ;;
+ esac
+ done
}
+##############################################################################
+#
+# Main code
+#
+
#
# Parse command line arguments.
#
@@ -263,14 +508,14 @@ SRC_DIR=`pwd`
SRC_BASE=`while [ ! -f common.sh ] ; do cd .. ; done ; pwd`
SUBDIR=`echo ${SRC_DIR} | sed "s@${SRC_BASE}/@@"`
+#
+# Construct working directory
+#
WORK_BASE=${WORK_BASE:-"/tmp/$USER.make.test"}
WORK_DIR=${WORK_BASE}/${SUBDIR}
-MAKE_PROG=${MAKE_PROG:-/usr/bin/make}
+OUTPUT_DIR=${WORK_DIR}.OUTPUT
-export MAKE_PROG
-export VERBOSE
-export SRC_BASE
-export WORK_BASE
-export SUBDIR
-export SRC_DIR
-export WORK_DIR
+#
+# Make to use
+#
+MAKE_PROG=${MAKE_PROG:-/usr/bin/make}
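+
+#
+# Both WORK_BASE and MAKE_PROG may be overridden from the environment,
+# e.g. to test a freshly built make (object path and script name are
+# hypothetical):
+#
+#	MAKE_PROG=/usr/obj/usr.bin/make/make sh ./test.t test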