Diffstat (limited to 'contrib/llvm/utils/lit')
-rw-r--r-- | contrib/llvm/utils/lit/lit/ExampleTests/lit.cfg                 |  3
-rw-r--r-- | contrib/llvm/utils/lit/lit/ExampleTests/required-and-missing.c  |  4
-rw-r--r-- | contrib/llvm/utils/lit/lit/ExampleTests/required-and-present.c  |  2
-rw-r--r-- | contrib/llvm/utils/lit/lit/TestFormats.py                       |  7
-rw-r--r-- | contrib/llvm/utils/lit/lit/TestRunner.py                        | 86
-rw-r--r-- | contrib/llvm/utils/lit/lit/TestingConfig.py                     |  9
-rwxr-xr-x | contrib/llvm/utils/lit/lit/lit.py                               |  5
7 files changed, 82 insertions, 34 deletions
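The diff below teaches lit's ShTest and TclTest formats a REQUIRES: keyword: parseIntegratedTestScript() collects the comma-separated feature names and reports the test as UNSUPPORTED when any of them is absent from the new config.available_features list. As a rough sketch of how a suite might populate that list from its lit.cfg (the 'shell' feature name and the platform check are illustrative assumptions, not part of this commit):

# lit.cfg -- sketch only; the feature name below is hypothetical.
import sys

config.available_features = []
# Tests annotated with "// REQUIRES: shell" are skipped (reported
# UNSUPPORTED) unless this feature is advertised here.
if sys.platform not in ['win32', 'cygwin']:
    config.available_features.append('shell')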
diff --git a/contrib/llvm/utils/lit/lit/ExampleTests/lit.cfg b/contrib/llvm/utils/lit/lit/ExampleTests/lit.cfg
index dbd574f..20ee37d 100644
--- a/contrib/llvm/utils/lit/lit/ExampleTests/lit.cfg
+++ b/contrib/llvm/utils/lit/lit/ExampleTests/lit.cfg
@@ -21,3 +21,6 @@ config.test_exec_root = None
 
 # target_triple: Used by ShTest and TclTest formats for XFAIL checks.
 config.target_triple = 'foo'
+
+# available_features: Used by ShTest and TclTest formats for REQUIRES checks.
+config.available_features = ['some-feature-name']
diff --git a/contrib/llvm/utils/lit/lit/ExampleTests/required-and-missing.c b/contrib/llvm/utils/lit/lit/ExampleTests/required-and-missing.c
new file mode 100644
index 0000000..47ba72e
--- /dev/null
+++ b/contrib/llvm/utils/lit/lit/ExampleTests/required-and-missing.c
@@ -0,0 +1,4 @@
+// This test shouldn't be run, the required feature is missing.
+//
+// RUN: false
+// REQUIRES: some-missing-feature-name
diff --git a/contrib/llvm/utils/lit/lit/ExampleTests/required-and-present.c b/contrib/llvm/utils/lit/lit/ExampleTests/required-and-present.c
new file mode 100644
index 0000000..2a09e08
--- /dev/null
+++ b/contrib/llvm/utils/lit/lit/ExampleTests/required-and-present.c
@@ -0,0 +1,2 @@
+// RUN: true
+// REQUIRES: some-feature-name
diff --git a/contrib/llvm/utils/lit/lit/TestFormats.py b/contrib/llvm/utils/lit/lit/TestFormats.py
index e52d0e4..7ffbd2b 100644
--- a/contrib/llvm/utils/lit/lit/TestFormats.py
+++ b/contrib/llvm/utils/lit/lit/TestFormats.py
@@ -1,14 +1,21 @@
 import os
+import platform
 
 import Test
 import TestRunner
 import Util
 
+kIsWindows = platform.system() == 'Windows'
+
 class GoogleTest(object):
     def __init__(self, test_sub_dir, test_suffix):
         self.test_sub_dir = str(test_sub_dir)
         self.test_suffix = str(test_suffix)
 
+        # On Windows, assume tests will also end in '.exe'.
+        if kIsWindows:
+            self.test_suffix += '.exe'
+
     def getGTestTests(self, path, litConfig, localConfig):
         """getGTestTests(path) - [name]
 
diff --git a/contrib/llvm/utils/lit/lit/TestRunner.py b/contrib/llvm/utils/lit/lit/TestRunner.py
index cdf1c93..0eb51a8 100644
--- a/contrib/llvm/utils/lit/lit/TestRunner.py
+++ b/contrib/llvm/utils/lit/lit/TestRunner.py
@@ -312,11 +312,6 @@ def executeTclScriptInternal(test, litConfig, tmpBase, commands, cwd):
         out,err,exitCode = executeCommand(command, cwd=cwd,
                                           env=test.config.environment)
 
-        # Tcl commands fail on standard error output.
-        if err:
-            exitCode = 1
-            out = 'Command has output on stderr!\n\n' + out
-
         return out,err,exitCode
     else:
         results = []
@@ -328,11 +323,6 @@ def executeTclScriptInternal(test, litConfig, tmpBase, commands, cwd):
 
         out = err = ''
 
-        # Tcl commands fail on standard error output.
-        if [True for _,_,err,res in results if err]:
-            exitCode = 1
-            out += 'Command has output on stderr!\n\n'
-
         for i,(cmd, cmd_out, cmd_err, res) in enumerate(results):
             out += 'Command %d: %s\n' % (i, ' '.join('"%s"' % s for s in cmd.args))
             out += 'Command %d Result: %r\n' % (i, res)
@@ -422,6 +412,7 @@ def parseIntegratedTestScript(test, normalize_slashes=False):
     script = []
     xfails = []
    xtargets = []
+    requires = []
     for ln in open(sourcepath):
        if 'RUN:' in ln:
            # Isolate the command to run.
@@ -442,6 +433,9 @@ def parseIntegratedTestScript(test, normalize_slashes=False):
         elif 'XTARGET:' in ln:
             items = ln[ln.index('XTARGET:') + 8:].split(',')
             xtargets.extend([s.strip() for s in items])
+        elif 'REQUIRES:' in ln:
+            items = ln[ln.index('REQUIRES:') + 9:].split(',')
+            requires.extend([s.strip() for s in items])
         elif 'END.' in ln:
             # Check for END. lines.
             if ln[ln.index('END.'):].strip() == 'END.':
@@ -461,27 +455,42 @@ def parseIntegratedTestScript(test, normalize_slashes=False):
     if not script:
         return (Test.UNRESOLVED, "Test has no run line!")
 
+    # Check for unterminated run lines.
     if script[-1][-1] == '\\':
         return (Test.UNRESOLVED, "Test has unterminated run lines (with '\\')")
 
+    # Check that we have the required features:
+    missing_required_features = [f for f in requires
+                                 if f not in test.config.available_features]
+    if missing_required_features:
+        msg = ', '.join(missing_required_features)
+        return (Test.UNSUPPORTED,
+                "Test requires the following features: %s" % msg)
+
     isXFail = isExpectedFail(xfails, xtargets, test.suite.config.target_triple)
     return script,isXFail,tmpBase,execdir
 
-def formatTestOutput(status, out, err, exitCode, script):
+def formatTestOutput(status, out, err, exitCode, failDueToStderr, script):
     output = StringIO.StringIO()
     print >>output, "Script:"
     print >>output, "--"
     print >>output, '\n'.join(script)
     print >>output, "--"
-    print >>output, "Exit Code: %r" % exitCode
-    print >>output, "Command Output (stdout):"
-    print >>output, "--"
-    output.write(out)
-    print >>output, "--"
-    print >>output, "Command Output (stderr):"
-    print >>output, "--"
-    output.write(err)
-    print >>output, "--"
+    print >>output, "Exit Code: %r" % exitCode,
+    if failDueToStderr:
+        print >>output, "(but there was output on stderr)"
+    else:
+        print >>output
+    if out:
+        print >>output, "Command Output (stdout):"
+        print >>output, "--"
+        output.write(out)
+        print >>output, "--"
+    if err:
+        print >>output, "Command Output (stderr):"
+        print >>output, "--"
+        output.write(err)
+        print >>output, "--"
     return (status, output.getvalue())
 
 def executeTclTest(test, litConfig):
@@ -506,18 +515,30 @@ def executeTclTest(test, litConfig):
     if len(res) == 2:
         return res
 
+    # Test for failure. In addition to the exit code, Tcl commands are
+    # considered to fail if there is any standard error output.
     out,err,exitCode = res
     if isXFail:
-        ok = exitCode != 0
-        status = (Test.XPASS, Test.XFAIL)[ok]
+        ok = exitCode != 0 or err
+        if ok:
+            status = Test.XFAIL
+        else:
+            status = Test.XPASS
     else:
-        ok = exitCode == 0
-        status = (Test.FAIL, Test.PASS)[ok]
+        ok = exitCode == 0 and not err
+        if ok:
+            status = Test.PASS
+        else:
+            status = Test.FAIL
 
     if ok:
         return (status,'')
 
-    return formatTestOutput(status, out, err, exitCode, script)
+    # Set a flag for formatTestOutput so it can explain why the test was
+    # considered to have failed, despite having an exit code of 0.
+    failDueToStderr = exitCode == 0 and err
+
+    return formatTestOutput(status, out, err, exitCode, failDueToStderr, script)
 
 def executeShTest(test, litConfig, useExternalSh):
     if test.config.unsupported:
@@ -545,12 +566,21 @@ def executeShTest(test, litConfig, useExternalSh):
     out,err,exitCode = res
     if isXFail:
         ok = exitCode != 0
-        status = (Test.XPASS, Test.XFAIL)[ok]
+        if ok:
+            status = Test.XFAIL
+        else:
+            status = Test.XPASS
     else:
         ok = exitCode == 0
-        status = (Test.FAIL, Test.PASS)[ok]
+        if ok:
+            status = Test.PASS
+        else:
+            status = Test.FAIL
 
     if ok:
         return (status,'')
 
-    return formatTestOutput(status, out, err, exitCode, script)
+    # Sh tests are not considered to fail just from stderr output.
+    failDueToStderr = False
+
+    return formatTestOutput(status, out, err, exitCode, failDueToStderr, script)
diff --git a/contrib/llvm/utils/lit/lit/TestingConfig.py b/contrib/llvm/utils/lit/lit/TestingConfig.py
index dd905ef..5c1b273 100644
--- a/contrib/llvm/utils/lit/lit/TestingConfig.py
+++ b/contrib/llvm/utils/lit/lit/TestingConfig.py
@@ -28,7 +28,8 @@ class TestingConfig:
                             on_clone = None,
                             test_exec_root = None,
                             test_source_root = None,
-                            excludes = [])
+                            excludes = [],
+                            available_features = [])
 
         if os.path.exists(path):
             # FIXME: Improve detection and error reporting of errors in the
@@ -54,7 +55,8 @@ class TestingConfig:
 
     def __init__(self, parent, name, suffixes, test_format,
                  environment, substitutions, unsupported, on_clone,
-                 test_exec_root, test_source_root, excludes):
+                 test_exec_root, test_source_root, excludes,
+                 available_features):
         self.parent = parent
         self.name = str(name)
         self.suffixes = set(suffixes)
@@ -66,6 +68,7 @@ class TestingConfig:
         self.test_exec_root = test_exec_root
         self.test_source_root = test_source_root
         self.excludes = set(excludes)
+        self.available_features = set(available_features)
 
     def clone(self, path):
         # FIXME: Chain implementations?
@@ -75,7 +78,7 @@ class TestingConfig:
                             self.environment, self.substitutions,
                             self.unsupported, self.on_clone,
                             self.test_exec_root, self.test_source_root,
-                            self.excludes)
+                            self.excludes, self.available_features)
         if cfg.on_clone:
             cfg.on_clone(self, cfg, path)
         return cfg
diff --git a/contrib/llvm/utils/lit/lit/lit.py b/contrib/llvm/utils/lit/lit/lit.py
index db0653f..13d2630 100755
--- a/contrib/llvm/utils/lit/lit/lit.py
+++ b/contrib/llvm/utils/lit/lit/lit.py
@@ -358,8 +358,7 @@ def load_test_suite(inputs):
     from LitTestCase import LitTestCase
     return unittest.TestSuite([LitTestCase(test, litConfig) for test in tests])
 
-def main():
-    # Bump the GIL check interval, its more important to get any one thread to a
+def main(builtinParameters = {}):    # Bump the GIL check interval, its more important to get any one thread to a
     # blocking operation (hopefully exec) than to try and unblock other threads.
     #
     # FIXME: This is a hack.
@@ -469,7 +468,7 @@ def main():
         inputs = args
 
     # Create the user defined parameters.
-    userParams = {}
+    userParams = dict(builtinParameters)
     for entry in opts.userParameters:
         if '=' not in entry:
             name,val = entry,''
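The lit.py hunks let a wrapper script seed lit's parameter dictionary programmatically: main() now takes a builtinParameters dict, and userParams is initialized from it before the --param entries from the command line are applied on top. A hypothetical driver using this, assuming lit.py is importable as a module named lit (the 'build_mode' parameter is an invented example, not part of this commit):

#!/usr/bin/env python
# Hypothetical wrapper around lit's entry point.
import lit

if __name__ == '__main__':
    # Pre-seed a parameter that would otherwise require
    # "--param build_mode=Release" on every invocation; explicit
    # --param values still override it, since userParams starts
    # from this dict and is then updated from the command line.
    lit.main(builtinParameters={'build_mode': 'Release'})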