Diffstat (limited to 'utils/lit')
-rw-r--r--  utils/lit/MANIFEST.in | 7
-rw-r--r--  utils/lit/TODO | 17
-rw-r--r--  utils/lit/lit/ExampleTests/Clang/lit.cfg | 2
-rw-r--r--  utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/data.txt | 1
-rw-r--r--  utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/dg.exp | 6
-rw-r--r--  utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/pct-S.ll | 1
-rw-r--r--  utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.cfg | 75
-rw-r--r--  utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.site.cfg | 3
-rw-r--r--  utils/lit/lit/ExampleTests/LLVM.InTree/test/site.exp | 10
-rw-r--r--  utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/lit.site.cfg | 3
-rw-r--r--  utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/site.exp | 10
-rw-r--r--  utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/dg.exp | 6
-rw-r--r--  utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/lit.cfg | 75
-rw-r--r--  utils/lit/lit/ExampleTests/ManyTests/lit.local.cfg | 23
-rw-r--r--  utils/lit/lit/ExampleTests/TclTest/lit.local.cfg | 5
-rw-r--r--  utils/lit/lit/ExampleTests/TclTest/stderr-pipe.ll | 1
-rw-r--r--  utils/lit/lit/ExampleTests/TclTest/tcl-redir-1.ll | 7
-rw-r--r--  utils/lit/lit/ExampleTests/lit.cfg | 4
-rw-r--r--  utils/lit/lit/LitConfig.py | 14
-rw-r--r--  utils/lit/lit/LitFormats.py | 3
-rw-r--r--  utils/lit/lit/ShUtil.py | 22
-rw-r--r--  utils/lit/lit/TclUtil.py | 322
-rw-r--r--  utils/lit/lit/Test.py | 4
-rw-r--r--  utils/lit/lit/TestFormats.py | 58
-rw-r--r--  utils/lit/lit/TestRunner.py | 180
-rw-r--r--  utils/lit/lit/__init__.py | 2
-rw-r--r--  utils/lit/lit/discovery.py | 234
-rwxr-xr-x  utils/lit/lit/main.py | 267
-rw-r--r--  utils/lit/tests/.coveragerc | 11
-rw-r--r--  utils/lit/tests/Inputs/discovery/lit.cfg | 5
-rw-r--r--  utils/lit/tests/Inputs/discovery/subdir/lit.local.cfg | 1
-rw-r--r--  utils/lit/tests/Inputs/discovery/subdir/test-three.py | 1
-rw-r--r--  utils/lit/tests/Inputs/discovery/subsuite/lit.cfg | 5
-rw-r--r--  utils/lit/tests/Inputs/discovery/subsuite/test-one.txt | 1
-rw-r--r--  utils/lit/tests/Inputs/discovery/subsuite/test-two.txt | 1
-rw-r--r--  utils/lit/tests/Inputs/discovery/test-one.txt | 1
-rw-r--r--  utils/lit/tests/Inputs/discovery/test-two.txt | 1
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/external_shell/fail.txt | 3
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/external_shell/lit.local.cfg | 1
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/external_shell/pass.txt | 1
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/fail.txt | 1
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/lit.cfg | 7
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/no-test-line.txt | 1
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/pass.txt | 1
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/requires-missing.txt | 2
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/requires-present.txt | 2
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/unsupported_dir/lit.local.cfg | 1
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/unsupported_dir/some-test.txt | 1
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/xfail-feature.txt | 2
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/xfail-target.txt | 2
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/xfail.txt | 2
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/xpass.txt | 2
-rw-r--r--  utils/lit/tests/Inputs/shtest-shell/error-0.txt | 3
-rw-r--r--  utils/lit/tests/Inputs/shtest-shell/error-1.txt | 3
-rw-r--r--  utils/lit/tests/Inputs/shtest-shell/error-2.txt | 3
-rw-r--r--  utils/lit/tests/Inputs/shtest-shell/lit.cfg | 5
-rw-r--r--  utils/lit/tests/Inputs/shtest-shell/redirects.txt | 41
-rw-r--r--  utils/lit/tests/Inputs/shtest-shell/sequencing-0.txt | 28
-rw-r--r--  utils/lit/tests/Inputs/shtest-shell/sequencing-1.txt | 2
-rwxr-xr-x  utils/lit/tests/Inputs/shtest-shell/write-to-stderr.sh | 3
-rwxr-xr-x  utils/lit/tests/Inputs/shtest-shell/write-to-stdout-and-stderr.sh | 4
-rw-r--r--  utils/lit/tests/Inputs/unittest-adaptor/lit.cfg | 5
-rw-r--r--  utils/lit/tests/Inputs/unittest-adaptor/test-one.txt | 1
-rw-r--r--  utils/lit/tests/Inputs/unittest-adaptor/test-two.txt | 1
-rw-r--r--  utils/lit/tests/discovery.py | 25
-rw-r--r--  utils/lit/tests/lit.cfg | 36
-rw-r--r--  utils/lit/tests/shell-parsing.py | 3
-rw-r--r--  utils/lit/tests/shtest-format.py | 43
-rw-r--r--  utils/lit/tests/shtest-shell.py | 33
-rw-r--r--  utils/lit/tests/unittest-adaptor.py | 18
-rw-r--r--  utils/lit/tests/usage.py | 6
-rw-r--r--  utils/lit/utils/README.txt | 2
-rwxr-xr-x  utils/lit/utils/check-coverage | 50
-rwxr-xr-x  utils/lit/utils/check-sdist | 44
74 files changed, 798 insertions, 979 deletions
diff --git a/utils/lit/MANIFEST.in b/utils/lit/MANIFEST.in
new file mode 100644
index 0000000..6491a02
--- /dev/null
+++ b/utils/lit/MANIFEST.in
@@ -0,0 +1,7 @@
+include TODO lit.py
+recursive-include tests *
+global-exclude *pyc
+global-exclude *~
+prune tests/Output
+prune tests/*/Output
+prune tests/*/*/Output
diff --git a/utils/lit/TODO b/utils/lit/TODO
index 6d7f7ea..d2ff842 100644
--- a/utils/lit/TODO
+++ b/utils/lit/TODO
@@ -7,3 +7,20 @@
- Support valgrind in all configs, and LLVM style valgrind.
- Support a timeout / ulimit.
+
+ - Rename 'lit' injected variable for config to be lit_config.
+
+ - Allow import of 'lit' in test suite definitions.
+
+ - Create an explicit test suite object (instead of using the top-level
+ TestingConfig object).
+
+ - Allow 'lit' driver to cooperate with test suites to add options (or at least
+ sanitize accepted params).
+
+ - Consider move to identifying all tests by path-to-test-suite and then path to
+ subtest, and don't use test suite names.
+
+ - Consider move to change workflow to always load suites, then resolve command
+ line arguments.
+
diff --git a/utils/lit/lit/ExampleTests/Clang/lit.cfg b/utils/lit/lit/ExampleTests/Clang/lit.cfg
index 1e1e807..9295bd9 100644
--- a/utils/lit/lit/ExampleTests/Clang/lit.cfg
+++ b/utils/lit/lit/ExampleTests/Clang/lit.cfg
@@ -14,7 +14,7 @@ config.test_format = lit.formats.ShTest(execute_external = True)
# suffixes: A list of file extensions to treat as test files.
config.suffixes = ['.c', '.cpp', '.m', '.mm']
-# target_triple: Used by ShTest and TclTest formats for XFAIL checks.
+# target_triple: Used by ShTest format for XFAIL checks.
config.target_triple = 'foo'
###
diff --git a/utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/data.txt b/utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/data.txt
new file mode 100644
index 0000000..45b983b
--- /dev/null
+++ b/utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/data.txt
@@ -0,0 +1 @@
+hi
diff --git a/utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/dg.exp b/utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/dg.exp
deleted file mode 100644
index 2bda07a..0000000
--- a/utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/dg.exp
+++ /dev/null
@@ -1,6 +0,0 @@
-load_lib llvm.exp
-
-if { [llvm_supports_target X86] } {
- RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll}]]
-}
-
diff --git a/utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/pct-S.ll b/utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/pct-S.ll
new file mode 100644
index 0000000..3ff3633
--- /dev/null
+++ b/utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/pct-S.ll
@@ -0,0 +1 @@
+; RUN: grep "hi" %S/data.txt
diff --git a/utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.cfg b/utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.cfg
index 3fdd63c..533c445 100644
--- a/utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.cfg
+++ b/utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.cfg
@@ -8,11 +8,11 @@ import os
config.name = 'LLVM'
# testFormat: The test format to use to interpret tests.
-config.test_format = lit.formats.TclTest()
+config.test_format = lit.formats.ShTest()
# suffixes: A list of file extensions to treat as test files, this is actually
# set by on_clone().
-config.suffixes = []
+config.suffixes = [ '.ll' ]
# test_source_root: The root path where tests are located.
config.test_source_root = os.path.dirname(__file__)
@@ -64,74 +64,3 @@ if config.test_exec_root is None:
lit.load_config(config, site_cfg)
raise SystemExit
-###
-
-# Load site data from DejaGNU's site.exp.
-import re
-site_exp = {}
-# FIXME: Implement lit.site.cfg.
-for line in open(os.path.join(config.llvm_obj_root, 'test', 'site.exp')):
- m = re.match('set ([^ ]+) "([^"]*)"', line)
- if m:
- site_exp[m.group(1)] = m.group(2)
-
-excludes = []
-
-# Provide target_triple for use in XFAIL.
-config.target_triple = site_exp['target_triplet']
-
-# Provide llvm_supports_target for use in local configs.
-targets = set(site_exp["TARGETS_TO_BUILD"].split())
-def llvm_supports_target(name):
- return name in targets
-
-# Provide on_clone hook for reading 'dg.exp'.
-import os
-simpleLibData = re.compile(r"""load_lib llvm.exp
-
-RunLLVMTests \[lsort \[glob -nocomplain \$srcdir/\$subdir/\*\.(.*)\]\]""",
- re.MULTILINE)
-conditionalLibData = re.compile(r"""load_lib llvm.exp
-
-if.*\[ ?(llvm[^ ]*) ([^ ]*) ?\].*{
- *RunLLVMTests \[lsort \[glob -nocomplain \$srcdir/\$subdir/\*\.(.*)\]\]
-\}""", re.MULTILINE)
-def on_clone(parent, cfg, for_path):
- def addSuffixes(match):
- if match[0] == '{' and match[-1] == '}':
- cfg.suffixes = ['.' + s for s in match[1:-1].split(',')]
- else:
- cfg.suffixes = ['.' + match]
-
- libPath = os.path.join(os.path.dirname(for_path),
- 'dg.exp')
- if not os.path.exists(libPath):
- cfg.unsupported = True
- return
-
- # Reset unsupported, in case we inherited it.
- cfg.unsupported = False
- lib = open(libPath).read().strip()
-
- # Check for a simple library.
- m = simpleLibData.match(lib)
- if m:
- addSuffixes(m.group(1))
- return
-
- # Check for a conditional test set.
- m = conditionalLibData.match(lib)
- if m:
- funcname,arg,match = m.groups()
- addSuffixes(match)
-
- func = globals().get(funcname)
- if not func:
- lit.error('unsupported predicate %r' % funcname)
- elif not func(arg):
- cfg.unsupported = True
- return
- # Otherwise, give up.
- lit.error('unable to understand %r:\n%s' % (libPath, lib))
-
-config.on_clone = on_clone
diff --git a/utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.site.cfg b/utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.site.cfg
index 3bfee54..d45f3ac 100644
--- a/utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.site.cfg
+++ b/utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.site.cfg
@@ -1,8 +1,5 @@
# -*- Python -*-
-## Autogenerated by Makefile ##
-# Do not edit!
-
# Preserve some key paths for use by main LLVM test suite config.
config.llvm_obj_root = os.path.dirname(os.path.dirname(__file__))
diff --git a/utils/lit/lit/ExampleTests/LLVM.InTree/test/site.exp b/utils/lit/lit/ExampleTests/LLVM.InTree/test/site.exp
deleted file mode 100644
index 4bc58d7..0000000
--- a/utils/lit/lit/ExampleTests/LLVM.InTree/test/site.exp
+++ /dev/null
@@ -1,10 +0,0 @@
-## these variables are automatically generated by make ##
-# Do not edit here. If you wish to override these values
-# edit the last section
-set target_triplet "x86_64-apple-darwin10"
-set TARGETS_TO_BUILD "X86 Sparc PowerPC ARM Mips CellSPU PIC16 XCore MSP430 Blackfin MSIL CppBackend"
-set srcroot "/Volumes/Data/ddunbar/llvm"
-set objroot "/Volumes/Data/ddunbar/llvm.obj.64"
-set srcdir "/Volumes/Data/ddunbar/llvm/test"
-set objdir "/Volumes/Data/ddunbar/llvm.obj.64/test"
-## All variables above are generated by configure. Do Not Edit ##
diff --git a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/lit.site.cfg b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/lit.site.cfg
index bdcc35e..94a02d8 100644
--- a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/lit.site.cfg
+++ b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/lit.site.cfg
@@ -1,8 +1,5 @@
# -*- Python -*-
-## Autogenerated by Makefile ##
-# Do not edit!
-
# Preserve some key paths for use by main LLVM test suite config.
config.llvm_obj_root = os.path.dirname(os.path.dirname(__file__))
diff --git a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/site.exp b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/site.exp
deleted file mode 100644
index 4bc58d7..0000000
--- a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/site.exp
+++ /dev/null
@@ -1,10 +0,0 @@
-## these variables are automatically generated by make ##
-# Do not edit here. If you wish to override these values
-# edit the last section
-set target_triplet "x86_64-apple-darwin10"
-set TARGETS_TO_BUILD "X86 Sparc PowerPC ARM Mips CellSPU PIC16 XCore MSP430 Blackfin MSIL CppBackend"
-set srcroot "/Volumes/Data/ddunbar/llvm"
-set objroot "/Volumes/Data/ddunbar/llvm.obj.64"
-set srcdir "/Volumes/Data/ddunbar/llvm/test"
-set objdir "/Volumes/Data/ddunbar/llvm.obj.64/test"
-## All variables above are generated by configure. Do Not Edit ##
diff --git a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/dg.exp b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/dg.exp
deleted file mode 100644
index 2bda07a..0000000
--- a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/dg.exp
+++ /dev/null
@@ -1,6 +0,0 @@
-load_lib llvm.exp
-
-if { [llvm_supports_target X86] } {
- RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll}]]
-}
-
diff --git a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/lit.cfg b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/lit.cfg
index 3fdd63c..533c445 100644
--- a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/lit.cfg
+++ b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/lit.cfg
@@ -8,11 +8,11 @@ import os
config.name = 'LLVM'
# testFormat: The test format to use to interpret tests.
-config.test_format = lit.formats.TclTest()
+config.test_format = lit.formats.ShTest()
# suffixes: A list of file extensions to treat as test files, this is actually
# set by on_clone().
-config.suffixes = []
+config.suffixes = [ '.ll' ]
# test_source_root: The root path where tests are located.
config.test_source_root = os.path.dirname(__file__)
@@ -64,74 +64,3 @@ if config.test_exec_root is None:
lit.load_config(config, site_cfg)
raise SystemExit
-###
-
-# Load site data from DejaGNU's site.exp.
-import re
-site_exp = {}
-# FIXME: Implement lit.site.cfg.
-for line in open(os.path.join(config.llvm_obj_root, 'test', 'site.exp')):
- m = re.match('set ([^ ]+) "([^"]*)"', line)
- if m:
- site_exp[m.group(1)] = m.group(2)
-
-excludes = []
-
-# Provide target_triple for use in XFAIL.
-config.target_triple = site_exp['target_triplet']
-
-# Provide llvm_supports_target for use in local configs.
-targets = set(site_exp["TARGETS_TO_BUILD"].split())
-def llvm_supports_target(name):
- return name in targets
-
-# Provide on_clone hook for reading 'dg.exp'.
-import os
-simpleLibData = re.compile(r"""load_lib llvm.exp
-
-RunLLVMTests \[lsort \[glob -nocomplain \$srcdir/\$subdir/\*\.(.*)\]\]""",
- re.MULTILINE)
-conditionalLibData = re.compile(r"""load_lib llvm.exp
-
-if.*\[ ?(llvm[^ ]*) ([^ ]*) ?\].*{
- *RunLLVMTests \[lsort \[glob -nocomplain \$srcdir/\$subdir/\*\.(.*)\]\]
-\}""", re.MULTILINE)
-def on_clone(parent, cfg, for_path):
- def addSuffixes(match):
- if match[0] == '{' and match[-1] == '}':
- cfg.suffixes = ['.' + s for s in match[1:-1].split(',')]
- else:
- cfg.suffixes = ['.' + match]
-
- libPath = os.path.join(os.path.dirname(for_path),
- 'dg.exp')
- if not os.path.exists(libPath):
- cfg.unsupported = True
- return
-
- # Reset unsupported, in case we inherited it.
- cfg.unsupported = False
- lib = open(libPath).read().strip()
-
- # Check for a simple library.
- m = simpleLibData.match(lib)
- if m:
- addSuffixes(m.group(1))
- return
-
- # Check for a conditional test set.
- m = conditionalLibData.match(lib)
- if m:
- funcname,arg,match = m.groups()
- addSuffixes(match)
-
- func = globals().get(funcname)
- if not func:
- lit.error('unsupported predicate %r' % funcname)
- elif not func(arg):
- cfg.unsupported = True
- return
- # Otherwise, give up.
- lit.error('unable to understand %r:\n%s' % (libPath, lib))
-
-config.on_clone = on_clone
diff --git a/utils/lit/lit/ExampleTests/ManyTests/lit.local.cfg b/utils/lit/lit/ExampleTests/ManyTests/lit.local.cfg
new file mode 100644
index 0000000..6cc4752
--- /dev/null
+++ b/utils/lit/lit/ExampleTests/ManyTests/lit.local.cfg
@@ -0,0 +1,23 @@
+# -*- Python -*-
+
+Test = lit.Test
+
+class ManyTests(object):
+ def __init__(self, N=10000):
+ self.N = N
+
+ def getTestsInDirectory(self, testSuite, path_in_suite,
+ litConfig, localConfig):
+ for i in range(self.N):
+ test_name = 'test-%04d' % (i,)
+ yield Test.Test(testSuite, path_in_suite + (test_name,),
+ localConfig)
+
+ def execute(self, test, litConfig):
+ # Do a "non-trivial" amount of Python work.
+ sum = 0
+ for i in range(10000):
+ sum += i
+ return Test.PASS,''
+
+config.test_format = ManyTests()
diff --git a/utils/lit/lit/ExampleTests/TclTest/lit.local.cfg b/utils/lit/lit/ExampleTests/TclTest/lit.local.cfg
deleted file mode 100644
index 6a37129..0000000
--- a/utils/lit/lit/ExampleTests/TclTest/lit.local.cfg
+++ /dev/null
@@ -1,5 +0,0 @@
-# -*- Python -*-
-
-config.test_format = lit.formats.TclTest()
-
-config.suffixes = ['.ll']
diff --git a/utils/lit/lit/ExampleTests/TclTest/stderr-pipe.ll b/utils/lit/lit/ExampleTests/TclTest/stderr-pipe.ll
deleted file mode 100644
index 6c55fe8..0000000
--- a/utils/lit/lit/ExampleTests/TclTest/stderr-pipe.ll
+++ /dev/null
@@ -1 +0,0 @@
-; RUN: gcc -### > /dev/null |& grep {gcc version}
diff --git a/utils/lit/lit/ExampleTests/TclTest/tcl-redir-1.ll b/utils/lit/lit/ExampleTests/TclTest/tcl-redir-1.ll
deleted file mode 100644
index 61240ba..0000000
--- a/utils/lit/lit/ExampleTests/TclTest/tcl-redir-1.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: echo 'hi' > %t.1 | echo 'hello' > %t.2
-; RUN: not grep 'hi' %t.1
-; RUN: grep 'hello' %t.2
-
-
-
-
diff --git a/utils/lit/lit/ExampleTests/lit.cfg b/utils/lit/lit/ExampleTests/lit.cfg
index 2629918..164daba 100644
--- a/utils/lit/lit/ExampleTests/lit.cfg
+++ b/utils/lit/lit/ExampleTests/lit.cfg
@@ -19,8 +19,8 @@ config.test_source_root = None
# root).
config.test_exec_root = None
-# target_triple: Used by ShTest and TclTest formats for XFAIL checks.
+# target_triple: Used by ShTest format for XFAIL checks.
config.target_triple = 'foo'
-# available_features: Used by ShTest and TclTest formats for REQUIRES checks.
+# available_features: Used by ShTest format for REQUIRES checks.
config.available_features.add('some-feature-name')
diff --git a/utils/lit/lit/LitConfig.py b/utils/lit/lit/LitConfig.py
index 0a359a3..9bcf20b 100644
--- a/utils/lit/lit/LitConfig.py
+++ b/utils/lit/lit/LitConfig.py
@@ -12,16 +12,15 @@ class LitConfig:
import Test
# Provide access to built-in formats.
- import LitFormats as formats
+ import TestFormats as formats
# Provide access to built-in utility functions.
import Util as util
def __init__(self, progname, path, quiet,
useValgrind, valgrindLeakCheck, valgrindArgs,
- useTclAsSh,
noExecute, ignoreStdErr, debug, isWindows,
- params):
+ params, config_prefix = None):
# The name of the test runner.
self.progname = progname
# The items to add to the PATH environment variable.
@@ -30,7 +29,6 @@ class LitConfig:
self.useValgrind = bool(useValgrind)
self.valgrindLeakCheck = bool(valgrindLeakCheck)
self.valgrindUserArgs = list(valgrindArgs)
- self.useTclAsSh = bool(useTclAsSh)
self.noExecute = noExecute
self.ignoreStdErr = ignoreStdErr
self.debug = debug
@@ -38,6 +36,12 @@ class LitConfig:
self.params = dict(params)
self.bashPath = None
+ # Configuration files to look for when discovering test suites.
+ self.config_prefix = config_prefix or 'lit'
+ self.config_name = '%s.cfg' % (self.config_prefix,)
+ self.site_config_name = '%s.site.cfg' % (self.config_prefix,)
+ self.local_config_name = '%s.local.cfg' % (self.config_prefix,)
+
self.numErrors = 0
self.numWarnings = 0
@@ -80,7 +84,7 @@ class LitConfig:
break
if self.bashPath is None:
- self.warning("Unable to find 'bash', running Tcl tests internally.")
+ self.warning("Unable to find 'bash'.")
self.bashPath = ''
return self.bashPath
diff --git a/utils/lit/lit/LitFormats.py b/utils/lit/lit/LitFormats.py
deleted file mode 100644
index 931d107..0000000
--- a/utils/lit/lit/LitFormats.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from TestFormats import FileBasedTest
-from TestFormats import GoogleTest, ShTest, TclTest
-from TestFormats import SyntaxCheckTest, OneCommandPerFileTest
diff --git a/utils/lit/lit/ShUtil.py b/utils/lit/lit/ShUtil.py
index dda622a..50f7910 100644
--- a/utils/lit/lit/ShUtil.py
+++ b/utils/lit/lit/ShUtil.py
@@ -35,7 +35,7 @@ class ShLexer:
if ('|' in chunk or '&' in chunk or
'<' in chunk or '>' in chunk or
"'" in chunk or '"' in chunk or
- '\\' in chunk):
+ ';' in chunk or '\\' in chunk):
return None
self.pos = self.pos - 1 + len(chunk)
@@ -48,7 +48,7 @@ class ShLexer:
str = c
while self.pos != self.end:
c = self.look()
- if c.isspace() or c in "|&":
+ if c.isspace() or c in "|&;":
break
elif c in '><':
# This is an annoying case; we treat '2>' as a single token so
@@ -129,7 +129,7 @@ class ShLexer:
lex_one_token - Lex a single 'sh' token. """
c = self.eat()
- if c in ';!':
+ if c == ';':
return (c,)
if c == '|':
if self.maybe_eat('|'):
@@ -219,9 +219,6 @@ class ShParser:
def parse_pipeline(self):
negate = False
- if self.look() == ('!',):
- self.lex()
- negate = True
commands = [self.parse_command()]
while self.look() == ('|',):
@@ -253,9 +250,9 @@ class TestShLexer(unittest.TestCase):
return list(ShLexer(str, *args, **kwargs).lex())
def test_basic(self):
- self.assertEqual(self.lex('a|b>c&d<e'),
+ self.assertEqual(self.lex('a|b>c&d<e;f'),
['a', ('|',), 'b', ('>',), 'c', ('&',), 'd',
- ('<',), 'e'])
+ ('<',), 'e', (';',), 'f'])
def test_redirection_tokens(self):
self.assertEqual(self.lex('a2>c'),
@@ -317,10 +314,6 @@ class TestShParse(unittest.TestCase):
Command(['c'], [])],
False))
- self.assertEqual(self.parse('! a'),
- Pipeline([Command(['a'], [])],
- True))
-
def test_list(self):
self.assertEqual(self.parse('a ; b'),
Seq(Pipeline([Command(['a'], [])], False),
@@ -349,5 +342,10 @@ class TestShParse(unittest.TestCase):
'||',
Pipeline([Command(['c'], [])], False)))
+ self.assertEqual(self.parse('a; b'),
+ Seq(Pipeline([Command(['a'], [])], False),
+ ';',
+ Pipeline([Command(['b'], [])], False)))
+
if __name__ == '__main__':
unittest.main()
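
Illustration only, not part of the patch: a minimal sketch of the ';' sequencing that the internal shell lexer/parser accepts after this change, mirroring the new TestShParse case above. It assumes a Python 2 interpreter of this era with utils/lit on sys.path; the expected structure is taken from the added unit test.

from lit import ShUtil

# Parse a two-command sequence; before this patch the lexer rejected ';'
# outside of external-shell mode.
cmd = ShUtil.ShParser('echo hi; echo bye', False).parse()

# Expected structure, per the new test case above:
#   Seq(Pipeline([Command(['echo', 'hi'], [])], False),
#       ';',
#       Pipeline([Command(['echo', 'bye'], [])], False))
print(cmd)
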
diff --git a/utils/lit/lit/TclUtil.py b/utils/lit/lit/TclUtil.py
deleted file mode 100644
index 4a3f345..0000000
--- a/utils/lit/lit/TclUtil.py
+++ /dev/null
@@ -1,322 +0,0 @@
-import itertools
-
-from ShCommands import Command, Pipeline
-
-def tcl_preprocess(data):
- # Tcl has a preprocessing step to replace escaped newlines.
- i = data.find('\\\n')
- if i == -1:
- return data
-
- # Replace '\\\n' and subsequent whitespace by a single space.
- n = len(data)
- str = data[:i]
- i += 2
- while i < n and data[i] in ' \t':
- i += 1
- return str + ' ' + data[i:]
-
-class TclLexer:
- """TclLexer - Lex a string into "words", following the Tcl syntax."""
-
- def __init__(self, data):
- self.data = tcl_preprocess(data)
- self.pos = 0
- self.end = len(self.data)
-
- def at_end(self):
- return self.pos == self.end
-
- def eat(self):
- c = self.data[self.pos]
- self.pos += 1
- return c
-
- def look(self):
- return self.data[self.pos]
-
- def maybe_eat(self, c):
- """
- maybe_eat(c) - Consume the character c if it is the next character,
- returning True if a character was consumed. """
- if self.data[self.pos] == c:
- self.pos += 1
- return True
- return False
-
- def escape(self, c):
- if c == 'a':
- return '\x07'
- elif c == 'b':
- return '\x08'
- elif c == 'f':
- return '\x0c'
- elif c == 'n':
- return '\n'
- elif c == 'r':
- return '\r'
- elif c == 't':
- return '\t'
- elif c == 'v':
- return '\x0b'
- elif c in 'uxo':
- raise ValueError,'Invalid quoted character %r' % c
- else:
- return c
-
- def lex_braced(self):
- # Lex until whitespace or end of string, the opening brace has already
- # been consumed.
-
- str = ''
- while 1:
- if self.at_end():
- raise ValueError,"Unterminated '{' quoted word"
-
- c = self.eat()
- if c == '}':
- break
- elif c == '{':
- str += '{' + self.lex_braced() + '}'
- elif c == '\\' and self.look() in '{}':
- str += self.eat()
- else:
- str += c
-
- return str
-
- def lex_quoted(self):
- str = ''
-
- while 1:
- if self.at_end():
- raise ValueError,"Unterminated '\"' quoted word"
-
- c = self.eat()
- if c == '"':
- break
- elif c == '\\':
- if self.at_end():
- raise ValueError,'Missing quoted character'
-
- str += self.escape(self.eat())
- else:
- str += c
-
- return str
-
- def lex_unquoted(self, process_all=False):
- # Lex until whitespace or end of string.
- str = ''
- while not self.at_end():
- if not process_all:
- if self.look().isspace() or self.look() == ';':
- break
-
- c = self.eat()
- if c == '\\':
- if self.at_end():
- raise ValueError,'Missing quoted character'
-
- str += self.escape(self.eat())
- elif c == '[':
- raise NotImplementedError, ('Command substitution is '
- 'not supported')
- elif c == '$' and not self.at_end() and (self.look().isalpha() or
- self.look() == '{'):
- raise NotImplementedError, ('Variable substitution is '
- 'not supported')
- else:
- str += c
-
- return str
-
- def lex_one_token(self):
- if self.maybe_eat('"'):
- return self.lex_quoted()
- elif self.maybe_eat('{'):
- # Check for argument substitution.
- if not self.maybe_eat('*'):
- return self.lex_braced()
-
- if not self.maybe_eat('}'):
- return '*' + self.lex_braced()
-
- if self.at_end() or self.look().isspace():
- return '*'
-
- raise NotImplementedError, "Argument substitution is unsupported"
- else:
- return self.lex_unquoted()
-
- def lex(self):
- while not self.at_end():
- c = self.look()
- if c in ' \t':
- self.eat()
- elif c in ';\n':
- self.eat()
- yield (';',)
- else:
- yield self.lex_one_token()
-
-class TclExecCommand:
- kRedirectPrefixes1 = ('<', '>')
- kRedirectPrefixes2 = ('<@', '<<', '2>', '>&', '>>', '>@')
- kRedirectPrefixes3 = ('2>@', '2>>', '>>&', '>&@')
- kRedirectPrefixes4 = ('2>@1',)
-
- def __init__(self, args):
- self.args = iter(args)
-
- def lex(self):
- try:
- return self.args.next()
- except StopIteration:
- return None
-
- def look(self):
- next = self.lex()
- if next is not None:
- self.args = itertools.chain([next], self.args)
- return next
-
- def parse_redirect(self, tok, length):
- if len(tok) == length:
- arg = self.lex()
- if arg is None:
- raise ValueError,'Missing argument to %r redirection' % tok
- else:
- tok,arg = tok[:length],tok[length:]
-
- if tok[0] == '2':
- op = (tok[1:],2)
- else:
- op = (tok,)
- return (op, arg)
-
- def parse_pipeline(self):
- if self.look() is None:
- raise ValueError,"Expected at least one argument to exec"
-
- commands = [Command([],[])]
- while 1:
- arg = self.lex()
- if arg is None:
- break
- elif arg == '|':
- commands.append(Command([],[]))
- elif arg == '|&':
- # Write this as a redirect of stderr; it must come first because
- # stdout may have already been redirected.
- commands[-1].redirects.insert(0, (('>&',2),'1'))
- commands.append(Command([],[]))
- elif arg[:4] in TclExecCommand.kRedirectPrefixes4:
- commands[-1].redirects.append(self.parse_redirect(arg, 4))
- elif arg[:3] in TclExecCommand.kRedirectPrefixes3:
- commands[-1].redirects.append(self.parse_redirect(arg, 3))
- elif arg[:2] in TclExecCommand.kRedirectPrefixes2:
- commands[-1].redirects.append(self.parse_redirect(arg, 2))
- elif arg[:1] in TclExecCommand.kRedirectPrefixes1:
- commands[-1].redirects.append(self.parse_redirect(arg, 1))
- else:
- commands[-1].args.append(arg)
-
- return Pipeline(commands, False, pipe_err=True)
-
- def parse(self):
- ignoreStderr = False
- keepNewline = False
-
- # Parse arguments.
- while 1:
- next = self.look()
- if not isinstance(next, str) or next[0] != '-':
- break
-
- if next == '--':
- self.lex()
- break
- elif next == '-ignorestderr':
- ignoreStderr = True
- elif next == '-keepnewline':
- keepNewline = True
- else:
- raise ValueError,"Invalid exec argument %r" % next
-
- return (ignoreStderr, keepNewline, self.parse_pipeline())
-
-###
-
-import unittest
-
-class TestTclLexer(unittest.TestCase):
- def lex(self, str, *args, **kwargs):
- return list(TclLexer(str, *args, **kwargs).lex())
-
- def test_preprocess(self):
- self.assertEqual(tcl_preprocess('a b'), 'a b')
- self.assertEqual(tcl_preprocess('a\\\nb c'), 'a b c')
-
- def test_unquoted(self):
- self.assertEqual(self.lex('a b c'),
- ['a', 'b', 'c'])
- self.assertEqual(self.lex(r'a\nb\tc\ '),
- ['a\nb\tc '])
- self.assertEqual(self.lex(r'a \\\$b c $\\'),
- ['a', r'\$b', 'c', '$\\'])
-
- def test_braced(self):
- self.assertEqual(self.lex('a {b c} {}'),
- ['a', 'b c', ''])
- self.assertEqual(self.lex(r'a {b {c\n}}'),
- ['a', 'b {c\\n}'])
- self.assertEqual(self.lex(r'a {b\{}'),
- ['a', 'b{'])
- self.assertEqual(self.lex(r'{*}'), ['*'])
- self.assertEqual(self.lex(r'{*} a'), ['*', 'a'])
- self.assertEqual(self.lex(r'{*} a'), ['*', 'a'])
- self.assertEqual(self.lex('{a\\\n b}'),
- ['a b'])
-
- def test_quoted(self):
- self.assertEqual(self.lex('a "b c"'),
- ['a', 'b c'])
-
- def test_terminators(self):
- self.assertEqual(self.lex('a\nb'),
- ['a', (';',), 'b'])
- self.assertEqual(self.lex('a;b'),
- ['a', (';',), 'b'])
- self.assertEqual(self.lex('a ; b'),
- ['a', (';',), 'b'])
-
-class TestTclExecCommand(unittest.TestCase):
- def parse(self, str):
- return TclExecCommand(list(TclLexer(str).lex())).parse()
-
- def test_basic(self):
- self.assertEqual(self.parse('echo hello'),
- (False, False,
- Pipeline([Command(['echo', 'hello'], [])],
- False, True)))
- self.assertEqual(self.parse('echo hello | grep hello'),
- (False, False,
- Pipeline([Command(['echo', 'hello'], []),
- Command(['grep', 'hello'], [])],
- False, True)))
-
- def test_redirect(self):
- self.assertEqual(self.parse('echo hello > a >b >>c 2> d |& e'),
- (False, False,
- Pipeline([Command(['echo', 'hello'],
- [(('>&',2),'1'),
- (('>',),'a'),
- (('>',),'b'),
- (('>>',),'c'),
- (('>',2),'d')]),
- Command(['e'], [])],
- False, True)))
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/utils/lit/lit/Test.py b/utils/lit/lit/Test.py
index db2e032..9471e3a 100644
--- a/utils/lit/lit/Test.py
+++ b/utils/lit/lit/Test.py
@@ -7,6 +7,10 @@ class TestResult:
self.name = name
self.isFailure = isFailure
+ def __repr__(self):
+ return '%s%r' % (self.__class__.__name__,
+ (self.name, self.isFailure))
+
PASS = TestResult('PASS', False)
XFAIL = TestResult('XFAIL', False)
FAIL = TestResult('FAIL', True)
diff --git a/utils/lit/lit/TestFormats.py b/utils/lit/lit/TestFormats.py
index d1c0558..26541f1 100644
--- a/utils/lit/lit/TestFormats.py
+++ b/utils/lit/lit/TestFormats.py
@@ -54,28 +54,36 @@ class GoogleTest(object):
else:
yield ''.join(nested_tests) + ln
+ def getTestsInExecutable(self, testSuite, path_in_suite, execpath,
+ litConfig, localConfig):
+ if not execpath.endswith(self.test_suffix):
+ return
+ (dirname, basename) = os.path.split(execpath)
+ # Discover the tests in this executable.
+ for testname in self.getGTestTests(execpath, litConfig, localConfig):
+ testPath = path_in_suite + (dirname, basename, testname)
+ yield Test.Test(testSuite, testPath, localConfig)
+
def getTestsInDirectory(self, testSuite, path_in_suite,
litConfig, localConfig):
source_path = testSuite.getSourcePath(path_in_suite)
for filename in os.listdir(source_path):
- # Check for the one subdirectory (build directory) tests will be in.
- if not '.' in self.test_sub_dir:
+ filepath = os.path.join(source_path, filename)
+ if os.path.isdir(filepath):
+ # Iterate over executables in a directory.
if not os.path.normcase(filename) in self.test_sub_dir:
continue
-
- filepath = os.path.join(source_path, filename)
- if not os.path.isdir(filepath):
- continue
-
- for subfilename in os.listdir(filepath):
- if subfilename.endswith(self.test_suffix):
+ for subfilename in os.listdir(filepath):
execpath = os.path.join(filepath, subfilename)
-
- # Discover the tests in this executable.
- for name in self.getGTestTests(execpath, litConfig,
- localConfig):
- testPath = path_in_suite + (filename, subfilename, name)
- yield Test.Test(testSuite, testPath, localConfig)
+ for test in self.getTestsInExecutable(
+ testSuite, path_in_suite, execpath,
+ litConfig, localConfig):
+ yield test
+ elif ('.' in self.test_sub_dir):
+ for test in self.getTestsInExecutable(
+ testSuite, path_in_suite, filepath,
+ litConfig, localConfig):
+ yield test
def execute(self, test, litConfig):
testPath,testName = os.path.split(test.getSourcePath())
@@ -89,6 +97,9 @@ class GoogleTest(object):
if litConfig.useValgrind:
cmd = litConfig.valgrindArgs + cmd
+ if litConfig.noExecute:
+ return Test.PASS, ''
+
out, err, exitCode = TestRunner.executeCommand(
cmd, env=test.config.environment)
@@ -124,14 +135,6 @@ class ShTest(FileBasedTest):
return TestRunner.executeShTest(test, litConfig,
self.execute_external)
-class TclTest(FileBasedTest):
- def __init__(self, ignoreStdErr=False):
- self.ignoreStdErr = ignoreStdErr
-
- def execute(self, test, litConfig):
- litConfig.ignoreStdErr = self.ignoreStdErr
- return TestRunner.executeTclTest(test, litConfig)
-
###
import re
@@ -221,12 +224,3 @@ class OneCommandPerFileTest:
report += """Output:\n--\n%s--""" % diags
return Test.FAIL, report
-
-class SyntaxCheckTest(OneCommandPerFileTest):
- def __init__(self, compiler, dir, extra_cxx_args=[], *args, **kwargs):
- cmd = [compiler, '-x', 'c++', '-fsyntax-only'] + extra_cxx_args
- OneCommandPerFileTest.__init__(self, cmd, dir,
- useTempInput=1, *args, **kwargs)
-
- def createTempInput(self, tmp, test):
- print >>tmp, '#include "%s"' % test.source_path
diff --git a/utils/lit/lit/TestRunner.py b/utils/lit/lit/TestRunner.py
index 0c1911e..8417699 100644
--- a/utils/lit/lit/TestRunner.py
+++ b/utils/lit/lit/TestRunner.py
@@ -49,13 +49,14 @@ def executeShCmd(cmd, cfg, cwd, results):
return executeShCmd(cmd.rhs, cfg, cwd, results)
if cmd.op == '&':
- raise NotImplementedError,"unsupported test command: '&'"
+ raise InternalShellError(cmd,"unsupported shell operator: '&'")
if cmd.op == '||':
res = executeShCmd(cmd.lhs, cfg, cwd, results)
if res != 0:
res = executeShCmd(cmd.rhs, cfg, cwd, results)
return res
+
if cmd.op == '&&':
res = executeShCmd(cmd.lhs, cfg, cwd, results)
if res is None:
@@ -77,7 +78,7 @@ def executeShCmd(cmd, cfg, cwd, results):
# output. This is null until we have seen some output using
# stderr.
for i,j in enumerate(cmd.commands):
- # Apply the redirections, we use (N,) as a sentinal to indicate stdin,
+ # Apply the redirections, we use (N,) as a sentinel to indicate stdin,
# stdout, stderr for N equal to 0, 1, or 2 respectively. Redirects to or
# from a file are represented with a list [file, mode, file-object]
# where file-object is initially None.
@@ -98,7 +99,7 @@ def executeShCmd(cmd, cfg, cwd, results):
elif r[0] == ('<',):
redirects[0] = [r[1], 'r', None]
else:
- raise NotImplementedError,"Unsupported redirect: %r" % (r,)
+ raise InternalShellError(j,"Unsupported redirect: %r" % (r,))
# Map from the final redirections to something subprocess can handle.
final_redirects = []
@@ -107,14 +108,14 @@ def executeShCmd(cmd, cfg, cwd, results):
result = input
elif r == (1,):
if index == 0:
- raise NotImplementedError,"Unsupported redirect for stdin"
+ raise InternalShellError(j,"Unsupported redirect for stdin")
elif index == 1:
result = subprocess.PIPE
else:
result = subprocess.STDOUT
elif r == (2,):
if index != 2:
- raise NotImplementedError,"Unsupported redirect on stdout"
+ raise InternalShellError(j,"Unsupported redirect on stdout")
result = subprocess.PIPE
else:
if r[2] is None:
@@ -241,98 +242,26 @@ def executeShCmd(cmd, cfg, cwd, results):
return exitCode
def executeScriptInternal(test, litConfig, tmpBase, commands, cwd):
- ln = ' &&\n'.join(commands)
- try:
- cmd = ShUtil.ShParser(ln, litConfig.isWindows).parse()
- except:
- return (Test.FAIL, "shell parser error on: %r" % ln)
-
- results = []
- try:
- exitCode = executeShCmd(cmd, test.config, cwd, results)
- except InternalShellError,e:
- out = ''
- err = e.message
- exitCode = 255
-
- out = err = ''
- for i,(cmd, cmd_out,cmd_err,res) in enumerate(results):
- out += 'Command %d: %s\n' % (i, ' '.join('"%s"' % s for s in cmd.args))
- out += 'Command %d Result: %r\n' % (i, res)
- out += 'Command %d Output:\n%s\n\n' % (i, cmd_out)
- out += 'Command %d Stderr:\n%s\n\n' % (i, cmd_err)
-
- return out, err, exitCode
-
-def executeTclScriptInternal(test, litConfig, tmpBase, commands, cwd):
- import TclUtil
cmds = []
for ln in commands:
- # Given the unfortunate way LLVM's test are written, the line gets
- # backslash substitution done twice.
- ln = TclUtil.TclLexer(ln).lex_unquoted(process_all = True)
-
try:
- tokens = list(TclUtil.TclLexer(ln).lex())
+ cmds.append(ShUtil.ShParser(ln, litConfig.isWindows).parse())
except:
- return (Test.FAIL, "Tcl lexer error on: %r" % ln)
-
- # Validate there are no control tokens.
- for t in tokens:
- if not isinstance(t, str):
- return (Test.FAIL,
- "Invalid test line: %r containing %r" % (ln, t))
-
- try:
- cmds.append(TclUtil.TclExecCommand(tokens).parse_pipeline())
- except:
- return (Test.FAIL, "Tcl 'exec' parse error on: %r" % ln)
-
- if litConfig.useValgrind:
- for pipeline in cmds:
- if pipeline.commands:
- # Only valgrind the first command in each pipeline, to avoid
- # valgrinding things like grep, not, and FileCheck.
- cmd = pipeline.commands[0]
- cmd.args = litConfig.valgrindArgs + cmd.args
+ return (Test.FAIL, "shell parser error on: %r" % ln)
cmd = cmds[0]
for c in cmds[1:]:
cmd = ShUtil.Seq(cmd, '&&', c)
- # FIXME: This is lame, we shouldn't need bash. See PR5240.
- bashPath = litConfig.getBashPath()
- if litConfig.useTclAsSh and bashPath:
- script = tmpBase + '.script'
-
- # Write script file
- f = open(script,'w')
- print >>f, 'set -o pipefail'
- cmd.toShell(f, pipefail = True)
- f.close()
-
- if 0:
- print >>sys.stdout, cmd
- print >>sys.stdout, open(script).read()
- print >>sys.stdout
- return '', '', 0
-
- command = [litConfig.getBashPath(), script]
- out,err,exitCode = executeCommand(command, cwd=cwd,
- env=test.config.environment)
-
- return out,err,exitCode
- else:
- results = []
- try:
- exitCode = executeShCmd(cmd, test.config, cwd, results)
- except InternalShellError,e:
- results.append((e.command, '', e.message + '\n', 255))
- exitCode = 255
+ results = []
+ try:
+ exitCode = executeShCmd(cmd, test.config, cwd, results)
+ except InternalShellError,e:
+ exitCode = 127
+ results.append((e.command, '', e.message, exitCode))
out = err = ''
-
- for i,(cmd, cmd_out, cmd_err, res) in enumerate(results):
+ for i,(cmd, cmd_out,cmd_err,res) in enumerate(results):
out += 'Command %d: %s\n' % (i, ' '.join('"%s"' % s for s in cmd.args))
out += 'Command %d Result: %r\n' % (i, res)
out += 'Command %d Output:\n%s\n\n' % (i, cmd_out)
@@ -348,11 +277,14 @@ def executeScript(test, litConfig, tmpBase, commands, cwd):
script += '.bat'
# Write script file
- f = open(script,'w')
+ mode = 'w'
+ if litConfig.isWindows and not isWin32CMDEXE:
+ mode += 'b' # Avoid CRLFs when writing bash scripts.
+ f = open(script, mode)
if isWin32CMDEXE:
f.write('\nif %ERRORLEVEL% NEQ 0 EXIT\n'.join(commands))
else:
- f.write(' &&\n'.join(commands))
+ f.write('{ ' + '; } &&\n{ '.join(commands) + '; }')
f.write('\n')
f.close()
@@ -424,15 +356,15 @@ def parseIntegratedTestScript(test, normalize_slashes=False,
('%{pathsep}', os.pathsep),
('%t', tmpBase + '.tmp'),
('%T', tmpDir),
- # FIXME: Remove this once we kill DejaGNU.
- ('%abs_tmp', tmpBase + '.tmp'),
('#_MARKER_#', '%')])
# Collect the test lines from the script.
script = []
xfails = []
requires = []
+ line_number = 0
for ln in open(sourcepath):
+ line_number += 1
if 'RUN:' in ln:
# Isolate the command to run.
index = ln.index('RUN:')
@@ -441,6 +373,15 @@ def parseIntegratedTestScript(test, normalize_slashes=False,
# Trim trailing whitespace.
ln = ln.rstrip()
+ # Substitute line number expressions
+ ln = re.sub('%\(line\)', str(line_number), ln)
+ def replace_line_number(match):
+ if match.group(1) == '+':
+ return str(line_number + int(match.group(2)))
+ if match.group(1) == '-':
+ return str(line_number - int(match.group(2)))
+ ln = re.sub('%\(line *([\+-]) *(\d+)\)', replace_line_number, ln)
+
# Collapse lines with trailing '\\'.
if script and script[-1][-1] == '\\':
script[-1] = script[-1][:-1] + ln
@@ -490,17 +431,14 @@ def parseIntegratedTestScript(test, normalize_slashes=False,
isXFail = isExpectedFail(test, xfails)
return script,isXFail,tmpBase,execdir
-def formatTestOutput(status, out, err, exitCode, failDueToStderr, script):
+def formatTestOutput(status, out, err, exitCode, script):
output = StringIO.StringIO()
print >>output, "Script:"
print >>output, "--"
print >>output, '\n'.join(script)
print >>output, "--"
print >>output, "Exit Code: %r" % exitCode,
- if failDueToStderr:
- print >>output, "(but there was output on stderr)"
- else:
- print >>output
+ print >>output
if out:
print >>output, "Command Output (stdout):"
print >>output, "--"
@@ -513,53 +451,6 @@ def formatTestOutput(status, out, err, exitCode, failDueToStderr, script):
print >>output, "--"
return (status, output.getvalue())
-def executeTclTest(test, litConfig):
- if test.config.unsupported:
- return (Test.UNSUPPORTED, 'Test is unsupported')
-
- # Parse the test script, normalizing slashes in substitutions on Windows
- # (since otherwise Tcl style lexing will treat them as escapes).
- res = parseIntegratedTestScript(test, normalize_slashes=kIsWindows)
- if len(res) == 2:
- return res
-
- script, isXFail, tmpBase, execdir = res
-
- if litConfig.noExecute:
- return (Test.PASS, '')
-
- # Create the output directory if it does not already exist.
- Util.mkdir_p(os.path.dirname(tmpBase))
-
- res = executeTclScriptInternal(test, litConfig, tmpBase, script, execdir)
- if len(res) == 2:
- return res
-
- # Test for failure. In addition to the exit code, Tcl commands are
- # considered to fail if there is any standard error output.
- out,err,exitCode = res
- if isXFail:
- ok = exitCode != 0 or err and not litConfig.ignoreStdErr
- if ok:
- status = Test.XFAIL
- else:
- status = Test.XPASS
- else:
- ok = exitCode == 0 and (not err or litConfig.ignoreStdErr)
- if ok:
- status = Test.PASS
- else:
- status = Test.FAIL
-
- if ok:
- return (status,'')
-
- # Set a flag for formatTestOutput so it can explain why the test was
- # considered to have failed, despite having an exit code of 0.
- failDueToStderr = exitCode == 0 and err and not litConfig.ignoreStdErr
-
- return formatTestOutput(status, out, err, exitCode, failDueToStderr, script)
-
def executeShTest(test, litConfig, useExternalSh,
extra_substitutions=[]):
if test.config.unsupported:
@@ -601,7 +492,4 @@ def executeShTest(test, litConfig, useExternalSh,
if ok:
return (status,'')
- # Sh tests are not considered to fail just from stderr output.
- failDueToStderr = False
-
- return formatTestOutput(status, out, err, exitCode, failDueToStderr, script)
+ return formatTestOutput(status, out, err, exitCode, script)
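
As a standalone illustration of the %(line) substitutions introduced above: the helper name expand_line_refs is hypothetical, but the regular expressions are the ones added to parseIntegratedTestScript in this patch.

import re

def expand_line_refs(ln, line_number):
    # %(line) expands to the RUN line's own line number.
    ln = re.sub(r'%\(line\)', str(line_number), ln)
    def replace_line_number(match):
        # %(line+N) / %(line-N) expand to an offset from that number.
        if match.group(1) == '+':
            return str(line_number + int(match.group(2)))
        if match.group(1) == '-':
            return str(line_number - int(match.group(2)))
    return re.sub(r'%\(line *([\+-]) *(\d+)\)', replace_line_number, ln)

print(expand_line_refs('grep "%(line+1):" %s', 12))
# prints: grep "13:" %s
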
diff --git a/utils/lit/lit/__init__.py b/utils/lit/lit/__init__.py
index f3fbb1c..3e61bbd 100644
--- a/utils/lit/lit/__init__.py
+++ b/utils/lit/lit/__init__.py
@@ -4,7 +4,7 @@ from main import main
__author__ = 'Daniel Dunbar'
__email__ = 'daniel@zuster.org'
-__versioninfo__ = (0, 2, 0)
+__versioninfo__ = (0, 3, 0)
__version__ = '.'.join(map(str, __versioninfo__)) + 'dev'
__all__ = []
diff --git a/utils/lit/lit/discovery.py b/utils/lit/lit/discovery.py
new file mode 100644
index 0000000..c869a67
--- /dev/null
+++ b/utils/lit/lit/discovery.py
@@ -0,0 +1,234 @@
+"""
+Test discovery functions.
+"""
+
+import os
+import sys
+
+from lit.TestingConfig import TestingConfig
+from lit import LitConfig, Test
+
+def dirContainsTestSuite(path, lit_config):
+ cfgpath = os.path.join(path, lit_config.site_config_name)
+ if os.path.exists(cfgpath):
+ return cfgpath
+ cfgpath = os.path.join(path, lit_config.config_name)
+ if os.path.exists(cfgpath):
+ return cfgpath
+
+def getTestSuite(item, litConfig, cache):
+ """getTestSuite(item, litConfig, cache) -> (suite, relative_path)
+
+ Find the test suite containing @arg item.
+
+ @retval (None, ...) - Indicates no test suite contains @arg item.
+ @retval (suite, relative_path) - The suite that @arg item is in, and its
+ relative path inside that suite.
+ """
+ def search1(path):
+ # Check for a site config or a lit config.
+ cfgpath = dirContainsTestSuite(path, litConfig)
+
+ # If we didn't find a config file, keep looking.
+ if not cfgpath:
+ parent,base = os.path.split(path)
+ if parent == path:
+ return (None, ())
+
+ ts, relative = search(parent)
+ return (ts, relative + (base,))
+
+ # We found a config file, load it.
+ if litConfig.debug:
+ litConfig.note('loading suite config %r' % cfgpath)
+
+ cfg = TestingConfig.frompath(cfgpath, None, litConfig, mustExist = True)
+ source_root = os.path.realpath(cfg.test_source_root or path)
+ exec_root = os.path.realpath(cfg.test_exec_root or path)
+ return Test.TestSuite(cfg.name, source_root, exec_root, cfg), ()
+
+ def search(path):
+ # Check for an already instantiated test suite.
+ res = cache.get(path)
+ if res is None:
+ cache[path] = res = search1(path)
+ return res
+
+ # Canonicalize the path.
+ item = os.path.realpath(item)
+
+ # Skip files and virtual components.
+ components = []
+ while not os.path.isdir(item):
+ parent,base = os.path.split(item)
+ if parent == item:
+ return (None, ())
+ components.append(base)
+ item = parent
+ components.reverse()
+
+ ts, relative = search(item)
+ return ts, tuple(relative + tuple(components))
+
+def getLocalConfig(ts, path_in_suite, litConfig, cache):
+ def search1(path_in_suite):
+ # Get the parent config.
+ if not path_in_suite:
+ parent = ts.config
+ else:
+ parent = search(path_in_suite[:-1])
+
+ # Load the local configuration.
+ source_path = ts.getSourcePath(path_in_suite)
+ cfgpath = os.path.join(source_path, litConfig.local_config_name)
+ if litConfig.debug:
+ litConfig.note('loading local config %r' % cfgpath)
+ return TestingConfig.frompath(cfgpath, parent, litConfig,
+ mustExist = False,
+ config = parent.clone(cfgpath))
+
+ def search(path_in_suite):
+ key = (ts, path_in_suite)
+ res = cache.get(key)
+ if res is None:
+ cache[key] = res = search1(path_in_suite)
+ return res
+
+ return search(path_in_suite)
+
+def getTests(path, litConfig, testSuiteCache, localConfigCache):
+ # Find the test suite for this input and its relative path.
+ ts,path_in_suite = getTestSuite(path, litConfig, testSuiteCache)
+ if ts is None:
+ litConfig.warning('unable to find test suite for %r' % path)
+ return (),()
+
+ if litConfig.debug:
+ litConfig.note('resolved input %r to %r::%r' % (path, ts.name,
+ path_in_suite))
+
+ return ts, getTestsInSuite(ts, path_in_suite, litConfig,
+ testSuiteCache, localConfigCache)
+
+def getTestsInSuite(ts, path_in_suite, litConfig,
+ testSuiteCache, localConfigCache):
+ # Check that the source path exists (errors here are reported by the
+ # caller).
+ source_path = ts.getSourcePath(path_in_suite)
+ if not os.path.exists(source_path):
+ return
+
+ # Check if the user named a test directly.
+ if not os.path.isdir(source_path):
+ lc = getLocalConfig(ts, path_in_suite[:-1], litConfig, localConfigCache)
+ yield Test.Test(ts, path_in_suite, lc)
+ return
+
+ # Otherwise we have a directory to search for tests, start by getting the
+ # local configuration.
+ lc = getLocalConfig(ts, path_in_suite, litConfig, localConfigCache)
+
+ # Search for tests.
+ if lc.test_format is not None:
+ for res in lc.test_format.getTestsInDirectory(ts, path_in_suite,
+ litConfig, lc):
+ yield res
+
+ # Search subdirectories.
+ for filename in os.listdir(source_path):
+ # FIXME: This doesn't belong here?
+ if filename in ('Output', '.svn') or filename in lc.excludes:
+ continue
+
+ # Ignore non-directories.
+ file_sourcepath = os.path.join(source_path, filename)
+ if not os.path.isdir(file_sourcepath):
+ continue
+
+ # Check for nested test suites, first in the execpath in case there is a
+ # site configuration and then in the source path.
+ file_execpath = ts.getExecPath(path_in_suite + (filename,))
+ if dirContainsTestSuite(file_execpath, litConfig):
+ sub_ts, subiter = getTests(file_execpath, litConfig,
+ testSuiteCache, localConfigCache)
+ elif dirContainsTestSuite(file_sourcepath, litConfig):
+ sub_ts, subiter = getTests(file_sourcepath, litConfig,
+ testSuiteCache, localConfigCache)
+ else:
+ # Otherwise, continue loading from inside this test suite.
+ subiter = getTestsInSuite(ts, path_in_suite + (filename,),
+ litConfig, testSuiteCache,
+ localConfigCache)
+ sub_ts = None
+
+ N = 0
+ for res in subiter:
+ N += 1
+ yield res
+ if sub_ts and not N:
+ litConfig.warning('test suite %r contained no tests' % sub_ts.name)
+
+def find_tests_for_inputs(lit_config, inputs):
+ """
+ find_tests_for_inputs(lit_config, inputs) -> [Test]
+
+ Given a configuration object and a list of input specifiers, find all the
+ tests to execute.
+ """
+
+ # Expand '@...' form in inputs.
+ actual_inputs = []
+ for input in inputs:
+ if os.path.exists(input) or not input.startswith('@'):
+ actual_inputs.append(input)
+ else:
+ f = open(input[1:])
+ try:
+ for ln in f:
+ ln = ln.strip()
+ if ln:
+ actual_inputs.append(ln)
+ finally:
+ f.close()
+
+ # Load the tests from the inputs.
+ tests = []
+ test_suite_cache = {}
+ local_config_cache = {}
+ for input in actual_inputs:
+ prev = len(tests)
+ tests.extend(getTests(input, lit_config,
+ test_suite_cache, local_config_cache)[1])
+ if prev == len(tests):
+ lit_config.warning('input %r contained no tests' % input)
+
+ # If there were any errors during test discovery, exit now.
+ if lit_config.numErrors:
+ print >>sys.stderr, '%d errors, exiting.' % lit_config.numErrors
+ sys.exit(2)
+
+ return tests
+
+def load_test_suite(inputs):
+ import platform
+ import unittest
+ from lit.LitTestCase import LitTestCase
+
+ # Create the global config object.
+ litConfig = LitConfig.LitConfig(progname = 'lit',
+ path = [],
+ quiet = False,
+ useValgrind = False,
+ valgrindLeakCheck = False,
+ valgrindArgs = [],
+ noExecute = False,
+ ignoreStdErr = False,
+ debug = False,
+ isWindows = (platform.system()=='Windows'),
+ params = {})
+
+ tests = find_tests_for_inputs(litConfig, inputs)
+
+ # Return a unittest test suite which just runs the tests in order.
+ return unittest.TestSuite([LitTestCase(test, litConfig) for test in tests])
+
diff --git a/utils/lit/lit/main.py b/utils/lit/lit/main.py
index 25bbcbd..da961ee 100755
--- a/utils/lit/lit/main.py
+++ b/utils/lit/lit/main.py
@@ -12,18 +12,10 @@ import ProgressBar
import TestRunner
import Util
-from TestingConfig import TestingConfig
import LitConfig
import Test
-# Configuration files to look for when discovering test suites. These can be
-# overridden with --config-prefix.
-#
-# FIXME: Rename to 'config.lit', 'site.lit', and 'local.lit' ?
-gConfigName = 'lit.cfg'
-gSiteConfigName = 'lit.site.cfg'
-
-kLocalConfigName = 'lit.local.cfg'
+import lit.discovery
class TestingProgressDisplay:
def __init__(self, opts, numTests, progressBar=None):
@@ -137,166 +129,6 @@ class Tester(threading.Thread):
test.setResult(result, output, elapsed)
self.display.update(test)
-def dirContainsTestSuite(path):
- cfgpath = os.path.join(path, gSiteConfigName)
- if os.path.exists(cfgpath):
- return cfgpath
- cfgpath = os.path.join(path, gConfigName)
- if os.path.exists(cfgpath):
- return cfgpath
-
-def getTestSuite(item, litConfig, cache):
- """getTestSuite(item, litConfig, cache) -> (suite, relative_path)
-
- Find the test suite containing @arg item.
-
- @retval (None, ...) - Indicates no test suite contains @arg item.
- @retval (suite, relative_path) - The suite that @arg item is in, and its
- relative path inside that suite.
- """
- def search1(path):
- # Check for a site config or a lit config.
- cfgpath = dirContainsTestSuite(path)
-
- # If we didn't find a config file, keep looking.
- if not cfgpath:
- parent,base = os.path.split(path)
- if parent == path:
- return (None, ())
-
- ts, relative = search(parent)
- return (ts, relative + (base,))
-
- # We found a config file, load it.
- if litConfig.debug:
- litConfig.note('loading suite config %r' % cfgpath)
-
- cfg = TestingConfig.frompath(cfgpath, None, litConfig, mustExist = True)
- source_root = os.path.realpath(cfg.test_source_root or path)
- exec_root = os.path.realpath(cfg.test_exec_root or path)
- return Test.TestSuite(cfg.name, source_root, exec_root, cfg), ()
-
- def search(path):
- # Check for an already instantiated test suite.
- res = cache.get(path)
- if res is None:
- cache[path] = res = search1(path)
- return res
-
- # Canonicalize the path.
- item = os.path.realpath(item)
-
- # Skip files and virtual components.
- components = []
- while not os.path.isdir(item):
- parent,base = os.path.split(item)
- if parent == item:
- return (None, ())
- components.append(base)
- item = parent
- components.reverse()
-
- ts, relative = search(item)
- return ts, tuple(relative + tuple(components))
-
-def getLocalConfig(ts, path_in_suite, litConfig, cache):
- def search1(path_in_suite):
- # Get the parent config.
- if not path_in_suite:
- parent = ts.config
- else:
- parent = search(path_in_suite[:-1])
-
- # Load the local configuration.
- source_path = ts.getSourcePath(path_in_suite)
- cfgpath = os.path.join(source_path, kLocalConfigName)
- if litConfig.debug:
- litConfig.note('loading local config %r' % cfgpath)
- return TestingConfig.frompath(cfgpath, parent, litConfig,
- mustExist = False,
- config = parent.clone(cfgpath))
-
- def search(path_in_suite):
- key = (ts, path_in_suite)
- res = cache.get(key)
- if res is None:
- cache[key] = res = search1(path_in_suite)
- return res
-
- return search(path_in_suite)
-
-def getTests(path, litConfig, testSuiteCache, localConfigCache):
- # Find the test suite for this input and its relative path.
- ts,path_in_suite = getTestSuite(path, litConfig, testSuiteCache)
- if ts is None:
- litConfig.warning('unable to find test suite for %r' % path)
- return (),()
-
- if litConfig.debug:
- litConfig.note('resolved input %r to %r::%r' % (path, ts.name,
- path_in_suite))
-
- return ts, getTestsInSuite(ts, path_in_suite, litConfig,
- testSuiteCache, localConfigCache)
-
-def getTestsInSuite(ts, path_in_suite, litConfig,
- testSuiteCache, localConfigCache):
- # Check that the source path exists (errors here are reported by the
- # caller).
- source_path = ts.getSourcePath(path_in_suite)
- if not os.path.exists(source_path):
- return
-
- # Check if the user named a test directly.
- if not os.path.isdir(source_path):
- lc = getLocalConfig(ts, path_in_suite[:-1], litConfig, localConfigCache)
- yield Test.Test(ts, path_in_suite, lc)
- return
-
- # Otherwise we have a directory to search for tests, start by getting the
- # local configuration.
- lc = getLocalConfig(ts, path_in_suite, litConfig, localConfigCache)
-
- # Search for tests.
- if lc.test_format is not None:
- for res in lc.test_format.getTestsInDirectory(ts, path_in_suite,
- litConfig, lc):
- yield res
-
- # Search subdirectories.
- for filename in os.listdir(source_path):
- # FIXME: This doesn't belong here?
- if filename in ('Output', '.svn') or filename in lc.excludes:
- continue
-
- # Ignore non-directories.
- file_sourcepath = os.path.join(source_path, filename)
- if not os.path.isdir(file_sourcepath):
- continue
-
- # Check for nested test suites, first in the execpath in case there is a
- # site configuration and then in the source path.
- file_execpath = ts.getExecPath(path_in_suite + (filename,))
- if dirContainsTestSuite(file_execpath):
- sub_ts, subiter = getTests(file_execpath, litConfig,
- testSuiteCache, localConfigCache)
- elif dirContainsTestSuite(file_sourcepath):
- sub_ts, subiter = getTests(file_sourcepath, litConfig,
- testSuiteCache, localConfigCache)
- else:
- # Otherwise, continue loading from inside this test suite.
- subiter = getTestsInSuite(ts, path_in_suite + (filename,),
- litConfig, testSuiteCache,
- localConfigCache)
- sub_ts = None
-
- N = 0
- for res in subiter:
- N += 1
- yield res
- if sub_ts and not N:
- litConfig.warning('test suite %r contained no tests' % sub_ts.name)
-
def runTests(numThreads, litConfig, provider, display):
# If only using one testing thread, don't use threads at all; this lets us
# profile, among other things.
@@ -316,50 +148,8 @@ def runTests(numThreads, litConfig, provider, display):
except KeyboardInterrupt:
sys.exit(2)
-def load_test_suite(inputs):
- import unittest
-
- # Create the global config object.
- litConfig = LitConfig.LitConfig(progname = 'lit',
- path = [],
- quiet = False,
- useValgrind = False,
- valgrindLeakCheck = False,
- valgrindArgs = [],
- useTclAsSh = False,
- noExecute = False,
- ignoreStdErr = False,
- debug = False,
- isWindows = (platform.system()=='Windows'),
- params = {})
-
- # Load the tests from the inputs.
- tests = []
- testSuiteCache = {}
- localConfigCache = {}
- for input in inputs:
- prev = len(tests)
- tests.extend(getTests(input, litConfig,
- testSuiteCache, localConfigCache)[1])
- if prev == len(tests):
- litConfig.warning('input %r contained no tests' % input)
-
- # If there were any errors during test discovery, exit now.
- if litConfig.numErrors:
- print >>sys.stderr, '%d errors, exiting.' % litConfig.numErrors
- sys.exit(2)
-
- # Return a unittest test suite which just runs the tests in order.
- def get_test_fn(test):
- return unittest.FunctionTestCase(
- lambda: test.config.test_format.execute(
- test, litConfig),
- description = test.getFullName())
-
- from LitTestCase import LitTestCase
- return unittest.TestSuite([LitTestCase(test, litConfig) for test in tests])
-
-def main(builtinParameters = {}): # Bump the GIL check interval, its more important to get any one thread to a
+def main(builtinParameters = {}):
+ # Bump the GIL check interval, it's more important to get any one thread to a
# blocking operation (hopefully exec) than to try and unblock other threads.
#
# FIXME: This is a hack.
@@ -442,9 +232,6 @@ def main(builtinParameters = {}): # Bump the GIL check interval, its more imp
group.add_option("", "--show-suites", dest="showSuites",
help="Show discovered test suites",
action="store_true", default=False)
- group.add_option("", "--no-tcl-as-sh", dest="useTclAsSh",
- help="Don't run Tcl scripts using 'sh'",
- action="store_false", default=True)
group.add_option("", "--repeat", dest="repeatTests", metavar="N",
help="Repeat tests N times (for timing)",
action="store", default=None, type=int)
@@ -455,12 +242,6 @@ def main(builtinParameters = {}): # Bump the GIL check interval, its more imp
if not args:
parser.error('No inputs specified')
- if opts.configPrefix is not None:
- global gConfigName, gSiteConfigName, kLocalConfigName
- gConfigName = '%s.cfg' % opts.configPrefix
- gSiteConfigName = '%s.site.cfg' % opts.configPrefix
- kLocalConfigName = '%s.local.cfg' % opts.configPrefix
-
if opts.numThreads is None:
# Python <2.5 has a race condition causing lit to always fail with numThreads>1
# http://bugs.python.org/issue1731717
@@ -489,50 +270,20 @@ def main(builtinParameters = {}): # Bump the GIL check interval, its more imp
useValgrind = opts.useValgrind,
valgrindLeakCheck = opts.valgrindLeakCheck,
valgrindArgs = opts.valgrindArgs,
- useTclAsSh = opts.useTclAsSh,
noExecute = opts.noExecute,
ignoreStdErr = False,
debug = opts.debug,
isWindows = (platform.system()=='Windows'),
- params = userParams)
+ params = userParams,
+ config_prefix = opts.configPrefix)
- # Expand '@...' form in inputs.
- actual_inputs = []
- for input in inputs:
- if os.path.exists(input) or not input.startswith('@'):
- actual_inputs.append(input)
- else:
- f = open(input[1:])
- try:
- for ln in f:
- ln = ln.strip()
- if ln:
- actual_inputs.append(ln)
- finally:
- f.close()
-
-
- # Load the tests from the inputs.
- tests = []
- testSuiteCache = {}
- localConfigCache = {}
- for input in actual_inputs:
- prev = len(tests)
- tests.extend(getTests(input, litConfig,
- testSuiteCache, localConfigCache)[1])
- if prev == len(tests):
- litConfig.warning('input %r contained no tests' % input)
-
- # If there were any errors during test discovery, exit now.
- if litConfig.numErrors:
- print >>sys.stderr, '%d errors, exiting.' % litConfig.numErrors
- sys.exit(2)
+ tests = lit.discovery.find_tests_for_inputs(litConfig, inputs)
if opts.showSuites:
- suitesAndTests = dict([(ts,[])
- for ts,_ in testSuiteCache.values()
- if ts])
+ suitesAndTests = {}
for t in tests:
+ if t.suite not in suitesAndTests:
+ suitesAndTests[t.suite] = []
suitesAndTests[t.suite].append(t)
print '-- Test Suites --'
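For orientation, the rewritten main() now reduces discovery to a single call into the new lit.discovery module followed by an in-memory grouping. The sketch below is illustrative rather than the committed code; it assumes a litConfig built as in the hunk above and that each discovered test exposes the suite attribute (with suite.name), as used elsewhere in this patch.

    import lit.discovery

    def show_suites(litConfig, inputs):
        # All discovery logic now lives in lit.discovery.
        tests = lit.discovery.find_tests_for_inputs(litConfig, inputs)

        # Group the discovered tests by their owning suite, as --show-suites does.
        suites_and_tests = {}
        for t in tests:
            suites_and_tests.setdefault(t.suite, []).append(t)

        print '-- Test Suites --'
        for suite, suite_tests in suites_and_tests.items():
            print '  %s - %d tests' % (suite.name, len(suite_tests))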
diff --git a/utils/lit/tests/.coveragerc b/utils/lit/tests/.coveragerc
new file mode 100644
index 0000000..c886d0a
--- /dev/null
+++ b/utils/lit/tests/.coveragerc
@@ -0,0 +1,11 @@
+# .coveragerc to control coverage.py
+[run]
+branch = False
+parallel = True
+source = lit
+
+[html]
+directory = coverage_html_report
+
+[report]
+omit = Inputs
diff --git a/utils/lit/tests/Inputs/discovery/lit.cfg b/utils/lit/tests/Inputs/discovery/lit.cfg
new file mode 100644
index 0000000..3513bff
--- /dev/null
+++ b/utils/lit/tests/Inputs/discovery/lit.cfg
@@ -0,0 +1,5 @@
+config.name = 'top-level-suite'
+config.suffixes = ['.txt']
+config.test_format = lit.formats.ShTest()
+config.test_source_root = None
+config.test_exec_root = None
diff --git a/utils/lit/tests/Inputs/discovery/subdir/lit.local.cfg b/utils/lit/tests/Inputs/discovery/subdir/lit.local.cfg
new file mode 100644
index 0000000..5ae6b3c
--- /dev/null
+++ b/utils/lit/tests/Inputs/discovery/subdir/lit.local.cfg
@@ -0,0 +1 @@
+config.suffixes = ['.py']
diff --git a/utils/lit/tests/Inputs/discovery/subdir/test-three.py b/utils/lit/tests/Inputs/discovery/subdir/test-three.py
new file mode 100644
index 0000000..b80b60b
--- /dev/null
+++ b/utils/lit/tests/Inputs/discovery/subdir/test-three.py
@@ -0,0 +1 @@
+# RUN: true
diff --git a/utils/lit/tests/Inputs/discovery/subsuite/lit.cfg b/utils/lit/tests/Inputs/discovery/subsuite/lit.cfg
new file mode 100644
index 0000000..0c2979d
--- /dev/null
+++ b/utils/lit/tests/Inputs/discovery/subsuite/lit.cfg
@@ -0,0 +1,5 @@
+config.name = 'sub-suite'
+config.suffixes = ['.txt']
+config.test_format = lit.formats.ShTest()
+config.test_source_root = None
+config.test_exec_root = None
diff --git a/utils/lit/tests/Inputs/discovery/subsuite/test-one.txt b/utils/lit/tests/Inputs/discovery/subsuite/test-one.txt
new file mode 100644
index 0000000..b80b60b
--- /dev/null
+++ b/utils/lit/tests/Inputs/discovery/subsuite/test-one.txt
@@ -0,0 +1 @@
+# RUN: true
diff --git a/utils/lit/tests/Inputs/discovery/subsuite/test-two.txt b/utils/lit/tests/Inputs/discovery/subsuite/test-two.txt
new file mode 100644
index 0000000..b80b60b
--- /dev/null
+++ b/utils/lit/tests/Inputs/discovery/subsuite/test-two.txt
@@ -0,0 +1 @@
+# RUN: true
diff --git a/utils/lit/tests/Inputs/discovery/test-one.txt b/utils/lit/tests/Inputs/discovery/test-one.txt
new file mode 100644
index 0000000..b80b60b
--- /dev/null
+++ b/utils/lit/tests/Inputs/discovery/test-one.txt
@@ -0,0 +1 @@
+# RUN: true
diff --git a/utils/lit/tests/Inputs/discovery/test-two.txt b/utils/lit/tests/Inputs/discovery/test-two.txt
new file mode 100644
index 0000000..b80b60b
--- /dev/null
+++ b/utils/lit/tests/Inputs/discovery/test-two.txt
@@ -0,0 +1 @@
+# RUN: true
diff --git a/utils/lit/tests/Inputs/shtest-format/external_shell/fail.txt b/utils/lit/tests/Inputs/shtest-format/external_shell/fail.txt
new file mode 100644
index 0000000..1e74be5
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/external_shell/fail.txt
@@ -0,0 +1,3 @@
+# Run a command that fails with an error on stderr.
+#
+# RUN: cat "does-not-exist"
diff --git a/utils/lit/tests/Inputs/shtest-format/external_shell/lit.local.cfg b/utils/lit/tests/Inputs/shtest-format/external_shell/lit.local.cfg
new file mode 100644
index 0000000..d14d147
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/external_shell/lit.local.cfg
@@ -0,0 +1 @@
+config.test_format = lit.formats.ShTest(execute_external=True)
diff --git a/utils/lit/tests/Inputs/shtest-format/external_shell/pass.txt b/utils/lit/tests/Inputs/shtest-format/external_shell/pass.txt
new file mode 100644
index 0000000..b80b60b
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/external_shell/pass.txt
@@ -0,0 +1 @@
+# RUN: true
diff --git a/utils/lit/tests/Inputs/shtest-format/fail.txt b/utils/lit/tests/Inputs/shtest-format/fail.txt
new file mode 100644
index 0000000..49932c3
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/fail.txt
@@ -0,0 +1 @@
+# RUN: false
diff --git a/utils/lit/tests/Inputs/shtest-format/lit.cfg b/utils/lit/tests/Inputs/shtest-format/lit.cfg
new file mode 100644
index 0000000..78dd1bf
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/lit.cfg
@@ -0,0 +1,7 @@
+config.name = 'shtest-format'
+config.suffixes = ['.txt']
+config.test_format = lit.formats.ShTest()
+config.test_source_root = None
+config.test_exec_root = None
+config.target_triple = 'x86_64-unknown-unknown'
+config.available_features.add('a-present-feature')
diff --git a/utils/lit/tests/Inputs/shtest-format/no-test-line.txt b/utils/lit/tests/Inputs/shtest-format/no-test-line.txt
new file mode 100644
index 0000000..f2316bd
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/no-test-line.txt
@@ -0,0 +1 @@
+# Empty!
diff --git a/utils/lit/tests/Inputs/shtest-format/pass.txt b/utils/lit/tests/Inputs/shtest-format/pass.txt
new file mode 100644
index 0000000..b80b60b
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/pass.txt
@@ -0,0 +1 @@
+# RUN: true
diff --git a/utils/lit/tests/Inputs/shtest-format/requires-missing.txt b/utils/lit/tests/Inputs/shtest-format/requires-missing.txt
new file mode 100644
index 0000000..9e6648d
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/requires-missing.txt
@@ -0,0 +1,2 @@
+RUN: true
+REQUIRES: a-missing-feature
diff --git a/utils/lit/tests/Inputs/shtest-format/requires-present.txt b/utils/lit/tests/Inputs/shtest-format/requires-present.txt
new file mode 100644
index 0000000..064f707
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/requires-present.txt
@@ -0,0 +1,2 @@
+RUN: true
+REQUIRES: a-present-feature
diff --git a/utils/lit/tests/Inputs/shtest-format/unsupported_dir/lit.local.cfg b/utils/lit/tests/Inputs/shtest-format/unsupported_dir/lit.local.cfg
new file mode 100644
index 0000000..462e3dc
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/unsupported_dir/lit.local.cfg
@@ -0,0 +1 @@
+config.unsupported = True
diff --git a/utils/lit/tests/Inputs/shtest-format/unsupported_dir/some-test.txt b/utils/lit/tests/Inputs/shtest-format/unsupported_dir/some-test.txt
new file mode 100644
index 0000000..b80b60b
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/unsupported_dir/some-test.txt
@@ -0,0 +1 @@
+# RUN: true
diff --git a/utils/lit/tests/Inputs/shtest-format/xfail-feature.txt b/utils/lit/tests/Inputs/shtest-format/xfail-feature.txt
new file mode 100644
index 0000000..bd6241f
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/xfail-feature.txt
@@ -0,0 +1,2 @@
+# RUN: false
+# XFAIL: a-present-feature
diff --git a/utils/lit/tests/Inputs/shtest-format/xfail-target.txt b/utils/lit/tests/Inputs/shtest-format/xfail-target.txt
new file mode 100644
index 0000000..36760be
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/xfail-target.txt
@@ -0,0 +1,2 @@
+RUN: false
+XFAIL: x86_64
diff --git a/utils/lit/tests/Inputs/shtest-format/xfail.txt b/utils/lit/tests/Inputs/shtest-format/xfail.txt
new file mode 100644
index 0000000..6814cda
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/xfail.txt
@@ -0,0 +1,2 @@
+RUN: false
+XFAIL: *
diff --git a/utils/lit/tests/Inputs/shtest-format/xpass.txt b/utils/lit/tests/Inputs/shtest-format/xpass.txt
new file mode 100644
index 0000000..764d217
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/xpass.txt
@@ -0,0 +1,2 @@
+RUN: true
+XFAIL: x86_64
diff --git a/utils/lit/tests/Inputs/shtest-shell/error-0.txt b/utils/lit/tests/Inputs/shtest-shell/error-0.txt
new file mode 100644
index 0000000..631c8df
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-shell/error-0.txt
@@ -0,0 +1,3 @@
+# Check error on an internal shell error (unable to find command).
+#
+# RUN: not-a-real-command
diff --git a/utils/lit/tests/Inputs/shtest-shell/error-1.txt b/utils/lit/tests/Inputs/shtest-shell/error-1.txt
new file mode 100644
index 0000000..e5c8be6
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-shell/error-1.txt
@@ -0,0 +1,3 @@
+# Check error on a shell parsing failure.
+#
+# RUN: echo "missing quote
diff --git a/utils/lit/tests/Inputs/shtest-shell/error-2.txt b/utils/lit/tests/Inputs/shtest-shell/error-2.txt
new file mode 100644
index 0000000..a976286
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-shell/error-2.txt
@@ -0,0 +1,3 @@
+# Check error on an unsupported redirect.
+#
+# RUN: echo "hello" 3>&1
diff --git a/utils/lit/tests/Inputs/shtest-shell/lit.cfg b/utils/lit/tests/Inputs/shtest-shell/lit.cfg
new file mode 100644
index 0000000..4878b65
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-shell/lit.cfg
@@ -0,0 +1,5 @@
+config.name = 'shtest-shell'
+config.suffixes = ['.txt']
+config.test_format = lit.formats.ShTest()
+config.test_source_root = None
+config.test_exec_root = None
diff --git a/utils/lit/tests/Inputs/shtest-shell/redirects.txt b/utils/lit/tests/Inputs/shtest-shell/redirects.txt
new file mode 100644
index 0000000..6be88b6
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-shell/redirects.txt
@@ -0,0 +1,41 @@
+# Check stdout redirect (> and >>).
+#
+# RUN: echo "not-present" > %t.stdout-write
+# RUN: echo "is-present" > %t.stdout-write
+# RUN: FileCheck --check-prefix=STDOUT-WRITE < %t.stdout-write %s
+#
+# STDOUT-WRITE-NOT: not-present
+# STDOUT-WRITE: is-present
+#
+# RUN: echo "appended-line" >> %t.stdout-write
+# RUN: FileCheck --check-prefix=STDOUT-APPEND < %t.stdout-write %s
+#
+# STDOUT-APPEND: is-present
+# STDOUT-APPEND: appended-line
+
+
+# Check stderr redirect (2> and 2>>).
+#
+# RUN: echo "not-present" > %t.stderr-write
+# RUN: %S/write-to-stderr.sh 2> %t.stderr-write
+# RUN: FileCheck --check-prefix=STDERR-WRITE < %t.stderr-write %s
+#
+# STDERR-WRITE-NOT: not-present
+# STDERR-WRITE: a line on stderr
+#
+# RUN: %S/write-to-stderr.sh 2>> %t.stderr-write
+# RUN: FileCheck --check-prefix=STDERR-APPEND < %t.stderr-write %s
+#
+# STDERR-APPEND: a line on stderr
+# STDERR-APPEND: a line on stderr
+
+
+# Check combined redirect (&>).
+#
+# RUN: echo "not-present" > %t.combined
+# RUN: %S/write-to-stdout-and-stderr.sh &> %t.combined
+# RUN: FileCheck --check-prefix=COMBINED-WRITE < %t.combined %s
+#
+# COMBINED-WRITE-NOT: not-present
+# COMBINED-WRITE: a line on stdout
+# COMBINED-WRITE: a line on stderr
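Purely for orientation, the redirections exercised by this test map onto Python's subprocess module roughly as follows. This is not lit's internal TestRunner implementation; the file names are placeholders, and write-to-stdout-and-stderr.sh is the helper script added later in this patch, assumed to be in the current directory.

    import subprocess

    # '>' truncates the target file, '2>>' appends to it.
    with open('stdout.txt', 'w') as out, open('stderr.txt', 'a') as err:
        subprocess.call(['echo', 'hello'], stdout=out, stderr=err)

    # '&>' sends both streams to one file: point stderr at stdout's destination.
    with open('combined.txt', 'w') as both:
        subprocess.call(['./write-to-stdout-and-stderr.sh'],
                        stdout=both, stderr=subprocess.STDOUT)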
diff --git a/utils/lit/tests/Inputs/shtest-shell/sequencing-0.txt b/utils/lit/tests/Inputs/shtest-shell/sequencing-0.txt
new file mode 100644
index 0000000..6578db2
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-shell/sequencing-0.txt
@@ -0,0 +1,28 @@
+# Check sequencing operations.
+#
+# RUN: echo "first-line" > %t.out && echo "second-line" >> %t.out
+# RUN: FileCheck --check-prefix CHECK-AND < %t.out %s
+#
+# CHECK-AND: first-line
+# CHECK-AND: second-line
+#
+# The false case of && is tested in sequencing-1.txt
+
+
+# RUN: echo "first-line" > %t.out || echo "second-line" >> %t.out
+# RUN: FileCheck --check-prefix CHECK-OR-1 < %t.out %s
+#
+# CHECK-OR-1: first-line
+# CHECK-OR-1-NOT: second-line
+
+# RUN: false || echo "second-line" > %t.out
+# RUN: FileCheck --check-prefix CHECK-OR-2 < %t.out %s
+#
+# CHECK-OR-2: second-line
+
+
+# RUN: echo "first-line" > %t.out; echo "second-line" >> %t.out
+# RUN: FileCheck --check-prefix CHECK-SEQ < %t.out %s
+#
+# CHECK-SEQ: first-line
+# CHECK-SEQ: second-line
diff --git a/utils/lit/tests/Inputs/shtest-shell/sequencing-1.txt b/utils/lit/tests/Inputs/shtest-shell/sequencing-1.txt
new file mode 100644
index 0000000..5a1794c
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-shell/sequencing-1.txt
@@ -0,0 +1,2 @@
+# RUN: false && true
+# XFAIL: *
diff --git a/utils/lit/tests/Inputs/shtest-shell/write-to-stderr.sh b/utils/lit/tests/Inputs/shtest-shell/write-to-stderr.sh
new file mode 100755
index 0000000..ead3fd3
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-shell/write-to-stderr.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+echo "a line on stderr" 1>&2
diff --git a/utils/lit/tests/Inputs/shtest-shell/write-to-stdout-and-stderr.sh b/utils/lit/tests/Inputs/shtest-shell/write-to-stdout-and-stderr.sh
new file mode 100755
index 0000000..f20de5d
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-shell/write-to-stdout-and-stderr.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+echo "a line on stdout"
+echo "a line on stderr" 1>&2
diff --git a/utils/lit/tests/Inputs/unittest-adaptor/lit.cfg b/utils/lit/tests/Inputs/unittest-adaptor/lit.cfg
new file mode 100644
index 0000000..52de709
--- /dev/null
+++ b/utils/lit/tests/Inputs/unittest-adaptor/lit.cfg
@@ -0,0 +1,5 @@
+config.name = 'unittest-adaptor'
+config.suffixes = ['.txt']
+config.test_format = lit.formats.ShTest()
+config.test_source_root = None
+config.test_exec_root = None
diff --git a/utils/lit/tests/Inputs/unittest-adaptor/test-one.txt b/utils/lit/tests/Inputs/unittest-adaptor/test-one.txt
new file mode 100644
index 0000000..b80b60b
--- /dev/null
+++ b/utils/lit/tests/Inputs/unittest-adaptor/test-one.txt
@@ -0,0 +1 @@
+# RUN: true
diff --git a/utils/lit/tests/Inputs/unittest-adaptor/test-two.txt b/utils/lit/tests/Inputs/unittest-adaptor/test-two.txt
new file mode 100644
index 0000000..49932c3
--- /dev/null
+++ b/utils/lit/tests/Inputs/unittest-adaptor/test-two.txt
@@ -0,0 +1 @@
+# RUN: false
diff --git a/utils/lit/tests/discovery.py b/utils/lit/tests/discovery.py
new file mode 100644
index 0000000..54b99d3
--- /dev/null
+++ b/utils/lit/tests/discovery.py
@@ -0,0 +1,25 @@
+# Check the basic discovery process, including a sub-suite.
+#
+# RUN: %{lit} %{inputs}/discovery \
+# RUN: -j 1 --debug --no-execute --show-suites -v > %t.out 2> %t.err
+# RUN: FileCheck --check-prefix=CHECK-BASIC-OUT < %t.out %s
+# RUN: FileCheck --check-prefix=CHECK-BASIC-ERR < %t.err %s
+#
+# CHECK-BASIC-ERR: loading suite config '{{.*}}/tests/Inputs/discovery/lit.cfg'
+# CHECK-BASIC-ERR: loading local config '{{.*}}/tests/Inputs/discovery/subdir/lit.local.cfg'
+# CHECK-BASIC-ERR: loading suite config '{{.*}}/tests/Inputs/discovery/subsuite/lit.cfg'
+#
+# CHECK-BASIC-OUT: -- Test Suites --
+# CHECK-BASIC-OUT: sub-suite - 2 tests
+# CHECK-BASIC-OUT: Source Root:
+# CHECK-BASIC-OUT: Exec Root :
+# CHECK-BASIC-OUT: top-level-suite - 3 tests
+# CHECK-BASIC-OUT: Source Root:
+# CHECK-BASIC-OUT: Exec Root :
+#
+# CHECK-BASIC-OUT: -- Testing: 5 tests, 1 threads --
+# CHECK-BASIC-OUT: PASS: sub-suite :: test-one
+# CHECK-BASIC-OUT: PASS: sub-suite :: test-two
+# CHECK-BASIC-OUT: PASS: top-level-suite :: subdir/test-three
+# CHECK-BASIC-OUT: PASS: top-level-suite :: test-one
+# CHECK-BASIC-OUT: PASS: top-level-suite :: test-two
diff --git a/utils/lit/tests/lit.cfg b/utils/lit/tests/lit.cfg
new file mode 100644
index 0000000..32760ce
--- /dev/null
+++ b/utils/lit/tests/lit.cfg
@@ -0,0 +1,36 @@
+# -*- Python -*-
+
+import os
+
+# Configuration file for the 'lit' test runner.
+
+# name: The name of this test suite.
+config.name = 'lit'
+
+# testFormat: The test format to use to interpret tests.
+config.test_format = lit.formats.ShTest(execute_external=False)
+
+# suffixes: A list of file extensions to treat as test files.
+config.suffixes = ['.py']
+
+# excludes: A list of individual files to exclude.
+config.excludes = ['Inputs']
+
+# test_source_root: The root path where tests are located.
+config.test_source_root = os.path.dirname(__file__)
+config.test_exec_root = config.test_source_root
+
+config.target_triple = None
+
+src_root = os.path.join(config.test_source_root, '..')
+config.environment['PYTHONPATH'] = src_root
+config.substitutions.append(('%{src_root}', src_root))
+config.substitutions.append(('%{inputs}', os.path.join(
+ src_root, 'tests', 'Inputs')))
+config.substitutions.append(('%{lit}', os.path.join(src_root, 'lit.py')))
+
+# Enable coverage.py reporting, assuming the coverage module has been installed
+# and sitecustomize.py in the virtualenv has been modified appropriately.
+if lit.params.get('check-coverage', None):
+ config.environment['COVERAGE_PROCESS_START'] = os.path.join(
+ os.path.dirname(__file__), ".coveragerc")
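The COVERAGE_PROCESS_START hook used above is coverage.py's standard mechanism for measuring subprocesses: the environment variable names the rc file, and a sitecustomize.py on the interpreter's path calls coverage.process_startup() early in startup. A minimal sketch of such a hook follows; where it lives inside the virtualenv is a site-specific detail.

    # sitecustomize.py (illustrative; place it where the virtualenv's Python
    # imports it at startup). When COVERAGE_PROCESS_START is set, as lit.cfg
    # does above, coverage.py starts measuring this process using .coveragerc.
    try:
        import coverage
        coverage.process_startup()
    except ImportError:
        # coverage is optional; without it the tests still run, just unmeasured.
        pass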
diff --git a/utils/lit/tests/shell-parsing.py b/utils/lit/tests/shell-parsing.py
new file mode 100644
index 0000000..f644132
--- /dev/null
+++ b/utils/lit/tests/shell-parsing.py
@@ -0,0 +1,3 @@
+# Just run the ShUtil unit tests.
+#
+# RUN: python -m lit.ShUtil
diff --git a/utils/lit/tests/shtest-format.py b/utils/lit/tests/shtest-format.py
new file mode 100644
index 0000000..4b36873
--- /dev/null
+++ b/utils/lit/tests/shtest-format.py
@@ -0,0 +1,43 @@
+# Check the various features of the ShTest format.
+#
+# RUN: not %{lit} -j 1 -v %{inputs}/shtest-format > %t.out
+# RUN: FileCheck < %t.out %s
+#
+# END.
+
+# CHECK: -- Testing:
+
+# CHECK: FAIL: shtest-format :: external_shell/fail.txt
+# CHECK: *** TEST 'shtest-format :: external_shell/fail.txt' FAILED ***
+# CHECK: Command Output (stderr):
+# CHECK: cat: does-not-exist: No such file or directory
+# CHECK: --
+
+# CHECK: PASS: shtest-format :: external_shell/pass.txt
+
+# CHECK: FAIL: shtest-format :: fail.txt
+
+# CHECK: UNRESOLVED: shtest-format :: no-test-line.txt
+# CHECK: PASS: shtest-format :: pass.txt
+# CHECK: UNSUPPORTED: shtest-format :: requires-missing.txt
+# CHECK: PASS: shtest-format :: requires-present.txt
+# CHECK: UNSUPPORTED: shtest-format :: unsupported_dir/some-test.txt
+# CHECK: XFAIL: shtest-format :: xfail-feature.txt
+# CHECK: XFAIL: shtest-format :: xfail-target.txt
+# CHECK: XFAIL: shtest-format :: xfail.txt
+# CHECK: XPASS: shtest-format :: xpass.txt
+# CHECK: Testing Time
+
+# CHECK: Unexpected Passing Tests (1)
+# CHECK: shtest-format :: xpass.txt
+
+# CHECK: Failing Tests (2)
+# CHECK: shtest-format :: external_shell/fail.txt
+# CHECK: shtest-format :: fail.txt
+
+# CHECK: Expected Passes : 3
+# CHECK: Expected Failures : 3
+# CHECK: Unsupported Tests : 2
+# CHECK: Unresolved Tests : 1
+# CHECK: Unexpected Passes : 1
+# CHECK: Unexpected Failures: 2
diff --git a/utils/lit/tests/shtest-shell.py b/utils/lit/tests/shtest-shell.py
new file mode 100644
index 0000000..32479e1
--- /dev/null
+++ b/utils/lit/tests/shtest-shell.py
@@ -0,0 +1,33 @@
+# Check the internal shell handling component of the ShTest format.
+#
+# RUN: not %{lit} -j 1 -v %{inputs}/shtest-shell > %t.out
+# RUN: FileCheck < %t.out %s
+#
+# END.
+
+# CHECK: -- Testing:
+
+# CHECK: FAIL: shtest-shell :: error-0.txt
+# CHECK: *** TEST 'shtest-shell :: error-0.txt' FAILED ***
+# CHECK: Command 0: "not-a-real-command"
+# CHECK: Command 0 Result: 127
+# CHECK: Command 0 Stderr:
+# CHECK: 'not-a-real-command': command not found
+# CHECK: ***
+
+# FIXME: The output here sucks.
+#
+# CHECK: FAIL: shtest-shell :: error-1.txt
+# CHECK: *** TEST 'shtest-shell :: error-1.txt' FAILED ***
+# CHECK: shell parser error on: 'echo "missing quote'
+# CHECK: ***
+
+# CHECK: FAIL: shtest-shell :: error-2.txt
+# CHECK: *** TEST 'shtest-shell :: error-2.txt' FAILED ***
+# CHECK: Unsupported redirect:
+# CHECK: ***
+
+# CHECK: PASS: shtest-shell :: redirects.txt
+# CHECK: PASS: shtest-shell :: sequencing-0.txt
+# CHECK: XFAIL: shtest-shell :: sequencing-1.txt
+# CHECK: Failing Tests (3)
diff --git a/utils/lit/tests/unittest-adaptor.py b/utils/lit/tests/unittest-adaptor.py
new file mode 100644
index 0000000..243dd41
--- /dev/null
+++ b/utils/lit/tests/unittest-adaptor.py
@@ -0,0 +1,18 @@
+# Check the lit adaptation to run under unittest.
+#
+# RUN: python %s %{inputs}/unittest-adaptor 2> %t.err
+# RUN: FileCheck < %t.err %s
+#
+# CHECK: unittest-adaptor :: test-one.txt ... ok
+# CHECK: unittest-adaptor :: test-two.txt ... FAIL
+
+import unittest
+import sys
+
+import lit
+import lit.discovery
+
+input_path = sys.argv[1]
+unittest_suite = lit.discovery.load_test_suite([input_path])
+runner = unittest.TextTestRunner(verbosity=2)
+runner.run(unittest_suite)
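Since load_test_suite() returns an ordinary unittest.TestSuite, its result composes with standard unittest tooling. For example (illustrative only; the failfast option requires Python 2.7 or later):

    result = unittest.TextTestRunner(verbosity=2, failfast=True).run(unittest_suite)
    if not result.wasSuccessful():
        sys.exit(1)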
diff --git a/utils/lit/tests/usage.py b/utils/lit/tests/usage.py
new file mode 100644
index 0000000..e10d613
--- /dev/null
+++ b/utils/lit/tests/usage.py
@@ -0,0 +1,6 @@
+# Basic sanity check that usage works.
+#
+# RUN: %{lit} --help > %t.out
+# RUN: FileCheck < %t.out %s
+#
+# CHECK: Usage: lit.py [options] {file-or-path}
diff --git a/utils/lit/utils/README.txt b/utils/lit/utils/README.txt
new file mode 100644
index 0000000..81862ba
--- /dev/null
+++ b/utils/lit/utils/README.txt
@@ -0,0 +1,2 @@
+Utilities for the project that aren't intended to be part of a source
+distribution.
diff --git a/utils/lit/utils/check-coverage b/utils/lit/utils/check-coverage
new file mode 100755
index 0000000..bb3d17e
--- /dev/null
+++ b/utils/lit/utils/check-coverage
@@ -0,0 +1,50 @@
+#!/bin/sh
+
+prog=$(basename $0)
+
+# Expect to be run from the parent lit directory.
+if [ ! -f setup.py ] || [ ! -d lit ]; then
+ printf 1>&2 "%s: expected to be run from base lit directory\n" "$prog"
+ exit 1
+fi
+
+# Parse command line arguments.
+if [ "$1" == "--generate-html" ]; then
+ GENERATE_HTML=1
+ shift
+fi
+
+# If invoked with no arguments, run all the tests.
+if [ $# == "0" ]; then
+ set -- "tests"
+fi
+
+# Check that the active python has been modified to enable coverage in its
+# sitecustomize.
+if ! python -c \
+ 'import sitecustomize, sys; sys.exit("coverage" not in dir(sitecustomize))' \
+ &> /dev/null; then
+ printf 1>&2 "error: active python does not appear to enable coverage in its 'sitecustomize.py'\n"
+ exit 1
+fi
+
+# First, remove any existing coverage data files.
+rm -f tests/.coverage
+find tests -name .coverage.\* -exec rm {} \;
+
+# Next, run the tests.
+lit -sv --param check-coverage=1 "$@"
+
+# Next, move all the data files from subdirectories up.
+find tests/* -name .coverage.\* -exec mv {} tests \;
+
+# Combine all the data files.
+(cd tests && python -m coverage combine)
+
+# Finally, generate the report.
+(cd tests && python -m coverage report)
+
+# Generate the HTML report, if requested.
+if [ ! -z "$GENERATE_HTML" ]; then
+ (cd tests && python -m coverage html)
+fi
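The combine/report/html steps of this script can also be driven through coverage.py's Python API. A rough equivalent is sketched below, assuming a coverage.py release that provides the Coverage class (4.0 or later) and that it is run from the tests/ directory, mirroring the script above.

    # Illustrative Python-API equivalent of the combine and report steps above.
    import coverage

    cov = coverage.Coverage()   # picks up tests/.coveragerc from the cwd
    cov.combine()               # merge the parallel-mode .coverage.* data files
    cov.save()
    cov.report()                # same output as 'python -m coverage report'
    cov.html_report()           # writes coverage_html_report/, like 'coverage html'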
diff --git a/utils/lit/utils/check-sdist b/utils/lit/utils/check-sdist
new file mode 100755
index 0000000..6186446a
--- /dev/null
+++ b/utils/lit/utils/check-sdist
@@ -0,0 +1,44 @@
+#!/bin/sh
+
+if [ $# == 1 ]; then
+ cd $1
+fi
+
+# Create a list of all the files in the source tree, excluding various things we
+# know don't belong.
+echo "Creating current directory contents list."
+find . | \
+ grep -v '^\./.gitignore' | \
+ grep -v '^\./dist' | \
+ grep -v '^\./utils' | \
+ grep -v '^\./venv' | \
+ grep -v '^\./lit.egg-info' | \
+ grep -v '^\./lit/ExampleTests' | \
+ grep -v '/Output' | \
+ grep -v '__pycache__' | \
+ grep -v '.pyc$' | grep -v '~$' | \
+ sort > /tmp/lit_source_files.txt
+
+# Create the source distribution.
+echo "Creating source distribution."
+rm -rf lit.egg-info dist
+python setup.py sdist > /tmp/lit_sdist_log.txt
+
+# Creating list of files in source distribution.
+echo "Creating source distribution file list."
+tar zft dist/lit*.tar.gz | \
+ sed -e 's#lit-[0-9.dev]*/#./#' | \
+ sed -e 's#/$##' | \
+ grep -v '^\./PKG-INFO' | \
+ grep -v '^\./setup.cfg' | \
+ grep -v '^\./lit.egg-info' | \
+ sort > /tmp/lit_sdist_files.txt
+
+# Diff the files.
+echo "Running diff..."
+if (diff /tmp/lit_source_files.txt /tmp/lit_sdist_files.txt); then
+ echo "Diff is clean!"
+else
+ echo "error: there were differences in the source lists!"
+ exit 1
+fi