Diffstat (limited to 'utils')
-rwxr-xr-x  utils/ABITest/ABITestGen.py               638
-rw-r--r--  utils/ABITest/Enumeration.py              276
-rw-r--r--  utils/ABITest/Makefile.test.common        170
-rw-r--r--  utils/ABITest/TypeGen.py                  381
-rwxr-xr-x  utils/ABITest/build-and-summarize-all.sh   15
-rwxr-xr-x  utils/ABITest/build-and-summarize.sh       14
-rwxr-xr-x  utils/ABITest/build.sh                     12
-rw-r--r--  utils/ABITest/layout/Makefile              68
-rw-r--r--  utils/ABITest/return-types-32/Makefile      7
-rw-r--r--  utils/ABITest/return-types-64/Makefile      7
-rw-r--r--  utils/ABITest/single-args-32/Makefile       7
-rw-r--r--  utils/ABITest/single-args-64/Makefile      13
-rwxr-xr-x  utils/ABITest/summarize.sh                 15
-rwxr-xr-x  utils/CaptureCmd                           73
-rwxr-xr-x  utils/CmpDriver                           194
-rwxr-xr-x  utils/FindSpecRefs                        910
-rwxr-xr-x  utils/SummarizeErrors                     117
-rw-r--r--  utils/builtin-defines.c                    85
-rwxr-xr-x  utils/ccc-analyzer                        617
-rwxr-xr-x  utils/pch-test.pl                          61
-rwxr-xr-x  utils/scan-build                         1278
-rw-r--r--  utils/scanview.css                         62
-rw-r--r--  utils/sorttable.js                        493
-rw-r--r--  utils/test/Makefile.multi                  21
-rwxr-xr-x  utils/test/MultiTestRunner.py             331
-rw-r--r--  utils/test/ProgressBar.py                 227
-rwxr-xr-x  utils/test/TestRunner.py                  210
-rwxr-xr-x  utils/token-delta.py                      251
-rwxr-xr-x  utils/ubiviz                               74
29 files changed, 6627 insertions, 0 deletions
diff --git a/utils/ABITest/ABITestGen.py b/utils/ABITest/ABITestGen.py
new file mode 100755
index 0000000..5598caa
--- /dev/null
+++ b/utils/ABITest/ABITestGen.py
@@ -0,0 +1,638 @@
+#!/usr/bin/python
+
+from pprint import pprint
+import random, atexit, time
+from random import randrange
+import re
+
+from Enumeration import *
+from TypeGen import *
+
+####
+
+class TypePrinter:
+ def __init__(self, output, outputHeader=None,
+ outputTests=None, outputDriver=None,
+ headerName=None, info=None):
+ self.output = output
+ self.outputHeader = outputHeader
+ self.outputTests = outputTests
+ self.outputDriver = outputDriver
+ self.writeBody = outputHeader or outputTests or outputDriver
+ self.types = {}
+ self.testValues = {}
+ self.testReturnValues = {}
+ self.layoutTests = []
+
+ if info:
+ for f in (self.output,self.outputHeader,self.outputTests,self.outputDriver):
+ if f:
+ print >>f,info
+
+ if self.writeBody:
+ print >>self.output, '#include <stdio.h>\n'
+ if self.outputTests:
+ print >>self.outputTests, '#include <stdio.h>'
+ print >>self.outputTests, '#include <string.h>'
+ print >>self.outputTests, '#include <assert.h>\n'
+
+ if headerName:
+ for f in (self.output,self.outputTests,self.outputDriver):
+ if f is not None:
+ print >>f, '#include "%s"\n'%(headerName,)
+
+ if self.outputDriver:
+ print >>self.outputDriver, '#include <stdio.h>\n'
+ print >>self.outputDriver, 'int main(int argc, char **argv) {'
+ print >>self.outputDriver, ' int index = -1;'
+ print >>self.outputDriver, ' if (argc > 1) index = atoi(argv[1]);'
+
+ def finish(self):
+ if self.layoutTests:
+ print >>self.output, 'int main(int argc, char **argv) {'
+ print >>self.output, ' int index = -1;'
+ print >>self.output, ' if (argc > 1) index = atoi(argv[1]);'
+ for i,f in self.layoutTests:
+ print >>self.output, ' if (index == -1 || index == %d)' % i
+ print >>self.output, ' %s();' % f
+ print >>self.output, ' return 0;'
+ print >>self.output, '}'
+
+ if self.outputDriver:
+ print >>self.outputDriver, ' printf("DONE\\n");'
+ print >>self.outputDriver, ' return 0;'
+ print >>self.outputDriver, '}'
+
+ def getTypeName(self, T):
+ if isinstance(T,BuiltinType):
+ return T.name
+ name = self.types.get(T)
+ if name is None:
+ name = 'T%d'%(len(self.types),)
+ # Reserve slot
+ self.types[T] = None
+ if self.outputHeader:
+ print >>self.outputHeader,T.getTypedefDef(name, self)
+ else:
+ print >>self.output,T.getTypedefDef(name, self)
+ if self.outputTests:
+ print >>self.outputTests,T.getTypedefDef(name, self)
+ self.types[T] = name
+ return name
+
+ def writeLayoutTest(self, i, ty):
+ tyName = self.getTypeName(ty)
+ tyNameClean = tyName.replace(' ','_').replace('*','star')
+ fnName = 'test_%s' % tyNameClean
+
+ print >>self.output,'void %s(void) {' % fnName
+ self.printSizeOfType(' %s'%fnName, tyName, ty, self.output)
+ self.printAlignOfType(' %s'%fnName, tyName, ty, self.output)
+ self.printOffsetsOfType(' %s'%fnName, tyName, ty, self.output)
+ print >>self.output,'}'
+ print >>self.output
+
+ self.layoutTests.append((i,fnName))
+
+ def writeFunction(self, i, FT):
+ # Note: 'i' is the test index used for the driver output below; use a
+ # separate variable for the argument counter.
+ args = ', '.join(['%s arg%d'%(self.getTypeName(t),j) for j,t in enumerate(FT.argTypes)])
+ if not args:
+ args = 'void'
+
+ if FT.returnType is None:
+ retvalName = None
+ retvalTypeName = 'void'
+ else:
+ retvalTypeName = self.getTypeName(FT.returnType)
+ if self.writeBody or self.outputTests:
+ retvalName = self.getTestReturnValue(FT.returnType)
+
+ fnName = 'fn%d'%(FT.index,)
+ if self.outputHeader:
+ print >>self.outputHeader,'%s %s(%s);'%(retvalTypeName, fnName, args)
+ elif self.outputTests:
+ print >>self.outputTests,'%s %s(%s);'%(retvalTypeName, fnName, args)
+
+ print >>self.output,'%s %s(%s)'%(retvalTypeName, fnName, args),
+ if self.writeBody:
+ print >>self.output, '{'
+
+ for j,t in enumerate(FT.argTypes):
+ self.printValueOfType(' %s'%fnName, 'arg%d'%j, t)
+
+ if retvalName is not None:
+ print >>self.output, ' return %s;'%(retvalName,)
+ print >>self.output, '}'
+ else:
+ print >>self.output, '{}'
+ print >>self.output
+
+ if self.outputDriver:
+ print >>self.outputDriver, ' if (index == -1 || index == %d) {' % i
+ print >>self.outputDriver, ' extern void test_%s(void);' % fnName
+ print >>self.outputDriver, ' test_%s();' % fnName
+ print >>self.outputDriver, ' }'
+
+ if self.outputTests:
+ if self.outputHeader:
+ print >>self.outputHeader, 'void test_%s(void);'%(fnName,)
+
+ if retvalName is None:
+ retvalTests = None
+ else:
+ retvalTests = self.getTestValuesArray(FT.returnType)
+ tests = map(self.getTestValuesArray, FT.argTypes)
+ print >>self.outputTests, 'void test_%s(void) {'%(fnName,)
+
+ if retvalTests is not None:
+ print >>self.outputTests, ' printf("%s: testing return.\\n");'%(fnName,)
+ print >>self.outputTests, ' for (int i=0; i<%d; ++i) {'%(retvalTests[1],)
+ args = ', '.join(['%s[%d]'%(t,randrange(l)) for t,l in tests])
+ print >>self.outputTests, ' %s RV;'%(retvalTypeName,)
+ print >>self.outputTests, ' %s = %s[i];'%(retvalName, retvalTests[0])
+ print >>self.outputTests, ' RV = %s(%s);'%(fnName, args)
+ self.printValueOfType(' %s_RV'%fnName, 'RV', FT.returnType, output=self.outputTests, indent=4)
+ self.checkTypeValues('RV', '%s[i]' % retvalTests[0], FT.returnType, output=self.outputTests, indent=4)
+ print >>self.outputTests, ' }'
+
+ if tests:
+ print >>self.outputTests, ' printf("%s: testing arguments.\\n");'%(fnName,)
+ for i,(array,length) in enumerate(tests):
+ for j in range(length):
+ args = ['%s[%d]'%(t,randrange(l)) for t,l in tests]
+ args[i] = '%s[%d]'%(array,j)
+ print >>self.outputTests, ' %s(%s);'%(fnName, ', '.join(args),)
+ print >>self.outputTests, '}'
+
+ def getTestReturnValue(self, type):
+ typeName = self.getTypeName(type)
+ info = self.testReturnValues.get(typeName)
+ if info is None:
+ name = '%s_retval'%(typeName.replace(' ','_').replace('*','star'),)
+ print >>self.output, '%s %s;'%(typeName,name)
+ if self.outputHeader:
+ print >>self.outputHeader, 'extern %s %s;'%(typeName,name)
+ elif self.outputTests:
+ print >>self.outputTests, 'extern %s %s;'%(typeName,name)
+ info = self.testReturnValues[typeName] = name
+ return info
+
+ def getTestValuesArray(self, type):
+ typeName = self.getTypeName(type)
+ info = self.testValues.get(typeName)
+ if info is None:
+ name = '%s_values'%(typeName.replace(' ','_').replace('*','star'),)
+ print >>self.outputTests, 'static %s %s[] = {'%(typeName,name)
+ length = 0
+ for item in self.getTestValues(type):
+ print >>self.outputTests, '\t%s,'%(item,)
+ length += 1
+ print >>self.outputTests,'};'
+ info = self.testValues[typeName] = (name,length)
+ return info
+
+ def getTestValues(self, t):
+ if isinstance(t, BuiltinType):
+ if t.name=='float':
+ for i in ['0.0','-1.0','1.0']:
+ yield i+'f'
+ elif t.name=='double':
+ for i in ['0.0','-1.0','1.0']:
+ yield i
+ elif t.name in ('void *', 'void*'):
+ yield '(void*) 0'
+ yield '(void*) -1'
+ else:
+ yield '(%s) 0'%(t.name,)
+ yield '(%s) -1'%(t.name,)
+ yield '(%s) 1'%(t.name,)
+ elif isinstance(t, RecordType):
+ nonPadding = [f for f in t.fields
+ if not f.isPaddingBitField()]
+
+ if not nonPadding:
+ yield '{ }'
+ return
+
+ # FIXME: Use designated initializers to access non-first
+ # fields of unions.
+ if t.isUnion:
+ for v in self.getTestValues(nonPadding[0]):
+ yield '{ %s }' % v
+ return
+
+ fieldValues = map(list, map(self.getTestValues, nonPadding))
+ for i,values in enumerate(fieldValues):
+ for v in values:
+ elements = map(random.choice,fieldValues)
+ elements[i] = v
+ yield '{ %s }'%(', '.join(elements))
+
+ elif isinstance(t, ComplexType):
+ for v in self.getTestValues(t.elementType):
+ yield '%s + %s * 1i'%(v,v)
+ elif isinstance(t, ArrayType):
+ values = list(self.getTestValues(t.elementType))
+ if not values:
+ yield '{ }'
+ for i in range(t.numElements):
+ for v in values:
+ elements = [random.choice(values) for i in range(t.numElements)]
+ elements[i] = v
+ yield '{ %s }'%(', '.join(elements))
+ else:
+ raise NotImplementedError,'Cannot make test values of type: "%s"'%(t,)
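+ # For instance (hand-derived from the branches above), 'int' yields
+ # '(int) 0', '(int) -1', '(int) 1', while 'float' yields '0.0f',
+ # '-1.0f', and '1.0f'.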
+
+ def printSizeOfType(self, prefix, name, t, output=None, indent=2):
+ print >>output, '%*sprintf("%s: sizeof(%s) = %%ld\\n", (long)sizeof(%s));'%(indent, '', prefix, name, name)
+ def printAlignOfType(self, prefix, name, t, output=None, indent=2):
+ print >>output, '%*sprintf("%s: __alignof__(%s) = %%ld\\n", (long)__alignof__(%s));'%(indent, '', prefix, name, name)
+ def printOffsetsOfType(self, prefix, name, t, output=None, indent=2):
+ if isinstance(t, RecordType):
+ for i,f in enumerate(t.fields):
+ if f.isBitField():
+ continue
+ fname = 'field%d' % i
+ print >>output, '%*sprintf("%s: __builtin_offsetof(%s, %s) = %%ld\\n", (long)__builtin_offsetof(%s, %s));'%(indent, '', prefix, name, fname, name, fname)
+
+ def printValueOfType(self, prefix, name, t, output=None, indent=2):
+ if output is None:
+ output = self.output
+ if isinstance(t, BuiltinType):
+ if t.name.endswith('long long'):
+ code = 'lld'
+ elif t.name.endswith('long'):
+ code = 'ld'
+ elif t.name.split(' ')[-1] in ('_Bool','char','short','int'):
+ code = 'd'
+ elif t.name in ('float','double'):
+ code = 'f'
+ elif t.name == 'long double':
+ code = 'Lf'
+ else:
+ code = 'p'
+ print >>output, '%*sprintf("%s: %s = %%%s\\n", %s);'%(indent, '', prefix, name, code, name)
+ elif isinstance(t, RecordType):
+ if not t.fields:
+ print >>output, '%*sprintf("%s: %s (empty)\\n");'%(indent, '', prefix, name)
+ for i,f in enumerate(t.fields):
+ if f.isPaddingBitField():
+ continue
+ fname = '%s.field%d'%(name,i)
+ self.printValueOfType(prefix, fname, f, output=output, indent=indent)
+ elif isinstance(t, ComplexType):
+ self.printValueOfType(prefix, '(__real %s)'%name, t.elementType, output=output,indent=indent)
+ self.printValueOfType(prefix, '(__imag %s)'%name, t.elementType, output=output,indent=indent)
+ elif isinstance(t, ArrayType):
+ for i in range(t.numElements):
+ # Access in this fashion as a hackish way to portably
+ # access vectors.
+ if t.isVector:
+ self.printValueOfType(prefix, '((%s*) &%s)[%d]'%(t.elementType,name,i), t.elementType, output=output,indent=indent)
+ else:
+ self.printValueOfType(prefix, '%s[%d]'%(name,i), t.elementType, output=output,indent=indent)
+ else:
+ raise NotImplementedError,'Cannot print value of type: "%s"'%(t,)
+
+ def checkTypeValues(self, nameLHS, nameRHS, t, output=None, indent=2):
+ prefix = 'foo'
+ if output is None:
+ output = self.output
+ if isinstance(t, BuiltinType):
+ print >>output, '%*sassert(%s == %s);' % (indent, '', nameLHS, nameRHS)
+ elif isinstance(t, RecordType):
+ for i,f in enumerate(t.fields):
+ if f.isPaddingBitField():
+ continue
+ self.checkTypeValues('%s.field%d'%(nameLHS,i), '%s.field%d'%(nameRHS,i),
+ f, output=output, indent=indent)
+ if t.isUnion:
+ break
+ elif isinstance(t, ComplexType):
+ self.checkTypeValues('(__real %s)'%nameLHS, '(__real %s)'%nameRHS, t.elementType, output=output,indent=indent)
+ self.checkTypeValues('(__imag %s)'%nameLHS, '(__imag %s)'%nameRHS, t.elementType, output=output,indent=indent)
+ elif isinstance(t, ArrayType):
+ for i in range(t.numElements):
+ # Access in this fashion as a hackish way to portably
+ # access vectors.
+ if t.isVector:
+ self.checkTypeValues('((%s*) &%s)[%d]'%(t.elementType,nameLHS,i),
+ '((%s*) &%s)[%d]'%(t.elementType,nameRHS,i),
+ t.elementType, output=output,indent=indent)
+ else:
+ self.checkTypeValues('%s[%d]'%(nameLHS,i), '%s[%d]'%(nameRHS,i),
+ t.elementType, output=output,indent=indent)
+ else:
+ raise NotImplementedError,'Cannot check values of type: "%s"'%(t,)
+
+import sys
+
+def main():
+ from optparse import OptionParser, OptionGroup
+ parser = OptionParser("%prog [options] {indices}")
+ parser.add_option("", "--mode", dest="mode",
+ help="autogeneration mode (random or linear) [default %default]",
+ type='choice', choices=('random','linear'), default='linear')
+ parser.add_option("", "--count", dest="count",
+ help="autogenerate COUNT functions according to MODE",
+ type=int, default=0)
+ parser.add_option("", "--min", dest="minIndex", metavar="N",
+ help="start autogeneration with the Nth function type [default %default]",
+ type=int, default=0)
+ parser.add_option("", "--max", dest="maxIndex", metavar="N",
+ help="maximum index for random autogeneration [default %default]",
+ type=int, default=10000000)
+ parser.add_option("", "--seed", dest="seed",
+ help="random number generator seed [default %default]",
+ type=int, default=1)
+ parser.add_option("", "--use-random-seed", dest="useRandomSeed",
+ help="use random value for initial random number generator seed",
+ action='store_true', default=False)
+ parser.add_option("-o", "--output", dest="output", metavar="FILE",
+ help="write output to FILE [default %default]",
+ type=str, default='-')
+ parser.add_option("-O", "--output-header", dest="outputHeader", metavar="FILE",
+ help="write header file for output to FILE [default %default]",
+ type=str, default=None)
+ parser.add_option("-T", "--output-tests", dest="outputTests", metavar="FILE",
+ help="write function tests to FILE [default %default]",
+ type=str, default=None)
+ parser.add_option("-D", "--output-driver", dest="outputDriver", metavar="FILE",
+ help="write test driver to FILE [default %default]",
+ type=str, default=None)
+ parser.add_option("", "--test-layout", dest="testLayout", metavar="FILE",
+ help="test structure layout",
+ action='store_true', default=False)
+
+ group = OptionGroup(parser, "Type Enumeration Options")
+ # Builtins - Ints
+ group.add_option("", "--no-char", dest="useChar",
+ help="do not generate char types",
+ action="store_false", default=True)
+ group.add_option("", "--no-short", dest="useShort",
+ help="do not generate short types",
+ action="store_false", default=True)
+ group.add_option("", "--no-int", dest="useInt",
+ help="do not generate int types",
+ action="store_false", default=True)
+ group.add_option("", "--no-long", dest="useLong",
+ help="do not generate long types",
+ action="store_false", default=True)
+ group.add_option("", "--no-long-long", dest="useLongLong",
+ help="do not generate long long types",
+ action="store_false", default=True)
+ group.add_option("", "--no-unsigned", dest="useUnsigned",
+ help="do not generate unsigned integer types",
+ action="store_false", default=True)
+
+ # Other builtins
+ group.add_option("", "--no-bool", dest="useBool",
+ help="do not generate bool types",
+ action="store_false", default=True)
+ group.add_option("", "--no-float", dest="useFloat",
+ help="do not generate float types",
+ action="store_false", default=True)
+ group.add_option("", "--no-double", dest="useDouble",
+ help="do not generate double types",
+ action="store_false", default=True)
+ group.add_option("", "--no-long-double", dest="useLongDouble",
+ help="do not generate long double types",
+ action="store_false", default=True)
+ group.add_option("", "--no-void-pointer", dest="useVoidPointer",
+ help="do not generate void* types",
+ action="store_false", default=True)
+
+ # Derived types
+ group.add_option("", "--no-array", dest="useArray",
+ help="do not generate record types",
+ action="store_false", default=True)
+ group.add_option("", "--no-complex", dest="useComplex",
+ help="do not generate complex types",
+ action="store_false", default=True)
+ group.add_option("", "--no-record", dest="useRecord",
+ help="do not generate record types",
+ action="store_false", default=True)
+ group.add_option("", "--no-union", dest="recordUseUnion",
+ help="do not generate union types",
+ action="store_false", default=True)
+ group.add_option("", "--no-vector", dest="useVector",
+ help="do not generate vector types",
+ action="store_false", default=True)
+ group.add_option("", "--no-bit-field", dest="useBitField",
+ help="do not generate bit-field record members",
+ action="store_false", default=True)
+ group.add_option("", "--no-builtins", dest="useBuiltins",
+ help="do not use any types",
+ action="store_false", default=True)
+
+ # Tuning
+ group.add_option("", "--no-function-return", dest="functionUseReturn",
+ help="do not generate return types for functions",
+ action="store_false", default=True)
+ group.add_option("", "--vector-types", dest="vectorTypes",
+ help="comma separated list of vector types (e.g., v2i32) [default %default]",
+ action="store", type=str, default='v2i16, v1i64, v2i32, v4i16, v8i8, v2f32, v2i64, v4i32, v8i16, v16i8, v2f64, v4f32, v16f32', metavar="N")
+ group.add_option("", "--bit-fields", dest="bitFields",
+ help="comma separated list 'type:width' bit-field specifiers [default %default]",
+ action="store", type=str, default="char:0,char:4,unsigned:0,unsigned:4,unsigned:13,unsigned:24")
+ group.add_option("", "--max-args", dest="functionMaxArgs",
+ help="maximum number of arguments per function [default %default]",
+ action="store", type=int, default=4, metavar="N")
+ group.add_option("", "--max-array", dest="arrayMaxSize",
+ help="maximum array size [default %default]",
+ action="store", type=int, default=4, metavar="N")
+ group.add_option("", "--max-record", dest="recordMaxSize",
+ help="maximum number of fields per record [default %default]",
+ action="store", type=int, default=4, metavar="N")
+ group.add_option("", "--max-record-depth", dest="recordMaxDepth",
+ help="maximum nested structure depth [default %default]",
+ action="store", type=int, default=None, metavar="N")
+ parser.add_option_group(group)
+ (opts, args) = parser.parse_args()
+
+ if not opts.useRandomSeed:
+ random.seed(opts.seed)
+
+ # Construct type generator
+ builtins = []
+ if opts.useBuiltins:
+ ints = []
+ if opts.useChar: ints.append(('char',1))
+ if opts.useShort: ints.append(('short',2))
+ if opts.useInt: ints.append(('int',4))
+ # FIXME: Wrong size.
+ if opts.useLong: ints.append(('long',4))
+ if opts.useLongLong: ints.append(('long long',8))
+ if opts.useUnsigned:
+ ints = ([('unsigned %s'%i,s) for i,s in ints] +
+ [('signed %s'%i,s) for i,s in ints])
+ builtins.extend(ints)
+
+ if opts.useBool: builtins.append(('_Bool',1))
+ if opts.useFloat: builtins.append(('float',4))
+ if opts.useDouble: builtins.append(('double',8))
+ if opts.useLongDouble: builtins.append(('long double',16))
+ # FIXME: Wrong size.
+ if opts.useVoidPointer: builtins.append(('void*',4))
+
+ btg = FixedTypeGenerator([BuiltinType(n,s) for n,s in builtins])
+
+ bitfields = []
+ for specifier in opts.bitFields.split(','):
+ if not specifier.strip():
+ continue
+ name,width = specifier.strip().split(':', 1)
+ bitfields.append(BuiltinType(name,None,int(width)))
+ bftg = FixedTypeGenerator(bitfields)
+
+ charType = BuiltinType('char',1)
+ shortType = BuiltinType('short',2)
+ intType = BuiltinType('int',4)
+ longlongType = BuiltinType('long long',8)
+ floatType = BuiltinType('float',4)
+ doubleType = BuiltinType('double',8)
+ sbtg = FixedTypeGenerator([charType, intType, floatType, doubleType])
+
+ atg = AnyTypeGenerator()
+ artg = AnyTypeGenerator()
+ def makeGenerator(atg, subgen, subfieldgen, useRecord, useArray, useBitField):
+ atg.addGenerator(btg)
+ if useBitField and opts.useBitField:
+ atg.addGenerator(bftg)
+ if useRecord and opts.useRecord:
+ assert subgen
+ atg.addGenerator(RecordTypeGenerator(subfieldgen, opts.recordUseUnion,
+ opts.recordMaxSize))
+ if opts.useComplex:
+ # FIXME: Allow overriding builtins here
+ atg.addGenerator(ComplexTypeGenerator(sbtg))
+ if useArray and opts.useArray:
+ assert subgen
+ atg.addGenerator(ArrayTypeGenerator(subgen, opts.arrayMaxSize))
+ if opts.useVector:
+ vTypes = []
+ for i,t in enumerate(opts.vectorTypes.split(',')):
+ m = re.match('v([1-9][0-9]*)([if][1-9][0-9]*)', t.strip())
+ if not m:
+ parser.error('Invalid vector type: %r' % t)
+ count,kind = m.groups()
+ count = int(count)
+ type = { 'i8' : charType,
+ 'i16' : shortType,
+ 'i32' : intType,
+ 'i64' : longlongType,
+ 'f32' : floatType,
+ 'f64' : doubleType,
+ }.get(kind)
+ if not type:
+ parser.error('Invalid vector type: %r' % t)
+ vTypes.append(ArrayType(i, True, type, count * type.size))
+
+ atg.addGenerator(FixedTypeGenerator(vTypes))
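+ # For example (hand-derived), the spec 'v2f32' parses to count=2 and
+ # kind='f32', producing an 8-byte vector of two floats.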
+
+ if opts.recordMaxDepth is None:
+ # Fully recursive, just avoid top-level arrays.
+ subFTG = AnyTypeGenerator()
+ subTG = AnyTypeGenerator()
+ atg = AnyTypeGenerator()
+ makeGenerator(subFTG, atg, atg, True, True, True)
+ makeGenerator(subTG, atg, subFTG, True, True, False)
+ makeGenerator(atg, subTG, subFTG, True, False, False)
+ else:
+ # Make a chain of type generators, each builds smaller
+ # structures.
+ base = AnyTypeGenerator()
+ fbase = AnyTypeGenerator()
+ makeGenerator(base, None, None, False, False, False)
+ makeGenerator(fbase, None, None, False, False, True)
+ for i in range(opts.recordMaxDepth):
+ n = AnyTypeGenerator()
+ fn = AnyTypeGenerator()
+ makeGenerator(n, base, fbase, True, True, False)
+ makeGenerator(fn, base, fbase, True, True, True)
+ base = n
+ fbase = fn
+ atg = AnyTypeGenerator()
+ makeGenerator(atg, base, fbase, True, False, False)
+
+ if opts.testLayout:
+ ftg = atg
+ else:
+ ftg = FunctionTypeGenerator(atg, opts.functionUseReturn, opts.functionMaxArgs)
+
+ # Override max,min,count if finite
+ if opts.maxIndex is None:
+ if ftg.cardinality is aleph0:
+ opts.maxIndex = 10000000
+ else:
+ opts.maxIndex = ftg.cardinality
+ opts.maxIndex = min(opts.maxIndex, ftg.cardinality)
+ opts.minIndex = max(0,min(opts.maxIndex-1, opts.minIndex))
+ if opts.mode != 'random':
+ opts.count = min(opts.count, opts.maxIndex-opts.minIndex)
+
+ if opts.output=='-':
+ output = sys.stdout
+ else:
+ output = open(opts.output,'w')
+ atexit.register(lambda: output.close())
+
+ outputHeader = None
+ if opts.outputHeader:
+ outputHeader = open(opts.outputHeader,'w')
+ atexit.register(lambda: outputHeader.close())
+
+ outputTests = None
+ if opts.outputTests:
+ outputTests = open(opts.outputTests,'w')
+ atexit.register(lambda: outputTests.close())
+
+ outputDriver = None
+ if opts.outputDriver:
+ outputDriver = open(opts.outputDriver,'w')
+ atexit.register(lambda: outputDriver.close())
+
+ info = ''
+ info += '// %s\n'%(' '.join(sys.argv),)
+ info += '// Generated: %s\n'%(time.strftime('%Y-%m-%d %H:%M'),)
+ info += '// Cardinality of function generator: %s\n'%(ftg.cardinality,)
+ info += '// Cardinality of type generator: %s\n'%(atg.cardinality,)
+
+ if opts.testLayout:
+ info += '\n#include <stdio.h>'
+
+ P = TypePrinter(output,
+ outputHeader=outputHeader,
+ outputTests=outputTests,
+ outputDriver=outputDriver,
+ headerName=opts.outputHeader,
+ info=info)
+
+ def write(N):
+ try:
+ FT = ftg.get(N)
+ except RuntimeError,e:
+ if e.args[0]=='maximum recursion depth exceeded':
+ print >>sys.stderr,'WARNING: Skipped %d, recursion limit exceeded (bad arguments?)'%(N,)
+ return
+ raise
+ if opts.testLayout:
+ P.writeLayoutTest(N, FT)
+ else:
+ P.writeFunction(N, FT)
+
+ for a in args:
+ write(int(a))
+
+ for i in range(opts.count):
+ if opts.mode=='linear':
+ index = opts.minIndex + i
+ else:
+ index = opts.minIndex + int((opts.maxIndex-opts.minIndex) * random.random())
+ write(index)
+
+ P.finish()
+
+if __name__=='__main__':
+ main()
+
diff --git a/utils/ABITest/Enumeration.py b/utils/ABITest/Enumeration.py
new file mode 100644
index 0000000..47e4702
--- /dev/null
+++ b/utils/ABITest/Enumeration.py
@@ -0,0 +1,276 @@
+"""Utilities for enumeration of finite and countably infinite sets.
+"""
+###
+# Countable iteration
+
+# Simplifies some calculations
+class Aleph0(int):
+ _singleton = None
+ def __new__(type):
+ if type._singleton is None:
+ type._singleton = int.__new__(type)
+ return type._singleton
+ def __repr__(self): return '<aleph0>'
+ def __str__(self): return 'inf'
+
+ def __cmp__(self, b):
+ return 1
+
+ def __sub__(self, b):
+ raise ValueError,"Cannot subtract aleph0"
+ __rsub__ = __sub__
+
+ def __add__(self, b):
+ return self
+ __radd__ = __add__
+
+ def __mul__(self, b):
+ if b == 0: return b
+ return self
+ __rmul__ = __mul__
+
+ def __floordiv__(self, b):
+ if b == 0: raise ZeroDivisionError
+ return self
+ __rfloordiv__ = __floordiv__
+ __truediv__ = __floordiv__
+ __rtruediv__ = __floordiv__
+ __div__ = __floordiv__
+ __rdiv__ = __floordiv__
+
+ def __pow__(self, b):
+ if b == 0: return 1
+ return self
+aleph0 = Aleph0()
+
+def base(line):
+ return line*(line+1)//2
+
+def pairToN((x,y)):
+ line,index = x+y,y
+ return base(line)+index
+
+def getNthPairInfo(N):
+ # Avoid various singularities
+ if N==0:
+ return (0,0)
+
+ # Gallop to find bounds for line
+ line = 1
+ next = 2
+ while base(next)<=N:
+ line = next
+ next = line << 1
+
+ # Binary search for starting line
+ lo = line
+ hi = line<<1
+ while lo + 1 != hi:
+ #assert base(lo) <= N < base(hi)
+ mid = (lo + hi)>>1
+ if base(mid)<=N:
+ lo = mid
+ else:
+ hi = mid
+
+ line = lo
+ return line, N - base(line)
+
+def getNthPair(N):
+ line,index = getNthPairInfo(N)
+ return (line - index, index)
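+# For reference, the first few pairs produced by getNthPair are
+# (0,0), (1,0), (0,1), (2,0), (1,1), (0,2), ...: each anti-diagonal
+# x+y == line is walked in order of increasing y (Cantor-style pairing).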
+
+def getNthPairBounded(N,W=aleph0,H=aleph0,useDivmod=False):
+ """getNthPairBounded(N, W, H) -> (x, y)
+
+ Return the N-th pair such that 0 <= x < W and 0 <= y < H."""
+
+ if W <= 0 or H <= 0:
+ raise ValueError,"Invalid bounds"
+ elif N >= W*H:
+ raise ValueError,"Invalid input (out of bounds)"
+
+ # Simple case...
+ if W is aleph0 and H is aleph0:
+ return getNthPair(N)
+
+ # Otherwise simplify by assuming W < H
+ if H < W:
+ x,y = getNthPairBounded(N,H,W,useDivmod=useDivmod)
+ return y,x
+
+ if useDivmod:
+ return N%W,N//W
+ else:
+ # Conceptually we want to slide a diagonal line across a
+ # rectangle. This gives more interesting results for large
+ # bounds than using divmod.
+
+ # If in lower left, just return as usual
+ cornerSize = base(W)
+ if N < cornerSize:
+ return getNthPair(N)
+
+ # Otherwise if in upper right, subtract from corner
+ if H is not aleph0:
+ M = W*H - N - 1
+ if M < cornerSize:
+ x,y = getNthPair(M)
+ return (W-1-x,H-1-y)
+
+ # Otherwise, compile line and index from number of times we
+ # wrap.
+ N = N - cornerSize
+ index,offset = N%W,N//W
+ # p = (W-1, 1+offset) + (-1,1)*index
+ return (W-1-index, 1+offset+index)
+def getNthPairBoundedChecked(N,W=aleph0,H=aleph0,useDivmod=False,GNP=getNthPairBounded):
+ x,y = GNP(N,W,H,useDivmod)
+ assert 0 <= x < W and 0 <= y < H
+ return x,y
+
+def getNthNTuple(N, W, H=aleph0, useLeftToRight=False):
+ """getNthNTuple(N, W, H) -> (x_0, x_1, ..., x_W)
+
+ Return the N-th W-tuple, where for 0 <= x_i < H."""
+
+ if useLeftToRight:
+ elts = [None]*W
+ for i in range(W):
+ elts[i],N = getNthPairBounded(N, H)
+ return tuple(elts)
+ else:
+ if W==0:
+ return ()
+ elif W==1:
+ return (N,)
+ elif W==2:
+ return getNthPairBounded(N, H, H)
+ else:
+ LW,RW = W//2, W - (W//2)
+ L,R = getNthPairBounded(N, H**LW, H**RW)
+ return (getNthNTuple(L,LW,H=H,useLeftToRight=useLeftToRight) +
+ getNthNTuple(R,RW,H=H,useLeftToRight=useLeftToRight))
+def getNthNTupleChecked(N, W, H=aleph0, useLeftToRight=False, GNT=getNthNTuple):
+ t = GNT(N,W,H,useLeftToRight)
+ assert len(t) == W
+ for i in t:
+ assert i < H
+ return t
+
+def getNthTuple(N, maxSize=aleph0, maxElement=aleph0, useDivmod=False, useLeftToRight=False):
+ """getNthTuple(N, maxSize, maxElement) -> x
+
+ Return the N-th tuple where len(x) < maxSize and for y in x, 0 <=
+ y < maxElement."""
+
+ # All zero sized tuples are isomorphic, don't ya know.
+ if N == 0:
+ return ()
+ N -= 1
+ if maxElement is not aleph0:
+ if maxSize is aleph0:
+ raise NotImplementedError,'Max element size without max size unhandled'
+ bounds = [maxElement**i for i in range(1, maxSize+1)]
+ S,M = getNthPairVariableBounds(N, bounds)
+ else:
+ S,M = getNthPairBounded(N, maxSize, useDivmod=useDivmod)
+ return getNthNTuple(M, S+1, maxElement, useLeftToRight=useLeftToRight)
+def getNthTupleChecked(N, maxSize=aleph0, maxElement=aleph0,
+ useDivmod=False, useLeftToRight=False, GNT=getNthTuple):
+ # FIXME: maxsize is inclusive
+ t = GNT(N,maxSize,maxElement,useDivmod,useLeftToRight)
+ assert len(t) <= maxSize
+ for i in t:
+ assert i < maxElement
+ return t
+
+def getNthPairVariableBounds(N, bounds):
+ """getNthPairVariableBounds(N, bounds) -> (x, y)
+
+ Given a finite list of bounds (which may be finite or aleph0),
+ return the N-th pair such that 0 <= x < len(bounds) and 0 <= y <
+ bounds[x]."""
+
+ if not bounds:
+ raise ValueError,"Invalid bounds"
+ if not (0 <= N < sum(bounds)):
+ raise ValueError,"Invalid input (out of bounds)"
+
+ level = 0
+ active = range(len(bounds))
+ active.sort(key=lambda i: bounds[i])
+ prevLevel = 0
+ for i,index in enumerate(active):
+ level = bounds[index]
+ W = len(active) - i
+ if level is aleph0:
+ H = aleph0
+ else:
+ H = level - prevLevel
+ levelSize = W*H
+ if N<levelSize: # Found the level
+ idelta,delta = getNthPairBounded(N, W, H)
+ return active[i+idelta],prevLevel+delta
+ else:
+ N -= levelSize
+ prevLevel = level
+ else:
+ raise RuntimeError,"Unexpected loop completion"
+
+def getNthPairVariableBoundsChecked(N, bounds, GNVP=getNthPairVariableBounds):
+ x,y = GNVP(N,bounds)
+ assert 0 <= x < len(bounds) and 0 <= y < bounds[x]
+ return (x,y)
+
+###
+
+def testPairs():
+ W = 3
+ H = 6
+ a = [[' ' for x in range(10)] for y in range(10)]
+ b = [[' ' for x in range(10)] for y in range(10)]
+ for i in range(min(W*H,40)):
+ x,y = getNthPairBounded(i,W,H)
+ x2,y2 = getNthPairBounded(i,W,H,useDivmod=True)
+ print i,(x,y),(x2,y2)
+ a[y][x] = '%2d'%i
+ b[y2][x2] = '%2d'%i
+
+ print '-- a --'
+ for ln in a[::-1]:
+ if ''.join(ln).strip():
+ print ' '.join(ln)
+ print '-- b --'
+ for ln in b[::-1]:
+ if ''.join(ln).strip():
+ print ' '.join(ln)
+
+def testPairsVB():
+ bounds = [2,2,4,aleph0,5,aleph0]
+ a = [[' ' for x in range(15)] for y in range(15)]
+ b = [[' ' for x in range(15)] for y in range(15)]
+ for i in range(min(sum(bounds),40)):
+ x,y = getNthPairVariableBounds(i, bounds)
+ print i,(x,y)
+ a[y][x] = '%2d'%i
+
+ print '-- a --'
+ for ln in a[::-1]:
+ if ''.join(ln).strip():
+ print ' '.join(ln)
+
+###
+
+# Toggle to use checked versions of enumeration routines.
+if False:
+ getNthPairVariableBounds = getNthPairVariableBoundsChecked
+ getNthPairBounded = getNthPairBoundedChecked
+ getNthNTuple = getNthNTupleChecked
+ getNthTuple = getNthTupleChecked
+
+if __name__ == '__main__':
+ testPairs()
+
+ testPairsVB()
+
diff --git a/utils/ABITest/Makefile.test.common b/utils/ABITest/Makefile.test.common
new file mode 100644
index 0000000..3c208ad
--- /dev/null
+++ b/utils/ABITest/Makefile.test.common
@@ -0,0 +1,170 @@
+# -*- Makefile -*-
+
+# Usage: make test.N.report
+#
+# COUNT can be overridden to change the number of tests generated per
+# file, and TESTARGS is used to change the type generation. Make sure
+# to 'make clean' after changing either of these parameters.
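+#
+# For example, a hypothetical run that rebuilds the report for batch 3
+# with five tests per file (the target number is illustrative only):
+#
+#   make clean
+#   make COUNT=5 test.3.report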
+
+TESTARGS := --no-unsigned --no-vector --no-complex --no-bool
+
+COUNT := 1
+TIMEOUT := 5
+
+CFLAGS := -std=gnu99
+
+X_COMPILER := gcc
+X_LL_CFLAGS := -emit-llvm -S
+Y_COMPILER := clang
+Y_LL_CFLAGS := -emit-llvm -S
+CC := gcc
+
+###
+
+ABITESTGEN := ../ABITestGen.py
+
+ifndef VERBOSE
+ Verb := @
+endif
+
+.PHONY: test.%.report
+test.%.report: temps/test.%.xx.diff temps/test.%.xy.diff temps/test.%.yx.diff temps/test.%.yy.diff
+ @ok=1;\
+ for t in $^; do \
+ if [ -s $$t ]; then \
+ echo "TEST $*: $$t failed"; \
+ ok=0;\
+ fi; \
+ done; \
+ if [ $$ok -eq 1 ]; then \
+ true; \
+ else \
+ false; \
+ fi
+
+
+.PHONY: test.%.defs-report
+test.%.defs-report: temps/test.%.defs.diff
+ @for t in $^; do \
+ if [ -s $$t ]; then \
+ echo "TEST $*: $$t failed"; \
+ cat $$t; \
+ fi; \
+ done
+
+.PHONY: test.%.build
+test.%.build: temps/test.%.ref temps/test.%.xx temps/test.%.xy temps/test.%.yx temps/test.%.yy temps/test.%.x.defs temps/test.%.y.defs
+ @true
+
+###
+
+# Diffs and output
+
+.PRECIOUS: temps/.dir
+
+.PRECIOUS: temps/test.%.xx.diff
+temps/test.%.xx.diff: temps/test.%.ref.out temps/test.%.xx.out
+ $(Verb) diff $^ > $@ || true
+.PRECIOUS: temps/test.%.xy.diff
+temps/test.%.xy.diff: temps/test.%.ref.out temps/test.%.xy.out
+ $(Verb) diff $^ > $@ || true
+.PRECIOUS: temps/test.%.yx.diff
+temps/test.%.yx.diff: temps/test.%.ref.out temps/test.%.yx.out
+ $(Verb) diff $^ > $@ || true
+.PRECIOUS: temps/test.%.yy.diff
+temps/test.%.yy.diff: temps/test.%.ref.out temps/test.%.yy.out
+ $(Verb) diff $^ > $@ || true
+.PRECIOUS: temps/test.%.defs.diff
+temps/test.%.defs.diff: temps/test.%.x.defs temps/test.%.y.defs
+ $(Verb) zipdifflines \
+ --replace "%struct.T[0-9]+" "%struct.s" \
+ --replace "%union.T[0-9]+" "%struct.s" \
+ --replace "byval align [0-9]+" "byval" \
+ $^ > $@
+
+.PRECIOUS: temps/test.%.out
+temps/test.%.out: temps/test.%
+ -$(Verb) ./$< > $@
+
+# Executables
+
+.PRECIOUS: temps/test.%.ref
+temps/test.%.ref: temps/test.%.driver.ref.o temps/test.%.a.ref.o temps/test.%.b.ref.o
+ $(Verb) $(CC) $(CFLAGS) $(CC_CFLAGS) -O3 -o $@ $^
+.PRECIOUS: temps/test.%.xx
+temps/test.%.xx: temps/test.%.driver.ref.o temps/test.%.a.x.o temps/test.%.b.x.o
+ $(Verb) $(CC) $(CFLAGS) $(CC_CFLAGS) -O3 -o $@ $^
+.PRECIOUS: temps/test.%.xy
+temps/test.%.xy: temps/test.%.driver.ref.o temps/test.%.a.x.o temps/test.%.b.y.o
+ $(Verb) $(CC) $(CFLAGS) $(CC_CFLAGS) -O3 -o $@ $^
+.PRECIOUS: temps/test.%.yx
+temps/test.%.yx: temps/test.%.driver.ref.o temps/test.%.a.y.o temps/test.%.b.x.o
+ $(Verb) $(CC) $(CFLAGS) $(CC_CFLAGS) -O3 -o $@ $^
+.PRECIOUS: temps/test.%.yy
+temps/test.%.yy: temps/test.%.driver.ref.o temps/test.%.a.y.o temps/test.%.b.y.o
+ $(Verb) $(CC) $(CFLAGS) $(CC_CFLAGS) -O3 -o $@ $^
+
+# Object files
+
+.PRECIOUS: temps/test.%.ref.o
+temps/test.%.ref.o: inputs/test.%.c temps/.dir
+ $(Verb) $(CC) -c $(CFLAGS) $(CC_CFLAGS) -o $@ $<
+.PRECIOUS: temps/test.%.x.o
+temps/test.%.x.o: inputs/test.%.c temps/.dir
+ $(Verb) $(X_COMPILER) -c $(CFLAGS) $(X_CFLAGS) -o $@ $<
+.PRECIOUS: temps/test.%.y.o
+temps/test.%.y.o: inputs/test.%.c temps/.dir
+ $(Verb) $(Y_COMPILER) -c $(CFLAGS) $(Y_CFLAGS) -o $@ $<
+
+.PRECIOUS: temps/test.%.x.defs
+temps/test.%.x.defs: temps/test.%.a.x.ll temps/.dir
+ -$(Verb) grep '^define ' $< > $@
+.PRECIOUS: temps/test.%.y.defs
+temps/test.%.y.defs: temps/test.%.a.y.ll temps/.dir
+ -$(Verb) grep '^define ' $< > $@
+
+.PRECIOUS: temps/test.%.a.x.ll
+temps/test.%.a.x.ll: inputs/test.%.a.c temps/.dir
+ $(Verb) $(X_COMPILER) $(CFLAGS) $(X_LL_CFLAGS) $(X_CFLAGS) -o $@ $<
+.PRECIOUS: temps/test.%.b.x.ll
+temps/test.%.b.x.ll: inputs/test.%.b.c temps/.dir
+ $(Verb) $(X_COMPILER) $(CFLAGS) $(X_LL_CFLAGS) $(X_CFLAGS) -o $@ $<
+.PRECIOUS: temps/test.%.a.y.ll
+temps/test.%.a.y.ll: inputs/test.%.a.c temps/.dir
+ $(Verb) $(Y_COMPILER) $(CFLAGS) $(Y_LL_CFLAGS) $(Y_CFLAGS) -o $@ $<
+.PRECIOUS: temps/test.%.b.y.ll
+temps/test.%.b.y.ll: inputs/test.%.b.c temps/.dir
+ $(Verb) $(Y_COMPILER) $(CFLAGS) $(Y_LL_CFLAGS) $(Y_CFLAGS) -o $@ $<
+
+# Input generation
+
+.PHONY: test.%.top
+test.%.top: inputs/test.%.a.c inputs/test.%.b.c inputs/test.%.driver.c
+ @true
+
+.PRECIOUS: inputs/test.%.a.c inputs/test.%.b.c inputs/test.%.driver.c
+inputs/test.%.a.c: test.%.generate
+ @true
+inputs/test.%.b.c: test.%.generate
+ @true
+inputs/test.%.driver.c: test.%.generate
+ @true
+
+.PHONY: test.%.generate
+.PRECIOUS: inputs/.dir
+test.%.generate: $(ABITESTGEN) inputs/.dir
+ $(Verb) $(ABITESTGEN) $(TESTARGS) -o inputs/test.$*.a.c -T inputs/test.$*.b.c -D inputs/test.$*.driver.c --min=$(shell expr $* '*' $(COUNT)) --count=$(COUNT)
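+# With the defaults above (COUNT=1), 'make test.3.generate' runs roughly:
+#   ../ABITestGen.py $(TESTARGS) -o inputs/test.3.a.c -T inputs/test.3.b.c \
+#     -D inputs/test.3.driver.c --min=3 --count=1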
+
+# Cleaning
+
+clean-temps:
+ $(Verb) rm -rf temps
+
+clean:
+ $(Verb) rm -rf temps inputs
+
+# Etc.
+
+%/.dir:
+ $(Verb) mkdir -p $* > /dev/null
+ $(Verb) $(DATE) > $@
diff --git a/utils/ABITest/TypeGen.py b/utils/ABITest/TypeGen.py
new file mode 100644
index 0000000..d5678db
--- /dev/null
+++ b/utils/ABITest/TypeGen.py
@@ -0,0 +1,381 @@
+"""Flexible enumeration of C types."""
+
+from Enumeration import *
+
+# TODO:
+
+# - struct improvements (flexible arrays, packed &
+# unpacked, alignment)
+# - objective-c qualified id
+# - anonymous / transparent unions
+# - VLAs
+# - block types
+# - K&R functions
+# - pass arguments of different types (test extension, transparent union)
+# - varargs
+
+###
+# Actual type types
+
+class Type:
+ def isBitField(self):
+ return False
+
+ def isPaddingBitField(self):
+ return False
+
+class BuiltinType(Type):
+ def __init__(self, name, size, bitFieldSize=None):
+ self.name = name
+ self.size = size
+ self.bitFieldSize = bitFieldSize
+
+ def isBitField(self):
+ return self.bitFieldSize is not None
+
+ def isPaddingBitField(self):
+ return self.bitFieldSize == 0
+
+ def getBitFieldSize(self):
+ assert self.isBitField()
+ return self.bitFieldSize
+
+ def sizeof(self):
+ return self.size
+
+ def __str__(self):
+ return self.name
+
+class RecordType(Type):
+ def __init__(self, index, isUnion, fields):
+ self.index = index
+ self.isUnion = isUnion
+ self.fields = fields
+ self.name = None
+
+ def __str__(self):
+ def getField(t):
+ if t.isBitField():
+ return "%s : %d;" % (t, t.getBitFieldSize())
+ else:
+ return "%s;" % t
+
+ return '%s { %s }'%(('struct','union')[self.isUnion],
+ ' '.join(map(getField, self.fields)))
+
+ def getTypedefDef(self, name, printer):
+ def getField((i, t)):
+ if t.isBitField():
+ if t.isPaddingBitField():
+ return '%s : 0;'%(printer.getTypeName(t),)
+ else:
+ return '%s field%d : %d;'%(printer.getTypeName(t),i,
+ t.getBitFieldSize())
+ else:
+ return '%s field%d;'%(printer.getTypeName(t),i)
+ fields = map(getField, enumerate(self.fields))
+ # Name the struct for more readable LLVM IR.
+ return 'typedef %s %s { %s } %s;'%(('struct','union')[self.isUnion],
+ name, ' '.join(fields), name)
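+ # For example (hand-derived), a non-union record of 'char' and 'int'
+ # named T0 prints as:
+ # typedef struct T0 { char field0; int field1; } T0;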
+
+class ArrayType(Type):
+ def __init__(self, index, isVector, elementType, size):
+ if isVector:
+ # Note that for vectors, this is the size in bytes.
+ assert size > 0
+ else:
+ assert size is None or size >= 0
+ self.index = index
+ self.isVector = isVector
+ self.elementType = elementType
+ self.size = size
+ if isVector:
+ eltSize = self.elementType.sizeof()
+ assert not (self.size % eltSize)
+ self.numElements = self.size // eltSize
+ else:
+ self.numElements = self.size
+
+ def __str__(self):
+ if self.isVector:
+ return 'vector (%s)[%d]'%(self.elementType,self.size)
+ elif self.size is not None:
+ return '(%s)[%d]'%(self.elementType,self.size)
+ else:
+ return '(%s)[]'%(self.elementType,)
+
+ def getTypedefDef(self, name, printer):
+ elementName = printer.getTypeName(self.elementType)
+ if self.isVector:
+ return 'typedef %s %s __attribute__ ((vector_size (%d)));'%(elementName,
+ name,
+ self.size)
+ else:
+ if self.size is None:
+ sizeStr = ''
+ else:
+ sizeStr = str(self.size)
+ return 'typedef %s %s[%s];'%(elementName, name, sizeStr)
+
+class ComplexType(Type):
+ def __init__(self, index, elementType):
+ self.index = index
+ self.elementType = elementType
+
+ def __str__(self):
+ return '_Complex (%s)'%(self.elementType)
+
+ def getTypedefDef(self, name, printer):
+ return 'typedef _Complex %s %s;'%(printer.getTypeName(self.elementType), name)
+
+class FunctionType(Type):
+ def __init__(self, index, returnType, argTypes):
+ self.index = index
+ self.returnType = returnType
+ self.argTypes = argTypes
+
+ def __str__(self):
+ if self.returnType is None:
+ rt = 'void'
+ else:
+ rt = str(self.returnType)
+ if not self.argTypes:
+ at = 'void'
+ else:
+ at = ', '.join(map(str, self.argTypes))
+ return '%s (*)(%s)'%(rt, at)
+
+ def getTypedefDef(self, name, printer):
+ if self.returnType is None:
+ rt = 'void'
+ else:
+ rt = str(self.returnType)
+ if not self.argTypes:
+ at = 'void'
+ else:
+ at = ', '.join(map(str, self.argTypes))
+ return 'typedef %s (*%s)(%s);'%(rt, name, at)
+
+###
+# Type enumerators
+
+class TypeGenerator(object):
+ def __init__(self):
+ self.cache = {}
+
+ def setCardinality(self):
+ abstract
+
+ def get(self, N):
+ T = self.cache.get(N)
+ if T is None:
+ assert 0 <= N < self.cardinality
+ T = self.cache[N] = self.generateType(N)
+ return T
+
+ def generateType(self, N):
+ abstract
+
+class FixedTypeGenerator(TypeGenerator):
+ def __init__(self, types):
+ TypeGenerator.__init__(self)
+ self.types = types
+ self.setCardinality()
+
+ def setCardinality(self):
+ self.cardinality = len(self.types)
+
+ def generateType(self, N):
+ return self.types[N]
+
+class ComplexTypeGenerator(TypeGenerator):
+ def __init__(self, typeGen):
+ TypeGenerator.__init__(self)
+ self.typeGen = typeGen
+ self.setCardinality()
+
+ def setCardinality(self):
+ self.cardinality = self.typeGen.cardinality
+
+ def generateType(self, N):
+ return ComplexType(N, self.typeGen.get(N))
+
+class VectorTypeGenerator(TypeGenerator):
+ def __init__(self, typeGen, sizes):
+ TypeGenerator.__init__(self)
+ self.typeGen = typeGen
+ self.sizes = tuple(map(int,sizes))
+ self.setCardinality()
+
+ def setCardinality(self):
+ self.cardinality = len(self.sizes)*self.typeGen.cardinality
+
+ def generateType(self, N):
+ S,T = getNthPairBounded(N, len(self.sizes), self.typeGen.cardinality)
+ return ArrayType(N, True, self.typeGen.get(T), self.sizes[S])
+
+class FixedArrayTypeGenerator(TypeGenerator):
+ def __init__(self, typeGen, sizes):
+ TypeGenerator.__init__(self)
+ self.typeGen = typeGen
+ self.sizes = tuple(sizes)
+ self.setCardinality()
+
+ def setCardinality(self):
+ self.cardinality = len(self.sizes)*self.typeGen.cardinality
+
+ def generateType(self, N):
+ S,T = getNthPairBounded(N, len(self.sizes), self.typeGen.cardinality)
+ return ArrayType(N, False, self.typeGen.get(T), self.sizes[S])
+
+class ArrayTypeGenerator(TypeGenerator):
+ def __init__(self, typeGen, maxSize, useIncomplete=False, useZero=False):
+ TypeGenerator.__init__(self)
+ self.typeGen = typeGen
+ self.useIncomplete = useIncomplete
+ self.useZero = useZero
+ self.maxSize = int(maxSize)
+ self.W = useIncomplete + useZero + self.maxSize
+ self.setCardinality()
+
+ def setCardinality(self):
+ self.cardinality = self.W * self.typeGen.cardinality
+
+ def generateType(self, N):
+ S,T = getNthPairBounded(N, self.W, self.typeGen.cardinality)
+ if self.useIncomplete:
+ if S==0:
+ size = None
+ S = None
+ else:
+ S = S - 1
+ if S is not None:
+ if self.useZero:
+ size = S
+ else:
+ size = S + 1
+ return ArrayType(N, False, self.typeGen.get(T), size)
+
+class RecordTypeGenerator(TypeGenerator):
+ def __init__(self, typeGen, useUnion, maxSize):
+ TypeGenerator.__init__(self)
+ self.typeGen = typeGen
+ self.useUnion = bool(useUnion)
+ self.maxSize = int(maxSize)
+ self.setCardinality()
+
+ def setCardinality(self):
+ M = 1 + self.useUnion
+ if self.maxSize is aleph0:
+ S = aleph0 * self.typeGen.cardinality
+ else:
+ S = 0
+ for i in range(self.maxSize+1):
+ S += M * (self.typeGen.cardinality ** i)
+ self.cardinality = S
+
+ def generateType(self, N):
+ isUnion,I = False,N
+ if self.useUnion:
+ isUnion,I = (I&1),I>>1
+ fields = map(self.typeGen.get,getNthTuple(I,self.maxSize,self.typeGen.cardinality))
+ return RecordType(N, isUnion, fields)
+
+class FunctionTypeGenerator(TypeGenerator):
+ def __init__(self, typeGen, useReturn, maxSize):
+ TypeGenerator.__init__(self)
+ self.typeGen = typeGen
+ self.useReturn = useReturn
+ self.maxSize = maxSize
+ self.setCardinality()
+
+ def setCardinality(self):
+ if self.maxSize is aleph0:
+ S = aleph0 * self.typeGen.cardinality
+ elif self.useReturn:
+ S = 0
+ for i in range(1,self.maxSize+1+1):
+ S += self.typeGen.cardinality ** i
+ else:
+ S = 0
+ for i in range(self.maxSize+1):
+ S += self.typeGen.cardinality ** i
+ self.cardinality = S
+
+ def generateType(self, N):
+ if self.useReturn:
+ # Skip the empty tuple
+ argIndices = getNthTuple(N+1, self.maxSize+1, self.typeGen.cardinality)
+ retIndex,argIndices = argIndices[0],argIndices[1:]
+ retTy = self.typeGen.get(retIndex)
+ else:
+ retTy = None
+ argIndices = getNthTuple(N, self.maxSize, self.typeGen.cardinality)
+ args = map(self.typeGen.get, argIndices)
+ return FunctionType(N, retTy, args)
+
+class AnyTypeGenerator(TypeGenerator):
+ def __init__(self):
+ TypeGenerator.__init__(self)
+ self.generators = []
+ self.bounds = []
+ self.setCardinality()
+ self._cardinality = None
+
+ def getCardinality(self):
+ if self._cardinality is None:
+ return aleph0
+ else:
+ return self._cardinality
+ def setCardinality(self):
+ self.bounds = [g.cardinality for g in self.generators]
+ self._cardinality = sum(self.bounds)
+ cardinality = property(getCardinality, None)
+
+ def addGenerator(self, g):
+ self.generators.append(g)
+ for i in range(100):
+ prev = self._cardinality
+ self._cardinality = None
+ for g in self.generators:
+ g.setCardinality()
+ self.setCardinality()
+ if (self._cardinality is aleph0) or prev==self._cardinality:
+ break
+ else:
+ raise RuntimeError,"Infinite loop in setting cardinality"
+
+ def generateType(self, N):
+ index,M = getNthPairVariableBounds(N, self.bounds)
+ return self.generators[index].get(M)
+
+def test():
+ fbtg = FixedTypeGenerator([BuiltinType('char', 4),
+ BuiltinType('char', 4, 0),
+ BuiltinType('int', 4, 5)])
+
+ fields1 = AnyTypeGenerator()
+ fields1.addGenerator( fbtg )
+
+ fields0 = AnyTypeGenerator()
+ fields0.addGenerator( fbtg )
+# fields0.addGenerator( RecordTypeGenerator(fields1, False, 4) )
+
+ btg = FixedTypeGenerator([BuiltinType('char', 4),
+ BuiltinType('int', 4)])
+
+ atg = AnyTypeGenerator()
+ atg.addGenerator( btg )
+ atg.addGenerator( RecordTypeGenerator(fields0, False, 4) )
+ print 'Cardinality:',atg.cardinality
+ for i in range(100):
+ if i == atg.cardinality:
+ try:
+ atg.get(i)
+ raise RuntimeError,"Cardinality was wrong"
+ except AssertionError:
+ break
+ print '%4d: %s'%(i, atg.get(i))
+
+if __name__ == '__main__':
+ test()
diff --git a/utils/ABITest/build-and-summarize-all.sh b/utils/ABITest/build-and-summarize-all.sh
new file mode 100755
index 0000000..23e34a4
--- /dev/null
+++ b/utils/ABITest/build-and-summarize-all.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -eu
+
+if [ $# != 1 ]; then
+ echo "usage: $0 <num-tests>"
+ exit 1
+fi
+
+for bits in 32 64; do
+ for kind in return-types single-args; do
+ echo "-- $kind-$bits --"
+ (cd $kind-$bits && ../build-and-summarize.sh $1)
+ done
+done
diff --git a/utils/ABITest/build-and-summarize.sh b/utils/ABITest/build-and-summarize.sh
new file mode 100755
index 0000000..602728b
--- /dev/null
+++ b/utils/ABITest/build-and-summarize.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+set -eu
+
+if [ $# != 1 ]; then
+ echo "usage: $0 <num-tests>"
+ exit 1
+fi
+
+dir=$(dirname $0)
+$dir/build.sh $1 &> /dev/null || true
+../summarize.sh $1 &> fails-x.txt
+cat fails-x.txt
+wc -l fails-x.txt
diff --git a/utils/ABITest/build.sh b/utils/ABITest/build.sh
new file mode 100755
index 0000000..a50d14a
--- /dev/null
+++ b/utils/ABITest/build.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+set -eu
+
+if [ $# != 1 ]; then
+ echo "usage: $0 <num-tests>"
+ exit 1
+fi
+
+CPUS=2
+make -j $CPUS \
+ $(for i in $(seq 0 $1); do echo test.$i.report; done) -k
diff --git a/utils/ABITest/layout/Makefile b/utils/ABITest/layout/Makefile
new file mode 100644
index 0000000..0520625
--- /dev/null
+++ b/utils/ABITest/layout/Makefile
@@ -0,0 +1,68 @@
+# Usage: make test.N.report
+#
+# COUNT can be overridden to change the number of tests generated per
+# file, and TESTARGS is used to change the type generation. Make sure
+# to 'make clean' after changing either of these parameters.
+
+ABITESTGEN := ../ABITestGen.py
+TESTARGS := --max-args 0 --test-layout
+COUNT := 1000
+TIMEOUT := 5
+
+CFLAGS := -std=gnu99
+
+X_COMPILER := llvm-gcc
+Y_COMPILER := clang
+CC := gcc
+
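+# The constant condition below is, presumably, a hand-edited 32/64-bit
+# switch: as written it always selects the -m32 flags; making the two
+# operands differ selects the -m64 flags instead.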
+ifeq (0, 0)
+X_CFLAGS := -m32
+Y_CFLAGS := -m32
+CC_CFLAGS := -m32
+else
+X_CFLAGS := -m64
+Y_CFLAGS := -m64
+CC_CFLAGS := -m64
+endif
+
+.PHONY: test.%.report
+test.%.report: test.%.x.diff test.%.y.diff
+ @for t in $^; do \
+ if [ -s $$t ]; then \
+ echo "TEST $*: $$t failed"; \
+ fi; \
+ done
+
+.PHONY: test.%.build
+test.%.build: test.%.ref test.%.x test.%.y
+ @true
+
+###
+
+.PRECIOUS: test.%.x.diff
+test.%.x.diff: test.%.ref.out test.%.x.out
+ -diff $^ > $@
+.PRECIOUS: test.%.y.diff
+test.%.y.diff: test.%.ref.out test.%.y.out
+ -diff $^ > $@
+
+.PRECIOUS: test.%.out
+test.%.out: test.%
+ -./$< > $@
+
+.PRECIOUS: test.%.ref
+test.%.ref: test.%.c
+ $(CC) $(CFLAGS) $(CC_CFLAGS) -o $@ $^
+.PRECIOUS: test.%.x
+test.%.x: test.%.c
+ $(X_COMPILER) $(CFLAGS) $(X_CFLAGS) -o $@ $^
+.PRECIOUS: test.%.y
+test.%.y: test.%.c
+ $(Y_COMPILER) $(CFLAGS) $(Y_CFLAGS) -o $@ $^
+
+.PRECIOUS: test.%.c
+test.%.c: $(ABITESTGEN)
+ $(ABITESTGEN) $(TESTARGS) -o $@ --min=$(shell expr $* '*' $(COUNT)) --count=$(COUNT)
+
+clean:
+ rm -f test.* *~
diff --git a/utils/ABITest/return-types-32/Makefile b/utils/ABITest/return-types-32/Makefile
new file mode 100644
index 0000000..df1c53f
--- /dev/null
+++ b/utils/ABITest/return-types-32/Makefile
@@ -0,0 +1,7 @@
+X_CFLAGS := -m32
+Y_CFLAGS := -m32
+CC_CFLAGS := -m32
+
+include ../Makefile.test.common
+
+TESTARGS += --max-args 0
diff --git a/utils/ABITest/return-types-64/Makefile b/utils/ABITest/return-types-64/Makefile
new file mode 100644
index 0000000..9616e45
--- /dev/null
+++ b/utils/ABITest/return-types-64/Makefile
@@ -0,0 +1,7 @@
+X_CFLAGS := -m64
+Y_CFLAGS := -m64
+CC_CFLAGS := -m64
+
+include ../Makefile.test.common
+
+TESTARGS += --max-args 0
diff --git a/utils/ABITest/single-args-32/Makefile b/utils/ABITest/single-args-32/Makefile
new file mode 100644
index 0000000..9ff417f
--- /dev/null
+++ b/utils/ABITest/single-args-32/Makefile
@@ -0,0 +1,7 @@
+X_CFLAGS := -m32
+Y_CFLAGS := -m32
+CC_CFLAGS := -m32
+
+include ../Makefile.test.common
+
+TESTARGS += --no-function-return --max-args 1
diff --git a/utils/ABITest/single-args-64/Makefile b/utils/ABITest/single-args-64/Makefile
new file mode 100644
index 0000000..b8acb70
--- /dev/null
+++ b/utils/ABITest/single-args-64/Makefile
@@ -0,0 +1,13 @@
+# Usage: make test.N.report
+#
+# COUNT can be overridden to change the number of tests generated per
+# file, and TESTARGS is used to change the type generation. Make sure
+# to 'make clean' after changing either of these parameters.
+
+X_CFLAGS := -m64
+Y_CFLAGS := -m64
+CC_CFLAGS := -m64
+
+include ../Makefile.test.common
+
+TESTARGS += --no-function-return --max-args 1
diff --git a/utils/ABITest/summarize.sh b/utils/ABITest/summarize.sh
new file mode 100755
index 0000000..3efb52b
--- /dev/null
+++ b/utils/ABITest/summarize.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -eu
+
+if [ $# != 1 ]; then
+ echo "usage: $0 <num-tests>"
+ exit 1
+fi
+
+for i in $(seq 0 $1); do
+ if (! make test.$i.report &> /dev/null); then
+ echo "FAIL: $i";
+ fi;
+done
+
diff --git a/utils/CaptureCmd b/utils/CaptureCmd
new file mode 100755
index 0000000..3bce357
--- /dev/null
+++ b/utils/CaptureCmd
@@ -0,0 +1,73 @@
+#!/usr/bin/python
+
+"""CaptureCmd - A generic tool for capturing information about the
+invocations of another program.
+
+Usage
+--
+1. Move the original tool to a safe known location.
+
+2. Link CaptureCmd to the original tool's location.
+
+3. Define CAPTURE_CMD_PROGRAM to the known location of the original
+tool; this must be an absolute path.
+
+4. Define CAPTURE_CMD_DIR to a directory to write invocation
+information to.
+"""
+
+import hashlib
+import os
+import sys
+import time
+
+def saveCaptureData(prefix, dir, object):
+ string = repr(object) + '\n'
+ key = hashlib.sha1(string).hexdigest()
+ path = os.path.join(dir,
+ prefix + key)
+ if not os.path.exists(path):
+ f = open(path, 'wb')
+ f.write(string)
+ f.close()
+ return prefix + key
+
+def main():
+ program = os.getenv('CAPTURE_CMD_PROGRAM')
+ dir = os.getenv('CAPTURE_CMD_DIR')
+ fallback = os.getenv('CAPTURE_CMD_FALLBACK')
+ if not program:
+ raise ValueError('CAPTURE_CMD_PROGRAM is not defined!')
+ if not dir:
+ raise ValueError('CAPTURE_CMD_DIR is not defined!')
+
+ # Make the output directory if it doesn't already exist.
+ if not os.path.exists(dir):
+ os.mkdir(dir, 0700)
+
+ # Get keys for various data.
+ env = os.environ.items()
+ env.sort()
+ envKey = saveCaptureData('env-', dir, env)
+ cwdKey = saveCaptureData('cwd-', dir, os.getcwd())
+ argvKey = saveCaptureData('argv-', dir, sys.argv)
+ entry = (time.time(), envKey, cwdKey, argvKey)
+ saveCaptureData('cmd-', dir, entry)
+
+ if fallback:
+ pid = os.fork()
+ if not pid:
+ os.execv(program, sys.argv)
+ os._exit(1)
+ else:
+ _, status = os.waitpid(pid, 0)
+ # Propagate success; otherwise re-run the invocation with the
+ # fallback tool.
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == 0:
+ os._exit(0)
+ os.execv(fallback, sys.argv)
+ os._exit(1)
+ else:
+ os.execv(program, sys.argv)
+ os._exit(1)
+
+if __name__ == '__main__':
+ main()
diff --git a/utils/CmpDriver b/utils/CmpDriver
new file mode 100755
index 0000000..97c91a8
--- /dev/null
+++ b/utils/CmpDriver
@@ -0,0 +1,194 @@
+#!/usr/bin/python
+
+import subprocess
+
+def splitArgs(s):
+ it = iter(s)
+ current = ''
+ inQuote = False
+ for c in it:
+ if c == '"':
+ if inQuote:
+ inQuote = False
+ yield current + '"'
+ else:
+ inQuote = True
+ current = '"'
+ elif inQuote:
+ if c == '\\':
+ current += c
+ current += it.next()
+ else:
+ current += c
+ elif not c.isspace():
+ yield c
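+# For example (hand-checked), splitArgs(' "/usr/bin/foo" "-o" "a.out"')
+# yields '"/usr/bin/foo"', '"-o"', '"a.out"': quoted arguments keep their
+# quotes and unquoted whitespace is skipped.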
+
+def insertMinimumPadding(a, b, dist):
+ """insertMinimumPadding(a,b) -> (a',b')
+
+ Return two lists of equal length, where some number of Nones have
+ been inserted into the shorter list such that sum(map(dist, a',
+ b')) is minimized.
+
+ Assumes dist(X, Y) -> int and non-negative.
+ """
+
+ # Yay for simplicity over complexity.
+
+ def extend(aElt, bElt, solution):
+ d0,(a0,b0) = solution
+ return d0 + dist(aElt,bElt), (([aElt]+a0),([bElt]+b0))
+
+ def f(a, b):
+ if len(a) == len(b):
+ return (sum(map(dist, a, b)), (a, b))
+
+ if not a or not b:
+ if not a:
+ a += [None] * len(b)
+ else:
+ b += [None] * len(a)
+ return (sum(map(dist, a, b)), (a, b))
+
+ if int(dist(a[0], b[0])) == 0:
+ # Distances are non-negative, so taking this zero-cost
+ # pairing is always optimal.
+ return extend(a[0], b[0], f(a[1:], b[1:]))
+
+ if len(a) < len(b):
+ return min(f([None] + a, b),
+ extend(a[0], b[0], f(a[1:], b[1:])))
+ else:
+ return min(f(a, [None] + b),
+ extend(a[0], b[0], f(a[1:], b[1:])))
+
+ return f(a, b)[1]
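+# For example (hand-checked with dist(x, y) = (x != y)):
+# insertMinimumPadding([1, 2, 3], [1, 3], dist) -> ([1, 2, 3], [1, None, 3])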
+
+class ZipperDiff(object):
+ """ZipperDiff - Simple (slow) diff only accomodating inserts."""
+
+ def __init__(self, a, b):
+ self.a = a
+ self.b = b
+
+ def dist(self, a, b):
+ return a != b
+
+ def getDiffs(self):
+ a,b = insertMinimumPadding(self.a, self.b, self.dist)
+ for aElt,bElt in zip(a,b):
+ if self.dist(aElt, bElt):
+ yield aElt,bElt
+
+class DriverZipperDiff(ZipperDiff):
+ def isTempFile(self, filename):
+ if filename[0] != '"' or filename[-1] != '"':
+ return False
+ return (filename.startswith('/tmp/', 1) or
+ filename.startswith('/var/', 1))
+
+ def dist(self, a, b):
+ if a and b and self.isTempFile(a) and self.isTempFile(b):
+ return 0
+ return super(DriverZipperDiff, self).dist(a,b)
+
+class CompileInfo:
+ def __init__(self, out, err, res):
+ self.commands = []
+
+ # Standard out isn't used for much.
+ self.stdout = out
+ self.stderr = ''
+
+ # FIXME: Compare error messages as well.
+ for ln in err.split('\n'):
+ if (ln == 'Using built-in specs.' or
+ ln.startswith('Target: ') or
+ ln.startswith('Configured with: ') or
+ ln.startswith('Thread model: ') or
+ ln.startswith('gcc version') or
+ ln.startswith('ccc version')):
+ pass
+ elif ln.strip().startswith('"'):
+ self.commands.append(list(splitArgs(ln)))
+ else:
+ self.stderr += ln + '\n'
+
+ self.stderr = self.stderr.strip()
+ self.exitCode = res
+
+def captureDriverInfo(cmd, args):
+ p = subprocess.Popen([cmd,'-###'] + args,
+ stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out,err = p.communicate()
+ res = p.wait()
+ return CompileInfo(out,err,res)
+
+def main():
+ import os, sys
+
+ args = sys.argv[1:]
+ driverA = os.getenv('DRIVER_A') or 'gcc'
+ driverB = os.getenv('DRIVER_B') or 'xcc'
+
+ infoA = captureDriverInfo(driverA, args)
+ infoB = captureDriverInfo(driverB, args)
+
+ differ = False
+
+ # Compare stdout.
+ if infoA.stdout != infoB.stdout:
+ print '-- STDOUT DIFFERS -'
+ print 'A: ',infoA.stdout
+ print 'B: ',infoB.stdout
+ differ = True
+
+ # Compare stderr.
+ if infoA.stderr != infoB.stderr:
+ print '-- STDERR DIFFERS -'
+ print 'A: ',infoA.stderr
+ print 'B: ',infoB.stderr
+ differ = True
+
+ # Compare commands.
+ for i,(a,b) in enumerate(map(None, infoA.commands, infoB.commands)):
+ if a is None:
+ print 'A MISSING:',' '.join(b)
+ differ = True
+ continue
+ elif b is None:
+ print 'B MISSING:',' '.join(a)
+ differ = True
+ continue
+
+ diff = DriverZipperDiff(a,b)
+ diffs = list(diff.getDiffs())
+ if diffs:
+ print '-- COMMAND %d DIFFERS -' % i
+ print 'A COMMAND:',' '.join(a)
+ print 'B COMMAND:',' '.join(b)
+ print
+ for i,(aElt,bElt) in enumerate(diffs):
+ if aElt is None:
+ print 'A missing: %s' % bElt
+ elif bElt is None:
+ print 'B missing: %s' % aElt
+ else:
+ print 'mismatch: A: %s' % aElt
+ print ' B: %s' % bElt
+ differ = True
+
+ # Compare result codes.
+ if infoA.exitCode != infoB.exitCode:
+ print '-- EXIT CODES DIFFER -'
+ print 'A: ',infoA.exitCode
+ print 'B: ',infoB.exitCode
+ differ = True
+
+ if differ:
+ sys.exit(1)
+
+if __name__ == '__main__':
+ main()
diff --git a/utils/FindSpecRefs b/utils/FindSpecRefs
new file mode 100755
index 0000000..c74ca3d
--- /dev/null
+++ b/utils/FindSpecRefs
@@ -0,0 +1,910 @@
+#!/usr/bin/python
+
+import os
+import re
+import sys
+import time
+from pprint import pprint
+
+###
+
+c99URL = 'http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1124.pdf'
+c99TOC = [('Foreword', 'xi'),
+('Introduction', 'xiv'),
+('1. Scope', '1'),
+('2. Normative references', '2'),
+('3. Terms, definitions, and symbols', '3'),
+('4. Conformance', '7'),
+('5. Environment', '9'),
+('5.1 Conceptual models', '9'),
+('5.1.1 Translation environment', '9'),
+('5.1.2 Execution environments', '11'),
+('5.2 Environmental considerations', '17'),
+('5.2.1 Character sets', '17'),
+('5.2.2 Character display semantics', '19'),
+('5.2.3 Signals and interrupts', '20'),
+('5.2.4 Environmental limits', '20'),
+('6. Language', '29'),
+('6.1 Notation', '29'),
+('6.2 Concepts', '29'),
+('6.2.1 Scopes of identifiers', '29'),
+('6.2.2 Linkages of identifiers', '30'),
+('6.2.3 Name spaces of identifiers', '31'),
+('6.2.4 Storage durations of objects', '32'),
+('6.2.5 Types', '33'),
+('6.2.6 Representations of types', '37'),
+('6.2.7 Compatible type and composite type', '40'),
+('6.3 Conversions', '42'),
+('6.3.1 Arithmetic operands', '42'),
+('6.3.2 Other operands', '46'),
+('6.4 Lexical elements', '49'),
+('6.4.1 Keywords', '50'),
+('6.4.2 Identifiers', '51'),
+('6.4.3 Universal character names', '53'),
+('6.4.4 Constants', '54'),
+('6.4.5 String literals', '62'),
+('6.4.6 Punctuators', '63'),
+('6.4.7 Header names', '64'),
+('6.4.8 Preprocessing numbers', '65'),
+('6.4.9 Comments', '66'),
+('6.5 Expressions', '67'),
+('6.5.1 Primary expressions', '69'),
+('6.5.2 Postfix operators', '69'),
+('6.5.3 Unary operators', '78'),
+('6.5.4 Cast operators', '81'),
+('6.5.5 Multiplicative operators', '82'),
+('6.5.6 Additive operators', '82'),
+('6.5.7 Bitwise shift operators', '84'),
+('6.5.8 Relational operators', '85'),
+('6.5.9 Equality operators', '86'),
+('6.5.10 Bitwise AND operator', '87'),
+('6.5.11 Bitwise exclusive OR operator', '88'),
+('6.5.12 Bitwise inclusive OR operator', '88'),
+('6.5.13 Logical AND operator', '89'),
+('6.5.14 Logical OR operator', '89'),
+('6.5.15 Conditional operator', '90'),
+('6.5.16 Assignment operators', '91'),
+('6.5.17 Comma operator', '94'),
+('6.6 Constant expressions', '95'),
+('6.7 Declarations', '97'),
+('6.7.1 Storage-class specifiers', '98'),
+('6.7.2 Type specifiers', '99'),
+('6.7.3 Type qualifiers', '108'),
+('6.7.4 Function specifiers', '112'),
+('6.7.5 Declarators', '114'),
+('6.7.6 Type names', '122'),
+('6.7.7 Type definitions', '123'),
+('6.7.8 Initialization', '125'),
+('6.8 Statements and blocks', '131'),
+('6.8.1 Labeled statements', '131'),
+('6.8.2 Compound statement', '132'),
+('6.8.3 Expression and null statements', '132'),
+('6.8.4 Selection statements', '133'),
+('6.8.5 Iteration statements', '135'),
+('6.8.6 Jump statements', '136'),
+('6.9 External definitions', '140'),
+('6.9.1 Function definitions', '141'),
+('6.9.2 External object definitions', '143'),
+('6.10 Preprocessing directives', '145'),
+('6.10.1 Conditional inclusion', '147'),
+('6.10.2 Source file inclusion', '149'),
+('6.10.3 Macro replacement', '151'),
+('6.10.4 Line control', '158'),
+('6.10.5 Error directive', '159'),
+('6.10.6 Pragma directive', '159'),
+('6.10.7 Null directive', '160'),
+('6.10.8 Predefined macro names', '160'),
+('6.10.9 Pragma operator', '161'),
+('6.11 Future language directions', '163'),
+('6.11.1 Floating types', '163'),
+('6.11.2 Linkages of identifiers', '163'),
+('6.11.3 External names', '163'),
+('6.11.4 Character escape sequences', '163'),
+('6.11.5 Storage-class specifiers', '163'),
+('6.11.6 Function declarators', '163'),
+('6.11.7 Function definitions', '163'),
+('6.11.8 Pragma directives', '163'),
+('6.11.9 Predefined macro names', '163'),
+('7. Library', '164'),
+('7.1 Introduction', '164'),
+('7.1.1 Definitions of terms', '164'),
+('7.1.2 Standard headers', '165'),
+('7.1.3 Reserved identifiers', '166'),
+('7.1.4 Use of library functions', '166'),
+('7.2 Diagnostics <assert.h>', '169'),
+('7.2.1 Program diagnostics', '169'),
+('7.3 Complex arithmetic <complex.h>', '170'),
+('7.3.1 Introduction', '170'),
+('7.3.2 Conventions', '170'),
+('7.3.3 Branch cuts', '171'),
+('7.3.4 The CX_LIMITED_RANGE pragma', '171'),
+('7.3.5 Trigonometric functions', '172'),
+('7.3.6 Hyperbolic functions', '174'),
+('7.3.7 Exponential and logarithmic functions', '176'),
+('7.3.8 Power and absolute-value functions', '177'),
+('7.3.9 Manipulation functions', '178'),
+('7.4 Character handling <ctype.h>', '181'),
+('7.4.1 Character classification functions', '181'),
+('7.4.2 Character case mapping functions', '184'),
+('7.5 Errors <errno.h>', '186'),
+('7.6 Floating-point environment <fenv.h>', '187'),
+('7.6.1 The FENV_ACCESS pragma', '189'),
+('7.6.2 Floating-point exceptions', '190'),
+('7.6.3 Rounding', '193'),
+('7.6.4 Environment', '194'),
+('7.7 Characteristics of floating types <float.h>', '197'),
+('7.8 Format conversion of integer types <inttypes.h>', '198'),
+('7.8.1 Macros for format specifiers', '198'),
+('7.8.2 Functions for greatest-width integer types', '199'),
+('7.9 Alternative spellings <iso646.h>', '202'),
+('7.10 Sizes of integer types <limits.h>', '203'),
+('7.11 Localization <locale.h>', '204'),
+('7.11.1 Locale control', '205'),
+('7.11.2 Numeric formatting convention inquiry', '206'),
+('7.12 Mathematics <math.h>', '212'),
+('7.12.1 Treatment of error conditions', '214'),
+('7.12.2 The FP_CONTRACT pragma', '215'),
+('7.12.3 Classification macros', '216'),
+('7.12.4 Trigonometric functions', '218'),
+('7.12.5 Hyperbolic functions', '221'),
+('7.12.6 Exponential and logarithmic functions', '223'),
+('7.12.7 Power and absolute-value functions', '228'),
+('7.12.8 Error and gamma functions', '230'),
+('7.12.9 Nearest integer functions', '231'),
+('7.12.10 Remainder functions', '235'),
+('7.12.11 Manipulation functions', '236'),
+('7.12.12 Maximum, minimum, and positive difference functions', '238'),
+('7.12.13 Floating multiply-add', '239'),
+('7.12.14 Comparison macros', '240'),
+('7.13 Nonlocal jumps <setjmp.h>', '243'),
+('7.13.1 Save calling environment', '243'),
+('7.13.2 Restore calling environment', '244'),
+('7.14 Signal handling <signal.h>', '246'),
+('7.14.1 Specify signal handling', '247'),
+('7.14.2 Send signal', '248'),
+('7.15 Variable arguments <stdarg.h>', '249'),
+('7.15.1 Variable argument list access macros', '249'),
+('7.16 Boolean type and values <stdbool.h>', '253'),
+('7.17 Common definitions <stddef.h>', '254'),
+('7.18 Integer types <stdint.h>', '255'),
+('7.18.1 Integer types', '255'),
+('7.18.2 Limits of specified-width integer types', '257'),
+('7.18.3 Limits of other integer types', '259'),
+('7.18.4 Macros for integer constants', '260'),
+('7.19 Input/output <stdio.h>', '262'),
+('7.19.1 Introduction', '262'),
+('7.19.2 Streams', '264'),
+('7.19.3 Files', '266'),
+('7.19.4 Operations on files', '268'),
+('7.19.5 File access functions', '270'),
+('7.19.6 Formatted input/output functions', '274'),
+('7.19.7 Character input/output functions', '296'),
+('7.19.8 Direct input/output functions', '301'),
+('7.19.9 File positioning functions', '302'),
+('7.19.10 Error-handling functions', '304'),
+('7.20 General utilities <stdlib.h>', '306'),
+('7.20.1 Numeric conversion functions', '307'),
+('7.20.2 Pseudo-random sequence generation functions', '312'),
+('7.20.3 Memory management functions', '313'),
+('7.20.4 Communication with the environment', '315'),
+('7.20.5 Searching and sorting utilities', '318'),
+('7.20.6 Integer arithmetic functions', '320'),
+('7.20.7 Multibyte/wide character conversion functions', '321'),
+('7.20.8 Multibyte/wide string conversion functions', '323'),
+('7.21 String handling <string.h>', '325'),
+('7.21.1 String function conventions', '325'),
+('7.21.2 Copying functions', '325'),
+('7.21.3 Concatenation functions', '327'),
+('7.21.4 Comparison functions', '328'),
+('7.21.5 Search functions', '330'),
+('7.21.6 Miscellaneous functions', '333'),
+('7.22 Type-generic math <tgmath.h>', '335'),
+('7.23 Date and time <time.h>', '338'),
+('7.23.1 Components of time', '338'),
+('7.23.2 Time manipulation functions', '339'),
+('7.23.3 Time conversion functions', '341'),
+('7.24 Extended multibyte and wide character utilities <wchar.h>', '348'),
+('7.24.1 Introduction', '348'),
+('7.24.2 Formatted wide character input/output functions', '349'),
+('7.24.3 Wide character input/output functions', '367'),
+('7.24.4 General wide string utilities', '371'),
+('7.24.5 Wide character time conversion functions', '385'),
+('7.24.6 Extended multibyte/wide character conversion utilities', '386'),
+('7.25 Wide character classification and mapping utilities <wctype.h>',
+ '393'),
+('7.25.1 Introduction', '393'),
+('7.25.2 Wide character classification utilities', '394'),
+('7.25.3 Wide character case mapping utilities', '399'),
+('7.26 Future library directions', '401'),
+('7.26.1 Complex arithmetic <complex.h>', '401'),
+('7.26.2 Character handling <ctype.h>', '401'),
+('7.26.3 Errors <errno.h>', '401'),
+('7.26.4 Format conversion of integer types <inttypes.h>', '401'),
+('7.26.5 Localization <locale.h>', '401'),
+('7.26.6 Signal handling <signal.h>', '401'),
+('7.26.7 Boolean type and values <stdbool.h>', '401'),
+('7.26.8 Integer types <stdint.h>', '401'),
+('7.26.9 Input/output <stdio.h>', '402'),
+('7.26.10 General utilities <stdlib.h>', '402'),
+('7.26.11 String handling <string.h>', '402'),
+('<wchar.h>', '402'),
+('<wctype.h>', '402'),
+('Annex A (informative) Language syntax summary', '403'),
+('A.1 Lexical grammar', '403'),
+('A.2 Phrase structure grammar', '409'),
+('A.3 Preprocessing directives', '416'),
+('Annex B (informative) Library summary', '418'),
+('B.1 Diagnostics <assert.h>', '418'),
+('B.2 Complex <complex.h>', '418'),
+('B.3 Character handling <ctype.h>', '420'),
+('B.4 Errors <errno.h>', '420'),
+('B.5 Floating-point environment <fenv.h>', '420'),
+('B.6 Characteristics of floating types <float.h>', '421'),
+('B.7 Format conversion of integer types <inttypes.h>', '421'),
+('B.8 Alternative spellings <iso646.h>', '422'),
+('B.9 Sizes of integer types <limits.h>', '422'),
+('B.10 Localization <locale.h>', '422'),
+('B.11 Mathematics <math.h>', '422'),
+('B.12 Nonlocal jumps <setjmp.h>', '427'),
+('B.13 Signal handling <signal.h>', '427'),
+('B.14 Variable arguments <stdarg.h>', '427'),
+('B.15 Boolean type and values <stdbool.h>', '427'),
+('B.16 Common definitions <stddef.h>', '428'),
+('B.17 Integer types <stdint.h>', '428'),
+('B.18 Input/output <stdio.h>', '428'),
+('B.19 General utilities <stdlib.h>', '430'),
+('B.20 String handling <string.h>', '432'),
+('B.21 Type-generic math <tgmath.h>', '433'),
+('B.22 Date and time <time.h>', '433'),
+('B.23 Extended multibyte/wide character utilities <wchar.h>', '434'),
+('B.24 Wide character classification and mapping utilities <wctype.h>',
+ '436'),
+('Annex C (informative) Sequence points', '438'),
+('Annex D (normative) Universal character names for identifiers', '439'),
+('Annex E (informative) Implementation limits', '441'),
+('Annex F (normative) IEC 60559 floating-point arithmetic', '443'),
+('F.1 Introduction', '443'),
+('F.2 Types', '443'),
+('F.3 Operators and functions', '444'),
+('F.4 Floating to integer conversion', '446'),
+('F.5 Binary-decimal conversion', '446'),
+('F.6 Contracted expressions', '447'),
+('F.7 Floating-point environment', '447'),
+('F.8 Optimization', '450'),
+('F.9 Mathematics <math.h>', '453'),
+('Annex G (informative) IEC 60559-compatible complex arithmetic', '466'),
+('G.1 Introduction', '466'),
+('G.2 Types', '466'),
+('G.3 Conventions', '466'),
+('G.4 Conversions', '467'),
+('G.5 Binary operators', '467'),
+('G.6 Complex arithmetic <complex.h>', '471'),
+('G.7 Type-generic math <tgmath.h>', '479'),
+('Annex H (informative) Language independent arithmetic', '480'),
+('H.1 Introduction', '480'),
+('H.2 Types', '480'),
+('H.3 Notification', '484'),
+('Annex I (informative) Common warnings', '486'),
+('Annex J (informative) Portability issues', '488'),
+('J.1 Unspecified behavior', '488'),
+('J.2 Undefined behavior', '491'),
+('J.3 Implementation-defined behavior', '504'),
+('J.4 Locale-specific behavior', '511'),
+('J.5 Common extensions', '512'),
+('Bibliography', '515'),
+('Index', '517')]
+
+cXXURL = 'http://open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2723.pdf'
+cXXTOC = [('Contents', 'ii'),
+('List of Tables', 'ix'),
+('1 General', '1'),
+('1.1 Scope', '1'),
+('1.2 Normative references', '1'),
+('1.3 Definitions', '2'),
+('1.4 Implementation compliance', '4'),
+('1.5 Structure of this International Standard', '5'),
+('1.6 Syntax notation', '5'),
+('1.7 The C++ memory model', '6'),
+('1.8 The C++ object model', '6'),
+('1.9 Program execution', '7'),
+('1.10 Multi-threaded executions and data races', '10'),
+('1.11 Acknowledgments', '13'),
+('2 Lexical conventions', '15'),
+('2.1 Phases of translation', '15'),
+('2.2 Character sets', '16'),
+('2.3 Trigraph sequences', '17'),
+('2.4 Preprocessing tokens', '17'),
+('2.5 Alternative tokens', '18'),
+('2.6 Tokens', '19'),
+('2.7 Comments', '19'),
+('2.8 Header names', '19'),
+('2.9 Preprocessing numbers', '20'),
+('2.10 Identifiers', '20'),
+('2.11 Keywords', '20'),
+('2.12 Operators and punctuators', '21'),
+('2.13 Literals', '21'),
+('3 Basic concepts', '29'),
+('3.1 Declarations and definitions', '29'),
+('3.2 One definition rule', '31'),
+('3.3 Declarative regions and scopes', '33'),
+('3.4 Name lookup', '38'),
+('3.5 Program and linkage', '51'),
+('3.6 Start and termination', '54'),
+('3.7 Storage duration', '58'),
+('3.8 Object Lifetime', '62'),
+('3.9 Types', '65'),
+('3.10 Lvalues and rvalues', '70'),
+('3.11 Alignment', '72'),
+('4 Standard conversions', '73'),
+('4.1 Lvalue-to-rvalue conversion', '74'),
+('4.2 Array-to-pointer conversion', '74'),
+('4.3 Function-to-pointer conversion', '74'),
+('4.4 Qualification conversions', '74'),
+('4.5 Integral promotions', '75'),
+('4.6 Floating point promotion', '76'),
+('4.7 Integral conversions', '76'),
+('4.8 Floating point conversions', '76'),
+('4.9 Floating-integral conversions', '77'),
+('4.10 Pointer conversions', '77'),
+('4.11 Pointer to member conversions', '77'),
+('4.12 Boolean conversions', '78'),
+('4.13 Integer conversion rank', '78'),
+('5 Expressions', '79'),
+('5.1 Primary expressions', '80'),
+('5.2 Postfix expressions', '85'),
+('5.3 Unary expressions', '96'),
+('5.4 Explicit type conversion (cast notation)', '104'),
+('5.5 Pointer-to-member operators', '105'),
+('5.6 Multiplicative operators', '106'),
+('5.7 Additive operators', '106'),
+('5.8 Shift operators', '107'),
+('5.9 Relational operators', '108'),
+('5.10 Equality operators', '109'),
+('5.11 Bitwise AND operator', '110'),
+('5.12 Bitwise exclusive OR operator', '110'),
+('5.13 Bitwise inclusive OR operator', '110'),
+('5.14 Logical AND operator', '110'),
+('5.15 Logical OR operator', '110'),
+('5.16 Conditional operator', '111'),
+('5.17 Assignment and compound assignment operators', '112'),
+('5.18 Comma operator', '113'),
+('5.19 Constant expressions', '113'),
+('6 Statements', '116'),
+('6.1 Labeled statement', '116'),
+('6.2 Expression statement', '116'),
+('6.3 Compound statement or block', '116'),
+('6.4 Selection statements', '117'),
+('6.5 Iteration statements', '118'),
+('6.6 Jump statements', '121'),
+('6.7 Declaration statement', '122'),
+('6.8 Ambiguity resolution', '123'),
+('7 Declarations', '125'),
+('7.1 Specifiers', '126'),
+('7.2 Enumeration declarations', '140'),
+('7.3 Namespaces', '143'),
+('7.4 The asm declaration', '156'),
+('7.5 Linkage specifications', '156'),
+('8 Declarators', '160'),
+('8.1 Type names', '161'),
+('8.2 Ambiguity resolution', '161'),
+('8.3 Meaning of declarators', '163'),
+('8.4 Function definitions', '175'),
+('8.5 Initializers', '177'),
+('9 Classes', '191'),
+('9.1 Class names', '193'),
+('9.2 Class members', '194'),
+('9.3 Member functions', '197'),
+('9.4 Static members', '200'),
+('9.5 Unions', '202'),
+('9.6 Bit-fields', '203'),
+('9.7 Nested class declarations', '204'),
+('9.8 Local class declarations', '205'),
+('9.9 Nested type names', '206'),
+('10 Derived classes', '207'),
+('10.1 Multiple base classes', '208'),
+('10.2 Member name lookup', '210'),
+('10.3 Virtual functions', '213'),
+('10.4 Abstract classes', '217'),
+('11 Member access control', '219'),
+('11.1 Access specifiers', '221'),
+('11.2 Accessibility of base classes and base class members', '222'),
+('11.3 Access declarations', '224'),
+('11.4 Friends', '225'),
+('11.5 Protected member access', '228'),
+('11.6 Access to virtual functions', '229'),
+('11.7 Multiple access', '230'),
+('11.8 Nested classes', '230'),
+('12 Special member functions', '231'),
+('12.1 Constructors', '231'),
+('12.2 Temporary objects', '233'),
+('12.3 Conversions', '235'),
+('12.4 Destructors', '238'),
+('12.5 Free store', '240'),
+('12.6 Initialization', '242'),
+('12.7 Construction and destruction', '247'),
+('12.8 Copying class objects', '250'),
+('12.9 Inheriting Constructors', '255'),
+('13 Overloading', '259'),
+('13.1 Overloadable declarations', '259'),
+('13.2 Declaration matching', '261'),
+('13.3 Overload resolution', '262'),
+('13.4 Address of overloaded function', '281'),
+('13.5 Overloaded operators', '282'),
+('13.6 Built-in operators', '286'),
+('14 Templates', '290'),
+('14.1 Template parameters', '291'),
+('14.2 Names of template specializations', '294'),
+('14.3 Template arguments', '296'),
+('14.4 Type equivalence', '302'),
+('14.5 Template declarations', '303'),
+('14.6 Name resolution', '318'),
+('14.7 Template instantiation and specialization', '331'),
+('14.8 Function template specializations', '343'),
+('15 Exception handling', '363'),
+('15.1 Throwing an exception', '364'),
+('15.2 Constructors and destructors', '366'),
+('15.3 Handling an exception', '366'),
+('15.4 Exception specifications', '368'),
+('15.5 Special functions', '371'),
+('15.6 Exceptions and access', '372'),
+('16 Preprocessing directives', '373'),
+('16.1 Conditional inclusion', '375'),
+('16.2 Source file inclusion', '376'),
+('16.3 Macro replacement', '377'),
+('16.4 Line control', '382'),
+('16.5 Error directive', '383'),
+('16.6 Pragma directive', '383'),
+('16.7 Null directive', '383'),
+('16.8 Predefined macro names', '383'),
+('16.9 Pragma operator', '384'),
+('17 Library introduction', '386'),
+('17.1 General', '386'),
+('17.2 Overview', '386'),
+('17.3 Definitions', '386'),
+('17.4 Additional definitions', '390'),
+('17.5 Method of description (Informative)', '390'),
+('17.6 Library-wide requirements', '396'),
+('18 Language support library', '407'),
+('18.1 Types', '407'),
+('18.2 Implementation properties', '408'),
+('18.3 Integer types', '417'),
+('18.4 Start and termination', '418'),
+('18.5 Dynamic memory management', '420'),
+('18.6 Type identification', '424'),
+('18.7 Exception handling', '427'),
+('18.8 Initializer lists', '432'),
+('18.9 Other runtime support', '434'),
+('19 Diagnostics library', '435'),
+('19.1 Exception classes', '435'),
+('19.2 Assertions', '439'),
+('19.3 Error numbers', '440'),
+('19.4 System error support', '440'),
+('20 General utilities library', '452'),
+('20.1 Requirements', '452'),
+('20.2 Utility components', '457'),
+('20.3 Compile-time rational arithmetic', '463'),
+('20.4 Tuples', '465'),
+('20.5 Metaprogramming and type traits', '473'),
+('20.6 Function objects', '486'),
+('20.7 Memory', '509'),
+('20.8 Time utilities', '548'),
+('20.9 Date and time functions', '562'),
+('21 Strings library', '563'),
+('21.1 Character traits', '563'),
+('21.2 String classes', '569'),
+('21.3 Class template basic_string', '572'),
+('21.4 Numeric Conversions', '599'),
+('21.5 Null-terminated sequence utilities', '600'),
+('22 Localization library', '604'),
+('22.1 Locales', '604'),
+('22.2 Standard locale categories', '617'),
+('22.3 Standard code conversion facets', '657'),
+('22.4 C Library Locales', '659'),
+('23 Containers library', '660'),
+('23.1 Container requirements', '660'),
+('23.2 Sequence containers', '681'),
+('23.3 Associative containers', '719'),
+('23.4 Unordered associative containers', '744'),
+('24 Iterators library', '759'),
+('24.1 Iterator requirements', '759'),
+('24.2 Header <iterator> synopsis', '764'),
+('24.3 Iterator primitives', '767'),
+('24.4 Predefined iterators', '770'),
+('24.5 Stream iterators', '784'),
+('25 Algorithms library', '792'),
+('25.1 Non-modifying sequence operations', '802'),
+('25.2 Mutating sequence operations', '806'),
+('25.3 Sorting and related operations', '815'),
+('25.4 C library algorithms', '829'),
+('26 Numerics library', '831'),
+('26.1 Numeric type requirements', '831'),
+('26.2 The floating-point environment', '832'),
+('26.3 Complex numbers', '833'),
+('26.4 Random number generation', '842'),
+('26.5 Numeric arrays', '884'),
+('26.6 Generalized numeric operations', '904'),
+('26.7 C Library', '907'),
+('27 Input/output library', '912'),
+('27.1 Iostreams requirements', '912'),
+('27.2 Forward declarations', '912'),
+('27.3 Standard iostream objects', '915'),
+('27.4 Iostreams base classes', '916'),
+('27.5 Stream buffers', '934'),
+('27.6 Formatting and manipulators', '944'),
+('27.7 String-based streams', '972'),
+('27.8 File-based streams', '984'),
+('28 Regular expressions library', '1000'),
+('28.1 Definitions', '1000'),
+('28.2 Requirements', '1000'),
+('28.3 Regular expressions summary', '1002'),
+('28.4 Header <regex> synopsis', '1003'),
+('28.5 Namespace std::regex_constants', '1009'),
+('28.6 Class regex_error', '1012'),
+('28.7 Class template regex_traits', '1012'),
+('28.8 Class template basic_regex', '1015'),
+('28.9 Class template sub_match', '1020'),
+('28.10 Class template match_results', '1025'),
+('28.11 Regular expression algorithms', '1029'),
+('28.12 Regular expression Iterators', '1033'),
+('28.13 Modified ECMAScript regular expression grammar', '1039'),
+('29 Atomic operations library', '1042'),
+('29.1 Order and Consistency', '1044'),
+('29.2 Lock-free Property', '1046'),
+('29.3 Atomic Types', '1046'),
+('29.4 Operations on Atomic Types', '1051'),
+('29.5 Flag Type and Operations', '1054'),
+('30 Thread support library', '1057'),
+('30.1 Requirements', '1057'),
+('30.2 Threads', '1058'),
+('30.3 Mutual exclusion', '1063'),
+('30.4 Condition variables', '1077'),
+('A Grammar summary', '1085'),
+('A.1 Keywords', '1085'),
+('A.2 Lexical conventions', '1085'),
+('A.3 Basic concepts', '1089'),
+('A.4 Expressions', '1090'),
+('A.5 Statements', '1093'),
+('A.6 Declarations', '1094'),
+('A.7 Declarators', '1097'),
+('A.8 Classes', '1098'),
+('A.9 Derived classes', '1099'),
+('A.10 Special member functions', '1099'),
+('A.11 Overloading', '1100'),
+('A.12 Templates', '1100'),
+('A.13 Exception handling', '1101'),
+('A.14 Preprocessing directives', '1101'),
+('B Implementation quantities', '1103'),
+('C Compatibility', '1105'),
+('C.1 C++ and ISO C', '1105'),
+('C.2 Standard C library', '1114'),
+('D Compatibility features', '1119'),
+('D.1 Increment operator with bool operand', '1119'),
+('D.2 static keyword', '1119'),
+('D.3 Access declarations', '1119'),
+('D.4 Implicit conversion from const strings', '1119'),
+('D.5 C standard library headers', '1119'),
+('D.6 Old iostreams members', '1120'),
+('D.7 char* streams', '1121'),
+('D.8 Binders', '1130'),
+('D.9 auto_ptr', '1132'),
+('E Universal-character-names', '1135'),
+('F Cross references', '1137'),
+('Index', '1153')]
+
+kDocuments = {
+ 'C99' : (c99URL, c99TOC, 12),
+ 'C++' : (cXXURL, cXXTOC, 12),
+}
+
+def findClosestTOCEntry(data, target):
+ # FIXME: Fix for named spec references
+ if isinstance(target[0],str):
+ return ('.'.join(target),'<named>',1)
+
+ offset = data[2]
+ best = None
+ for (name,page) in data[1]:
+ if ' ' in name:
+ section,name = name.split(' ',1)
+ if section == 'Annex':
+ section,name = name.split(' ',1)
+ section = 'Annex '+section
+ else:
+ section = None
+ try:
+ page = int(page) + offset
+ except:
+ page = 1
+ try:
+ spec = SpecIndex.fromstring(section)
+ except:
+ spec = None
+
+ # Meh, could be better...
+ if spec is not None:
+ dist = spec - target
+ if best is None or dist < best[0]:
+ best = (dist, (section, name, page))
+ return best[1]
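+# For example, a reference to C99 6.5.16 resolves to the TOC entry
+# ('6.5.16', 'Assignment operators', 103), i.e. TOC page 91 plus the
+# document's 12 page offset (illustrative walk-through, not extra data).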
+
+# What a hack. Slow to boot.
+doxyLineRefRE = re.compile(r"<a name=\"l([0-9]+)\"></a>")
+def findClosestLineReference(clangRoot, doxyName, target):
+ try:
+ f = open(os.path.join(clangRoot, 'docs', 'doxygen', 'html', doxyName))
+ except:
+ return None
+
+ best = None
+ for m in doxyLineRefRE.finditer(f.read()):
+ line = int(m.group(1), 10)
+ dist = abs(line - target)
+ if best is None or dist < best[0]:
+ best = (dist,'l'+m.group(1))
+ f.close()
+ if best is not None:
+ return best[1]
+ return None
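+# Illustrative example with hypothetical names: if Stmt.h is rendered as
+# Stmt_8h-source.html and its nearest anchor to line 120 is <a name="l00118">,
+# the function returns the fragment 'l00118'.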
+
+###
+
+nameAndSpecRefRE = re.compile(r"(C99|C90|C\+\+|H\&S) ((([0-9]+)(\.[0-9]+)*|\[[^]]+\])(p[0-9]+)?)")
+loneSpecRefRE = re.compile(r" (([0-9]+)(\.[0-9]+){2,100}(p[0-9]+)?)")
+def scanFile(path, filename):
+ try:
+ f = open(path)
+ except IOError:
+ print >>sys.stderr,'WARNING: Unable to open:',path
+ return
+
+ for i,ln in enumerate(f):
+ ignore = set()
+ for m in nameAndSpecRefRE.finditer(ln):
+ section = m.group(2)
+ name = m.group(1)
+ if section.endswith('.'):
+ section = section[:-1]
+ yield RefItem(name, section, filename, path, i+1)
+ ignore.add(section)
+ for m in loneSpecRefRE.finditer(ln):
+ section = m.group(1)
+ if section.endswith('.'):
+ section = section[:-1]
+ if section not in ignore:
+ yield RefItem(None, section, filename, path, i+1)
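+# For example, a source comment containing "C99 6.7.8p14" yields
+# RefItem('C99', '6.7.8p14', ...), while a bare " 6.2.5.4" (at least three
+# numeric components, no document name) yields RefItem(None, '6.2.5.4', ...).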
+
+###
+
+class SpecIndex:
+ @staticmethod
+ def fromstring(str):
+ # Check for named sections
+ if str[0] == '[':
+ assert ']' in str
+ secs = str[1:str.index(']')].split('.')
+ tail = str[str.index(']')+1:]
+ if tail:
+ assert tail[0] == 'p'
+ paragraph = int(tail[1:])
+ else:
+ paragraph = None
+ indices = secs
+ else:
+ secs = str.split('.')
+ paragraph = None
+ if 'p' in secs[-1]:
+ secs[-1],p = secs[-1].split('p',1)
+ paragraph = int(p)
+ indices = map(int, secs)
+ return SpecIndex(indices, paragraph)
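+    # e.g. SpecIndex.fromstring('6.5.16p2') gives indices (6, 5, 16) and
+    # paragraph 2; SpecIndex.fromstring('[basic.lookup]p1') keeps the named
+    # sections ['basic', 'lookup'] as indices with paragraph 1.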
+
+ def __init__(self, indices, paragraph=None):
+ assert len(indices)>0
+ self.indices = tuple(indices)
+ self.paragraph = paragraph
+
+ def __str__(self):
+ s = '.'.join(map(str,self.indices))
+ if self.paragraph is not None:
+ s += '.p%d'%(self.paragraph,)
+ return s
+
+ def __repr__(self):
+ return 'SpecIndex(%s, %s)'%(self.indices, self.paragraph)
+
+ def __cmp__(self, b):
+ return cmp((self.indices,self.paragraph),
+ (b.indices,b.paragraph))
+
+ def __hash__(self):
+ return hash((self.indices,self.paragraph))
+
+ def __sub__(self, indices):
+ def sub(a,b):
+ a = a or 0
+ b = b or 0
+ return abs(a-b)
+ return map(sub,self.indices,indices)
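+    # e.g. SpecIndex([6, 5, 16]) - (6, 5) == [0, 0, 16]; missing components
+    # are treated as 0, and the resulting lists compare lexicographically.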
+
+class RefItem:
+ def __init__(self, name, section, filename, path, line):
+ self.name = name
+ self.section = SpecIndex.fromstring(section)
+ self.filename = filename
+ self.path = path
+ self.line = line
+
+ def __str__(self):
+ if self.name is not None:
+ return '%s %s'%(self.name, self.section)
+ else:
+ return '--- %s'%(self.section,)
+
+ def __repr__(self):
+ return 'RefItem(%s, %r, "%s", "%s", %d)'%(self.name,
+ self.section,
+ self.filename,
+ self.path,
+ self.line)
+
+    def __cmp__(self, b):
+        return cmp((self.name,self.section,self.filename,self.path,self.line),
+                   (b.name,b.section,b.filename,b.path,b.line))
+
+ def __hash__(self):
+ return hash((self.name,self.section,self.filename,self.path,self.line))
+
+###
+
+def sorted(l):
+ l = list(l)
+ l.sort()
+ return l
+
+def getRevision(path):
+ import subprocess
+ p = subprocess.Popen(['svn', 'info', path],
+ stdin=open('/dev/null','r'),
+ stdout=subprocess.PIPE)
+ for ln in p.stdout.read(1024).split('\n'):
+ if ln.startswith('Revision:'):
+ return ln.split(':',1)[1].strip()
+ return None
+
+def buildRefTree(references):
+ root = (None, {}, [])
+
+ def getNode(keys):
+ if not keys:
+ return root
+ key,parent = keys[-1],getNode(keys[:-1])
+ node = parent[1].get(key)
+ if node is None:
+ parent[1][key] = node = (key, {}, [])
+ return node
+
+ for ref in references:
+ n = getNode((ref.name,) + ref.section.indices)
+ n[2].append(ref)
+
+ def flatten((key, children, data)):
+ children = sorted(map(flatten,children.values()))
+ return (key, children, sorted(data))
+
+ return flatten(root)
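+# For example, references to "C99 6.5" and "C99 6.5.16" both hang off the
+# 'C99' -> 6 -> 5 path of the tree; flatten() then sorts each level.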
+
+def preorder(node,parents=(),first=True):
+ (key,children,data) = node
+ if first:
+ yield parents+(node,)
+ for c in children:
+ for item in preorder(c, parents+(node,)):
+ yield item
+
+def main():
+ global options
+ from optparse import OptionParser
+ parser = OptionParser("usage: %prog [options] CLANG_ROOT <output-dir>")
+ parser.add_option("", "--debug", dest="debug",
+ help="Print extra debugging output",
+ action="store_true",
+ default=False)
+ (opts, args) = parser.parse_args()
+
+ if len(args) != 2:
+ parser.error("incorrect number of arguments")
+
+ references = []
+ root,outputDir = args
+ if os.path.isdir(root):
+ for (dirpath, dirnames, filenames) in os.walk(root):
+ for filename in filenames:
+ name,ext = os.path.splitext(filename)
+ if ext in ('.c', '.cpp', '.h', '.def'):
+ fullpath = os.path.join(dirpath, filename)
+ references.extend(list(scanFile(fullpath, filename)))
+ else:
+ references.extend(list(scanFile(root, root)))
+
+ refTree = buildRefTree(references)
+
+ specs = {}
+ for ref in references:
+ spec = specs[ref.name] = specs.get(ref.name,{})
+ items = spec[ref.section] = spec.get(ref.section,[])
+ items.append(ref)
+
+ print 'Found %d references.'%(len(references),)
+
+ if opts.debug:
+ pprint(refTree)
+
+ referencesPath = os.path.join(outputDir,'references.html')
+ print 'Writing: %s'%(referencesPath,)
+ f = open(referencesPath,'w')
+ print >>f, '<html><head><title>clang: Specification References</title></head>'
+ print >>f, '<body>'
+ print >>f, '\t<h2>Specification References</h2>'
+ for i,node in enumerate(refTree[1]):
+ specName = node[0] or 'Unknown'
+ print >>f, '<a href="#spec%d">%s</a><br>'%(i,specName)
+ for i,node in enumerate(refTree[1]):
+ specName = node[0] or 'Unknown'
+ print >>f, '<hr>'
+ print >>f, '<a name="spec%d">'%(i,)
+ print >>f, '<h3>Document: %s</h3>'%(specName or 'Unknown',)
+ print >>f, '<table border="1" cellspacing="2" width="80%">'
+ print >>f, '<tr><th width="20%">Name</th><th>References</th></tr>'
+ docData = kDocuments.get(specName)
+ for path in preorder(node,first=False):
+ if not path[-1][2]:
+ continue
+ components = '.'.join([str(p[0]) for p in path[1:]])
+ print >>f, '\t<tr>'
+ tocEntry = None
+ if docData is not None:
+ tocEntry = findClosestTOCEntry(docData, [p[0] for p in path[1:]])
+ if tocEntry is not None:
+ section,name,page = tocEntry
+ # If section is exact print the TOC name
+ if page is not None:
+ linkStr = '<a href="%s#page=%d">%s</a> (pg.%d)'%(docData[0],page,components,page)
+ else:
+ linkStr = components
+ if section == components:
+ print >>f, '\t\t<td valign=top>%s<br>%s</td>'%(linkStr,name)
+ else:
+ print >>f, '\t\t<td valign=top>%s</td>'%(linkStr,)
+ else:
+ print >>f, '\t\t<td valign=top>%s</td>'%(components,)
+ print >>f, '\t\t<td valign=top>'
+ for item in path[-1][2]:
+ # XXX total hack
+ relativePath = item.path[len(root):]
+ if relativePath.startswith('/'):
+ relativePath = relativePath[1:]
+ # XXX this is broken, how does doxygen mangle w/ multiple
+ # refs? Can we just read its map?
+ filename = os.path.basename(relativePath)
+ doxyName = '%s-source.html'%(filename.replace('.','_8'),)
+ # Grrr, why can't doxygen write line number references.
+ lineReference = findClosestLineReference(root,doxyName,item.line)
+ if lineReference is not None:
+ linkStr = 'http://clang.llvm.org/doxygen/%s#%s'%(doxyName,lineReference)
+ else:
+ linkStr = 'http://clang.llvm.org/doxygen/%s'%(doxyName,)
+ if item.section.paragraph is not None:
+ paraText = '&nbsp;(p%d)'%(item.section.paragraph,)
+ else:
+ paraText = ''
+ print >>f,'<a href="%s">%s:%d</a>%s<br>'%(linkStr,relativePath,item.line,paraText)
+ print >>f, '\t\t</td>'
+ print >>f, '\t</tr>'
+ print >>f, '</table>'
+ print >>f, '<hr>'
+ print >>f, 'Generated: %s<br>'%(time.strftime('%Y-%m-%d %H:%M'),)
+ print >>f, 'SVN Revision: %s'%(getRevision(root),)
+ print >>f, '</body>'
+ f.close()
+
+if __name__=='__main__':
+ main()
diff --git a/utils/SummarizeErrors b/utils/SummarizeErrors
new file mode 100755
index 0000000..64d7824
--- /dev/null
+++ b/utils/SummarizeErrors
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+
+import os, sys, re
+
+class multidict:
+ def __init__(self, elts=()):
+ self.data = {}
+ for key,value in elts:
+ self[key] = value
+
+ def __getitem__(self, item):
+ return self.data[item]
+ def __setitem__(self, key, value):
+ if key in self.data:
+ self.data[key].append(value)
+ else:
+ self.data[key] = [value]
+ def items(self):
+ return self.data.items()
+ def values(self):
+ return self.data.values()
+ def keys(self):
+ return self.data.keys()
+ def __len__(self):
+ return len(self.data)
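+# e.g. m = multidict(); m['x'] = 1; m['x'] = 2 leaves m['x'] == [1, 2], so
+# repeated diagnostics accumulate under a single key.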
+
+kDiagnosticRE = re.compile(': (error|warning): (.*)')
+kAssertionRE = re.compile('Assertion failed: (.*, function .*, file .*, line [0-9]+\\.)')
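+# For example, the line "t.c:3:5: error: expected ';'" is grouped under the
+# message text "expected ';'", so identical diagnostics are counted together.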
+
+def readInfo(path, opts):
+ lastProgress = [-100,0]
+ def progress(pos):
+ pct = (100. * pos) / (size * 2)
+ if (pct - lastProgress[0]) >= 10:
+ lastProgress[0] = pct
+ print '%d/%d = %.2f%%' % (pos, size*2, pct)
+
+ f = open(path)
+ data = f.read()
+ f.close()
+
+ if opts.truncate != -1:
+ data = data[:opts.truncate]
+
+ size = len(data)
+ warnings = multidict()
+ errors = multidict()
+ for m in kDiagnosticRE.finditer(data):
+ progress(m.end())
+ if m.group(1) == 'error':
+ d = errors
+ else:
+ d = warnings
+ d[m.group(2)] = m
+ warnings = warnings.items()
+ errors = errors.items()
+ assertions = multidict()
+ for m in kAssertionRE.finditer(data):
+ print '%d/%d = %.2f%%' % (size + m.end(), size, (float(m.end()) / (size*2)) * 100.)
+ assertions[m.group(1)] = m
+ assertions = assertions.items()
+
+    # Manual scan for stack traces. Note that this block is disabled (if 0)
+    # and kStackDumpLineRE is not defined in this file.
+    aborts = multidict()
+    if 0:
+ prevLine = None
+ lnIter = iter(data.split('\n'))
+ for ln in lnIter:
+ m = kStackDumpLineRE.match(ln)
+ if m:
+ stack = [m.group(2)]
+ for ln in lnIter:
+ m = kStackDumpLineRE.match(ln)
+ if not m:
+ break
+ stack.append(m.group(2))
+ if prevLine is None or not kAssertionRE.match(prevLine):
+ aborts[tuple(stack)] = stack
+ prevLine = ln
+
+ sections = [
+ (warnings, 'Warnings'),
+ (errors, 'Errors'),
+ (assertions, 'Assertions'),
+ (aborts.items(), 'Aborts'),
+ ]
+
+ if opts.ascending:
+ sections.reverse()
+
+ for l,title in sections:
+ l.sort(key = lambda (a,b): -len(b))
+ if l:
+ print '-- %d %s (%d kinds) --' % (sum([len(b) for a,b in l]), title, len(l))
+ for name,elts in l:
+ print '%5d:' % len(elts), name
+
+def main():
+ global options
+ from optparse import OptionParser
+ parser = OptionParser("usage: %prog [options] {inputs}")
+ parser.add_option("", "--ascending", dest="ascending",
+ help="Print output in ascending order of severity.",
+ action="store_true", default=False)
+ parser.add_option("", "--truncate", dest="truncate",
+ help="Truncate input file (for testing).",
+ type=int, action="store", default=-1)
+ (opts, args) = parser.parse_args()
+
+ if not args:
+ parser.error('No inputs specified')
+
+ for arg in args:
+ readInfo(arg, opts)
+
+if __name__=='__main__':
+ main()
diff --git a/utils/builtin-defines.c b/utils/builtin-defines.c
new file mode 100644
index 0000000..9bbe5be2
--- /dev/null
+++ b/utils/builtin-defines.c
@@ -0,0 +1,85 @@
+/*
+This is a clang-style test case for checking that the preprocessor
+defines produced by clang match those produced by gcc.
+*/
+
+/*
+RUN: for arch in -m32 -m64; do \
+RUN: for lang in -std=gnu89 -ansi -std=c99 -std=gnu99; do \
+RUN: for input in c objective-c; do \
+RUN: for opts in "-O0" "-O1 -dynamic" "-O2 -static" "-Os"; do \
+RUN: echo "-- $arch, $lang, $input, $opts --"; \
+RUN: for cc in 0 1; do \
+RUN: if [ "$cc" == 0 ]; then \
+RUN: cc_prog=clang; \
+RUN: output=%t0; \
+RUN: else \
+RUN: cc_prog=gcc; \
+RUN: output=%t1; \
+RUN: fi; \
+RUN: $cc_prog $arch $lang $opts -march=core2 -dM -E -x $input %s | sort > $output; \
+RUN: done; \
+RUN: if (! diff %t0 %t1); then exit 1; fi; \
+RUN: done; \
+RUN: done; \
+RUN: done; \
+RUN: done;
+*/
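+/*
+ * For example (illustrative), one iteration of the loop above effectively runs
+ *   clang -m32 -std=c99 -O0 -march=core2 -dM -E -x c builtin-defines.c | sort
+ * and diffs the sorted macro list against the matching gcc invocation.
+ */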
+
+/* We don't care about this difference */
+#ifdef __PIC__
+#if __PIC__ == 1
+#undef __PIC__
+#undef __pic__
+#define __PIC__ 2
+#define __pic__ 2
+#endif
+#endif
+
+/* Undefine things we don't expect to match. */
+#undef __core2
+#undef __core2__
+#undef __SSSE3__
+
+/* Undefine things we don't expect to match. */
+#undef __DEC_EVAL_METHOD__
+#undef __INT16_TYPE__
+#undef __INT32_TYPE__
+#undef __INT64_TYPE__
+#undef __INT8_TYPE__
+#undef __SSP__
+#undef __APPLE_CC__
+#undef __VERSION__
+#undef __clang__
+#undef __llvm__
+#undef __nocona
+#undef __nocona__
+#undef __k8
+#undef __k8__
+#undef __tune_nocona__
+#undef __tune_core2__
+#undef __POINTER_WIDTH__
+#undef __INTPTR_TYPE__
+#undef __NO_MATH_INLINES
+
+#undef __DEC128_DEN__
+#undef __DEC128_EPSILON__
+#undef __DEC128_MANT_DIG__
+#undef __DEC128_MAX_EXP__
+#undef __DEC128_MAX__
+#undef __DEC128_MIN_EXP__
+#undef __DEC128_MIN__
+#undef __DEC32_DEN__
+#undef __DEC32_EPSILON__
+#undef __DEC32_MANT_DIG__
+#undef __DEC32_MAX_EXP__
+#undef __DEC32_MAX__
+#undef __DEC32_MIN_EXP__
+#undef __DEC32_MIN__
+#undef __DEC64_DEN__
+#undef __DEC64_EPSILON__
+#undef __DEC64_MANT_DIG__
+#undef __DEC64_MAX_EXP__
+#undef __DEC64_MAX__
+#undef __DEC64_MIN_EXP__
+#undef __DEC64_MIN__
diff --git a/utils/ccc-analyzer b/utils/ccc-analyzer
new file mode 100755
index 0000000..e4bf415
--- /dev/null
+++ b/utils/ccc-analyzer
@@ -0,0 +1,617 @@
+#!/usr/bin/env perl
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# A script designed to interpose between the build system and gcc. It invokes
+# both gcc and the static analyzer.
+#
+##===----------------------------------------------------------------------===##
+
+use strict;
+use warnings;
+use Cwd qw/ getcwd abs_path /;
+use File::Temp qw/ tempfile /;
+use File::Path qw / mkpath /;
+use File::Basename;
+use Text::ParseWords;
+
+my $CC = $ENV{'CCC_CC'};
+if (!defined $CC) { $CC = "gcc"; }
+my $CleanupFile;
+my $ResultFile;
+
+# Remove any stale files at exit.
+END {
+ if (defined $CleanupFile && -z $CleanupFile) {
+ `rm -f $CleanupFile`;
+ }
+}
+
+##----------------------------------------------------------------------------##
+# Process Clang Crashes.
+##----------------------------------------------------------------------------##
+
+sub GetPPExt {
+ my $Lang = shift;
+ if ($Lang =~ /objective-c/) { return ".mi"; }
+ return ".i";
+}
+
+# Set this to 1 if we want to include 'parser rejects' files.
+my $IncludeParserRejects = 0;
+my $ParserRejects = "Parser Rejects";
+
+my $AttributeIgnored = "Attribute Ignored";
+
+sub ProcessClangFailure {
+ my ($ClangCC, $Lang, $file, $Args, $HtmlDir, $ErrorType, $ofile) = @_;
+ my $Dir = "$HtmlDir/failures";
+ mkpath $Dir;
+
+ my $prefix = "clang_crash";
+ if ($ErrorType eq $ParserRejects) {
+ $prefix = "clang_parser_rejects";
+ }
+ elsif ($ErrorType eq $AttributeIgnored) {
+ $prefix = "clang_attribute_ignored";
+ }
+
+ # Generate the preprocessed file with cc (i.e., gcc).
+ my ($PPH, $PPFile) = tempfile( $prefix . "_XXXXXX",
+ SUFFIX => GetPPExt($Lang),
+ DIR => $Dir);
+
+ system $CC, @$Args, "-E", "-o", $PPFile;
+ close ($PPH);
+
+ # Generate the preprocessed file with clang.
+ my $PPFile_Clang = $PPFile;
+ $PPFile_Clang =~ s/[.](.+)$/.clang.$1/;
+ system $ClangCC, @$Args, "-E", "-o", "$PPFile_Clang";
+
+ # Create the info file.
+ open (OUT, ">", "$PPFile.info.txt") or die "Cannot open $PPFile.info.txt\n";
+ print OUT abs_path($file), "\n";
+ print OUT "$ErrorType\n";
+ print OUT "@$Args\n";
+ close OUT;
+ `uname -a >> $PPFile.info.txt 2>&1`;
+ `$CC -v >> $PPFile.info.txt 2>&1`;
+ system 'mv',$ofile,"$PPFile.stderr.txt";
+ return (basename $PPFile);
+}
+
+##----------------------------------------------------------------------------##
+# Running the analyzer.
+##----------------------------------------------------------------------------##
+
+# Determine what clang executable to use.
+my $Clang = $ENV{'CLANG'};
+if (!defined $Clang) { $Clang = 'clang'; }
+
+sub GetCCArgs {
+ my $Args = shift;
+
+ pipe (FROM_CHILD, TO_PARENT);
+ my $pid = fork();
+ if ($pid == 0) {
+ close FROM_CHILD;
+ open(STDOUT,">&", \*TO_PARENT);
+ open(STDERR,">&", \*TO_PARENT);
+ exec $Clang, "-###", "-fsyntax-only", @$Args;
+ }
+ close(TO_PARENT);
+ my $line;
+ while (<FROM_CHILD>) {
+ next if (!/clang-cc/);
+ $line = $_;
+ }
+
+ waitpid($pid,0);
+ close(FROM_CHILD);
+
+ die "could not find clang-cc line\n" if (!defined $line);
+  # Strip the newline and initial whitespace
+ chomp $line;
+ $line =~ s/^\s+//;
+
+ my @items = quotewords('\s+', 1, $line);
+  for (my $i = 0; $i < scalar(@items); ++$i) {
+ $items[$i] =~ s/^\"//;
+ $items[$i] =~ s/\"$//;
+ }
+ my $cmd = shift @items;
+ die "cannot find 'clang-cc' in 'clang' command\n" if (!($cmd =~ /clang-cc/));
+ return \@items;
+}
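+# For example (illustrative), "clang -### -fsyntax-only t.c" prints a line like
+#   "/usr/local/libexec/clang-cc" "-fsyntax-only" "t.c"
+# GetCCArgs unquotes that line and returns the arguments without the
+# clang-cc executable itself.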
+
+sub Analyze {
+ my ($ClangCC, $Args, $AnalyzeArgs, $Lang, $Output, $Verbose, $HtmlDir,
+ $file, $Analyses) = @_;
+
+ $Args = GetCCArgs($Args);
+
+ # Skip anything related to C++.
+ return if ($Lang =~ /c[+][+]/);
+
+ my $RunAnalyzer = 0;
+ my $Cmd;
+ my @CmdArgs;
+ my @CmdArgsSansAnalyses;
+
+ if ($Lang =~ /header/) {
+ exit 0 if (!defined ($Output));
+ $Cmd = 'cp';
+ push @CmdArgs,$file;
+ # Remove the PCH extension.
+ $Output =~ s/[.]gch$//;
+ push @CmdArgs,$Output;
+ @CmdArgsSansAnalyses = @CmdArgs;
+ }
+ else {
+ $Cmd = $ClangCC;
+ push @CmdArgs,'-DIBOutlet=__attribute__((iboutlet))';
+ push @CmdArgs,@$Args;
+ @CmdArgsSansAnalyses = @CmdArgs;
+ push @CmdArgs,'-analyze';
+ push @CmdArgs,"-analyzer-display-progress";
+ push @CmdArgs,"-analyzer-eagerly-assume";
+ push @CmdArgs,(split /\s/,$Analyses);
+ $RunAnalyzer = 1;
+ }
+
+ # Add the analysis arguments passed down from scan-build.
+ foreach my $Arg (@$AnalyzeArgs) {
+ push @CmdArgs, $Arg;
+ }
+
+ my @PrintArgs;
+ my $dir;
+
+ if ($Verbose) {
+ $dir = getcwd();
+ print STDERR "\n[LOCATION]: $dir\n";
+ push @PrintArgs,"'$Cmd'";
+ foreach my $arg (@CmdArgs) { push @PrintArgs,"\'$arg\'"; }
+ }
+
+ if ($Verbose == 1) {
+ # We MUST print to stderr. Some clients use the stdout output of
+ # gcc for various purposes.
+ print STDERR join(' ',@PrintArgs);
+ print STDERR "\n";
+ }
+ elsif ($Verbose == 2) {
+ print STDERR "#SHELL (cd '$dir' && @PrintArgs)\n";
+ }
+
+ if ($RunAnalyzer) {
+ if (defined $ResultFile) {
+ push @CmdArgs,'-o';
+ push @CmdArgs, $ResultFile;
+ }
+ elsif (defined $HtmlDir) {
+ push @CmdArgs,'-o';
+ push @CmdArgs, $HtmlDir;
+ }
+ }
+
+ if (defined $ENV{'CCC_UBI'}) {
+ push @CmdArgs,"--analyzer-viz-egraph-ubigraph";
+ }
+
+ # Capture the STDERR of clang and send it to a temporary file.
+ # Capture the STDOUT of clang and reroute it to ccc-analyzer's STDERR.
+ # We save the output file in the 'crashes' directory if clang encounters
+ # any problems with the file.
+ pipe (FROM_CHILD, TO_PARENT);
+ my $pid = fork();
+ if ($pid == 0) {
+ close FROM_CHILD;
+ open(STDOUT,">&", \*TO_PARENT);
+ open(STDERR,">&", \*TO_PARENT);
+ exec $Cmd, @CmdArgs;
+ }
+
+ close TO_PARENT;
+ my ($ofh, $ofile) = tempfile("clang_output_XXXXXX", DIR => $HtmlDir);
+
+ while (<FROM_CHILD>) {
+ print $ofh $_;
+ print STDERR $_;
+ }
+
+ waitpid($pid,0);
+ close(FROM_CHILD);
+ my $Result = $?;
+
+ # Did the command die because of a signal?
+ if ($Result & 127 and $Cmd eq $ClangCC and defined $HtmlDir) {
+ ProcessClangFailure($ClangCC, $Lang, $file, \@CmdArgsSansAnalyses, $HtmlDir,
+ "Crash", $ofile);
+ }
+ elsif ($Result) {
+ if ($IncludeParserRejects && !($file =~/conftest/)) {
+ ProcessClangFailure($ClangCC, $Lang, $file, \@CmdArgsSansAnalyses, $HtmlDir,
+ $ParserRejects, $ofile);
+ }
+ }
+ else {
+ # Check if there were any unhandled attributes.
+ if (open(CHILD, $ofile)) {
+ my %attributes_not_handled;
+
+ # Don't flag warnings about the following attributes that we
+ # know are currently not supported by Clang.
+ $attributes_not_handled{"cdecl"} = 1;
+
+ my $ppfile;
+ while (<CHILD>) {
+ next if (! /warning: '([^\']+)' attribute ignored/);
+
+ # Have we already spotted this unhandled attribute?
+ next if (defined $attributes_not_handled{$1});
+ $attributes_not_handled{$1} = 1;
+
+ # Get the name of the attribute file.
+ my $dir = "$HtmlDir/failures";
+ my $afile = "$dir/attribute_ignored_$1.txt";
+
+ # Only create another preprocessed file if the attribute file
+ # doesn't exist yet.
+ next if (-e $afile);
+
+ # Add this file to the list of files that contained this attribute.
+ # Generate a preprocessed file if we haven't already.
+ if (!(defined $ppfile)) {
+ $ppfile = ProcessClangFailure($ClangCC, $Lang, $file,
+ \@CmdArgsSansAnalyses,
+ $HtmlDir, $AttributeIgnored, $ofile);
+ }
+
+ mkpath $dir;
+ open(AFILE, ">$afile");
+ print AFILE "$ppfile\n";
+ close(AFILE);
+ }
+ close CHILD;
+ }
+ }
+
+ `rm -f $ofile`;
+}
+
+##----------------------------------------------------------------------------##
+# Lookup tables.
+##----------------------------------------------------------------------------##
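+# In the tables below the value is the number of following arguments the option
+# consumes; e.g. "-include foo.h" carries both words, while "-fblocks" takes none.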
+
+my %CompileOptionMap = (
+ '-nostdinc' => 0,
+ '-fblocks' => 0,
+ '-fobjc-gc-only' => 0,
+ '-fobjc-gc' => 0,
+ '-ffreestanding' => 0,
+ '-include' => 1,
+ '-idirafter' => 1,
+ '-iprefix' => 1,
+ '-iquote' => 1,
+ '-isystem' => 1,
+ '-iwithprefix' => 1,
+ '-iwithprefixbefore' => 1
+);
+
+my %LinkerOptionMap = (
+ '-framework' => 1
+);
+
+my %CompilerLinkerOptionMap = (
+ '-isysroot' => 1,
+ '-arch' => 1,
+ '-v' => 0,
+ '-fpascal-strings' => 0,
+  '-mmacosx-version-min' => 0, # This really takes 1 argument, but it is always attached with '='
+  '-miphoneos-version-min' => 0 # This really takes 1 argument, but it is always attached with '='
+);
+
+my %IgnoredOptionMap = (
+ '-MT' => 1, # Ignore these preprocessor options.
+ '-MF' => 1,
+
+ '-fsyntax-only' => 0,
+ '-save-temps' => 0,
+ '-install_name' => 1,
+ '-exported_symbols_list' => 1,
+ '-current_version' => 1,
+ '-compatibility_version' => 1,
+ '-init' => 1,
+ '-e' => 1,
+ '-seg1addr' => 1,
+ '-bundle_loader' => 1,
+ '-multiply_defined' => 1,
+ '-sectorder' => 3,
+ '--param' => 1,
+ '-u' => 1
+);
+
+my %LangMap = (
+ 'c' => 'c',
+ 'cpp' => 'c++',
+ 'cc' => 'c++',
+ 'i' => 'c-cpp-output',
+ 'm' => 'objective-c',
+ 'mi' => 'objective-c-cpp-output'
+);
+
+my %UniqueOptions = (
+ '-isysroot' => 0
+);
+
+my %LangsAccepted = (
+ "objective-c" => 1,
+ "c" => 1
+);
+
+##----------------------------------------------------------------------------##
+# Main Logic.
+##----------------------------------------------------------------------------##
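+# ccc-analyzer is meant to stand in for the compiler in a build, e.g.
+# (illustrative) "make CC=ccc-analyzer", usually via scan-build, which sets
+# the CCC_* environment variables consumed below.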
+
+my $Action = 'link';
+my @CompileOpts;
+my @LinkOpts;
+my @Files;
+my $Lang;
+my $Output;
+my %Uniqued;
+
+# Forward arguments to gcc.
+my $Status = system($CC,@ARGV);
+if ($Status) { exit($Status >> 8); }
+
+# Get the analysis options.
+my $Analyses = $ENV{'CCC_ANALYZER_ANALYSIS'};
+if (!defined($Analyses)) { $Analyses = '-checker-cfref'; }
+
+# Get the store model.
+my $StoreModel = $ENV{'CCC_ANALYZER_STORE_MODEL'};
+if (!defined $StoreModel) { $StoreModel = "basic"; }
+
+# Get the constraints engine.
+my $ConstraintsModel = $ENV{'CCC_ANALYZER_CONSTRAINTS_MODEL'};
+if (!defined $ConstraintsModel) { $ConstraintsModel = "range"; }
+
+# Get the output format.
+my $OutputFormat = $ENV{'CCC_ANALYZER_OUTPUT_FORMAT'};
+if (!defined $OutputFormat) { $OutputFormat = "html"; }
+
+# Determine the level of verbosity.
+my $Verbose = 0;
+if (defined $ENV{CCC_ANALYZER_VERBOSE}) { $Verbose = 1; }
+if (defined $ENV{CCC_ANALYZER_LOG}) { $Verbose = 2; }
+
+# Determine what clang-cc executable to use.
+my $ClangCC = $ENV{'CLANG_CC'};
+if (!defined $ClangCC) { $ClangCC = 'clang-cc'; }
+
+# Get the HTML output directory.
+my $HtmlDir = $ENV{'CCC_ANALYZER_HTML'};
+
+my %DisabledArchs = ('ppc' => 1, 'ppc64' => 1);
+my %ArchsSeen;
+my $HadArch = 0;
+
+# Process the arguments.
+foreach (my $i = 0; $i < scalar(@ARGV); ++$i) {
+ my $Arg = $ARGV[$i];
+ my ($ArgKey) = split /=/,$Arg,2;
+
+ # Modes ccc-analyzer supports
+ if ($Arg eq '-E') { $Action = 'preprocess'; }
+ elsif ($Arg eq '-c') { $Action = 'compile'; }
+ elsif ($Arg =~ /^-print-prog-name/) { exit 0; }
+
+ # Specially handle duplicate cases of -arch
+ if ($Arg eq "-arch") {
+ my $arch = $ARGV[$i+1];
+    # We don't want to process 'ppc' because of Clang's lack of support
+    # for AltiVec (also, some #defines likely won't be defined correctly, etc.)
+ if (!(defined $DisabledArchs{$arch})) { $ArchsSeen{$arch} = 1; }
+ $HadArch = 1;
+ ++$i;
+ next;
+ }
+
+ # Options with possible arguments that should pass through to compiler.
+ if (defined $CompileOptionMap{$ArgKey}) {
+ my $Cnt = $CompileOptionMap{$ArgKey};
+ push @CompileOpts,$Arg;
+ while ($Cnt > 0) { ++$i; --$Cnt; push @CompileOpts, $ARGV[$i]; }
+ next;
+ }
+
+ # Options with possible arguments that should pass through to linker.
+ if (defined $LinkerOptionMap{$ArgKey}) {
+ my $Cnt = $LinkerOptionMap{$ArgKey};
+ push @LinkOpts,$Arg;
+ while ($Cnt > 0) { ++$i; --$Cnt; push @LinkOpts, $ARGV[$i]; }
+ next;
+ }
+
+ # Options with possible arguments that should pass through to both compiler
+ # and the linker.
+ if (defined $CompilerLinkerOptionMap{$ArgKey}) {
+ my $Cnt = $CompilerLinkerOptionMap{$ArgKey};
+
+ # Check if this is an option that should have a unique value, and if so
+ # determine if the value was checked before.
+ if ($UniqueOptions{$Arg}) {
+ if (defined $Uniqued{$Arg}) {
+ $i += $Cnt;
+ next;
+ }
+ $Uniqued{$Arg} = 1;
+ }
+
+ push @CompileOpts,$Arg;
+ push @LinkOpts,$Arg;
+
+ while ($Cnt > 0) {
+ ++$i; --$Cnt;
+ push @CompileOpts, $ARGV[$i];
+ push @LinkOpts, $ARGV[$i];
+ }
+ next;
+ }
+
+ # Ignored options.
+ if (defined $IgnoredOptionMap{$ArgKey}) {
+ my $Cnt = $IgnoredOptionMap{$ArgKey};
+ while ($Cnt > 0) {
+ ++$i; --$Cnt;
+ }
+ next;
+ }
+
+ # Compile mode flags.
+  if ($Arg =~ /^-[DIU](.*)$/) {
+ my $Tmp = $Arg;
+ if ($1 eq '') {
+ # FIXME: Check if we are going off the end.
+ ++$i;
+ $Tmp = $Arg . $ARGV[$i];
+ }
+ push @CompileOpts,$Tmp;
+ next;
+ }
+
+ # Language.
+ if ($Arg eq '-x') {
+ $Lang = $ARGV[$i+1];
+ ++$i; next;
+ }
+
+ # Output file.
+ if ($Arg eq '-o') {
+ ++$i;
+ $Output = $ARGV[$i];
+ next;
+ }
+
+ # Get the link mode.
+  if ($Arg =~ /^-[lLO]/) {
+ if ($Arg eq '-O') { push @LinkOpts,'-O1'; }
+ elsif ($Arg eq '-Os') { push @LinkOpts,'-O2'; }
+ else { push @LinkOpts,$Arg; }
+ next;
+ }
+
+ if ($Arg =~ /^-std=/) {
+ push @CompileOpts,$Arg;
+ next;
+ }
+
+# if ($Arg =~ /^-f/) {
+# # FIXME: Not sure if the remaining -fxxxx options have no arguments.
+# push @CompileOpts,$Arg;
+# push @LinkOpts,$Arg; # FIXME: Not sure if these are link opts.
+# }
+
+ # Get the compiler/link mode.
+ if ($Arg =~ /^-F(.+)$/) {
+ my $Tmp = $Arg;
+ if ($1 eq '') {
+ # FIXME: Check if we are going off the end.
+ ++$i;
+ $Tmp = $Arg . $ARGV[$i];
+ }
+ push @CompileOpts,$Tmp;
+ push @LinkOpts,$Tmp;
+ next;
+ }
+
+ # Input files.
+ if ($Arg eq '-filelist') {
+ # FIXME: Make sure we aren't walking off the end.
+ open(IN, $ARGV[$i+1]);
+ while (<IN>) { s/\015?\012//; push @Files,$_; }
+ close(IN);
+ ++$i; next;
+ }
+
+ if (!($Arg =~ /^-/)) {
+ push @Files,$Arg; next;
+ }
+}
+
+if ($Action eq 'compile' or $Action eq 'link') {
+ my @Archs = keys %ArchsSeen;
+ # Skip the file if we don't support the architectures specified.
+ exit 0 if ($HadArch && scalar(@Archs) == 0);
+
+ foreach my $file (@Files) {
+ # Determine the language for the file.
+ my $FileLang = $Lang;
+
+ if (!defined($FileLang)) {
+ # Infer the language from the extension.
+ if ($file =~ /[.]([^.]+)$/) {
+ $FileLang = $LangMap{$1};
+ }
+ }
+
+ next if (!defined $FileLang);
+ next if (!defined $LangsAccepted{$FileLang});
+
+ my @CmdArgs;
+ my @AnalyzeArgs;
+
+ if ($FileLang ne 'unknown') {
+ push @CmdArgs,'-x';
+ push @CmdArgs,$FileLang;
+ }
+
+ if (defined $StoreModel) {
+ push @AnalyzeArgs, "-analyzer-store=$StoreModel";
+ }
+
+ if (defined $ConstraintsModel) {
+ push @AnalyzeArgs, "-analyzer-constraints=$ConstraintsModel";
+ }
+
+ if (defined $OutputFormat) {
+ push @AnalyzeArgs, "-analyzer-output=" . $OutputFormat;
+ if ($OutputFormat eq "plist") {
+ # Change "Output" to be a file.
+ my ($h, $f) = tempfile("report-XXXXXX", SUFFIX => ".plist",
+ DIR => $HtmlDir);
+ $ResultFile = $f;
+ $CleanupFile = $f;
+ }
+ }
+
+ push @CmdArgs,@CompileOpts;
+ push @CmdArgs,$file;
+
+ if (scalar @Archs) {
+ foreach my $arch (@Archs) {
+ my @NewArgs;
+ push @NewArgs, '-arch';
+ push @NewArgs, $arch;
+ push @NewArgs, @CmdArgs;
+ Analyze($ClangCC, \@NewArgs, \@AnalyzeArgs, $FileLang, $Output,
+ $Verbose, $HtmlDir, $file, $Analyses);
+ }
+ }
+ else {
+ Analyze($ClangCC, \@CmdArgs, \@AnalyzeArgs, $FileLang, $Output,
+ $Verbose, $HtmlDir, $file, $Analyses);
+ }
+ }
+}
+
+exit($Status >> 8);
+
diff --git a/utils/pch-test.pl b/utils/pch-test.pl
new file mode 100755
index 0000000..2e17117
--- /dev/null
+++ b/utils/pch-test.pl
@@ -0,0 +1,61 @@
+#!/usr/bin/perl -w
+
+# This tiny little script, which should be run from the clang
+# directory (with clang-cc in your patch), tries to take each
+# compilable Clang test and build a PCH file from that test, then read
+# and dump the contents of the PCH file just created.
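+#
+# For each compilable test it effectively runs (illustrative):
+#   clang-cc -emit-pch -x c -o foo.c.pch foo.c
+#   clang-cc -include-pch foo.c.pch -x c -ast-dump /dev/null
+# printing '.' per compilable test and 'x' per skipped file.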
+use POSIX;
+
+$exitcode = 0;
+sub testfiles($$) {
+ my $suffix = shift;
+ my $language = shift;
+ my $passed = 0;
+ my $failed = 0;
+ my $skipped = 0;
+
+ @files = `ls test/*/*.$suffix`;
+ foreach $file (@files) {
+ chomp($file);
+ my $code = system("clang-cc -fsyntax-only -x $language $file > /dev/null 2>&1");
+ if ($code == 0) {
+ print(".");
+ $code = system("clang-cc -emit-pch -x $language -o $file.pch $file > /dev/null 2>&1");
+ if ($code == 0) {
+ $code = system("clang-cc -include-pch $file.pch -x $language -ast-dump /dev/null > /dev/null 2>&1");
+ if ($code == 0) {
+ $passed++;
+ } elsif (($code & 0xFF) == SIGINT) {
+ exit($exitcode);
+ } else {
+ print("\n---Failed to dump AST file for \"$file\"---\n");
+ $exitcode = 1;
+ $failed++;
+ }
+ unlink "$file.pch";
+ } elsif (($code & 0xFF) == SIGINT) {
+ exit($exitcode);
+ } else {
+ print("\n---Failed to build PCH file for \"$file\"---\n");
+ $exitcode = 1;
+ $failed++;
+ }
+ } elsif (($code & 0xFF) == SIGINT) {
+ exit($exitcode);
+ } else {
+ print("x");
+ $skipped++;
+ }
+ }
+
+ print("\n\n$passed tests passed\n");
+ print("$failed tests failed\n");
+ print("$skipped tests skipped ('x')\n")
+}
+
+printf("-----Testing precompiled headers for C-----\n");
+testfiles("c", "c");
+printf("\n-----Testing precompiled headers for Objective-C-----\n");
+testfiles("m", "objective-c");
+print("\n");
+exit($exitcode);
diff --git a/utils/scan-build b/utils/scan-build
new file mode 100755
index 0000000..5835628
--- /dev/null
+++ b/utils/scan-build
@@ -0,0 +1,1278 @@
+#!/usr/bin/env perl
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# A script designed to wrap a build so that all calls to gcc are intercepted
+# and piped to the static analyzer.
+#
+##===----------------------------------------------------------------------===##
+
+use strict;
+use warnings;
+use FindBin qw($RealBin);
+use Digest::MD5;
+use File::Basename;
+use Term::ANSIColor;
+use Term::ANSIColor qw(:constants);
+use Cwd qw/ getcwd abs_path /;
+use Sys::Hostname;
+
+my $Verbose = 0; # Verbose output from this script.
+my $Prog = "scan-build";
+my $BuildName;
+my $BuildDate;
+my $CXX; # Leave undefined initially.
+
+my $TERM = $ENV{'TERM'};
+my $UseColor = (defined $TERM and $TERM eq 'xterm-color' and -t STDOUT
+ and defined $ENV{'SCAN_BUILD_COLOR'});
+
+my $UserName = HtmlEscape(getpwuid($<) || 'unknown');
+my $HostName = HtmlEscape(hostname() || 'unknown');
+my $CurrentDir = HtmlEscape(getcwd());
+my $CurrentDirSuffix = basename($CurrentDir);
+
+my $CmdArgs;
+
+my $HtmlTitle;
+
+my $Date = localtime();
+
+##----------------------------------------------------------------------------##
+# Diagnostics
+##----------------------------------------------------------------------------##
+
+sub Diag {
+ if ($UseColor) {
+ print BOLD, MAGENTA "$Prog: @_";
+ print RESET;
+ }
+ else {
+ print "$Prog: @_";
+ }
+}
+
+sub DiagCrashes {
+ my $Dir = shift;
+ Diag ("The analyzer encountered problems on some source files.\n");
+ Diag ("Preprocessed versions of these sources were deposited in '$Dir/failures'.\n");
+ Diag ("Please consider submitting a bug report using these files:\n");
+ Diag (" http://clang.llvm.org/StaticAnalysisUsage.html#filingbugs\n")
+}
+
+sub DieDiag {
+ if ($UseColor) {
+ print BOLD, RED "$Prog: ";
+ print RESET, RED @_;
+ print RESET;
+ }
+ else {
+ print "$Prog: ", @_;
+ }
+ exit(0);
+}
+
+##----------------------------------------------------------------------------##
+# Some initial preprocessing of Clang options.
+##----------------------------------------------------------------------------##
+
+# First, look for 'clang-cc' in libexec.
+my $ClangCCSB = Cwd::realpath("$RealBin/libexec/clang-cc");
+# Second, look for 'clang-cc' in the same directory as scan-build.
+if (!defined $ClangCCSB || ! -x $ClangCCSB) {
+ $ClangCCSB = Cwd::realpath("$RealBin/clang-cc");
+}
+# Third, look for 'clang-cc' in ../libexec
+if (!defined $ClangCCSB || ! -x $ClangCCSB) {
+ $ClangCCSB = Cwd::realpath("$RealBin/../libexec/clang-cc");
+}
+# Finally, default to looking for 'clang-cc' in the path.
+if (!defined $ClangCCSB || ! -x $ClangCCSB) {
+ $ClangCCSB = "clang-cc";
+}
+my $ClangCC = $ClangCCSB;
+
+# Now find 'clang'. First, look for 'clang' in bin.
+my $ClangSB = Cwd::realpath("$RealBin/bin/clang");
+# Second, look for 'clang' in the same directory as scan-build.
+if (!defined $ClangSB || ! -x $ClangSB) {
+  $ClangSB = Cwd::realpath("$RealBin/clang");
+}
+# Third, look for 'clang' in ../bin
+if (!defined $ClangSB || ! -x $ClangSB) {
+ $ClangSB = Cwd::realpath("$RealBin/../bin/clang");
+}
+# Finally, default to looking for 'clang' in the path.
+if (!defined $ClangSB || ! -x $ClangSB) {
+ $ClangSB = "clang";
+}
+my $Clang = $ClangSB;
+
+
+my %AvailableAnalyses;
+
+# Query clang for analysis options.
+open(PIPE, "-|", $ClangCC, "--help") or
+ DieDiag("Cannot execute '$ClangCC'\n");
+
+my $FoundAnalysis = 0;
+
+while(<PIPE>) {
+ if ($FoundAnalysis == 0) {
+ if (/Checks and Analyses/) {
+ $FoundAnalysis = 1;
+ }
+ next;
+ }
+
+ if (/^\s\s\s\s([^\s]+)\s(.+)$/) {
+ next if ($1 =~ /-dump/ or $1 =~ /-view/
+ or $1 =~ /-checker-simple/ or $1 =~ /-warn-uninit/);
+
+ $AvailableAnalyses{$1} = $2;
+ next;
+ }
+ last;
+}
+
+close (PIPE);
+
+my %AnalysesDefaultEnabled = (
+ '-warn-dead-stores' => 1,
+ '-checker-cfref' => 1,
+ '-warn-objc-methodsigs' => 1,
+ # Do not enable the missing -dealloc check by default.
+ # '-warn-objc-missing-dealloc' => 1,
+ '-warn-objc-unused-ivars' => 1,
+);
+
+##----------------------------------------------------------------------------##
+# GetHTMLRunDir - Construct an HTML directory name for the current sub-run.
+##----------------------------------------------------------------------------##
+
+sub GetHTMLRunDir {
+
+ die "Not enough arguments." if (@_ == 0);
+ my $Dir = shift @_;
+
+ my $TmpMode = 0;
+ if (!defined $Dir) {
+ if (`uname` =~ /Darwin/) {
+ $Dir = $ENV{'TMPDIR'};
+ if (!defined $Dir) { $Dir = "/tmp"; }
+ }
+ else {
+ $Dir = "/tmp";
+ }
+
+ $TmpMode = 1;
+ }
+
+ # Chop off any trailing '/' characters.
+ while ($Dir =~ /\/$/) { chop $Dir; }
+
+ # Get current date and time.
+
+ my @CurrentTime = localtime();
+
+ my $year = $CurrentTime[5] + 1900;
+ my $day = $CurrentTime[3];
+ my $month = $CurrentTime[4] + 1;
+
+ my $DateString = sprintf("%d-%02d-%02d", $year, $month, $day);
+
+ # Determine the run number.
+
+ my $RunNumber;
+
+ if (-d $Dir) {
+
+ if (! -r $Dir) {
+ DieDiag("directory '$Dir' exists but is not readable.\n");
+ }
+
+ # Iterate over all files in the specified directory.
+
+ my $max = 0;
+
+ opendir(DIR, $Dir);
+ my @FILES = grep { -d "$Dir/$_" } readdir(DIR);
+ closedir(DIR);
+
+ foreach my $f (@FILES) {
+
+ # Strip the prefix '$Prog-' if we are dumping files to /tmp.
+ if ($TmpMode) {
+ next if (!($f =~ /^$Prog-(.+)/));
+ $f = $1;
+ }
+
+
+ my @x = split/-/, $f;
+ next if (scalar(@x) != 4);
+ next if ($x[0] != $year);
+ next if ($x[1] != $month);
+ next if ($x[2] != $day);
+
+ if ($x[3] > $max) {
+ $max = $x[3];
+ }
+ }
+
+ $RunNumber = $max + 1;
+ }
+ else {
+
+ if (-x $Dir) {
+ DieDiag("'$Dir' exists but is not a directory.\n");
+ }
+
+ if ($TmpMode) {
+ DieDiag("The directory '/tmp' does not exist or cannot be accessed.\n");
+ }
+
+ # $Dir does not exist. It will be automatically created by the
+ # clang driver. Set the run number to 1.
+
+ $RunNumber = 1;
+ }
+
+ die "RunNumber must be defined!" if (!defined $RunNumber);
+
+ # Append the run number.
+ my $NewDir;
+ if ($TmpMode) {
+ $NewDir = "$Dir/$Prog-$DateString-$RunNumber";
+ }
+ else {
+ $NewDir = "$Dir/$DateString-$RunNumber";
+ }
+ system 'mkdir','-p',$NewDir;
+ return $NewDir;
+}
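GetHTMLRunDir names each output directory YYYY-MM-DD-N, scanning the existing entries so that N is one past the largest run already recorded for today. A minimal Python sketch of the same naming scheme (illustrative only, not part of scan-build; the function name is made up):

import os, time

def next_run_dir(base):
    # Runs are named YYYY-MM-DD-N; pick N = (largest existing N for today) + 1.
    date = time.strftime('%Y-%m-%d')
    run = 0
    if os.path.isdir(base):
        for name in os.listdir(base):
            parts = name.split('-')
            if (len(parts) == 4 and '-'.join(parts[:3]) == date
                    and parts[3].isdigit()):
                run = max(run, int(parts[3]))
    return os.path.join(base, '%s-%d' % (date, run + 1))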
+
+sub SetHtmlEnv {
+
+ die "Wrong number of arguments." if (scalar(@_) != 2);
+
+ my $Args = shift;
+ my $Dir = shift;
+
+ die "No build command." if (scalar(@$Args) == 0);
+
+ my $Cmd = $$Args[0];
+
+ if ($Cmd =~ /configure/) {
+ return;
+ }
+
+ if ($Verbose) {
+ Diag("Emitting reports for this run to '$Dir'.\n");
+ }
+
+ $ENV{'CCC_ANALYZER_HTML'} = $Dir;
+}
+
+##----------------------------------------------------------------------------##
+# ComputeDigest - Compute a digest of the specified file.
+##----------------------------------------------------------------------------##
+
+sub ComputeDigest {
+ my $FName = shift;
+ DieDiag("Cannot read $FName to compute Digest.\n") if (! -r $FName);
+
+ # Use Digest::MD5. We don't have to be cryptographically secure. We're
+ # just looking for duplicate files that come from a non-malicious source.
+ # We use Digest::MD5 because it is a standard Perl module that should
+ # come bundled on most systems.
+ open(FILE, $FName) or DieDiag("Cannot open $FName when computing Digest.\n");
+ binmode FILE;
+ my $Result = Digest::MD5->new->addfile(*FILE)->hexdigest;
+ close(FILE);
+
+ # Return the digest.
+ return $Result;
+}
+
+##----------------------------------------------------------------------------##
+# UpdatePrefix - Compute the common prefix of files.
+##----------------------------------------------------------------------------##
+
+my $Prefix;
+
+sub UpdatePrefix {
+ my $x = shift;
+ my $y = basename($x);
+ $x =~ s/\Q$y\E$//;
+
+ if (!defined $Prefix) {
+ $Prefix = $x;
+ return;
+ }
+
+ chop $Prefix while (!($x =~ /^\Q$Prefix/));
+}
+
+sub GetPrefix {
+ return $Prefix;
+}
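UpdatePrefix maintains the longest path prefix shared by every report it has seen, so the result tables can print file names relative to the project root. A rough Python equivalent (illustrative only; the function name is made up, and os.path.commonprefix is character-wise, just like the chop loop above):

import os.path

def update_prefix(prefix, path):
    # Keep the directory part (including the trailing slash), then shrink
    # the stored prefix until it is also a prefix of this directory.
    directory = path[:len(path) - len(os.path.basename(path))]
    if prefix is None:
        return directory
    return os.path.commonprefix([prefix, directory])

# update_prefix(update_prefix(None, '/src/a/x.c'), '/src/b/y.c') == '/src/'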
+
+##----------------------------------------------------------------------------##
+# UpdateInFilePath - Update the path in the report file.
+##----------------------------------------------------------------------------##
+
+sub UpdateInFilePath {
+ my $fname = shift;
+ my $regex = shift;
+ my $newtext = shift;
+
+ open (RIN, $fname) or die "cannot open $fname";
+ open (ROUT, ">", "$fname.tmp") or die "cannot open $fname.tmp";
+
+ while (<RIN>) {
+ s/$regex/$newtext/;
+ print ROUT $_;
+ }
+
+ close (ROUT);
+ close (RIN);
+ system("mv", "$fname.tmp", $fname);
+}
+
+##----------------------------------------------------------------------------##
+# ScanFile - Scan a report file for various identifying attributes.
+##----------------------------------------------------------------------------##
+
+# Sometimes a source file is scanned more than once, and thus produces
+# multiple error reports. We use a cache to solve this problem.
+
+my %AlreadyScanned;
+
+sub ScanFile {
+
+ my $Index = shift;
+ my $Dir = shift;
+ my $FName = shift;
+
+ # Compute a digest for the report file. Determine if we have already
+ # scanned a file that looks just like it.
+
+ my $digest = ComputeDigest("$Dir/$FName");
+
+ if (defined $AlreadyScanned{$digest}) {
+ # Redundant file. Remove it.
+ system ("rm", "-f", "$Dir/$FName");
+ return;
+ }
+
+ $AlreadyScanned{$digest} = 1;
+
+ # At this point the report file is not world readable. Make it happen.
+ system ("chmod", "644", "$Dir/$FName");
+
+ # Scan the report file for tags.
+ open(IN, "$Dir/$FName") or DieDiag("Cannot open '$Dir/$FName'\n");
+
+ my $BugType = "";
+ my $BugFile = "";
+ my $BugCategory;
+ my $BugPathLength = 1;
+ my $BugLine = 0;
+ my $found = 0;
+
+ while (<IN>) {
+
+ last if ($found == 5);
+
+ if (/<!-- BUGTYPE (.*) -->$/) {
+ $BugType = $1;
+ ++$found;
+ }
+ elsif (/<!-- BUGFILE (.*) -->$/) {
+ $BugFile = abs_path($1);
+ UpdatePrefix($BugFile);
+ ++$found;
+ }
+ elsif (/<!-- BUGPATHLENGTH (.*) -->$/) {
+ $BugPathLength = $1;
+ ++$found;
+ }
+ elsif (/<!-- BUGLINE (.*) -->$/) {
+ $BugLine = $1;
+ ++$found;
+ }
+ elsif (/<!-- BUGCATEGORY (.*) -->$/) {
+ $BugCategory = $1;
+ ++$found;
+ }
+ }
+
+ close(IN);
+
+ if (!defined $BugCategory) {
+ $BugCategory = "Other";
+ }
+
+ push @$Index,[ $FName, $BugCategory, $BugType, $BugFile, $BugLine,
+ $BugPathLength ];
+}
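ComputeDigest and ScanFile together implement duplicate-report detection: hash each report file and discard any file whose digest has already been seen. The same idea as a small Python sketch (illustrative only, not part of the patch; the function names and chunk size are arbitrary):

import hashlib, os

def digest(path):
    # MD5 is enough: the goal is to spot identical files from a
    # non-malicious source, not to resist deliberate collisions.
    h = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            h.update(chunk)
    return h.hexdigest()

def prune_duplicate_reports(report_dir):
    # Delete report files whose contents duplicate an earlier report.
    seen = set()
    for name in sorted(os.listdir(report_dir)):
        path = os.path.join(report_dir, name)
        if not os.path.isfile(path):
            continue
        d = digest(path)
        if d in seen:
            os.remove(path)   # redundant report
        else:
            seen.add(d)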
+
+##----------------------------------------------------------------------------##
+# CopyFiles - Copy resource files to target directory.
+##----------------------------------------------------------------------------##
+
+sub CopyFiles {
+
+ my $Dir = shift;
+
+ my $JS = Cwd::realpath("$RealBin/sorttable.js");
+
+ DieDiag("Cannot find 'sorttable.js'.\n")
+ if (! -r $JS);
+
+ system ("cp", $JS, "$Dir");
+
+ DieDiag("Could not copy 'sorttable.js' to '$Dir'.\n")
+ if (! -r "$Dir/sorttable.js");
+
+ my $CSS = Cwd::realpath("$RealBin/scanview.css");
+
+ DieDiag("Cannot find 'scanview.css'.\n")
+ if (! -r $CSS);
+
+ system ("cp", $CSS, "$Dir");
+
+ DieDiag("Could not copy 'scanview.css' to '$Dir'.\n")
+ if (! -r $CSS);
+}
+
+##----------------------------------------------------------------------------##
+# Postprocess - Postprocess the results of an analysis scan.
+##----------------------------------------------------------------------------##
+
+sub Postprocess {
+
+ my $Dir = shift;
+ my $BaseDir = shift;
+
+ die "No directory specified." if (!defined $Dir);
+
+ if (! -d $Dir) {
+ Diag("No bugs found.\n");
+ return 0;
+ }
+
+ opendir(DIR, $Dir);
+ my @files = grep { /^report-.*\.html$/ } readdir(DIR);
+ closedir(DIR);
+
+ if (scalar(@files) == 0 and ! -e "$Dir/failures") {
+ Diag("Removing directory '$Dir' because it contains no reports.\n");
+ system ("rm", "-fR", $Dir);
+ return 0;
+ }
+
+ # Scan each report file and build an index.
+ my @Index;
+ foreach my $file (@files) { ScanFile(\@Index, $Dir, $file); }
+
+ # Scan the failures directory and use the information in the .info files
+ # to update the common prefix directory.
+ my @failures;
+ my @attributes_ignored;
+ if (-d "$Dir/failures") {
+ opendir(DIR, "$Dir/failures");
+ @failures = grep { /[.]info.txt$/ && !/attribute_ignored/; } readdir(DIR);
+ closedir(DIR);
+ opendir(DIR, "$Dir/failures");
+ @attributes_ignored = grep { /^attribute_ignored/; } readdir(DIR);
+ closedir(DIR);
+ foreach my $file (@failures) {
+ open IN, "$Dir/failures/$file" or DieDiag("cannot open $file\n");
+ my $Path = <IN>;
+ if (defined $Path) { UpdatePrefix($Path); }
+ close IN;
+ }
+ }
+
+ # Generate an index.html file.
+ my $FName = "$Dir/index.html";
+ open(OUT, ">", $FName) or DieDiag("Cannot create file '$FName'\n");
+
+ # Print out the header.
+
+print OUT <<ENDTEXT;
+<html>
+<head>
+<title>${HtmlTitle}</title>
+<link type="text/css" rel="stylesheet" href="scanview.css"/>
+<script src="sorttable.js"></script>
+<script language='javascript' type="text/javascript">
+function SetDisplay(RowClass, DisplayVal)
+{
+ var Rows = document.getElementsByTagName("tr");
+ for ( var i = 0 ; i < Rows.length; ++i ) {
+ if (Rows[i].className == RowClass) {
+ Rows[i].style.display = DisplayVal;
+ }
+ }
+}
+
+function CopyCheckedStateToCheckButtons(SummaryCheckButton) {
+ var Inputs = document.getElementsByTagName("input");
+ for ( var i = 0 ; i < Inputs.length; ++i ) {
+ if (Inputs[i].type == "checkbox") {
+ if(Inputs[i] != SummaryCheckButton) {
+ Inputs[i].checked = SummaryCheckButton.checked;
+ Inputs[i].onclick();
+ }
+ }
+ }
+}
+
+function returnObjById( id ) {
+ if (document.getElementById)
+ var returnVar = document.getElementById(id);
+ else if (document.all)
+ var returnVar = document.all[id];
+ else if (document.layers)
+ var returnVar = document.layers[id];
+ return returnVar;
+}
+
+var NumUnchecked = 0;
+
+function ToggleDisplay(CheckButton, ClassName) {
+ if (CheckButton.checked) {
+ SetDisplay(ClassName, "");
+ if (--NumUnchecked == 0) {
+ returnObjById("AllBugsCheck").checked = true;
+ }
+ }
+ else {
+ SetDisplay(ClassName, "none");
+ NumUnchecked++;
+ returnObjById("AllBugsCheck").checked = false;
+ }
+}
+</script>
+<!-- SUMMARYENDHEAD -->
+</head>
+<body>
+<h1>${HtmlTitle}</h1>
+
+<table>
+<tr><th>User:</th><td>${UserName}\@${HostName}</td></tr>
+<tr><th>Working Directory:</th><td>${CurrentDir}</td></tr>
+<tr><th>Command Line:</th><td>${CmdArgs}</td></tr>
+<tr><th>Date:</th><td>${Date}</td></tr>
+ENDTEXT
+
+print OUT "<tr><th>Version:</th><td>${BuildName} (${BuildDate})</td></tr>\n"
+ if (defined($BuildName) && defined($BuildDate));
+
+print OUT <<ENDTEXT;
+</table>
+ENDTEXT
+
+ if (scalar(@files)) {
+ # Print out the summary table.
+ my %Totals;
+
+ for my $row ( @Index ) {
+ my $bug_type = ($row->[2]);
+ my $bug_category = ($row->[1]);
+ my $key = "$bug_category:$bug_type";
+
+ if (!defined $Totals{$key}) { $Totals{$key} = [1,$bug_category,$bug_type]; }
+ else { $Totals{$key}->[0]++; }
+ }
+
+ print OUT "<h2>Bug Summary</h2>";
+
+ if (defined $BuildName) {
+ print OUT "\n<p>Results in this analysis run are based on analyzer build <b>$BuildName</b>.</p>\n"
+ }
+
+ my $TotalBugs = scalar(@Index);
+print OUT <<ENDTEXT;
+<table>
+<thead><tr><td>Bug Type</td><td>Quantity</td><td class="sorttable_nosort">Display?</td></tr></thead>
+<tr style="font-weight:bold"><td class="SUMM_DESC">All Bugs</td><td class="Q">$TotalBugs</td><td><center><input type="checkbox" id="AllBugsCheck" onClick="CopyCheckedStateToCheckButtons(this);" checked/></center></td></tr>
+ENDTEXT
+
+ my $last_category;
+
+ for my $key (
+ sort {
+ my $x = $Totals{$a};
+ my $y = $Totals{$b};
+ my $res = $x->[1] cmp $y->[1];
+ $res = $x->[2] cmp $y->[2] if ($res == 0);
+ $res
+ } keys %Totals )
+ {
+ my $val = $Totals{$key};
+ my $category = $val->[1];
+ if (!defined $last_category or $last_category ne $category) {
+ $last_category = $category;
+ print OUT "<tr><th>$category</th><th colspan=2></th></tr>\n";
+ }
+ my $x = lc $key;
+ $x =~ s/[ ,'":\/()]+/_/g;
+ print OUT "<tr><td class=\"SUMM_DESC\">";
+ print OUT $val->[2];
+ print OUT "</td><td class=\"Q\">";
+ print OUT $val->[0];
+ print OUT "</td><td><center><input type=\"checkbox\" onClick=\"ToggleDisplay(this,'bt_$x');\" checked/></center></td></tr>\n";
+ }
+
+ # Print out the table of errors.
+
+print OUT <<ENDTEXT;
+</table>
+<h2>Reports</h2>
+
+<table class="sortable" style="table-layout:automatic">
+<thead><tr>
+ <td>Bug Group</td>
+ <td class="sorttable_sorted">Bug Type<span id="sorttable_sortfwdind">&nbsp;&#x25BE;</span></td>
+ <td>File</td>
+ <td class="Q">Line</td>
+ <td class="Q">Path Length</td>
+ <td class="sorttable_nosort"></td>
+ <!-- REPORTBUGCOL -->
+</tr></thead>
+<tbody>
+ENDTEXT
+
+ my $prefix = GetPrefix();
+ my $regex;
+ my $InFileRegex;
+ my $InFilePrefix = "File:</td><td>";
+
+ if (defined $prefix) {
+ $regex = qr/^\Q$prefix\E/is;
+ $InFileRegex = qr/\Q$InFilePrefix$prefix\E/is;
+ }
+
+ for my $row ( sort { $a->[2] cmp $b->[2] } @Index ) {
+ my $x = "$row->[1]:$row->[2]";
+ $x = lc $x;
+ $x =~ s/[ ,'":\/()]+/_/g;
+
+ my $ReportFile = $row->[0];
+
+ print OUT "<tr class=\"bt_$x\">";
+ print OUT "<td class=\"DESC\">";
+ print OUT $row->[1];
+ print OUT "</td>";
+ print OUT "<td class=\"DESC\">";
+ print OUT $row->[2];
+ print OUT "</td>";
+
+ # Update the file prefix.
+ my $fname = $row->[3];
+
+ if (defined $regex) {
+ $fname =~ s/$regex//;
+ UpdateInFilePath("$Dir/$ReportFile", $InFileRegex, $InFilePrefix)
+ }
+
+ print OUT "<td>";
+ my @fname = split /\//,$fname;
+ if ($#fname > 0) {
+ while ($#fname >= 0) {
+ my $x = shift @fname;
+ print OUT $x;
+ if ($#fname >= 0) {
+ print OUT "<span class=\"W\"> </span>/";
+ }
+ }
+ }
+ else {
+ print OUT $fname;
+ }
+ print OUT "</td>";
+
+ # Print out the quantities.
+ for my $j ( 4 .. 5 ) {
+ print OUT "<td class=\"Q\">$row->[$j]</td>";
+ }
+
+ # Print the rest of the columns.
+ for (my $j = 6; $j <= $#{$row}; ++$j) {
+ print OUT "<td>$row->[$j]</td>"
+ }
+
+ # Emit the "View" link.
+ print OUT "<td><a href=\"$ReportFile#EndPath\">View Report</a></td>";
+
+ # Emit REPORTBUG markers.
+ print OUT "\n<!-- REPORTBUG id=\"$ReportFile\" -->\n";
+
+ # End the row.
+ print OUT "</tr>\n";
+ }
+
+ print OUT "</tbody>\n</table>\n\n";
+ }
+
+ if (scalar (@failures) || scalar(@attributes_ignored)) {
+ print OUT "<h2>Analyzer Failures</h2>\n";
+
+ if (scalar @attributes_ignored) {
+ print OUT "The analyzer's parser ignored the following attributes:<p>\n";
+ print OUT "<table>\n";
+ print OUT "<thead><tr><td>Attribute</td><td>Source File</td><td>Preprocessed File</td><td>STDERR Output</td></tr></thead>\n";
+ foreach my $file (sort @attributes_ignored) {
+ die "cannot demangle attribute name\n" if (! ($file =~ /^attribute_ignored_(.+).txt/));
+ my $attribute = $1;
+ # Open the attribute file to get the first file that failed.
+ next if (!open (ATTR, "$Dir/failures/$file"));
+ my $ppfile = <ATTR>;
+ chomp $ppfile;
+ close ATTR;
+ next if (! -e "$Dir/failures/$ppfile");
+ # Open the info file and get the name of the source file.
+ open (INFO, "$Dir/failures/$ppfile.info.txt") or
+ die "Cannot open $Dir/failures/$ppfile.info.txt\n";
+ my $srcfile = <INFO>;
+ chomp $srcfile;
+ close (INFO);
+ # Print the information in the table.
+ my $prefix = GetPrefix();
+ if (defined $prefix) { $srcfile =~ s/^\Q$prefix//; }
+ print OUT "<tr><td>$attribute</td><td>$srcfile</td><td><a href=\"failures/$ppfile\">$ppfile</a></td><td><a href=\"failures/$ppfile.stderr.txt\">$ppfile.stderr.txt</a></td></tr>\n";
+ my $ppfile_clang = $ppfile;
+ $ppfile_clang =~ s/[.](.+)$/.clang.$1/;
+ print OUT " <!-- REPORTPROBLEM src=\"$srcfile\" file=\"failures/$ppfile\" clangfile=\"failures/$ppfile_clang\" stderr=\"failures/$ppfile.stderr.txt\" info=\"failures/$ppfile.info.txt\" -->\n";
+ }
+ print OUT "</table>\n";
+ }
+
+ if (scalar @failures) {
+ print OUT "<p>The analyzer had problems processing the following files:</p>\n";
+ print OUT "<table>\n";
+ print OUT "<thead><tr><td>Problem</td><td>Source File</td><td>Preprocessed File</td><td>STDERR Output</td></tr></thead>\n";
+ foreach my $file (sort @failures) {
+ $file =~ /(.+).info.txt$/;
+ # Get the preprocessed file.
+ my $ppfile = $1;
+ # Open the info file and get the name of the source file.
+ open (INFO, "$Dir/failures/$file") or
+ die "Cannot open $Dir/failures/$file\n";
+ my $srcfile = <INFO>;
+ chomp $srcfile;
+ my $problem = <INFO>;
+ chomp $problem;
+ close (INFO);
+ # Print the information in the table.
+ my $prefix = GetPrefix();
+ if (defined $prefix) { $srcfile =~ s/^\Q$prefix//; }
+ print OUT "<tr><td>$problem</td><td>$srcfile</td><td><a href=\"failures/$ppfile\">$ppfile</a></td><td><a href=\"failures/$ppfile.stderr.txt\">$ppfile.stderr.txt</a></td></tr>\n";
+ my $ppfile_clang = $ppfile;
+ $ppfile_clang =~ s/[.](.+)$/.clang.$1/;
+ print OUT " <!-- REPORTPROBLEM src=\"$srcfile\" file=\"failures/$ppfile\" clangfile=\"failures/$ppfile_clang\" stderr=\"failures/$ppfile.stderr.txt\" info=\"failures/$ppfile.info.txt\" -->\n";
+ }
+ print OUT "</table>\n";
+ }
+ print OUT "<p>Please consider submitting preprocessed files as <a href=\"http://clang.llvm.org/StaticAnalysisUsage.html#filingbugs\">bug reports</a>. <!-- REPORTCRASHES --> </p>\n";
+ }
+
+ print OUT "</body></html>\n";
+ close(OUT);
+ CopyFiles($Dir);
+
+ # Make sure $Dir and $BaseDir are world readable/executable.
+ system("chmod", "755", $Dir);
+ if (defined $BaseDir) { system("chmod", "755", $BaseDir); }
+
+ my $Num = scalar(@Index);
+ Diag("$Num bugs found.\n");
+ if ($Num > 0 && -r "$Dir/index.html") {
+ Diag("Run 'scan-view $Dir' to examine bug reports.\n");
+ }
+
+ DiagCrashes($Dir) if (scalar @failures || scalar @attributes_ignored);
+
+ return $Num;
+}
+
+##----------------------------------------------------------------------------##
+# RunBuildCommand - Run the build command.
+##----------------------------------------------------------------------------##
+
+sub AddIfNotPresent {
+ my $Args = shift;
+ my $Arg = shift;
+ my $found = 0;
+
+ foreach my $k (@$Args) {
+ if ($k eq $Arg) {
+ $found = 1;
+ last;
+ }
+ }
+
+ if ($found == 0) {
+ push @$Args, $Arg;
+ }
+}
+
+sub RunBuildCommand {
+
+ my $Args = shift;
+ my $IgnoreErrors = shift;
+ my $Cmd = $Args->[0];
+ my $CCAnalyzer = shift;
+
+ # Get only the part of the command after the last '/'.
+ if ($Cmd =~ /\/([^\/]+)$/) {
+ $Cmd = $1;
+ }
+
+ if ($Cmd =~ /(.*\/?gcc[^\/]*$)/ or
+ $Cmd =~ /(.*\/?cc[^\/]*$)/ or
+ $Cmd =~ /(.*\/?llvm-gcc[^\/]*$)/ or
+ $Cmd =~ /(.*\/?ccc-analyzer[^\/]*$)/) {
+
+ if (!($Cmd =~ /ccc-analyzer/) and !defined $ENV{"CCC_CC"}) {
+ $ENV{"CCC_CC"} = $1;
+ }
+
+ shift @$Args;
+ unshift @$Args, $CCAnalyzer;
+ }
+ elsif ($IgnoreErrors) {
+ if ($Cmd eq "make" or $Cmd eq "gmake") {
+ AddIfNotPresent($Args,"-k");
+ AddIfNotPresent($Args,"-i");
+ }
+ elsif ($Cmd eq "xcodebuild") {
+ AddIfNotPresent($Args,"-PBXBuildsContinueAfterErrors=YES");
+ }
+ }
+
+ if ($Cmd eq "xcodebuild") {
+    # Check if using iPhone SDK 3.0 (simulator). If so, the compiler being
+ # used should be gcc-4.2.
+ if (!defined $ENV{"CCC_CC"}) {
+ for (my $i = 0 ; $i < scalar(@$Args); ++$i) {
+ if ($Args->[$i] eq "-sdk" && $i + 1 < scalar(@$Args)) {
+ if (@$Args[$i+1] =~ /^iphonesimulator3/) {
+ $ENV{"CCC_CC"} = "gcc-4.2";
+ }
+ }
+ }
+ }
+
+ # Disable distributed builds for xcodebuild.
+ AddIfNotPresent($Args,"-nodistribute");
+
+ # Disable PCH files until clang supports them.
+ AddIfNotPresent($Args,"GCC_PRECOMPILE_PREFIX_HEADER=NO");
+
+ # When 'CC' is set, xcodebuild uses it to do all linking, even if we are
+ # linking C++ object files. Set 'LDPLUSPLUS' so that xcodebuild uses 'g++'
+ # when linking such files.
+ die if (!defined $CXX);
+ my $LDPLUSPLUS = `which $CXX`;
+ $LDPLUSPLUS =~ s/\015?\012//; # strip newlines
+ $ENV{'LDPLUSPLUS'} = $LDPLUSPLUS;
+ }
+
+ return (system(@$Args) >> 8);
+}
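The core trick in RunBuildCommand is compiler interposition: remember the real compiler in the CCC_CC environment variable and splice the ccc-analyzer wrapper onto the front of the command line, so the build invokes the analyzer without knowing it. A stripped-down Python sketch of that idea (illustrative only; the regular expression and function name are simplifying assumptions, not the exact matching logic above):

import os, re, subprocess

def run_intercepted(argv, wrapper='ccc-analyzer'):
    # If the command looks like a compiler invocation, record the real
    # compiler for the wrapper and put the wrapper in front of the arguments.
    cmd = os.path.basename(argv[0])
    if re.match(r'(llvm-)?(gcc|cc)\b', cmd) and 'ccc-analyzer' not in cmd:
        os.environ.setdefault('CCC_CC', cmd)
        argv = [wrapper] + argv[1:]
    return subprocess.call(argv)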
+
+##----------------------------------------------------------------------------##
+# DisplayHelp - Utility function to display all help options.
+##----------------------------------------------------------------------------##
+
+sub DisplayHelp {
+
+print <<ENDTEXT;
+USAGE: $Prog [options] <build command> [build options]
+
+ENDTEXT
+
+ if (defined $BuildName) {
+ print "ANALYZER BUILD: $BuildName ($BuildDate)\n\n";
+ }
+
+print <<ENDTEXT;
+OPTIONS:
+
+ -analyze-headers - Also analyze functions in #included files.
+
+ -o - Target directory for HTML report files. Subdirectories
+ will be created as needed to represent separate "runs" of
+ the analyzer. If this option is not specified, a directory
+ is created in /tmp (TMPDIR on Mac OS X) to store the reports.
+
+ -h - Display this message.
+ --help
+
+ -k - Add a "keep on going" option to the specified build command.
+ --keep-going This option currently supports make and xcodebuild.
+ This is a convenience option; one can specify this
+ behavior directly using build options.
+
+ --html-title [title] - Specify the title used on generated HTML pages.
+ --html-title=[title] If not specified, a default title will be used.
+
+ -plist - By default the output of scan-build is a set of HTML files.
+ This option outputs the results as a set of .plist files.
+
+ --status-bugs - By default, the exit status of $Prog is the same as the
+ executed build command. Specifying this option causes the
+ exit status of $Prog to be 1 if it found potential bugs
+ and 0 otherwise.
+
+ --use-cc [compiler path] - By default, $Prog uses 'gcc' to compile and link
+ --use-cc=[compiler path] your C and Objective-C code. Use this option
+ to specify an alternate compiler.
+
+ --use-c++ [compiler path] - By default, $Prog uses 'g++' to compile and link
+ --use-c++=[compiler path] your C++ and Objective-C++ code. Use this option
+ to specify an alternate compiler.
+
+ -v - Verbose output from $Prog and the analyzer.
+ A second and third '-v' increases verbosity.
+
+ -V - View analysis results in a web browser when the build
+ --view completes.
+
+ADVANCED OPTIONS:
+
+ -constraints [model] - Specify the constraint engine used by the analyzer.
+ By default the 'range' model is used. Specifying
+ 'basic' uses a simpler, less powerful constraint model
+ used by checker-0.160 and earlier.
+
+ -store [model] - Specify the store model used by the analyzer. By default,
+                   the 'basic' store model is used. 'region' specifies a field-
+                   sensitive store model. Be warned that the 'region' model
+                   is still in a very early testing phase and may often crash.
+
+AVAILABLE ANALYSES (multiple analyses may be specified):
+
+ENDTEXT
+
+ foreach my $Analysis (sort keys %AvailableAnalyses) {
+ if (defined $AnalysesDefaultEnabled{$Analysis}) {
+ print " (+)";
+ }
+ else {
+ print " ";
+ }
+
+ print " $Analysis $AvailableAnalyses{$Analysis}\n";
+ }
+
+print <<ENDTEXT
+
+ NOTE: "(+)" indicates that an analysis is enabled by default unless one
+ or more analysis options are specified
+
+BUILD OPTIONS
+
+ You can specify any build option acceptable to the build command.
+
+EXAMPLE
+
+ $Prog -o /tmp/myhtmldir make -j4
+
+  The above example causes analysis reports to be deposited into
+  a subdirectory of "/tmp/myhtmldir" and runs "make" with the "-j4" option.
+ A different subdirectory is created each time $Prog analyzes a project.
+ The analyzer should support most parallel builds, but not distributed builds.
+
+ENDTEXT
+}
+
+##----------------------------------------------------------------------------##
+# HtmlEscape - HTML entity encode characters that are special in HTML
+##----------------------------------------------------------------------------##
+
+sub HtmlEscape {
+ # copy argument to new variable so we don't clobber the original
+ my $arg = shift || '';
+ my $tmp = $arg;
+ $tmp =~ s/&/&amp;/g;
+ $tmp =~ s/</&lt;/g;
+ $tmp =~ s/>/&gt;/g;
+ return $tmp;
+}
+
+##----------------------------------------------------------------------------##
+# ShellEscape - backslash escape characters that are special to the shell
+##----------------------------------------------------------------------------##
+
+sub ShellEscape {
+ # copy argument to new variable so we don't clobber the original
+ my $arg = shift || '';
+ if ($arg =~ /["\s]/) { return "'" . $arg . "'"; }
+ return $arg;
+}
+
+##----------------------------------------------------------------------------##
+# Process command-line arguments.
+##----------------------------------------------------------------------------##
+
+my $AnalyzeHeaders = 0;
+my $HtmlDir; # Parent directory to store HTML files.
+my $IgnoreErrors = 0; # Ignore build errors.
+my $ViewResults = 0; # View results when the build terminates.
+my $ExitStatusFoundBugs = 0; # Exit status reflects whether bugs were found
+my @AnalysesToRun;
+my $StoreModel;
+my $ConstraintsModel;
+my $OutputFormat;
+
+if (!@ARGV) {
+ DisplayHelp();
+ exit 1;
+}
+
+while (@ARGV) {
+
+ # Scan for options we recognize.
+
+ my $arg = $ARGV[0];
+
+ if ($arg eq "-h" or $arg eq "--help") {
+ DisplayHelp();
+ exit 0;
+ }
+
+ if ($arg eq '-analyze-headers') {
+ shift @ARGV;
+ $AnalyzeHeaders = 1;
+ next;
+ }
+
+ if (defined $AvailableAnalyses{$arg}) {
+ shift @ARGV;
+ push @AnalysesToRun, $arg;
+ next;
+ }
+
+ if ($arg eq "-o") {
+ shift @ARGV;
+
+ if (!@ARGV) {
+ DieDiag("'-o' option requires a target directory name.\n");
+ }
+
+ # Construct an absolute path. Uses the current working directory
+ # as a base if the original path was not absolute.
+ $HtmlDir = abs_path(shift @ARGV);
+
+ next;
+ }
+
+ if ($arg =~ /^--html-title(=(.+))?$/) {
+ shift @ARGV;
+
+ if (!defined $2 || $2 eq '') {
+ if (!@ARGV) {
+ DieDiag("'--html-title' option requires a string.\n");
+ }
+
+ $HtmlTitle = shift @ARGV;
+ } else {
+ $HtmlTitle = $2;
+ }
+
+ next;
+ }
+
+ if ($arg eq "-k" or $arg eq "--keep-going") {
+ shift @ARGV;
+ $IgnoreErrors = 1;
+ next;
+ }
+
+ if ($arg =~ /^--use-cc(=(.+))?$/) {
+ shift @ARGV;
+ my $cc;
+
+ if (!defined $2 || $2 eq "") {
+ if (!@ARGV) {
+ DieDiag("'--use-cc' option requires a compiler executable name.\n");
+ }
+ $cc = shift @ARGV;
+ }
+ else {
+ $cc = $2;
+ }
+
+ $ENV{"CCC_CC"} = $cc;
+ next;
+ }
+
+ if ($arg =~ /^--use-c\+\+(=(.+))?$/) {
+ shift @ARGV;
+
+ if (!defined $2 || $2 eq "") {
+ if (!@ARGV) {
+ DieDiag("'--use-c++' option requires a compiler executable name.\n");
+ }
+ $CXX = shift @ARGV;
+ }
+ else {
+ $CXX = $2;
+ }
+ next;
+ }
+
+ if ($arg eq "-v") {
+ shift @ARGV;
+ $Verbose++;
+ next;
+ }
+
+ if ($arg eq "-V" or $arg eq "--view") {
+ shift @ARGV;
+ $ViewResults = 1;
+ next;
+ }
+
+ if ($arg eq "--status-bugs") {
+ shift @ARGV;
+ $ExitStatusFoundBugs = 1;
+ next;
+ }
+
+ if ($arg eq "-store") {
+ shift @ARGV;
+ $StoreModel = shift @ARGV;
+ next;
+ }
+
+ if ($arg eq "-constraints") {
+ shift @ARGV;
+ $ConstraintsModel = shift @ARGV;
+ next;
+ }
+
+ if ($arg eq "-plist") {
+ shift @ARGV;
+ $OutputFormat = "plist";
+ next;
+ }
+
+ DieDiag("unrecognized option '$arg'\n") if ($arg =~ /^-/);
+
+ last;
+}
+
+if (!@ARGV) {
+ Diag("No build command specified.\n\n");
+ DisplayHelp();
+ exit 1;
+}
+
+$CmdArgs = HtmlEscape(join(' ', map(ShellEscape($_), @ARGV)));
+$HtmlTitle = "${CurrentDirSuffix} - scan-build results"
+ unless (defined($HtmlTitle));
+
+# Determine the output directory for the HTML reports.
+my $BaseDir = $HtmlDir;
+$HtmlDir = GetHTMLRunDir($HtmlDir);
+
+# Set the appropriate environment variables.
+SetHtmlEnv(\@ARGV, $HtmlDir);
+
+my $Cmd = Cwd::realpath("$RealBin/libexec/ccc-analyzer");
+if (!defined $Cmd || ! -x $Cmd) {
+ $Cmd = Cwd::realpath("$RealBin/ccc-analyzer");
+ DieDiag("Executable 'ccc-analyzer' does not exist at '$Cmd'\n") if(! -x $Cmd);
+}
+
+if (!defined $ClangCCSB || ! -x $ClangCCSB) {
+ Diag("'clang-cc' executable not found in '$RealBin/libexec'.\n");
+ Diag("Using 'clang-cc' from path.\n");
+}
+if (!defined $ClangSB || ! -x $ClangSB) {
+ Diag("'clang' executable not found in '$RealBin/bin'.\n");
+ Diag("Using 'clang' from path.\n");
+}
+
+if (defined $CXX) {
+ $ENV{'CXX'} = $CXX;
+}
+else {
+ $CXX = 'g++'; # This variable is used by other parts of scan-build
+ # that need to know a default C++ compiler to fall back to.
+}
+
+$ENV{'CC'} = $Cmd;
+$ENV{'CLANG_CC'} = $ClangCC;
+$ENV{'CLANG'} = $Clang;
+
+if ($Verbose >= 2) {
+ $ENV{'CCC_ANALYZER_VERBOSE'} = 1;
+}
+
+if ($Verbose >= 3) {
+ $ENV{'CCC_ANALYZER_LOG'} = 1;
+}
+
+if (scalar(@AnalysesToRun) == 0) {
+ foreach my $key (keys %AnalysesDefaultEnabled) {
+ push @AnalysesToRun,$key;
+ }
+}
+
+if ($AnalyzeHeaders) {
+ push @AnalysesToRun,"-analyzer-opt-analyze-headers";
+}
+
+$ENV{'CCC_ANALYZER_ANALYSIS'} = join ' ',@AnalysesToRun;
+
+if (defined $StoreModel) {
+ $ENV{'CCC_ANALYZER_STORE_MODEL'} = $StoreModel;
+}
+
+if (defined $ConstraintsModel) {
+ $ENV{'CCC_ANALYZER_CONSTRAINTS_MODEL'} = $ConstraintsModel;
+}
+
+if (defined $OutputFormat) {
+ $ENV{'CCC_ANALYZER_OUTPUT_FORMAT'} = $OutputFormat;
+}
+
+
+# Run the build.
+my $ExitStatus = RunBuildCommand(\@ARGV, $IgnoreErrors, $Cmd);
+
+if (defined $OutputFormat and $OutputFormat eq "plist") {
+ Diag "Analysis run complete.\n";
+ Diag "Analysis results (plist files) deposited in '$HtmlDir'\n";
+}
+else {
+ # Postprocess the HTML directory.
+ my $NumBugs = Postprocess($HtmlDir, $BaseDir);
+
+ if ($ViewResults and -r "$HtmlDir/index.html") {
+ Diag "Analysis run complete.\n";
+ Diag "Viewing analysis results in '$HtmlDir' using scan-view.\n";
+ my $ScanView = Cwd::realpath("$RealBin/scan-view");
+ if (! -x $ScanView) { $ScanView = "scan-view"; }
+ exec $ScanView, "$HtmlDir";
+ }
+
+ if ($ExitStatusFoundBugs) {
+ exit 1 if ($NumBugs > 0);
+ exit 0;
+ }
+}
+
+exit $ExitStatus;
+
diff --git a/utils/scanview.css b/utils/scanview.css
new file mode 100644
index 0000000..a0406f3
--- /dev/null
+++ b/utils/scanview.css
@@ -0,0 +1,62 @@
+body { color:#000000; background-color:#ffffff }
+body { font-family: Helvetica, sans-serif; font-size:9pt }
+h1 { font-size: 14pt; }
+h2 { font-size: 12pt; }
+table { font-size:9pt }
+table { border-spacing: 0px; border: 1px solid black }
+th, table thead {
+ background-color:#eee; color:#666666;
+ font-weight: bold; cursor: default;
+ text-align:center;
+ font-weight: bold; font-family: Verdana;
+ white-space:nowrap;
+}
+.W { font-size:0px }
+th, td { padding:5px; padding-left:8px; text-align:left }
+td.SUMM_DESC { padding-left:12px }
+td.DESC { white-space:pre }
+td.Q { text-align:right }
+td { text-align:left }
+tbody.scrollContent { overflow:auto }
+
+table.form_group {
+ background-color: #ccc;
+ border: 1px solid #333;
+ padding: 2px;
+}
+
+table.form_inner_group {
+ background-color: #ccc;
+ border: 1px solid #333;
+ padding: 0px;
+}
+
+table.form {
+ background-color: #999;
+ border: 1px solid #333;
+ padding: 2px;
+}
+
+td.form_label {
+ text-align: right;
+ vertical-align: top;
+}
+/* For one-line entries */
+td.form_clabel {
+ text-align: right;
+  vertical-align: middle;
+}
+td.form_value {
+ text-align: left;
+ vertical-align: top;
+}
+td.form_submit {
+ text-align: right;
+ vertical-align: top;
+}
+
+h1.SubmitFail {
+ color: #f00;
+}
+h1.SubmitOk {
+}
diff --git a/utils/sorttable.js b/utils/sorttable.js
new file mode 100644
index 0000000..4352d3b
--- /dev/null
+++ b/utils/sorttable.js
@@ -0,0 +1,493 @@
+/*
+ SortTable
+ version 2
+ 7th April 2007
+ Stuart Langridge, http://www.kryogenix.org/code/browser/sorttable/
+
+ Instructions:
+ Download this file
+ Add <script src="sorttable.js"></script> to your HTML
+ Add class="sortable" to any table you'd like to make sortable
+ Click on the headers to sort
+
+ Thanks to many, many people for contributions and suggestions.
+ Licenced as X11: http://www.kryogenix.org/code/browser/licence.html
+ This basically means: do what you want with it.
+*/
+
+
+var stIsIE = /*@cc_on!@*/false;
+
+sorttable = {
+ init: function() {
+ // quit if this function has already been called
+ if (arguments.callee.done) return;
+ // flag this function so we don't do the same thing twice
+ arguments.callee.done = true;
+ // kill the timer
+ if (_timer) clearInterval(_timer);
+
+ if (!document.createElement || !document.getElementsByTagName) return;
+
+ sorttable.DATE_RE = /^(\d\d?)[\/\.-](\d\d?)[\/\.-]((\d\d)?\d\d)$/;
+
+ forEach(document.getElementsByTagName('table'), function(table) {
+ if (table.className.search(/\bsortable\b/) != -1) {
+ sorttable.makeSortable(table);
+ }
+ });
+
+ },
+
+ makeSortable: function(table) {
+ if (table.getElementsByTagName('thead').length == 0) {
+ // table doesn't have a tHead. Since it should have, create one and
+ // put the first table row in it.
+ the = document.createElement('thead');
+ the.appendChild(table.rows[0]);
+ table.insertBefore(the,table.firstChild);
+ }
+ // Safari doesn't support table.tHead, sigh
+ if (table.tHead == null) table.tHead = table.getElementsByTagName('thead')[0];
+
+ if (table.tHead.rows.length != 1) return; // can't cope with two header rows
+
+ // Sorttable v1 put rows with a class of "sortbottom" at the bottom (as
+    // "total" rows, for example). That is the wrong approach; what you're
+    // supposed to do is put them in a tfoot. So, if there are sortbottom rows,
+ // for backwards compatibility, move them to tfoot (creating it if needed).
+ sortbottomrows = [];
+ for (var i=0; i<table.rows.length; i++) {
+ if (table.rows[i].className.search(/\bsortbottom\b/) != -1) {
+ sortbottomrows[sortbottomrows.length] = table.rows[i];
+ }
+ }
+ if (sortbottomrows) {
+ if (table.tFoot == null) {
+ // table doesn't have a tfoot. Create one.
+ tfo = document.createElement('tfoot');
+ table.appendChild(tfo);
+ }
+ for (var i=0; i<sortbottomrows.length; i++) {
+ tfo.appendChild(sortbottomrows[i]);
+ }
+ delete sortbottomrows;
+ }
+
+ // work through each column and calculate its type
+ headrow = table.tHead.rows[0].cells;
+ for (var i=0; i<headrow.length; i++) {
+ // manually override the type with a sorttable_type attribute
+ if (!headrow[i].className.match(/\bsorttable_nosort\b/)) { // skip this col
+ mtch = headrow[i].className.match(/\bsorttable_([a-z0-9]+)\b/);
+ if (mtch) { override = mtch[1]; }
+ if (mtch && typeof sorttable["sort_"+override] == 'function') {
+ headrow[i].sorttable_sortfunction = sorttable["sort_"+override];
+ } else {
+ headrow[i].sorttable_sortfunction = sorttable.guessType(table,i);
+ }
+ // make it clickable to sort
+ headrow[i].sorttable_columnindex = i;
+ headrow[i].sorttable_tbody = table.tBodies[0];
+ dean_addEvent(headrow[i],"click", function(e) {
+
+ if (this.className.search(/\bsorttable_sorted\b/) != -1) {
+ // if we're already sorted by this column, just
+ // reverse the table, which is quicker
+ sorttable.reverse(this.sorttable_tbody);
+ this.className = this.className.replace('sorttable_sorted',
+ 'sorttable_sorted_reverse');
+ this.removeChild(document.getElementById('sorttable_sortfwdind'));
+ sortrevind = document.createElement('span');
+ sortrevind.id = "sorttable_sortrevind";
+ sortrevind.innerHTML = stIsIE ? '&nbsp<font face="webdings">5</font>' : '&nbsp;&#x25B4;';
+ this.appendChild(sortrevind);
+ return;
+ }
+ if (this.className.search(/\bsorttable_sorted_reverse\b/) != -1) {
+ // if we're already sorted by this column in reverse, just
+ // re-reverse the table, which is quicker
+ sorttable.reverse(this.sorttable_tbody);
+ this.className = this.className.replace('sorttable_sorted_reverse',
+ 'sorttable_sorted');
+ this.removeChild(document.getElementById('sorttable_sortrevind'));
+ sortfwdind = document.createElement('span');
+ sortfwdind.id = "sorttable_sortfwdind";
+ sortfwdind.innerHTML = stIsIE ? '&nbsp<font face="webdings">6</font>' : '&nbsp;&#x25BE;';
+ this.appendChild(sortfwdind);
+ return;
+ }
+
+ // remove sorttable_sorted classes
+ theadrow = this.parentNode;
+ forEach(theadrow.childNodes, function(cell) {
+ if (cell.nodeType == 1) { // an element
+ cell.className = cell.className.replace('sorttable_sorted_reverse','');
+ cell.className = cell.className.replace('sorttable_sorted','');
+ }
+ });
+ sortfwdind = document.getElementById('sorttable_sortfwdind');
+ if (sortfwdind) { sortfwdind.parentNode.removeChild(sortfwdind); }
+ sortrevind = document.getElementById('sorttable_sortrevind');
+ if (sortrevind) { sortrevind.parentNode.removeChild(sortrevind); }
+
+ this.className += ' sorttable_sorted';
+ sortfwdind = document.createElement('span');
+ sortfwdind.id = "sorttable_sortfwdind";
+ sortfwdind.innerHTML = stIsIE ? '&nbsp<font face="webdings">6</font>' : '&nbsp;&#x25BE;';
+ this.appendChild(sortfwdind);
+
+ // build an array to sort. This is a Schwartzian transform thing,
+ // i.e., we "decorate" each row with the actual sort key,
+ // sort based on the sort keys, and then put the rows back in order
+ // which is a lot faster because you only do getInnerText once per row
+ row_array = [];
+ col = this.sorttable_columnindex;
+ rows = this.sorttable_tbody.rows;
+ for (var j=0; j<rows.length; j++) {
+ row_array[row_array.length] = [sorttable.getInnerText(rows[j].cells[col]), rows[j]];
+ }
+ /* If you want a stable sort, uncomment the following line */
+ sorttable.shaker_sort(row_array, this.sorttable_sortfunction);
+ /* and comment out this one */
+ //row_array.sort(this.sorttable_sortfunction);
+
+ tb = this.sorttable_tbody;
+ for (var j=0; j<row_array.length; j++) {
+ tb.appendChild(row_array[j][1]);
+ }
+
+ delete row_array;
+ });
+ }
+ }
+ },
+
+ guessType: function(table, column) {
+ // guess the type of a column based on its first non-blank row
+ sortfn = sorttable.sort_alpha;
+ for (var i=0; i<table.tBodies[0].rows.length; i++) {
+ text = sorttable.getInnerText(table.tBodies[0].rows[i].cells[column]);
+ if (text != '') {
+ if (text.match(/^-?[£$¤]?[\d,.]+%?$/)) {
+ return sorttable.sort_numeric;
+ }
+ // check for a date: dd/mm/yyyy or dd/mm/yy
+ // can have / or . or - as separator
+ // can be mm/dd as well
+ possdate = text.match(sorttable.DATE_RE)
+ if (possdate) {
+ // looks like a date
+ first = parseInt(possdate[1]);
+ second = parseInt(possdate[2]);
+ if (first > 12) {
+ // definitely dd/mm
+ return sorttable.sort_ddmm;
+ } else if (second > 12) {
+ return sorttable.sort_mmdd;
+ } else {
+ // looks like a date, but we can't tell which, so assume
+ // that it's dd/mm (English imperialism!) and keep looking
+ sortfn = sorttable.sort_ddmm;
+ }
+ }
+ }
+ }
+ return sortfn;
+ },
+
+ getInnerText: function(node) {
+ // gets the text we want to use for sorting for a cell.
+ // strips leading and trailing whitespace.
+ // this is *not* a generic getInnerText function; it's special to sorttable.
+ // for example, you can override the cell text with a customkey attribute.
+ // it also gets .value for <input> fields.
+
+ hasInputs = (typeof node.getElementsByTagName == 'function') &&
+ node.getElementsByTagName('input').length;
+
+ if (node.getAttribute("sorttable_customkey") != null) {
+ return node.getAttribute("sorttable_customkey");
+ }
+ else if (typeof node.textContent != 'undefined' && !hasInputs) {
+ return node.textContent.replace(/^\s+|\s+$/g, '');
+ }
+ else if (typeof node.innerText != 'undefined' && !hasInputs) {
+ return node.innerText.replace(/^\s+|\s+$/g, '');
+ }
+ else if (typeof node.text != 'undefined' && !hasInputs) {
+ return node.text.replace(/^\s+|\s+$/g, '');
+ }
+ else {
+ switch (node.nodeType) {
+ case 3:
+ if (node.nodeName.toLowerCase() == 'input') {
+ return node.value.replace(/^\s+|\s+$/g, '');
+ }
+ case 4:
+ return node.nodeValue.replace(/^\s+|\s+$/g, '');
+ break;
+ case 1:
+ case 11:
+ var innerText = '';
+ for (var i = 0; i < node.childNodes.length; i++) {
+ innerText += sorttable.getInnerText(node.childNodes[i]);
+ }
+ return innerText.replace(/^\s+|\s+$/g, '');
+ break;
+ default:
+ return '';
+ }
+ }
+ },
+
+ reverse: function(tbody) {
+ // reverse the rows in a tbody
+ newrows = [];
+ for (var i=0; i<tbody.rows.length; i++) {
+ newrows[newrows.length] = tbody.rows[i];
+ }
+ for (var i=newrows.length-1; i>=0; i--) {
+ tbody.appendChild(newrows[i]);
+ }
+ delete newrows;
+ },
+
+ /* sort functions
+ each sort function takes two parameters, a and b
+ you are comparing a[0] and b[0] */
+ sort_numeric: function(a,b) {
+ aa = parseFloat(a[0].replace(/[^0-9.-]/g,''));
+ if (isNaN(aa)) aa = 0;
+ bb = parseFloat(b[0].replace(/[^0-9.-]/g,''));
+ if (isNaN(bb)) bb = 0;
+ return aa-bb;
+ },
+ sort_alpha: function(a,b) {
+ if (a[0]==b[0]) return 0;
+ if (a[0]<b[0]) return -1;
+ return 1;
+ },
+ sort_ddmm: function(a,b) {
+ mtch = a[0].match(sorttable.DATE_RE);
+ y = mtch[3]; m = mtch[2]; d = mtch[1];
+ if (m.length == 1) m = '0'+m;
+ if (d.length == 1) d = '0'+d;
+ dt1 = y+m+d;
+ mtch = b[0].match(sorttable.DATE_RE);
+ y = mtch[3]; m = mtch[2]; d = mtch[1];
+ if (m.length == 1) m = '0'+m;
+ if (d.length == 1) d = '0'+d;
+ dt2 = y+m+d;
+ if (dt1==dt2) return 0;
+ if (dt1<dt2) return -1;
+ return 1;
+ },
+ sort_mmdd: function(a,b) {
+ mtch = a[0].match(sorttable.DATE_RE);
+ y = mtch[3]; d = mtch[2]; m = mtch[1];
+ if (m.length == 1) m = '0'+m;
+ if (d.length == 1) d = '0'+d;
+ dt1 = y+m+d;
+ mtch = b[0].match(sorttable.DATE_RE);
+ y = mtch[3]; d = mtch[2]; m = mtch[1];
+ if (m.length == 1) m = '0'+m;
+ if (d.length == 1) d = '0'+d;
+ dt2 = y+m+d;
+ if (dt1==dt2) return 0;
+ if (dt1<dt2) return -1;
+ return 1;
+ },
+
+ shaker_sort: function(list, comp_func) {
+ // A stable sort function to allow multi-level sorting of data
+ // see: http://en.wikipedia.org/wiki/Cocktail_sort
+ // thanks to Joseph Nahmias
+ var b = 0;
+ var t = list.length - 1;
+ var swap = true;
+
+ while(swap) {
+ swap = false;
+ for(var i = b; i < t; ++i) {
+ if ( comp_func(list[i], list[i+1]) > 0 ) {
+ var q = list[i]; list[i] = list[i+1]; list[i+1] = q;
+ swap = true;
+ }
+ } // for
+ t--;
+
+ if (!swap) break;
+
+ for(var i = t; i > b; --i) {
+ if ( comp_func(list[i], list[i-1]) < 0 ) {
+ var q = list[i]; list[i] = list[i-1]; list[i-1] = q;
+ swap = true;
+ }
+ } // for
+ b++;
+
+ } // while(swap)
+ }
+}
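shaker_sort above is a cocktail (bidirectional bubble) sort; it is used instead of Array.sort because it is stable, so sorting by a second column preserves the previous ordering within equal keys. The same algorithm restated as a Python sketch (illustrative only, not part of sorttable.js):

def shaker_sort(items, compare):
    # Stable in-place cocktail sort: alternate forward and backward bubble
    # passes, shrinking the unsorted window from both ends.
    bottom, top = 0, len(items) - 1
    swapped = True
    while swapped:
        swapped = False
        for i in range(bottom, top):            # forward pass
            if compare(items[i], items[i + 1]) > 0:
                items[i], items[i + 1] = items[i + 1], items[i]
                swapped = True
        top -= 1
        if not swapped:
            break
        for i in range(top, bottom, -1):        # backward pass
            if compare(items[i], items[i - 1]) < 0:
                items[i], items[i - 1] = items[i - 1], items[i]
                swapped = True
        bottom += 1
    return items

As in the JavaScript, compare takes two entries and returns a negative, zero, or positive number.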
+
+/* ******************************************************************
+ Supporting functions: bundled here to avoid depending on a library
+ ****************************************************************** */
+
+// Dean Edwards/Matthias Miller/John Resig
+
+/* for Mozilla/Opera9 */
+if (document.addEventListener) {
+ document.addEventListener("DOMContentLoaded", sorttable.init, false);
+}
+
+/* for Internet Explorer */
+/*@cc_on @*/
+/*@if (@_win32)
+ document.write("<script id=__ie_onload defer src=javascript:void(0)><\/script>");
+ var script = document.getElementById("__ie_onload");
+ script.onreadystatechange = function() {
+ if (this.readyState == "complete") {
+ sorttable.init(); // call the onload handler
+ }
+ };
+/*@end @*/
+
+/* for Safari */
+if (/WebKit/i.test(navigator.userAgent)) { // sniff
+ var _timer = setInterval(function() {
+ if (/loaded|complete/.test(document.readyState)) {
+ sorttable.init(); // call the onload handler
+ }
+ }, 10);
+}
+
+/* for other browsers */
+window.onload = sorttable.init;
+
+// written by Dean Edwards, 2005
+// with input from Tino Zijdel, Matthias Miller, Diego Perini
+
+// http://dean.edwards.name/weblog/2005/10/add-event/
+
+function dean_addEvent(element, type, handler) {
+ if (element.addEventListener) {
+ element.addEventListener(type, handler, false);
+ } else {
+ // assign each event handler a unique ID
+ if (!handler.$$guid) handler.$$guid = dean_addEvent.guid++;
+ // create a hash table of event types for the element
+ if (!element.events) element.events = {};
+ // create a hash table of event handlers for each element/event pair
+ var handlers = element.events[type];
+ if (!handlers) {
+ handlers = element.events[type] = {};
+ // store the existing event handler (if there is one)
+ if (element["on" + type]) {
+ handlers[0] = element["on" + type];
+ }
+ }
+ // store the event handler in the hash table
+ handlers[handler.$$guid] = handler;
+ // assign a global event handler to do all the work
+ element["on" + type] = handleEvent;
+ }
+};
+// a counter used to create unique IDs
+dean_addEvent.guid = 1;
+
+function removeEvent(element, type, handler) {
+ if (element.removeEventListener) {
+ element.removeEventListener(type, handler, false);
+ } else {
+ // delete the event handler from the hash table
+ if (element.events && element.events[type]) {
+ delete element.events[type][handler.$$guid];
+ }
+ }
+};
+
+function handleEvent(event) {
+ var returnValue = true;
+ // grab the event object (IE uses a global event object)
+ event = event || fixEvent(((this.ownerDocument || this.document || this).parentWindow || window).event);
+ // get a reference to the hash table of event handlers
+ var handlers = this.events[event.type];
+ // execute each event handler
+ for (var i in handlers) {
+ this.$$handleEvent = handlers[i];
+ if (this.$$handleEvent(event) === false) {
+ returnValue = false;
+ }
+ }
+ return returnValue;
+};
+
+function fixEvent(event) {
+ // add W3C standard event methods
+ event.preventDefault = fixEvent.preventDefault;
+ event.stopPropagation = fixEvent.stopPropagation;
+ return event;
+};
+fixEvent.preventDefault = function() {
+ this.returnValue = false;
+};
+fixEvent.stopPropagation = function() {
+ this.cancelBubble = true;
+}
+
+// Dean's forEach: http://dean.edwards.name/base/forEach.js
+/*
+ forEach, version 1.0
+ Copyright 2006, Dean Edwards
+ License: http://www.opensource.org/licenses/mit-license.php
+*/
+
+// array-like enumeration
+if (!Array.forEach) { // mozilla already supports this
+ Array.forEach = function(array, block, context) {
+ for (var i = 0; i < array.length; i++) {
+ block.call(context, array[i], i, array);
+ }
+ };
+}
+
+// generic enumeration
+Function.prototype.forEach = function(object, block, context) {
+ for (var key in object) {
+ if (typeof this.prototype[key] == "undefined") {
+ block.call(context, object[key], key, object);
+ }
+ }
+};
+
+// character enumeration
+String.forEach = function(string, block, context) {
+ Array.forEach(string.split(""), function(chr, index) {
+ block.call(context, chr, index, string);
+ });
+};
+
+// globally resolve forEach enumeration
+var forEach = function(object, block, context) {
+ if (object) {
+ var resolve = Object; // default
+ if (object instanceof Function) {
+ // functions have a "length" property
+ resolve = Function;
+ } else if (object.forEach instanceof Function) {
+ // the object implements a custom forEach method so use that
+ object.forEach(block, context);
+ return;
+ } else if (typeof object == "string") {
+ // the object is a string
+ resolve = String;
+ } else if (typeof object.length == "number") {
+ // the object is array-like
+ resolve = Array;
+ }
+ resolve.forEach(object, block, context);
+ }
+};
+
diff --git a/utils/test/Makefile.multi b/utils/test/Makefile.multi
new file mode 100644
index 0000000..3e9cd56
--- /dev/null
+++ b/utils/test/Makefile.multi
@@ -0,0 +1,21 @@
+LEVEL = ../../..
+include $(LEVEL)/Makefile.common
+
+# Test in all immediate subdirectories if unset.
+TESTDIRS ?= $(shell echo $(PROJ_SRC_DIR)/*/)
+
+ifndef TESTARGS
+ifdef VERBOSE
+TESTARGS = -v
+else
+TESTARGS = -s
+endif
+endif
+
+all::
+ @ PATH=$(ToolDir):$(LLVM_SRC_ROOT)/test/Scripts:$$PATH VG=$(VG) ../utils/test/MultiTestRunner.py $(TESTARGS) $(TESTDIRS)
+
+clean::
+ @ rm -rf Output/
+
+.PHONY: all report clean
diff --git a/utils/test/MultiTestRunner.py b/utils/test/MultiTestRunner.py
new file mode 100755
index 0000000..57650f9
--- /dev/null
+++ b/utils/test/MultiTestRunner.py
@@ -0,0 +1,331 @@
+#!/usr/bin/python
+
+"""
+MultiTestRunner - Harness for running multiple tests in the simple clang style.
+
+TODO
+--
+ - Fix Ctrl-c issues
+ - Use a timeout
+ - Detect signalled failures (abort)
+ - Better support for finding tests
+"""
+
+import os, sys, re, random, time
+import threading
+import ProgressBar
+import TestRunner
+from TestRunner import TestStatus
+from Queue import Queue
+
+kTestFileExtensions = set(['.mi','.i','.c','.cpp','.m','.mm','.ll'])
+
+def getTests(inputs):
+ for path in inputs:
+ if not os.path.exists(path):
+ print >>sys.stderr,"WARNING: Invalid test \"%s\""%(path,)
+ continue
+
+ if os.path.isdir(path):
+ for dirpath,dirnames,filenames in os.walk(path):
+ dotTests = os.path.join(dirpath,'.tests')
+ if os.path.exists(dotTests):
+ for ln in open(dotTests):
+ if ln.strip():
+ yield os.path.join(dirpath,ln.strip())
+ else:
+ # FIXME: This doesn't belong here
+ if 'Output' in dirnames:
+ dirnames.remove('Output')
+ for f in filenames:
+ base,ext = os.path.splitext(f)
+ if ext in kTestFileExtensions:
+ yield os.path.join(dirpath,f)
+ else:
+ yield path
+
+class TestingProgressDisplay:
+ def __init__(self, opts, numTests, progressBar=None):
+ self.opts = opts
+ self.numTests = numTests
+ self.digits = len(str(self.numTests))
+ self.current = None
+ self.lock = threading.Lock()
+ self.progressBar = progressBar
+ self.progress = 0.
+
+ def update(self, index, tr):
+ # Avoid locking overhead in quiet mode
+ if self.opts.quiet and not tr.failed():
+ return
+
+ # Output lock
+ self.lock.acquire()
+ try:
+ self.handleUpdate(index, tr)
+ finally:
+ self.lock.release()
+
+ def finish(self):
+ if self.progressBar:
+ self.progressBar.clear()
+ elif self.opts.succinct:
+ sys.stdout.write('\n')
+
+ def handleUpdate(self, index, tr):
+ if self.progressBar:
+ if tr.failed():
+ self.progressBar.clear()
+ else:
+ # Force monotonicity
+ self.progress = max(self.progress, float(index)/self.numTests)
+ self.progressBar.update(self.progress, tr.path)
+ return
+ elif self.opts.succinct:
+ if not tr.failed():
+ sys.stdout.write('.')
+ sys.stdout.flush()
+ return
+ else:
+ sys.stdout.write('\n')
+
+ extra = ''
+ if tr.code==TestStatus.Invalid:
+ extra = ' - (Invalid test)'
+ elif tr.code==TestStatus.NoRunLine:
+ extra = ' - (No RUN line)'
+ elif tr.failed():
+ extra = ' - %s'%(TestStatus.getName(tr.code).upper(),)
+ print '%*d/%*d - %s%s'%(self.digits, index+1, self.digits,
+ self.numTests, tr.path, extra)
+
+ if tr.failed() and self.opts.showOutput:
+ TestRunner.cat(tr.testResults, sys.stdout)
+
+class TestResult:
+ def __init__(self, path, code, testResults):
+ self.path = path
+ self.code = code
+ self.testResults = testResults
+
+ def failed(self):
+ return self.code in (TestStatus.Fail,TestStatus.XPass)
+
+class TestProvider:
+ def __init__(self, opts, tests, display):
+ self.opts = opts
+ self.tests = tests
+ self.index = 0
+ self.lock = threading.Lock()
+ self.results = [None]*len(self.tests)
+ self.startTime = time.time()
+ self.progress = display
+
+ def get(self):
+ self.lock.acquire()
+ try:
+ if self.opts.maxTime is not None:
+ if time.time() - self.startTime > self.opts.maxTime:
+ return None
+ if self.index >= len(self.tests):
+ return None
+ item = self.tests[self.index],self.index
+ self.index += 1
+ return item
+ finally:
+ self.lock.release()
+
+ def setResult(self, index, result):
+ self.results[index] = result
+ self.progress.update(index, result)
+
+class Tester(threading.Thread):
+ def __init__(self, provider):
+ threading.Thread.__init__(self)
+ self.provider = provider
+
+ def run(self):
+ while 1:
+ item = self.provider.get()
+ if item is None:
+ break
+ self.runTest(item)
+
+ def runTest(self, (path,index)):
+ command = path
+        # Concatenate by hand here: os.path.join would discard the
+        # 'Output/' prefix when given an absolute path.
+ output = 'Output/' + path + '.out'
+ testname = path
+ testresults = 'Output/' + path + '.testresults'
+ TestRunner.mkdir_p(os.path.dirname(testresults))
+ numTests = len(self.provider.tests)
+ digits = len(str(numTests))
+ code = None
+ try:
+ opts = self.provider.opts
+ if opts.debugDoNotTest:
+ code = None
+ else:
+ code = TestRunner.runOneTest(path, command, output, testname,
+ opts.clang,
+ useValgrind=opts.useValgrind,
+ useDGCompat=opts.useDGCompat,
+ useScript=opts.testScript,
+ output=open(testresults,'w'))
+ except KeyboardInterrupt:
+ # This is a sad hack. Unfortunately subprocess goes
+ # bonkers with ctrl-c and we start forking merrily.
+ print 'Ctrl-C detected, goodbye.'
+ os.kill(0,9)
+
+ self.provider.setResult(index, TestResult(path, code, testresults))
+
+def detectCPUs():
+ """
+    Detects the number of CPUs on a system. Cribbed from pp (Parallel Python).
+ """
+ # Linux, Unix and MacOS:
+ if hasattr(os, "sysconf"):
+ if os.sysconf_names.has_key("SC_NPROCESSORS_ONLN"):
+ # Linux & Unix:
+ ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
+ if isinstance(ncpus, int) and ncpus > 0:
+ return ncpus
+ else: # OSX:
+ return int(os.popen2("sysctl -n hw.ncpu")[1].read())
+ # Windows:
+ if os.environ.has_key("NUMBER_OF_PROCESSORS"):
+ ncpus = int(os.environ["NUMBER_OF_PROCESSORS"]);
+ if ncpus > 0:
+ return ncpus
+ return 1 # Default
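detectCPUs probes os.sysconf, sysctl, and NUMBER_OF_PROCESSORS by hand. On Python 2.6 and later the standard library can answer directly; a possible replacement, sketched here for illustration only:

def detect_cpus():
    # multiprocessing.cpu_count() raises NotImplementedError when the count
    # cannot be determined, so fall back to a single CPU.
    try:
        import multiprocessing
        return multiprocessing.cpu_count()
    except (ImportError, NotImplementedError):
        return 1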
+
+def main():
+ global options
+ from optparse import OptionParser
+ parser = OptionParser("usage: %prog [options] {inputs}")
+ parser.add_option("-j", "--threads", dest="numThreads",
+ help="Number of testing threads",
+ type=int, action="store",
+ default=detectCPUs())
+ parser.add_option("", "--clang", dest="clang",
+ help="Program to use as \"clang\"",
+ action="store", default="clang")
+ parser.add_option("", "--vg", dest="useValgrind",
+ help="Run tests under valgrind",
+ action="store_true", default=False)
+ parser.add_option("", "--dg", dest="useDGCompat",
+ help="Use llvm dejagnu compatibility mode",
+ action="store_true", default=False)
+ parser.add_option("", "--script", dest="testScript",
+ help="Default script to use",
+ action="store", default=None)
+ parser.add_option("-v", "--verbose", dest="showOutput",
+ help="Show all test output",
+ action="store_true", default=False)
+    parser.add_option("-q", "--quiet", dest="quiet",
+                      help="Suppress output for tests that do not fail",
+                      action="store_true", default=False)
+ parser.add_option("-s", "--succinct", dest="succinct",
+ help="Reduce amount of output",
+ action="store_true", default=False)
+ parser.add_option("", "--max-tests", dest="maxTests",
+ help="Maximum number of tests to run",
+ action="store", type=int, default=None)
+ parser.add_option("", "--max-time", dest="maxTime",
+ help="Maximum time to spend testing (in seconds)",
+ action="store", type=float, default=None)
+ parser.add_option("", "--shuffle", dest="shuffle",
+ help="Run tests in random order",
+ action="store_true", default=False)
+ parser.add_option("", "--seed", dest="seed",
+ help="Seed for random number generator (default: random).",
+ action="store", default=None)
+ parser.add_option("", "--no-progress-bar", dest="useProgressBar",
+ help="Do not use curses based progress bar",
+ action="store_false", default=True)
+ parser.add_option("", "--debug-do-not-test", dest="debugDoNotTest",
+ help="DEBUG: Skip running actual test script",
+ action="store_true", default=False)
+ (opts, args) = parser.parse_args()
+
+ if not args:
+ parser.error('No inputs specified')
+
+ # FIXME: It could be worth loading these in parallel with testing.
+ allTests = list(getTests(args))
+ allTests.sort()
+
+ tests = allTests
+ if opts.seed is not None:
+ try:
+ seed = int(opts.seed)
+        except ValueError:
+ parser.error('--seed argument should be an integer')
+ random.seed(seed)
+ if opts.shuffle:
+ random.shuffle(tests)
+ if opts.maxTests is not None:
+ tests = tests[:opts.maxTests]
+
+ extra = ''
+ if len(tests) != len(allTests):
+ extra = ' of %d'%(len(allTests),)
+ header = '-- Testing: %d%s tests, %d threads --'%(len(tests),extra,opts.numThreads)
+
+ progressBar = None
+ if not opts.quiet:
+ if opts.useProgressBar:
+ try:
+ tc = ProgressBar.TerminalController()
+ progressBar = ProgressBar.ProgressBar(tc, header)
+ except ValueError:
+ pass
+
+ if not progressBar:
+ print header
+
+ display = TestingProgressDisplay(opts, len(tests), progressBar)
+ provider = TestProvider(opts, tests, display)
+
+ testers = [Tester(provider) for i in range(opts.numThreads)]
+ startTime = time.time()
+ for t in testers:
+ t.start()
+ try:
+ for t in testers:
+ t.join()
+ except KeyboardInterrupt:
+ sys.exit(1)
+
+ display.finish()
+
+ if not opts.quiet:
+ print 'Testing Time: %.2fs'%(time.time() - startTime)
+
+    # List test results organized by kind.
+ byCode = {}
+ for t in provider.results:
+ if t:
+ if t.code not in byCode:
+ byCode[t.code] = []
+ byCode[t.code].append(t)
+ for title,code in (('Expected Failures', TestStatus.XFail),
+ ('Unexpected Passing Tests', TestStatus.XPass),
+ ('Failing Tests', TestStatus.Fail)):
+ elts = byCode.get(code)
+ if not elts:
+ continue
+ print '*'*20
+ print '%s (%d):' % (title, len(elts))
+ for tr in elts:
+ print '\t%s'%(tr.path,)
+
+ numFailures = len(byCode.get(TestStatus.Fail,[]))
+ if numFailures:
+ print '\nFailures: %d' % (numFailures,)
+
+if __name__=='__main__':
+ main()
diff --git a/utils/test/ProgressBar.py b/utils/test/ProgressBar.py
new file mode 100644
index 0000000..2e1f24a
--- /dev/null
+++ b/utils/test/ProgressBar.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+
+# Source: http://code.activestate.com/recipes/475116/, with
+# modifications by Daniel Dunbar.
+
+import sys, re, time
+
+class TerminalController:
+ """
+ A class that can be used to portably generate formatted output to
+ a terminal.
+
+ `TerminalController` defines a set of instance variables whose
+ values are initialized to the control sequence necessary to
+ perform a given action. These can be simply included in normal
+ output to the terminal:
+
+ >>> term = TerminalController()
+ >>> print 'This is '+term.GREEN+'green'+term.NORMAL
+
+    Alternatively, the `render()` method can be used, which replaces
+ '${action}' with the string required to perform 'action':
+
+ >>> term = TerminalController()
+ >>> print term.render('This is ${GREEN}green${NORMAL}')
+
+ If the terminal doesn't support a given action, then the value of
+ the corresponding instance variable will be set to ''. As a
+ result, the above code will still work on terminals that do not
+ support color, except that their output will not be colored.
+ Also, this means that you can test whether the terminal supports a
+ given action by simply testing the truth value of the
+ corresponding instance variable:
+
+ >>> term = TerminalController()
+ >>> if term.CLEAR_SCREEN:
+    ...     print 'This terminal supports clearing the screen.'
+
+ Finally, if the width and height of the terminal are known, then
+ they will be stored in the `COLS` and `LINES` attributes.
+ """
+ # Cursor movement:
+ BOL = '' #: Move the cursor to the beginning of the line
+ UP = '' #: Move the cursor up one line
+ DOWN = '' #: Move the cursor down one line
+ LEFT = '' #: Move the cursor left one char
+ RIGHT = '' #: Move the cursor right one char
+
+ # Deletion:
+ CLEAR_SCREEN = '' #: Clear the screen and move to home position
+ CLEAR_EOL = '' #: Clear to the end of the line.
+ CLEAR_BOL = '' #: Clear to the beginning of the line.
+ CLEAR_EOS = '' #: Clear to the end of the screen
+
+ # Output modes:
+ BOLD = '' #: Turn on bold mode
+ BLINK = '' #: Turn on blink mode
+ DIM = '' #: Turn on half-bright mode
+ REVERSE = '' #: Turn on reverse-video mode
+ NORMAL = '' #: Turn off all modes
+
+ # Cursor display:
+ HIDE_CURSOR = '' #: Make the cursor invisible
+ SHOW_CURSOR = '' #: Make the cursor visible
+
+ # Terminal size:
+ COLS = None #: Width of the terminal (None for unknown)
+ LINES = None #: Height of the terminal (None for unknown)
+
+ # Foreground colors:
+ BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = ''
+
+ # Background colors:
+ BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = ''
+ BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = ''
+
+ _STRING_CAPABILITIES = """
+ BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1
+ CLEAR_SCREEN=clear CLEAR_EOL=el CLEAR_BOL=el1 CLEAR_EOS=ed BOLD=bold
+ BLINK=blink DIM=dim REVERSE=rev UNDERLINE=smul NORMAL=sgr0
+    HIDE_CURSOR=civis SHOW_CURSOR=cnorm""".split()
+ _COLORS = """BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE""".split()
+ _ANSICOLORS = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".split()
+
+ def __init__(self, term_stream=sys.stdout):
+ """
+ Create a `TerminalController` and initialize its attributes
+ with appropriate values for the current terminal.
+ `term_stream` is the stream that will be used for terminal
+ output; if this stream is not a tty, then the terminal is
+ assumed to be a dumb terminal (i.e., have no capabilities).
+ """
+ # Curses isn't available on all platforms
+ try: import curses
+ except: return
+
+ # If the stream isn't a tty, then assume it has no capabilities.
+ if not term_stream.isatty(): return
+
+ # Check the terminal type. If we fail, then assume that the
+ # terminal has no capabilities.
+ try: curses.setupterm()
+ except: return
+
+ # Look up numeric capabilities.
+ self.COLS = curses.tigetnum('cols')
+ self.LINES = curses.tigetnum('lines')
+
+ # Look up string capabilities.
+ for capability in self._STRING_CAPABILITIES:
+ (attrib, cap_name) = capability.split('=')
+ setattr(self, attrib, self._tigetstr(cap_name) or '')
+
+ # Colors
+ set_fg = self._tigetstr('setf')
+ if set_fg:
+ for i,color in zip(range(len(self._COLORS)), self._COLORS):
+ setattr(self, color, curses.tparm(set_fg, i) or '')
+ set_fg_ansi = self._tigetstr('setaf')
+ if set_fg_ansi:
+ for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
+ setattr(self, color, curses.tparm(set_fg_ansi, i) or '')
+ set_bg = self._tigetstr('setb')
+ if set_bg:
+ for i,color in zip(range(len(self._COLORS)), self._COLORS):
+ setattr(self, 'BG_'+color, curses.tparm(set_bg, i) or '')
+ set_bg_ansi = self._tigetstr('setab')
+ if set_bg_ansi:
+ for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
+ setattr(self, 'BG_'+color, curses.tparm(set_bg_ansi, i) or '')
+
+ def _tigetstr(self, cap_name):
+ # String capabilities can include "delays" of the form "$<2>".
+ # For any modern terminal, we should be able to just ignore
+ # these, so strip them out.
+ import curses
+ cap = curses.tigetstr(cap_name) or ''
+ return re.sub(r'\$<\d+>[/*]?', '', cap)
+
+ def render(self, template):
+ """
+        Replace each $-substitution in the given template string with
+ the corresponding terminal control string (if it's defined) or
+ '' (if it's not).
+ """
+ return re.sub(r'\$\$|\${\w+}', self._render_sub, template)
+
+ def _render_sub(self, match):
+ s = match.group()
+ if s == '$$': return s
+ else: return getattr(self, s[2:-1])
+
+#######################################################################
+# Example use case: progress bar
+#######################################################################
+
+class ProgressBar:
+ """
+ A 3-line progress bar, which looks like::
+
+ Header
+ 20% [===========----------------------------------]
+ progress message
+
+ The progress bar is colored, if the terminal supports color
+ output; and adjusts to the width of the terminal.
+ """
+ BAR = '%s${GREEN}[${BOLD}%s%s${NORMAL}${GREEN}]${NORMAL}%s\n'
+ HEADER = '${BOLD}${CYAN}%s${NORMAL}\n\n'
+
+ def __init__(self, term, header, useETA=True):
+ self.term = term
+ if not (self.term.CLEAR_EOL and self.term.UP and self.term.BOL):
+            raise ValueError("Terminal isn't capable enough -- you "
+                             "should use a simpler progress display.")
+ self.width = self.term.COLS or 75
+ self.bar = term.render(self.BAR)
+ self.header = self.term.render(self.HEADER % header.center(self.width))
+ self.cleared = 1 #: true if we haven't drawn the bar yet.
+ self.useETA = useETA
+ if self.useETA:
+ self.startTime = time.time()
+ self.update(0, '')
+
+ def update(self, percent, message):
+ if self.cleared:
+ sys.stdout.write(self.header)
+ self.cleared = 0
+ prefix = '%3d%% ' % (percent*100,)
+ suffix = ''
+ if self.useETA:
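+            # Extrapolate the total runtime from elapsed/percent and report
+            # the remainder as the ETA.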
+ elapsed = time.time() - self.startTime
+ if percent > .0001 and elapsed > 1:
+ total = elapsed / percent
+ eta = int(total - elapsed)
+                h = eta//3600
+ m = (eta//60) % 60
+ s = eta % 60
+ suffix = ' ETA: %02d:%02d:%02d'%(h,m,s)
+ barWidth = self.width - len(prefix) - len(suffix) - 2
+ n = int(barWidth*percent)
+ if len(message) < self.width:
+ message = message + ' '*(self.width - len(message))
+ else:
+ message = '... ' + message[-(self.width-4):]
+ sys.stdout.write(
+ self.term.BOL + self.term.UP + self.term.CLEAR_EOL +
+ (self.bar % (prefix, '='*n, '-'*(barWidth-n), suffix)) +
+ self.term.CLEAR_EOL + message)
+
+ def clear(self):
+ if not self.cleared:
+ sys.stdout.write(self.term.BOL + self.term.CLEAR_EOL +
+ self.term.UP + self.term.CLEAR_EOL +
+ self.term.UP + self.term.CLEAR_EOL)
+ self.cleared = 1
+
+def test():
+ import time
+ tc = TerminalController()
+ p = ProgressBar(tc, 'Tests')
+ for i in range(101):
+ p.update(i/100., str(i))
+ time.sleep(.3)
+
+if __name__=='__main__':
+ test()
diff --git a/utils/test/TestRunner.py b/utils/test/TestRunner.py
new file mode 100755
index 0000000..1cb8b9d
--- /dev/null
+++ b/utils/test/TestRunner.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+#
+# TestRunner.py - This script is used to run arbitrary unit tests. Unit
+# tests must contain the command used to run them in the input file, starting
+# immediately after a "RUN:" string.
+#
+# This runner recognizes and replaces the following strings in the command:
+#
+# %s - Replaced with the input name of the program, or the program to
+# execute, as appropriate.
+# %S - Replaced with the directory where the input resides.
+# %llvmgcc - llvm-gcc command
+# %llvmgxx - llvm-g++ command
+# %prcontext - prcontext.tcl script
+# %t - temporary file name (derived from testcase name)
+#
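+# For example, a test file (hypothetical contents, shown only for
+# illustration) might contain a line such as:
+#
+#   // RUN: clang -fsyntax-only %s
+#
+# which this runner expands and executes via /bin/sh.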
+
+import os
+import sys
+import subprocess
+import errno
+import re
+
+class TestStatus:
+ Pass = 0
+ XFail = 1
+ Fail = 2
+ XPass = 3
+ NoRunLine = 4
+ Invalid = 5
+
+ kNames = ['Pass','XFail','Fail','XPass','NoRunLine','Invalid']
+ @staticmethod
+ def getName(code): return TestStatus.kNames[code]
+
+def mkdir_p(path):
+ if not path:
+ pass
+ elif os.path.exists(path):
+ pass
+ else:
+ parent = os.path.dirname(path)
+ if parent != path:
+ mkdir_p(parent)
+ try:
+ os.mkdir(path)
+ except OSError,e:
+ if e.errno != errno.EEXIST:
+ raise
+
+def remove(path):
+ try:
+ os.remove(path)
+ except OSError:
+ pass
+
+def cat(path, output):
+ f = open(path)
+ output.writelines(f)
+ f.close()
+
+def runOneTest(FILENAME, SUBST, OUTPUT, TESTNAME, CLANG,
+ useValgrind=False,
+ useDGCompat=False,
+ useScript=None,
+ output=sys.stdout):
+ if useValgrind:
+ VG_OUTPUT = '%s.vg'%(OUTPUT,)
+        if os.path.exists(VG_OUTPUT):
+ remove(VG_OUTPUT)
+ CLANG = 'valgrind --leak-check=full --quiet --log-file=%s %s'%(VG_OUTPUT, CLANG)
+
+ # Create the output directory if it does not already exist.
+ mkdir_p(os.path.dirname(OUTPUT))
+
+ # FIXME
+ #ulimit -t 40
+
+ # FIXME: Load script once
+ # FIXME: Support "short" script syntax
+
+ if useScript:
+ scriptFile = useScript
+ else:
+ # See if we have a per-dir test script.
+ dirScriptFile = os.path.join(os.path.dirname(FILENAME), 'test.script')
+ if os.path.exists(dirScriptFile):
+ scriptFile = dirScriptFile
+ else:
+ scriptFile = FILENAME
+
+ # Verify the script contains a run line.
+ for ln in open(scriptFile):
+ if 'RUN:' in ln:
+ break
+ else:
+ print >>output, "******************** TEST '%s' HAS NO RUN LINE! ********************"%(TESTNAME,)
+ output.flush()
+ return TestStatus.NoRunLine
+
+ OUTPUT = os.path.abspath(OUTPUT)
+ FILENAME = os.path.abspath(FILENAME)
+ SCRIPT = OUTPUT + '.script'
+ TEMPOUTPUT = OUTPUT + '.tmp'
+
+ substitutions = [('%s',SUBST),
+ ('%S',os.path.dirname(SUBST)),
+ ('%llvmgcc','llvm-gcc -emit-llvm -w'),
+ ('%llvmgxx','llvm-g++ -emit-llvm -w'),
+ ('%prcontext','prcontext.tcl'),
+ ('%t',TEMPOUTPUT),
+ ('clang',CLANG)]
+ scriptLines = []
+ xfailLines = []
+ for ln in open(scriptFile):
+ if 'RUN:' in ln:
+ # Isolate run parameters
+ index = ln.index('RUN:')
+ ln = ln[index+4:]
+
+ # Apply substitutions
+ for a,b in substitutions:
+ ln = ln.replace(a,b)
+
+ if useDGCompat:
+ ln = re.sub(r'\{(.*)\}', r'"\1"', ln)
+ scriptLines.append(ln)
+ elif 'XFAIL' in ln:
+ xfailLines.append(ln)
+
+ if xfailLines:
+ print >>output, "XFAILED '%s':"%(TESTNAME,)
+ output.writelines(xfailLines)
+
+ # Write script file
+ f = open(SCRIPT,'w')
+ f.write(''.join(scriptLines))
+ f.close()
+
+ outputFile = open(OUTPUT,'w')
+ p = None
+ try:
+ p = subprocess.Popen(["/bin/sh",SCRIPT],
+ cwd=os.path.dirname(FILENAME),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out,err = p.communicate()
+ outputFile.write(out)
+ outputFile.write(err)
+ SCRIPT_STATUS = p.wait()
+ except KeyboardInterrupt:
+ if p is not None:
+            os.kill(p.pid, 9)
+ raise
+ outputFile.close()
+
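+    # For expected failures, invert the script status: a failing script is the
+    # expected outcome (XFail below), while a passing one becomes XPass.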
+ if xfailLines:
+ SCRIPT_STATUS = not SCRIPT_STATUS
+
+ if useValgrind:
+ VG_STATUS = len(list(open(VG_OUTPUT)))
+ else:
+ VG_STATUS = 0
+
+ if SCRIPT_STATUS or VG_STATUS:
+ print >>output, "******************** TEST '%s' FAILED! ********************"%(TESTNAME,)
+ print >>output, "Command: "
+ output.writelines(scriptLines)
+ if not SCRIPT_STATUS:
+ print >>output, "Output:"
+ else:
+ print >>output, "Incorrect Output:"
+ cat(OUTPUT, output)
+ if VG_STATUS:
+ print >>output, "Valgrind Output:"
+ cat(VG_OUTPUT, output)
+ print >>output, "******************** TEST '%s' FAILED! ********************"%(TESTNAME,)
+ output.flush()
+ if xfailLines:
+ return TestStatus.XPass
+ else:
+ return TestStatus.Fail
+
+ if xfailLines:
+ return TestStatus.XFail
+ else:
+ return TestStatus.Pass
+
+def main():
+ _,path = sys.argv
+ command = path
+    # Use hand concatenation here because we want to override
+ # absolute paths.
+ output = 'Output/' + path + '.out'
+ testname = path
+
+ # Determine which clang to use.
+ CLANG = os.getenv('CLANG')
+ if not CLANG:
+ CLANG = 'clang'
+
+ res = runOneTest(path, command, output, testname, CLANG,
+ useValgrind=bool(os.getenv('VG')),
+ useDGCompat=bool(os.getenv('DG_COMPAT')),
+ useScript=os.getenv("TEST_SCRIPT"))
+
+ sys.exit(res == TestStatus.Fail or res == TestStatus.XPass)
+
+if __name__=='__main__':
+ main()
diff --git a/utils/token-delta.py b/utils/token-delta.py
new file mode 100755
index 0000000..327fa92
--- /dev/null
+++ b/utils/token-delta.py
@@ -0,0 +1,251 @@
+#!/usr/bin/env python
+
+import os
+import re
+import subprocess
+import sys
+import tempfile
+
+###
+
+class DeltaAlgorithm(object):
+ def __init__(self):
+ self.cache = set()
+
+ def test(self, changes):
+        raise NotImplementedError
+
+ ###
+
+ def getTestResult(self, changes):
+ # There is no reason to cache successful tests because we will
+ # always reduce the changeset when we see one.
+
+ changeset = frozenset(changes)
+ if changeset in self.cache:
+ return False
+ elif not self.test(changes):
+ self.cache.add(changeset)
+ return False
+ else:
+ return True
+
+ def run(self, changes, force=False):
+        # Make sure the initial test passes; if not, then either (a) the user
+        # doesn't expect monotonicity and we may end up doing O(N^2) tests, or
+        # (b) the test is wrong. Avoid the O(N^2) case unless the user
+        # requests it.
+ if not force:
+ if not self.getTestResult(changes):
+                raise ValueError,'Initial change set passed to delta fails the test.'
+
+ # Check empty set first to quickly find poor test functions.
+ if self.getTestResult(set()):
+ return set()
+ else:
+ return self.delta(changes, self.split(changes))
+
+ def split(self, S):
+ """split(set) -> [sets]
+
+ Partition a set into one or two pieces.
+ """
+
+        # There are many ways to split; we could do a better job with more
+        # context information (but then the API becomes grosser).
+ L = list(S)
+ mid = len(L)//2
+ if mid==0:
+ return L,
+ else:
+ return L[:mid],L[mid:]
+
+ def delta(self, c, sets):
+ # assert(reduce(set.union, sets, set()) == c)
+
+ # If there is nothing left we can remove, we are done.
+ if len(sets) <= 1:
+ return c
+
+ # Look for a passing subset.
+ res = self.search(c, sets)
+ if res is not None:
+ return res
+
+ # Otherwise, partition sets if possible; if not we are done.
+ refined = sum(map(list, map(self.split, sets)), [])
+ if len(refined) == len(sets):
+ return c
+
+ return self.delta(c, refined)
+
+ def search(self, c, sets):
+ for i,S in enumerate(sets):
+ # If test passes on this subset alone, recurse.
+ if self.getTestResult(S):
+ return self.delta(S, self.split(S))
+
+ # Otherwise if we have more than two sets, see if test
+            # passes without this subset.
+ if len(sets) > 2:
+ complement = sum(sets[:i] + sets[i+1:],[])
+ if self.getTestResult(complement):
+ return self.delta(complement, sets[:i] + sets[i+1:])
+
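+# A minimal usage sketch (illustrative only): subclass DeltaAlgorithm and
+# override test() with the predicate that must keep holding as changes are
+# removed; reproducesBug() is a hypothetical user-provided function.
+#
+#   class LineDelta(DeltaAlgorithm):
+#       def __init__(self, lines):
+#           super(LineDelta, self).__init__()
+#           self.lines = lines
+#       def test(self, changes):
+#           kept = [self.lines[i] for i in sorted(changes)]
+#           return reproducesBug(kept)
+#
+#   lines = open('input.c').readlines()
+#   minimal = LineDelta(lines).run(set(range(len(lines))))
+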
+###
+
+class Token:
+ def __init__(self, type, data, flags, file, line, column):
+ self.type = type
+ self.data = data
+ self.flags = flags
+ self.file = file
+ self.line = line
+ self.column = column
+
+kTokenRE = re.compile(r"""([a-z_]+) '(.*)'\t(.*)\tLoc=<(.*):(.*):(.*)>""",
+ re.DOTALL | re.MULTILINE)
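+# The regular expression above targets the output of 'clang -dump-raw-tokens',
+# whose lines look roughly like (approximate; fields are tab separated):
+#
+#   identifier 'foo' [StartOfLine] Loc=<t.c:1:5>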
+
+def getTokens(path):
+ p = subprocess.Popen(['clang','-dump-raw-tokens',path],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out,err = p.communicate()
+
+ tokens = []
+ collect = None
+ for ln in err.split('\n'):
+ # Silly programmers refuse to print in simple machine readable
+ # formats. Whatever.
+ if collect is None:
+ collect = ln
+ else:
+ collect = collect + '\n' + ln
+ if 'Loc=<' in ln and ln.endswith('>'):
+ ln,collect = collect,None
+ tokens.append(Token(*kTokenRE.match(ln).groups()))
+
+ return tokens
+
+###
+
+class TMBDDelta(DeltaAlgorithm):
+ def __init__(self, testProgram, tokenLists, log):
+ def patchName(name, suffix):
+ base,ext = os.path.splitext(name)
+ return base + '.' + suffix + ext
+ super(TMBDDelta, self).__init__()
+ self.testProgram = testProgram
+ self.tokenLists = tokenLists
+ self.tempFiles = [patchName(f,'tmp')
+ for f,_ in self.tokenLists]
+ self.targetFiles = [patchName(f,'ok')
+ for f,_ in self.tokenLists]
+ self.log = log
+ self.numTests = 0
+
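+    # Note: a "change" is a (file index, token index) pair; writeFiles()
+    # materializes the currently selected subset of tokens for each file.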
+ def writeFiles(self, changes, fileNames):
+ assert len(fileNames) == len(self.tokenLists)
+ byFile = [[] for i in self.tokenLists]
+ for i,j in changes:
+ byFile[i].append(j)
+
+ for i,(file,tokens) in enumerate(self.tokenLists):
+ f = open(fileNames[i],'w')
+ for j in byFile[i]:
+ f.write(tokens[j])
+ f.close()
+
+ return byFile
+
+ def test(self, changes):
+ self.numTests += 1
+
+ byFile = self.writeFiles(changes, self.tempFiles)
+
+ if self.log:
+ print >>sys.stderr, 'TEST - ',
+ if self.log > 1:
+ for i,(file,_) in enumerate(self.tokenLists):
+ indices = byFile[i]
+ if i:
+ sys.stderr.write('\n ')
+ sys.stderr.write('%s:%d tokens: [' % (file,len(byFile[i])))
+ prev = None
+ for j in byFile[i]:
+ if prev is None or j != prev + 1:
+ if prev:
+ sys.stderr.write('%d][' % prev)
+ sys.stderr.write(str(j))
+ sys.stderr.write(':')
+ prev = j
+ if byFile[i]:
+ sys.stderr.write(str(byFile[i][-1]))
+ sys.stderr.write('] ')
+ else:
+ print >>sys.stderr, ', '.join(['%s:%d tokens' % (file, len(byFile[i]))
+ for i,(file,_) in enumerate(self.tokenLists)]),
+
+ p = subprocess.Popen([self.testProgram] + self.tempFiles)
+ res = p.wait() == 0
+
+ if res:
+ self.writeFiles(changes, self.targetFiles)
+
+ if self.log:
+ print >>sys.stderr, '=> %s' % res
+ else:
+ if res:
+ print '\nSUCCESS (%d tokens)' % len(changes)
+ else:
+ sys.stderr.write('.')
+
+ return res
+
+ def run(self):
+ res = super(TMBDDelta, self).run([(i,j)
+ for i,(file,tokens) in enumerate(self.tokenLists)
+ for j in range(len(tokens))])
+ self.writeFiles(res, self.targetFiles)
+ if not self.log:
+ print >>sys.stderr
+ return res
+
+def tokenBasedMultiDelta(program, files, log):
+ # Read in the lists of tokens.
+ tokenLists = [(file, [t.data for t in getTokens(file)])
+ for file in files]
+
+ numTokens = sum([len(tokens) for _,tokens in tokenLists])
+ print "Delta on %s with %d tokens." % (', '.join(files), numTokens)
+
+ tbmd = TMBDDelta(program, tokenLists, log)
+
+ res = tbmd.run()
+
+ print "Finished %s with %d tokens (in %d tests)." % (', '.join(tbmd.targetFiles),
+ len(res),
+ tbmd.numTests)
+
+def main():
+ from optparse import OptionParser, OptionGroup
+ parser = OptionParser("%prog <test program> {files+}")
+ parser.add_option("", "--debug", dest="debugLevel",
+ help="set debug level [default %default]",
+ action="store", type=int, default=0)
+ (opts, args) = parser.parse_args()
+
+ if len(args) <= 1:
+ parser.error('Invalid number of arguments.')
+
+ program,files = args[0],args[1:]
+
+    tokenBasedMultiDelta(program, files, log=opts.debugLevel)
+
+if __name__ == '__main__':
+ try:
+ main()
+ except KeyboardInterrupt:
+ print >>sys.stderr,'Interrupted.'
+ os._exit(1) # Avoid freeing our giant cache.
diff --git a/utils/ubiviz b/utils/ubiviz
new file mode 100755
index 0000000..1582797
--- /dev/null
+++ b/utils/ubiviz
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This script reads visualization data emitted by the static analyzer for
+# display in Ubigraph.
+#
+##===----------------------------------------------------------------------===##
+
+import xmlrpclib
+import sys
+
+def Error(message):
+ print >> sys.stderr, 'ubiviz: ' + message
+ sys.exit(1)
+
+def StreamData(filename):
+ file = open(filename)
+ for ln in file:
+ yield eval(ln)
+ file.close()
+
+def Display(G, data):
+ action = data[0]
+ if action == 'vertex':
+ vertex = data[1]
+ G.new_vertex_w_id(vertex)
+ for attribute in data[2:]:
+ G.set_vertex_attribute(vertex, attribute[0], attribute[1])
+ elif action == 'edge':
+ src = data[1]
+ dst = data[2]
+ edge = G.new_edge(src,dst)
+ for attribute in data[3:]:
+ G.set_edge_attribute(edge, attribute[0], attribute[1])
+ elif action == "vertex_style":
+ style_id = data[1]
+ parent_id = data[2]
+ G.new_vertex_style_w_id(style_id, parent_id)
+ for attribute in data[3:]:
+ G.set_vertex_style_attribute(style_id, attribute[0], attribute[1])
+ elif action == "vertex_style_attribute":
+ style_id = data[1]
+ for attribute in data[2:]:
+ G.set_vertex_style_attribute(style_id, attribute[0], attribute[1])
+ elif action == "change_vertex_style":
+ vertex_id = data[1]
+ style_id = data[2]
+ G.change_vertex_style(vertex_id,style_id)
+
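+# Each input file is expected to contain one Python tuple literal per line;
+# hypothetical example data, for illustration only:
+#
+#   ('vertex', 1, ('color', '#ff0000'))
+#   ('edge', 1, 2, ('arrow', 'true'))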
+def main(args):
+ if len(args) == 0:
+ Error('no input files')
+
+ server = xmlrpclib.Server('http://127.0.0.1:20738/RPC2')
+ G = server.ubigraph
+
+ for arg in args:
+ G.clear()
+ for x in StreamData(arg):
+ Display(G,x)
+
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
+
+ \ No newline at end of file