# Copyright (c) 2009, 2011, ETH Zurich.
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
import sys
import os
import optparse
import fnmatch
import datetime
import getpass
import codecs
import traceback

from machines import MachineFactory
# check interpreter version to avoid confusion over syntax/module errors
if sys.version_info < (2, 6):
    sys.stderr.write('Error: Python 2.6 or greater is required\n')
    sys.exit(1)
# harness-internal modules, imported after the interpreter version check
import harness
import debug
import checkout
import builds
import tests
from tests.common import TimeoutError
from socket import gethostname
# junit_xml is optional: it is only needed for the --xml report option
try:
    from junit_xml import TestSuite, TestCase
    have_junit_xml = True
except ImportError:
    have_junit_xml = False
def list_all():
    print 'Build types:\t', ', '.join([b.name for b in builds.all_builds])
    print 'Machines:\t', ', '.join([m for m in MachineFactory.machineFactories.keys()])
    print 'Tests:'
    for t in sorted(tests.all_tests, key=lambda test: test.name):
        print '  %-20s %s' % (t.name, (t.__doc__ or '').strip())

def parse_args():
    p = optparse.OptionParser(
        usage='Usage: %prog [options] SOURCEDIR RESULTDIR',
        description='Barrelfish regression/benchmark harness')
    g = optparse.OptionGroup(p, 'Basic options')
    g.add_option('-b', '--build', action='append', dest='buildspecs',
                 metavar='BUILD', help='build types to perform [default: test]')
    g.add_option('-B', '--buildbase', dest='buildbase', metavar='DIR',
                 help='place builds under DIR [default: SOURCEDIR/builds]')
    g.add_option('-e', '--existingbuild', dest='existingbuild', metavar='DIR',
                 help='existing build directory (may not be used with -b)')
    g.add_option('-m', '--machine', action='append', dest='machinespecs',
                 metavar='MACHINE', help='victim machines to use')
    g.add_option('-t', '--test', action='append', dest='testspecs',
                 metavar='TEST', help='tests/benchmarks to run')
    g.add_option('-c', '--comment', dest='comment',
                 help='comment to store with all collected data')
    g.add_option('-x', '--xml', dest='xml', action='store_true',
                 help='output summary of tests in JUnit XML format')
    p.add_option_group(g)
    g = optparse.OptionGroup(p, 'Debugging options')
    g.add_option('-L', '--listall', action='store_true', dest='listall',
                 help='list available builds, machines and tests')
    debug.addopts(g, 'debuglevel')
    g.add_option('-k', '--keepgoing', action='store_true', dest='keepgoing',
                 help='attempt to continue on errors')
    p.add_option_group(g)
    p.set_defaults(debuglevel=debug.NORMAL)

    options, args = p.parse_args()

    debug.current_level = options.debuglevel

    if options.listall:
        list_all()
        sys.exit(0)

    if len(args) != 2:
        p.error('source and results directories must be specified')
    options.sourcedir, options.resultsdir = args

    # determine default buildbase if needed
    if options.buildbase is None:
        options.buildbase = os.path.join(options.sourcedir, 'builds')

    # check validity of source and results dirs
    if not os.path.isdir(os.path.join(options.sourcedir, 'hake')):
        p.error('invalid source directory %s' % options.sourcedir)
    if not (os.path.isdir(options.resultsdir)
            and os.access(options.resultsdir, os.W_OK)):
        p.error('invalid results directory %s' % options.resultsdir)

    if options.xml and not have_junit_xml:
        p.error('--xml requires junit-xml.\n'
                'Please install junit-xml through pip or easy_install')

    def _lookup(spec, classes, nameFn=lambda c: c.name.lower()):
        return [c for c in classes if fnmatch.fnmatch(nameFn(c), spec)]
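    # _lookup matches a shell-style glob (e.g. 'memtest*') against the
    # lowercased names of the given build, machine or test classes.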

    # resolve and instantiate all builds
    if options.existingbuild:
        if options.buildspecs:
            p.error('existing build directory cannot be used together'
                    ' with build types (-b)')
        options.builds = [builds.existingbuild(options, options.existingbuild)]
        options.buildbase = options.existingbuild
    else:
        options.builds = []
        if not options.buildspecs:
            options.buildspecs = ['test']
        for spec in options.buildspecs:
            matches = _lookup(spec, builds.all_builds)
            if matches == []:
                p.error('no builds match "%s" (try -L for a list)' % spec)
            options.builds.extend(
                [b for b in matches if b not in options.builds])
    options.builds = [b(options) for b in options.builds]

    # resolve and instantiate all machines
    if options.machinespecs is None:
        p.error('no machines specified')
    options.machines = []
    for spec in options.machinespecs:
        matches = _lookup(spec, MachineFactory.machineFactories,
                          nameFn=lambda fac: fac.lower())
        if matches == []:
            p.error('no machines match "%s" (try -L for a list)' % spec)
        options.machines.extend(
            [m for m in matches if m not in options.machines])
    options.machines = [MachineFactory.createMachineByName(m, options)
                        for m in options.machines]

    # resolve and instantiate all tests
    if options.testspecs:
        options.tests = []
        for spec in options.testspecs:
            matches = _lookup(spec, tests.all_tests)
            if matches == []:
                p.error('no tests match "%s" (try -L for a list)' % spec)
            options.tests.extend(
                [t for t in matches if t not in options.tests])
    else:
        p.error('no tests specified (try -t memtest if unsure)')
    options.tests = [t(options) for t in options.tests]

    debug.verbose('Host: ' + gethostname())
    debug.verbose('Builds: ' + ', '.join([b.name for b in options.builds]))
    debug.verbose('Machines: ' + ', '.join([m.getName() for m in options.machines]))
    debug.verbose('Tests: ' + ', '.join([t.name for t in options.tests]))

    return options


class Scalebench(object):
    def __init__(self, options):
        self._harness = harness.Harness()
        self._options = options

    def make_results_dir(self, build, machine, test):
        # Create a unique directory for the output from this test
        timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
        dirname = '-'.join([test.name, build.name, machine.getName(), timestamp])
        path = os.path.join(self._options.resultsdir,
                            str(datetime.datetime.now().year), dirname)
        debug.verbose('create result directory %s' % path)
        os.makedirs(path)
        return path

    def make_run_dir(self, build, machine):
        # Create a unique directory for the output from this run
        timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
        dirname = '-'.join([build.name, machine.getName(), timestamp])
        path = os.path.join(self._options.resultsdir,
                            str(datetime.datetime.now().year), dirname)
        debug.verbose('create result directory %s' % path)
        os.makedirs(path)
        return path
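
    # Result directories are grouped under RESULTDIR/<year>/ and named
    # <test>-<build>-<machine>-<timestamp> (per test) or
    # <build>-<machine>-<timestamp> (per run, used for the XML report),
    # e.g. results/2011/memtest-test-mymachine-20110801-153042 (illustrative).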

    def write_description(self, checkout, build, machine, test, path):
        debug.verbose('write description file')
        with codecs.open(os.path.join(path, 'description.txt'), 'w', 'utf-8') as f:
            f.write('test: %s\n' % test.name)
            f.write('revision: %s\n' % checkout.get_revision())
            f.write('build: %s\n' % build.name)
            f.write('machine: %s\n' % machine.getName())
            f.write('start time: %s\n' % datetime.datetime.now())
            f.write('user: %s\n' % getpass.getuser())
            for item in checkout.get_meta().items():
                f.write("%s: %s\n" % item)

            if self._options.comment:
                f.write('\n' + self._options.comment + '\n')

        # store any uncommitted changes alongside the description
        diff = checkout.get_diff()
        if diff:
            with codecs.open(os.path.join(path, 'changes.patch'), 'w', 'utf-8') as f:
                f.write(diff)

    def write_errorcase(self, build, machine, test, path, msg, start_ts, end_ts):
        delta = end_ts - start_ts
        tc = { 'name': test.name,
               'time_elapsed': delta.total_seconds(),
               'class': machine.getName(),
               'stdout': '\n'.join(self._harness.process_output(test, path)),
               'passed': False }
        if not have_junit_xml:
            return tc
        # with junit-xml available, report the error as a JUnit test case
        ju_tc = TestCase(name=tc['name'], classname=tc['class'],
                         elapsed_sec=tc['time_elapsed'], stdout=tc['stdout'])
        ju_tc.add_error_info(message=msg)
        return ju_tc

    def write_testcase(self, build, machine, test, path, passed,
                       start_ts, end_ts):
        delta = end_ts - start_ts
        tc = { 'name': test.name,
               'class': machine.getName(),
               'time_elapsed': delta.total_seconds(),
               'stdout': '\n'.join(self._harness.process_output(test, path)),
               'passed': passed }
        if not have_junit_xml:
            return tc
        ju_tc = TestCase(name=tc['name'], classname=tc['class'],
                         elapsed_sec=tc['time_elapsed'], stdout=tc['stdout'])
        if not passed:
            errorstr = 'Test failed'
            errors = self._harness.extract_errors(test, path)
            if errors is not None and len(errors) > 0:
                errorstr += ': ' + ''.join([unicode(l, errors='replace') for l in errors])
            ju_tc.add_failure_info(message=errorstr)
        return ju_tc

    def testcase_passed(self, testcase):
        if have_junit_xml:
            return not (testcase.is_failure() or testcase.is_error()
                        or testcase.is_skipped())
        return testcase['passed']

    def testcase_name(self, testcase):
        if have_junit_xml:
            return testcase.name
        return testcase['name']
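
    # Note: entries in 'testcases' are junit_xml.TestCase objects when the
    # junit-xml package is available and plain dicts otherwise; the two
    # accessors above hide that difference from the reporting code.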

    def write_xml_report(self, testcases, path):
        assert have_junit_xml
        debug.log("producing junit-xml report")
        ts = TestSuite('harness suite', testcases)
        with open(os.path.join(path, 'report.xml'), 'w') as f:
            TestSuite.to_file(f, [ts], prettyprint=False)

    def run_test(self, build, machine, test, co, testcases):
        debug.log('running test %s on %s, cwd is %s'
                  % (test.name, machine.getName(), os.getcwd()))
        path = self.make_results_dir(build, machine, test)
        self.write_description(co, build, machine, test, path)
        start_timestamp = datetime.datetime.now()
        try:
            self._harness.run_test(build, machine, test, path)
        except TimeoutError:
            msg = 'Timeout while running test'
            if self._options.keepgoing:
                msg += ' (attempting to continue)'
            end_timestamp = datetime.datetime.now()
            testcases.append(self.write_errorcase(build, machine, test, path,
                msg + "\n" + traceback.format_exc(), start_timestamp,
                end_timestamp))
            # report the timeout as a failure; the caller decides whether to
            # carry on (-k) or stop
            return False
        except Exception:
            msg = 'Exception while running test'
            if self._options.keepgoing:
                msg += ' (attempting to continue):'
            end_timestamp = datetime.datetime.now()
            testcases.append(self.write_errorcase(build, machine, test, path,
                msg + "\n" + traceback.format_exc(), start_timestamp,
                end_timestamp))
            traceback.print_exc()
            return False

        end_timestamp = datetime.datetime.now()
        debug.log('test complete, processing results')
        try:
            passed = self._harness.process_results(test, path)
            debug.log('result: %s' % ("PASS" if passed else "FAIL"))
        except Exception:
            msg = 'Exception while processing results'
            if self._options.keepgoing:
                msg += ' (attempting to continue):'
            debug.log(msg)
            if self._options.keepgoing:
                traceback.print_exc()
            else:
                raise
            passed = False

        testcases.append(
            self.write_testcase(build, machine, test, path, passed,
                                start_timestamp, end_timestamp))
        return passed

    def execute_tests(self, co, buildarchs, testcases):
        for build in self._options.builds:
            debug.log('starting build: %s' % build.name)
            build.configure(co, buildarchs)
            for machine in self._options.machines:
                for test in self._options.tests:
                    passed = self.run_test(build, machine, test, co, testcases)
                    if not passed and not self._options.keepgoing:
                        # Stop looping tests if keep going is not true and
                        # there was an error
                        break

                # produce JUnit style xml report if requested
                if self._options.xml:
                    path = self.make_run_dir(build, machine)
                    self.write_xml_report(testcases, path)

                # Did we encounter an error? If so, give up on the remaining
                # machines and builds as well.
                if not passed and not self._options.keepgoing:
                    return

    def main(self):
        retval = True  # everything was OK
        co = checkout.create_for_dir(self._options.sourcedir)

        # determine build architectures
        buildarchs = set()
        for m in self._options.machines:
            buildarchs |= set(m.get_buildarchs())
        buildarchs = list(buildarchs)

        testcases = []
        self.execute_tests(co, buildarchs, testcases)

        pcount = len([t for t in testcases if self.testcase_passed(t)])
        debug.log('\n%d/%d tests passed' % (pcount, len(testcases)))
        if pcount < len(testcases):
            debug.log('Failed tests:')
            for t in [t for t in testcases if not self.testcase_passed(t)]:
                debug.log(' * %s' % self.testcase_name(t))
            # return False if we had test failures
            retval = False

        debug.log('all done!')
        return retval


if __name__ == "__main__":
    options = parse_args()
    scalebench = Scalebench(options)
    if not scalebench.main():
        sys.exit(1)  # one or more tests failed
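
# Example invocation (script name, machine name and paths are illustrative):
#   python scalebench.py -b test -m mymachine -t memtest ~/barrelfish ~/results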