#!/usr/bin/env python
"""Utility script for submitting benchmark results to an LNT server."""
from __future__ import print_function
import datetime
import errno
import json
import optparse
import subprocess
import sys
import urllib
import urllib2
# Test status codes.
PASS = 0
FAIL = 1
XFAIL = 2
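# Note: only FAIL is ever written into a report below; PASS and XFAIL are
# currently unused by this script.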
###

def capture_with_result(args, include_stderr=False):
    """capture_with_result(command) -> (output, exit code)

    Run the given command (an argv list) and return its standard output and
    exit code.
    """
    stderr = subprocess.PIPE
    if include_stderr:
        stderr = subprocess.STDOUT
    try:
        p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=stderr)
    except OSError as e:
        if e.errno == errno.ENOENT:
            sys.exit('no such file or directory: %r when running %s.' % (
                args[0], ' '.join(args)))
        raise
    out, _ = p.communicate()
    return out, p.wait()

def capture(args, include_stderr=False):
    """capture(command) - Run the given command (an argv list) and return its
    standard output.
    """
    return capture_with_result(args, include_stderr)[0]

def timestamp():
    return datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')

###

def submit_results_to_server(results_data, submit_url):
    """Submit the given report data to an LNT server and return the parsed
    JSON response, or None if the response was not valid JSON.
    """
    # Submit the URL encoded data.
    data = urllib.urlencode({'input_data': results_data,
                             'commit': '1'})
    response = urllib2.urlopen(urllib2.Request(submit_url, data))
    result_data = response.read()

    # The result is expected to be a JSON object.
    try:
        return json.loads(result_data)
    except ValueError:
        import traceback
        print("Unable to load result, not a valid JSON object.")
        print()
        print("Traceback:")
        traceback.print_exc()
        print()
        print("Result:")
        print(result_data)
        return

###

def main():
    parser = optparse.OptionParser("""%prog [options] <results>""")
    parser.add_option("", "--submit", dest="submit_url", metavar="URL",
                      help="Submit results to the given URL",
                      action="store", type=str, default=None)
    parser.add_option("", "--output", dest="output", metavar="PATH",
                      help="Write raw report data to PATH",
                      action="store", type=str, default=None)
    parser.add_option("", "--machine-name", dest="machine_name",
                      metavar="NAME",
                      help="Set the machine name to embed in the report",
                      action="store", type=str, default=None)
    parser.add_option("", "--run-order", dest="run_order", metavar="REVISION",
                      help="Set the run order to embed in the report",
                      action="store", type=int, default=None)
    opts, args = parser.parse_args()

    if len(args) != 1:
        parser.error("incorrect number of arguments")
    # At least one of --submit or --output is required.
    if opts.submit_url is None and opts.output is None:
        parser.error("no action given (provide --submit or --output)")
    if opts.machine_name is None:
        parser.error("--machine-name is required")
    if opts.run_order is None:
        parser.error("--run-order is required")

    # Load the results data.
    results_path, = args
    with open(results_path) as f:
        data = json.load(f)
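
    # A minimal sketch of the lit-style input this script expects, based on
    # the fields read below (the values shown are hypothetical):
    #
    #   {
    #     "elapsed": 42.0,
    #     "tests": [
    #       {"name": "suite :: Foo", "code": "PASS",
    #        "metrics": {"compile_time": 1.5, "exec_time": 0.25}}
    #     ]
    #   }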

    # Compute some data not present in the 'lit' report.
    machine_name = opts.machine_name
    run_order = str(opts.run_order)

    # Estimate the end time as being now, and the start time as being that
    # minus the elapsed testing time.
    utcnow = datetime.datetime.utcnow()
    start_time = utcnow - datetime.timedelta(seconds=data['elapsed'])
    end_time = utcnow

    # Create the LNT report format.
    lnt_results = {}
    lnt_results['Machine'] = {
        'Name': machine_name,
        'Info': {
            'hardware': capture(["uname", "-m"], include_stderr=True).strip(),
            'name': capture(["uname", "-n"], include_stderr=True).strip(),
            'os': capture(["uname", "-sr"], include_stderr=True).strip(),
            'uname': capture(["uname", "-a"], include_stderr=True).strip(),
        }
    }
    # FIXME: Record source versions for LLVM, Swift, etc.?
    lnt_results['Run'] = {
        'Start Time': start_time.strftime('%Y-%m-%d %H:%M:%S'),
        'End Time': end_time.strftime('%Y-%m-%d %H:%M:%S'),
        'Info': {
            '__report_version__': '1',
            'tag': 'nts',
            'inferred_run_order': run_order,
            'run_order': run_order,
            'sw_vers': capture(['sw_vers'], include_stderr=True).strip(),
        }
    }
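    # Note: sw_vers exists only on macOS; on other platforms this capture()
    # call will abort the script via the ENOENT handling in
    # capture_with_result().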

    lnt_results['Tests'] = lnt_tests = []
    for test in data['tests']:
        # Ignore tests which have an unexpected status.
        code = test['code']
        if code not in ('PASS', 'XPASS', 'FAIL', 'XFAIL'):
            sys.stderr.write("ignoring test %r with result code %r\n" % (
                test['name'], code))
            continue

        # Extract the test name, which is encoded as 'suite :: name'.
        test_name = 'nts.%s' % (test['name'].split('::', 1)[1][1:],)

        # Convert this test to the 'nts' schema.
        compile_success = test['metrics'].get('compile_success', 1)
        compile_time = test['metrics']['compile_time']
        exec_success = test['metrics'].get('exec_success', 1)
        exec_time = test['metrics']['exec_time']

        # FIXME: Ensure the test success flags match the result code.
        # FIXME: The XFAIL handling here isn't going to be right.
        if not compile_success:
            lnt_tests.append({'Name': '%s.compile.status' % (test_name,),
                              'Info': {},
                              'Data': [FAIL]})
        if not exec_success:
            lnt_tests.append({'Name': '%s.exec.status' % (test_name,),
                              'Info': {},
                              'Data': [FAIL]})
        lnt_tests.append({'Name': '%s.compile' % (test_name,),
                          'Info': {},
                          'Data': [compile_time]})
        lnt_tests.append({'Name': '%s.exec' % (test_name,),
                          'Info': {},
                          'Data': [exec_time]})
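
    # Each accepted test thus contributes '<name>.compile' and '<name>.exec'
    # timing samples, plus a '.status' FAIL sample for any phase that failed,
    # e.g. (hypothetical values):
    #
    #   {'Name': 'nts.Foo.exec', 'Info': {}, 'Data': [0.25]}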

    # Create the report data.
    lnt_result_data = json.dumps(lnt_results, indent=2) + '\n'

    # Write the results, if requested.
    if opts.output:
        sys.stderr.write('%s: generating report: %r\n' % (
            timestamp(), opts.output))
        with open(opts.output, 'w') as f:
            f.write(lnt_result_data)

    # Submit the results to an LNT server, if requested.
    if opts.submit_url:
        submit_results_to_server(lnt_result_data, opts.submit_url)

if __name__ == '__main__':
    main()