#!/usr/bin/env python
"""
This is a helper script for converting Xcode Server performance test data into
the format used for submission to an LNT (http://lnt.llvm.org) server.
"""
import collections
import datetime
import json
import os
import platform
import plistlib
import pprint
from optparse import OptionParser, OptionGroup
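

# dumpReport() emits an LNT report in the "simple" schema: one JSON object
# with "Machine", "Run", and "Tests" sections, where each test is named
# "simple.<name>.wall" and carries the raw wall-clock samples.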
def dumpReport(logs, results, opts):
    startTime = min(data['timeStarted']
                    for data in logs)
    endTime = max(data['timeStopped']
                  for data in logs)

    # Create the machine info.
    report = collections.OrderedDict()
    report["Machine"] = machine = collections.OrderedDict()
    machine["Name"] = opts.machine_name
    machine["Info"] = {}

    # Create the run information.
    report["Run"] = run = collections.OrderedDict()
    run["Info"] = collections.OrderedDict()
    run["Info"]["tag"] = "simple"
    run["Info"]["run_order"] = str(opts.run_order)
    run["Info"]["revision"] = str(opts.revision)
    run["Start Time"] = startTime.strftime('%Y-%m-%d %H:%M:%S')
    run["End Time"] = endTime.strftime('%Y-%m-%d %H:%M:%S')

    # Add the test data.
    report["Tests"] = tests = []
    for (name, values) in sorted(results.items()):
        test = collections.OrderedDict()
        test["Name"] = "simple.%s.wall" % (name,)
        test["Info"] = {}
        test["Data"] = values
        tests.append(test)

    # Write the report.
    with open(opts.output_path, 'w') as f:
        json.dump(report, f, indent=4)


def main():
    parser = OptionParser(
        "%prog [options] <input activity logs>")
    parser.add_option("", "--machine-name", dest="machine_name", type='str',
                      help="Machine name to use in submission [%default]",
                      action="store", default=platform.uname()[1])
    parser.add_option("", "--run-order", dest="run_order", metavar="STR",
                      help="String to use to identify and order this run",
                      action="store", type=str, default=None)
    parser.add_option("", "--revision", dest="revision", metavar="STR",
                      help="Revision string to include with the run",
                      action="store", type=str, default=None)
    parser.add_option("", "--limit-samples", dest="limit_samples", metavar="N",
                      help="If provided, limit to the first N samples",
                      action="store", type=int, default=None)
    parser.add_option("", "--filter-minimum", dest="filter_minimum",
                      metavar="MIN",
                      help="If provided, ignore samples below MIN",
                      action="store", type=float, default=None)
    parser.add_option("", "--output-path", dest="output_path", metavar="PATH",
                      help="Path to write the output report to.",
                      action="store", type=str, default=None)
    opts, args = parser.parse_args()

    if not opts.run_order:
        parser.error("--run-order is required")
    if not opts.revision:
        parser.error("--revision is required")
    if not opts.output_path:
        parser.error("--output-path is required")
    if len(args) == 0:
        parser.error("invalid number of arguments")
    logPaths = args

    # Load and merge performance data from each log:
    performanceResults = {}
    logs = []
    for logPath in logPaths:
        info_plist = os.path.join(logPath, 'Info.plist')
        info = plistlib.readPlist(info_plist)
        for action in info['Actions']:
            logs.append({'timeStarted': action['StartedTime'],
                         'timeStopped': action['EndedTime']})

        test_summaries_plist = os.path.join(logPath, 'TestSummaries.plist')
        test_summaries = plistlib.readPlist(test_summaries_plist)
        if test_summaries['FormatVersion'] != '1.1':
            raise RuntimeError('Unknown test summaries format version: %s'
                               % test_summaries['FormatVersion'])
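
        # Recursively walk the TestSummaries tree: sections carrying
        # PerformanceMetrics are leaves whose wall-clock measurements we
        # record, while sections with Subtests recurse with the current
        # section's name appended to the lookup path.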
        def visit(section, testNames):
            if section.get('PerformanceMetrics'):
                values = []
                for performanceMetrics in section['PerformanceMetrics']:
                    if performanceMetrics['Identifier'] == 'com.apple.XCTPerformanceMetric_WallClockTime':
                        values = performanceMetrics['Measurements']
                        values = [round(x, 6) for x in values]
                className = testNames[-1]
                testBundleName = testNames[-2]
                name = '%s %s' % (className, section['TestName'].replace('()', ''))
                if className != testBundleName:
                    name = '%s.%s' % (testBundleName, name)
                performanceResults['-[%s]' % name] = values
            elif section.get('Subtests'):
                for subtest in section['Subtests']:
                    visit(subtest, testNames + [section['TestName'].replace('.xctest', '')])

        for testable_summaries in test_summaries['TestableSummaries']:
            for test in testable_summaries['Tests']:
                visit(test, [test['TestName']])

    # Dump the data to an LNT report.
    dumpReport(logs, performanceResults, opts)


if __name__ == '__main__':
    main()