import logging
import os
import site
import sys
# Set up logging.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
file_log = logging.FileHandler("%s/test.log" % config.test_exec_root,
                               mode="w")
file_log.setLevel(logging.DEBUG)
logger.addHandler(file_log)
console_log = logging.StreamHandler()
console_log.setLevel(logging.WARNING)
logger.addHandler(console_log)
# Load test-suite litsupport code
site.addsitedir(os.path.dirname(__file__))
import litsupport.test
config.name = 'test-suite'
config.test_format = litsupport.test.TestSuiteTest()
config.suffixes = ['.test']
config.excludes = ['ABI-Testsuite']
config.traditional_output = False
config.single_source = False
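# Forward the SSH agent socket and honor an optional remote_host parameter so
# the 'remote' module can run tests on another machine over ssh.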
if 'SSH_AUTH_SOCK' in os.environ:
    config.environment['SSH_AUTH_SOCK'] = os.environ['SSH_AUTH_SOCK']
if not hasattr(config, 'remote_host'):
    config.remote_host = ""
config.remote_host = lit_config.params.get('remote_host', config.remote_host)
if config.remote_host:
    config.test_modules.append('remote')
# Load previous test results so we can skip tests that did not change.
previous_results_file = lit_config.params.get('previous', None)
if previous_results_file:
    import json
    config.previous_results = json.load(open(previous_results_file))
else:
    config.previous_results = None
# Pass on some options to context object:
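# perf_profile_events is the comma-separated counter list handed to the perf
# module (presumably for 'perf record') when profiling is enabled; it can be
# overridden with --param perf_profile_events=...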
config.perf_profile_events = "cycles,cache-misses,branch-misses,instructions"
if lit_config.params.get('perf_profile_events'):
    config.perf_profile_events = lit_config.params.get('perf_profile_events')
# Find and initialize lit modules.
if lit_config.params.get('profile') == 'perf':
    config.test_modules += ['perf']
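# Example invocation (parameter values are illustrative):
#   llvm-lit -sv --param remote_host=mydevice --param previous=results.json \
#            --param profile=perf --param perf_profile_events=cycles,instructions .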