blob: 44ac6682de086134f43f460bfc4b4af71eea6626 [file] [log] [blame]
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs an isolate bundled Telemetry benchmark.
This script attempts to emulate the contract of gtest-style tests
invoked via recipes. The main contract is that the caller passes the
json is written to that file in the format detailed here:
Optional argument:
is a double-colon-separated ("::") list of test names, to run just that subset
of tests. This list is parsed by this harness and sent down via the
--story-filter argument.
This script is intended to be the base command invoked by the isolate,
followed by a subsequent Python script. It could be generalized to
invoke an arbitrary executable.
To test changes to this script, please run
cd tools/perf
./run_tests ScriptsSmokeTest.testRunTelemetryBenchmarkAsGoogletest
import argparse
import json
import os
import shutil
import sys
import tempfile
import traceback
import common
# Add src/testing/ into sys.path for importing xvfb.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import xvfb
import test_env
# Unfortunately we need to copy these variables from ../
# Importing it and using its get_sandbox_env breaks test runs on Linux
# (it seems to unset DISPLAY).
CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox'
def main():
  """Parses the IsolatedScriptTest arguments and runs the benchmark.

  Returns:
    The benchmark's return code, so the caller (the isolate) can propagate
    pass/fail status.
  """
  parser = argparse.ArgumentParser()
  # argparse opens the output file for us; the gtest-style JSON results are
  # dumped into it at the end of this function.
  parser.add_argument(
      '--isolated-script-test-output', type=argparse.FileType('w'),
      required=True)
  parser.add_argument(
      '--isolated-script-test-chartjson-output', required=False)
  parser.add_argument(
      '--isolated-script-test-perf-output', required=False)
  parser.add_argument(
      '--isolated-script-test-filter', type=str, required=False)
  parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
  parser.add_argument('--output-format', action='append')
  args, rest_args = parser.parse_known_args()
  # Forward every requested output format down to Telemetry.
  for output_format in args.output_format:
    rest_args.append('--output-format=' + output_format)
  rc, perf_results, json_test_results, _ = run_benchmark(
      args, rest_args, 'histograms' in args.output_format)

  if perf_results:
    # Prefer the perf-output flag; fall back to the older chartjson flag so
    # callers still passing it keep working. If neither was given, the perf
    # results are simply not written out.
    if args.isolated_script_test_perf_output:
      filename = args.isolated_script_test_perf_output
    elif args.isolated_script_test_chartjson_output:
      filename = args.isolated_script_test_chartjson_output
    else:
      filename = None
    if filename is not None:
      with open(filename, 'w') as perf_results_output_file:
        json.dump(perf_results, perf_results_output_file)

  json.dump(json_test_results, args.isolated_script_test_output)
  return rc
def run_benchmark(args, rest_args, histogram_results):
  """Run benchmark with args.

  Args:
    args: the option object resulted from parsing commandline args required
      for the IsolatedScriptTest contract (see the recipe-side step
      definitions for the full list of flags).
    rest_args: the args (list of strings) for running Telemetry benchmark.
    histogram_results: a boolean describes whether to output histograms format
      for the benchmark.

  Returns: a tuple of (rc, perf_results, json_test_results, benchmark_log)
    rc: the return code of benchmark
    perf_results: json object contains the perf test results
    json_test_results: json object contains the Pass/Fail data of the
      benchmark.
    benchmark_log: string contains the stdout/stderr of the benchmark run.
  """
  # These arguments cannot go into main()'s parser because the recipe side
  # does not yet support them there. Note that ideally we would use
  # common.BaseIsolatedScriptArgsAdapter, but this will take a good deal of
  # refactoring to accomplish.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--isolated-script-test-repeat', type=int, required=False)
  parser.add_argument(
      '--isolated-script-test-launcher-retry-limit', type=int, required=False,
      choices=[0])  # Telemetry does not support retries.
  parser.add_argument(
      '--isolated-script-test-also-run-disabled-tests',
      default=False, action='store_true', required=False)
  # Parse leftover args not already parsed in main(); results are merged into
  # the same namespace object.
  args, rest_args = parser.parse_known_args(args=rest_args, namespace=args)

  env = os.environ.copy()
  env['CHROME_HEADLESS'] = '1'
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
  # all the time on Linux.
  env['CHROME_DEVEL_SANDBOX'] = CHROME_SANDBOX_PATH

  tempfile_dir = tempfile.mkdtemp('telemetry')
  benchmark_log = ''
  stdoutfile = os.path.join(tempfile_dir, 'benchmark_log.txt')
  valid = True
  num_failures = 0
  perf_results = None
  json_test_results = None
  results = None

  cmd_args = rest_args
  if args.isolated_script_test_filter:
    filter_list = common.extract_filter_list(args.isolated_script_test_filter)
    # Need to convert this to a valid regex for Telemetry's --story-filter.
    filter_regex = '(' + '|'.join(filter_list) + ')'
    cmd_args.append('--story-filter=' + filter_regex)
  if args.isolated_script_test_repeat:
    cmd_args.append('--pageset-repeat=' + str(args.isolated_script_test_repeat))
  if args.isolated_script_test_also_run_disabled_tests:
    cmd_args.append('--also-run-disabled-tests')
  cmd_args.append('--output-dir=' + tempfile_dir)
  cmd = [sys.executable] + cmd_args
  rc = 1  # Set default returncode in case there is an exception.
  try:
    if args.xvfb:
      rc = xvfb.run_executable(cmd, env=env, stdoutfile=stdoutfile)
    else:
      rc = test_env.run_command_with_output(cmd, env=env, stdoutfile=stdoutfile)

    with open(stdoutfile) as f:
      benchmark_log = f.read()

    # If we have also output chartjson read it in and return it.
    # results-chart.json is the file name output by telemetry when the
    # chartjson output format is included.
    tempfile_name = None
    if histogram_results:
      tempfile_name = os.path.join(tempfile_dir, 'histograms.json')
    else:
      tempfile_name = os.path.join(tempfile_dir, 'results-chart.json')
    if tempfile_name is not None:
      with open(tempfile_name) as f:
        perf_results = json.load(f)

    # test-results.json is the file name output by telemetry when the
    # json-test-results format is included.
    tempfile_name = os.path.join(tempfile_dir, 'test-results.json')
    with open(tempfile_name) as f:
      json_test_results = json.load(f)
    num_failures = json_test_results['num_failures_by_type'].get('FAIL', 0)
    # The run is considered valid if the benchmark exited cleanly, or if it
    # failed but reported which stories failed.
    valid = bool(rc == 0 or num_failures != 0)
  except Exception:
    traceback.print_exc()
    if results:
      print('results, which possibly caused exception: %s' % json.dumps(
          results, indent=2))
    valid = False
  finally:
    # Add ignore_errors=True because otherwise rmtree may fail due to leaky
    # processes of tests that are still holding opened handles to files under
    # |tempfile_dir|.
    shutil.rmtree(tempfile_dir, ignore_errors=True)

  if not valid and num_failures == 0:
    if rc == 0:
      rc = 1  # Signal an abnormal exit.
  return rc, perf_results, json_test_results, benchmark_log
def main_compile_targets(args):
  """Writes the list of extra compile targets (none) to args.output.

  This is not really a "script test" so does not need to manually add
  any additional compile targets.
  """
  args.output.write(json.dumps([]))
if __name__ == '__main__':
  # Conform minimally to the protocol defined by ScriptTest: when invoked
  # with 'compile_targets', dispatch through common.run_script; otherwise
  # run the benchmark directly.
  if 'compile_targets' in sys.argv:
    funcs = {
        'run': None,
        'compile_targets': main_compile_targets,
    }
    sys.exit(common.run_script(sys.argv[1:], funcs))
  sys.exit(main())