#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Performance runner for d8.
Call e.g. with tools/ --arch ia32 some_suite.json
The suite json format is expected to be:
{
  "path": <relative path chunks to perf resources and main file>,
  "owners": [<list of email addresses of benchmark owners (required)>],
  "name": <optional suite name, file name is default>,
  "archs": [<architecture name for which this suite is run>, ...],
  "binary": <name of binary to run, default "d8">,
  "flags": [<flag to d8>, ...],
  "test_flags": [<flag to the test file>, ...],
  "run_count": <how often will this suite run (optional)>,
  "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
  "timeout": <how long test is allowed to run>,
  "timeout_XXX": <how long test is allowed to run for arch XXX>,
  "retry_count": <how many times to retry failures (in addition to first try)>,
  "retry_count_XXX": <how many times to retry failures for arch XXX>,
  "resources": [<js file to be moved to android device>, ...],
  "main": <main js perf runner file>,
  "results_regexp": <optional regexp>,
  "results_processor": <optional python results processor script>,
  "units": <the unit specification for the performance dashboard>,
  "process_size": <flag - collect maximum memory used by the process>,
  "tests": [
    {
      "name": <name of the trace>,
      "results_regexp": <optional more specific regexp>,
      "results_processor": <optional python results processor script>,
      "units": <the unit specification for the performance dashboard>,
      "process_size": <flag - collect maximum memory used by the process>,
    }, ...
  ]
}
The tests field can also nest other suites in arbitrary depth. A suite
with a "main" file is a leaf suite that can contain one more level of
tests.
A suite's results_regexp is expected to have one string place holder
"%s" for the trace name. A trace's results_regexp overwrites suite
defaults.
A suite's results_processor may point to an optional python script. If
specified, it is called after running the tests (with a path relative to the
suite level's path). It is expected to read the measurement's output text
on stdin and print the processed output to stdout.
The results_regexp will be applied to the processed output.
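As an illustration only (the suite name and output format below are made up,
not taken from any real suite), a results processor could be a tiny filter
of this shape:

  import sys
  for line in sys.stdin:
    # Rewrite raw output into the "<name>: <value>" form matched by a
    # results_regexp such as "^MySuite: (.+)$".
    if line.startswith('raw_score'):
      print('MySuite: %s' % line.split()[1])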
A suite without "tests" is considered a performance test itself.
Full example (suite with one runner):
{
  "path": ["."],
  "owners": [""],
  "flags": ["--expose-gc"],
  "test_flags": ["5"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "run_count_ia32": 3,
  "main": "run.js",
  "results_regexp": "^%s: (.+)$",
  "units": "score",
  "tests": [
    {"name": "Richards"},
    {"name": "DeltaBlue"},
    {"name": "NavierStokes",
     "results_regexp": "^NavierStokes: (.+)$"}
  ]
}
Full example (suite with several runners):
{
  "path": ["."],
  "owners": ["", ""],
  "flags": ["--expose-gc"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "units": "score",
  "tests": [
    {"name": "Richards",
     "path": ["richards"],
     "main": "run.js",
     "run_count": 3,
     "results_regexp": "^Richards: (.+)$"},
    {"name": "NavierStokes",
     "path": ["navier_stokes"],
     "main": "run.js",
     "results_regexp": "^NavierStokes: (.+)$"}
  ]
}
Path pieces are concatenated. D8 is always run with the suite's path as cwd.
The test flags are passed to the js test file after '--'.
"""
# for py2/py3 compatibility
from __future__ import print_function
from functools import reduce
from collections import OrderedDict
import copy
import json
import logging
import math
import argparse
import os
import re
import subprocess
import sys
import time
import traceback
from testrunner.local import android
from testrunner.local import command
from testrunner.local import utils
from testrunner.objects.output import Output, NULL_OUTPUT
try:
  basestring       # Python 2
except NameError:  # Python 3
  basestring = str
GENERIC_RESULTS_RE = re.compile(r'^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$')
RESULT_STDDEV_RE = re.compile(r'^\{([^\}]+)\}$')
RESULT_LIST_RE = re.compile(r'^\[([^\]]+)\]$')
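# Illustrative lines matched by the regexps above (values are made up):
#   GENERIC_RESULTS_RE: 'RESULT Octane: Richards= 26500 score'
#   RESULT_STDDEV_RE:   '{1.234}'
#   RESULT_LIST_RE:     '[22.3,20.8,22.1]'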
TOOLS_BASE = os.path.abspath(os.path.dirname(__file__))
def GeometricMean(values):
"""Returns the geometric mean of a list of values.
  The mean is calculated using log to avoid overflow.
  """
  values = list(map(float, values))
  return str(math.exp(sum(map(math.log, values)) / len(values)))
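# For example, GeometricMean([2, 8]) returns '4.0'.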
class ResultTracker(object):
"""Class that tracks trace/runnable results and produces script output.
  The output is structured like this:
  {
    "traces": [
      {
        "graphs": ["path", "to", "trace", "config"],
        "units": <string describing units, e.g. "ms" or "KB">,
        "results": [<list of values measured over several runs>],
        "stddev": <stddev of the value if measured by script or ''>
      },
      ...
    ],
    "runnables": [
      {
        "graphs": ["path", "to", "runnable", "config"],
        "durations": [<list of durations of each runnable run in seconds>],
        "timeout": <timeout configured for runnable in seconds>,
      },
      ...
    ],
    "errors": [<list of strings describing errors>],
    # These two fields are deprecated and will soon be removed.
    "timeouts": [<list of traces which have timed out at least once>],
    "near_timeouts": [<list of traces which have at least once run for longer
                       than 90% of the configured timeout>],
  }
  """
def __init__(self):
self.traces = {}
self.errors = []
# TODO(sergiyb): Deprecate self.timeouts/near_timeouts and compute them in
# the recipe based on self.runnable_durations. Also cleanup RunnableConfig
# by removing has_timeouts/has_near_timeouts there.
self.timeouts = []
self.near_timeouts = [] # > 90% of the max runtime
self.runnables = {}
def AddTraceResults(self, trace, results, stddev):
    if not in self.traces:
      self.traces[] = {
          'graphs': trace.graphs,
          'units': trace.units,
          'results': results,
          'stddev': stddev or '',
      }
      return

    existing_entry = self.traces[]
assert trace.graphs == existing_entry['graphs']
assert trace.units == existing_entry['units']
assert not (stddev and existing_entry['stddev'])
existing_entry['stddev'] = stddev
  def AddErrors(self, errors):
    self.errors.extend(errors)
def AddRunnableDurations(self, runnable, durations):
"""Adds a list of durations of the different runs of the runnable."""
    if not in self.runnables:
      self.runnables[] = {
          'graphs': runnable.graphs,
          'durations': durations,
          'timeout': runnable.timeout,
      }
      return

    existing_entry = self.runnables[]
assert runnable.timeout == existing_entry['timeout']
assert runnable.graphs == existing_entry['graphs']
def ToDict(self):
return {
        'traces': list(self.traces.values()),
        'errors': self.errors,
        'timeouts': self.timeouts,
        'near_timeouts': self.near_timeouts,
        'runnables': list(self.runnables.values()),
    }
def WriteToFile(self, file_name):
    with open(file_name, 'w') as f:
      f.write(json.dumps(self.ToDict()))
def __str__(self): # pragma: no cover
return json.dumps(self.ToDict(), indent=2, separators=(',', ': '))
class Measurement(object):
"""Represents a series of results of one trace.
  The results are from repetitive runs of the same executable. They are
  gathered by repeated calls to ConsumeOutput.
  """
def __init__(self, trace, results_regexp, stddev_regexp):
self.trace = trace
self.results_regexp = results_regexp
self.stddev_regexp = stddev_regexp
self.results = []
self.errors = []
self.stddev = ''
def ConsumeOutput(self, output):
    try:
      result =, output.stdout, re.M).group(1)
      self.results.append(str(float(result)))
    except ValueError:
      self.errors.append('Regexp "%s" returned a non-numeric for test %s.'
                         % (self.results_regexp,
    except:
      self.errors.append('Regexp "%s" did not match for test %s.'
                         % (self.results_regexp,

    if self.stddev_regexp and self.stddev:
      self.errors.append('Test %s should only run once since a stddev '
                         'is provided by the test.' %
    if self.stddev_regexp:
      try:
        self.stddev =
            self.stddev_regexp, output.stdout, re.M).group(1)
      except:
        self.errors.append('Regexp "%s" did not match for test %s.'
                           % (self.stddev_regexp,
def UpdateResults(self, result_tracker):
result_tracker.AddTraceResults(self.trace, self.results, self.stddev)
def GetResults(self):
return self.results
class NullMeasurement(object):
"""Null object to avoid having extra logic for configurations that don't
  require secondary run, e.g. CI bots.
  """

  def ConsumeOutput(self, output):
    pass

  def UpdateResults(self, result_tracker):
    pass

  def GetResults(self):
    return []
def Unzip(iterable):
left = []
right = []
  for l, r in iterable:
    left.append(l)
    right.append(r)
  return lambda: iter(left), lambda: iter(right)
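# For example, Unzip([(1, 'a'), (2, 'b')]) returns two callables yielding
# iterators over [1, 2] and ['a', 'b'] respectively.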
def RunResultsProcessor(results_processor, output, count):
# Dummy pass through for null-runs.
if output.stdout is None:
return output
# We assume the results processor is relative to the suite.
assert os.path.exists(results_processor)
  p = subprocess.Popen(
      [sys.executable, results_processor],
      stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  new_output = copy.copy(output)
  new_output.stdout, _ = p.communicate(input=output.stdout)'>>> Processed stdout (#%d):\n%s', count, output.stdout)
  return new_output
def AccumulateResults(
graph, output_iter, perform_measurement, calc_total, result_tracker):
"""Iterates over the output of multiple benchmark reruns and accumulates
  results for a configured list of traces.

  Args:
    graph: Parent GraphConfig for which results are to be accumulated.
    output_iter: Iterator over the output of each test run.
    perform_measurement: Whether to actually run tests and perform
                         measurements. This is needed so that we reuse this
                         script for both CI and trybot, but want to ignore
                         second run on CI without having to spread this logic
                         throughout the script.
    calc_total: Boolean flag to specify the calculation of a summary trace.
    result_tracker: ResultTracker object to be updated.
  """
  measurements = [trace.CreateMeasurement(perform_measurement)
                  for trace in graph.children]

  for output in output_iter():
    for measurement in measurements:
      measurement.ConsumeOutput(output)

  for measurement in measurements:
    measurement.UpdateResults(result_tracker)

  raw_results = [m.GetResults() for m in measurements]
  if not raw_results or not calc_total:
    return
# Assume all traces have the same structure.
  if len(set(map(len, raw_results))) != 1:
    result_tracker.AddErrors(
        ['Not all traces have the same number of results. Can not compute '
         'total for %s' %])
    return
# Calculate the geometric means for all traces. Above we made sure that
# there is at least one trace and that the number of results is the same
# for each trace.
n_results = len(raw_results[0])
total_results = [GeometricMean(r[i] for r in raw_results)
for i in range(0, n_results)]
total_trace = TraceConfig(
{'name': 'Total', 'units': graph.children[0].units}, graph, graph.arch)
result_tracker.AddTraceResults(total_trace, total_results, '')
class Node(object):
"""Represents a node in the suite tree structure."""
def __init__(self, *args):
self._children = []
  def AppendChild(self, child):
    self._children.append(child)

  @property
  def children(self):
    return self._children
class DefaultSentinel(Node):
"""Fake parent node with all default values."""
def __init__(self, binary = 'd8'):
super(DefaultSentinel, self).__init__()
self.binary = binary
self.run_count = 10
self.timeout = 60
self.retry_count = 0
self.path = []
self.graphs = []
self.flags = []
self.test_flags = []
self.process_size = False
self.resources = []
self.results_processor = None
self.results_regexp = None
self.stddev_regexp = None
    self.units = 'score' = False
self.owners = []
class GraphConfig(Node):
"""Represents a suite definition.
  Can either be a leaf or an inner node that provides default values.
  """
def __init__(self, suite, parent, arch):
super(GraphConfig, self).__init__()
self._suite = suite
self.arch = arch
assert isinstance(suite.get('path', []), list)
assert isinstance(suite.get('owners', []), list)
assert isinstance(suite['name'], basestring)
assert isinstance(suite.get('flags', []), list)
assert isinstance(suite.get('test_flags', []), list)
assert isinstance(suite.get('resources', []), list)
# Accumulated values.
self.path = parent.path[:] + suite.get('path', [])
self.graphs = parent.graphs[:] + [suite['name']]
self.flags = parent.flags[:] + suite.get('flags', [])
self.test_flags = parent.test_flags[:] + suite.get('test_flags', [])
self.owners = parent.owners[:] + suite.get('owners', [])
# Values independent of parent node.
self.resources = suite.get('resources', [])
    # Discrete values (with parent defaults).
self.binary = suite.get('binary', parent.binary)
self.run_count = suite.get('run_count', parent.run_count)
self.run_count = suite.get('run_count_%s' % arch, self.run_count)
self.retry_count = suite.get('retry_count', parent.retry_count)
self.retry_count = suite.get('retry_count_%s' % arch, self.retry_count)
self.timeout = suite.get('timeout', parent.timeout)
self.timeout = suite.get('timeout_%s' % arch, self.timeout)
    self.units = suite.get('units', parent.units) = suite.get('total',
self.results_processor = suite.get(
'results_processor', parent.results_processor)
self.process_size = suite.get('process_size', parent.process_size)
# A regular expression for results. If the parent graph provides a
# regexp and the current suite has none, a string place holder for the
# suite name is expected.
# TODO(machenbach): Currently that makes only sense for the leaf level.
# Multiple place holders for multiple levels are not supported.
if parent.results_regexp:
      regexp_default = parent.results_regexp % re.escape(suite['name'])
    else:
      regexp_default = None
self.results_regexp = suite.get('results_regexp', regexp_default)
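    # E.g. a parent results_regexp of '^%s: (.+)$' becomes '^Richards: (.+)$'
    # for a suite/trace named 'Richards' (cf. the module docstring examples).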
# A similar regular expression for the standard deviation (optional).
if parent.stddev_regexp:
      stddev_default = parent.stddev_regexp % re.escape(suite['name'])
    else:
      stddev_default = None
self.stddev_regexp = suite.get('stddev_regexp', stddev_default)
  @property
  def name(self):
return '/'.join(self.graphs)
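    # E.g. 'JSTests/Richards' for a trace 'Richards' nested in a suite named
    # 'JSTests' (illustrative names).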
class TraceConfig(GraphConfig):
"""Represents a leaf in the suite tree structure."""
def __init__(self, suite, parent, arch):
super(TraceConfig, self).__init__(suite, parent, arch)
assert self.results_regexp
assert self.owners
def CreateMeasurement(self, perform_measurement):
if not perform_measurement:
return NullMeasurement()
return Measurement(self, self.results_regexp, self.stddev_regexp)
class RunnableConfig(GraphConfig):
"""Represents a runnable suite definition (i.e. has a main file).
  """

  def __init__(self, suite, parent, arch):
super(RunnableConfig, self).__init__(suite, parent, arch)
self.has_timeouts = False
self.has_near_timeouts = False
  @property
  def main(self):
return self._suite.get('main', '')
def PostProcess(self, outputs_iter):
if self.results_processor:
def it():
for i, output in enumerate(outputs_iter()):
yield RunResultsProcessor(self.results_processor, output, i + 1)
return it
return outputs_iter
def ChangeCWD(self, suite_path):
"""Changes the cwd to to path defined in the current graph.
    The tests are supposed to be relative to the suite configuration.
    """
suite_dir = os.path.abspath(os.path.dirname(suite_path))
bench_dir = os.path.normpath(os.path.join(*self.path))
os.chdir(os.path.join(suite_dir, bench_dir))
def GetCommandFlags(self, extra_flags=None):
suffix = ['--'] + self.test_flags if self.test_flags else []
return self.flags + (extra_flags or []) + [self.main] + suffix
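    # E.g. with the docstring's example suite (flags=['--expose-gc'],
    # main='run.js', test_flags=['5']) this returns
    # ['--expose-gc', 'run.js', '--', '5'].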
def GetCommand(self, cmd_prefix, shell_dir, extra_flags=None):
# TODO(machenbach): This requires +.exe if run on windows.
extra_flags = extra_flags or []
    if self.binary != 'd8' and '--prof' in extra_flags:'Profiler supported only on a benchmark run with d8')
if self.process_size:
cmd_prefix = ['/usr/bin/time', '--format=MaxMemory: %MKB'] + cmd_prefix
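      # /usr/bin/time then reports the maximum resident set size on stderr,
      # e.g. 'MaxMemory: 123456KB' (illustrative value), which is appended to
      # stdout later so the results regexps can see it.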
if self.binary.endswith('.py'):
# Copy cmd_prefix instead of update (+=).
cmd_prefix = cmd_prefix + [sys.executable]
    return command.Command(
        cmd_prefix=cmd_prefix,
        shell=os.path.join(shell_dir, self.binary),
        args=self.GetCommandFlags(extra_flags=extra_flags),
        timeout=self.timeout or 60)
def Run(self, runner, secondary, result_tracker, results_secondary):
"""Iterates over several runs and handles the output for all traces."""
    output, output_secondary = Unzip(runner())
    AccumulateResults(self, self.PostProcess(output),
                      perform_measurement=True, calc_total=self.total,
                      result_tracker=result_tracker)
    AccumulateResults(self, self.PostProcess(output_secondary),
                      perform_measurement=secondary,  # only run second time on trybots
                      calc_total=self.total, result_tracker=results_secondary)
class RunnableTraceConfig(TraceConfig, RunnableConfig):
"""Represents a runnable suite definition that is a leaf."""
def __init__(self, suite, parent, arch):
super(RunnableTraceConfig, self).__init__(suite, parent, arch)
def Run(self, runner, secondary, result_tracker, results_secondary):
"""Iterates over several runs and handles the output."""
measurement = self.CreateMeasurement(perform_measurement=True)
    measurement_secondary = self.CreateMeasurement(
        perform_measurement=secondary)
    for output, output_secondary in runner():
      measurement.ConsumeOutput(output)
      measurement_secondary.ConsumeOutput(output_secondary)
    measurement.UpdateResults(result_tracker)
    measurement_secondary.UpdateResults(results_secondary)
def MakeGraphConfig(suite, arch, parent):
"""Factory method for making graph configuration objects."""
if isinstance(parent, RunnableConfig):
# Below a runnable can only be traces.
return TraceConfig(suite, parent, arch)
elif suite.get('main') is not None:
# A main file makes this graph runnable. Empty strings are accepted.
if suite.get('tests'):
# This graph has subgraphs (traces).
return RunnableConfig(suite, parent, arch)
# This graph has no subgraphs, it's a leaf.
return RunnableTraceConfig(suite, parent, arch)
elif suite.get('tests'):
# This is neither a leaf nor a runnable.
return GraphConfig(suite, parent, arch)
else: # pragma: no cover
raise Exception('Invalid suite configuration.')
def BuildGraphConfigs(suite, arch, parent):
"""Builds a tree structure of graph objects that corresponds to the suite
  configuration.
  """
  # TODO(machenbach): Implement notion of cpu type?
if arch not in suite.get('archs', SUPPORTED_ARCHS):
return None
graph = MakeGraphConfig(suite, arch, parent)
for subsuite in suite.get('tests', []):
    BuildGraphConfigs(subsuite, arch, graph)
  parent.AppendChild(graph)
  return graph
def FlattenRunnables(node, node_cb):
"""Generator that traverses the tree structure and iterates over all
  runnables.
  """
  node_cb(node)
  if isinstance(node, RunnableConfig):
yield node
elif isinstance(node, Node):
for child in node._children:
for result in FlattenRunnables(child, node_cb):
yield result
else: # pragma: no cover
raise Exception('Invalid suite configuration.')
class Platform(object):
def __init__(self, args):
self.shell_dir = args.shell_dir
self.shell_dir_secondary = args.shell_dir_secondary
self.extra_flags = args.extra_flags.split()
self.args = args
  @staticmethod
  def ReadBuildConfig(args):
config_path = os.path.join(args.shell_dir, 'v8_build_config.json')
if not os.path.isfile(config_path):
return {}
with open(config_path) as f:
return json.load(f)
  @staticmethod
  def GetPlatform(args):
if Platform.ReadBuildConfig(args).get('is_android', False):
return AndroidPlatform(args)
return DesktopPlatform(args)
def _Run(self, runnable, count, secondary=False):
raise NotImplementedError() # pragma: no cover
def _LoggedRun(self, runnable, count, secondary=False):
suffix = ' - secondary' if secondary else ''
title = '>>> %%s (#%d)%s:' % ((count + 1), suffix)
    try:
      output = self._Run(runnable, count, secondary)
    except OSError:
      logging.exception(title % 'OSError')
      raise
if output.duration > 0.9 * runnable.timeout:
runnable.has_near_timeouts = True
    if output.stdout: % 'Stdout' + '\n%s', output.stdout)
if output.stderr: # pragma: no cover
      # Print stderr for debugging. % 'Stderr' + '\n%s', output.stderr)
if output.timed_out:
runnable.has_timeouts = True
logging.warning('>>> Test timed out after %ss.', runnable.timeout)
if output.exit_code != 0:
logging.warning('>>> Test crashed with exit code %d.', output.exit_code)
return output
def Run(self, runnable, count):
"""Execute the benchmark's main file.
    If args.shell_dir_secondary is specified, the benchmark is run twice, e.g.
    with and without patch.

    Args:
      runnable: A Runnable benchmark instance.
      count: The number of this (repeated) run.

    Returns: A tuple with the two benchmark outputs. The latter will be
             NULL_OUTPUT if args.shell_dir_secondary was not specified.
    """
output = self._LoggedRun(runnable, count, secondary=False)
if self.shell_dir_secondary:
return output, self._LoggedRun(runnable, count, secondary=True)
return output, NULL_OUTPUT
class DesktopPlatform(Platform):
def __init__(self, args):
super(DesktopPlatform, self).__init__(args)
self.command_prefix = []
# Setup command class to OS specific version.
command.setup(utils.GuessOS(), args.device)
if args.prioritize or args.affinitize != None:
self.command_prefix = ['schedtool']
if args.prioritize:
self.command_prefix += ['-n', '-20']
if args.affinitize != None:
# schedtool expects a bit pattern when setting affinity, where each
      # bit set to '1' corresponds to a core on which the process may run.
# First bit corresponds to CPU 0. Since the 'affinitize' parameter is
# a core number, we need to map to said bit pattern.
cpu = int(args.affinitize)
core = 1 << cpu
self.command_prefix += ['-a', ('0x%x' % core)]
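      # E.g. --affinitize=3 gives core = 1 << 3 = 0x8, i.e. pin to CPU 3.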
self.command_prefix += ['-e']
  def PreExecution(self):
    pass

  def PostExecution(self):
    pass

  def PreTests(self, node, path):
    if isinstance(node, RunnableConfig):
      node.ChangeCWD(path)
def _Run(self, runnable, count, secondary=False):
shell_dir = self.shell_dir_secondary if secondary else self.shell_dir
cmd = runnable.GetCommand(self.command_prefix, shell_dir, self.extra_flags)
output = cmd.execute()
if output.IsSuccess() and '--prof' in self.extra_flags:
os_prefix = {'linux': 'linux', 'macos': 'mac'}.get(utils.GuessOS())
if os_prefix:
tick_tools = os.path.join(TOOLS_BASE, '%s-tick-processor' % os_prefix)
subprocess.check_call(tick_tools + ' --only-summary', shell=True)
      else:  # pragma: no cover
            'Profiler option currently supported on Linux and Mac OS.')
# /usr/bin/time outputs to stderr
if runnable.process_size:
output.stdout += output.stderr
return output
class AndroidPlatform(Platform): # pragma: no cover
def __init__(self, args):
super(AndroidPlatform, self).__init__(args)
self.driver = android.android_driver(args.device)
def PreExecution(self):
def PostExecution(self):
def PreTests(self, node, path):
if isinstance(node, RunnableConfig):
suite_dir = os.path.abspath(os.path.dirname(path))
if node.path:
bench_rel = os.path.normpath(os.path.join(*node.path))
bench_abs = os.path.join(suite_dir, bench_rel)
      else:
        bench_rel = '.'
        bench_abs = suite_dir
self.driver.push_executable(self.shell_dir, 'bin', node.binary)
if self.shell_dir_secondary:
        self.driver.push_executable(
            self.shell_dir_secondary, 'bin_secondary', node.binary)
if isinstance(node, RunnableConfig):
self.driver.push_file(bench_abs, node.main, bench_rel)
for resource in node.resources:
self.driver.push_file(bench_abs, resource, bench_rel)
def _Run(self, runnable, count, secondary=False):
target_dir = 'bin_secondary' if secondary else 'bin'
# Relative path to benchmark directory.
if runnable.path:
bench_rel = os.path.normpath(os.path.join(*runnable.path))
    else:
      bench_rel = '.'
logcat_file = None
if self.args.dump_logcats_to:
runnable_name = '-'.join(runnable.graphs)
logcat_file = os.path.join(
self.args.dump_logcats_to, 'logcat-%s-#%d%s.log' % (
runnable_name, count + 1, '-secondary' if secondary else ''))
logging.debug('Dumping logcat into %s', logcat_file)
output = Output()
    start = time.time()
    try:
      output.stdout =
          target_dir=target_dir,
          args=runnable.GetCommandFlags(self.extra_flags),
          rel_path=bench_rel,
          timeout=runnable.timeout,
          logcat_file=logcat_file)
except android.CommandFailedException as e:
output.stdout = e.output
output.exit_code = e.status
except android.TimeoutException as e:
output.stdout = e.output
output.timed_out = True
if runnable.process_size:
output.stdout += 'MaxMemory: Unsupported'
output.duration = time.time() - start
return output
class CustomMachineConfiguration:
def __init__(self, disable_aslr = False, governor = None):
self.aslr_backup = None
self.governor_backup = None
self.disable_aslr = disable_aslr
self.governor = governor
def __enter__(self):
    if self.disable_aslr:
      self.aslr_backup = CustomMachineConfiguration.GetASLR()
      CustomMachineConfiguration.SetASLR(0)
    if self.governor != None:
      self.governor_backup = CustomMachineConfiguration.GetCPUGovernor()
      CustomMachineConfiguration.SetCPUGovernor(self.governor)
    return self
def __exit__(self, type, value, traceback):
    if self.aslr_backup != None:
      CustomMachineConfiguration.SetASLR(self.aslr_backup)
    if self.governor_backup != None:
      CustomMachineConfiguration.SetCPUGovernor(self.governor_backup)
  @staticmethod
  def GetASLR():
    try:
      with open('/proc/sys/kernel/randomize_va_space', 'r') as f:
        return int(f.readline().strip())
    except Exception:
      logging.exception('Failed to get current ASLR settings.')
      raise
  @staticmethod
  def SetASLR(value):
    try:
      with open('/proc/sys/kernel/randomize_va_space', 'w') as f:
        f.write(str(value))
    except Exception:
          'Failed to update ASLR to %s. Are we running under sudo?', value)
      raise

    new_value = CustomMachineConfiguration.GetASLR()
    if value != new_value:
      raise Exception('Present value is %s' % new_value)
  @staticmethod
  def GetCPUCoresRange():
    try:
      with open('/sys/devices/system/cpu/present', 'r') as f:
        indexes = f.readline()
        r = list(map(int, indexes.split('-')))
        if len(r) == 1:
          return range(r[0], r[0] + 1)
        return range(r[0], r[1] + 1)
    except Exception:
      logging.exception('Failed to retrieve number of CPUs.')
      raise
  @staticmethod
  def GetCPUPathForId(cpu_index):
ret = '/sys/devices/system/cpu/cpu'
ret += str(cpu_index)
ret += '/cpufreq/scaling_governor'
return ret
  @staticmethod
  def GetCPUGovernor():
    try:
      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
      ret = None
for cpu_index in cpu_indices:
cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
with open(cpu_device, 'r') as f:
# We assume the governors of all CPUs are set to the same value
val = f.readline().strip()
if ret == None:
ret = val
elif ret != val:
raise Exception('CPU cores have differing governor settings')
return ret
except Exception:
      logging.exception('Failed to get the current CPU governor. Is the CPU '
                        'governor disabled? Check BIOS.')
      raise
  @staticmethod
  def SetCPUGovernor(value):
    try:
      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
      for cpu_index in cpu_indices:
        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
        with open(cpu_device, 'w') as f:
          f.write(value)
    except Exception:
      logging.exception('Failed to change CPU governor to %s. Are we '
                        'running under sudo?', value)
      raise

    cur_value = CustomMachineConfiguration.GetCPUGovernor()
    if cur_value != value:
      raise Exception('Could not set CPU governor. Present value is %s'
                      % cur_value)
def Main(argv):
parser = argparse.ArgumentParser()
  parser.add_argument('--arch',
                      help='The architecture to run tests for. Pass "auto" '
                      'to auto-detect.', default='x64',
                      choices=SUPPORTED_ARCHS + ['auto'])
  parser.add_argument('--buildbot',
                      help='Adapt to path structure used on buildbots and adds '
                      'timestamps/level to all logged status messages',
                      default=False, action='store_true')
  parser.add_argument('-d', '--device',
                      help='The device ID to run Android tests on. If not '
                      'given it will be autodetected.')
  parser.add_argument('--extra-flags', default='',
                      help='Additional flags to pass to the test executable')
  parser.add_argument('--json-test-results',
                      help='Path to a file for storing json results.')
  parser.add_argument('--json-test-results-secondary',
                      help='Path to a file for storing json results from run '
                      'without patch or for reference build run.')
  parser.add_argument('--outdir', help='Base directory with compile output',
                      default='out')
  parser.add_argument('--outdir-secondary',
                      help='Base directory with compile output without patch '
                      'or for reference build')
  parser.add_argument('--binary-override-path',
                      help='JavaScript engine binary. By default, d8 under '
                      'architecture-specific build dir. '
                      'Not supported in conjunction with outdir-secondary.')
  parser.add_argument('--prioritize',
                      help='Raise the priority to nice -20 for the '
                      'benchmarking process. Requires Linux, schedtool, and '
                      'sudo privileges.', default=False, action='store_true')
  parser.add_argument('--affinitize',
                      help='Run benchmarking process on the specified core. '
                      'For example: --affinitize=0 will run the benchmark '
                      'process on core 0. --affinitize=3 will run the '
                      'benchmark process on core 3. Requires Linux, schedtool, '
                      'and sudo privileges.', default=None)
  parser.add_argument('--noaslr',
                      help='Disable ASLR for the duration of the benchmarked '
                      'process. Requires Linux and sudo privileges.',
                      default=False, action='store_true')
  parser.add_argument('--cpu-governor',
                      help='Set cpu governor to specified policy for the '
                      'duration of the benchmarked process. Typical options: '
                      '"powersave" for more stable results, or "performance" '
                      'for shorter completion time of suite, with potentially '
                      'more noise in results.')
  parser.add_argument('--filter',
                      help='Only run the benchmarks beginning with this '
                      'string. For example: '
                      '--filter=JSTests/TypedArrays/ will run only TypedArray '
                      'benchmarks from the JSTests suite.',
                      default='')
  parser.add_argument('--dump-logcats-to',
                      help='Writes logcat output from each test into specified '
                      'directory. Only supported for android targets.')
parser.add_argument("--run-count", type=int, default=0,
help="Override the run count specified by the test "
"suite. The default 0 uses the suite's config.")
parser.add_argument('suite', nargs='+', help='Path to the suite config file.')
  try:
    args = parser.parse_args(argv)
  except SystemExit:
    return 1
  logging.basicConfig(
      level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s')
if args.arch == 'auto': # pragma: no cover
args.arch = utils.DefaultArch()
if args.arch not in SUPPORTED_ARCHS:
      logging.error(
          'Auto-detected architecture "%s" is not supported.', args.arch)
if (args.json_test_results_secondary and
not args.outdir_secondary): # pragma: no cover
    logging.error('For writing secondary json test results, a secondary outdir '
                  'path must be specified.')
workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if args.buildbot:
build_config = 'Release'
  else:
    build_config = '%s.release' % args.arch
if args.binary_override_path == None:
args.shell_dir = os.path.join(workspace, args.outdir, build_config)
default_binary_name = 'd8'
  else:
    if not os.path.isfile(args.binary_override_path):
logging.error('binary-override-path must be a file name')
if args.outdir_secondary:
logging.error('specify either binary-override-path or outdir-secondary')
    args.shell_dir = os.path.abspath(
        os.path.dirname(args.binary_override_path))
    default_binary_name = os.path.basename(args.binary_override_path)
if args.outdir_secondary:
args.shell_dir_secondary = os.path.join(
workspace, args.outdir_secondary, build_config)
  else:
    args.shell_dir_secondary = None
if args.json_test_results:
args.json_test_results = os.path.abspath(args.json_test_results)
if args.json_test_results_secondary:
    args.json_test_results_secondary = os.path.abspath(
        args.json_test_results_secondary)
# Ensure all arguments have absolute path before we start changing current
# directory.
args.suite = map(os.path.abspath, args.suite)
prev_aslr = None
prev_cpu_gov = None
platform = Platform.GetPlatform(args)
result_tracker = ResultTracker()
result_tracker_secondary = ResultTracker()
# We use list here to allow modification in nested function below.
have_failed_tests = [False]
with CustomMachineConfiguration(governor = args.cpu_governor,
disable_aslr = args.noaslr) as conf:
for path in args.suite:
      if not os.path.exists(path):  # pragma: no cover
        result_tracker.AddErrors([
            'Configuration file %s does not exist.' % path])
        continue
with open(path) as f:
        suite = json.loads(
# If no name is given, default to the file name without .json.
suite.setdefault('name', os.path.splitext(os.path.basename(path))[0])
# Setup things common to one test suite.
# Build the graph/trace tree structure.
default_parent = DefaultSentinel(default_binary_name)
root = BuildGraphConfigs(suite, args.arch, default_parent)
# Callback to be called on each node on traversal.
def NodeCB(node):
platform.PreTests(node, path)
# Traverse graph/trace tree and iterate over all runnables.
for runnable in FlattenRunnables(root, NodeCB):
runnable_name = '/'.join(runnable.graphs)
if (not runnable_name.startswith(args.filter) and
runnable_name + '/' != args.filter):
          continue'>>> Running suite: %s', runnable_name)
durations = []
durations_secondary = []
def Runner():
"""Output generator that reruns several times."""
for i in range(0, max(1, args.run_count or runnable.run_count)):
attempts_left = runnable.retry_count + 1
while attempts_left:
output, output_secondary = platform.Run(runnable, i)
            if output.IsSuccess() and output_secondary.IsSuccess():
              durations.append(output.duration)
              if output_secondary is not NULL_OUTPUT:
                durations_secondary.append(output_secondary.duration)
              yield output, output_secondary
              break
            attempts_left -= 1
            if not attempts_left:  # ignore failures until last attempt
              have_failed_tests[0] = True
            else:
    '>>> Retrying suite: %s', runnable_name)
# Let runnable iterate over all runs and handle output.
        runnable.Run(Runner, args.shell_dir_secondary, result_tracker,
                     result_tracker_secondary)
        if runnable.has_timeouts:
          result_tracker.timeouts.append(runnable_name)
        if runnable.has_near_timeouts:
          result_tracker.near_timeouts.append(runnable_name)
        result_tracker.AddRunnableDurations(runnable, durations)
        if durations_secondary:
          result_tracker_secondary.AddRunnableDurations(
              runnable, durations_secondary)
  if args.json_test_results:
    result_tracker.WriteToFile(args.json_test_results)
  else:  # pragma: no cover
print('Primary results:', result_tracker)
  if args.json_test_results_secondary:
    result_tracker_secondary.WriteToFile(args.json_test_results_secondary)
  else:  # pragma: no cover
print('Secondary results:', result_tracker_secondary)
  if (result_tracker.errors or result_tracker_secondary.errors or
      have_failed_tests[0]):
return 1
return 0
def MainWrapper():
  try:
    return Main(sys.argv[1:])
  except:
    # Log uncaptured exceptions and report infra failure to the caller.
    traceback.print_exc()
    return 1
if __name__ == '__main__':  # pragma: no cover
  sys.exit(MainWrapper())