#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Evaluate ChromeOS autotest.
Note that by default 'test_that' will install dependency packages of autotest
if the package checksum mismatch. If you want to override content of autotest
package, e.g. chrome's test binary, please make sure the autotest version
matches. Otherwise your test binary will be overwritten.
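Example invocations (the script name, DUT address, and test names below are
illustrative; the flags are defined in create_argument_parser() in this file):
  eval_cros_autotest.py --chromeos_root ~/chromiumos \
      --test_name video_VideoDecodeAccelerator.h264 $DUT
  eval_cros_autotest.py --chromeos_root ~/chromiumos \
      --cts_prefix cheets_CTS_P --cts_module CtsCameraTestCases \
      --cts_test android.hardware.cts.CameraTest#testDisplayOrientation $DUT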
"""
from __future__ import print_function
import argparse
import glob
import json
import logging
import os
import re
import subprocess
import sys
import typing
import xml.etree.ElementTree
from bisect_kit import bisector_cli
from bisect_kit import catapult_util
from bisect_kit import cli
from bisect_kit import common
from bisect_kit import configure
from bisect_kit import core
from bisect_kit import cros_lab_util
from bisect_kit import cros_util
from bisect_kit import errors
from bisect_kit import util
logger = logging.getLogger(__name__)
OLD = 'old'
NEW = 'new'
SKIP = 'skip'
FATAL = 'fatal'
EXIT_CODE_MAP = {
OLD: cli.EXIT_CODE_OLD,
NEW: cli.EXIT_CODE_NEW,
SKIP: cli.EXIT_CODE_SKIP,
FATAL: cli.EXIT_CODE_FATAL,
}
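# Map bisection verdict labels to the exit codes bisect-kit expects (see the
# EXIT_CODE_* constants in bisect_kit.cli).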
def create_argument_parser():
parents = [common.common_argument_parser, common.session_optional_parser]
# exit_on_error=False: to capture invalid argument values
parser = argparse.ArgumentParser(
description=__doc__, parents=parents, exit_on_error=False)
cli.patching_argparser_exit(parser)
parser.add_argument(
'--rich_result',
action='store_true',
      help='Instead of a mere exit code, output detailed information in JSON')
parser.add_argument(
'dut',
nargs='?',
type=cli.argtype_notempty,
metavar='DUT',
default=configure.get('DUT', ''))
parser.add_argument(
'--chromeos_root',
type=cli.argtype_dir_path,
metavar='CHROMEOS_ROOT',
default=configure.get('CHROMEOS_ROOT', ''),
help='ChromeOS tree root')
parser.add_argument(
'--chrome_root',
metavar='CHROME_ROOT',
type=cli.argtype_dir_path,
default=configure.get('CHROME_ROOT'),
help='Chrome tree root; necessary for telemetry tests')
parser.add_argument(
'--prebuilt',
action='store_true',
help='Run autotest using existing prebuilt package if specified; '
'otherwise use the default one')
parser.add_argument(
'--reinstall',
action='store_true',
help='Remove existing autotest folder on the DUT first')
parser.add_argument(
'--reboot_before_test',
action='store_true',
help='Reboot before test run')
group = parser.add_argument_group(title='Options for normal autotest tests')
group.add_argument(
'--test_name', help='Test name, like "video_VideoDecodeAccelerator.h264"')
group.add_argument(
'--fail_to_pass',
action='store_true',
      help='For functional tests: old behavior is FAIL and new behavior is '
      'PASS. If not specified, the default is: old behavior is PASS and '
      'new behavior is FAIL')
group.add_argument(
'--metric',
help='Metric name of performance test; example: '
'"cheets_SystemRawImageSize"')
group.add_argument(
'--args',
      help='Extra args passed to "test_that --args"; overrides the default',
action='append',
default=[])
group = parser.add_argument_group(title='Options for CTS/GTS tests')
group.add_argument('--cts_revision', help='CTS revision, like "9.0_r3"')
group.add_argument('--cts_abi', choices=['arm', 'x86'])
group.add_argument(
'--cts_prefix',
help='Prefix of autotest test name, '
'like cheets_CTS_N, cheets_CTS_P, cheets_GTS')
group.add_argument(
'--cts_module', help='CTS/GTS module name, like "CtsCameraTestCases"')
group.add_argument(
'--cts_test',
help='CTS/GTS test name, like '
'"android.hardware.cts.CameraTest#testDisplayOrientation"')
group.add_argument('--cts_timeout', type=float, help='timeout, in seconds')
return parser
def get_additional_test_args(test_name):
"""Gets extra arguments to specific test.
Some tests may require special arguments to run.
Args:
test_name: test name
Returns:
arguments (list[str])
"""
if test_name.startswith('telemetry_'):
return ['local=True']
return []
def prepare_to_run_test(opts):
  # Some versions of the ChromeOS SDK are broken and ship a bad 'ssh'
  # executable. This works around the issue. See crbug/906289 for details.
# TODO(kcwu): remove this workaround once we no longer support bisecting
# versions earlier than R73-11445.0.0.
ssh_path = os.path.join(opts.chromeos_root, 'chroot/usr/bin/ssh')
if os.path.exists(ssh_path):
with open(ssh_path, 'rb') as f:
if b'file descriptor passing not supported' in f.read():
cros_util.cros_sdk(opts.chromeos_root, 'sudo', 'emerge',
'net-misc/openssh')
# Special handling for audio tests (b/136136270).
if opts.prebuilt:
autotest_dir = os.path.join(opts.chromeos_root,
cros_util.prebuilt_autotest_dir)
else:
autotest_dir = os.path.join(opts.chromeos_root,
cros_util.in_tree_autotest_dir)
cros_util.override_autotest_config(autotest_dir)
sox_path = os.path.join(opts.chromeos_root, 'chroot/usr/bin/sox')
if not os.path.exists(sox_path):
try:
cros_util.cros_sdk(opts.chromeos_root, 'sudo', 'emerge', 'sox')
except subprocess.CalledProcessError:
      # It's known that installing sox fails for earlier versions of
      # ChromeOS (b/136136270), so ignore the failure.
      logger.debug('Sox is only required by some audio tests. '
                   'Assume the failure to install sox is harmless')
  # test_that may use this ssh key, and ssh complains if its permissions are
  # too open. chmod every time just before running test_that because the
  # permissions may change after some git operations.
util.check_call(
'chmod',
'o-r,g-r',
'src/scripts/mod_for_test_scripts/ssh_keys/testing_rsa',
cwd=opts.chromeos_root)
if opts.reinstall:
util.ssh_cmd(opts.dut, 'rm', '-rf', '/usr/local/autotest')
if opts.reboot_before_test:
cros_util.reboot(
opts.dut, force_reboot_callback=cros_lab_util.reboot_via_servo)
def run_test(opts) -> str:
"""Runs an autotest test.
Args:
opts: An argparse.Namespace to hold command line arguments.
Returns:
path of test result (outside chroot)
"""
prebuilt_autotest_dir = os.path.join(cros_util.chromeos_root_inside_chroot,
cros_util.prebuilt_autotest_dir)
# Set results dir inside source tree, so it's easier to access them outside
# chroot.
results_dir = os.path.join(cros_util.chromeos_root_inside_chroot,
'tmp/autotest_results_tmp')
if opts.prebuilt:
test_that_bin = os.path.join(prebuilt_autotest_dir,
'site_utils/test_that.py')
else:
test_that_bin = '/usr/bin/test_that'
cmd = [
test_that_bin, opts.dut, opts.test_name, '--debug', '--results_dir',
results_dir
]
if opts.prebuilt:
cmd += ['--autotest_dir', prebuilt_autotest_dir]
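  # Merge the default per-test args with user-supplied --args; explicit
  # command line args take precedence over the defaults.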
args = get_additional_test_args(opts.test_name)
if opts.args:
if args:
      logger.info(
          'default test_that args `%s` are overridden by '
          'command line option `%s`', args, opts.args)
cmd += ['--args', ' '.join(opts.args)]
elif args:
cmd += ['--args', ' '.join(args)]
try:
output = cros_util.cros_sdk(
opts.chromeos_root, *cmd, chrome_root=opts.chrome_root)
except subprocess.CalledProcessError as e:
if e.output is None:
raise errors.ExternalError('cros_sdk failed before test started')
output = e.output
m = re.search(r'Finished running tests. Results can be found in (\S+)',
output)
if not m:
raise errors.ExternalError('result dir is unknown')
assert m.group(1) == results_dir
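  # Translate the chroot-internal results path to the corresponding path
  # outside the chroot, as promised in the docstring.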
return results_dir.replace(cros_util.chromeos_root_inside_chroot,
opts.chromeos_root)
# Although tauto will generate results.json, its content may be empty and
# useless (see testdata/tast_test_result/fail.2 for example). So we have to
# parse the machine-unfriendly test_report.log instead.
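# A result line in test_report.log is assumed to look roughly like the
# following (abridged; this matches the regexes below, not a documented
# format):
#   /tmp/test_that_results_XXX/results-1-<test_name>  [ PASSED ]
#   /tmp/test_that_results_XXX/results-1-<test_name>  FAIL: <reason>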
def parse_tauto_results(result_dir: str, test_name: typing.Optional[str]
) -> tuple[bool, typing.Optional[str]]:
result_file = os.path.join(result_dir, 'test_report.log')
if not os.path.exists(result_file):
raise errors.ExternalError('test_report.log not found')
failed = None
reason = None
for line in open(result_file):
line = line.strip()
# Assume no space characters in the result path.
m = re.match(r'^(\S+)\s+(.+)', line)
if not m:
continue
# Match test name.
path, content = m.groups()
m = re.search(r'/(?:results-\d+-)?([^/]+)$', path)
if not m:
continue
logger.debug('found test name: %s', m.group(1))
if m.group(1) != test_name:
continue
if re.match(r'\[ (FAILED|PASSED) \]', content):
if 'PASSED' in content:
return True, None
failed = True
if re.match(r'ABORT|ERROR|FAIL|WARN|TEST_NA', content):
reason = content
assert failed, 'test name not found in test_report.log'
return False, reason
def parse_cts_results(result_dir,
test_name) -> tuple[bool, typing.Optional[str]]:
# The result path looks like
# results-1-cheets_CTS_P.tradefed-run-test/
# cheets_CTS_P.tradefed-run-test.CtsCameraTestCases.testFocalLengths/
# results/android-cts/2022.03.02_00.27.09/test_result.xml
result_files = glob.glob(
os.path.join(result_dir, 'results*/*/results/*/*/test_result.xml'))
if not result_files:
    logger.error('failed to run CTS test, falling back to tauto result')
passed, reason = parse_tauto_results(result_dir, test_name)
assert not passed, 'test_result.xml not found, but test passed?'
raise errors.BisectionTemporaryError('failed to run test: %s' % reason)
result_files.sort(key=os.path.getmtime)
logger.info('found result file: %s', result_files)
# Use the latest result file because it is the final result of retries.
result_file = result_files[-1]
tree = xml.etree.ElementTree.parse(result_file)
root = tree.getroot()
summary = root.find('Summary')
assert summary is not None
logger.info('summary: %s', summary.attrib)
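  # If both the pass and failed counters are zero, the requested test did not
  # run at all; raise a temporary error instead of reporting a verdict.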
if summary.get('pass') == summary.get('failed') == '0':
raise errors.BisectionTemporaryError('no tests found')
# Returns the reason of the first failure.
for test in root.iter('Test'):
if test.get('result') == 'pass':
continue
reason = None
failure = test.find('Failure')
    # Compare against None: an Element with no children is falsy, so a bare
    # `if failure:` would wrongly skip a Failure element that only carries
    # attributes.
    if failure is not None:
reason = failure.get('message')
return False, reason
  assert root.findall(
      './/Test[@result="pass"]'), 'some tests should have passed'
return True, None
def gather_test_result(opts, result_dir) -> core.StepResult:
if opts.cts_test:
passed, reason = parse_cts_results(result_dir, opts.test_name)
else:
passed, reason = parse_tauto_results(result_dir, opts.test_name)
if opts.metric:
for root, _, files in os.walk(result_dir):
for filename in files:
if filename != 'results-chart.json':
continue
full_path = os.path.join(root, filename)
try:
values = catapult_util.get_benchmark_values(full_path, opts.metric)
except Exception:
if not passed:
raise errors.BisectionTemporaryError(
'test failed to generate metric values, reason: %s' % reason)
raise
return core.StepResult('value', values=values)
if not passed:
raise errors.BisectionTemporaryError(
'test failed to generate metric values, reason: %s' % reason)
raise errors.ExecutionFatalError(
'test passed, but no metric values generated')
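  # Functional test: map PASS/FAIL onto the old/new bisection verdicts. By
  # default PASS is the 'old' behavior; --fail_to_pass inverts the mapping.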
if opts.fail_to_pass:
if passed:
logger.info('passed')
return core.StepResult('new', reason)
logger.info('failed')
return core.StepResult('old', reason)
if passed:
logger.info('passed')
return core.StepResult('old', reason)
logger.info('failed')
return core.StepResult('new', reason)
def step_main(args: typing.Optional[tuple[str, ...]]) -> core.StepResult:
common.init()
parser = create_argument_parser()
opts = parser.parse_args(args)
common.config_logging(opts)
if not cros_util.is_dut(opts.dut):
raise errors.BrokenDutException('%r is not a valid DUT address' % opts.dut)
dut_os_version = cros_util.query_dut_short_version(opts.dut)
is_cts = (
opts.cts_revision or opts.cts_abi or opts.cts_prefix or opts.cts_module or
opts.cts_test or opts.cts_timeout)
if is_cts:
if opts.test_name or opts.metric or opts.args:
raise errors.ArgumentError(
None,
'do not specify --test_name, --metric, --args for CTS/GTS tests')
if not opts.cts_prefix:
raise errors.ArgumentError(
None, '--cts_prefix should be specified for CTS/GTS tests')
if not opts.cts_module:
raise errors.ArgumentError(
None, '--cts_module should be specified for CTS/GTS tests')
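    # Translate the individual --cts_* options into the autotest wrapper test
    # name plus a 'key=value' args list for tradefed-run-test.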
opts.test_name = '%s.tradefed-run-test' % opts.cts_prefix
opts.args = [
'module=%s' % opts.cts_module,
'test=%s' % opts.cts_test, 'max_retry=0'
]
if opts.cts_revision:
opts.args.append('revision=%s' % opts.cts_revision)
if opts.cts_abi:
opts.args.append('abi=%s' % opts.cts_abi)
if opts.cts_timeout:
opts.args.append('timeout=%s' % opts.cts_timeout)
else:
if not opts.test_name:
raise errors.ArgumentError(None, 'argument --test_name is required')
# Verify command line options.
if opts.metric:
if opts.fail_to_pass:
raise errors.ArgumentError(
'--fail_to_pass',
'--fail_to_pass is not for benchmark test (--metric)')
if opts.test_name.startswith('telemetry_'):
if not opts.chrome_root:
raise errors.ArgumentError(
'--chrome_root', '--chrome_root is mandatory for telemetry tests')
if opts.prebuilt:
autotest_dir = os.path.join(opts.chromeos_root,
cros_util.prebuilt_autotest_dir)
if not os.path.exists(autotest_dir):
raise errors.ArgumentError(
'--prebuilt', 'no autotest prebuilt installed (%s); '
'please run switch_autotest_prebuilt.py first' % autotest_dir)
try:
prepare_to_run_test(opts)
except Exception as e:
    raise errors.BisectionTemporaryError(
        'failed during test preparation, assume it is temporary: %s' % e)
result_dir = run_test(opts)
result = gather_test_result(opts, result_dir)
# The OS version should not change.
assert dut_os_version == cros_util.query_dut_short_version(opts.dut), (
'Someone else reflashed the DUT. DUT locking is not respected?')
return result
def main(args: typing.Optional[tuple[str, ...]] = None) -> int:
return bisector_cli.step_main_wrapper(step_main, args)
if __name__ == '__main__':
sys.exit(main())