import json
import sys
from os.path import dirname, join
from six.moves import cStringIO as StringIO
from mozlog import handlers, structuredlog
sys.path.insert(0, join(dirname(__file__), "..", ".."))
from formatters.chromium import ChromiumFormatter
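
# These tests exercise ChromiumFormatter, which serializes wptrunner results
# into the Chromium JSON Test Results format: a nested "tests" trie plus
# summary fields such as "num_failures_by_type". They are written for pytest;
# the capfd fixture captures anything leaked to stdout/stderr.


# A minimal sketch of the setup that every test below repeats inline. The
# helper is illustrative only and is not used by the tests themselves.
def _make_logger(formatter=None):
    # Route structured log messages through the Chromium formatter into an
    # in-memory stream so the resulting JSON can be inspected afterwards.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, formatter or ChromiumFormatter()))
    return logger, output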


def test_chromium_required_fields(capfd):
# Test that the test results contain a handful of required fields.
# Set up the handler.
output = StringIO()
logger = structuredlog.StructuredLogger("test_a")
logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
    # Output the results of a single passing test.
logger.suite_start(["test-id-1"], run_info={}, time=123)
logger.test_start("test-id-1")
logger.test_end("test-id-1", status="PASS", expected="PASS")
logger.suite_end()
# check nothing got output to stdout/stderr
# (note that mozlog outputs exceptions during handling to stderr!)
captured = capfd.readouterr()
assert captured.out == ""
assert captured.err == ""
# check the actual output of the formatter
output.seek(0)
output_obj = json.load(output)
# Check for existence of required fields
assert "interrupted" in output_obj
assert "path_delimiter" in output_obj
assert "version" in output_obj
assert "num_failures_by_type" in output_obj
assert "tests" in output_obj
test_obj = output_obj["tests"]["test-id-1"]
assert "actual" in test_obj
assert "expected" in test_obj


def test_chromium_test_name_trie(capfd):
# Ensure test names are broken into directories and stored in a trie with
# test results at the leaves.
# Set up the handler.
output = StringIO()
logger = structuredlog.StructuredLogger("test_a")
logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
    # Output results for two tests that live in different directories.
logger.suite_start(["/foo/bar/test-id-1", "/foo/test-id-2"], run_info={},
time=123)
logger.test_start("/foo/bar/test-id-1")
logger.test_end("/foo/bar/test-id-1", status="TIMEOUT", expected="FAIL")
logger.test_start("/foo/test-id-2")
logger.test_end("/foo/test-id-2", status="ERROR", expected="TIMEOUT")
logger.suite_end()
# check nothing got output to stdout/stderr
# (note that mozlog outputs exceptions during handling to stderr!)
captured = capfd.readouterr()
assert captured.out == ""
assert captured.err == ""
# check the actual output of the formatter
output.seek(0)
output_obj = json.load(output)
# Ensure that the test names are broken up by directory name and that the
# results are stored at the leaves.
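    # e.g. "/foo/bar/test-id-1" is stored as tests["foo"]["bar"]["test-id-1"].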
test_obj = output_obj["tests"]["foo"]["bar"]["test-id-1"]
assert test_obj["actual"] == "TIMEOUT"
assert test_obj["expected"] == "FAIL"
test_obj = output_obj["tests"]["foo"]["test-id-2"]
# The ERROR status is mapped to FAIL for Chromium
assert test_obj["actual"] == "FAIL"
assert test_obj["expected"] == "TIMEOUT"


def test_num_failures_by_type(capfd):
# Test that the number of failures by status type is correctly calculated.
# Set up the handler.
output = StringIO()
logger = structuredlog.StructuredLogger("test_a")
logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
# Run some tests with different statuses: 3 passes, 1 timeout
logger.suite_start(["t1", "t2", "t3", "t4"], run_info={}, time=123)
logger.test_start("t1")
logger.test_end("t1", status="PASS", expected="PASS")
logger.test_start("t2")
logger.test_end("t2", status="PASS", expected="PASS")
logger.test_start("t3")
logger.test_end("t3", status="PASS", expected="FAIL")
logger.test_start("t4")
logger.test_end("t4", status="TIMEOUT", expected="CRASH")
logger.suite_end()
# check nothing got output to stdout/stderr
# (note that mozlog outputs exceptions during handling to stderr!)
captured = capfd.readouterr()
assert captured.out == ""
assert captured.err == ""
# check the actual output of the formatter
output.seek(0)
num_failures_by_type = json.load(output)["num_failures_by_type"]
# We expect 3 passes and 1 timeout, nothing else.
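    # (The tally is keyed by actual status; expectations do not affect it, so
    # t3's unexpected pass still counts under PASS.)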
assert sorted(num_failures_by_type.keys()) == ["PASS", "TIMEOUT"]
assert num_failures_by_type["PASS"] == 3
assert num_failures_by_type["TIMEOUT"] == 1


def test_subtest_messages(capfd):
# Tests accumulation of test output
# Set up the handler.
output = StringIO()
logger = structuredlog.StructuredLogger("test_a")
logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
# Run two tests with subtest messages. The subtest name should be included
# in the output. We should also tolerate missing messages.
logger.suite_start(["t1", "t2"], run_info={}, time=123)
logger.test_start("t1")
logger.test_status("t1", status="FAIL", subtest="t1_a",
message="t1_a_message")
logger.test_status("t1", status="PASS", subtest="t1_b",
message="t1_b_message")
logger.test_end("t1", status="PASS", expected="PASS")
logger.test_start("t2")
    # Subtests with missing messages should not be ignored.
logger.test_status("t2", status="PASS", subtest="t2_a")
# A test-level message will also be appended
logger.test_end("t2", status="TIMEOUT", expected="PASS",
message="t2_message")
logger.suite_end()
# check nothing got output to stdout/stderr
# (note that mozlog outputs exceptions during handling to stderr!)
captured = capfd.readouterr()
assert captured.out == ""
assert captured.err == ""
# check the actual output of the formatter
output.seek(0)
output_json = json.load(output)
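    # Each "log" artifact line has the form
    # "[subtest_name] [ACTUAL expected EXPECTED] message"; the final entry,
    # with an empty "[]" name, is the test-level (harness) status.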
t1_artifacts = output_json["tests"]["t1"]["artifacts"]
assert t1_artifacts["log"] == [
"[t1_a] [FAIL expected PASS] t1_a_message",
"[t1_b] [PASS expected PASS] t1_b_message",
"[] [FAIL expected PASS] ",
]
assert t1_artifacts["wpt_subtest_failure"] == ["true"]
t2_artifacts = output_json["tests"]["t2"]["artifacts"]
assert t2_artifacts["log"] == [
"[t2_a] [PASS expected PASS] ",
"[] [TIMEOUT expected PASS] t2_message",
]
assert "wpt_subtest_failure" not in t2_artifacts.keys()


def test_subtest_failure(capfd):
# Tests that a test fails if a subtest fails
# Set up the handler.
output = StringIO()
logger = structuredlog.StructuredLogger("test_a")
formatter = ChromiumFormatter()
logger.add_handler(handlers.StreamHandler(output, formatter))
# Run a test with some subtest failures.
logger.suite_start(["t1"], run_info={}, time=123)
logger.test_start("t1")
logger.test_status("t1", status="FAIL", subtest="t1_a",
message="t1_a_message")
logger.test_status("t1", status="PASS", subtest="t1_b",
message="t1_b_message")
logger.test_status("t1", status="TIMEOUT", subtest="t1_c",
message="t1_c_message")
# Make sure the test name was added to the set of tests with subtest fails
assert "t1" in formatter.tests_with_subtest_fails
# The test status is reported as a pass here because the harness was able to
# run the test to completion.
logger.test_end("t1", status="PASS", expected="PASS")
logger.suite_end()
# check nothing got output to stdout/stderr
# (note that mozlog outputs exceptions during handling to stderr!)
captured = capfd.readouterr()
assert captured.out == ""
assert captured.err == ""
# check the actual output of the formatter
output.seek(0)
output_json = json.load(output)
test_obj = output_json["tests"]["t1"]
t1_artifacts = test_obj["artifacts"]
assert t1_artifacts["log"] == [
"[t1_a] [FAIL expected PASS] t1_a_message",
"[t1_b] [PASS expected PASS] t1_b_message",
"[t1_c] [TIMEOUT expected PASS] t1_c_message",
"[] [FAIL expected PASS] ",
]
assert t1_artifacts["wpt_subtest_failure"] == ["true"]
# The status of the test in the output is a failure because subtests failed,
# despite the harness reporting that the test passed. But the harness status
# is logged as an artifact.
assert t1_artifacts["wpt_actual_status"] == ["PASS"]
assert test_obj["actual"] == "FAIL"
assert test_obj["expected"] == "PASS"
# Also ensure that the formatter cleaned up its internal state
assert "t1" not in formatter.tests_with_subtest_fails


def test_expected_subtest_failure(capfd):
# Tests that an expected subtest failure does not cause the test to fail
# Set up the handler.
output = StringIO()
logger = structuredlog.StructuredLogger("test_a")
formatter = ChromiumFormatter()
logger.add_handler(handlers.StreamHandler(output, formatter))
# Run a test with some expected subtest failures.
logger.suite_start(["t1"], run_info={}, time=123)
logger.test_start("t1")
logger.test_status("t1", status="FAIL", expected="FAIL", subtest="t1_a",
message="t1_a_message")
logger.test_status("t1", status="PASS", subtest="t1_b",
message="t1_b_message")
logger.test_status("t1", status="TIMEOUT", expected="TIMEOUT", subtest="t1_c",
message="t1_c_message")
# The subtest failures are all expected so this test should not be added to
# the set of tests with subtest failures.
assert "t1" not in formatter.tests_with_subtest_fails
# The test status is reported as a pass here because the harness was able to
# run the test to completion.
logger.test_end("t1", status="PASS", expected="PASS")
logger.suite_end()
# check nothing got output to stdout/stderr
# (note that mozlog outputs exceptions during handling to stderr!)
captured = capfd.readouterr()
assert captured.out == ""
assert captured.err == ""
# check the actual output of the formatter
output.seek(0)
output_json = json.load(output)
test_obj = output_json["tests"]["t1"]
t1_log = test_obj["artifacts"]["log"]
assert t1_log == [
"[t1_a] [FAIL expected FAIL] t1_a_message",
"[t1_b] [PASS expected PASS] t1_b_message",
"[t1_c] [TIMEOUT expected TIMEOUT] t1_c_message",
"[] [PASS expected PASS] ",
]
# The status of the test in the output is a pass because the subtest
# failures were all expected.
assert test_obj["actual"] == "PASS"
assert test_obj["expected"] == "PASS"


def test_unexpected_subtest_pass(capfd):
# A subtest that unexpectedly passes is considered a failure condition.
# Set up the handler.
output = StringIO()
logger = structuredlog.StructuredLogger("test_a")
formatter = ChromiumFormatter()
logger.add_handler(handlers.StreamHandler(output, formatter))
# Run a test with a subtest that is expected to fail but passes.
logger.suite_start(["t1"], run_info={}, time=123)
logger.test_start("t1")
logger.test_status("t1", status="PASS", expected="FAIL", subtest="t1_a",
message="t1_a_message")
# Since the subtest behaviour is unexpected, it's considered a failure, so
# the test should be added to the set of tests with subtest failures.
assert "t1" in formatter.tests_with_subtest_fails
# The test status is reported as a pass here because the harness was able to
# run the test to completion.
logger.test_end("t1", status="PASS", expected="PASS")
logger.suite_end()
# check nothing got output to stdout/stderr
# (note that mozlog outputs exceptions during handling to stderr!)
captured = capfd.readouterr()
assert captured.out == ""
assert captured.err == ""
# check the actual output of the formatter
output.seek(0)
output_json = json.load(output)
test_obj = output_json["tests"]["t1"]
t1_artifacts = test_obj["artifacts"]
assert t1_artifacts["log"] == [
"[t1_a] [PASS expected FAIL] t1_a_message",
"[] [FAIL expected PASS] ",
]
assert t1_artifacts["wpt_subtest_failure"] == ["true"]
# Since the subtest status is unexpected, we fail the test. But we report
# wpt_actual_status as an artifact
assert t1_artifacts["wpt_actual_status"] == ["PASS"]
assert test_obj["actual"] == "FAIL"
assert test_obj["expected"] == "PASS"
# Also ensure that the formatter cleaned up its internal state
assert "t1" not in formatter.tests_with_subtest_fails


def test_expected_test_fail(capfd):
# Check that an expected test-level failure is treated as a Pass
# Set up the handler.
output = StringIO()
logger = structuredlog.StructuredLogger("test_a")
logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
    # Run a single test whose ERROR failure matches its ERROR expectation.
logger.suite_start(["t1"], run_info={}, time=123)
logger.test_start("t1")
logger.test_end("t1", status="ERROR", expected="ERROR")
logger.suite_end()
# check nothing got output to stdout/stderr
# (note that mozlog outputs exceptions during handling to stderr!)
captured = capfd.readouterr()
assert captured.out == ""
assert captured.err == ""
# check the actual output of the formatter
output.seek(0)
output_json = json.load(output)
test_obj = output_json["tests"]["t1"]
# The test's actual and expected status should map from "ERROR" to "FAIL"
assert test_obj["actual"] == "FAIL"
assert test_obj["expected"] == "FAIL"
    # ...and this test should be neither a regression nor unexpected
assert "is_regression" not in test_obj
assert "is_unexpected" not in test_obj


def test_unexpected_test_fail(capfd):
# Check that an unexpected test-level failure is marked as unexpected and
# as a regression.
# Set up the handler.
output = StringIO()
logger = structuredlog.StructuredLogger("test_a")
logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
    # Run a single test that fails unexpectedly: expected OK, got ERROR.
logger.suite_start(["t1"], run_info={}, time=123)
logger.test_start("t1")
logger.test_end("t1", status="ERROR", expected="OK")
logger.suite_end()
# check nothing got output to stdout/stderr
# (note that mozlog outputs exceptions during handling to stderr!)
captured = capfd.readouterr()
assert captured.out == ""
assert captured.err == ""
# check the actual output of the formatter
output.seek(0)
output_json = json.load(output)
test_obj = output_json["tests"]["t1"]
# The test's actual and expected status should be mapped, ERROR->FAIL and
# OK->PASS
assert test_obj["actual"] == "FAIL"
assert test_obj["expected"] == "PASS"
    # ...and this test should be a regression and unexpected
assert test_obj["is_regression"] is True
assert test_obj["is_unexpected"] is True


def test_flaky_test_expected(capfd):
# Check that a flaky test with multiple possible statuses is seen as
# expected if its actual status is one of the possible ones.
    # Set up the handler.
output = StringIO()
logger = structuredlog.StructuredLogger("test_a")
logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
# Run a test that is known to be flaky
logger.suite_start(["t1"], run_info={}, time=123)
logger.test_start("t1")
logger.test_end("t1", status="ERROR", expected="OK", known_intermittent=["ERROR", "TIMEOUT"])
logger.suite_end()
# check nothing got output to stdout/stderr
# (note that mozlog outputs exceptions during handling to stderr!)
captured = capfd.readouterr()
assert captured.out == ""
assert captured.err == ""
# check the actual output of the formatter
output.seek(0)
output_json = json.load(output)
test_obj = output_json["tests"]["t1"]
# The test's statuses are all mapped, changing ERROR->FAIL and OK->PASS
assert test_obj["actual"] == "FAIL"
# All the possible statuses are concatenated together into expected.
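    # (primary expectation first, then each known_intermittent entry, all
    # mapped to Chromium status names and space-separated)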
assert test_obj["expected"] == "PASS FAIL TIMEOUT"
# ...this is not a regression or unexpected because the actual status is one
# of the expected ones
assert "is_regression" not in test_obj
assert "is_unexpected" not in test_obj


def test_flaky_test_unexpected(capfd):
# Check that a flaky test with multiple possible statuses is seen as
# unexpected if its actual status is NOT one of the possible ones.
    # Set up the handler.
output = StringIO()
logger = structuredlog.StructuredLogger("test_a")
logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
# Run a test that is known to be flaky
logger.suite_start(["t1"], run_info={}, time=123)
logger.test_start("t1")
logger.test_end("t1", status="ERROR", expected="OK", known_intermittent=["TIMEOUT"])
logger.suite_end()
# check nothing got output to stdout/stderr
# (note that mozlog outputs exceptions during handling to stderr!)
captured = capfd.readouterr()
assert captured.out == ""
assert captured.err == ""
# check the actual output of the formatter
output.seek(0)
output_json = json.load(output)
test_obj = output_json["tests"]["t1"]
# The test's statuses are all mapped, changing ERROR->FAIL and OK->PASS
assert test_obj["actual"] == "FAIL"
# All the possible statuses are concatenated together into expected.
assert test_obj["expected"] == "PASS TIMEOUT"
# ...this is a regression and unexpected because the actual status is not
# one of the expected ones
assert test_obj["is_regression"] is True
assert test_obj["is_unexpected"] is True


def test_precondition_failed(capfd):
# Check that a failed precondition gets properly handled.
    # Set up the handler.
output = StringIO()
logger = structuredlog.StructuredLogger("test_a")
logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
# Run a test with a precondition failure
logger.suite_start(["t1"], run_info={}, time=123)
logger.test_start("t1")
logger.test_end("t1", status="PRECONDITION_FAILED", expected="OK")
logger.suite_end()
# check nothing got output to stdout/stderr
# (note that mozlog outputs exceptions during handling to stderr!)
captured = capfd.readouterr()
assert captured.out == ""
assert captured.err == ""
# check the actual output of the formatter
output.seek(0)
output_json = json.load(output)
test_obj = output_json["tests"]["t1"]
# The precondition failure should map to FAIL status, but we should also
# have an artifact containing the original PRECONDITION_FAILED status.
assert test_obj["actual"] == "FAIL"
assert test_obj["artifacts"]["wpt_actual_status"] == ["PRECONDITION_FAILED"]
# ...this is an unexpected regression because we expected a pass but failed
assert test_obj["is_regression"] is True
assert test_obj["is_unexpected"] is True


def test_known_intermittent_empty(capfd):
# If the known_intermittent list is empty, we want to ensure we don't append
# any extraneous characters to the output.
    # Set up the handler.
output = StringIO()
logger = structuredlog.StructuredLogger("test_a")
logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
# Run a test and include an empty known_intermittent list
logger.suite_start(["t1"], run_info={}, time=123)
logger.test_start("t1")
logger.test_end("t1", status="OK", expected="OK", known_intermittent=[])
logger.suite_end()
# check nothing got output to stdout/stderr
# (note that mozlog outputs exceptions during handling to stderr!)
captured = capfd.readouterr()
assert captured.out == ""
assert captured.err == ""
# check the actual output of the formatter
output.seek(0)
output_json = json.load(output)
test_obj = output_json["tests"]["t1"]
# Both actual and expected statuses get mapped to Pass. No extra whitespace
# anywhere.
assert test_obj["actual"] == "PASS"
assert test_obj["expected"] == "PASS"


def test_reftest_screenshots(capfd):
# reftest_screenshots, if present, should be plumbed into artifacts.
# Set up the handler.
output = StringIO()
logger = structuredlog.StructuredLogger("test_a")
logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
# Run a reftest with reftest_screenshots.
logger.suite_start(["t1"], run_info={}, time=123)
logger.test_start("t1")
logger.test_end("t1", status="FAIL", expected="PASS", extra={
"reftest_screenshots": [
{"url": "foo.html", "hash": "HASH1", "screenshot": "DATA1"},
"!=",
{"url": "foo-ref.html", "hash": "HASH2", "screenshot": "DATA2"},
]
})
logger.suite_end()
# check nothing got output to stdout/stderr
# (note that mozlog outputs exceptions during handling to stderr!)
captured = capfd.readouterr()
assert captured.out == ""
assert captured.err == ""
# check the actual output of the formatter
output.seek(0)
output_json = json.load(output)
test_obj = output_json["tests"]["t1"]
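    # Screenshots are flattened to "url: <data>" strings; the relation ("!=")
    # between them is not part of the artifact.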
assert test_obj["artifacts"]["screenshots"] == [
"foo.html: DATA1",
"foo-ref.html: DATA2",
]


def test_process_output_crashing_test(capfd):
"""Test that chromedriver logs are preserved for crashing tests"""
# Set up the handler.
output = StringIO()
logger = structuredlog.StructuredLogger("test_a")
logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
logger.suite_start(["t1", "t2", "t3"], run_info={}, time=123)
logger.test_start("t1")
logger.process_output(100, "This message should be recorded", "/some/path/to/chromedriver --some-flag")
logger.process_output(101, "This message should not be recorded", "/some/other/process --another-flag")
logger.process_output(100, "This message should also be recorded", "/some/path/to/chromedriver --some-flag")
logger.test_end("t1", status="CRASH", expected="CRASH")
logger.test_start("t2")
logger.process_output(100, "Another message for the second test", "/some/path/to/chromedriver --some-flag")
logger.test_end("t2", status="CRASH", expected="PASS")
logger.test_start("t3")
logger.process_output(100, "This test fails", "/some/path/to/chromedriver --some-flag")
logger.process_output(100, "But the output should not be captured", "/some/path/to/chromedriver --some-flag")
logger.process_output(100, "Because it does not crash", "/some/path/to/chromedriver --some-flag")
logger.test_end("t3", status="FAIL", expected="PASS")
logger.suite_end()
# check nothing got output to stdout/stderr
# (note that mozlog outputs exceptions during handling to stderr!)
captured = capfd.readouterr()
assert captured.out == ""
assert captured.err == ""
# check the actual output of the formatter
output.seek(0)
output_json = json.load(output)
test_obj = output_json["tests"]["t1"]
assert test_obj["artifacts"]["wpt_crash_log"] == [
"This message should be recorded",
"This message should also be recorded"
]
test_obj = output_json["tests"]["t2"]
assert test_obj["artifacts"]["wpt_crash_log"] == [
"Another message for the second test"
]
test_obj = output_json["tests"]["t3"]
assert "wpt_crash_log" not in test_obj["artifacts"]