Reland "[Blink] Report skipped WPT tests in resultDB and result.html"
Reland notes: The problem with the original change was that
WPTResultsProcessor uses Port.skips_test() to check for
non-TestExpectation skip reasons, but WebTestFinder adds its own skip
reasons (namely, MSAN + idlharness). The reland fixes this by reading skipped
tests from test_end(status='SKIP'), which is plumbed from the
authoritative WebTestFinder.
This is a reland of commit 33fd59aaa07b7243538ac6283d533aa631c4198e
Original change's description:
> [Blink] Report skipped WPT tests in resultDB and result.html
>
> This change enhances the reporting of Web Platform Tests (WPT) by
> including skipped tests in both resultDB and the result.html summary.
>
> Furthermore, `wpt_results_processor` now uses `port.skips_test()` to
> check for skipped tests. This ensures that all skipped tests, regardless
> of the reason, are correctly reported in their expected status.
>
> Bug: 384786282
> Change-Id: I9843749279b6aaf4cd2c20e86c691af1899b4321
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/6092241
> Reviewed-by: Jonathan Lee <jonathanjlee@google.com>
> Reviewed-by: Weizhong Xia <weizhong@google.com>
> Commit-Queue: An Sung <ansung@google.com>
> Cr-Commit-Position: refs/heads/main@{#1409322}
Bug: 384786282
Change-Id: If9deaf071f93db4f363961036e98a8f5c2a0dd01
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/6195112
Reviewed-by: Weizhong Xia <weizhong@google.com>
Commit-Queue: An Sung <ansung@google.com>
Reviewed-by: Jonathan Lee <jonathanjlee@google.com>
Cr-Commit-Position: refs/heads/main@{#1428099}
diff --git a/android_webview/test/BUILD.gn b/android_webview/test/BUILD.gn
index e4ed3055..df55140 100644
--- a/android_webview/test/BUILD.gn
+++ b/android_webview/test/BUILD.gn
@@ -124,10 +124,7 @@
"--browser-apk",
"@WrappedPath(apks/SystemWebViewShell.apk)",
]
- data = [
- "//third_party/blink/web_tests/platform/webview/",
- "//third_party/blink/web_tests/MobileTestExpectations",
- ]
+ data = [ "//third_party/blink/web_tests/platform/webview/" ]
data_deps = [
"//android_webview:system_webview_apk",
"//android_webview/tools/system_webview_shell:system_webview_shell_layout_test_apk",
@@ -154,10 +151,7 @@
"--additional-apk",
"@WrappedPath(apks/TrichromeLibrary.apk)",
]
- data = [
- "//third_party/blink/web_tests/platform/webview/",
- "//third_party/blink/web_tests/MobileTestExpectations",
- ]
+ data = [ "//third_party/blink/web_tests/platform/webview/" ]
data_deps = [
"//android_webview:trichrome_webview_apk",
"//android_webview/tools/system_webview_shell:system_webview_shell_layout_test_apk",
@@ -185,10 +179,7 @@
"--additional-apk",
"@WrappedPath(apks/TrichromeLibrary64.apk)",
]
- data = [
- "//third_party/blink/web_tests/platform/webview/",
- "//third_party/blink/web_tests/MobileTestExpectations",
- ]
+ data = [ "//third_party/blink/web_tests/platform/webview/" ]
data_deps = [
"//android_webview:trichrome_webview_64_apk",
"//android_webview/tools/system_webview_shell:system_webview_shell_layout_test_apk",
diff --git a/chrome/android/BUILD.gn b/chrome/android/BUILD.gn
index 671eaf10..cab53619 100644
--- a/chrome/android/BUILD.gn
+++ b/chrome/android/BUILD.gn
@@ -2668,10 +2668,7 @@
"chrome_android",
"--zero-tests-executed-ok",
]
- data = [
- "//third_party/blink/web_tests/platform/android/",
- "//third_party/blink/web_tests/MobileTestExpectations",
- ]
+ data = [ "//third_party/blink/web_tests/platform/android/" ]
data_deps = [
":chrome_public_apk",
"//chrome/test/chromedriver:chromedriver_server($host_toolchain)",
diff --git a/third_party/blink/tools/BUILD.gn b/third_party/blink/tools/BUILD.gn
index b58e3bc..e50ed9d 100644
--- a/third_party/blink/tools/BUILD.gn
+++ b/third_party/blink/tools/BUILD.gn
@@ -52,6 +52,14 @@
# Emulator
"//build/android/pylib/",
+ # Common expectation
+ "//third_party/blink/web_tests/LeakExpectations",
+ "//third_party/blink/web_tests/MobileTestExpectations",
+ "//third_party/blink/web_tests/NeverFixTests",
+ "//third_party/blink/web_tests/SlowTests",
+ "//third_party/blink/web_tests/StaleTestExpectations",
+ "//third_party/blink/web_tests/TestExpectations",
+
# Android-specific expectations
"//third_party/blink/web_tests/android/AndroidWPTNeverFixTests",
"//third_party/blink/web_tests/android/WPTSmokeTestCases",
diff --git a/third_party/blink/tools/blinkpy/wpt_tests/test_loader.py b/third_party/blink/tools/blinkpy/wpt_tests/test_loader.py
index 21881463..250b098 100644
--- a/third_party/blink/tools/blinkpy/wpt_tests/test_loader.py
+++ b/third_party/blink/tools/blinkpy/wpt_tests/test_loader.py
@@ -56,10 +56,12 @@
*args,
expectations: Optional[TestExpectations] = None,
include: Optional[Collection[str]] = None,
+ tests_to_skip: Optional[Collection[str]] = None,
**kwargs):
self._port = port
self._expectations = expectations or TestExpectations(port)
self._include = include
+ self._tests_to_skip = tests_to_skip or []
# Invoking the superclass constructor will immediately load tests, so
# set `_port` and `_expectations` first.
super().__init__(*args, **kwargs)
@@ -79,7 +81,7 @@
self.tests[subsuite_name] = collections.defaultdict(list)
self.disabled_tests[subsuite_name] = collections.defaultdict(list)
- test_urls = subsuite.include or self._include or []
+ test_urls = subsuite.include or self._include
for test_url in test_urls:
if not test_url.startswith('/'):
test_url = f'/{test_url}'
@@ -92,10 +94,10 @@
inherit_metadata, test_metadata = self.load_metadata(
subsuite.run_info, manifest, test_root['metadata_path'],
item.path)
- test = self.get_test(manifest, item, inherit_metadata,
- test_metadata)
# `WebTestFinder` should have already filtered out skipped
# tests, but add to `disabled_tests` anyway just in case.
+ test = self.get_test(manifest, item, inherit_metadata,
+ test_metadata)
tests = self.disabled_tests if test.disabled() else self.tests
tests[subsuite_name][item.item_type].append(test)
@@ -141,16 +143,20 @@
expected_text.decode('utf-8', 'replace'))
else:
testharness_lines = []
-
exp_line = self._expectations.get_expectations(test_name)
+
# Do not create test metadata when there is no baseline and test is
# expected to pass or run with no test expectations.
- if (self._port.get_option('no_expectations') or exp_line.results
- == {ResultType.Pass}) and not testharness_lines:
+ if test_name in self._tests_to_skip:
+ test_ast = self._build_skipped_test_ast(test_name)
+ elif (self._port.get_option('no_expectations') or exp_line.results
+ == {ResultType.Pass}) and not testharness_lines:
+ # All (sub)tests pass; no metadata explicitly needed.
continue
- test_file_ast.append(
- self._build_test_ast(item.item_type, exp_line,
- testharness_lines))
+ else:
+ test_ast = self._build_test_ast(item.item_type, exp_line,
+ testharness_lines)
+ test_file_ast.append(test_ast)
if not test_file_ast.children:
# AST is empty. Fast path for all-pass expectations.
@@ -162,16 +168,21 @@
test_metadata.set('type', test_type)
return [], test_metadata
+ def _build_skipped_test_ast(self, test_name: str) -> wptnode.DataNode:
+ """Builds the AST for a test that should be skipped."""
+ test_ast = wptnode.DataNode(_test_basename(test_name))
+ disabled = wptnode.KeyValueNode('disabled')
+ disabled.append(wptnode.AtomNode(True))
+ test_ast.append(disabled)
+ return test_ast
+
def _build_test_ast(
self,
test_type: TestType,
exp_line: ExpectationType,
testharness_lines: List[TestharnessLine],
) -> wptnode.DataNode:
- # Use the default result 'Pass' when run with --no-expectations
- exp_results = ({
- ResultType.Pass
- } if self._port.get_option('no_expectations') else exp_line.results)
+ exp_results = exp_line.results
test_statuses = chromium_to_wptrunner_statuses(
exp_results - {ResultType.Skip}, test_type)
harness_errors = {
@@ -179,6 +190,8 @@
for line in testharness_lines
if line.line_type is LineType.HARNESS_ERROR
}
+ passing_status = chromium_to_wptrunner_statuses(
+ frozenset([ResultType.Pass]), test_type)
if not can_have_subtests(test_type):
# Temporarily expect PASS so that unexpected passes don't contribute
# to retries or build failures.
@@ -186,9 +199,7 @@
elif ResultType.Failure in exp_results or not harness_errors:
# Add `OK` for `[ Failure ]` lines or no explicit harness error in
# the baseline.
- test_statuses.update(
- chromium_to_wptrunner_statuses(frozenset([ResultType.Pass]),
- test_type))
+ test_statuses.update(passing_status)
elif len(harness_errors) > 1:
raise ValueError(
f'testharness baseline for {exp_line.test!r} can only have up '
@@ -221,12 +232,21 @@
@classmethod
def install(cls, port: Port, expectations: TestExpectations,
- include: List[str]):
- """Patch overrides into the wptrunner API (may be unstable)."""
+ include: Collection[str], tests_to_skip: Collection[str]):
+ """Patch overrides into the wptrunner API (may be unstable).
+
+ Arguments:
+ include: Base tests to run, formatted as wptrunner-style URLs
+ (i.e., leading `/`, and no `external/wpt` prefix).
+ tests_to_skip: A list of Blink-style test names (can be virtual).
+ This `TestLoader` will generate metadata with `disabled: @True`
+ for these tests.
+ """
testloader.TestLoader = functools.partial(cls,
port,
expectations=expectations,
- include=include)
+ include=include,
+ tests_to_skip=tests_to_skip)
# Ideally, we would patch `executorchrome.*.convert_result`, but changes
# to the executor classes here in the main process don't persist to
diff --git a/third_party/blink/tools/blinkpy/wpt_tests/wpt_adapter.py b/third_party/blink/tools/blinkpy/wpt_tests/wpt_adapter.py
index b4436c4..c12d07a7 100644
--- a/third_party/blink/tools/blinkpy/wpt_tests/wpt_adapter.py
+++ b/third_party/blink/tools/blinkpy/wpt_tests/wpt_adapter.py
@@ -447,7 +447,7 @@
logger.error('No tests to run.')
sys.exit(exit_codes.NO_TESTS_EXIT_STATUS)
- return self._prepare_tests_for_wptrunner(tests_to_run)
+ return tests_to_run, tests_to_skip
def _lookup_subsuite_args(self, subsuite_name):
for suite in self.port.virtual_test_suites():
@@ -489,7 +489,14 @@
def _set_up_runner_tests(self, runner_options, tmp_dir):
if not self.using_upstream_wpt:
- include_tests, subsuite_json = self._collect_tests()
+ tests_to_run, tests_to_skip = self._collect_tests()
+ include_tests, subsuite_json = self._prepare_tests_for_wptrunner([
+ *tests_to_run,
+ # Pass skipped tests to wptrunner too so that they're added to
+ # the log and test results, but the Blink-side `TestLoader`
+ # will generate metadata that disables them.
+ *tests_to_skip,
+ ])
if subsuite_json:
config_path = self.fs.join(tmp_dir, 'subsuite.json')
with self.fs.open_text_file_for_writing(
@@ -511,6 +518,9 @@
runner_options.total_chunks = 1
runner_options.this_chunk = 1
runner_options.default_exclude = True
+
+ TestLoader.install(self.port, self._expectations,
+ runner_options.include, tests_to_skip)
else:
self._set_up_runner_sharding_options(runner_options)
runner_options.retry_unexpected = 0
@@ -551,8 +561,6 @@
tests_root = self.tools_root
else:
tests_root = self.finder.path_from_wpt_tests()
- TestLoader.install(self.port, self._expectations,
- runner_options.include)
runner_options.tests_root = tests_root
runner_options.metadata_root = tests_root
logger.debug('Using WPT tests (external) from %s', tests_root)
diff --git a/third_party/blink/tools/blinkpy/wpt_tests/wpt_results_processor.py b/third_party/blink/tools/blinkpy/wpt_tests/wpt_results_processor.py
index 649147d..9984e26 100644
--- a/third_party/blink/tools/blinkpy/wpt_tests/wpt_results_processor.py
+++ b/third_party/blink/tools/blinkpy/wpt_tests/wpt_results_processor.py
@@ -60,8 +60,6 @@
from blinkpy.web_tests.models.test_run_results import convert_to_hierarchical_view
from blinkpy.web_tests.models.typ_types import (
Artifacts,
- Expectation,
- ExpectationType,
Result,
ResultSinkReporter,
ResultType,
@@ -155,15 +153,12 @@
def __init__(self,
*args,
test_type: Optional[str] = None,
- exp_line: Optional[ExpectationType] = None,
baseline: Optional[List[TestharnessLine]] = None,
no_expectations: bool = False,
**kwargs):
- kwargs.setdefault('expected', exp_line.results)
super().__init__(*args, **kwargs)
self.testharness_results = []
self.test_type = test_type
- self._exp_line = exp_line or Expectation()
self._baseline = baseline or []
self.image_diff_stats = None
self.no_expectations = no_expectations
@@ -630,6 +625,7 @@
baseline = parse_testharness_baseline(expected_text.decode())
else:
baseline = []
+ expected = self._expectations.get_expectations(test).results
self._results[test] = WPTResult(
test,
# Placeholder status that has the lowest priority possible.
@@ -640,7 +636,7 @@
worker=0,
file_path=self._file_path_for_test(test),
test_type=self.get_test_type(test),
- exp_line=self._expectations.get_expectations(test),
+ expected=expected,
baseline=baseline,
no_expectations=self.port.get_option('no_expectations'))
@@ -690,6 +686,8 @@
result = self._results.pop(test, None)
if not result:
raise EventProcessingError('Test not started: %s' % test)
+ if 'SKIP' in expected:
+ result.expected |= {ResultType.Skip}
result.took = max(0, event.time - result.started) / 1000
result.pid = (extra or {}).get('browser_pid', 0)
result.update_from_test(status, message)
diff --git a/third_party/blink/tools/blinkpy/wpt_tests/wpt_results_processor_unittest.py b/third_party/blink/tools/blinkpy/wpt_tests/wpt_results_processor_unittest.py
index 7237bdc1..5b48016 100644
--- a/third_party/blink/tools/blinkpy/wpt_tests/wpt_results_processor_unittest.py
+++ b/third_party/blink/tools/blinkpy/wpt_tests/wpt_results_processor_unittest.py
@@ -1357,3 +1357,33 @@
report = json.loads(self.fs.read_text_file(report_dest))
self.assertEqual(report['run_info'], self.wpt_report['run_info'])
self.assertEqual(report['results'], self.wpt_report['results'])
+
+ def test_report_expected_skipped_test(self):
+ self.fs.write_text_file(
+ self.path_finder.path_from_web_tests('TestExpectations'),
+ textwrap.dedent("""\
+ # results: [ Skip ]
+ external/wpt/reftest.html [ Skip ]
+ """))
+ self._event(action='test_start', test='/reftest.html')
+ self._event(action='test_end',
+ test='/reftest.html',
+ expected=None,
+ status='SKIP')
+ report_mock = self.processor.sink.report_individual_test_result
+ report_mock.assert_called_once_with(
+ test_name_prefix='',
+ result=mock.ANY,
+ artifact_output_dir=self.fs.join('/mock-checkout', 'out',
+ 'Default'),
+ expectations=None,
+ test_file_location=self.path_finder.path_from_web_tests(
+ 'external', 'wpt', 'reftest.html'),
+ html_summary=mock.ANY)
+ result = report_mock.call_args.kwargs['result']
+ self.assertEqual(result.name, 'external/wpt/reftest.html')
+ self.assertEqual(result.actual, 'SKIP')
+ self.assertEqual(result.expected, {'SKIP'})
+ self.assertFalse(result.unexpected)
+ self.assertAlmostEqual(result.took, 0)
+ self.assertEqual(result.artifacts, {})
\ No newline at end of file