# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
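
"""Unit tests for core.perf_data_generator."""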

import unittest

from core import perf_data_generator
from core.perf_data_generator import BenchmarkMetadata

from telemetry import benchmark

import mock


class PerfDataGeneratorTest(unittest.TestCase):
  def setUp(self):
    # Test config can be big, so set maxDiff to None to see the full
    # comparison diff when assertEquals fails.
    self.maxDiff = None

  def testVerifyAllTestsInBenchmarkCsvPassesWithCorrectInput(self):
    tests = {
      'AAAAA1 AUTOGENERATED': {},
      'Android Nexus5 Perf (2)': {
        'scripts': [
          {'name': 'benchmark_name_1'},
          {'name': 'benchmark_name_2'}
        ]
      },
      'Linux Perf': {
        'isolated_scripts': [
          {'name': 'benchmark_name_2.reference'},
          {'name': 'benchmark_name_3'}
        ]
      }
    }
    benchmarks = {
      'benchmark_name_1': BenchmarkMetadata('foo@bar.com', None, False),
      'benchmark_name_2': BenchmarkMetadata('darth@deathstar', None, False),
      'benchmark_name_3': BenchmarkMetadata('neo@matrix.org', None, False)
    }

    # Mock out content of unowned_benchmarks.txt
    with mock.patch('__builtin__.open',
                    mock.mock_open(read_data="benchmark_name_2")):
      perf_data_generator.verify_all_tests_in_benchmark_csv(tests, benchmarks)

  def testVerifyAllTestsInBenchmarkCsvCatchesMismatchedTests(self):
    tests = {
      'Android Nexus5 Perf (2)': {
        'scripts': [
          {'name': 'benchmark_name_1'},
          {'name': 'benchmark_name_2'}
        ]
      }
    }
    benchmarks = {
      'benchmark_name_2': BenchmarkMetadata(None, None, False),
      'benchmark_name_3': BenchmarkMetadata(None, None, False),
    }

    with self.assertRaises(AssertionError) as context:
      perf_data_generator.verify_all_tests_in_benchmark_csv(tests, benchmarks)
    exception = context.exception.message
    self.assertTrue('Add benchmark_name_1' in exception)
    self.assertTrue('Remove benchmark_name_3' in exception)

  def testVerifyAllTestsInBenchmarkCsvFindsFakeTest(self):
    tests = {'Random fake test': {}}
    benchmarks = {
      'benchmark_name_1': BenchmarkMetadata(None, None, False)
    }

    with self.assertRaises(AssertionError) as context:
      perf_data_generator.verify_all_tests_in_benchmark_csv(tests, benchmarks)
    self.assertTrue('Unknown test' in context.exception.message)

  def testGenerateTelemetryTestForNonReferenceBuild(self):
    swarming_dimensions = [{'os': 'SkyNet', 'id': 'T-850', 'pool': 'T-RIP'}]
    test = perf_data_generator.generate_telemetry_test(
        swarming_dimensions, 'speedometer', 'release')
    expected_generated_test = {
      'override_compile_targets': ['telemetry_perf_tests'],
      'args': ['speedometer', '-v', '--upload-results',
               '--browser=release', '--output-format=chartjson'],
      'swarming': {
        'ignore_task_failure': False,
        'dimension_sets': [{'os': 'SkyNet', 'id': 'T-850', 'pool': 'T-RIP'}],
        'hard_timeout': 10800,
        'can_use_on_swarming_builders': True,
        'expiration': 36000,
        'io_timeout': 1200,
        'upload_test_results': True,
      },
      'name': 'speedometer',
      'isolate_name': 'telemetry_perf_tests',
    }

    self.assertEquals(test, expected_generated_test)

  def testGenerateTelemetryTestForReferenceBuild(self):
    swarming_dimensions = [{'os': 'SkyNet', 'id': 'T-850', 'pool': 'T-RIP'}]
    test = perf_data_generator.generate_telemetry_test(
        swarming_dimensions, 'speedometer', 'reference')
    expected_generated_test = {
      'override_compile_targets': ['telemetry_perf_tests'],
      'args': ['speedometer', '-v', '--upload-results',
               '--browser=reference', '--output-format=chartjson',
               '--max-failures=5',
               '--output-trace-tag=_ref'],
      'swarming': {
        'ignore_task_failure': True,
        'dimension_sets': [{'os': 'SkyNet', 'id': 'T-850', 'pool': 'T-RIP'}],
        'hard_timeout': 10800,
        'can_use_on_swarming_builders': True,
        'expiration': 36000,
        'io_timeout': 1200,
        'upload_test_results': True,
      },
      'name': 'speedometer.reference',
      'isolate_name': 'telemetry_perf_tests',
    }

    self.assertEquals(test, expected_generated_test)

  def testGenerateTelemetryTestsWebView(self):
    class RegularBenchmark(benchmark.Benchmark):
      @classmethod
      def Name(cls):
        return 'regular'

    swarming_dimensions = [
      {'os': 'SkyNet', 'id': 'T-850', 'pool': 'T-RIP', 'device_ids': ['a']}
    ]
    test_config = {
      'platform': 'android',
      'swarming_dimensions': swarming_dimensions,
      'replace_system_webview': True,
    }
    sharding_map = {'fake': {'regular': 'a'}}
    benchmarks = [RegularBenchmark]

    tests = perf_data_generator.generate_telemetry_tests(
        'fake', test_config, benchmarks, sharding_map, ['blacklisted'])

    self.assertEqual(len(tests), 1)
    test = tests[0]
    self.assertEquals(test['args'], [
      'regular', '-v', '--upload-results',
      '--browser=android-webview', '--output-format=chartjson',
      '--webview-embedder-apk=../../out/Release/apks/SystemWebViewShell.apk'])
    self.assertEquals(test['isolate_name'], 'telemetry_perf_webview_tests')

  def testGenerateTelemetryTestsBlacklistedReferenceBuildTest(self):
    class BlacklistedBenchmark(benchmark.Benchmark):
      @classmethod
      def Name(cls):
        return 'blacklisted'

    class NotBlacklistedBenchmark(benchmark.Benchmark):
      @classmethod
      def Name(cls):
        return 'not_blacklisted'

    swarming_dimensions = [
      {'os': 'SkyNet', 'id': 'T-850', 'pool': 'T-RIP', 'device_ids': ['a']}
    ]
    test_config = {
      'platform': 'android',
      'swarming_dimensions': swarming_dimensions,
    }
    sharding_map = {'fake': {'blacklisted': 'a', 'not_blacklisted': 'a'}}
    benchmarks = [BlacklistedBenchmark, NotBlacklistedBenchmark]

    tests = perf_data_generator.generate_telemetry_tests(
        'fake', test_config, benchmarks, sharding_map, ['blacklisted'])

    generated_test_names = set(t['name'] for t in tests)
    self.assertEquals(
        generated_test_names,
        {'blacklisted', 'not_blacklisted', 'not_blacklisted.reference'})

  def testRemoveBlacklistedTestsNoop(self):
    tests = [{
      'swarming': {
        'dimension_sets': [{
          'id': 'build1-b1',
        }]
      },
      'name': 'test',
    }]
    self.assertEqual(
        perf_data_generator.remove_blacklisted_device_tests(tests, []),
        (tests, {}))

  def testRemoveBlacklistedTestsShouldRemove(self):
    tests = [{
      'swarming': {
        'dimension_sets': [{
          'id': 'build1-b1',
        }]
      },
      'name': 'test',
    }]
    self.assertEqual(
        perf_data_generator.remove_blacklisted_device_tests(
            tests, ['build1-b1']),
        ([], {'build1-b1': ['test']}))

  def testRemoveBlacklistedTestsShouldRemoveMultiple(self):
    tests = [{
      'swarming': {
        'dimension_sets': [{
          'id': 'build1-b1',
        }]
      },
      'name': 'test',
    }, {
      'swarming': {
        'dimension_sets': [{
          'id': 'build2-b1',
        }]
      },
      'name': 'other_test',
    }, {
      'swarming': {
        'dimension_sets': [{
          'id': 'build2-b1',
        }]
      },
      'name': 'test',
    }]
    self.assertEqual(
        perf_data_generator.remove_blacklisted_device_tests(
            tests, ['build1-b1', 'build2-b1']),
        ([], {
          'build1-b1': ['test'],
          'build2-b1': ['other_test', 'test'],
        }))

  def testShouldBenchmarksBeScheduledBadOS(self):
    class RegularBenchmark(benchmark.Benchmark):
      @classmethod
      def Name(cls):
        return 'regular'

    with self.assertRaises(TypeError):
      perf_data_generator.ShouldBenchmarksBeScheduled(
          RegularBenchmark, 'bot_name', 'os_name', None)

  def testShouldBenchmarksBeScheduledShouldRun(self):
    class RegularBenchmark(benchmark.Benchmark):
      @classmethod
      def Name(cls):
        return 'regular'

    valid_os_list = ['mac', 'android', 'windows', 'linux']
    for os in valid_os_list:
      self.assertTrue(
          perf_data_generator.ShouldBenchmarksBeScheduled(
              RegularBenchmark, 'bot_name', os, None))

  def testShouldBenchmarkBeScheduledSupportedPlatform(self):
    class RegularBenchmark(benchmark.Benchmark):
      SUPPORTED_PLATFORMS = []

      @classmethod
      def Name(cls):
        return 'regular'

    self.assertFalse(
        perf_data_generator.ShouldBenchmarksBeScheduled(
            RegularBenchmark, 'bot_name', 'mac', None))

  def testListsAlphabetical(self):
    keys = [
      'BENCHMARK_REF_BUILD_BLACKLIST',
      'SVELTE_DEVICE_LIST'
    ]
    for key in keys:
      lst = getattr(perf_data_generator, key)
      self.assertEqual(sorted(lst), lst, 'please sort %s' % key)

  def testGenerateCplusplusIsolateScriptTest(self):
    dimension = {
      'gpu': '10de:104a',
      'os': 'Windows-2008ServerR2-SP1',
      'pool': 'Chrome-perf',
      'device_ids': [
        'build92-m1', 'build93-m1',
        'build94-m1', 'build95-m1', 'build96-m1'
      ],
      'perf_tests': [
        ('angle_perftests', 'build94-m1'),
      ],
    }
    test = perf_data_generator.generate_cplusplus_isolate_script_test(
        dimension)
    test = test[0]
    self.assertEqual(test['name'], 'angle_perftests')
    self.assertEqual(test['isolate_name'], 'angle_perftests')

  def testGenerateCplusplusIsolateScriptTestWithArgs(self):
    dimension = {
      'gpu': '10de:104a',
      'os': 'Windows-2008ServerR2-SP1',
      'pool': 'Chrome-perf',
      'device_ids': [
        'build92-m1', 'build93-m1',
        'build94-m1', 'build95-m1', 'build96-m1'
      ],
      'perf_tests_with_args': [
        ('passthrough_command_buffer_perftests', 'build94-m1',
         ['--use-cmd-decoder=passthrough', '--use-angle=gl-null'],
         'command_buffer_perftests')
      ]
    }
    test = perf_data_generator.generate_cplusplus_isolate_script_test_with_args(
        dimension)
    test = test[0]
    self.assertEqual(test['name'], 'passthrough_command_buffer_perftests')
    self.assertEqual(test['isolate_name'], 'command_buffer_perftests')
    self.assertTrue('--use-cmd-decoder=passthrough' in test['args'])
    self.assertTrue('--use-angle=gl-null' in test['args'])
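

# Entry point so the suite can also be run by executing this file directly;
# the tests are otherwise discovered and run by the standard unittest runner.
if __name__ == '__main__':
  unittest.main()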