Add support for running a subset of tests (aka "sharding").
This patch adds two new command line arguments, --shard-index
and --total-shards. These can be used to run a fractional subset
of the tests, and work by running every `total_shards`th test in
the list of tests, starting at offset `shard_index`.
Also, bump the version to 0.9.5.
diff --git a/typ/arg_parser.py b/typ/arg_parser.py
index cb51711..f7a6026 100644
--- a/typ/arg_parser.py
+++ b/typ/arg_parser.py
@@ -147,6 +147,14 @@
self.add_argument('--passthrough', action='store_true',
default=False,
help='Prints all output while running.')
+ self.add_argument('--total-shards', default=1, type=int,
+ help=('Total number of shards being used for '
+ 'this test run. (The user of '
+ 'this script is responsible for spawning '
+ 'all of the shards.)'))
+ self.add_argument('--shard-index', default=0, type=int,
+ help=('Shard index (0..total_shards-1) of this '
+ 'test run.'))
self.add_argument('--retry-limit', type=int, default=0,
help='Retries each failure up to N times.')
self.add_argument('--terminal-width', type=int,
@@ -193,6 +201,20 @@
'along with --test-result-server')
self.exit_status = 2
+ if rargs.total_shards < 1:
+ self._print_message('Error: --total-shards must be at least 1')
+ self.exit_status = 2
+
+ if rargs.shard_index < 0:
+ self._print_message('Error: --shard-index must be at least 0')
+ self.exit_status = 2
+
+ if rargs.shard_index >= rargs.total_shards:
+ self._print_message('Error: --shard-index must be no more than '
+ 'the number of shards (%i) minus 1' %
+ rargs.total_shards)
+ self.exit_status = 2
+
if not rargs.suffixes:
rargs.suffixes = DEFAULT_SUFFIXES
diff --git a/typ/runner.py b/typ/runner.py
index 67c3537..c2975f1 100644
--- a/typ/runner.py
+++ b/typ/runner.py
@@ -353,9 +353,18 @@
# TODO: Add support for discovering setupProcess/teardownProcess?
- test_set.parallel_tests = _sort_inputs(test_set.parallel_tests)
- test_set.isolated_tests = _sort_inputs(test_set.isolated_tests)
- test_set.tests_to_skip = _sort_inputs(test_set.tests_to_skip)
+ shard_index = args.shard_index
+ total_shards = args.total_shards
+ assert total_shards >= 1
+ assert shard_index >= 0 and shard_index < total_shards, (
+ 'shard_index (%d) must be >= 0 and < total_shards (%d)' %
+ (shard_index, total_shards))
+ test_set.parallel_tests = _sort_inputs(
+ test_set.parallel_tests)[shard_index::total_shards]
+ test_set.isolated_tests = _sort_inputs(
+ test_set.isolated_tests)[shard_index::total_shards]
+ test_set.tests_to_skip = _sort_inputs(
+ test_set.tests_to_skip)[shard_index::total_shards]
return 0, test_set
finally:
unittest.skip = orig_skip
diff --git a/typ/tests/arg_parser_test.py b/typ/tests/arg_parser_test.py
index e603ecd..8e3a7bd 100644
--- a/typ/tests/arg_parser_test.py
+++ b/typ/tests/arg_parser_test.py
@@ -43,3 +43,34 @@
check(['--coverage', '--coverage-omit', 'foo'])
check(['--jobs', '3'])
check(['-vv'], ['--verbose', '--verbose'])
+
+ def test_valid_shard_options(self):
+ parser = ArgumentParser()
+
+ parser.parse_args(['--total-shards', '1'])
+ self.assertEqual(parser.exit_status, None)
+
+ parser.parse_args(['--total-shards', '5', '--shard-index', '4'])
+ self.assertEqual(parser.exit_status, None)
+
+ parser.parse_args(['--total-shards', '5', '--shard-index', '0'])
+ self.assertEqual(parser.exit_status, None)
+
+
+ def test_invalid_shard_options(self):
+ parser = ArgumentParser()
+
+ parser.parse_args(['--total-shards', '0'])
+ self.assertEqual(parser.exit_status, 2)
+
+ parser.parse_args(['--total-shards', '-1'])
+ self.assertEqual(parser.exit_status, 2)
+
+ parser.parse_args(['--total-shards', '5', '--shard-index', '-1'])
+ self.assertEqual(parser.exit_status, 2)
+
+ parser.parse_args(['--total-shards', '5', '--shard-index', '5'])
+ self.assertEqual(parser.exit_status, 2)
+
+ parser.parse_args(['--total-shards', '5', '--shard-index', '6'])
+ self.assertEqual(parser.exit_status, 2)
diff --git a/typ/tests/main_test.py b/typ/tests/main_test.py
index 7a2d179..f0d027d 100644
--- a/typ/tests/main_test.py
+++ b/typ/tests/main_test.py
@@ -159,10 +159,86 @@
return suite
"""
-
LOAD_TEST_FILES = {'load_test.py': LOAD_TEST_PY}
+MIXED_TEST_PY = """
+import unittest
+class SampleTest(unittest.TestCase):
+
+ def test_pass_0(self):
+ self.assertEqual(1, 1)
+
+ def test_pass_1(self):
+ self.assertEqual(1, 1)
+
+ def test_fail_0(self):
+ self.assertEqual(1, 2)
+
+ def test_fail_1(self):
+ raise Exception()
+
+ @unittest.skip('Skip for no reason')
+ def test_skip_0(self):
+ pass
+"""
+
+
+LOAD_MANY_TEST_PY = """
+import unittest
+
+def generate_test_case(test_method_name, test_type):
+ class GeneratedTest(unittest.TestCase):
+ pass
+
+ if test_type == 'pass':
+ def test_method(self):
+ self.assertEqual(1, 1)
+ elif test_type == 'fail':
+ def test_method(self):
+ self.assertEqual(1, 2)
+ elif test_type == 'skip':
+ def test_method(self):
+ self.skipTest('Skipped')
+ else:
+ raise Exception
+
+ setattr(GeneratedTest, test_method_name, test_method)
+ return GeneratedTest(test_method_name)
+
+
+def load_tests(loader, standard_tests, pattern):
+ del loader, standard_tests, pattern # unused
+
+ suite = unittest.TestSuite()
+
+ passed_test_names = [
+ str('test_pass_%s' % i) for i in range(2, 15)]
+
+ failed_test_names = [
+ str('test_fail_%s' % i) for i in range(2, 10)]
+
+ skipped_test_names = [
+ str('test_skip_%s' % i) for i in range(1, 10)]
+
+ for test_method_name in passed_test_names:
+ suite.addTest(generate_test_case(test_method_name, 'pass'))
+
+ for test_method_name in failed_test_names:
+ suite.addTest(generate_test_case(test_method_name, 'fail'))
+
+ for test_method_name in skipped_test_names:
+ suite.addTest(generate_test_case(test_method_name, 'skip'))
+
+ return suite
+"""
+
+
+MANY_TEST_FILES = {
+ 'mixed_test.py': MIXED_TEST_PY, # 2 passes, 2 fails, 1 skip
+    'load_many_test.py': LOAD_MANY_TEST_PY}  # 13 passes, 8 fails, 9 skips
+
+
path_to_main = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'runner.py')
@@ -172,6 +248,34 @@
prog = [sys.executable, path_to_main]
files_to_ignore = ['*.pyc']
+ def get_test_results_stat(self, test_output):
+ num_passes = test_output.count(' passed\n')
+ num_fails = test_output.count(' failed unexpectedly:\n')
+ num_skips = test_output.count(' was skipped\n')
+ return num_passes, num_fails, num_skips
+
+ def run_and_check_test_results(self, num_shards):
+ total_passes, total_fails, total_skips = 0, 0, 0
+ min_num_tests_run = float('inf')
+ max_num_tests_run = 0
+ for shard_index in range(num_shards):
+ _, out, _, _ = self.check(
+ ['--total-shards', str(num_shards), '--shard-index',
+ str(shard_index)], files=MANY_TEST_FILES)
+ passes, fails, skips = self.get_test_results_stat(out)
+ total_passes += passes
+ total_fails += fails
+ total_skips += skips
+ num_tests_run = passes + fails
+ min_num_tests_run = min(min_num_tests_run, num_tests_run)
+ max_num_tests_run = max(max_num_tests_run, num_tests_run)
+ self.assertEqual(total_passes, 15)
+ self.assertEqual(total_fails, 10)
+ self.assertEqual(total_skips, 10)
+
+ # Make sure that we don't distribute the tests too unevenly.
+ self.assertLessEqual(max_num_tests_run - min_num_tests_run, 2)
+
def test_bad_arg(self):
self.check(['--bad-arg'], ret=2, out='',
rerr='.*: error: unrecognized arguments: --bad-arg\n')
@@ -527,6 +631,19 @@
# the decorators.
self.assertIn('sf_test.SkipSetup.test_notrun was skipped', out)
+ def test_sharding(self):
+ # Test no sharding.
+ self.run_and_check_test_results(1)
+
+        # A typical case with 4 shards.
+ self.run_and_check_test_results(4)
+
+        # Case in which the number of shards is prime.
+ self.run_and_check_test_results(7)
+
+        # Case in which the number of shards exceeds the number of tests.
+ self.run_and_check_test_results(50)
+
def test_subdir(self):
files = {
'foo/__init__.py': '',
diff --git a/typ/version.py b/typ/version.py
index edf14a0..a0cc8a3 100644
--- a/typ/version.py
+++ b/typ/version.py
@@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-VERSION = '0.9.4'
+VERSION = '0.9.5'