Change the handling of the "no tests to run" case.

Previously, if typ failed to find any tests to run, either because
there were no tests or because all of the tests were being skipped,
it would error out. This turns out to be awkward because it means
the exit code of a test run can change depending on whether or not
someone skips a test.

So, this CL changes things so that the test run "succeeds" as long
as no tests actually fail: if no tests are found, or every test is
skipped, that still counts as a success.
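For example, a run whose only test is skipped now exits 0 and
reports "0 tests passed, 1 skipped, 0 failures." instead of
exiting 1 with "No tests to run."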

As part of this, the CL also changes the format of the one-line
summary from "%d tests run, %d failures" to
"%d tests passed, %d skipped, %d failures" to be a little clearer
about the three kinds of results (and to help the user figure out
what happened if all of the tests were skipped or nothing was found).
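
To illustrate, here is a minimal sketch of how the new summary line
is assembled (the counts below are hypothetical, and the optional
timing clause is omitted):

    # In the JSON results format typ writes, 'num_failures_by_type'
    # counts results of every type, not just failures.
    full_results = {'num_failures_by_type':
                        {'PASS': 1, 'SKIP': 1, 'FAIL': 0}}

    counts = full_results['num_failures_by_type']
    num_passes = counts['PASS']
    num_skips = counts['SKIP']
    num_failures = counts['FAIL']

    print('%d test%s passed, %d skipped, %d failure%s.' %
          (num_passes, '' if num_passes == 1 else 's',
           num_skips,
           num_failures, '' if num_failures == 1 else 's'))
    # -> 1 test passed, 1 skipped, 0 failures.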

R=nednguyen@google.com
BUG=https://github.com/catapult-project/catapult/issues/3540
diff --git a/typ/json_results.py b/typ/json_results.py
index 6d1eb81..f048d05 100644
--- a/typ/json_results.py
+++ b/typ/json_results.py
@@ -123,6 +123,14 @@
     return full_results['num_failures_by_type']['FAIL']
 
 
+def num_passes(full_results):
+    return full_results['num_failures_by_type']['PASS']
+
+
+def num_skips(full_results):
+    return full_results['num_failures_by_type']['SKIP']
+
+
 def failed_test_names(results):
     names = set()
     for r in results.results:
diff --git a/typ/runner.py b/typ/runner.py
index a2f5c3d..01e414c 100644
--- a/typ/runner.py
+++ b/typ/runner.py
@@ -422,9 +422,6 @@
 
     def _run_tests(self, result_set, test_set):
         h = self.host
-        if not test_set.parallel_tests and not test_set.isolated_tests:
-            self.print_('No tests to run.')
-            return 1, None
 
         all_tests = [ti.name for ti in
                      _sort_inputs(test_set.parallel_tests +
@@ -592,8 +589,9 @@
         self.printer.flush()
 
     def _summarize(self, full_results):
-        num_tests = self.stats.finished
+        num_passes = json_results.num_passes(full_results)
         num_failures = json_results.num_failures(full_results)
+        num_skips = json_results.num_skips(full_results)
 
         if self.args.quiet and num_failures == 0:
             return
@@ -603,10 +601,11 @@
                                            self.stats.started_time)
         else:
             timing_clause = ''
-        self.update('%d test%s run%s, %d failure%s.' %
-                    (num_tests,
-                     '' if num_tests == 1 else 's',
+        self.update('%d test%s passed%s, %d skipped, %d failure%s.' %
+                    (num_passes,
+                     '' if num_passes == 1 else 's',
                      timing_clause,
+                     num_skips,
                      num_failures,
                      '' if num_failures == 1 else 's'), elide=False)
         self.print_()
diff --git a/typ/tests/main_test.py b/typ/tests/main_test.py
index 4474efc..f17aabc 100644
--- a/typ/tests/main_test.py
+++ b/typ/tests/main_test.py
@@ -191,7 +191,7 @@
         self.check([], files=PASS_TEST_FILES,
                    ret=0,
                    out=('[1/1] pass_test.PassingTest.test_pass passed\n'
-                        '1 test run, 0 failures.\n'), err='')
+                        '1 test passed, 0 skipped, 0 failures.\n'), err='')
 
     def test_coverage(self):
         try:
@@ -203,7 +203,7 @@
             self.check(['-c', 'pass_test'], files=files, ret=0, err='',
                        out=d("""\
                              [1/1] pass_test.PassingTest.test_pass passed
-                             1 test run, 0 failures.
+                             1 test passed, 0 skipped, 0 failures.
 
                              Name           Stmts   Miss  Cover
                              ----------------------------------
@@ -230,7 +230,7 @@
         self.check(['-n'], files=PASS_TEST_FILES, ret=0, err='',
                    out=d("""\
                          [1/1] pass_test.PassingTest.test_pass passed
-                         1 test run, 0 failures.
+                         1 test passed, 0 skipped, 0 failures.
                          """))
 
     def test_error(self):
@@ -243,7 +243,7 @@
         _, out, _, _ = self.check([''], files=files, ret=1, err='')
         self.assertIn('[1/1] err_test.ErrTest.test_err failed unexpectedly',
                       out)
-        self.assertIn('1 test run, 1 failure', out)
+        self.assertIn('0 tests passed, 0 skipped, 1 failure', out)
 
     def test_fail(self):
         _, out, _, _ = self.check([], files=FAIL_TEST_FILES, ret=1, err='')
@@ -267,7 +267,7 @@
                                       files=files, ret=0, err='')
         self.assertIn('Retrying failed tests (attempt #1 of 3)', out)
         self.assertNotIn('Retrying failed tests (attempt #2 of 3)', out)
-        self.assertIn('1 test run, 0 failures.\n', out)
+        self.assertIn('1 test passed, 0 skipped, 0 failures.\n', out)
         results = json.loads(files['full_results.json'])
         self.assertEqual(
             results['tests'][
@@ -293,7 +293,7 @@
                                       files=files, ret=0, err='')
         self.assertIn('Retrying failed tests (attempt #1 of 3)', out)
         self.assertNotIn('Retrying failed tests (attempt #2 of 3)', out)
-        self.assertIn('1 test run, 0 failures.\n', out)
+        self.assertIn('0 tests passed, 1 skipped, 0 failures.\n', out)
         results = json.loads(files['full_results.json'])
         self.assertEqual(
             results['tests'][
@@ -377,8 +377,8 @@
 
     def test_import_failure_no_tests(self):
         files = {'foo.py': 'import unittest'}
-        self.check(['-l', 'foo'], files=files, ret=1, err='',
-                   out='No tests to run.\n')
+        self.check(['-l', 'foo'], files=files, ret=0, err='',
+                   out='\n')
 
     def test_import_failure_syntax_error(self):
         files = {'syn_test.py': d("""\
@@ -405,7 +405,7 @@
     def test_isolate(self):
         self.check(['--isolate', '*test_pass*'], files=PASS_TEST_FILES, ret=0,
                    out=('[1/1] pass_test.PassingTest.test_pass passed\n'
-                        '1 test run, 0 failures.\n'), err='')
+                        '1 test passed, 0 skipped, 0 failures.\n'), err='')
 
     def test_load_tests_failure(self):
         files = {'foo_test.py': d("""\
@@ -423,7 +423,7 @@
                                   err='')
         self.assertIn('[1/2] load_test.BaseTest.test_fail failed', out)
         self.assertIn('[2/2] load_test.BaseTest.test_pass passed', out)
-        self.assertIn('2 tests run, 1 failure.\n', out)
+        self.assertIn('1 test passed, 0 skipped, 1 failure.\n', out)
 
     def test_load_tests_multiple_workers(self):
         _, out, _, _ = self.check([], files=LOAD_TEST_FILES, ret=1, err='')
@@ -433,7 +433,7 @@
         # we care about are present.
         self.assertIn('test_pass passed', out)
         self.assertIn('test_fail failed', out)
-        self.assertIn('2 tests run, 1 failure.\n', out)
+        self.assertIn('1 test passed, 0 skipped, 1 failure.\n', out)
 
     def test_missing_builder_name(self):
         self.check(['--test-results-server', 'localhost'], ret=2,
@@ -449,7 +449,7 @@
                    files=OUTPUT_TEST_FILES, aenv={'NINJA_STATUS': 'ns: '},
                    out=d("""\
                          ns: output_test.PassTest.test_out passed
-                         1 test run, 0 failures.
+                         1 test passed, 0 skipped, 0 failures.
                          """), err='')
 
     def test_output_for_failures(self):
@@ -474,15 +474,16 @@
                          3)
 
     def test_skip(self):
-        self.check(['--skip', '*test_fail*'], files=FAIL_TEST_FILES, ret=1,
-                   out='No tests to run.\n', err='')
+        _, out, _, _ = self.check(['--skip', '*test_fail*'],
+                                  files=FAIL_TEST_FILES, ret=0)
+        self.assertIn('0 tests passed, 1 skipped, 0 failures.', out)
 
         files = {'fail_test.py': FAIL_TEST_PY,
                  'pass_test.py': PASS_TEST_PY}
         self.check(['-j', '1', '--skip', '*test_fail*'], files=files, ret=0,
                    out=('[1/2] fail_test.FailingTest.test_fail was skipped\n'
                         '[2/2] pass_test.PassingTest.test_pass passed\n'
-                        '2 tests run, 0 failures.\n'), err='')
+                        '1 test passed, 1 skipped, 0 failures.\n'), err='')
 
         # This tests that we print test_started updates for skipped tests
         # properly. It also tests how overwriting works.
@@ -503,7 +504,7 @@
              '                                     \r'
              '[2/2] pass_test.PassingTest.test_pass passed\r'
              '                                            \r'
-             '2 tests run, 0 failures.'))
+             '1 test passed, 1 skipped, 0 failures.'))
 
     def test_skips_and_failures(self):
         _, out, _, _ = self.check(['-j', '1', '-v', '-v'], files=SF_TEST_FILES,
@@ -533,16 +534,17 @@
                        '  reason\n'
                        '[9/9] sf_test.SkipSetup.test_notrun was skipped:\n'
                        '  setup failed\n'
-                       '9 tests run, 4 failures.\n'), out)
+                       '1 test passed, 4 skipped, 4 failures.\n'), out)
 
     def test_skip_and_all(self):
         # --all should override --skip
-        self.check(['-l', '--skip', '*test_pass'],
-                   files=PASS_TEST_FILES, ret=1, err='',
-                   out='No tests to run.\n')
-        self.check(['-l', '--all', '--skip', '*test_pass'],
-                   files=PASS_TEST_FILES, ret=0, err='',
-                   out='pass_test.PassingTest.test_pass\n')
+        _, out, _, _ = self.check(['--skip', '*test_pass'],
+                                  files=PASS_TEST_FILES, ret=0, err='')
+        self.assertIn('0 tests passed, 1 skipped, 0 failures.', out)
+
+        _, out, _, _ = self.check(['--all', '--skip', '*test_pass'],
+                                  files=PASS_TEST_FILES, ret=0, err='')
+        self.assertIn('1 test passed, 0 skipped, 0 failures.', out)
 
     def test_skip_decorators_and_all(self):
         _, out, _, _ = self.check(['--all', '-j', '1', '-v', '-v'],
@@ -589,7 +591,7 @@
             for i, test in enumerate(tests):
                 exp_out += ('[%d/%d] shard_test.ShardTest.test_%s passed\n' %
                             (i + 1, total_tests, test))
-            exp_out += '%d test%s run, 0 failures.\n' % (
+            exp_out += '%d test%s passed, 0 skipped, 0 failures.\n' % (
                 total_tests, "" if total_tests == 1 else "s")
             self.assertEqual(out, exp_out)
 
@@ -607,14 +609,14 @@
         self.check(['foo/bar'], files=files, ret=0, err='',
                    out=d("""\
                          [1/1] foo.bar.pass_test.PassingTest.test_pass passed
-                         1 test run, 0 failures.
+                         1 test passed, 0 skipped, 0 failures.
                          """))
 
     def test_timing(self):
         self.check(['-t'], files=PASS_TEST_FILES, ret=0, err='',
                    rout=(r'\[1/1\] pass_test.PassingTest.test_pass passed '
                          r'\d+.\d+s\n'
-                         r'1 test run in \d+.\d+s, 0 failures.'))
+                         r'1 test passed in \d+.\d+s, 0 skipped, 0 failures.'))
 
     def test_test_results_server(self):
         server = test_result_server_fake.start()
@@ -629,7 +631,7 @@
                         '--metadata', 'foo=bar'],
                        files=PASS_TEST_FILES, ret=0, err='',
                        out=('[1/1] pass_test.PassingTest.test_pass passed\n'
-                            '1 test run, 0 failures.\n'))
+                            '1 test passed, 0 skipped, 0 failures.\n'))
 
         finally:
             posts = server.stop()
@@ -654,7 +656,7 @@
                         '--metadata', 'foo=bar'],
                        files=PASS_TEST_FILES, ret=1, err='',
                        out=('[1/1] pass_test.PassingTest.test_pass passed\n'
-                            '1 test run, 0 failures.\n'
+                            '1 test passed, 0 skipped, 0 failures.\n'
                             'Uploading the JSON results raised '
                             '"HTTP Error 500: Internal Server Error"\n'))
 
@@ -669,7 +671,7 @@
                     '--metadata', 'foo=bar'],
                    files=PASS_TEST_FILES, ret=1, err='',
                    rout=(r'\[1/1\] pass_test.PassingTest.test_pass passed\n'
-                         '1 test run, 0 failures.\n'
+                         '1 test passed, 0 skipped, 0 failures.\n'
                          'Uploading the JSON results raised .*\n'))
 
     def test_verbose_2(self):
@@ -680,7 +682,7 @@
                            hello on stderr
                          [2/2] output_test.PassTest.test_out passed:
                            hello on stdout
-                         2 tests run, 0 failures.
+                         2 tests passed, 0 skipped, 0 failures.
                          """), err='')
 
     def test_verbose_3(self):
@@ -693,7 +695,7 @@
                          [1/2] output_test.PassTest.test_out queued
                          [2/2] output_test.PassTest.test_out passed:
                            hello on stdout
-                         2 tests run, 0 failures.
+                         2 tests passed, 0 skipped, 0 failures.
                          """), err='')
 
     def test_version(self):
diff --git a/typ/tests/runner_test.py b/typ/tests/runner_test.py
index 4ebb30b..a5013b2 100644
--- a/typ/tests/runner_test.py
+++ b/typ/tests/runner_test.py
@@ -147,8 +147,8 @@
             result = self.call([],
                                win_multiprocessing=WinMultiprocessing.ignore)
             ret, out, err = result
-            self.assertEqual(ret, 1)
-            self.assertEqual(out, 'No tests to run.\n')
+            self.assertEqual(ret, 0)
+            self.assertEqual(out, '0 tests passed, 0 skipped, 0 failures.\n')
             self.assertEqual(err, '')
 
     def test_real_unimportable_main(self):
@@ -205,14 +205,14 @@
 
     def test_single_job(self):
         ret, out, err = self.call(['-j', '1'], platform='win32')
-        self.assertEqual(ret, 1)
-        self.assertIn('No tests to run.', out)
+        self.assertEqual(ret, 0)
+        self.assertEqual('0 tests passed, 0 skipped, 0 failures.\n', out)
         self.assertEqual(err, '')
 
     def test_spawn(self):
         ret, out, err = self.call([])
-        self.assertEqual(ret, 1)
-        self.assertIn('No tests to run.', out)
+        self.assertEqual(ret, 0)
+        self.assertEqual('0 tests passed, 0 skipped, 0 failures.\n', out)
         self.assertEqual(err, '')