Bug 1462434 - Prevent baseline coverage tests from being skipped. r?jmaher
This patch prevents baseline coverage tests from being skipped when too many tests are being run.
MozReview-Commit-ID: JVTOYZAXbwf
--- a/testing/mozharness/scripts/desktop_unittest.py
+++ b/testing/mozharness/scripts/desktop_unittest.py
@@ -863,35 +863,46 @@ class DesktopUnittest(TestingMixin, Merc
env['STYLO_THREADS'] = '1'
else:
env['STYLO_THREADS'] = '4'
env = self.query_env(partial_env=env, log_level=INFO)
cmd_timeout = self.get_timeout_for_category(suite_category)
summary = None
+ executed_too_many_tests = False
for per_test_args in self.query_args(suite):
- if (datetime.now() - self.start_time) > max_per_test_time:
- # Running tests has run out of time. That is okay! Stop running
- # them so that a task timeout is not triggered, and so that
- # (partial) results are made available in a timely manner.
- self.info("TinderboxPrint: Running tests took too long: Not all tests "
- "were executed.<br/>")
- # Signal per-test time exceeded, to break out of suites and
- # suite categories loops also.
- return False
- if executed_tests >= max_per_test_tests:
- # When changesets are merged between trees or many tests are
- # otherwise updated at once, there probably is not enough time
- # to run all tests, and attempting to do so may cause other
- # problems, such as generating too much log output.
- self.info("TinderboxPrint: Too many modified tests: Not all tests "
- "were executed.<br/>")
- return False
- executed_tests = executed_tests + 1
+ # Make sure baseline code coverage tests are never
+ # skipped and that having them run has no influence
+ # on the max number of actual tests that are to be run.
+ is_baseline_test = 'baselinecoverage' in per_test_args[-1] \
+ if self.per_test_coverage else False
+ if executed_too_many_tests and not is_baseline_test:
+ continue
+
+ if not is_baseline_test:
+ if (datetime.now() - self.start_time) > max_per_test_time:
+ # Running tests has run out of time. That is okay! Stop running
+ # them so that a task timeout is not triggered, and so that
+ # (partial) results are made available in a timely manner.
+ self.info("TinderboxPrint: Running tests took too long: Not all tests "
+ "were executed.<br/>")
+ # Signal per-test time exceeded, to break out of suites and
+ # suite categories loops also.
+ return False
+ if executed_tests >= max_per_test_tests:
+ # When changesets are merged between trees or many tests are
+ # otherwise updated at once, there probably is not enough time
+ # to run all tests, and attempting to do so may cause other
+ # problems, such as generating too much log output.
+ self.info("TinderboxPrint: Too many modified tests: Not all tests "
+ "were executed.<br/>")
+ executed_too_many_tests = True
+
+                executed_tests = executed_tests + 1
final_cmd = copy.copy(cmd)
final_cmd.extend(per_test_args)
if self.per_test_coverage:
gcov_dir, jsvm_dir = self.set_coverage_env(env)
return_code = self.run_command(final_cmd, cwd=dirs['abs_work_dir'],
@@ -925,16 +936,19 @@ class DesktopUnittest(TestingMixin, Merc
parser.append_tinderboxprint_line(suite_name)
self.buildbot_status(tbpl_status, level=log_level)
if len(per_test_args) > 0:
self.log_per_test_status(per_test_args[-1], tbpl_status, log_level)
else:
self.log("The %s suite: %s ran with return status: %s" %
(suite_category, suite, tbpl_status), level=log_level)
+
+ if executed_too_many_tests:
+ return False
else:
self.debug('There were no suites to run for %s' % suite_category)
return True
# main {{{1
if __name__ == '__main__':
desktop_unittest = DesktopUnittest()
--- a/testing/mozharness/scripts/web_platform_tests.py
+++ b/testing/mozharness/scripts/web_platform_tests.py
@@ -333,33 +333,44 @@ class WebPlatformTest(TestingMixin, Merc
else:
test_types = self.config.get("test_type", [])
suites = [None]
for suite in suites:
if suite:
test_types = [suite]
summary = None
+ executed_too_many_tests = False
for per_test_args in self.query_args(suite):
- if (datetime.now() - start_time) > max_per_test_time:
- # Running tests has run out of time. That is okay! Stop running
- # them so that a task timeout is not triggered, and so that
- # (partial) results are made available in a timely manner.
- self.info("TinderboxPrint: Running tests took too long: Not all tests "
- "were executed.<br/>")
- return
- if executed_tests >= max_per_test_tests:
- # When changesets are merged between trees or many tests are
- # otherwise updated at once, there probably is not enough time
- # to run all tests, and attempting to do so may cause other
- # problems, such as generating too much log output.
- self.info("TinderboxPrint: Too many modified tests: Not all tests "
- "were executed.<br/>")
- return
- executed_tests = executed_tests + 1
+ # Make sure baseline code coverage tests are never
+ # skipped and that having them run has no influence
+ # on the max number of actual tests that are to be run.
+ is_baseline_test = 'baselinecoverage' in per_test_args[-1] \
+ if self.per_test_coverage else False
+ if executed_too_many_tests and not is_baseline_test:
+ continue
+
+ if not is_baseline_test:
+ if (datetime.now() - start_time) > max_per_test_time:
+ # Running tests has run out of time. That is okay! Stop running
+ # them so that a task timeout is not triggered, and so that
+ # (partial) results are made available in a timely manner.
+ self.info("TinderboxPrint: Running tests took too long: Not all tests "
+ "were executed.<br/>")
+ return
+ if executed_tests >= max_per_test_tests:
+ # When changesets are merged between trees or many tests are
+ # otherwise updated at once, there probably is not enough time
+ # to run all tests, and attempting to do so may cause other
+ # problems, such as generating too much log output.
+ self.info("TinderboxPrint: Too many modified tests: Not all tests "
+ "were executed.<br/>")
+ executed_too_many_tests = True
+
+                executed_tests = executed_tests + 1
cmd = self._query_cmd(test_types)
cmd.extend(per_test_args)
if self.per_test_coverage:
gcov_dir, jsvm_dir = self.set_coverage_env(env)
return_code = self.run_command(cmd,