Bug 1209463 - [test] Log suite names in test harnesses, r?gbrown
Suite names are currently only used by formatters to print an
overall summary from |mach test| and |mach mochitest|, so they
don't need to be exact and can be tweaked further at a later
date.

If multiple test invocations share the same suite name, their
results are merged into the overall summary. If a suite name is
missing, the summary will contain a placeholder name.
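
For reference, a minimal sketch of the new calling convention (the
logger name and test ids here are made up, and it assumes the Python
side of mozlog gains the same optional name argument that the callers
below pass):

    from mozlog.structuredlog import StructuredLogger

    logger = StructuredLogger("example-harness")
    # Summaries are keyed on the suite name, so two invocations that
    # both pass name='xpcshell' are merged into one summary entry;
    # omitting name falls back to the placeholder described above.
    logger.suite_start(["test_foo.js", "test_bar.js"], name="xpcshell")
    logger.suite_end()
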
MozReview-Commit-ID: K1xpb9hUQRX
--- a/layout/tools/reftest/reftest.jsm
+++ b/layout/tools/reftest/reftest.jsm
@@ -446,17 +446,18 @@ function StartTests()
         g.urls = g.urls.slice(start, end);
     }
 
     if (g.startAfter === undefined && !g.suiteStarted) {
         var ids = g.urls.map(function(obj) {
             return obj.identifier;
         });
-        logger.suiteStart(ids, {"skipped": g.urls.length - numActiveTests});
+        var suite = prefs.getCharPref('reftest.suite', 'reftest');
+        logger.suiteStart(ids, suite, {"skipped": g.urls.length - numActiveTests});
         g.suiteStarted = true
     }
 
     if (g.shuffle) {
         if (g.startAfter !== undefined) {
             logger.error("Can't resume from a crashed test when " +
                          "--shuffle is enabled, continue by shuffling " +
                          "all the tests");
--- a/layout/tools/reftest/runreftest.py
+++ b/layout/tools/reftest/runreftest.py
@@ -289,16 +289,17 @@ class RefTest(object):
             prefs['reftest.repeat'] = options.repeat
         if options.runUntilFailure:
             prefs['reftest.runUntilFailure'] = True
         if options.cleanupCrashes:
             prefs['reftest.cleanupPendingCrashes'] = True
         prefs['reftest.focusFilterMode'] = options.focusFilterMode
         prefs['reftest.logLevel'] = options.log_tbpl_level or 'info'
         prefs['reftest.manifests'] = json.dumps(manifests)
+        prefs['reftest.suite'] = options.suite
 
         if startAfter not in (None, self.TEST_SEEN_INITIAL, self.TEST_SEEN_FINAL):
             self.log.info("Setting reftest.startAfter to %s" % startAfter)
             prefs['reftest.startAfter'] = startAfter
 
         # Unconditionally update the e10s pref.
         if options.e10s:
             prefs['browser.tabs.remote.autostart'] = True
--- a/testing/marionette/harness/marionette_harness/runner/base.py
+++ b/testing/marionette/harness/marionette_harness/runner/base.py
@@ -864,16 +864,17 @@ class BaseMarionetteTestRunner(object):
"does not match browser appinfo (self.is_e10s)")
self.marionette.delete_session()
tests_by_group = defaultdict(list)
for test in self.tests:
tests_by_group[test['group']].append(test['filepath'])
self.logger.suite_start(tests_by_group,
+ name='marionette-test',
version_info=self.version_info,
device_info=device_info)
self._log_skipped_tests()
interrupted = None
try:
repeat_index = 0
--- a/testing/mochitest/runtests.py
+++ b/testing/mochitest/runtests.py
@@ -1333,17 +1333,17 @@ toolbar#nav-bar {
                     os.path.isfile(path) and path.endswith(".xpi")):
                 extensions.append(path)
         extensions.extend(options.extensionsToInstall)
         return extensions
 
     def logPreamble(self, tests):
         """Logs a suite_start message and test_start/test_end at the beginning of a run.
         """
-        self.log.suite_start(self.tests_by_manifest)
+        self.log.suite_start(self.tests_by_manifest, name='mochitest-{}'.format(self.flavor))
 
         for test in tests:
             if 'disabled' in test:
                 self.log.test_start(test['path'])
                 self.log.test_end(
                     test['path'],
                     'SKIP',
                     message=test['disabled'])
--- a/testing/modules/StructuredLog.jsm
+++ b/testing/modules/StructuredLog.jsm
@@ -91,18 +91,22 @@ StructuredLogger.prototype = {
     var data = {test,
                 min_expected: minExpected,
                 max_expected: maxExpected,
                 count};
     this._logData("assertion_count", data);
   },
 
-  suiteStart(tests, runinfo = null, versioninfo = null, deviceinfo = null, extra = null) {
+  suiteStart(tests, name = null, runinfo = null, versioninfo = null, deviceinfo = null, extra = null) {
     var data = {tests: tests.map(x => this._testId(x))};
+    if (name !== null) {
+      data.name = name;
+    }
+
     if (runinfo !== null) {
       data.runinfo = runinfo;
     }
 
     if (versioninfo !== null) {
       data.versioninfo = versioninfo;
     }
--- a/testing/runcppunittests.py
+++ b/testing/runcppunittests.py
@@ -149,17 +149,17 @@ class CPPUnitTests(object):
         * symbols_path: A path to a directory containing Breakpad-formatted
                         symbol files for producing stack traces on crash.
 
         Returns True if all test programs exited with a zero status, False
         otherwise.
         """
         self.xre_path = xre_path
         self.log = mozlog.get_default_logger()
-        self.log.suite_start(programs)
+        self.log.suite_start(programs, name='cppunittest')
         env = self.build_environment()
         pass_count = 0
         fail_count = 0
 
         for prog in programs:
             test_path = prog[0]
             timeout_factor = prog[1]
             single_result = self.run_one_test(test_path, env, symbols_path,
                                               interactive, timeout_factor)
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptrunner.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptrunner.py
@@ -196,17 +196,17 @@ def run_tests(config, test_paths, produc
         while repeat_count < repeat or repeat_until_unexpected:
             repeat_count += 1
             if repeat_until_unexpected:
                 logger.info("Repetition %i" % (repeat_count))
             elif repeat > 1:
                 logger.info("Repetition %i / %i" % (repeat_count, repeat))
 
             unexpected_count = 0
-            logger.suite_start(test_loader.test_ids, run_info=run_info)
+            logger.suite_start(test_loader.test_ids, name='web-platform-test', run_info=run_info)
             for test_type in kwargs["test_types"]:
                 logger.info("Running %s tests" % test_type)
 
                 # WebDriver tests may create and destroy multiple browser
                 # processes as part of their expected behavior. These
                 # processes are managed by a WebDriver server binary. This
                 # obviates the need for wptrunner to provide a browser, so
                 # the NullBrowser is used in place of the "target" browser
--- a/testing/xpcshell/runxpcshelltests.py
+++ b/testing/xpcshell/runxpcshelltests.py
@@ -1454,17 +1454,17 @@ class XPCShellTests(object):
         keep_going = True
         exceptions = []
         tracebacks = []
         self.try_again_list = []
 
         tests_by_manifest = defaultdict(list)
         for test in self.alltests:
             tests_by_manifest[test['manifest']].append(test['id'])
-        self.log.suite_start(tests_by_manifest)
+        self.log.suite_start(tests_by_manifest, name='xpcshell')
 
         while tests_queue or running_tests:
             # if we're not supposed to continue and all of the running tests
             # are done, stop
             if not keep_going and not running_tests:
                 break
 
             # if there's room to run more tests, start running them