Bug 1372567 - Fix problems with mochitest structured logging, r?jgraham
The mochitest harness uses testEnd multiple times to log various failures. This can result
in several testEnd messages, which will soon cause mozlog to spit out an error. Instead,
these should be testStatus.
This also starts using mozlog's assertion_count log action to log test assertions (again,
instead of testEnd).
MozReview-Commit-ID: FFsyicSso5Y
--- a/testing/mochitest/browser-test.js
+++ b/testing/mochitest/browser-test.js
@@ -264,17 +264,16 @@ Tester.prototype = {
let newTab = gBrowser.addTab("about:blank", { skipAnimation: true });
gBrowser.removeTab(gBrowser.selectedTab, { skipPermitUnload: true });
gBrowser.stop();
}
// Remove stale windows
this.structuredLogger.info("checking window state");
let windowsEnum = Services.wm.getEnumerator(null);
- let createdFakeTestForLogging = false;
while (windowsEnum.hasMoreElements()) {
let win = windowsEnum.getNext();
if (win != window && !win.closed &&
win.document.documentElement.getAttribute("id") != "browserTestHarness") {
let type = win.document.documentElement.getAttribute("windowtype");
switch (type) {
case "navigator:browser":
type = "browser window";
@@ -286,35 +285,23 @@ Tester.prototype = {
}
let msg = baseMsg.replace("{elt}", type);
if (this.currentTest) {
this.currentTest.addResult(new testResult({
name: msg,
allowFailure: this.currentTest.allowFailure,
}));
} else {
- if (!createdFakeTestForLogging) {
- createdFakeTestForLogging = true;
- this.structuredLogger.testStart("browser-test.js");
- }
this.failuresFromInitialWindowState++;
- this.structuredLogger.testStatus("browser-test.js",
- msg, "FAIL", false, "");
+ this.structuredLogger.error("browser-test.js | " + msg);
}
win.close();
}
}
- if (createdFakeTestForLogging) {
- let time = Date.now() - startTime;
- this.structuredLogger.testEnd("browser-test.js",
- "OK",
- undefined,
- "finished window state check in " + time + "ms");
- }
// Make sure the window is raised before each test.
this.SimpleTest.waitForFocus(aCallback);
},
finish: function Tester_finish(aSkipSummary) {
var passCount = this.tests.reduce((a, f) => a + f.passCount, 0);
var failCount = this.tests.reduce((a, f) => a + f.failCount, 0);
@@ -343,20 +330,17 @@ Tester.prototype = {
if (this.tests.length) {
let e10sMode = gMultiProcessBrowser ? "e10s" : "non-e10s";
this.structuredLogger.info("Browser Chrome Test Summary");
this.structuredLogger.info("Passed: " + passCount);
this.structuredLogger.info("Failed: " + failCount);
this.structuredLogger.info("Todo: " + todoCount);
this.structuredLogger.info("Mode: " + e10sMode);
} else {
- this.structuredLogger.testEnd("browser-test.js",
- "FAIL",
- "PASS",
- "No tests to run. Did you pass invalid test_paths?");
+ this.structuredLogger.error("browser-test.js | No tests to run. Did you pass invalid test_paths?");
}
this.structuredLogger.info("*** End BrowserChrome Test Results ***");
// Tests complete, notify the callback and return
this.callback(this.tests);
this.callback = null;
this.tests = null;
}
--- a/testing/mochitest/runtests.py
+++ b/testing/mochitest/runtests.py
@@ -145,17 +145,17 @@ class MochitestFormatter(TbplFormatter):
class MessageLogger(object):
"""File-like object for logging messages (structured logs)"""
BUFFERING_THRESHOLD = 100
# This is a delimiter used by the JS side to avoid logs interleaving
DELIMITER = u'\ue175\uee31\u2c32\uacbf'
BUFFERED_ACTIONS = set(['test_status', 'log'])
VALID_ACTIONS = set(['suite_start', 'suite_end', 'test_start', 'test_end',
- 'test_status', 'log',
+ 'test_status', 'log', 'assertion_count',
'buffering_on', 'buffering_off'])
TEST_PATH_PREFIXES = ['/tests/',
'chrome://mochitests/content/a11y/',
'chrome://mochitests/content/browser/',
'chrome://mochitests/content/chrome/']
def __init__(self, logger, buffering=True, structured=True):
self.logger = logger
--- a/testing/mochitest/tests/SimpleTest/TestRunner.js
+++ b/testing/mochitest/tests/SimpleTest/TestRunner.js
@@ -444,20 +444,17 @@ TestRunner.runNextTest = function() {
var todoCount = parseInt($("todo-count").innerHTML, 10);
if (passCount === 0 &&
failCount === 0 &&
todoCount === 0)
{
// No |$('testframe').contentWindow|, so manually update: ...
// ... the log,
- TestRunner.structuredLogger.testEnd('SimpleTest/TestRunner.js',
- "ERROR",
- "OK",
- "No checks actually run");
+ TestRunner.structuredLogger.error("SimpleTest/TestRunner.js | No checks actually run");
// ... the count,
$("fail-count").innerHTML = 1;
// ... the indicator.
var indicator = $("indicator");
indicator.innerHTML = "Status: Fail (No checks actually run)";
indicator.style.backgroundColor = "red";
}
@@ -506,16 +503,21 @@ TestRunner.runNextTest = function() {
TestRunner.expectChildProcessCrash = function() {
TestRunner._expectingProcessCrash = true;
};
/**
* This stub is called by SimpleTest when a test is finished.
**/
TestRunner.testFinished = function(tests) {
+ // Need to track subtests recorded here separately or else they'll
+ // trigger the `result after SimpleTest.finish()` error.
+ var extraTests = [];
+ var result = "OK";
+
// Prevent a test from calling finish() multiple times before we
// have a chance to unload it.
if (TestRunner._currentTest == TestRunner._lastTestFinished &&
!TestRunner._loopIsRestarting) {
TestRunner.structuredLogger.testEnd(TestRunner.currentTestURL,
"ERROR",
"OK",
"called finish() multiple times");
@@ -535,33 +537,41 @@ TestRunner.testFinished = function(tests
MemoryStats.dump(TestRunner._currentTest,
TestRunner.currentTestURL,
TestRunner.dumpOutputDirectory,
TestRunner.dumpAboutMemoryAfterTest,
TestRunner.dumpDMDAfterTest);
function cleanUpCrashDumpFiles() {
if (!SpecialPowers.removeExpectedCrashDumpFiles(TestRunner._expectingProcessCrash)) {
- TestRunner.structuredLogger.testEnd(TestRunner.currentTestURL,
- "ERROR",
- "OK",
- "This test did not leave any crash dumps behind, but we were expecting some!");
- tests.push({ result: false });
+ var subtest = "expected-crash-dump-missing";
+ TestRunner.structuredLogger.testStatus(TestRunner.currentTestURL,
+ subtest,
+ "ERROR",
+ "PASS",
+ "This test did not leave any crash dumps behind, but we were expecting some!");
+ extraTests.push({ name: subtest, result: false });
+ result = "ERROR";
}
+
var unexpectedCrashDumpFiles =
SpecialPowers.findUnexpectedCrashDumpFiles();
TestRunner._expectingProcessCrash = false;
if (unexpectedCrashDumpFiles.length) {
- TestRunner.structuredLogger.testEnd(TestRunner.currentTestURL,
- "ERROR",
- "OK",
- "This test left crash dumps behind, but we " +
- "weren't expecting it to!",
- {unexpected_crashdump_files: unexpectedCrashDumpFiles});
- tests.push({ result: false });
+ subtest = "unexpected-crash-dump-found";
+ TestRunner.structuredLogger.testStatus(TestRunner.currentTestURL,
+ subtest,
+ "ERROR",
+ "PASS",
+ "This test left crash dumps behind, but we " +
+ "weren't expecting it to!",
+ null,
+ {unexpected_crashdump_files: unexpectedCrashDumpFiles});
+ extraTests.push({ name: subtest, result: false });
+ result = "CRASH";
unexpectedCrashDumpFiles.sort().forEach(function(aFilename) {
TestRunner.structuredLogger.info("Found unexpected crash dump file " +
aFilename + ".");
});
}
if (TestRunner.cleanupCrashes) {
if (SpecialPowers.removePendingCrashDumpFiles()) {
@@ -574,34 +584,35 @@ TestRunner.testFinished = function(tests
if (TestRunner.currentTestURL != TestRunner.getLoadedTestURL()) {
TestRunner.structuredLogger.testStatus(TestRunner.currentTestURL,
TestRunner.getLoadedTestURL(),
"FAIL",
"PASS",
"finished in a non-clean fashion, probably" +
" because it didn't call SimpleTest.finish()",
{loaded_test_url: TestRunner.getLoadedTestURL()});
- tests.push({ result: false });
+ extraTests.push({ name: "clean-finish", result: false });
+ result = result != "CRASH" ? "ERROR" : result;
}
var runtime = new Date().valueOf() - TestRunner._currentTestStartTime;
TestRunner.structuredLogger.testEnd(TestRunner.currentTestURL,
+ result,
"OK",
- undefined,
"Finished in " + runtime + "ms",
{runtime: runtime}
);
if (TestRunner.slowestTestTime < runtime && TestRunner._timeoutFactor >= 1) {
TestRunner.slowestTestTime = runtime;
TestRunner.slowestTestURL = TestRunner.currentTestURL;
}
- TestRunner.updateUI(tests);
+ TestRunner.updateUI(tests.concat(extraTests));
// Don't show the interstitial if we just run one test with no repeats:
if (TestRunner._urls.length == 1 && TestRunner.repeat <= 1) {
TestRunner.testUnloaded();
return;
}
var interstitialURL;
@@ -613,17 +624,17 @@ TestRunner.testFinished = function(tests
// check if there were test run after SimpleTest.finish, which should never happen
$('testframe').contentWindow.addEventListener('unload', function() {
var testwin = $('testframe').contentWindow;
if (testwin.SimpleTest && testwin.SimpleTest._tests.length != testwin.SimpleTest.testsLength) {
var wrongtestlength = testwin.SimpleTest._tests.length - testwin.SimpleTest.testsLength;
var wrongtestname = '';
for (var i = 0; i < wrongtestlength; i++) {
wrongtestname = testwin.SimpleTest._tests[testwin.SimpleTest.testsLength + i].name;
- TestRunner.structuredLogger.testStatus(TestRunner.currentTestURL, wrongtestname, 'FAIL', 'PASS', "Result logged after SimpleTest.finish()");
+ TestRunner.structuredLogger.error(TestRunner.currentTestURL + " logged result after SimpleTest.finish(): " + wrongtestname);
}
TestRunner.updateUI([{ result: false }]);
}
});
TestRunner._makeIframe(interstitialURL, 0);
}
SpecialPowers.executeAfterFlushingMessageQueue(function() {
@@ -636,52 +647,29 @@ TestRunner.testUnloaded = function() {
// If we're in a debug build, check assertion counts. This code is
// similar to the code in Tester_nextTest in browser-test.js used
// for browser-chrome mochitests.
if (SpecialPowers.isDebugBuild) {
var newAssertionCount = SpecialPowers.assertionCount();
var numAsserts = newAssertionCount - TestRunner._lastAssertionCount;
TestRunner._lastAssertionCount = newAssertionCount;
- var url = TestRunner.getNextUrl();
var max = TestRunner._expectedMaxAsserts;
var min = TestRunner._expectedMinAsserts;
if (Array.isArray(TestRunner.expected)) {
// Accumulate all assertion counts recorded in the failure pattern file.
let additionalAsserts = TestRunner.expected.reduce((acc, [pat, count]) => {
return pat == "ASSERTION" ? acc + count : acc;
}, 0);
min += additionalAsserts;
max += additionalAsserts;
}
- if (numAsserts > max) {
- TestRunner.structuredLogger.testEnd(url,
- "ERROR",
- "OK",
- "Assertion count " + numAsserts + " is greater than expected range " +
- min + "-" + max + " assertions.",
- {assertions: numAsserts, min_asserts: min, max_asserts: max});
- TestRunner.updateUI([{ result: false }]);
- } else if (numAsserts < min) {
- TestRunner.structuredLogger.testEnd(url,
- "OK",
- "ERROR",
- "Assertion count " + numAsserts + " is less than expected range " +
- min + "-" + max + " assertions.",
- {assertions: numAsserts, min_asserts: min, max_asserts: max});
- TestRunner.updateUI([{ result: false }]);
- } else if (numAsserts > 0) {
- TestRunner.structuredLogger.testEnd(url,
- "ERROR",
- "ERROR",
- "Assertion count " + numAsserts + " within expected range " +
- min + "-" + max + " assertions.",
- {assertions: numAsserts, min_asserts: min, max_asserts: max});
- }
+ TestRunner.structuredLogger.assertionCount(TestRunner.currentTestURL, numAsserts, min, max);
}
+
TestRunner._currentTest++;
if (TestRunner.runSlower) {
setTimeout(TestRunner.runNextTest, 1000);
} else {
TestRunner.runNextTest();
}
};