--- a/layout/tools/reftest/output.py
+++ b/layout/tools/reftest/output.py
@@ -20,53 +20,76 @@ class ReftestFormatter(TbplFormatter):
     def __call__(self, data):
         if 'component' in data and data['component'] == 'mozleak':
             # Output from mozleak requires that no prefix be added
             # so that mozharness will pick up these failures.
             return "%s\n" % data['message']
 
         formatted = TbplFormatter.__call__(self, data)
+
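+        # TbplFormatter can return None for messages it does not render;
+        # skip those rather than emitting a stray "REFTEST None" line.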
+        if formatted is None:
+            return
         if data['action'] == 'process_output':
             return formatted
         return 'REFTEST %s' % formatted
 
     def log(self, data):
         prefix = "%s |" % data['level'].upper()
         return "%s %s\n" % (prefix, data['message'])
 
+    def _format_status(self, data):
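+        # Build the legacy "TEST-*" status prefix (UNEXPECTED-, KNOWN-,
+        # EXPECTED RANDOM) shared by test_status and test_end.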
+        extra = data.get('extra', {})
+        status = data['status']
+
+        status_msg = "TEST-"
+        if 'expected' in data:
+            status_msg += "UNEXPECTED-%s" % status
+        else:
+            if status not in ("PASS", "SKIP"):
+                status_msg += "KNOWN-"
+            status_msg += status
+            if extra.get('status_msg') == 'Random':
+                status_msg += "(EXPECTED RANDOM)"
+        return status_msg
+
+    def test_status(self, data):
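+        # One line per individual comparison, in the legacy
+        # "TEST-<status> | <test> | <subtest> | <message>" layout.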
+        test = data['test']
+
+        status_msg = self._format_status(data)
+        output_text = "%s | %s | %s" % (status_msg, test, data.get("subtest", "unknown test"))
+        if data.get('message'):
+            output_text += " | %s" % data['message']
+        return output_text + "\n"
+
     def test_end(self, data):
         extra = data.get('extra', {})
         status = data['status']
         test = data['test']
 
-        status_msg = "TEST-"
-        if 'expected' in data:
-            status_msg += "UNEXPECTED-%s" % status
-        else:
-            if status != "PASS":
-                status_msg += "KNOWN-"
-            status_msg += status
-            if extra.get('status_msg') == 'Random':
-                status_msg += "(EXPECTED RANDOM)"
-        output_text = "%s | %s | %s" % (status_msg, test, data.get("message", ""))
+        output_text = ""
+        if status != "OK":
+            status_msg = self._format_status(data)
+            output_text = "%s | %s | %s" % (status_msg, test, data.get("message", ""))
 
if "reftest_screenshots" in extra:
screenshots = extra["reftest_screenshots"]
image_1 = screenshots[0]["screenshot"]
if len(screenshots) == 3:
image_2 = screenshots[2]["screenshot"]
output_text += ("\nREFTEST IMAGE 1 (TEST): data:image/png;base64,%s\n"
"REFTEST IMAGE 2 (REFERENCE): data:image/png;base64,%s") % (
image_1, image_2)
elif len(screenshots) == 1:
output_text += "\nREFTEST IMAGE: data:image/png;base64,%s" % image_1
- output_text += "\nREFTEST TEST-END | %s" % test
+ if output_text:
+ output_text += "\nREFTEST "
+ output_text += "TEST-END | %s" % test
return "%s\n" % output_text
 
     def process_output(self, data):
         return "%s\n" % data["data"]
 
     def suite_end(self, data):
         lines = []
         summary = data['extra']['results']
--- a/layout/tools/reftest/reftest.jsm
+++ b/layout/tools/reftest/reftest.jsm
@@ -1723,17 +1723,17 @@ function RecordResult(testRunTime, error
     // for EXPECTED_FUZZY we need special handling because we can have
     // Pass, UnexpectedPass, or UnexpectedFail
     var output;
     var extra;
 
     if (gURLs[0].type == TYPE_LOAD) {
         ++gTestResults.LoadOnly;
-        logger.testEnd(gURLs[0].identifier, "PASS", "PASS", "(LOAD ONLY)");
+        logger.testStatus(gURLs[0].identifier, "(LOAD ONLY)", "PASS", "PASS");
         gCurrentCanvas = null;
         FinishTestItem();
         return;
     }
 
     if (gURLs[0].type == TYPE_PRINT) {
         switch (gState) {
         case 1:
             // First document has been loaded.
@@ -1800,17 +1800,17 @@ function RecordResult(testRunTime, error
             else
                 logger.info("An expected silent failure occurred");
         }
 
         if (errorMsg) {
             output = outputs[expected][false];
             extra = { status_msg: output.n };
             ++gTestResults[output.n];
-            logger.testEnd(gURLs[0].identifier, output.s[0], output.s[1], errorMsg, null, extra);
+            logger.testStatus(gURLs[0].identifier, errorMsg, output.s[0], output.s[1], null, null, extra);
             FinishTestItem();
             return;
         }
 
         var anyFailed = typeSpecificResults.some(function(result) { return !result.passed; });
         var outputPair;
         if (anyFailed && expected == EXPECTED_FAIL) {
             // If we're marked as expected to fail, and some (but not all) tests
@@ -1823,18 +1823,18 @@ function RecordResult(testRunTime, error
             outputPair = outputs[expected];
         }
         var index = 0;
         typeSpecificResults.forEach(function(result) {
             var output = outputPair[result.passed];
             var extra = { status_msg: output.n };
             ++gTestResults[output.n];
-            logger.testEnd(gURLs[0].identifier, output.s[0], output.s[1],
-                           result.description + " item " + (++index), null, extra);
+            logger.testStatus(gURLs[0].identifier, result.description + " item " + (++index),
+                              output.s[0], output.s[1], null, null, extra);
         });
 
         if (anyFailed && expected == EXPECTED_PASS) {
             FlushTestBuffer();
         }
 
         FinishTestItem();
         return;
@@ -1941,17 +1941,17 @@ function RecordResult(testRunTime, error
             // The gFailed*Messages arrays will contain messages from both the test and the reference.
             if (gFailedOpaqueLayer) {
                 failures.push("failed reftest-opaque-layer: " + gFailedOpaqueLayerMessages.join(", "));
             }
             if (gFailedAssignedLayer) {
                 failures.push("failed reftest-assigned-layer: " + gFailedAssignedLayerMessages.join(", "));
             }
             var failureString = failures.join(", ");
-            logger.testEnd(gURLs[0].identifier, output.s[0], output.s[1], failureString, null, extra);
+            logger.testStatus(gURLs[0].identifier, failureString, output.s[0], output.s[1], null, null, extra);
         } else {
             var message = "image comparison, max difference: " + maxDifference.value +
                           ", number of differing pixels: " + differences;
             if (!test_passed && expected == EXPECTED_PASS ||
                 !test_passed && expected == EXPECTED_FUZZY ||
                 test_passed && expected == EXPECTED_FAIL) {
                 if (!equal) {
                     extra.max_difference = maxDifference.value;
@@ -1971,17 +1971,17 @@ function RecordResult(testRunTime, error
                     var image1 = gCanvas1.toDataURL();
                     extra.reftest_screenshots = [
                         {url:gURLs[0].identifier[0],
                          screenshot: image1.slice(image1.indexOf(",") + 1)}
                     ];
                     extra.image1 = image1;
                 }
             }
-            logger.testEnd(gURLs[0].identifier, output.s[0], output.s[1], message, null, extra);
+            logger.testStatus(gURLs[0].identifier, message, output.s[0], output.s[1], null, null, extra);
 
             if (gNoCanvasCache) {
                 ReleaseCanvas(gCanvas1);
                 ReleaseCanvas(gCanvas2);
             } else {
                 if (gURLs[0].prefSettings1.length == 0) {
                     UpdateCanvasCache(gURLs[0].url1, gCanvas1);
                 }
@@ -2006,17 +2006,17 @@ function RecordResult(testRunTime, error
 function LoadFailed(why)
 {
     ++gTestResults.FailedLoad;
     // Once bug 896840 is fixed, this can go away, but for now it will give log
     // output that is TBPL starable for bug 789751 and bug 720452.
     if (!why) {
         logger.error("load failed with unknown reason");
     }
-    logger.testEnd(gURLs[0]["url" + gState].spec, "FAIL", "PASS", "load failed: " + why);
+    logger.testStatus(gURLs[0].identifier, "load failed: " + why, "FAIL", "PASS");
     FlushTestBuffer();
     FinishTestItem();
 }
 
 function RemoveExpectedCrashDumpFiles()
 {
     if (gExpectingProcessCrash) {
         for (let crashFilename of gExpectedCrashDumpFiles) {
@@ -2045,17 +2045,17 @@ function FindUnexpectedCrashDumpFiles()
     while (entries.hasMoreElements()) {
         let file = entries.getNext().QueryInterface(CI.nsIFile);
         let path = String(file.path);
         if (path.match(/\.(dmp|extra)$/) && !gUnexpectedCrashDumpFiles[path]) {
             if (!foundCrashDumpFile) {
                 ++gTestResults.UnexpectedFail;
                 foundCrashDumpFile = true;
                 if (gCurrentURL) {
-                    logger.testEnd(gCurrentURL, "FAIL", "PASS", "This test left crash dumps behind, but we weren't expecting it to!");
+                    logger.testStatus(gURLs[0].identifier, "crash-check", "FAIL", "PASS", "This test left crash dumps behind, but we weren't expecting it to!");
                 } else {
                     logger.error("Harness startup left crash dumps behind, but we weren't expecting it to!");
                 }
             }
             logger.info("Found unexpected crash dump file " + path);
             gUnexpectedCrashDumpFiles[path] = true;
         }
     }
@@ -2084,16 +2084,18 @@ function CleanUpCrashDumpFiles()
     if (gCleanupPendingCrashes) {
         RemovePendingCrashDumpFiles();
     }
     gExpectingProcessCrash = false;
 }
 
 function FinishTestItem()
 {
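+    // Per-comparison results were already reported via testStatus; a single
+    // "OK" test_end closes out the test item.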
+    logger.testEnd(gURLs[0].identifier, "OK");
+
     // Replace document with BLANK_URL_FOR_CLEARING in case there are
     // assertions when unloading.
     logger.debug("Loading a blank page");
     // After clearing, content will notify us of the assertion count
     // and tests will continue.
     SendClear();
     gFailedNoPaint = false;
     gFailedOpaqueLayer = false;
@@ -2113,17 +2115,17 @@ function DoAssertionCheck(numAsserts)
             gAssertionCount = newAssertionCount;
 
             numAsserts += numLocalAsserts;
         }
 
         var minAsserts = gURLs[0].minAsserts;
         var maxAsserts = gURLs[0].maxAsserts;
 
-        logger.assertionCount(gCurrentURL, numAsserts, minAsserts, maxAsserts);
+        logger.assertionCount(gURLs[0].identifier, numAsserts, minAsserts, maxAsserts);
     }
 
     if (gURLs[0].chaosMode) {
         gWindowUtils.leaveChaosMode();
     }
 
     // And start the next test.
     gURLs.shift();
--- a/layout/tools/reftest/selftest/test_reftest_output.py
+++ b/layout/tools/reftest/selftest/test_reftest_output.py
@@ -19,32 +19,40 @@ get_mozharness_status = partial(get_mozh
 def test_output_pass(runtests):
     status, lines = runtests('reftest-pass.list')
     assert status == 0
 
     tbpl_status, log_level = get_mozharness_status(lines, status)
     assert tbpl_status == TBPL_SUCCESS
     assert log_level in (INFO, WARNING)
 
+    test_status = filter_action('test_status', lines)
+    assert len(test_status) == 3
+    assert all(t['status'] == 'PASS' for t in test_status)
+
     test_end = filter_action('test_end', lines)
     assert len(test_end) == 3
-    assert all(t['status'] == 'PASS' for t in test_end)
+    assert all(t['status'] == 'OK' for t in test_end)
 
 
 def test_output_fail(runtests):
     status, lines = runtests('reftest-fail.list')
     assert status == 0
 
     tbpl_status, log_level = get_mozharness_status(lines, status)
     assert tbpl_status == TBPL_WARNING
     assert log_level == WARNING
 
+    test_status = filter_action('test_status', lines)
+    assert len(test_status) == 3
+    assert all(t['status'] == 'FAIL' for t in test_status)
+
     test_end = filter_action('test_end', lines)
     assert len(test_end) == 3
-    assert all(t['status'] == 'FAIL' for t in test_end)
+    assert all(t['status'] == 'OK' for t in test_end)
 
 
 @pytest.mark.skip_mozinfo("!crashreporter")
 def test_output_crash(runtests):
     status, lines = runtests('reftest-crash.list', environment=["MOZ_CRASHREPORTER_SHUTDOWN=1"])
     assert status == 1
 
     tbpl_status, log_level = get_mozharness_status(lines, status)
@@ -81,20 +89,23 @@ def test_output_asan(runtests):
 def test_output_assertion(runtests):
     status, lines = runtests('reftest-assert.list')
     assert status == 0
 
     tbpl_status, log_level = get_mozharness_status(lines, status)
     assert tbpl_status == TBPL_WARNING
     assert log_level == WARNING
 
+    test_status = filter_action('test_status', lines)
+    assert len(test_status) == 1
+    assert test_status[0]['status'] == 'PASS'
+
     test_end = filter_action('test_end', lines)
     assert len(test_end) == 1
-    # TODO: this should be ASSERT
-    assert test_end[0]['status'] == 'PASS'
+    assert test_end[0]['status'] == 'OK'
 
     assertions = filter_action('assertion_count', lines)
     assert len(assertions) == 1
     assert assertions[0]['count'] == 1
 
 
 @pytest.mark.skip_mozinfo("!debug")
 def test_output_leak(monkeypatch, runtests):