--- a/layout/tools/reftest/output.py
+++ b/layout/tools/reftest/output.py
@@ -1,18 +1,21 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import threading
+from collections import defaultdict
from mozlog.formatters import TbplFormatter
from mozrunner.utils import get_stack_fixer_function
+TEST_RESULTS = defaultdict(int)
+
class ReftestFormatter(TbplFormatter):
"""
Formatter designed to preserve the legacy "tbpl" format in reftest.
This is needed for both the reftest-analyzer and mozharness log parsing.
We can change this format when both reftest-analyzer and mozharness have
been changed to read structured logs.
@@ -64,17 +67,17 @@ class ReftestFormatter(TbplFormatter):
output_text += "\nREFTEST TEST-END | %s" % test
return "%s\n" % output_text
def process_output(self, data):
return "%s\n" % data["data"]
def suite_end(self, data):
lines = []
- summary = data['extra']['results']
+ summary = TEST_RESULTS
summary['success'] = summary['Pass'] + summary['LoadOnly']
lines.append("Successful: %(success)s (%(Pass)s pass, %(LoadOnly)s load only)" %
summary)
summary['unexpected'] = (summary['Exception'] + summary['FailedLoad'] +
summary['UnexpectedFail'] + summary['UnexpectedPass'] +
summary['AssertionUnexpected'] +
summary['AssertionUnexpectedFixed'])
lines.append(("Unexpected: %(unexpected)s (%(UnexpectedFail)s unexpected fail, "
@@ -85,16 +88,18 @@ class ReftestFormatter(TbplFormatter):
summary['known'] = (summary['KnownFail'] + summary['AssertionKnown'] +
summary['Random'] + summary['Skip'] + summary['Slow'])
lines.append(("Known problems: %(known)s (" +
"%(KnownFail)s known fail, " +
"%(AssertionKnown)s known asserts, " +
"%(Random)s random, " +
"%(Skip)s skipped, " +
"%(Slow)s slow)") % summary)
+ lines.append("Slowest test took %(slowestTestTime)s ms (%(slowestTestURL)s)" % summary)
+ lines.append("Total canvas count = %(recycledCanvases)s" % summary)
lines = ["REFTEST INFO | %s" % s for s in lines]
lines.append("REFTEST SUITE-END | Shutdown")
return "INFO | Result summary:\n{}\n".format('\n'.join(lines))
class OutputHandler(object):
"""Process the output of a process during a test run and translate
raw data logged from reftest.js to an appropriate structured log action,
@@ -113,18 +118,32 @@ class OutputHandler(object):
try:
data = json.loads(line)
except ValueError:
self.verbatim(line)
return [line]
if isinstance(data, dict) and 'action' in data:
- self.log.log_raw(data)
+ if data['action'] == 'results':
+ self.add_results(data)
+ else:
+ self.log.log_raw(data)
else:
self.verbatim(json.dumps(data))
return [data]
+ def add_results(self, data):
+ results = data['results']
+ slowest_time = results.pop('slowestTestTime')
+ slowest_test = results.pop('slowestTestURL')
+ if slowest_time > TEST_RESULTS['slowestTestTime']:
+ TEST_RESULTS['slowestTestTime'] = slowest_time
+ TEST_RESULTS['slowestTestURL'] = slowest_test
+
+        for k, v in results.items():
+ TEST_RESULTS[k] += v
+
def verbatim(self, line):
if self.stack_fixer_function:
line = self.stack_fixer_function(line)
self.log.process_output(threading.current_thread().name, line)
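
Because run-by-manifest restarts the browser between manifests, each browser lifetime emits its own partial 'results' payload, and add_results above folds them into the module-level TEST_RESULTS that suite_end later prints. A minimal sketch of that folding, with hypothetical payloads standing in for what DoneTests in reftest.jsm emits:

    from collections import defaultdict

    # Cumulative totals across browser restarts (mirrors TEST_RESULTS above).
    totals = defaultdict(int)

    def fold(results):
        """Merge one run's payload: keep the slowest test seen so far, sum the rest."""
        slowest_time = results.pop('slowestTestTime')
        slowest_url = results.pop('slowestTestURL')
        if slowest_time > totals['slowestTestTime']:
            totals['slowestTestTime'] = slowest_time
            totals['slowestTestURL'] = slowest_url
        for key, value in results.items():
            totals[key] += value

    # Hypothetical payloads from two browser lifetimes:
    fold({'Pass': 10, 'UnexpectedFail': 1, 'slowestTestTime': 900, 'slowestTestURL': 'a.html'})
    fold({'Pass': 5, 'UnexpectedFail': 0, 'slowestTestTime': 1400, 'slowestTestURL': 'b.html'})
    assert totals['Pass'] == 15 and totals['slowestTestURL'] == 'b.html'
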
--- a/layout/tools/reftest/reftest.jsm
+++ b/layout/tools/reftest/reftest.jsm
@@ -44,16 +44,19 @@ var gTimeoutHook = null;
var gRemote = false;
var gIgnoreWindowSize = false;
var gShuffle = false;
var gRepeat = null;
var gRunUntilFailure = false;
var gCleanupPendingCrashes = false;
var gTotalChunks = 0;
var gThisChunk = 0;
+var gRunByManifest = false;
+var gLastManifest = null;
+var gLastSeenManifest = null;
var gContainingWindow = null;
var gURLFilterRegex = {};
var gContentGfxInfo = null;
const FOCUS_FILTER_ALL_TESTS = "all";
const FOCUS_FILTER_NEEDS_FOCUS_TESTS = "needs-focus";
const FOCUS_FILTER_NON_NEEDS_FOCUS_TESTS = "non-needs-focus";
var gFocusFilterMode = FOCUS_FILTER_ALL_TESTS;
var gCompareStyloToGecko = false;
@@ -376,22 +379,28 @@ function InitAndStartRefTests()
gRemote = prefs.getBoolPref("reftest.remote", false);
gIgnoreWindowSize = prefs.getBoolPref("reftest.ignoreWindowSize", false);
/* Support for running a chunk (subset) of tests. In separate try as this is optional */
try {
gTotalChunks = prefs.getIntPref("reftest.totalChunks");
gThisChunk = prefs.getIntPref("reftest.thisChunk");
- }
- catch(e) {
+ } catch(e) {
gTotalChunks = 0;
gThisChunk = 0;
}
+    /* Support runByManifest mode, which restarts the browser between manifests */
+ try {
+ gRunByManifest = prefs.getBoolPref("reftest.runByManifest", false);
+ } catch(e) {
+ gRunByManifest = false;
+ }
+
try {
gFocusFilterMode = prefs.getCharPref("reftest.focusFilterMode");
} catch(e) {}
try {
gStartAfter = prefs.getCharPref("reftest.startAfter");
} catch(e) {
gStartAfter = undefined;
@@ -419,16 +428,17 @@ function InitAndStartRefTests()
}
try {
if (gServer)
StartHTTPServer();
} catch (ex) {
//gBrowser.loadURI('data:text/plain,' + ex);
++gTestResults.Exception;
logger.error("EXCEPTION: " + ex);
+ gRunByManifest = false;
DoneTests();
}
// Focus the content browser.
if (gFocusFilterMode != FOCUS_FILTER_NON_NEEDS_FOCUS_TESTS) {
gBrowser.addEventListener("focus", StartTests, true);
gBrowser.focus();
} else {
@@ -497,16 +507,17 @@ function StartTests()
gURLs = [];
gManifestsLoaded = {};
try {
var manifests = JSON.parse(prefs.getCharPref("reftest.manifests"));
gURLFilterRegex = manifests[null];
} catch(e) {
logger.error("Unable to find reftest.manifests pref. Please ensure your profile is setup properly");
+ gRunByManifest = false;
DoneTests();
}
try {
var globalFilter = manifests.hasOwnProperty("") ? new RegExp(manifests[""]) : null;
var manifestURLs = Object.keys(manifests);
// Ensure we read manifests from higher up the directory tree first so that we
@@ -549,52 +560,63 @@ function StartTests()
var end = Math.round(gThisChunk * testsPerChunk);
// Map these indices onto the gURLs array. This avoids modifying the
// gURLs array which prevents skipped tests from showing up in the log
start = gThisChunk == 1 ? 0 : gURLs.indexOf(tURLs[start]);
end = gThisChunk == gTotalChunks ? gURLs.length : gURLs.indexOf(tURLs[end + 1]) - 1;
gURLs = gURLs.slice(start, end);
- logger.info("Running chunk " + gThisChunk + " out of " + gTotalChunks + " chunks. " +
- "tests " + (start+1) + "-" + end + "/" + gURLs.length);
+ if (gStartAfter === undefined) {
+ logger.info("Running chunk " + gThisChunk + " out of " + gTotalChunks + " chunks. " +
+ "tests " + (start+1) + "-" + end + "/" + gURLs.length);
+ }
}
if (gShuffle) {
if (gStartAfter !== undefined) {
logger.error("Can't resume from a crashed test when " +
"--shuffle is enabled, continue by shuffling " +
"all the tests");
+ gRunByManifest = false;
DoneTests();
return;
}
Shuffle(gURLs);
} else if (gStartAfter !== undefined) {
- // Skip through previously crashed test
+ logger.info("PREVIOUS TEST: " + gStartAfter);
+        // Skip past the previously run test
// We have to do this after chunking so we don't break the numbers
- var crash_idx = gURLs.map(function(url) {
- return url['url1']['spec'];
+ var test_idx = gURLs.map(function(url) {
+ return url.identifier;
}).indexOf(gStartAfter);
- if (crash_idx == -1) {
- throw "Can't find the previously crashed test";
+ if (test_idx == -1) {
+ throw "Can't find the previously run test";
}
- gURLs = gURLs.slice(crash_idx + 1);
+ gURLs = gURLs.slice(test_idx + 1);
}
gTotalTests = gURLs.length;
if (!gTotalTests)
throw "No tests to run";
+ if (gStartAfter !== undefined)
+ logger.info("Resuming test run from " + gURLs[0].url1.spec);
+
+ if (gRunByManifest)
+ logger.info("Running manifest " + gURLs[0].manifest);
+
gURICanvases = {};
StartCurrentTest();
} catch (ex) {
//gBrowser.loadURI('data:text/plain,' + ex);
++gTestResults.Exception;
logger.error("EXCEPTION: " + ex);
+ gRunByManifest = false;
DoneTests();
}
}
function OnRefTestUnload()
{
let plugin1 = getTestPlugin("Test Plug-in");
let plugin2 = getTestPlugin("Second Test Plug-in");
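
The chunking block in the StartTests hunk above splits the filtered test list into gTotalChunks nearly even slices and then maps the slice endpoints back onto gURLs via indexOf. A rough Python sketch of the even-split arithmetic, consistent with the Math.round visible on the `end` computation (the `start` computation sits above the hunk, so its exact form is assumed; the indexOf remapping is omitted):

    def chunk_bounds(num_tests, total_chunks, this_chunk):
        """Return [start, end) indices for one chunk of an evenly split run.

        this_chunk is 1-based, matching the reftest.thisChunk pref.
        """
        per_chunk = num_tests / float(total_chunks)
        start = int(round((this_chunk - 1) * per_chunk))
        end = int(round(this_chunk * per_chunk))
        return start, end

    # 10 tests over 3 chunks -> [0,3), [3,7), [7,10): every test runs exactly once.
    assert [chunk_bounds(10, 3, c) for c in (1, 2, 3)] == [(0, 3), (3, 7), (7, 10)]
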
@@ -780,17 +802,17 @@ function BuildConditionSandbox(aURL) {
sandbox.browserIsRemote = gBrowserIsRemote;
try {
sandbox.asyncPan = gContainingWindow.document.docShell.asyncPanZoomEnabled;
} catch (e) {
sandbox.asyncPan = false;
}
- if (!gDumpedConditionSandbox) {
+ if (!gDumpedConditionSandbox && gStartAfter === undefined) {
logger.info("Dumping JSON representation of sandbox");
logger.info(JSON.stringify(CU.waiveXrays(sandbox)));
gDumpedConditionSandbox = true;
}
// Graphics features
sandbox.usesRepeatResampling = sandbox.d2d;
return sandbox;
@@ -854,20 +876,33 @@ function AddTestItem(aTest, aFilter)
if (gFocusFilterMode == FOCUS_FILTER_NEEDS_FOCUS_TESTS &&
!aTest.needsFocus)
return;
if (gFocusFilterMode == FOCUS_FILTER_NON_NEEDS_FOCUS_TESTS &&
aTest.needsFocus)
return;
if (aTest.url2 !== null)
- aTest.identifier = [aTest.prettyPath, aTest.type, aTest.url2.spec];
+ aTest.identifier = aTest.prettyPath + " " + aTest.type + " " + aTest.url2.spec;
else
aTest.identifier = aTest.prettyPath;
+ var count = gURLs.reduce(function(count, test) {
+ var end = test.identifier.lastIndexOf(' (dupe-id-');
+ end = end == -1 ? test.identifier.length : end;
+ var id = test.identifier.substring(0, end);
+ if (aTest.identifier == id) {
+ return count + 1;
+ }
+ return count;
+ }, 1);
+
+ if (count > 1)
+ aTest.identifier = aTest.identifier + " (dupe-id-" + count + ")";
+
gURLs.push(aTest);
}
function AddStyloTestPrefs(aSandbox, aTestPrefSettings, aRefPrefSettings)
{
AddPrefSettings("test-", "layout.css.servo.enabled", "true", aSandbox,
aTestPrefSettings, aRefPrefSettings);
AddPrefSettings("ref-", "layout.css.servo.enabled", "false", aSandbox,
@@ -1146,16 +1181,17 @@ function ReadManifest(aURL, inherited_st
prefSettings1: testPrefSettings,
prefSettings2: refPrefSettings,
fuzzyMinDelta: fuzzy_delta.min,
fuzzyMaxDelta: fuzzy_delta.max,
fuzzyMinPixels: fuzzy_pixels.min,
fuzzyMaxPixels: fuzzy_pixels.max,
url1: testURI,
url2: null,
+ manifest: aURL.spec,
chaosMode: chaosMode }, aFilter);
} else if (items[0] == TYPE_SCRIPT) {
if (items.length != 2)
throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": incorrect number of arguments to script";
var [testURI] = runHttp
? ServeFiles(principal, httpDepth,
listURL, [items[1]])
: [gIOService.newURI(items[1], null, listURL)];
@@ -1175,16 +1211,17 @@ function ReadManifest(aURL, inherited_st
prefSettings1: testPrefSettings,
prefSettings2: refPrefSettings,
fuzzyMinDelta: fuzzy_delta.min,
fuzzyMaxDelta: fuzzy_delta.max,
fuzzyMinPixels: fuzzy_pixels.min,
fuzzyMaxPixels: fuzzy_pixels.max,
url1: testURI,
url2: null,
+ manifest: aURL.spec,
chaosMode: chaosMode }, aFilter);
} else if (items[0] == TYPE_REFTEST_EQUAL || items[0] == TYPE_REFTEST_NOTEQUAL) {
if (items.length != 3)
throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": incorrect number of arguments to " + items[0];
if (items[0] == TYPE_REFTEST_NOTEQUAL &&
expected_status == EXPECTED_FUZZY &&
(fuzzy_delta.min > 0 || fuzzy_pixels.min > 0)) {
@@ -1219,16 +1256,17 @@ function ReadManifest(aURL, inherited_st
prefSettings1: testPrefSettings,
prefSettings2: refPrefSettings,
fuzzyMinDelta: fuzzy_delta.min,
fuzzyMaxDelta: fuzzy_delta.max,
fuzzyMinPixels: fuzzy_pixels.min,
fuzzyMaxPixels: fuzzy_pixels.max,
url1: testURI,
url2: refURI,
+ manifest: aURL.spec,
chaosMode: chaosMode }, aFilter);
} else {
throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": unknown test type " + items[0];
}
}
}
function AddURIUseCount(uri)
@@ -1335,16 +1373,22 @@ function Blur()
function StartCurrentTest()
{
gTestLog = [];
// make sure we don't run tests that are expected to kill the browser
while (gURLs.length > 0) {
var test = gURLs[0];
+ if (gLastManifest == null || !gRunByManifest) {
+ gLastManifest = test.manifest;
+ } else if (test.manifest != gLastManifest) {
+ break;
+ }
+
logger.testStart(test.identifier);
if (test.expected == EXPECTED_DEATH) {
++gTestResults.Skip;
logger.testEnd(test.identifier, "SKIP");
gURLs.shift();
} else if (test.needsFocus && !Focus()) {
// FIXME: Marking this as a known fail is dangerous! What
// if it starts failing all the time?
@@ -1355,21 +1399,22 @@ function StartCurrentTest()
++gTestResults.Slow;
logger.testEnd(test.identifier, "SKIP", null, "(SLOW)");
gURLs.shift();
} else {
break;
}
}
- if ((gURLs.length == 0 && gRepeat == 0) ||
+ if ((gRepeat == 0 && gURLs.length == 0) ||
+        (gRunByManifest && gURLs.length > 0 && gURLs[0].manifest != gLastManifest) ||
(gRunUntilFailure && HasUnexpectedResult())) {
RestoreChangedPreferences();
DoneTests();
- } else if (gURLs.length == 0 && gRepeat > 0) {
+ } else if (gRepeat > 0 && gURLs.length == 0) {
// Repeat
gRepeat--;
StartTests();
} else {
if (gURLs[0].chaosMode) {
gWindowUtils.enterChaosMode();
}
if (!gURLs[0].needsFocus) {
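
Under run-by-manifest, StartCurrentTest stops dequeueing as soon as the next test's manifest differs from gLastManifest, calls DoneTests, and lets the Python harness relaunch the browser for the next manifest. The net effect is that the ordered queue is consumed one manifest-sized batch per browser lifetime; a hedged sketch of that grouping (manifest_batches is illustrative, not part of the harness):

    from itertools import groupby

    def manifest_batches(tests):
        """Yield maximal runs of consecutive tests that share a manifest.

        Each batch corresponds to one browser lifetime under --run-by-manifest.
        A manifest interrupted by an included manifest therefore forms more
        than one batch, matching the boundary check in StartCurrentTest.
        """
        for manifest, batch in groupby(tests, key=lambda t: t['manifest']):
            yield manifest, list(batch)

    queue = [{'manifest': 'm1.list', 'id': 'a'},
             {'manifest': 'm1.list', 'id': 'b'},
             {'manifest': 'm2.list', 'id': 'c'}]
    assert [m for m, _ in manifest_batches(queue)] == ['m1.list', 'm2.list']
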
@@ -1482,19 +1527,27 @@ function StartCurrentURI(aState)
} else {
SendLoadTest(type, gCurrentURL, gLoadTimeout);
}
}
}
function DoneTests()
{
- logger.suiteEnd(extra={'results': gTestResults});
- logger.info("Slowest test took " + gSlowestTestTime + "ms (" + gSlowestTestURL + ")");
- logger.info("Total canvas count = " + gRecycledCanvases.length);
+ // Log results with a special action so they don't get lost when
+ // using runByManifest.
+ gTestResults.slowestTestTime = gSlowestTestTime;
+ gTestResults.slowestTestURL = gSlowestTestURL || 'unknown';
+ gTestResults.recycledCanvases = gRecycledCanvases.length;
+ logger._logData('results', { 'results': gTestResults });
+
+ if (gURLs.length == 0 || !gRunByManifest) {
+ logger.suiteEnd();
+ }
+
if (gFailedUseWidgetLayers) {
LogWidgetLayersFailure();
}
function onStopped() {
let appStartup = CC["@mozilla.org/toolkit/app-startup;1"].getService(CI.nsIAppStartup);
appStartup.quit(CI.nsIAppStartup.eForceQuit);
}
@@ -1832,30 +1885,30 @@ function RecordResult(testRunTime, error
!test_passed && expected == EXPECTED_FUZZY ||
test_passed && expected == EXPECTED_FAIL) {
if (!equal) {
extra.max_difference = maxDifference.value;
extra.differences = differences;
var image1 = gCanvas1.toDataURL();
var image2 = gCanvas2.toDataURL();
extra.reftest_screenshots = [
- {url:gURLs[0].identifier[0],
+ {url:gURLs[0].prettyPath,
screenshot: image1.slice(image1.indexOf(",") + 1)},
- gURLs[0].identifier[1],
- {url:gURLs[0].identifier[2],
+ gURLs[0].type,
+ {url:gURLs[0].url2.spec,
screenshot: image2.slice(image2.indexOf(",") + 1)}
];
extra.image1 = image1;
extra.image2 = image2;
message += (", max difference: " + extra.max_difference +
", number of differing pixels: " + differences);
} else {
var image1 = gCanvas1.toDataURL();
extra.reftest_screenshots = [
- {url:gURLs[0].identifier[0],
+ {url:gURLs[0].prettyPath,
screenshot: image1.slice(image1.indexOf(",") + 1)}
];
extra.image1 = image1;
}
}
logger.testEnd(gURLs[0].identifier, output.s[0], output.s[1], message, null, extra);
if (gNoCanvasCache) {
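
With the tuple identifier gone, RecordResult rebuilds the screenshot metadata from the test object's own fields. Sketched as Python data with hypothetical values (the field sources are the expressions in the hunk above):

    # For a comparison test, reftest_screenshots is a three-element list:
    # test screenshot, relation string, reference screenshot.
    reftest_screenshots = [
        {'url': 'layout/reftests/example.html',         # gURLs[0].prettyPath
         'screenshot': '<base64 PNG data>'},            # gCanvas1 data URL, header stripped
        '==',                                           # gURLs[0].type
        {'url': 'http://example.test/example-ref.html', # gURLs[0].url2.spec
         'screenshot': '<base64 PNG data>'},            # gCanvas2 data URL, header stripped
    ]
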
--- a/layout/tools/reftest/reftestcommandline.py
+++ b/layout/tools/reftest/reftestcommandline.py
@@ -241,16 +241,22 @@ class ReftestArgumentsParser(argparse.Ar
nargs="*",
help="Path to test file, manifest file, or directory containing tests")
self.add_argument("--work-path",
action="store",
dest="workPath",
help="Path to the base dir of all test files.")
+ self.add_argument("--run-by-manifest",
+ action="store_true",
+ dest="runByManifest",
+ default=False,
+                          help="Restart the browser between manifests.")
+
mozlog.commandline.add_logging_group(self)
def get_ip(self):
import moznetwork
if os.name != "nt":
return moznetwork.get_ip()
else:
self.error(
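
The new option simply flips options.runByManifest, which runreftest.py below translates into the reftest.runByManifest pref. A quick check of the argparse wiring, using a throwaway parser that carries only this option:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--run-by-manifest", action="store_true",
                        dest="runByManifest", default=False,
                        help="Restart the browser between manifests.")

    options = parser.parse_args(["--run-by-manifest"])
    assert options.runByManifest is True
    # runreftest.py then sets: prefs['reftest.runByManifest'] = True
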
--- a/layout/tools/reftest/runreftest.py
+++ b/layout/tools/reftest/runreftest.py
@@ -2,17 +2,16 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Runs the reftest test harness.
"""
import collections
-import itertools
import json
import multiprocessing
import os
import platform
import re
import shutil
import signal
import subprocess
@@ -282,16 +281,18 @@ class RefTest(object):
if options.shuffle:
prefs['reftest.shuffle'] = True
if options.repeat:
prefs['reftest.repeat'] = options.repeat
if options.runUntilFailure:
prefs['reftest.runUntilFailure'] = True
if options.cleanupCrashes:
prefs['reftest.cleanupPendingCrashes'] = True
+ if options.runByManifest:
+ prefs['reftest.runByManifest'] = True
prefs['reftest.focusFilterMode'] = options.focusFilterMode
prefs['reftest.logLevel'] = options.log_tbpl_level or 'info'
prefs['reftest.manifests'] = json.dumps(manifests)
if startAfter not in (None, self.TEST_SEEN_INITIAL, self.TEST_SEEN_FINAL):
self.log.info("Setting reftest.startAfter to %s" % startAfter)
prefs['reftest.startAfter'] = startAfter
@@ -601,20 +602,19 @@ class RefTest(object):
debug_args = None
if debuggerInfo:
interactive = debuggerInfo.interactive
debug_args = [debuggerInfo.path] + debuggerInfo.args
def record_last_test(message):
"""Records the last test seen by this harness for the benefit of crash logging."""
if message['action'] == 'test_start':
- if " " in message['test']:
- self.lastTestSeen = message['test'].split(" ")[0]
- else:
- self.lastTestSeen = message['test']
+ self.lastTestSeen = message['test']
+ elif message['action'] == 'suite_end':
+ self.lastTestSeen = 'suite-end'
self.log.add_handler(record_last_test)
outputHandler = OutputHandler(self.log, options.utilityPath, symbolsPath=symbolsPath)
kp_kwargs = {
'kill_on_timeout': False,
'cwd': SCRIPT_DIRECTORY,
@@ -686,17 +686,19 @@ class RefTest(object):
debuggerInfo = None
if options.debugger:
debuggerInfo = mozdebug.get_debugger_info(options.debugger, options.debuggerArgs,
options.debuggerInteractive)
profileDir = None
startAfter = None # When the previous run crashed, we skip the tests we ran before
prevStartAfter = None
- for i in itertools.count():
+ overall_status = 0
+ i = 0
+ while True:
try:
if cmdargs is None:
cmdargs = []
if self.use_marionette:
cmdargs.append('-marionette')
profile = self.createReftestProfile(options,
@@ -713,48 +715,51 @@ class RefTest(object):
cmdargs=cmdargs,
# give the JS harness 30 seconds to deal with
# its own timeouts
env=browserEnv,
timeout=options.timeout + 30.0,
symbolsPath=options.symbolsPath,
options=options,
debuggerInfo=debuggerInfo)
+ overall_status |= status
self.log.info("Process mode: {}".format('e10s' if options.e10s else 'non-e10s'))
mozleak.process_leak_log(self.leakLogFile,
leak_thresholds=options.leakThresholds,
stack_fixer=get_stack_fixer_function(options.utilityPath,
options.symbolsPath))
if status == 0:
break
- if startAfter == self.TEST_SEEN_FINAL:
- self.log.info("Finished running all tests, skipping resume "
- "despite non-zero status code: %s" % status)
+ if options.runByManifest and self.lastTestSeen == 'suite-end':
+ break
+ if not options.runByManifest and overall_status == 0:
break
- if startAfter is not None and options.shuffle:
- self.log.error("Can not resume from a crash with --shuffle "
- "enabled. Please consider disabling --shuffle")
- break
- if startAfter is not None and options.maxRetries <= i:
- self.log.error("Hit maximum number of allowed retries ({}) "
- "in the test run".format(options.maxRetries))
- break
- if startAfter == prevStartAfter:
- # If the test stuck on the same test, or there the crashed
- # test appeared more then once, stop
- self.log.error("Force stop because we keep running into "
- "test \"{}\"".format(startAfter))
- break
+ if status:
+ i += 1
+ if startAfter is not None and options.shuffle:
+ self.log.error("Can not resume from a crash with --shuffle "
+ "enabled. Please consider disabling --shuffle")
+ break
+ if startAfter is not None and options.maxRetries <= i:
+ self.log.error("Hit maximum number of allowed retries ({}) "
+ "in the test run".format(options.maxRetries))
+ break
+ if startAfter == prevStartAfter:
+                        # If the run is stuck on the same test, or the crashed
+                        # test appeared more than once, stop
+ self.log.error("Force stop because we keep running into "
+ "test \"{}\"".format(startAfter))
+ break
prevStartAfter = startAfter
# TODO: we need to emit an SUITE-END log if it crashed
finally:
self.cleanup(profileDir)
- return status
+ return overall_status
def copyExtraFilesToProfile(self, options, profile):
"Copy extra files or dirs specified on the command line to the testing profile."
profileDir = profile.profile
for f in options.extraProfileFiles:
abspath = self.getFullPath(f)
if os.path.isfile(abspath):
if os.path.basename(abspath) == 'user.js':
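
The rewritten loop keeps relaunching the browser until a clean exit, a logged suite end, or the retry limit, OR-ing every launch's status into overall_status so a failure in any manifest fails the whole run. Its control flow, reduced to a sketch with a stubbed single-run callable (run_once and max_retries are illustrative names):

    def run_suite(run_once, run_by_manifest, max_retries=4):
        """Skeleton of the relaunch loop: run_once() -> (status, last_test_seen)."""
        overall_status = 0
        retries = 0
        while True:
            status, last_seen = run_once()
            overall_status |= status
            if status == 0:
                break                  # clean exit: all queued tests finished
            if run_by_manifest and last_seen == 'suite-end':
                break                  # suite ended despite a nonzero status
            retries += 1
            if retries > max_retries:
                break                  # give up after too many crashes
        return overall_status

    # Hypothetical: first launch crashes (status 1), second finishes cleanly.
    runs = iter([(1, 'some/test.html'), (0, 'suite-end')])
    assert run_suite(lambda: next(runs), run_by_manifest=True) == 1
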
--- a/testing/mozharness/configs/unittests/linux_unittest.py
+++ b/testing/mozharness/configs/unittests/linux_unittest.py
@@ -143,16 +143,17 @@ config = {
"options": [
"--appname=%(binary_path)s",
"--utility-path=tests/bin",
"--extra-profile-file=tests/bin/plugins",
"--symbols-path=%(symbols_path)s",
"--log-raw=%(raw_log_file)s",
"--log-errorsummary=%(error_summary_file)s",
"--cleanup-crashes",
+ "--run-by-manifest",
],
"run_filename": "runreftest.py",
"testsdir": "reftest"
},
"xpcshell": {
"options": [
"--symbols-path=%(symbols_path)s",
"--test-plugin-path=%(test_plugin_path)s",