--- a/testing/mochitest/runtests.py
+++ b/testing/mochitest/runtests.py
@@ -46,17 +46,17 @@ from manifestparser.filters import (
pathprefix,
subsuite,
tags,
)
try:
from marionette import Marionette
from marionette_driver.addons import Addons
-except ImportError, e:
+except ImportError as e:
# Defer ImportError until attempt to use Marionette
def reraise(*args, **kwargs):
raise(e)
Marionette = reraise
from leaks import ShutdownLeaks, LSANLeaks
from mochitest_options import (
MochitestArgumentParser, build_obj, get_default_valgrind_suppression_files
@@ -844,17 +844,17 @@ class MochitestDesktop(object):
return test_environment(**kwargs)
def extraPrefs(self, extraPrefs):
"""interpolate extra preferences from option strings"""
try:
return dict(parseKeyValue(extraPrefs, context='--setpref='))
except KeyValueParseError as e:
- print str(e)
+ print(str(e))
sys.exit(1)
def getFullPath(self, path):
" Get an absolute path relative to self.oldcwd."
return os.path.normpath(
os.path.join(
self.oldcwd,
os.path.expanduser(path)))
@@ -1008,18 +1008,21 @@ class MochitestDesktop(object):
self.testRoot = options.flavor
if options.flavor == 'browser' and options.immersiveMode:
self.testRoot = 'metro'
else:
self.testRoot = self.TEST_PATH
self.testRootAbs = os.path.join(SCRIPT_DIR, self.testRoot)
- def buildTestURL(self, options):
- testHost = "http://mochi.test:8888"
+ def buildTestURL(self, options, scheme='http'):
+ if scheme == 'https':
+ testHost = "https://example.com"
+ else:
+ testHost = "http://mochi.test:8888"
testURL = "/".join([testHost, self.TEST_PATH])
if len(options.test_paths) == 1:
if options.repeat > 0 and os.path.isfile(
os.path.join(
self.oldcwd,
os.path.dirname(__file__),
self.TEST_PATH,
@@ -1046,23 +1049,23 @@ class MochitestDesktop(object):
"""
tests = self.getActiveTests(options, disabled)
paths = []
for test in tests:
if testsToFilter and (test['path'] not in testsToFilter):
continue
paths.append(test)
-
# Bug 883865 - add this functionality into manifestparser
- with open(os.path.join(SCRIPT_DIR, options.testRunManifestFile), 'w') as manifestFile:
- manifestFile.write(json.dumps({'tests': paths}))
- options.manifestFile = options.testRunManifestFile
-
- return self.buildTestURL(options)
+ # Generate test by schemes
+ for (scheme, grouped_tests) in self.groupTestsByScheme(paths).items():
+ with open(os.path.join(SCRIPT_DIR, options.testRunManifestFile), 'w') as manifestFile:
+ manifestFile.write(json.dumps({'tests': grouped_tests}))
+ options.manifestFile = options.testRunManifestFile
+ yield (scheme, grouped_tests)
def startWebSocketServer(self, options, debuggerInfo):
""" Launch the websocket server """
self.wsserver = WebSocketServer(
options,
SCRIPT_DIR,
self.log,
debuggerInfo)
@@ -1382,16 +1385,18 @@ toolbar#nav-bar {
(test['name'], test['manifest']))
continue
testob = {'path': tp}
if 'disabled' in test:
testob['disabled'] = test['disabled']
if 'expected' in test:
testob['expected'] = test['expected']
+ if 'scheme' in test:
+ testob['scheme'] = test['scheme']
paths.append(testob)
def path_sort(ob1, ob2):
path1 = ob1['path'].split('/')
path2 = ob2['path'].split('/')
return cmp(path1, path2)
paths.sort(path_sort)
@@ -2005,27 +2010,26 @@ toolbar#nav-bar {
interactive=interactive,
outputTimeout=timeout)
proc = runner.process_handler
self.log.info("runtests.py | Application pid: %d" % proc.pid)
self.log.process_start("Main app process")
# start marionette and kick off the tests
marionette_args = marionette_args or {}
- port_timeout = marionette_args.pop('port_timeout')
+ port_timeout = marionette_args.pop('port_timeout', 60)
self.marionette = Marionette(**marionette_args)
self.marionette.start_session(timeout=port_timeout)
# install specialpowers and mochikit as temporary addons
addons = Addons(self.marionette)
if mozinfo.info.get('toolkit') != 'gonk':
addons.install(os.path.join(here, 'extensions', 'specialpowers'), temp=True)
addons.install(self.mochijar, temp=True)
-
self.execute_start_script()
# an open marionette session interacts badly with mochitest,
# delete it until we figure out why.
self.marionette.delete_session()
del self.marionette
# wait until app is finished
@@ -2171,16 +2175,30 @@ toolbar#nav-bar {
# We need to print the summary only if options.bisectChunk has a value.
# Also we need to make sure that we do not print the summary in between
# running tests via --run-by-dir.
if options.bisectChunk and options.bisectChunk in self.result:
bisect.print_summary()
return result
+ def groupTestsByScheme(self, tests):
+ """
+ split tests into groups by schemes. test is classified as http if
+ no scheme specified
+ """
+ httpTests = []
+ httpsTests = []
+ for test in tests:
+ if not test.get('scheme') or test.get('scheme') == 'http':
+ httpTests.append(test)
+ elif test.get('scheme') == 'https':
+ httpsTests.append(test)
+ return {'http': httpTests, 'https': httpsTests}
+
def runTests(self, options):
""" Prepare, configure, run tests and cleanup """
# a11y and chrome tests don't run with e10s enabled in CI. Need to set
# this here since |mach mochitest| sets the flavor after argument parsing.
if options.flavor in ('a11y', 'chrome'):
options.e10s = False
mozinfo.update({"e10s": options.e10s}) # for test manifest parsing.
@@ -2205,17 +2223,17 @@ toolbar#nav-bar {
if not options.runByDir:
return self.runMochitests(options, testsToRun)
# code for --run-by-dir
dirs = self.getDirectories(options)
result = 1 # default value, if no tests are run.
for d in dirs:
- print "dir: %s" % d
+ print("dir: %s" % d)
tests_in_dir = [t for t in testsToRun if os.path.dirname(t) == d]
# If we are using --run-by-dir, we should not use the profile path (if) provided
# by the user, since we need to create a new directory for each run. We would face
# problems if we use the directory provided by the user.
result = self.runMochitests(options, tests_in_dir)
# Dump the logging buffer
@@ -2223,30 +2241,30 @@ toolbar#nav-bar {
if result == -1:
break
e10s_mode = "e10s" if options.e10s else "non-e10s"
# printing total number of tests
if options.flavor == 'browser':
- print "TEST-INFO | checking window state"
- print "Browser Chrome Test Summary"
- print "\tPassed: %s" % self.countpass
- print "\tFailed: %s" % self.countfail
- print "\tTodo: %s" % self.counttodo
- print "\tMode: %s" % e10s_mode
- print "*** End BrowserChrome Test Results ***"
+ print("TEST-INFO | checking window state")
+ print("Browser Chrome Test Summary")
+ print("\tPassed: %s" % self.countpass)
+ print("\tFailed: %s" % self.countfail)
+ print("\tTodo: %s" % self.counttodo)
+ print("\tMode: %s" % e10s_mode)
+ print("*** End BrowserChrome Test Results ***")
else:
- print "0 INFO TEST-START | Shutdown"
- print "1 INFO Passed: %s" % self.countpass
- print "2 INFO Failed: %s" % self.countfail
- print "3 INFO Todo: %s" % self.counttodo
- print "4 INFO Mode: %s" % e10s_mode
- print "5 INFO SimpleTest FINISHED"
+ print("0 INFO TEST-START | Shutdown")
+ print("1 INFO Passed: %s" % self.countpass)
+ print("2 INFO Failed: %s" % self.countfail)
+ print("3 INFO Todo: %s" % self.counttodo)
+ print("4 INFO Mode: %s" % e10s_mode)
+ print("5 INFO SimpleTest FINISHED")
return result
def doTests(self, options, testsToFilter=None):
# A call to initializeLooping method is required in case of --run-by-dir or --bisect-chunk
# since we need to initialize variables for each loop.
if options.bisectChunk or options.runByDir:
self.initializeLooping(options)
@@ -2315,38 +2333,16 @@ toolbar#nav-bar {
if self.mozLogs:
self.browserEnv["MOZ_LOG_FILE"] = "{}/moz-pid=%PID-uid={}.log".format(
self.browserEnv["MOZ_UPLOAD_DIR"], str(uuid.uuid4()))
try:
self.startServers(options, debuggerInfo)
- # testsToFilter parameter is used to filter out the test list that
- # is sent to buildTestPath
- testURL = self.buildTestPath(options, testsToFilter)
-
- # read the number of tests here, if we are not going to run any,
- # terminate early
- if os.path.exists(
- os.path.join(
- SCRIPT_DIR,
- options.testRunManifestFile)):
- with open(os.path.join(SCRIPT_DIR, options.testRunManifestFile)) as fHandle:
- tests = json.load(fHandle)
- count = 0
- for test in tests['tests']:
- count += 1
- if count == 0:
- return 1
-
- self.buildURLOptions(options, self.browserEnv)
- if self.urlOpts:
- testURL += "?" + "&".join(self.urlOpts)
-
if options.immersiveMode:
options.browserArgs.extend(('-firefoxpath', options.app))
options.app = self.immersiveHelperPath
if options.jsdebugger:
options.browserArgs.extend(['-jsdebugger'])
# Remove the leak detection file so it can't "leak" to the tests run.
@@ -2377,35 +2373,49 @@ toolbar#nav-bar {
'port_timeout': options.marionette_port_timeout,
}
if options.marionette:
host, port = options.marionette.split(':')
marionette_args['host'] = host
marionette_args['port'] = int(port)
- self.log.info("runtests.py | Running with e10s: {}".format(options.e10s))
- self.log.info("runtests.py | Running tests: start.\n")
- status = self.runApp(testURL,
- self.browserEnv,
- options.app,
- profile=self.profile,
- extraArgs=options.browserArgs,
- utilityPath=options.utilityPath,
- debuggerInfo=debuggerInfo,
- valgrindPath=valgrindPath,
- valgrindArgs=valgrindArgs,
- valgrindSuppFiles=valgrindSuppFiles,
- symbolsPath=options.symbolsPath,
- timeout=timeout,
- detectShutdownLeaks=detectShutdownLeaks,
- screenshotOnFail=options.screenshotOnFail,
- bisectChunk=options.bisectChunk,
- marionette_args=marionette_args,
- )
+ # testsToFilter parameter is used to filter out the test list that
+ # is sent to buildTestPath
+ for (scheme, tests) in self.buildTestPath(options, testsToFilter):
+ # read the number of tests here, if we are not going to run any,
+ # terminate early
+ if not tests:
+ continue
+
+ testURL = self.buildTestURL(options, scheme=scheme)
+
+ self.buildURLOptions(options, self.browserEnv)
+ if self.urlOpts:
+ testURL += "?" + "&".join(self.urlOpts)
+
+ self.log.info("runtests.py | Running with e10s: {}".format(options.e10s))
+ self.log.info("runtests.py | Running tests: start.\n")
+ status = self.runApp(testURL,
+ self.browserEnv,
+ options.app,
+ profile=self.profile,
+ extraArgs=options.browserArgs,
+ utilityPath=options.utilityPath,
+ debuggerInfo=debuggerInfo,
+ valgrindPath=valgrindPath,
+ valgrindArgs=valgrindArgs,
+ valgrindSuppFiles=valgrindSuppFiles,
+ symbolsPath=options.symbolsPath,
+ timeout=timeout,
+ detectShutdownLeaks=detectShutdownLeaks,
+ screenshotOnFail=options.screenshotOnFail,
+ bisectChunk=options.bisectChunk,
+ marionette_args=marionette_args,
+ )
except KeyboardInterrupt:
self.log.info("runtests.py | Received keyboard interrupt.\n")
status = -1
except:
traceback.print_exc()
self.log.error(
"Automation Error: Received unexpected exception while running application\n")
status = 1
--- a/testing/mochitest/tests/SimpleTest/setup.js
+++ b/testing/mochitest/tests/SimpleTest/setup.js
@@ -155,17 +155,17 @@ TestRunner.logger.addListener("dumpListe
var gTestList = [];
var RunSet = {};
RunSet.runall = function(e) {
// Filter tests to include|exclude tests based on data in params.filter.
// This allows for including or excluding tests from the gTestList
// TODO Only used by ipc tests, remove once those are implemented sanely
if (params.testManifest) {
- getTestManifest("http://mochi.test:8888/" + params.testManifest, params, function(filter) { gTestList = filterTests(filter, gTestList, params.runOnly); RunSet.runtests(); });
+    getTestManifest(getTestManifestURL(params.testManifest), params, function(filter) { gTestList = filterTests(filter, gTestList, params.runOnly); RunSet.runtests(); });
} else {
RunSet.runtests();
}
}
RunSet.runtests = function(e) {
// Which tests we're going to run
var my_tests = gTestList;
@@ -230,17 +230,17 @@ function toggleNonTests (e) {
} else {
$("toggleNonTests").innerHTML = "Show Non-Tests";
}
}
// hook up our buttons
function hookup() {
if (params.manifestFile) {
- getTestManifest("http://mochi.test:8888/" + params.manifestFile, params, hookupTests);
+ getTestManifest(getTestManifestURL(params.manifestFile), params, hookupTests);
} else {
hookupTests(gTestList);
}
}
function hookupTests(testList) {
if (testList.length > 0) {
gTestList = testList;
@@ -253,8 +253,18 @@ function hookupTests(testList) {
document.getElementById('runtests').onclick = RunSet.reloadAndRunAll;
document.getElementById('toggleNonTests').onclick = toggleNonTests;
// run automatically if autorun specified
if (params.autorun) {
RunSet.runall();
}
}
+
+function getTestManifestURL(path) {
+ // The test manifest url scheme should be the same protocol as the containing
+ // window... unless it's not http(s)
+ if (window.location.protocol == "http:" ||
+ window.location.protocol == "https:") {
+ return window.location.protocol + "//" + window.location.host + "/" + path;
+ }
+ return "http://mochi.test:8888/" + path;
+}