Bug 1209463 - [mochitest] Stop re-logging errors at the end of a mochitest run, r?gbrown
Re-logging errors at the end of the run is no longer necessary because
the formatters can now dump failure summaries that include failing tests
and subtest results.
The re-logging also needs to be removed because it is throwing off the summary counts.
MozReview-Commit-ID: GbQkk0xQRds
--- a/testing/mochitest/runtests.py
+++ b/testing/mochitest/runtests.py
@@ -167,19 +167,16 @@ class MessageLogger(object):
# TEST-START/TEST-END. So it is off to begin, but will be enabled after
# a TEST-START comes in.
self.buffering = False
self.restore_buffering = buffering
# Message buffering
self.buffered_messages = []
- # Failures reporting, after the end of the tests execution
- self.errors = []
-
def validate(self, obj):
"""Tests whether the given object is a valid structured message
(only does a superficial validation)"""
if not (isinstance(obj, dict) and 'action' in obj and obj[
'action'] in MessageLogger.VALID_ACTIONS):
raise ValueError
def _fix_test_name(self, message):
@@ -240,18 +237,16 @@ class MessageLogger(object):
if message['action'] == 'buffering_off':
self.buffering = False
return
# Error detection also supports "raw" errors (in log messages) because some tests
# manually dump 'TEST-UNEXPECTED-FAIL'.
if ('expected' in message or (message['action'] == 'log' and message[
'message'].startswith('TEST-UNEXPECTED'))):
- # Saving errors/failures to be shown at the end of the test run
- self.errors.append(message)
self.restore_buffering = self.restore_buffering or self.buffering
self.buffering = False
if self.buffered_messages:
snipped = len(
self.buffered_messages) - self.BUFFERING_THRESHOLD
if snipped > 0:
self.logger.info(
"<snipped {0} output lines - "
@@ -324,33 +319,33 @@ def call(*args, **kwargs):
process = mozprocess.ProcessHandler(*args, **kwargs)
process.run()
return process.wait()
def killPid(pid, log):
# see also https://bugzilla.mozilla.org/show_bug.cgi?id=911249#c58
- if HAVE_PSUTIL:
- # Kill a process tree (including grandchildren) with signal.SIGTERM
- if pid == os.getpid():
- raise RuntimeError("Error: trying to kill ourselves, not another process")
- try:
- parent = psutil.Process(pid)
- children = parent.children(recursive=True)
- children.append(parent)
- for p in children:
- p.send_signal(signal.SIGTERM)
- gone, alive = psutil.wait_procs(children, timeout=30)
+ if HAVE_PSUTIL:
+ # Kill a process tree (including grandchildren) with signal.SIGTERM
+ if pid == os.getpid():
+ raise RuntimeError("Error: trying to kill ourselves, not another process")
+ try:
+ parent = psutil.Process(pid)
+ children = parent.children(recursive=True)
+ children.append(parent)
+ for p in children:
+ p.send_signal(signal.SIGTERM)
+ gone, alive = psutil.wait_procs(children, timeout=30)
for p in gone:
log.info('psutil found pid %s dead' % p.pid)
for p in alive:
log.info('failed to kill pid %d after 30s' % p.pid)
- except Exception as e:
- log.info("Error: Failed to kill process %d: %s" % (pid, str(e)))
+ except Exception as e:
+ log.info("Error: Failed to kill process %d: %s" % (pid, str(e)))
else:
try:
os.kill(pid, getattr(signal, "SIGKILL", signal.SIGTERM))
except Exception as e:
log.info("Failed to kill process %d: %s" % (pid, str(e)))
if mozinfo.isWin:
@@ -2558,39 +2553,40 @@ toolbar#nav-bar {
tests = [t for t in tests if 'disabled' not in t]
# Until we have all green, this does not run on a11y (for perf reasons)
if not options.runByManifest:
return self.runMochitests(options, [t['path'] for t in tests])
# code for --run-by-manifest
manifests = set(t['manifest'] for t in tests)
- result = 1 # default value, if no tests are run.
+ result = 0
origPrefs = options.extraPrefs[:]
for m in sorted(manifests):
self.log.info("Running manifest: {}".format(m))
prefs = list(self.prefs_by_manifest[m])[0]
options.extraPrefs = origPrefs[:]
if prefs:
prefs = prefs.strip().split()
self.log.info("The following extra prefs will be set:\n {}".format(
'\n '.join(prefs)))
options.extraPrefs.extend(prefs)
# If we are using --run-by-manifest, we should not use the profile path (if) provided
# by the user, since we need to create a new directory for each run. We would face
# problems if we use the directory provided by the user.
tests_in_manifest = [t['path'] for t in tests if t['manifest'] == m]
- result = self.runMochitests(options, tests_in_manifest)
+ res = self.runMochitests(options, tests_in_manifest)
+ result = result or res
# Dump the logging buffer
self.message_logger.dump_buffered()
- if result == -1:
+ if res == -1:
break
e10s_mode = "e10s" if options.e10s else "non-e10s"
# printing total number of tests
if options.flavor == 'browser':
print("TEST-INFO | checking window state")
print("Browser Chrome Test Summary")
@@ -2602,16 +2598,20 @@ toolbar#nav-bar {
else:
print("0 INFO TEST-START | Shutdown")
print("1 INFO Passed: %s" % self.countpass)
print("2 INFO Failed: %s" % self.countfail)
print("3 INFO Todo: %s" % self.counttodo)
print("4 INFO Mode: %s" % e10s_mode)
print("5 INFO SimpleTest FINISHED")
+ if not result and not self.countpass:
+ # either tests failed or no tests run
+ result = 1
+
return result
def doTests(self, options, testsToFilter=None):
# A call to initializeLooping method is required in case of --run-by-dir or --bisect-chunk
# since we need to initialize variables for each loop.
if options.bisectChunk or options.runByManifest:
self.initializeLooping(options)
@@ -3045,27 +3045,17 @@ def run_test_harness(parser, options):
if runner.mozLogs:
with zipfile.ZipFile("{}/mozLogs.zip".format(runner.browserEnv["MOZ_UPLOAD_DIR"]),
"w", zipfile.ZIP_DEFLATED) as logzip:
for logfile in glob.glob("{}/moz*.log*".format(runner.browserEnv["MOZ_UPLOAD_DIR"])):
logzip.write(logfile)
os.remove(logfile)
logzip.close()
-
- # don't dump failures if running from automation as treeherder already displays them
- if build_obj:
- if runner.message_logger.errors:
- result = 1
- runner.message_logger.logger.warning("The following tests failed:")
- for error in runner.message_logger.errors:
- runner.message_logger.logger.log_raw(error)
-
runner.message_logger.finish()
-
return result
def cli(args=sys.argv[1:]):
# parse command line options
parser = MochitestArgumentParser(app='generic')
options = parser.parse_args(args)
if options is None:
--- a/testing/mochitest/tests/python/test_basic_mochitest_plain.py
+++ b/testing/mochitest/tests/python/test_basic_mochitest_plain.py
@@ -26,39 +26,28 @@ def test_output_pass(runtests):
assert log_level in (INFO, WARNING)
lines = filter_action('test_status', lines)
assert len(lines) == 1
assert lines[0]['status'] == 'PASS'
def test_output_fail(runtests):
- from runtests import build_obj
-
status, lines = runtests('test_fail.html')
assert status == 1
tbpl_status, log_level = get_mozharness_status(lines, status)
assert tbpl_status == TBPL_WARNING
assert log_level == WARNING
lines = filter_action('test_status', lines)
- # If we are running with a build_obj, the failed status will be
- # logged a second time at the end of the run.
- if build_obj:
- assert len(lines) == 2
- else:
- assert len(lines) == 1
+ assert len(lines) == 1
assert lines[0]['status'] == 'FAIL'
- if build_obj:
- assert set(lines[0].keys()) == set(lines[1].keys())
- assert set(lines[0].values()) == set(lines[1].values())
-
@pytest.mark.skip_mozinfo("!crashreporter")
def test_output_crash(runtests):
status, lines = runtests('test_crash.html', environment=["MOZ_CRASHREPORTER_SHUTDOWN=1"])
assert status == 1
tbpl_status, log_level = get_mozharness_status(lines, status)
assert tbpl_status == TBPL_FAILURE
@@ -72,18 +61,17 @@ def test_output_crash(runtests):
lines = filter_action('test_end', lines)
assert len(lines) == 0
@pytest.mark.skip_mozinfo("!asan")
def test_output_asan(runtests):
status, lines = runtests('test_crash.html', environment=["MOZ_CRASHREPORTER_SHUTDOWN=1"])
- # TODO: mochitest should return non-zero here
- assert status == 0
+ assert status == 1
tbpl_status, log_level = get_mozharness_status(lines, status)
assert tbpl_status == TBPL_FAILURE
assert log_level == ERROR
crash = filter_action('crash', lines)
assert len(crash) == 0