Bug 1344346 - [mochitest] Log unstructured output as 'process_output', r?jgraham
Mochitest currently converts unstructured logs (e.g. output from gecko) to 'info' messages. But
this means those messages won't be validated against mozharness' error logs. This change
gets unstructured messages logged as 'process_output', and also ensures the StructuredOutputParser
in mozharness checks 'process_output' messages against the error list.
MozReview-Commit-ID: KPTQnulwzyK
--- a/testing/mochitest/leaks.py
+++ b/testing/mochitest/leaks.py
@@ -21,31 +21,35 @@ class ShutdownLeaks(object):
self.logger = logger
self.tests = []
self.leakedWindows = {}
self.leakedDocShells = set()
self.currentTest = None
self.seenShutdown = set()
def log(self, message):
- if message['action'] == 'log':
- line = message['message']
+ action = message['action']
+
+ # Remove 'log' when jetpack and clipboard are gone and/or structured.
+ if action in ('log', 'process_output'):
+ line = message['message'] if action == 'log' else message['data']
+
if line[2:11] == "DOMWINDOW":
self._logWindow(line)
elif line[2:10] == "DOCSHELL":
self._logDocShell(line)
elif line.startswith("Completed ShutdownLeaks collections in process"):
pid = int(line.split()[-1])
self.seenShutdown.add(pid)
- elif message['action'] == 'test_start':
+ elif action == 'test_start':
fileName = message['test'].replace(
"chrome://mochitests/content/browser/", "")
self.currentTest = {
"fileName": fileName, "windows": set(), "docShells": set()}
- elif message['action'] == 'test_end':
+ elif action == 'test_end':
# don't track a test if no windows or docShells leaked
if self.currentTest and (self.currentTest["windows"] or self.currentTest["docShells"]):
self.tests.append(self.currentTest)
self.currentTest = None
def process(self):
if not self.seenShutdown:
self.logger.error(
--- a/testing/mochitest/runrobocop.py
+++ b/testing/mochitest/runrobocop.py
@@ -37,17 +37,17 @@ class RobocopTestRunner(MochitestDesktop
dm = None
# Some robocop tests run for >60 seconds without generating any output.
NO_OUTPUT_TIMEOUT = 180
def __init__(self, automation, devmgr, options):
"""
Simple one-time initialization.
"""
- MochitestDesktop.__init__(self, options)
+ MochitestDesktop.__init__(self, options.flavor, options)
self.auto = automation
self.dm = devmgr
self.dm.default_timeout = 320
self.options = options
self.options.logFile = "robocop.log"
self.environment = self.auto.environment
self.deviceRoot = self.dm.getDeviceRoot()
--- a/testing/mochitest/runtests.py
+++ b/testing/mochitest/runtests.py
@@ -150,36 +150,39 @@ class MessageLogger(object):
BUFFERED_ACTIONS = set(['test_status', 'log'])
VALID_ACTIONS = set(['suite_start', 'suite_end', 'test_start', 'test_end',
'test_status', 'log',
'buffering_on', 'buffering_off'])
TEST_PATH_PREFIXES = ['/tests/',
'chrome://mochitests/content/browser/',
'chrome://mochitests/content/chrome/']
- def __init__(self, logger, buffering=True):
+ def __init__(self, logger, buffering=True, structured=True):
self.logger = logger
+ self.structured = structured
+ self.gecko_id = 'GECKO'
# Even if buffering is enabled, we only want to buffer messages between
# TEST-START/TEST-END. So it is off to begin, but will be enabled after
# a TEST-START comes in.
self.buffering = False
self.restore_buffering = buffering
# Message buffering
self.buffered_messages = []
# Failures reporting, after the end of the tests execution
self.errors = []
- def valid_message(self, obj):
- """True if the given object is a valid structured message
+ def validate(self, obj):
+ """Tests whether the given object is a valid structured message
(only does a superficial validation)"""
- return isinstance(obj, dict) and 'action' in obj and obj[
- 'action'] in MessageLogger.VALID_ACTIONS
+ if not (isinstance(obj, dict) and 'action' in obj and obj[
+ 'action'] in MessageLogger.VALID_ACTIONS):
+ raise ValueError
def _fix_test_name(self, message):
"""Normalize a logged test path to match the relative path from the sourcedir.
"""
if 'test' in message:
test = message['test']
for prefix in MessageLogger.TEST_PATH_PREFIXES:
if test.startswith(prefix):
@@ -199,49 +202,47 @@ class MessageLogger(object):
line = line.rstrip().decode("UTF-8", "replace")
messages = []
for fragment in line.split(MessageLogger.DELIMITER):
if not fragment:
continue
try:
message = json.loads(fragment)
- if not self.valid_message(message):
+ self.validate(message)
+ except ValueError:
+ if self.structured:
+ message = dict(
+ action='process_output',
+ process=self.gecko_id,
+ data=fragment,
+ )
+ else:
message = dict(
action='log',
level='info',
message=fragment,
- unstructured=True)
- except ValueError:
- message = dict(
- action='log',
- level='info',
- message=fragment,
- unstructured=True)
+ )
+
self._fix_test_name(message)
self._fix_message_format(message)
messages.append(message)
return messages
def process_message(self, message):
"""Processes a structured message. Takes into account buffering, errors, ..."""
# Activation/deactivating message buffering from the JS side
if message['action'] == 'buffering_on':
self.buffering = True
return
if message['action'] == 'buffering_off':
self.buffering = False
return
- unstructured = False
- if 'unstructured' in message:
- unstructured = True
- message.pop('unstructured')
-
# Error detection also supports "raw" errors (in log messages) because some tests
# manually dump 'TEST-UNEXPECTED-FAIL'.
if ('expected' in message or (message['action'] == 'log' and message[
'message'].startswith('TEST-UNEXPECTED'))):
# Saving errors/failures to be shown at the end of the test run
self.errors.append(message)
self.restore_buffering = self.restore_buffering or self.buffering
self.buffering = False
@@ -253,25 +254,22 @@ class MessageLogger(object):
"<snipped {0} output lines - "
"if you need more context, please use "
"SimpleTest.requestCompleteLog() in your test>" .format(snipped))
# Dumping previously buffered messages
self.dump_buffered(limit=True)
# Logging the error message
self.logger.log_raw(message)
- # If we don't do any buffering, or the tests haven't started, or the message was
- # unstructured, it is directly logged.
- elif any([not self.buffering,
- unstructured,
- message['action'] not in self.BUFFERED_ACTIONS]):
+ # Determine if message should be buffered
+ elif self.buffering and self.structured and message['action'] in self.BUFFERED_ACTIONS:
+ self.buffered_messages.append(message)
+ # Otherwise log the message directly
+ else:
self.logger.log_raw(message)
- else:
- # Buffering the message
- self.buffered_messages.append(message)
# If a test ended, we clean the buffer
if message['action'] == 'test_end':
self.buffered_messages = []
self.restore_buffering = self.restore_buffering or self.buffering
self.buffering = False
if message['action'] == 'test_start':
@@ -788,18 +786,19 @@ class MochitestDesktop(object):
mediaDevices = None
patternFiles = {}
# XXX use automation.py for test name to avoid breaking legacy
# TODO: replace this with 'runtests.py' or 'mochitest' or the like
test_name = 'automation.py'
- def __init__(self, logger_options, quiet=False):
+ def __init__(self, flavor, logger_options, quiet=False):
self.update_mozinfo()
+ self.flavor = flavor
self.server = None
self.wsserver = None
self.websocketProcessBridge = None
self.sslTunnel = None
self.tests_by_manifest = defaultdict(list)
self._active_tests = None
self._locations = None
@@ -815,17 +814,22 @@ class MochitestDesktop(object):
"Mochitest specific tbpl formatter")
self.log = commandline.setup_logging("mochitest",
logger_options,
{
"tbpl": sys.stdout
})
MochitestDesktop.log = self.log
- self.message_logger = MessageLogger(logger=self.log, buffering=quiet)
+ # Jetpack flavors still don't use the structured logger. We need to process their output
+ # slightly differently.
+ structured = not self.flavor.startswith('jetpack')
+ self.message_logger = MessageLogger(
+ logger=self.log, buffering=quiet, structured=structured)
+
# Max time in seconds to wait for server startup before tests will fail -- if
# this seems big, it's mostly for debug machines where cold startup
# (particularly after a build) takes forever.
self.SERVER_STARTUP_TIMEOUT = 180 if mozinfo.info.get('debug') else 90
# metro browser sub process id
self.browserProcessId = None
@@ -2072,17 +2076,20 @@ toolbar#nav-bar {
process_args=kp_kwargs)
# start the runner
runner.start(debug_args=debug_args,
interactive=interactive,
outputTimeout=timeout)
proc = runner.process_handler
self.log.info("runtests.py | Application pid: %d" % proc.pid)
- self.log.process_start("Main app process")
+
+ gecko_id = "GECKO(%d)" % proc.pid
+ self.log.process_start(gecko_id)
+ self.message_logger.gecko_id = gecko_id
# start marionette and kick off the tests
marionette_args = marionette_args or {}
port_timeout = marionette_args.pop('port_timeout', 60)
self.marionette = Marionette(**marionette_args)
self.marionette.start_session(timeout=port_timeout)
# install specialpowers and mochikit as temporary addons
@@ -2738,17 +2745,17 @@ toolbar#nav-bar {
def run_test_harness(parser, options):
parser.validate(options)
logger_options = {
key: value for key, value in vars(options).iteritems()
if key.startswith('log') or key == 'valgrind'}
- runner = MochitestDesktop(logger_options, quiet=options.quiet)
+ runner = MochitestDesktop(options.flavor, logger_options, quiet=options.quiet)
options.runByDir = False
if options.flavor in ('plain', 'browser', 'chrome'):
options.runByDir = True
result = runner.runTests(options)
--- a/testing/mochitest/runtestsremote.py
+++ b/testing/mochitest/runtestsremote.py
@@ -24,17 +24,17 @@ SCRIPT_DIR = os.path.abspath(os.path.rea
class MochiRemote(MochitestDesktop):
_automation = None
_dm = None
localProfile = None
logMessages = []
def __init__(self, automation, devmgr, options):
- MochitestDesktop.__init__(self, options)
+ MochitestDesktop.__init__(self, options.flavor, options)
self._automation = automation
self._dm = devmgr
self.environment = self._automation.environment
self.remoteProfile = os.path.join(options.remoteTestRoot, "profile/")
self.remoteModulesDir = os.path.join(options.remoteTestRoot, "modules/")
self._automation.setRemoteProfile(self.remoteProfile)
self.remoteLog = options.remoteLogFile