--- a/testing/mozharness/mozharness/mozilla/blob_upload.py
+++ b/testing/mozharness/mozharness/mozilla/blob_upload.py
@@ -7,24 +7,22 @@
import os
from mozharness.base.python import VirtualenvMixin
from mozharness.base.script import PostScriptRun
blobupload_config_options = [
[["--blob-upload-branch"],
- {"dest": "blob_upload_branch",
- "help": "Branch for blob server's metadata",
- }],
+ {"dest": "blob_upload_branch",
+ "help": "Branch for blob server's metadata", }],
[["--blob-upload-server"],
- {"dest": "blob_upload_servers",
- "action": "extend",
- "help": "Blob servers's location",
- }]
+ {"dest": "blob_upload_servers",
+ "action": "extend",
+ "help": "Blob servers's location", }]
]
class BlobUploadMixin(VirtualenvMixin):
"""Provides mechanism to automatically upload files written in
MOZ_UPLOAD_DIR to the blobber upload server at the end of the
running script.
@@ -41,17 +39,17 @@ class BlobUploadMixin(VirtualenvMixin):
for req in requirements:
self.register_virtualenv_module(req, method='pip')
def upload_blobber_files(self):
self.debug("Check branch and server cmdline options.")
if self.config.get('blob_upload_branch') and \
(self.config.get('blob_upload_servers') or
self.config.get('default_blob_upload_servers')) and \
- self.config.get('blob_uploader_auth_file'):
+ self.config.get('blob_uploader_auth_file'):
self.info("Blob upload gear active.")
upload = [self.query_python_path(), self.query_python_path("blobberc.py")]
dirs = self.query_abs_dirs()
self.debug("Get the directory from which to upload the files.")
if dirs.get('abs_blob_upload_dir'):
blob_dir = dirs['abs_blob_upload_dir']
@@ -70,40 +68,41 @@ class BlobUploadMixin(VirtualenvMixin):
self.info("Preparing to upload files from %s." % blob_dir)
auth_file = self.config.get('blob_uploader_auth_file')
if not os.path.isfile(auth_file):
self.warning("Could not find the credentials files!")
return
blob_branch = self.config.get('blob_upload_branch')
blob_servers_list = self.config.get('blob_upload_servers',
- self.config.get('default_blob_upload_servers'))
+ self.config.get('default_blob_upload_servers'))
servers = []
for server in blob_servers_list:
servers.extend(['-u', server])
auth = ['-a', auth_file]
branch = ['-b', blob_branch]
dir_to_upload = ['-d', blob_dir]
# We want blobberc to tell us if a summary file was uploaded through this manifest file
manifest_path = os.path.join(dirs['abs_work_dir'], 'uploaded_files.json')
record_uploaded_files = ['--output-manifest', manifest_path]
self.info("Files from %s are to be uploaded with <%s> branch at "
- "the following location(s): %s" % (blob_dir, blob_branch,
- ", ".join(["%s" % s for s in blob_servers_list])))
+ "the following location(s): %s" %
+ (blob_dir, blob_branch, ", ".join(["%s" % s for s in blob_servers_list])))
# call blob client to upload files to server
- self.run_command(upload + servers + auth + branch + dir_to_upload + record_uploaded_files)
+ self.run_command(upload + servers + auth + branch +
+ dir_to_upload + record_uploaded_files)
uploaded_files = '{}'
if os.path.isfile(manifest_path):
with open(manifest_path, 'r') as f:
uploaded_files = f.read()
self.rmtree(manifest_path)
self.set_buildbot_property(prop_name='blobber_files',
- prop_value=uploaded_files, write_to_file=True)
+ prop_value=uploaded_files, write_to_file=True)
else:
self.warning("Blob upload gear skipped. Missing cmdline options.")
@PostScriptRun
def _upload_blobber_files(self):
self.upload_blobber_files()
--- a/testing/mozharness/mozharness/mozilla/buildbot.py
+++ b/testing/mozharness/mozharness/mozilla/buildbot.py
@@ -83,23 +83,27 @@ class BuildbotMixin(object):
dirs = self.query_abs_dirs()
log_file = os.path.join(
dirs['abs_log_dir'],
self.log_obj.log_files[self.log_obj.log_level]
)
if os.path.exists(log_file):
file_size = os.path.getsize(log_file)
if file_size > self.config['buildbot_max_log_size']:
- self.error("Log file size %d is greater than max allowed %d! Setting TBPL_FAILURE (was %s)..." % (file_size, self.config['buildbot_max_log_size'], tbpl_status))
+ self.error("Log file size %d is greater than max allowed %d! Setting "
+ "TBPL_FAILURE (was %s)..." %
+ (file_size, self.config['buildbot_max_log_size'], tbpl_status))
tbpl_status = TBPL_FAILURE
if not level:
level = TBPL_STATUS_DICT[tbpl_status]
- self.worst_buildbot_status = self.worst_level(tbpl_status, self.worst_buildbot_status, TBPL_WORST_LEVEL_TUPLE)
+ self.worst_buildbot_status = self.worst_level(tbpl_status, self.worst_buildbot_status,
+ TBPL_WORST_LEVEL_TUPLE)
if self.worst_buildbot_status != tbpl_status:
- self.info("Current worst status %s is worse; keeping it." % self.worst_buildbot_status)
+ self.info("Current worst status %s is worse; keeping it." %
+ self.worst_buildbot_status)
self.add_summary("# TBPL %s #" % self.worst_buildbot_status, level=level)
if set_return_code:
self.return_code = EXIT_STATUS_DICT[self.worst_buildbot_status]
def set_buildbot_property(self, prop_name, prop_value, write_to_file=False):
self.info("Setting buildbot property %s to %s" % (prop_name, prop_value))
self.buildbot_properties[prop_name] = prop_value
if write_to_file:
@@ -132,66 +136,73 @@ class BuildbotMixin(object):
dir_name = os.path.dirname(file_name)
if not os.path.isdir(dir_name):
self.mkdir_p(dir_name)
if not prop_list:
prop_list = self.buildbot_properties.keys()
self.info("Writing buildbot properties to %s" % file_name)
else:
if not isinstance(prop_list, (list, tuple)):
- self.log("dump_buildbot_properties: Can't dump non-list prop_list %s!" % str(prop_list), level=error_level)
+ self.log("dump_buildbot_properties: Can't dump non-list prop_list %s!" %
+ str(prop_list), level=error_level)
return
self.info("Writing buildbot properties %s to %s" % (str(prop_list), file_name))
contents = ""
for prop in prop_list:
contents += "%s:%s\n" % (prop, self.buildbot_properties.get(prop, "None"))
return self.write_to_file(file_name, contents)
def invoke_sendchange(self, downloadables=None, branch=None,
- username="sendchange-unittest", sendchange_props=None):
+ username="sendchange-unittest", sendchange_props=None):
""" Generic sendchange, currently b2g- and unittest-specific.
"""
c = self.config
buildbot = self.query_exe("buildbot", return_type="list")
if branch is None:
if c.get("debug_build"):
platform = re.sub('[_-]debug', '', self.buildbot_config["properties"]["platform"])
- branch = '%s-%s-debug-unittest' % (self.buildbot_config["properties"]["branch"], platform)
+ branch = '%s-%s-debug-unittest' % (self.buildbot_config["properties"]["branch"],
+ platform)
else:
- branch = '%s-%s-opt-unittest' % (self.buildbot_config["properties"]["branch"], self.buildbot_config["properties"]["platform"])
+ branch = '%s-%s-opt-unittest' % (self.buildbot_config["properties"]["branch"],
+ self.buildbot_config["properties"]["platform"])
sendchange = [
'sendchange',
'--master', c.get("sendchange_masters")[0],
'--username', username,
'--branch', branch,
]
if self.buildbot_config['sourcestamp'].get("revision"):
sendchange += ['-r', self.buildbot_config['sourcestamp']["revision"]]
if len(self.buildbot_config['sourcestamp']['changes']) > 0:
if self.buildbot_config['sourcestamp']['changes'][0].get('who'):
- sendchange += ['--username', self.buildbot_config['sourcestamp']['changes'][0]['who']]
+ sendchange += ['--username',
+ self.buildbot_config['sourcestamp']['changes'][0]['who']]
if self.buildbot_config['sourcestamp']['changes'][0].get('comments'):
- sendchange += ['--comments', self.buildbot_config['sourcestamp']['changes'][0]['comments'].encode('ascii', 'ignore')]
+                comments = self.buildbot_config['sourcestamp']['changes'][0]['comments']
+                sendchange += ['--comments', comments.encode('ascii', 'ignore')]
if sendchange_props:
for key, value in sendchange_props.iteritems():
sendchange.extend(['--property', '%s:%s' % (key, value)])
else:
if self.buildbot_config["properties"].get("builduid"):
- sendchange += ['--property', "builduid:%s" % self.buildbot_config["properties"]["builduid"]]
+ sendchange += ['--property', "builduid:%s" %
+ self.buildbot_config["properties"]["builduid"]]
sendchange += [
'--property', "buildid:%s" % self.query_buildid(),
'--property', 'pgo_build:False',
]
for d in downloadables:
sendchange += [d]
retcode = self.run_command(buildbot + sendchange)
if retcode != 0:
- self.info("The sendchange failed but we don't want to turn the build orange: %s" % retcode)
+ self.info("The sendchange failed but we don't want to turn the build orange: %s" %
+ retcode)
def query_build_name(self):
build_name = self.config.get('platform')
if not build_name:
self.fatal('Must specify "platform" in the mozharness config for indexing')
return build_name
--- a/testing/mozharness/mozharness/mozilla/building/buildbase.py
+++ b/testing/mozharness/mozharness/mozilla/building/buildbase.py
@@ -29,17 +29,16 @@ import re
from mozharness.base.config import BaseConfig, parse_config_file
from mozharness.base.log import ERROR, OutputParser, FATAL
from mozharness.base.script import PostScriptRun
from mozharness.base.vcs.vcsbase import MercurialScript
from mozharness.mozilla.buildbot import (
BuildbotMixin,
EXIT_STATUS_DICT,
TBPL_STATUS_DICT,
- TBPL_EXCEPTION,
TBPL_FAILURE,
TBPL_RETRY,
TBPL_WARNING,
TBPL_SUCCESS,
TBPL_WORST_LEVEL_TUPLE,
)
from mozharness.mozilla.purge import PurgeMixin
from mozharness.mozilla.mock import MockMixin
@@ -68,17 +67,17 @@ you are running this in buildbot, "repo_
'comments_undetermined': '"comments" could not be determined. This may be \
because it was a forced build.',
'tooltool_manifest_undetermined': '"tooltool_manifest_src" not set, \
Skipping run_tooltool...',
}
ERROR_MSGS.update(MOCK_ERROR_MSGS)
-### Output Parsers
+# Output Parsers
TBPL_UPLOAD_ERRORS = [
{
'regex': re.compile("Connection timed out"),
'level': TBPL_RETRY,
},
{
'regex': re.compile("Connection reset by peer"),
@@ -136,31 +135,33 @@ class MakeUploadOutputParser(OutputParse
# For android builds, the package is also used as the mar file.
# Grab the first one, since that is the one in the
# nightly/YYYY/MM directory
if self.use_package_as_marfile:
if 'tinderbox-builds' in m or 'nightly/latest-' in m:
self.info("Skipping wrong packageUrl: %s" % m)
else:
if 'completeMarUrl' in self.matches:
- self.fatal("Found multiple package URLs. Please update buildbase.py")
+ self.fatal("Found multiple package URLs. "
+ "Please update buildbase.py")
self.info("Using package as mar file: %s" % m)
self.matches['completeMarUrl'] = m
u, self.package_filename = os.path.split(m)
if self.use_package_as_marfile and self.package_filename:
# The checksum file is also dumped during 'make upload'. Look
# through here to get the hash and filesize of the android package
# for balrog submission.
pat = r'''^([^ ]*) sha512 ([0-9]*) %s$''' % self.package_filename
m = re.compile(pat).match(line)
if m:
self.matches['completeMarHash'] = m.group(1)
self.matches['completeMarSize'] = m.group(2)
- self.info("Using package as mar file and found package hash=%s size=%s" % (m.group(1), m.group(2)))
+ self.info("Using package as mar file and found package hash=%s size=%s" %
+ (m.group(1), m.group(2)))
# now let's check for retry errors which will give log levels:
# tbpl status as RETRY and mozharness status as WARNING
for error_check in self.tbpl_error_list:
if error_check['regex'].search(line):
self.num_warnings += 1
self.warning(line)
self.tbpl_status = self.worst_level(
@@ -254,31 +255,30 @@ class BuildingConfig(BaseConfig):
all_config_dicts = []
# important config files
variant_cfg_file = branch_cfg_file = pool_cfg_file = ''
# we want to make the order in which the options were given
# not matter. ie: you can supply --branch before --build-pool
# or vice versa and the hierarchy will not be different
- #### The order from highest precedence to lowest is:
- ## There can only be one of these...
+ # The order from highest precedence to lowest is:
+ # There can only be one of these...
# 1) build_pool: this can be either staging, pre-prod, and prod cfgs
# 2) branch: eg: mozilla-central, cedar, cypress, etc
# 3) build_variant: these could be known like asan and debug
# or a custom config
- ##
- ## There can be many of these
+ #
+ # There can be many of these:
# 4) all other configs: these are any configs that are passed with
# --cfg and --opt-cfg. There order is kept in
# which they were passed on the cmd line. This
# behaviour is maintains what happens by default
# in mozharness
- ##
- ####
+ #
# so, let's first assign the configs that hold a known position of
# importance (1 through 3)
for i, cf in enumerate(all_config_files):
if options.build_pool:
if cf == BuildOptionParser.build_pool_cfg_file:
pool_cfg_file = all_config_files[i]
@@ -355,17 +355,18 @@ class BuildOptionParser(object):
'cross-artifact': 'builds/releng_sub_%s_configs/%s_cross_artifact.py',
'debug': 'builds/releng_sub_%s_configs/%s_debug.py',
'asan-and-debug': 'builds/releng_sub_%s_configs/%s_asan_and_debug.py',
'asan-tc-and-debug': 'builds/releng_sub_%s_configs/%s_asan_tc_and_debug.py',
'stat-and-debug': 'builds/releng_sub_%s_configs/%s_stat_and_debug.py',
'code-coverage': 'builds/releng_sub_%s_configs/%s_code_coverage.py',
'source': 'builds/releng_sub_%s_configs/%s_source.py',
'noopt-debug': 'builds/releng_sub_%s_configs/%s_noopt_debug.py',
- 'api-16-gradle-dependencies': 'builds/releng_sub_%s_configs/%s_api_16_gradle_dependencies.py',
+ 'api-16-gradle-dependencies':
+ 'builds/releng_sub_%s_configs/%s_api_16_gradle_dependencies.py',
'api-16': 'builds/releng_sub_%s_configs/%s_api_16.py',
'api-16-old-id': 'builds/releng_sub_%s_configs/%s_api_16_old_id.py',
'api-16-artifact': 'builds/releng_sub_%s_configs/%s_api_16_artifact.py',
'api-16-debug': 'builds/releng_sub_%s_configs/%s_api_16_debug.py',
'api-16-debug-artifact': 'builds/releng_sub_%s_configs/%s_api_16_debug_artifact.py',
'api-16-gradle': 'builds/releng_sub_%s_configs/%s_api_16_gradle.py',
'api-16-gradle-artifact': 'builds/releng_sub_%s_configs/%s_api_16_gradle_artifact.py',
'rusttests': 'builds/releng_sub_%s_configs/%s_rusttests.py',
@@ -374,17 +375,17 @@ class BuildOptionParser(object):
'x86-old-id': 'builds/releng_sub_%s_configs/%s_x86_old_id.py',
'x86-artifact': 'builds/releng_sub_%s_configs/%s_x86_artifact.py',
'api-16-partner-sample1': 'builds/releng_sub_%s_configs/%s_api_16_partner_sample1.py',
'aarch64': 'builds/releng_sub_%s_configs/%s_aarch64.py',
'android-test': 'builds/releng_sub_%s_configs/%s_test.py',
'android-checkstyle': 'builds/releng_sub_%s_configs/%s_checkstyle.py',
'android-lint': 'builds/releng_sub_%s_configs/%s_lint.py',
'android-findbugs': 'builds/releng_sub_%s_configs/%s_findbugs.py',
- 'valgrind' : 'builds/releng_sub_%s_configs/%s_valgrind.py',
+ 'valgrind': 'builds/releng_sub_%s_configs/%s_valgrind.py',
'artifact': 'builds/releng_sub_%s_configs/%s_artifact.py',
'debug-artifact': 'builds/releng_sub_%s_configs/%s_debug_artifact.py',
'devedition': 'builds/releng_sub_%s_configs/%s_devedition.py',
'dmd': 'builds/releng_sub_%s_configs/%s_dmd.py',
}
build_pool_cfg_file = 'builds/build_pool_specifics.py'
branch_cfg_file = 'builds/branch_specifics.py'
@@ -756,18 +757,17 @@ or run without that action (ie: --no-{ac
sys.executable, os.path.join(dirs['abs_src_dir'], 'mach'), 'python',
print_conf_setting_path, app_ini_path,
'App', prop
]
env = self.query_build_env()
# dirs['abs_obj_dir'] can be different from env['MOZ_OBJDIR'] on
# mac, and that confuses mach.
del env['MOZ_OBJDIR']
- return self.get_output_from_command_m(cmd,
- cwd=dirs['abs_obj_dir'], env=env)
+ return self.get_output_from_command_m(cmd, cwd=dirs['abs_obj_dir'], env=env)
else:
return None
def query_builduid(self):
c = self.config
if self.builduid:
return self.builduid
@@ -1160,17 +1160,17 @@ or run without that action (ie: --no-{ac
This method is used both to figure out what revision to check out and
to figure out what revision *was* checked out.
"""
revision = None
if 'revision' in self.buildbot_properties:
revision = self.buildbot_properties['revision']
elif (self.buildbot_config and
- self.buildbot_config.get('sourcestamp', {}).get('revision')):
+ self.buildbot_config.get('sourcestamp', {}).get('revision')):
revision = self.buildbot_config['sourcestamp']['revision']
elif self.buildbot_config and self.buildbot_config.get('revision'):
revision = self.buildbot_config['revision']
else:
if not source_path:
dirs = self.query_abs_dirs()
source_path = dirs['abs_src_dir'] # let's take the default
@@ -1326,17 +1326,17 @@ or run without that action (ie: --no-{ac
app_ini_buildid = self._query_build_prop_from_app_ini('BuildID')
# it would be hard to imagine query_buildid evaluating to a falsey
# value (e.g. 0), but incase it does, force it to None
buildbot_buildid = self.query_buildid() or None
self.info(
'buildid from application.ini: "%s". buildid from buildbot '
'properties: "%s"' % (app_ini_buildid, buildbot_buildid)
)
- if app_ini_buildid == buildbot_buildid != None:
+        if app_ini_buildid == buildbot_buildid is not None:
self.info('buildids match.')
else:
self.error(
'buildids do not match or values could not be determined'
)
# set the build to orange if not already worse
self.return_code = self.worst_level(
EXIT_STATUS_DICT[TBPL_WARNING], self.return_code,
@@ -1404,17 +1404,17 @@ or run without that action (ie: --no-{ac
routes = []
for template in templates:
routes.append(template.format(**fmt))
self.info("Using routes: %s" % routes)
taskid = self.buildbot_config['properties'].get('upload_to_task_id')
tc = Taskcluster(
branch=self.branch,
- rank=pushinfo.pushdate, # Use pushdate as the rank
+ rank=pushinfo.pushdate, # Use pushdate as the rank
client_id=self.client_id,
access_token=self.access_token,
log_obj=self.log_obj,
# `upload_to_task_id` is used by mozci to have access to where the artifacts
# will be uploaded
task_id=taskid,
)
@@ -1475,17 +1475,18 @@ or run without that action (ie: --no-{ac
else:
templates = self.routes_json['routes']
# Some trees may not be setting uploadFiles, so default to []. Normally
# we'd only expect to get here if the build completes successfully,
# which means we should have uploadFiles.
files = self.query_buildbot_property('uploadFiles') or []
if not files:
- self.warning('No files from the build system to upload to S3: uploadFiles property is missing or empty.')
+ self.warning('No files from the build system to upload to S3: uploadFiles'
+                         ' property is missing or empty.')
packageName = self.query_buildbot_property('packageFilename')
self.info('packageFilename is: %s' % packageName)
if self.config.get('use_package_as_marfile'):
self.info('Using packageUrl for the MAR file')
self.set_buildbot_property('completeMarUrl',
self.query_buildbot_property('packageUrl'),
@@ -1501,34 +1502,35 @@ or run without that action (ie: --no-{ac
self.set_buildbot_property('completeMarHash',
self.query_sha512sum(upload_file),
write_to_file=True)
break
property_conditions = [
# key: property name, value: condition
('symbolsUrl', lambda m: m.endswith('crashreporter-symbols.zip') or
- m.endswith('crashreporter-symbols-full.zip')),
+ m.endswith('crashreporter-symbols-full.zip')),
('testsUrl', lambda m: m.endswith(('tests.tar.bz2', 'tests.zip'))),
('robocopApkUrl', lambda m: m.endswith('apk') and 'robocop' in m),
('jsshellUrl', lambda m: 'jsshell-' in m and m.endswith('.zip')),
# Temporarily use "TC" in MarUrl parameters. We don't want to
# override these to point to taskcluster just yet, and still
# need to use FTP. However, they can't be removed outright since
# that can affect packageUrl. See bug 1144985.
('completeMarUrlTC', lambda m: m.endswith('.complete.mar')),
('partialMarUrlTC', lambda m: m.endswith('.mar') and '.partial.' in m),
('codeCoverageURL', lambda m: m.endswith('code-coverage-gcno.zip')),
('sdkUrl', lambda m: m.endswith(('sdk.tar.bz2', 'sdk.zip'))),
('testPackagesUrl', lambda m: m.endswith('test_packages.json')),
('packageUrl', lambda m: m.endswith(packageName)),
]
# Also upload our mozharness log files
- files.extend([os.path.join(self.log_obj.abs_log_dir, x) for x in self.log_obj.log_files.values()])
+ files.extend([os.path.join(self.log_obj.abs_log_dir, x)
+ for x in self.log_obj.log_files.values()])
# Also upload our buildprops.json file.
files.extend([os.path.join(dirs['base_work_dir'], 'buildprops.json')])
self._taskcluster_upload(files, templates,
property_conditions=property_conditions)
def _set_file_properties(self, file_name, find_dir, prop_type,
@@ -1624,18 +1626,18 @@ or run without that action (ie: --no-{ac
buildprops = os.path.join(dirs['base_work_dir'], 'buildprops.json')
# not finding buildprops is not an error outside of buildbot
if os.path.exists(buildprops):
self.copyfile(
buildprops,
os.path.join(dirs['abs_work_dir'], 'buildprops.json'))
if 'MOZILLABUILD' in os.environ:
- # We found many issues with intermittent build failures when not invoking mach via bash.
- # See bug 1364651 before considering changing.
+ # We found many issues with intermittent build failures when not invoking
+ # mach via bash. See bug 1364651 before considering changing.
mach = [
os.path.join(os.environ['MOZILLABUILD'], 'msys', 'bin', 'bash.exe'),
os.path.join(dirs['abs_src_dir'], 'mach')
]
else:
mach = [sys.executable, 'mach']
return_code = self.run_command_m(
@@ -1710,17 +1712,18 @@ or run without that action (ie: --no-{ac
'echo-variable-PACKAGE',
'AB_CD=multi',
]
package_filename = self.get_output_from_command_m(
package_cmd,
cwd=objdir,
)
if not package_filename:
- self.fatal("Unable to determine the package filename for the multi-l10n build. Was trying to run: %s" % package_cmd)
+ self.fatal("Unable to determine the package filename for the multi-l10n build."
+                       " Was trying to run: %s" % package_cmd)
self.info('Multi-l10n package filename is: %s' % package_filename)
parser = MakeUploadOutputParser(config=self.config,
log_obj=self.log_obj,
use_package_as_marfile=True,
package_filename=package_filename,
)
@@ -1913,17 +1916,16 @@ or run without that action (ie: --no-{ac
continue
data['subtests'].append({
'name': phase['name'],
'value': phase['duration'],
})
return data
-
def _load_sccache_stats(self):
stats_file = os.path.join(
self.query_abs_dirs()['abs_obj_dir'], 'sccache-stats.json'
)
if not os.path.exists(stats_file):
self.info('%s does not exist; not loading sccache stats' % stats_file)
return
@@ -2029,17 +2031,17 @@ or run without that action (ie: --no-{ac
# only use for builds with ship. So nix the alerts for builds we don't
# ship.
def filter_alert(alert):
if not self._is_configuration_shipped():
alert['shouldAlert'] = False
return alert
- if installer.endswith('.apk'): # Android
+ if installer.endswith('.apk'): # Android
yield filter_alert({
"name": "installer size",
"value": installer_size,
"alertChangeType": "absolute",
"alertThreshold": (200 * 1024),
"subtests": size_measurements
})
else:
@@ -2180,19 +2182,19 @@ or run without that action (ie: --no-{ac
build_type = 'pgo-'
else: # we don't do talos sendchange for debug so no need to check
build_type = '' # leave 'opt' out of branch for talos
talos_branch = "%s-%s-%s%s" % (self.branch,
self.stage_platform,
build_type,
'talos')
self.invoke_sendchange(downloadables=[installer_url],
- branch=talos_branch,
- username='sendchange',
- sendchange_props=sendchange_props)
+ branch=talos_branch,
+ username='sendchange',
+ sendchange_props=sendchange_props)
elif test_type == 'unittest':
# do unittest sendchange
if c.get('debug_build'):
build_type = '' # for debug builds we append nothing
elif pgo_build:
build_type = '-pgo'
else: # generic opt build
build_type = '-opt'
@@ -2232,21 +2234,20 @@ or run without that action (ie: --no-{ac
# grab any props available from this or previous unclobbered runs
self.generate_build_props(console_output=False,
halt_on_failure=False)
# generate balrog props as artifacts
if self.config.get('taskcluster_nightly'):
env = self.query_mach_build_env(multiLocale=False)
props_path = os.path.join(env["UPLOAD_PATH"],
- 'balrog_props.json')
+ 'balrog_props.json')
self.generate_balrog_props(props_path)
return
-
def valgrind_test(self):
'''Execute mach's valgrind-test for memory leaks'''
env = self.query_build_env()
env.update(self.query_mach_build_env())
return_code = self.run_command_m(
command=[sys.executable, 'mach', 'valgrind-test'],
cwd=self.query_abs_dirs()['abs_src_dir'],
@@ -2255,18 +2256,16 @@ or run without that action (ie: --no-{ac
if return_code:
self.return_code = self.worst_level(
EXIT_STATUS_DICT[TBPL_FAILURE], self.return_code,
AUTOMATION_EXIT_CODES[::-1]
)
self.fatal("'mach valgrind-test' did not run successfully. Please check "
"log for errors.")
-
-
def _post_fatal(self, message=None, exit_code=None):
if not self.return_code: # only overwrite return_code if it's 0
self.error('setting return code to 2 because fatal was called')
self.return_code = 2
@PostScriptRun
def _summarize(self):
""" If this is run in automation, ensure the return code is valid and
--- a/testing/mozharness/mozharness/mozilla/building/hazards.py
+++ b/testing/mozharness/mozharness/mozilla/building/hazards.py
@@ -12,62 +12,63 @@ class HazardError(Exception):
def __str__(self):
return repr(self.value)
# Logging ends up calling splitlines directly on what is being logged, which would fail.
def splitlines(self):
return str(self).splitlines()
+
class HazardAnalysis(object):
def clobber_shell(self, builder):
"""Clobber the specially-built JS shell used to run the analysis"""
dirs = builder.query_abs_dirs()
builder.rmtree(dirs['shell_objdir'])
def configure_shell(self, builder):
"""Configure the specially-built JS shell used to run the analysis"""
dirs = builder.query_abs_dirs()
if not os.path.exists(dirs['shell_objdir']):
builder.mkdir_p(dirs['shell_objdir'])
js_src_dir = os.path.join(dirs['gecko_src'], 'js', 'src')
rc = builder.run_command(['autoconf-2.13'],
- cwd=js_src_dir,
- env=builder.env,
- error_list=MakefileErrorList)
+ cwd=js_src_dir,
+ env=builder.env,
+ error_list=MakefileErrorList)
if rc != 0:
rc = builder.run_command(['autoconf2.13'],
cwd=js_src_dir,
env=builder.env,
error_list=MakefileErrorList)
if rc != 0:
raise HazardError("autoconf failed, can't continue.")
rc = builder.run_command([os.path.join(js_src_dir, 'configure'),
- '--enable-optimize',
- '--disable-debug',
- '--enable-ctypes',
- '--with-system-nspr',
- '--without-intl-api'],
- cwd=dirs['shell_objdir'],
- env=builder.env,
- error_list=MakefileErrorList)
+ '--enable-optimize',
+ '--disable-debug',
+ '--enable-ctypes',
+ '--with-system-nspr',
+ '--without-intl-api'],
+ cwd=dirs['shell_objdir'],
+ env=builder.env,
+ error_list=MakefileErrorList)
if rc != 0:
raise HazardError("Configure failed, can't continue.")
def build_shell(self, builder):
"""Build a JS shell specifically for running the analysis"""
dirs = builder.query_abs_dirs()
rc = builder.run_command(['make', '-j', str(builder.config.get('concurrency', 4)), '-s'],
- cwd=dirs['shell_objdir'],
- env=builder.env,
- error_list=MakefileErrorList)
+ cwd=dirs['shell_objdir'],
+ env=builder.env,
+ error_list=MakefileErrorList)
if rc != 0:
raise HazardError("Build failed, can't continue.")
def clobber(self, builder):
"""Clobber all of the old analysis data. Note that theoretically we could do
incremental analyses, but they seem to still be buggy."""
dirs = builder.query_abs_dirs()
builder.rmtree(dirs['abs_analysis_dir'])
@@ -104,19 +105,18 @@ jobs = 4
""" % values
defaults_path = os.path.join(analysis_dir, 'defaults.py')
file(defaults_path, "w").write(defaults)
builder.log("Wrote analysis config file " + defaults_path)
build_script = builder.config['build_command']
builder.copyfile(os.path.join(dirs['mozharness_scriptdir'],
- os.path.join('spidermonkey', build_script)),
- os.path.join(analysis_dir, build_script),
- copystat=True)
+ os.path.join('spidermonkey', build_script)),
+ os.path.join(analysis_dir, build_script), copystat=True)
def run(self, builder, env, error_list):
"""Execute the analysis, which consists of building all analyzed
source code with a GCC plugin active that siphons off the interesting
data, then running some JS scripts over the databases created by
the plugin."""
dirs = builder.query_abs_dirs()
analysis_dir = dirs['abs_analysis_dir']
@@ -127,19 +127,19 @@ jobs = 4
cmd = [
builder.config['python'],
os.path.join(analysis_scriptdir, 'analyze.py'),
"--source", dirs['gecko_src'],
"--buildcommand", build_script,
]
retval = builder.run_command(cmd,
- cwd=analysis_dir,
- env=env,
- error_list=error_list)
+ cwd=analysis_dir,
+ env=env,
+ error_list=error_list)
if retval != 0:
raise HazardError("failed to build")
def collect_output(self, builder):
"""Gather up the analysis output and place in the upload dir."""
dirs = builder.query_abs_dirs()
analysis_dir = dirs['abs_analysis_dir']
upload_dir = dirs['abs_blob_upload_dir']
@@ -166,32 +166,34 @@ jobs = 4
'hazards',
'list of just the hazards, together with gcFunction reason for each'))
for f, short, long in files:
builder.copy_to_upload_dir(os.path.join(analysis_dir, f),
short_desc=short,
long_desc=long,
compress=False, # blobber will compress
upload_dir=upload_dir)
- print("== Hazards (temporarily inline here, beware weirdly interleaved output, see bug 1211402) ==")
+ print("== Hazards (temporarily inline here, beware weirdly interleaved "
+ "output, see bug 1211402) ==")
print(file(os.path.join(analysis_dir, "hazards.txt")).read())
def upload_results(self, builder):
"""Upload the results of the analysis."""
pass
def check_expectations(self, builder):
"""Compare the actual to expected number of problems."""
if 'expect_file' not in builder.config:
builder.info('No expect_file given; skipping comparison with expected hazard count')
return
dirs = builder.query_abs_dirs()
analysis_dir = dirs['abs_analysis_dir']
- analysis_scriptdir = os.path.join(dirs['gecko_src'], 'js', 'src', 'devtools', 'rootAnalysis')
+ analysis_scriptdir = os.path.join(dirs['gecko_src'], 'js', 'src',
+ 'devtools', 'rootAnalysis')
expect_file = os.path.join(analysis_scriptdir, builder.config['expect_file'])
expect = builder.read_from_file(expect_file)
if expect is None:
raise HazardError("could not load expectation file")
data = json.loads(expect)
num_hazards = 0
num_refs = 0
@@ -211,31 +213,33 @@ jobs = 4
status = []
if expect_hazards is None:
status.append("%d hazards" % num_hazards)
else:
status.append("%d/%d hazards allowed" % (num_hazards, expect_hazards))
if expect_hazards is not None and expect_hazards != num_hazards:
if expect_hazards < num_hazards:
- builder.warning("TEST-UNEXPECTED-FAIL %d more hazards than expected (expected %d, saw %d)" %
- (num_hazards - expect_hazards, expect_hazards, num_hazards))
+ builder.warning("TEST-UNEXPECTED-FAIL %d more hazards than expected "
+ "(expected %d, saw %d)" %
+ (num_hazards - expect_hazards, expect_hazards, num_hazards))
builder.buildbot_status(TBPL_WARNING)
else:
builder.info("%d fewer hazards than expected! (expected %d, saw %d)" %
- (expect_hazards - num_hazards, expect_hazards, num_hazards))
+ (expect_hazards - num_hazards, expect_hazards, num_hazards))
expect_refs = data.get('expect-refs')
if expect_refs is None:
status.append("%d unsafe refs" % num_refs)
else:
status.append("%d/%d unsafe refs allowed" % (num_refs, expect_refs))
if expect_refs is not None and expect_refs != num_refs:
if expect_refs < num_refs:
- builder.warning("TEST-UNEXPECTED-FAIL %d more unsafe refs than expected (expected %d, saw %d)" %
- (num_refs - expect_refs, expect_refs, num_refs))
+ builder.warning("TEST-UNEXPECTED-FAIL %d more unsafe refs than expected "
+ "(expected %d, saw %d)" %
+ (num_refs - expect_refs, expect_refs, num_refs))
builder.buildbot_status(TBPL_WARNING)
else:
builder.info("%d fewer unsafe refs than expected! (expected %d, saw %d)" %
- (expect_refs - num_refs, expect_refs, num_refs))
+ (expect_refs - num_refs, expect_refs, num_refs))
builder.info("TinderboxPrint: " + ", ".join(status))
--- a/testing/mozharness/mozharness/mozilla/checksums.py
+++ b/testing/mozharness/mozharness/mozilla/checksums.py
@@ -7,15 +7,17 @@ def parse_checksums_file(checksums):
size = int(size)
if size < 0:
raise ValueError("Found negative value (%d) for size." % size)
if file_ not in fileInfo:
fileInfo[file_] = {"hashes": {}}
# If the file already exists, make sure that the size matches the
# previous entry.
elif fileInfo[file_]['size'] != size:
- raise ValueError("Found different sizes for same file %s (%s and %s)" % (file_, fileInfo[file_]['size'], size))
+ raise ValueError("Found different sizes for same file %s (%s and %s)" %
+ (file_, fileInfo[file_]['size'], size))
# Same goes for the hash.
elif type_ in fileInfo[file_]['hashes'] and fileInfo[file_]['hashes'][type_] != hash_:
- raise ValueError("Found different %s hashes for same file %s (%s and %s)" % (type_, file_, fileInfo[file_]['hashes'][type_], hash_))
+ raise ValueError("Found different %s hashes for same file %s (%s and %s)" %
+ (type_, file_, fileInfo[file_]['hashes'][type_], hash_))
fileInfo[file_]['size'] = size
fileInfo[file_]['hashes'][type_] = hash_
return fileInfo
--- a/testing/mozharness/mozharness/mozilla/l10n/locales.py
+++ b/testing/mozharness/mozharness/mozilla/l10n/locales.py
@@ -3,17 +3,16 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
"""Localization.
"""
import os
-from urlparse import urljoin
import pprint
import sys
from copy import deepcopy
sys.path.insert(1, os.path.dirname(sys.path[0]))
from mozharness.base.config import parse_config_file
from mozharness.base.parallel import ChunkingMixin
--- a/testing/mozharness/mozharness/mozilla/l10n/multi_locale_build.py
+++ b/testing/mozharness/mozharness/mozilla/l10n/multi_locale_build.py
@@ -130,17 +130,16 @@ class MultiLocaleBuild(LocalesMixin, Mer
command = "make -f client.mk build"
env = self.query_env()
if self._process_command(command=command,
cwd=dirs['abs_mozilla_dir'],
env=env, error_list=MakefileErrorList):
self.fatal("Erroring out after the build failed.")
def add_locales(self):
- c = self.config
dirs = self.query_abs_dirs()
locales = self.query_locales()
for locale in locales:
command = 'make chrome-%s L10NBASEDIR=%s' % (locale, dirs['abs_l10n_dir'])
status = self._process_command(command=command,
cwd=dirs['abs_locales_dir'],
error_list=MakefileErrorList)
@@ -210,17 +209,18 @@ class MultiLocaleBuild(LocalesMixin, Mer
'%s/' % backup_dir],
error_list=SSHErrorList)
def restore_objdir(self):
dirs = self.query_abs_dirs()
rsync = self.query_exe('rsync')
backup_dir = '%s-bak' % dirs['abs_objdir']
if not os.path.isdir(dirs['abs_objdir']) or not os.path.isdir(backup_dir):
- self.warning("Both %s and %s need to exist to restore the objdir! Skipping..." % (dirs['abs_objdir'], backup_dir))
+ self.warning("Both %s and %s need to exist to restore the objdir! Skipping..." %
+ (dirs['abs_objdir'], backup_dir))
return
self.run_command([rsync, '-a', '--delete', '--partial',
'%s/' % backup_dir,
'%s/' % dirs['abs_objdir']],
error_list=SSHErrorList)
def upload_multi(self):
# TODO
@@ -228,11 +228,12 @@ class MultiLocaleBuild(LocalesMixin, Mer
def _process_command(self, **kwargs):
"""Stub wrapper function that allows us to call scratchbox in
MaemoMultiLocaleBuild.
"""
return self.run_command(**kwargs)
+
# __main__ {{{1
if __name__ == '__main__':
pass
--- a/testing/mozharness/mozharness/mozilla/mapper.py
+++ b/testing/mozharness/mozharness/mozilla/mapper.py
@@ -46,19 +46,21 @@ class MapperMixin:
self.info('Mapping %s revision to %s using %s' % (project_name, vcs, url))
n = 1
while n <= attempts:
try:
r = urllib2.urlopen(url, timeout=10)
j = json.loads(r.readline())
if j['%s_rev' % vcs] is None:
if require_answer:
- raise Exception("Mapper returned a revision of None; maybe it needs more time.")
+ raise Exception("Mapper returned a revision of None; "
+ "maybe it needs more time.")
else:
- self.warning("Mapper returned a revision of None. Accepting because require_answer is False.")
+ self.warning("Mapper returned a revision of None. "
+ "Accepting because require_answer is False.")
return j['%s_rev' % vcs]
except Exception, err:
self.warning('Error: %s' % str(err))
if n == attempts:
self.fatal('Giving up on %s %s revision for %s.' % (project_name, vcs, rev))
if sleeptime > 0:
self.info('Sleeping %i seconds before retrying' % sleeptime)
time.sleep(sleeptime)
--- a/testing/mozharness/mozharness/mozilla/merkle.py
+++ b/testing/mozharness/mozharness/mozilla/merkle.py
@@ -1,24 +1,28 @@
#!/usr/bin/env python
import struct
+
def _round2(n):
k = 1
while k < n:
k <<= 1
return k >> 1
+
def _leaf_hash(hash_fn, leaf):
return hash_fn(b'\x00' + leaf).digest()
+
def _pair_hash(hash_fn, left, right):
return hash_fn(b'\x01' + left + right).digest()
+
class InclusionProof:
"""
Represents a Merkle inclusion proof for purposes of serialization,
deserialization, and verification of the proof. The format for inclusion
proofs in RFC 6962-bis is as follows:
opaque LogID<2..127>;
opaque NodeHash<32..2^8-1>;
@@ -51,17 +55,17 @@ class InclusionProof:
@staticmethod
def from_rfc6962_bis(serialized):
start = 0
read = 1
if len(serialized) < start + read:
raise Exception('Inclusion proof too short for log ID header')
log_id_len, = struct.unpack('B', serialized[start:start+read])
start += read
- start += log_id_len # Ignore the log ID itself
+ start += log_id_len # Ignore the log ID itself
read = 8 + 8 + 2
if len(serialized) < start + read:
raise Exception('Inclusion proof too short for middle section')
tree_size, leaf_index, path_len = struct.unpack('!QQH', serialized[start:start+read])
start += read
path_elements = []
@@ -113,20 +117,20 @@ class InclusionProof:
for i, elem in enumerate(self.path_elements):
if lr[i]:
node = _pair_hash(hash_fn, node, elem)
else:
node = _pair_hash(hash_fn, elem, node)
return node
-
def verify(self, hash_fn, leaf, leaf_index, tree_size, tree_head):
return self._expected_head(hash_fn, leaf, leaf_index, tree_size) == tree_head
+
class MerkleTree:
"""
Implements a Merkle tree on a set of data items following the
structure defined in RFC 6962-bis. This allows us to create a
single hash value that summarizes the data (the 'head'), and an
'inclusion proof' for each element that connects it to the head.
https://tools.ietf.org/html/draft-ietf-trans-rfc6962-bis-24
--- a/testing/mozharness/mozharness/mozilla/mock.py
+++ b/testing/mozharness/mozharness/mozilla/mock.py
@@ -13,17 +13,16 @@ import subprocess
import os
ERROR_MSGS = {
'undetermined_buildroot_lock': 'buildroot_lock_path does not exist.\
Nothing to remove.'
}
-
# MockMixin {{{1
class MockMixin(object):
"""Provides methods to setup and interact with mock environments.
https://wiki.mozilla.org/ReleaseEngineering/Applications/Mock
This is dependent on ScriptMixin
"""
done_mock_setup = False
--- a/testing/mozharness/mozharness/mozilla/purge.py
+++ b/testing/mozharness/mozharness/mozilla/purge.py
@@ -63,18 +63,18 @@ class PurgeMixin(object):
cmd.extend([clobberer_url, branch, buildername, builddir, slave, master])
error_list = [{
'substr': 'Error contacting server', 'level': ERROR,
'explanation': 'Error contacting server for clobberer information.'
}]
retval = self.retry(self.run_command, attempts=3, good_statuses=(0,), args=[cmd],
- kwargs={'cwd':os.path.dirname(dirs['base_work_dir']),
- 'error_list':error_list})
+ kwargs={'cwd': os.path.dirname(dirs['base_work_dir']),
+ 'error_list': error_list})
if retval != 0:
self.fatal("failed to clobber build", exit_code=2)
def clobber(self, always_clobber_dirs=None):
""" Mozilla clobberer-type clobber.
"""
c = self.config
if c.get('developer_mode'):
--- a/testing/mozharness/mozharness/mozilla/release.py
+++ b/testing/mozharness/mozharness/mozilla/release.py
@@ -63,10 +63,8 @@ def get_previous_version(version, partia
v != version))
else:
# StrictVersion truncates trailing zero in versions with more than 1
# dot. Compose a structure that will be sorted by StrictVersion and
# return untouched version
composed = sorted([(v, StrictVersion(v)) for v in partial_versions if
v != version], key=lambda x: x[1], reverse=True)
return composed[0][0]
-
-
--- a/testing/mozharness/mozharness/mozilla/repo_manifest.py
+++ b/testing/mozharness/mozharness/mozilla/repo_manifest.py
@@ -19,17 +19,18 @@ def load_manifest(filename):
# Check that we don't have any unsupported tags
to_visit = list(doc.childNodes)
while to_visit:
node = to_visit.pop()
# Skip text nodes
if node.nodeType in (node.TEXT_NODE, node.COMMENT_NODE):
continue
- if node.tagName not in ('include', 'project', 'remote', 'default', 'manifest', 'copyfile', 'remove-project'):
+ if node.tagName not in ('include', 'project', 'remote', 'default', 'manifest', 'copyfile',
+ 'remove-project'):
raise ValueError("Unsupported tag: %s" % node.tagName)
to_visit.extend(node.childNodes)
# Find all <include> nodes
for i in doc.getElementsByTagName('include'):
p = i.parentNode
# The name attribute is relative to where the original manifest lives
--- a/testing/mozharness/mozharness/mozilla/repo_manipulation.py
+++ b/testing/mozharness/mozharness/mozilla/repo_manipulation.py
@@ -155,10 +155,8 @@ the script (--clean-repos --pull --migra
cwd=cwd,
error_list=HgErrorList,
success_codes=[0, 1],
)
if status == 1:
self.warning("No changes for %s!" % cwd)
elif status:
self.fatal(error_message)
-
-
--- a/testing/mozharness/mozharness/mozilla/secrets.py
+++ b/testing/mozharness/mozharness/mozilla/secrets.py
@@ -2,21 +2,18 @@
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
"""Support for fetching secrets from the secrets API
"""
-import os
-import mozharness
import urllib2
import json
-from mozharness.base.log import ERROR
class SecretsMixin(object):
def _fetch_secret(self, secret_name):
self.info("fetching secret {} from API".format(secret_name))
# fetch from http://taskcluster, which points to the taskcluster proxy
# within a taskcluster task. Outside of that environment, do not
--- a/testing/mozharness/mozharness/mozilla/selfserve.py
+++ b/testing/mozharness/mozharness/mozilla/selfserve.py
@@ -1,11 +1,12 @@
import json
import site
+
# SelfServeMixin {{{1
class SelfServeMixin(object):
def _get_session(self):
site_packages_path = self.query_python_site_packages_path()
site.addsitedir(site_packages_path)
import requests
session = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=5)
--- a/testing/mozharness/mozharness/mozilla/signed_certificate_timestamp.py
+++ b/testing/mozharness/mozharness/mozilla/signed_certificate_timestamp.py
@@ -1,13 +1,14 @@
#!/usr/bin/env python
import struct
import base64
+
class SignedCertificateTimestamp:
"""
Represents a Signed Certificate Timestamp from a Certificate Transparency
log, which is how the log indicates that it has seen and logged a
certificate. The format for SCTs in RFC 6962 is as follows:
struct {
Version sct_version;
@@ -70,17 +71,16 @@ class SignedCertificateTimestamp:
self.id = base64.b64decode(response_json['id'])
self.timestamp = response_json['timestamp']
self.signature = base64.b64decode(response_json['signature'])
self.extensions = b''
if 'extensions' in response_json:
self.extensions = base64.b64decode(response_json['extensions'])
-
@staticmethod
def from_rfc6962(serialized):
start = 0
read = 1 + 32 + 8
if len(serialized) < start + read:
raise Exception('SCT too short for version, log ID, and timestamp')
version, = struct.unpack('B', serialized[0])
log_id = serialized[1:1+32]
@@ -115,16 +115,15 @@ class SignedCertificateTimestamp:
sct = SignedCertificateTimestamp()
sct.id = log_id
sct.timestamp = timestamp
sct.extensions = extensions
sct.signature = struct.pack('!HH', alg, sig_len) + sig
return sct
-
def to_rfc6962(self):
version = struct.pack("B", self.version)
timestamp = struct.pack("!Q", self.timestamp)
ext_len = struct.pack("!H", len(self.extensions))
return version + self.id + timestamp + \
- ext_len + self.extensions + self.signature
+ ext_len + self.extensions + self.signature
--- a/testing/mozharness/mozharness/mozilla/taskcluster_helper.py
+++ b/testing/mozharness/mozharness/mozilla/taskcluster_helper.py
@@ -1,15 +1,14 @@
"""Taskcluster module. Defines a few helper functions to call into the taskcluster
client.
"""
import os
from datetime import datetime, timedelta
from urlparse import urljoin
-from mozharness.base.log import INFO
from mozharness.base.log import LogMixin
# Taskcluster {{{1
class Taskcluster(LogMixin):
"""
Helper functions to report data to Taskcluster
@@ -246,17 +245,18 @@ class TaskClusterArtifactFinderMixin(obj
parent_task['extra'].get('locations', {}).get('build')
)
# Case 1: The parent task is a pure TC task
if installer_path:
self.set_artifacts(
self.url_to_artifact(parent_id, installer_path),
self.url_to_artifact(parent_id, 'public/build/target.test_packages.json'),
- self.url_to_artifact(parent_id, 'public/build/target.crashreporter-symbols.zip')
+ self.url_to_artifact(parent_id,
+ 'public/build/target.crashreporter-symbols.zip')
)
else:
# Case 2: The parent task has an associated BBB task
# graph_props.json is uploaded in buildbase.py
self.set_bbb_artifacts(
task_id=parent_id,
properties_file_path='public/build/buildbot_properties.json'
)
@@ -267,10 +267,11 @@ class TaskClusterArtifactFinderMixin(obj
self.set_bbb_artifacts(
task_id=parent_id,
properties_file_path='public/build/buildbot_properties.json'
)
# Use the signed installer if it's set
if 'signed_installer_url' in properties:
signed_installer_url = properties['signed_installer_url']
- self.info('Overriding installer_url with signed_installer_url: %s' % signed_installer_url)
+ self.info('Overriding installer_url with signed_installer_url: %s' %
+ signed_installer_url)
self.installer_url = signed_installer_url
--- a/testing/mozharness/mozharness/mozilla/testing/codecoverage.py
+++ b/testing/mozharness/mozharness/mozilla/testing/codecoverage.py
@@ -6,17 +6,16 @@
import os
import shutil
import tempfile
from mozharness.base.script import (
PreScriptAction,
PostScriptAction,
)
-from mozharness.mozilla.tooltool import TooltoolMixin
code_coverage_config_options = [
[["--code-coverage"],
{"action": "store_true",
"dest": "code_coverage",
"default": False,
"help": "Whether gcov c++ code coverage should be run."
}],
@@ -72,38 +71,42 @@ class CodeCoverageMixin(object):
# Install grcov on the test machine
# Get the path to the build machines gcno files.
self.url_to_gcno = self.query_build_dir_url('target.code-coverage-gcno.zip')
dirs = self.query_abs_dirs()
# Create the grcov directory, get the tooltool manifest, and finally
# download and unpack the grcov binary.
self.grcov_dir = tempfile.mkdtemp()
- manifest = os.path.join(dirs.get('abs_test_install_dir', os.path.join(dirs['abs_work_dir'], 'tests')), \
- 'config/tooltool-manifests/linux64/ccov.manifest')
+ manifest = os.path.join(dirs.get('abs_test_install_dir',
+ os.path.join(dirs['abs_work_dir'], 'tests')),
+ 'config/tooltool-manifests/linux64/ccov.manifest')
tooltool_path = self._fetch_tooltool_py()
- cmd = [tooltool_path, '--url', 'https://tooltool.mozilla-releng.net/', 'fetch', \
- '-m', manifest, '-o', '-c', '/builds/worker/tooltool-cache']
+ cmd = [tooltool_path, '--url', 'https://tooltool.mozilla-releng.net/', 'fetch',
+ '-m', manifest, '-o', '-c', '/builds/worker/tooltool-cache']
self.run_command(cmd, cwd=self.grcov_dir)
- self.run_command(['tar', '-jxvf', os.path.join(self.grcov_dir, 'grcov-linux-standalone-x86_64.tar.bz2'), \
- '-C', self.grcov_dir], cwd=self.grcov_dir)
+ self.run_command(['tar', '-jxvf',
+ os.path.join(self.grcov_dir, 'grcov-linux-standalone-x86_64.tar.bz2'),
+ '-C', self.grcov_dir], cwd=self.grcov_dir)
@PostScriptAction('run-tests')
def _package_coverage_data(self, action, success=None):
if not self.code_coverage_enabled:
return
del os.environ['GCOV_PREFIX']
del os.environ['JS_CODE_COVERAGE_OUTPUT_DIR']
if not self.ccov_upload_disabled:
# TODO This is fragile, find rel_topsrcdir properly somehow
# We need to find the path relative to the gecko topsrcdir. Use
# some known gecko directories as a test.
- canary_dirs = ['browser', 'docshell', 'dom', 'js', 'layout', 'toolkit', 'xpcom', 'xpfe']
+ canary_dirs = [
+ 'browser', 'docshell', 'dom', 'js', 'layout', 'toolkit', 'xpcom', 'xpfe'
+ ]
rel_topsrcdir = None
for root, dirs, files in os.walk(self.gcov_dir):
# need to use 'any' in case no gcda data was generated in that subdir.
if any(d in dirs for d in canary_dirs):
rel_topsrcdir = root
break
else:
# Unable to upload code coverage files. Since this is the whole
@@ -135,20 +138,23 @@ class CodeCoverageMixin(object):
'-t', 'lcov',
'-p', '/builds/worker/workspace/build/src/',
'--ignore-dir', 'gcc',
os.path.join(self.grcov_dir, 'target.code-coverage-gcno.zip'), file_path_gcda
]
# 'grcov_output' will be a tuple, the first variable is the path to the lcov output,
# the other is the path to the standard error output.
- grcov_output = self.get_output_from_command(grcov_command, cwd=self.grcov_dir, \
- silent=True, tmpfile_base_path=os.path.join(self.grcov_dir, 'grcov_lcov_output'), \
- save_tmpfiles=True, return_type='files')
+ grcov_output = self.get_output_from_command(
+ grcov_command, cwd=self.grcov_dir,
+ silent=True, tmpfile_base_path=os.path.join(self.grcov_dir, 'grcov_lcov_output'),
+ save_tmpfiles=True, return_type='files'
+ )
new_output_name = grcov_output[0] + '.info'
os.rename(grcov_output[0], new_output_name)
# Zip the grcov output and upload it.
- command = ['zip', os.path.join(dirs['abs_blob_upload_dir'], 'code-coverage-grcov.zip'), new_output_name]
+ command = ['zip', os.path.join(dirs['abs_blob_upload_dir'],
+ 'code-coverage-grcov.zip'), new_output_name]
self.run_command(command, cwd=self.grcov_dir)
shutil.rmtree(self.gcov_dir)
shutil.rmtree(self.jsvm_dir)
shutil.rmtree(self.grcov_dir)
--- a/testing/mozharness/mozharness/mozilla/testing/device.py
+++ b/testing/mozharness/mozharness/mozilla/testing/device.py
@@ -5,21 +5,18 @@
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
'''Interact with a device via ADB
This code is largely from
https://hg.mozilla.org/build/tools/file/default/sut_tools
'''
-import datetime
-import os
import re
import subprocess
-import sys
import time
from mozharness.base.errors import ADBErrorList
from mozharness.base.log import LogMixin, DEBUG
from mozharness.base.script import ScriptMixin
# Device flags
@@ -112,19 +109,21 @@ class ADBDeviceHandler(BaseDeviceHandler
if device_id:
if auto_connect:
self.ping_device(auto_connect=True)
else:
self.info("Trying to find device...")
devices = self._query_attached_devices()
if not devices:
self.add_device_flag(DEVICE_NOT_CONNECTED)
- self.fatal("No device connected via adb!\nUse 'adb connect' or specify a device_id or device_ip in config!")
+ self.fatal("No device connected via adb!\nUse 'adb connect' or specify a "
+ "device_id or device_ip in config!")
elif len(devices) > 1:
- self.warning("""More than one device detected; specify 'device_id' or\n'device_ip' to target a specific device!""")
+ self.warning("More than one device detected; specify 'device_id' or\n"
+ "'device_ip' to target a specific device!")
device_id = devices[0]
self.info("Found %s." % device_id)
self.device_id = device_id
return self.device_id
# maintenance {{{2
def ping_device(self, auto_connect=False, silent=False):
if auto_connect and not self._query_attached_devices():
@@ -357,19 +356,18 @@ class ADBDeviceHandler(BaseDeviceHandler
# Note that "adb install" typically writes status messages
# to stderr and the adb return code may not differentiate
# successful installations from failures; instead we check
# the command output.
install_complete = False
retries = 0
while retries < 6:
output = self.get_output_from_command([adb, "-s", device_id,
- "install", '-r',
- file_path],
- ignore_errors=True)
+ "install", '-r', file_path],
+ ignore_errors=True)
if output and output.lower().find("success") >= 0:
install_complete = True
break
self.warning("Failed to install %s" % file_path)
time.sleep(30)
retries = retries + 1
if not install_complete:
self.fatal("Failed to install %s!" % file_path)
@@ -474,17 +472,18 @@ class DeviceMixin(object):
def query_device_handler(self):
if self.device_handler:
return self.device_handler
c = self.config
device_protocol = c.get('device_protocol')
device_class = DEVICE_PROTOCOL_DICT.get(device_protocol)
if not device_class:
- self.fatal("Unknown device_protocol %s; set via --device-protocol!" % str(device_protocol))
+ self.fatal("Unknown device_protocol %s; set via --device-protocol!" %
+ str(device_protocol))
self.device_handler = device_class(
log_obj=self.log_obj,
config=self.config,
script_obj=self,
)
return self.device_handler
def check_device(self):
--- a/testing/mozharness/mozharness/mozilla/testing/errors.py
+++ b/testing/mozharness/mozharness/mozilla/testing/errors.py
@@ -14,17 +14,18 @@ whether IGNORE, DEBUG, INFO, WARNING, ER
"""
import re
from mozharness.base.log import INFO, WARNING, ERROR
# ErrorLists {{{1
_mochitest_summary = {
- 'regex': re.compile(r'''(\d+ INFO (Passed|Failed|Todo):\ +(\d+)|\t(Passed|Failed|Todo): (\d+))'''),
+ 'regex': (
+ re.compile(r'''(\d+ INFO (Passed|Failed|Todo):\ +(\d+)|\t(Passed|Failed|Todo): (\d+))''')),
'pass_group': "Passed",
'fail_group': "Failed",
'known_fail_group': "Todo",
}
TinderBoxPrintRe = {
"mochitest_summary": _mochitest_summary,
"mochitest-chrome_summary": _mochitest_summary,
@@ -34,35 +35,38 @@ TinderBoxPrintRe = {
"mochitest-plain-gpu_summary": _mochitest_summary,
"marionette_summary": {
'regex': re.compile(r'''(passed|failed|todo):\ +(\d+)'''),
'pass_group': "passed",
'fail_group': "failed",
'known_fail_group': "todo",
},
"reftest_summary": {
- 'regex': re.compile(r'''REFTEST INFO \| (Successful|Unexpected|Known problems): (\d+) \('''),
+ 'regex': (
+ re.compile(r'''REFTEST INFO \| (Successful|Unexpected|Known problems): (\d+) \(''')),
'pass_group': "Successful",
'fail_group': "Unexpected",
'known_fail_group': "Known problems",
},
"crashtest_summary": {
- 'regex': re.compile(r'''REFTEST INFO \| (Successful|Unexpected|Known problems): (\d+) \('''),
+ 'regex': (
+ re.compile(r'''REFTEST INFO \| (Successful|Unexpected|Known problems): (\d+) \(''')),
'pass_group': "Successful",
'fail_group': "Unexpected",
'known_fail_group': "Known problems",
},
"xpcshell_summary": {
'regex': re.compile(r'''INFO \| (Passed|Failed): (\d+)'''),
'pass_group': "Passed",
'fail_group': "Failed",
'known_fail_group': None,
},
"jsreftest_summary": {
- 'regex': re.compile(r'''REFTEST INFO \| (Successful|Unexpected|Known problems): (\d+) \('''),
+ 'regex': (
+ re.compile(r'''REFTEST INFO \| (Successful|Unexpected|Known problems): (\d+) \(''')),
'pass_group': "Successful",
'fail_group': "Unexpected",
'known_fail_group': "Known problems",
},
"robocop_summary": _mochitest_summary,
"instrumentation_summary": _mochitest_summary,
"cppunittest_summary": {
'regex': re.compile(r'''cppunittests INFO \| (Passed|Failed): (\d+)'''),
@@ -97,31 +101,47 @@ TinderBoxPrintRe = {
"geckoview_summary": {
'regex': re.compile(r'''(Passed|Failed): (\d+)'''),
'pass_group': "Passed",
'fail_group': "Failed",
'known_fail_group': None,
},
"harness_error": {
- 'full_regex': re.compile(r"(?:TEST-UNEXPECTED-FAIL|PROCESS-CRASH) \| .* \| (application crashed|missing output line for total leaks!|negative leaks caught!|\d+ bytes leaked)"),
+ 'full_regex':
+ re.compile(r"(?:TEST-UNEXPECTED-FAIL|PROCESS-CRASH) \| .* \| "
+ r"(application crashed|missing output line for total leaks!|negative leaks "
+ r"caught!|\d+ bytes leaked)"),
'minimum_regex': re.compile(r'''(TEST-UNEXPECTED|PROCESS-CRASH)'''),
- 'retry_regex': re.compile(r'''(FAIL-SHOULD-RETRY|No space left on device|DMError|Connection to the other side was lost in a non-clean fashion|program finished with exit code 80|INFRA-ERROR|twisted.spread.pb.PBConnectionLost|_dl_open: Assertion|Timeout exceeded for _runCmd call)''')
+ 'retry_regex':
+ re.compile(r'(FAIL-SHOULD-RETRY|No space left on device|DMError|'
+ r'Connection to the other side was lost in a non-clean fashion|'
+ r'program finished with exit code 80|INFRA-ERROR|'
+ r'twisted.spread.pb.PBConnectionLost|_dl_open: Assertion|'
+ r'Timeout exceeded for _runCmd call)')
},
}
TestPassed = [
{'regex': re.compile('''(TEST-INFO|TEST-KNOWN-FAIL|TEST-PASS|INFO \| )'''), 'level': INFO},
]
HarnessErrorList = [
{'substr': 'TEST-UNEXPECTED', 'level': ERROR, },
{'substr': 'PROCESS-CRASH', 'level': ERROR, },
{'regex': re.compile('''ERROR: (Address|Leak)Sanitizer'''), 'level': ERROR, },
{'regex': re.compile('''thread '([^']+)' panicked'''), 'level': ERROR, },
]
LogcatErrorList = [
- {'substr': 'Fatal signal 11 (SIGSEGV)', 'level': ERROR, 'explanation': 'This usually indicates the B2G process has crashed'},
- {'substr': 'Fatal signal 7 (SIGBUS)', 'level': ERROR, 'explanation': 'This usually indicates the B2G process has crashed'},
- {'substr': '[JavaScript Error:', 'level': WARNING},
- {'substr': 'seccomp sandbox violation', 'level': ERROR, 'explanation': 'A content process has violated the system call sandbox (bug 790923)'},
+ {'substr':
+ 'Fatal signal 11 (SIGSEGV)',
+ 'level': ERROR, 'explanation': 'This usually indicates the B2G process has crashed'},
+ {'substr':
+ 'Fatal signal 7 (SIGBUS)',
+ 'level': ERROR, 'explanation': 'This usually indicates the B2G process has crashed'},
+ {'substr':
+ '[JavaScript Error:',
+ 'level': WARNING},
+ {'substr':
+ 'seccomp sandbox violation',
+ 'level': ERROR,
+ 'explanation': 'A content process has violated the system call sandbox (bug 790923)'},
]
--- a/testing/mozharness/mozharness/mozilla/testing/firefox_ui_tests.py
+++ b/testing/mozharness/mozharness/mozilla/testing/firefox_ui_tests.py
@@ -5,18 +5,17 @@
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
import copy
import os
import sys
-from mozharness.base.log import FATAL, WARNING
-from mozharness.base.python import PostScriptRun, PreScriptAction
+from mozharness.base.python import PreScriptAction
from mozharness.mozilla.structuredlog import StructuredOutputParser
from mozharness.mozilla.testing.testbase import (
TestingMixin,
testing_config_options,
)
from mozharness.mozilla.testing.codecoverage import (
CodeCoverageMixin,
code_coverage_config_options
@@ -25,17 +24,18 @@ from mozharness.mozilla.vcstools import
# General command line arguments for Firefox ui tests
firefox_ui_tests_config_options = [
[["--allow-software-gl-layers"], {
"action": "store_true",
"dest": "allow_software_gl_layers",
"default": False,
- "help": "Permits a software GL implementation (such as LLVMPipe) to use the GL compositor.",
+ "help": "Permits a software GL implementation (such as LLVMPipe) to use "
+ "the GL compositor.",
}],
[["--enable-webrender"], {
"action": "store_true",
"dest": "enable_webrender",
"default": False,
"help": "Tries to enable the WebRender compositor.",
}],
[['--dry-run'], {
@@ -249,17 +249,18 @@ class FirefoxUITests(TestingMixin, VCSTo
# Set further environment settings
env = env or self.query_env()
env.update({'MINIDUMP_SAVE_PATH': dirs['abs_blob_upload_dir']})
if self.query_minidump_stackwalk():
env.update({'MINIDUMP_STACKWALK': self.minidump_stackwalk_path})
env['RUST_BACKTRACE'] = 'full'
- # If code coverage is enabled, set GCOV_PREFIX and JS_CODE_COVERAGE_OUTPUT_DIR env variables
+ # If code coverage is enabled, set GCOV_PREFIX and JS_CODE_COVERAGE_OUTPUT_DIR
+ # env variables
if self.config.get('code_coverage'):
env['GCOV_PREFIX'] = self.gcov_dir
env['JS_CODE_COVERAGE_OUTPUT_DIR'] = self.jsvm_dir
if self.config['allow_software_gl_layers']:
env['MOZ_LAYERS_ALLOW_SOFTWARE_GL'] = '1'
if self.config['enable_webrender']:
env['MOZ_WEBRENDER'] = '1'
--- a/testing/mozharness/mozharness/mozilla/testing/mozpool.py
+++ b/testing/mozharness/mozharness/mozilla/testing/mozpool.py
@@ -9,79 +9,86 @@
import os
import socket
import sys
from time import sleep
from mozharness.mozilla.buildbot import TBPL_RETRY, TBPL_EXCEPTION
-#TODO - adjust these values
+# TODO - adjust these values
MAX_RETRIES = 20
RETRY_INTERVAL = 60
+
# MozpoolMixin {{{1
class MozpoolMixin(object):
mozpool_handler = None
- mobile_imaging_format= "http://mobile-imaging"
+ mobile_imaging_format = "http://mobile-imaging"
def determine_mozpool_host(self, device):
if "mobile_imaging_format" in self.config:
self.mobile_imaging_format = self.config["mobile_imaging_format"]
hostname = str(self.mobile_imaging_format)[7:]
fqdn = socket.getfqdn(hostname)
imaging_server_fqdn = (str(self.mobile_imaging_format)).replace(hostname, fqdn)
return imaging_server_fqdn
def query_mozpool_handler(self, device=None, mozpool_api_url=None):
- if self.mozpool_handler != None:
+ if self.mozpool_handler is not None:
return self.mozpool_handler
else:
- self.mozpool_api_url = self.determine_mozpool_host(device) if device else mozpool_api_url
- assert self.mozpool_api_url != None, \
+ self.mozpool_api_url = (
+ self.determine_mozpool_host(device) if device else mozpool_api_url
+ )
+ assert self.mozpool_api_url is not None, \
"query_mozpool_handler() requires either a device or mozpool_api_url!"
site_packages_path = self.query_python_site_packages_path()
mph_path = os.path.join(site_packages_path, 'mozpoolclient')
sys.path.append(mph_path)
sys.path.append(site_packages_path)
try:
- from mozpoolclient import MozpoolHandler, MozpoolException, MozpoolConflictException
+ from mozpoolclient import (MozpoolHandler, MozpoolException,
+ MozpoolConflictException)
self.MozpoolException = MozpoolException
self.MozpoolConflictException = MozpoolConflictException
self.mozpool_handler = MozpoolHandler(self.mozpool_api_url, log_obj=self)
except ImportError, e:
self.fatal("Can't instantiate MozpoolHandler until mozpoolclient python "
"package is installed! (VirtualenvMixin?): \n%s" % str(e))
return self.mozpool_handler
def retrieve_android_device(self, b2gbase):
mph = self.query_mozpool_handler(self.mozpool_device)
for retry in self._retry_sleep(
error_message="INFRA-ERROR: Could not request device '%s'" % self.mozpool_device,
tbpl_status=TBPL_RETRY):
try:
image = 'panda-android-4.0.4_v3.3'
- duration = 4 * 60 * 60 # request valid for 14400 seconds == 4 hours
- response = mph.request_device(self.mozpool_device, image, assignee=self.mozpool_assignee, \
- b2gbase=b2gbase, pxe_config=None, duration=duration)
+ duration = 4 * 60 * 60 # request valid for 14400 seconds == 4 hours
+ response = mph.request_device(self.mozpool_device, image,
+ assignee=self.mozpool_assignee, b2gbase=b2gbase,
+ pxe_config=None, duration=duration)
break
except self.MozpoolConflictException:
self.warning("Device unavailable. Retry#%i.." % retry)
except self.MozpoolException, e:
self.buildbot_status(TBPL_RETRY)
self.fatal("We could not request the device: %s" % str(e))
self.request_url = response['request']['url']
self.info("Got request, url=%s" % self.request_url)
self._wait_for_request_ready()
def _retry_job_and_close_request(self, message, exception=None):
mph = self.query_mozpool_handler(self.mozpool_device)
- exception_message = str(exception) if exception!=None and str(exception) != None else ""
+ exception_message = (
+ str(exception) if exception is not None and str(exception) is not None else ""
+ )
self.error("%s -> %s" % (message, exception_message))
if self.request_url:
mph.close_request(self.request_url)
self.buildbot_status(TBPL_RETRY)
self.fatal(message)
def _retry_sleep(self, sleep_time=RETRY_INTERVAL, max_retries=MAX_RETRIES,
error_message=None, tbpl_status=None, fail_cb=None):
@@ -94,20 +101,22 @@ class MozpoolMixin(object):
self.buildbot_status(tbpl_status)
if fail_cb:
assert callable(fail_cb)
fail_cb()
self.fatal('Retries limit exceeded')
def _wait_for_request_ready(self):
mph = self.query_mozpool_handler(self.mozpool_device)
+
def on_fail():
# Device is not ready after retries...
self.info("Aborting mozpool request.")
self.close_request()
for retry in self._retry_sleep(sleep_time=RETRY_INTERVAL, max_retries=MAX_RETRIES,
- error_message="INFRA-ERROR: Request did not become ready in time",
- tbpl_status=TBPL_EXCEPTION, fail_cb=on_fail):
+ error_message="INFRA-ERROR: Request did not become "
+ "ready in time",
+ tbpl_status=TBPL_EXCEPTION, fail_cb=on_fail):
response = mph.query_request_status(self.request_url)
state = response['state']
if state == 'ready':
return
self.info("Waiting for request 'ready' stage. Current state: '%s'" % state)
--- a/testing/mozharness/mozharness/mozilla/testing/talos.py
+++ b/testing/mozharness/mozharness/mozilla/testing/talos.py
@@ -43,21 +43,23 @@ TalosErrorList = PythonErrorList + [
{'substr': r'''FAIL: Graph server unreachable''', 'level': CRITICAL},
{'substr': r'''FAIL: Busted:''', 'level': CRITICAL},
{'substr': r'''FAIL: failed to cleanup''', 'level': ERROR},
{'substr': r'''erfConfigurator.py: Unknown error''', 'level': CRITICAL},
{'substr': r'''talosError''', 'level': CRITICAL},
{'regex': re.compile(r'''No machine_name called '.*' can be found'''), 'level': CRITICAL},
{'substr': r"""No such file or directory: 'browser_output.txt'""",
'level': CRITICAL,
- 'explanation': r"""Most likely the browser failed to launch, or the test was otherwise unsuccessful in even starting."""},
+ 'explanation': r"""Most likely the browser failed to launch, or the test was """
+ r"""otherwise unsuccessful in even starting."""
]
# TODO: check for running processes on script invocation
+
class TalosOutputParser(OutputParser):
minidump_regex = re.compile(r'''talosError: "error executing: '(\S+) (\S+) (\S+)'"''')
RE_PERF_DATA = re.compile(r'.*PERFHERDER_DATA:\s+(\{.*\})')
worst_tbpl_status = TBPL_SUCCESS
def __init__(self, **kwargs):
super(TalosOutputParser, self).__init__(**kwargs)
self.minidump_output = None
@@ -193,31 +195,38 @@ class Talos(TestingMixin, MercurialScrip
self.talos_json = self.config.get("talos_json")
self.talos_json_config = self.config.get("talos_json_config")
self.repo_path = self.config.get("repo_path")
self.obj_path = self.config.get("obj_path")
self.tests = None
self.gecko_profile = self.config.get('gecko_profile')
self.gecko_profile_interval = self.config.get('gecko_profile_interval')
self.pagesets_name = None
- self.mitmproxy_rel_bin = None # some platforms download a mitmproxy release binary
- self.mitmproxy_recording_set = None # zip file found on tooltool that contains all of the mitmproxy recordings
- self.mitmproxy_recordings_file_list = self.config.get('mitmproxy', None) # files inside the recording set
- self.mitmdump = None # path to mitdump tool itself, in py3 venv
+ # some platforms download a mitmproxy release binary
+ self.mitmproxy_rel_bin = None
+ # zip file found on tooltool that contains all of the mitmproxy recordings
+ self.mitmproxy_recording_set = None
+ # files inside the recording set
+ self.mitmproxy_recordings_file_list = self.config.get('mitmproxy', None)
+ # path to mitdump tool itself, in py3 venv
+ self.mitmdump = None
- # We accept some configuration options from the try commit message in the format mozharness: <options>
+ # We accept some configuration options from the try commit message
+ # in the format mozharness: <options>
# Example try commit message:
# mozharness: --geckoProfile try: <stuff>
def query_gecko_profile_options(self):
gecko_results = []
if self.buildbot_config:
# this is inside automation
# now let's see if we added GeckoProfile specs in the commit message
try:
- junk, junk, opts = self.buildbot_config['sourcestamp']['changes'][-1]['comments'].partition('mozharness:')
+ junk, junk, opts = self.buildbot_config[
+     'sourcestamp']['changes'][-1][
+     'comments'].partition('mozharness:')
except IndexError:
# when we don't have comments on changes (bug 1255187)
opts = None
if opts:
# In the case of a multi-line commit message, only examine
# the first line for mozharness options
opts = opts.split('\n')[0]
@@ -244,17 +253,18 @@ class Talos(TestingMixin, MercurialScrip
['--geckoProfileInterval', str(self.gecko_profile_interval)]
)
return gecko_results
def query_abs_dirs(self):
if self.abs_dirs:
return self.abs_dirs
abs_dirs = super(Talos, self).query_abs_dirs()
- abs_dirs['abs_blob_upload_dir'] = os.path.join(abs_dirs['abs_work_dir'], 'blobber_upload_dir')
+ abs_dirs['abs_blob_upload_dir'] = os.path.join(abs_dirs['abs_work_dir'],
+ 'blobber_upload_dir')
self.abs_dirs = abs_dirs
return self.abs_dirs
def query_talos_json_config(self):
"""Return the talos json config."""
if self.talos_json_config:
return self.talos_json_config
if not self.talos_json:
@@ -283,17 +293,18 @@ class Talos(TestingMixin, MercurialScrip
talos_opts = self.talos_json_config['suites'][self.suite].get('talos_options', None)
for index, val in enumerate(talos_opts):
if val == '--mitmproxy':
self.mitmproxy_recordings_file_list = talos_opts[index + 1]
return self.mitmproxy_recordings_file_list
def get_suite_from_test(self):
""" Retrieve the talos suite name from a given talos test name."""
- # running locally, single test name provided instead of suite; go through tests and find suite name
+ # running locally, single test name provided instead of suite;
+ # go through tests and find suite name
suite_name = None
if self.query_talos_json_config():
if '-a' in self.config['talos_extra_options']:
test_name_index = self.config['talos_extra_options'].index('-a') + 1
if '--activeTests' in self.config['talos_extra_options']:
test_name_index = self.config['talos_extra_options'].index('--activeTests') + 1
if test_name_index < len(self.config['talos_extra_options']):
test_name = self.config['talos_extra_options'][test_name_index]
@@ -305,25 +316,26 @@ class Talos(TestingMixin, MercurialScrip
self.fatal("Test name is missing or invalid")
else:
self.fatal("Talos json config not found, cannot verify suite")
return suite_name
def validate_suite(self):
""" Ensure suite name is a valid talos suite. """
if self.query_talos_json_config() and self.suite is not None:
- if not self.suite in self.talos_json_config.get('suites'):
+ if self.suite not in self.talos_json_config.get('suites'):
self.fatal("Suite '%s' is not valid (not found in talos json config)" % self.suite)
def talos_options(self, args=None, **kw):
"""return options to talos"""
# binary path
binary_path = self.binary_path or self.config.get('binary_path')
if not binary_path:
- self.fatal("Talos requires a path to the binary. You can specify binary_path or add download-and-extract to your action list.")
+ self.fatal("Talos requires a path to the binary. You can specify binary_path or "
+ "add download-and-extract to your action list.")
# talos options
options = []
# talos can't gather data if the process name ends with '.exe'
if binary_path.endswith('.exe'):
binary_path = binary_path[:-4]
# options overwritten from **kw
kw_options = {'executablePath': binary_path}
@@ -369,17 +381,18 @@ class Talos(TestingMixin, MercurialScrip
self.talos_path = os.path.join(
self.query_abs_dirs()['abs_work_dir'], 'tests', 'talos'
)
# need to determine if talos pageset is required to be downloaded
if self.config.get('run_local'):
# talos initiated locally, get and verify test/suite from cmd line
self.talos_path = os.path.dirname(self.talos_json)
- if '-a' in self.config['talos_extra_options'] or '--activeTests' in self.config['talos_extra_options']:
+ if ('-a' in self.config['talos_extra_options'] or
+         '--activeTests' in self.config['talos_extra_options']):
# test name (-a or --activeTests) specified, find out what suite it is a part of
self.suite = self.get_suite_from_test()
elif '--suite' in self.config['talos_extra_options']:
# --suite specified, get suite from cmd line and ensure is valid
suite_name_index = self.config['talos_extra_options'].index('--suite') + 1
if suite_name_index < len(self.config['talos_extra_options']):
self.suite = self.config['talos_extra_options'][suite_name_index]
self.validate_suite()
@@ -437,17 +450,18 @@ class Talos(TestingMixin, MercurialScrip
def setup_py3_virtualenv(self):
"""Mitmproxy needs Python 3.x; set up a separate py 3.x env here"""
self.info("Setting up python 3.x virtualenv, required for mitmproxy")
# first download the py3 package
self.py3_path = self.fetch_python3()
# now create the py3 venv
self.py3_venv_configuration(python_path=self.py3_path, venv_path='py3venv')
self.py3_create_venv()
- requirements = [os.path.join(self.talos_path, 'talos', 'mitmproxy', 'mitmproxy_requirements.txt')]
+ requirements = [os.path.join(self.talos_path, 'talos', 'mitmproxy',
+ 'mitmproxy_requirements.txt')]
self.py3_install_requirement_files(requirements)
# add py3 executables path to system path
sys.path.insert(1, self.py3_path_to_executables())
def install_mitmproxy(self):
"""Install the mitmproxy tool into the Python 3.x env"""
if 'win' in self.platform_name():
self.info("Installing mitmproxy")
@@ -464,27 +478,30 @@ class Talos(TestingMixin, MercurialScrip
_platform = 'osx'
else:
_platform = 'linux64'
self.query_mitmproxy_rel_bin(_platform)
if self.mitmproxy_rel_bin is None:
self.fatal("Aborting: mitmproxy_release_bin_osx not found in talos.json")
self.download_mitmproxy_binary(_platform)
else:
- self.info("Not downloading mitmproxy rel binary because no-download was specified")
+ self.info("Not downloading mitmproxy rel binary because "
+ "no-download was specified")
self.info('The mitmdump macosx binary is found at: %s' % self.mitmdump)
self.run_command([self.mitmdump, '--version'], env=self.query_env())
def query_mitmproxy_rel_bin(self, platform):
"""Mitmproxy requires external playback archives to be downloaded and extracted"""
if self.mitmproxy_rel_bin:
return self.mitmproxy_rel_bin
if self.query_talos_json_config() and self.suite is not None:
config_key = "mitmproxy_release_bin_" + platform
- self.mitmproxy_rel_bin = self.talos_json_config['suites'][self.suite].get(config_key, False)
+ self.mitmproxy_rel_bin = (
+ self.talos_json_config['suites'][self.suite].get(config_key, False)
+ )
return self.mitmproxy_rel_bin
def download_mitmproxy_binary(self, platform):
"""Download the mitmproxy release binary from tooltool"""
self.info("Downloading the mitmproxy release binary using tooltool")
dest = os.path.join(self.talos_path, 'talos', 'mitmproxy')
_manifest = "mitmproxy-rel-bin-%s.manifest" % platform
manifest_file = os.path.join(self.talos_path, 'talos', 'mitmproxy', _manifest)
@@ -501,24 +518,27 @@ class Talos(TestingMixin, MercurialScrip
unzip_cmd = [tar, '-xvzf', archive, '-C', dest]
self.run_command(unzip_cmd, halt_on_failure=True)
def query_mitmproxy_recording_set(self):
"""Mitmproxy requires external playback archives to be downloaded and extracted"""
if self.mitmproxy_recording_set:
return self.mitmproxy_recording_set
if self.query_talos_json_config() and self.suite is not None:
- self.mitmproxy_recording_set = self.talos_json_config['suites'][self.suite].get('mitmproxy_recording_set', False)
+ self.mitmproxy_recording_set = (
+ self.talos_json_config['suites'][self.suite].get('mitmproxy_recording_set', False)
+ )
return self.mitmproxy_recording_set
def download_mitmproxy_recording_set(self):
"""Download the set of mitmproxy recording files that will be played back"""
self.info("Downloading the mitmproxy recording set using tooltool")
dest = os.path.join(self.talos_path, 'talos', 'mitmproxy')
- manifest_file = os.path.join(self.talos_path, 'talos', 'mitmproxy', 'mitmproxy-playback-set.manifest')
+ manifest_file = os.path.join(self.talos_path, 'talos', 'mitmproxy',
+ 'mitmproxy-playback-set.manifest')
self.tooltool_fetch(
manifest_file,
output_dir=dest,
cache=self.config.get('tooltool_cache')
)
archive = os.path.join(dest, self.mitmproxy_recording_set)
unzip = self.query_exe('unzip')
unzip_cmd = [unzip, '-q', '-o', archive, '-d', dest]
@@ -592,17 +612,16 @@ class Talos(TestingMixin, MercurialScrip
parser.update_worst_log_and_tbpl_levels(WARNING, TBPL_WARNING)
def _artifact_perf_data(self, dest):
src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'local.json')
try:
shutil.copyfile(src, dest)
except:
self.critical("Error copying results %s to upload dir %s" % (src, dest))
- parser.update_worst_log_and_tbpl_levels(CRITICAL, TBPL_FAILURE)
def run_tests(self, args=None, **kw):
"""run Talos tests"""
# get talos options
options = self.talos_options(args=args, **kw)
# XXX temporary python version check
--- a/testing/mozharness/mozharness/mozilla/testing/testbase.py
+++ b/testing/mozharness/mozharness/mozilla/testing/testbase.py
@@ -4,20 +4,18 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
import copy
import os
import platform
import pprint
-import re
import urllib2
import json
-import socket
from urlparse import urlparse, ParseResult
from mozharness.base.errors import BaseErrorList
from mozharness.base.log import FATAL, WARNING
from mozharness.base.python import (
ResourceMonitoringMixin,
VirtualenvMixin,
virtualenv_config_options,
@@ -47,62 +45,63 @@ TOOLTOOL_PLATFORM_DIR = {
'win64': 'win32',
'macosx': 'macosx64',
}
testing_config_options = [
[["--installer-url"],
{"action": "store",
- "dest": "installer_url",
- "default": None,
- "help": "URL to the installer to install",
+ "dest": "installer_url",
+ "default": None,
+ "help": "URL to the installer to install",
}],
[["--installer-path"],
{"action": "store",
- "dest": "installer_path",
- "default": None,
- "help": "Path to the installer to install. This is set automatically if run with --download-and-extract.",
+ "dest": "installer_path",
+ "default": None,
+ "help": "Path to the installer to install. This is set automatically if run with "
+ "--download-and-extract.",
}],
[["--binary-path"],
{"action": "store",
- "dest": "binary_path",
- "default": None,
- "help": "Path to installed binary. This is set automatically if run with --install.",
+ "dest": "binary_path",
+ "default": None,
+ "help": "Path to installed binary. This is set automatically if run with --install.",
}],
[["--exe-suffix"],
{"action": "store",
- "dest": "exe_suffix",
- "default": None,
- "help": "Executable suffix for binaries on this platform",
+ "dest": "exe_suffix",
+ "default": None,
+ "help": "Executable suffix for binaries on this platform",
}],
[["--test-url"],
{"action": "store",
- "dest": "test_url",
- "default": None,
- "help": "URL to the zip file containing the actual tests",
+ "dest": "test_url",
+ "default": None,
+ "help": "URL to the zip file containing the actual tests",
}],
[["--test-packages-url"],
{"action": "store",
- "dest": "test_packages_url",
- "default": None,
- "help": "URL to a json file describing which tests archives to download",
+ "dest": "test_packages_url",
+ "default": None,
+ "help": "URL to a json file describing which tests archives to download",
}],
[["--jsshell-url"],
{"action": "store",
- "dest": "jsshell_url",
- "default": None,
- "help": "URL to the jsshell to install",
+ "dest": "jsshell_url",
+ "default": None,
+ "help": "URL to the jsshell to install",
}],
[["--download-symbols"],
{"action": "store",
- "dest": "download_symbols",
- "type": "choice",
- "choices": ['ondemand', 'true'],
- "help": "Download and extract crash reporter symbols.",
+ "dest": "download_symbols",
+ "type": "choice",
+ "choices": ['ondemand', 'true'],
+ "help": "Download and extract crash reporter symbols.",
}],
] + copy.deepcopy(virtualenv_config_options) \
+ copy.deepcopy(try_config_options) \
+ copy.deepcopy(verify_config_options)
# TestingMixin {{{1
class TestingMixin(VirtualenvMixin, BuildbotMixin, ResourceMonitoringMixin,
@@ -207,17 +206,17 @@ class TestingMixin(VirtualenvMixin, Buil
# Check if the URL exists. If not, use none to allow mozcrash to auto-check for symbols
try:
if symbols_url:
self._urlopen(symbols_url, timeout=120)
self.symbols_url = symbols_url
except Exception as ex:
self.warning("Cannot open symbols url %s (installer url: %s): %s" %
- (symbols_url, self.installer_url, ex))
+ (symbols_url, self.installer_url, ex))
if raise_on_failure:
raise
# If no symbols URL can be determined let minidump_stackwalk query the symbols.
# As of now this only works for Nightly and release builds.
if not self.symbols_url:
self.warning("No symbols_url found. Let minidump_stackwalk query for symbols.")
@@ -238,34 +237,35 @@ class TestingMixin(VirtualenvMixin, Buil
* --installer-url is set
* --test-url is set if needed
* every url is substituted by another external to the
Release Engineering network
"""
c = self.config
orig_config = copy.deepcopy(c)
self.warning("When you use developer_config.py, we drop "
- "'read-buildbot-config' from the list of actions.")
+ "'read-buildbot-config' from the list of actions.")
if "read-buildbot-config" in rw_config.actions:
rw_config.actions.remove("read-buildbot-config")
self.actions = tuple(rw_config.actions)
def _replace_url(url, changes):
for from_, to_ in changes:
if url.startswith(from_):
new_url = url.replace(from_, to_)
self.info("Replacing url %s -> %s" % (url, new_url))
return new_url
return url
if c.get("installer_url") is None:
self.exception("You must use --installer-url with developer_config.py")
if c.get("require_test_zip"):
if not c.get('test_url') and not c.get('test_packages_url'):
- self.exception("You must use --test-url or --test-packages-url with developer_config.py")
+ self.exception("You must use --test-url or "
+ "--test-packages-url with developer_config.py")
c["installer_url"] = _replace_url(c["installer_url"], c["replace_urls"])
if c.get("test_url"):
c["test_url"] = _replace_url(c["test_url"], c["replace_urls"])
if c.get("test_packages_url"):
c["test_packages_url"] = _replace_url(c["test_packages_url"], c["replace_urls"])
for key, value in self.config.iteritems():
@@ -287,17 +287,18 @@ class TestingMixin(VirtualenvMixin, Buil
if not hasattr(self, "https_username"):
self.info("NOTICE: Files downloaded from outside of "
"Release Engineering network require LDAP "
"credentials.")
self.https_username, self.https_password = get_credentials()
# This creates a password manager
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
- # Because we have put None at the start it will use this username/password combination from here on
+ # Because we have put None at the start it will use this username/password combination
+ # from here on
passman.add_password(None, url, self.https_username, self.https_password)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
return urllib2.build_opener(authhandler).open(url, **kwargs)
# If we have the developer_run flag enabled then we will switch
# URLs to the right place and enable http authentication
if "developer_config.py" in self.config["config_files"]:
@@ -306,41 +307,42 @@ class TestingMixin(VirtualenvMixin, Buil
return urllib2.urlopen(url, **kwargs)
# read_buildbot_config is in BuildbotMixin.
def find_artifacts_from_buildbot_changes(self):
c = self.config
try:
files = self.buildbot_config['sourcestamp']['changes'][-1]['files']
- buildbot_prop_branch = self.buildbot_config['properties']['branch']
# Bug 868490 - Only require exactly two files if require_test_zip;
# otherwise accept either 1 or 2, since we'll be getting a
# test_zip url that we don't need.
expected_length = [1, 2, 3]
if c.get("require_test_zip") and not self.test_url:
expected_length = [2, 3]
actual_length = len(files)
if actual_length not in expected_length:
- self.fatal("Unexpected number of files in buildbot config %s.\nExpected these number(s) of files: %s, but got: %d" %
+ self.fatal("Unexpected number of files in buildbot config %s.\n"
+ "Expected these number(s) of files: %s, but got: %d" %
(c['buildbot_json_path'], str(expected_length), actual_length))
for f in files:
if f['name'].endswith('tests.zip'): # yuk
if not self.test_url:
# str() because of unicode issues on mac
self.test_url = str(f['name'])
self.info("Found test url %s." % self.test_url)
elif f['name'].endswith('crashreporter-symbols.zip'): # yuk
self.symbols_url = str(f['name'])
self.info("Found symbols url %s." % self.symbols_url)
elif f['name'].endswith('test_packages.json'):
self.test_packages_url = str(f['name'])
self.info("Found a test packages url %s." % self.test_packages_url)
- elif not any(f['name'].endswith(s) for s in ('code-coverage-gcno.zip', 'stylo-bindings.zip')):
+ elif not any(f['name'].endswith(s) for s in ('code-coverage-gcno.zip',
+ 'stylo-bindings.zip')):
if not self.installer_url:
self.installer_url = str(f['name'])
self.info("Found installer url %s." % self.installer_url)
except IndexError, e:
self.error(str(e))
def find_artifacts_from_taskcluster(self):
self.info("Finding installer, test and symbols from parent task. ")
@@ -396,17 +398,18 @@ class TestingMixin(VirtualenvMixin, Buil
message += """installer_url isn't set!
You can set this by:
1. specifying --installer-url URL, or
2. running via buildbot and running the read-buildbot-config action
"""
- if self.config.get("require_test_zip") and not self.test_url and not self.test_packages_url:
+ if (self.config.get("require_test_zip")
+ and not self.test_url and not self.test_packages_url):
message += """test_url isn't set!
You can set this by:
1. specifying --test-url URL, or
2. running via buildbot and running the read-buildbot-config action
"""
@@ -521,20 +524,18 @@ 2. running via buildbot and running the
self.info("Structured output parser in use for %s." % suite_category)
return StructuredOutputParser(suite_category=suite_category, strict=strict, **kwargs)
def _download_installer(self):
file_name = None
if self.installer_path:
file_name = self.installer_path
dirs = self.query_abs_dirs()
- source = self.download_file(self.installer_url,
- file_name=file_name,
- parent_dir=dirs['abs_work_dir'],
- error_level=FATAL)
+ source = self.download_file(self.installer_url, file_name=file_name,
+ parent_dir=dirs['abs_work_dir'], error_level=FATAL)
self.installer_path = os.path.realpath(source)
self.set_buildbot_property("build_url", self.installer_url, write_to_file=True)
def _download_and_extract_symbols(self):
dirs = self.query_abs_dirs()
if self.config.get('download_symbols') == 'ondemand':
self.symbols_url = self.query_symbols_url()
self.symbols_path = self.symbols_url
@@ -614,17 +615,18 @@ 2. running the download-and-extract acti
""")
if not self.is_python_package_installed("mozInstall"):
self.fatal("""Can't call install() without mozinstall!
Did you run with --create-virtualenv? Is mozinstall in virtualenv_modules?""")
def install_app(self, app=None, target_dir=None, installer_path=None):
""" Dependent on mozinstall """
# install the application
- cmd = self.query_exe("mozinstall", default=self.query_python_path("mozinstall"), return_type="list")
+ cmd = self.query_exe("mozinstall", default=self.query_python_path("mozinstall"),
+ return_type="list")
if app:
cmd.extend(['--app', app])
# Remove the below when we no longer need to support mozinstall 0.3
self.info("Detecting whether we're running mozinstall >=1.0...")
output = self.get_output_from_command(cmd + ['-h'])
if '--source' in output:
cmd.append('--source')
# End remove
@@ -715,17 +717,17 @@ Did you run with --create-virtualenv? Is
self.fatal('Could not determine nodejs filename')
def query_nodejs(self, manifest=None):
if self.nodejs_path:
return self.nodejs_path
c = self.config
- dirs = self.query_abs_dirs();
+ dirs = self.query_abs_dirs()
nodejs_path = self.query_nodejs_filename()
if not self.config.get('download_nodejs'):
self.nodejs_path = nodejs_path
return self.nodejs_path
if not manifest:
tooltool_manifest_path = self.query_nodejs_tooltool_manifest()
@@ -745,17 +747,18 @@ Did you run with --create-virtualenv? Is
abs_nodejs_path = os.path.join(dirs['abs_work_dir'], nodejs_path)
if os.path.exists(abs_nodejs_path):
if self.platform_name() not in ('win32', 'win64'):
self.chmod(abs_nodejs_path, 0755)
self.nodejs_path = abs_nodejs_path
else:
- self.warning("nodejs path was given but couldn't be found. Tried looking in '%s'" % abs_nodejs_path)
+ self.warning("nodejs path was given but couldn't be found. "
+ "Tried looking in '%s'" % abs_nodejs_path)
self.buildbot_status(TBPL_WARNING, WARNING)
return self.nodejs_path
def query_minidump_stackwalk(self, manifest=None):
if self.minidump_stackwalk_path:
return self.minidump_stackwalk_path
--- a/testing/mozharness/mozharness/mozilla/testing/try_tools.py
+++ b/testing/mozharness/mozharness/mozilla/testing/try_tools.py
@@ -11,28 +11,28 @@ import re
from collections import defaultdict
from mozharness.base.script import PostScriptAction
from mozharness.base.transfer import TransferMixin
try_config_options = [
[["--try-message"],
{"action": "store",
- "dest": "try_message",
- "default": None,
- "help": "try syntax string to select tests to run",
+ "dest": "try_message",
+ "default": None,
+ "help": "try syntax string to select tests to run",
}],
]
test_flavors = {
'browser-chrome': {},
'chrome': {},
'devtools-chrome': {},
'mochitest': {},
- 'xpcshell' :{},
+ 'xpcshell': {},
'reftest': {
"path": lambda x: os.path.join("tests", "reftest", "tests", x)
},
'crashtest': {
"path": lambda x: os.path.join("tests", "reftest", "tests", x)
},
'web-platform-tests': {
"path": lambda x: os.path.join("tests", x.split("testing" + os.path.sep)[1])
@@ -40,16 +40,17 @@ test_flavors = {
'web-platform-tests-reftests': {
"path": lambda x: os.path.join("tests", x.split("testing" + os.path.sep)[1])
},
'web-platform-tests-wdspec': {
"path": lambda x: os.path.join("tests", x.split("testing" + os.path.sep)[1])
},
}
+
class TryToolsMixin(TransferMixin):
"""Utility functions for an interface between try syntax and out test harnesses.
Requires log and script mixins."""
harness_extra_args = None
try_test_paths = {}
known_try_arguments = {
'--tag': ({
@@ -138,17 +139,17 @@ class TryToolsMixin(TransferMixin):
# args later on.
if line.startswith('"') and line.endswith('"'):
line = line[1:-1]
# Allow spaces inside of [filter expressions]
try_message = line.strip().split('try: ', 1)
all_try_args = re.findall(r'(?:\[.*?\]|\S)+', try_message[1])
break
if not all_try_args:
- self.warning('Try syntax not found in: %s.' % msg )
+ self.warning('Try syntax not found in: %s.' % msg)
return all_try_args
def try_message_has_flag(self, flag, message=None):
"""
Returns True if --`flag` is present in message.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--' + flag, action='store_true')
@@ -190,16 +191,17 @@ class TryToolsMixin(TransferMixin):
if not all_try_args:
return
parser = argparse.ArgumentParser(
description=('Parse an additional subset of arguments passed to try syntax'
' and forward them to the underlying test harness command.'))
label_dict = {}
+
def label_from_val(val):
if val in label_dict:
return label_dict[val]
return '--%s' % val.replace('_', '-')
for label, (opts, _) in self.known_try_arguments.iteritems():
if 'action' in opts and opts['action'] not in ('append', 'store',
'store_true', 'store_false'):
@@ -252,17 +254,17 @@ class TryToolsMixin(TransferMixin):
if self.harness_extra_args:
args = self.harness_extra_args.get(flavor, [])[:]
if self.try_test_paths.get(flavor):
self.info('TinderboxPrint: Tests will be run from the following '
'files: %s.' % ','.join(self.try_test_paths[flavor]))
args.extend(['--this-chunk=1', '--total-chunks=1'])
- path_func = test_flavors[flavor].get("path", lambda x:x)
+ path_func = test_flavors[flavor].get("path", lambda x: x)
tests = [path_func(os.path.normpath(item)) for item in self.try_test_paths[flavor]]
else:
tests = []
if args or tests:
self.info('TinderboxPrint: The following arguments were forwarded from mozharness '
'to the test command:\nTinderboxPrint: \t%s -- %s' %
(" ".join(args), " ".join(tests)))
--- a/testing/mozharness/mozharness/mozilla/testing/unittest.py
+++ b/testing/mozharness/mozharness/mozilla/testing/unittest.py
@@ -181,23 +181,21 @@ class DesktopUnittestOutputParser(Output
# only if fail_count was more then 0 but also if fail_count is still -1
# (no fail summary line was found)
if self.fail_count != 0:
self.worst_log_level = self.worst_level(WARNING, self.worst_log_level)
self.tbpl_status = self.worst_level(TBPL_WARNING, self.tbpl_status,
levels=TBPL_WORST_LEVEL_TUPLE)
# Account for the possibility that no test summary was output.
- if self.pass_count <= 0 and self.fail_count <= 0 and \
- (self.known_fail_count is None or self.known_fail_count <= 0):
+ if self.pass_count <= 0 and self.fail_count <= 0 and (self.known_fail_count is None
+ or self.known_fail_count <= 0):
self.error('No tests run or test summary not found')
- self.worst_log_level = self.worst_level(WARNING,
- self.worst_log_level)
- self.tbpl_status = self.worst_level(TBPL_WARNING,
- self.tbpl_status,
+ self.worst_log_level = self.worst_level(WARNING, self.worst_log_level)
+ self.tbpl_status = self.worst_level(TBPL_WARNING, self.tbpl_status,
levels=TBPL_WORST_LEVEL_TUPLE)
if return_code not in success_codes:
self.tbpl_status = self.worst_level(TBPL_FAILURE, self.tbpl_status,
levels=TBPL_WORST_LEVEL_TUPLE)
# we can trust in parser.worst_log_level in either case
return (self.tbpl_status, self.worst_log_level)
--- a/testing/mozharness/mozharness/mozilla/testing/verify_tools.py
+++ b/testing/mozharness/mozharness/mozilla/testing/verify_tools.py
@@ -1,16 +1,15 @@
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
-import argparse
import os
import posixpath
import re
import sys
import mozinfo
from manifestparser import TestManifest
from mozharness.base.script import PostScriptAction
@@ -38,17 +37,17 @@ class VerifyToolsMixin(object):
For each file modified on this push, determine if the modified file
is a test, by searching test manifests. Populate self.verify_suites
with test files, organized by suite.
This depends on test manifests, so can only run after test zips have
been downloaded and extracted.
"""
- if self.config.get('verify') != True:
+ if not self.config.get('verify'):
return
repository = os.environ.get("GECKO_HEAD_REPOSITORY")
revision = os.environ.get("GECKO_HEAD_REV")
if not repository or not revision:
self.warning("unable to verify tests: no repo or revision!")
return []
@@ -57,41 +56,46 @@ class VerifyToolsMixin(object):
return response
dirs = self.query_abs_dirs()
mozinfo.find_and_update_from_json(dirs['abs_test_install_dir'])
manifests = [
(os.path.join(dirs['abs_mochitest_dir'], 'tests', 'mochitest.ini'), 'plain'),
(os.path.join(dirs['abs_mochitest_dir'], 'chrome', 'chrome.ini'), 'chrome'),
- (os.path.join(dirs['abs_mochitest_dir'], 'browser', 'browser-chrome.ini'), 'browser-chrome'),
+ (os.path.join(dirs['abs_mochitest_dir'], 'browser',
+ 'browser-chrome.ini'), 'browser-chrome'),
(os.path.join(dirs['abs_mochitest_dir'], 'a11y', 'a11y.ini'), 'a11y'),
(os.path.join(dirs['abs_xpcshell_dir'], 'tests', 'xpcshell.ini'), 'xpcshell'),
]
tests_by_path = {}
for (path, suite) in manifests:
if os.path.exists(path):
man = TestManifest([path], strict=False)
active = man.active_tests(exists=False, disabled=False, filters=[], **mozinfo.info)
- tests_by_path.update({t['relpath']:(suite,t.get('subsuite')) for t in active})
+ tests_by_path.update({t['relpath']: (suite, t.get('subsuite')) for t in active})
self.info("Verification updated with manifest %s" % path)
ref_manifests = [
- (os.path.join(dirs['abs_reftest_dir'], 'tests', 'layout', 'reftests', 'reftest.list'), 'reftest'),
- (os.path.join(dirs['abs_reftest_dir'], 'tests', 'testing', 'crashtest', 'crashtests.list'), 'crashtest'),
- # TODO (os.path.join(dirs['abs_test_install_dir'], 'jsreftest', 'tests', 'jstests.list'), 'jstestbrowser'),
+ (os.path.join(dirs['abs_reftest_dir'], 'tests', 'layout', 'reftests',
+ 'reftest.list'), 'reftest'),
+ (os.path.join(dirs['abs_reftest_dir'], 'tests', 'testing', 'crashtest',
+ 'crashtests.list'), 'crashtest'),
+ # TODO (os.path.join(dirs['abs_test_install_dir'], 'jsreftest', 'tests',
+ # 'jstests.list'), 'jstestbrowser'),
]
sys.path.append(dirs['abs_reftest_dir'])
import manifest
self.reftest_test_dir = os.path.join(dirs['abs_reftest_dir'], 'tests')
for (path, suite) in ref_manifests:
if os.path.exists(path):
man = manifest.ReftestManifest()
man.load(path)
- tests_by_path.update({os.path.relpath(t,self.reftest_test_dir):(suite,None) for t in man.files})
+ tests_by_path.update({os.path.relpath(t, self.reftest_test_dir): (suite, None)
+ for t in man.files})
self.info("Verification updated with manifest %s" % path)
# determine which files were changed on this push
url = '%s/json-automationrelevance/%s' % (repository.rstrip('/'), revision)
contents = self.retry(get_automationrelevance, attempts=2, sleeptime=10)
changed_files = set()
for c in contents['changesets']:
self.info(" {cset} {desc}".format(
@@ -103,25 +107,25 @@ class VerifyToolsMixin(object):
for file in changed_files:
# manifest paths use os.sep (like backslash on Windows) but
# automation-relevance uses posixpath.sep
file = file.replace(posixpath.sep, os.sep)
entry = tests_by_path.get(file)
if entry:
self.info("Verification found test %s" % file)
subsuite_mapping = {
- ('browser-chrome', 'clipboard') : 'browser-chrome-clipboard',
- ('chrome', 'clipboard') : 'chrome-clipboard',
- ('plain', 'clipboard') : 'plain-clipboard',
- ('browser-chrome', 'devtools') : 'mochitest-devtools-chrome',
- ('browser-chrome', 'gpu') : 'browser-chrome-gpu',
- ('chrome', 'gpu') : 'chrome-gpu',
- ('plain', 'gpu') : 'plain-gpu',
- ('plain', 'media') : 'mochitest-media',
- ('plain', 'webgl') : 'mochitest-gl',
+ ('browser-chrome', 'clipboard'): 'browser-chrome-clipboard',
+ ('chrome', 'clipboard'): 'chrome-clipboard',
+ ('plain', 'clipboard'): 'plain-clipboard',
+ ('browser-chrome', 'devtools'): 'mochitest-devtools-chrome',
+ ('browser-chrome', 'gpu'): 'browser-chrome-gpu',
+ ('chrome', 'gpu'): 'chrome-gpu',
+ ('plain', 'gpu'): 'plain-gpu',
+ ('plain', 'media'): 'mochitest-media',
+ ('plain', 'webgl'): 'mochitest-gl',
}
if entry in subsuite_mapping:
suite = subsuite_mapping[entry]
else:
suite = entry[0]
suite_files = self.verify_suites.get(suite)
if not suite_files:
suite_files = []
@@ -137,17 +141,17 @@ class VerifyToolsMixin(object):
Each array element is an array of command line arguments for a modified
test in the suite.
"""
# Limit each test harness run to 15 minutes, to avoid task timeouts
# when verifying long-running tests.
MAX_TIME_PER_TEST = 900
- if self.config.get('verify') != True:
+ if not self.config.get('verify'):
# not in verify mode: run once, with no additional args
args = [[]]
else:
# in verify mode, run nothing by default (unsupported suite or no files modified)
args = []
# otherwise, run once for each file in requested suite
files = self.verify_suites.get(suite)
references = re.compile(r"(-ref|-noref|-noref.)\.")
@@ -169,20 +173,20 @@ class VerifyToolsMixin(object):
return args
def query_verify_category_suites(self, category, all_suites):
"""
In verify mode, determine which suites are active, for the given
suite category.
"""
suites = None
- if self.config.get('verify') == True:
+ if self.config.get('verify'):
if all_suites and self.verify_downloaded:
suites = dict((key, all_suites.get(key)) for key in
- self.verify_suites if key in all_suites.keys())
+ self.verify_suites if key in all_suites.keys())
else:
# Until test zips are downloaded, manifests are not available,
# so it is not possible to determine which suites are active/
# required for verification; assume all suites from supported
# suite categories are required.
if category in ['mochitest', 'xpcshell', 'reftest']:
suites = all_suites
return suites
@@ -203,9 +207,8 @@ class VerifyToolsMixin(object):
while len(new) < max_test_name_len:
head, tail = os.path.split(head)
previous = new
new = os.path.join(tail, new)
test_name = os.path.join('...', previous or new)
test_name = test_name.rstrip(os.path.sep)
self.log("TinderboxPrint: Verification of %s<br/>: %s" %
(test_name, tbpl_status), level=log_level)
-
--- a/testing/mozharness/mozharness/mozilla/updates/balrog.py
+++ b/testing/mozharness/mozharness/mozilla/updates/balrog.py
@@ -19,17 +19,16 @@ class BalrogMixin(object):
def query_python(self):
python = sys.executable
# A mock environment is a special case, the system python isn't
# available there
if 'mock_target' in self.config:
python = 'python2.7'
return python
-
def generate_balrog_props(self, props_path):
self.set_buildbot_property(
"hashType", self.config.get("hash_type", "sha512"), write_to_file=True
)
if self.buildbot_config and "properties" in self.buildbot_config:
buildbot_properties = self.buildbot_config["properties"].items()
else:
@@ -102,22 +101,26 @@ class BalrogMixin(object):
return_codes.append(return_code)
# return the worst (max) code
return max(return_codes)
def submit_balrog_release_pusher(self, dirs):
product = self.buildbot_config["properties"]["product"]
cmd = [
self.query_python(),
- os.path.join(os.path.join(dirs['abs_tools_dir'], "scripts/updates/balrog-release-pusher.py"))
+ os.path.join(os.path.join(dirs['abs_tools_dir'],
+ "scripts/updates/balrog-release-pusher.py"))
]
- cmd.extend(["--build-properties", os.path.join(dirs["base_work_dir"], "balrog_props.json")])
+ cmd.extend(["--build-properties",
+ os.path.join(dirs["base_work_dir"], "balrog_props.json")])
cmd.extend(["--buildbot-configs", "https://hg.mozilla.org/build/buildbot-configs"])
- cmd.extend(["--release-config", os.path.join(dirs['build_dir'], self.config.get("release_config_file"))])
- cmd.extend(["--credentials-file", os.path.join(dirs['base_work_dir'], self.config.get("balrog_credentials_file"))])
+ cmd.extend(["--release-config", os.path.join(dirs['build_dir'],
+ self.config.get("release_config_file"))])
+ cmd.extend(["--credentials-file", os.path.join(dirs['base_work_dir'],
+ self.config.get("balrog_credentials_file"))])
cmd.extend(["--release-channel", self.query_release_config()['release_channel']])
return_codes = []
for server in self.config["balrog_servers"]:
server_args = [
"--api-root", server["balrog_api_root"],
"--username", self._query_balrog_username(server, product)
--- a/tools/lint/flake8.yml
+++ b/tools/lint/flake8.yml
@@ -18,16 +18,17 @@ flake8:
- testing/firefox-ui
- testing/mach_commands.py
- testing/marionette/client
- testing/marionette/harness
- testing/marionette/puppeteer
- testing/mochitest
- testing/mozbase
- testing/mozharness/mozfile
+ - testing/mozharness/mozharness/mozilla
- testing/mozharness/mozinfo
- testing/mozharness/scripts
- testing/remotecppunittests.py
- testing/runcppunittests.py
- testing/talos/
- testing/xpcshell
- tools/git
- tools/lint