Bug 1403131 - Run linters against mozharness scripts and configs. r?rail draft
authorSteve Armand <stevea1@mac.com>
Sun, 08 Oct 2017 21:07:17 -0400
changeset 680561 4d9458f1819de0a73af9191d560557ab01ffa32e
parent 680555 d71e8e0053d8043bc9deb98b35ca5220a0c9adea
child 735883 a6c70058cdc290dd08fb5d406eff8161a1241d46
push id84533
push userbmo:stevea1@mac.com
push dateSun, 15 Oct 2017 03:16:56 +0000
reviewersrail
bugs1403131
milestone58.0a1
Bug 1403131 - Run linters against mozharness scripts and configs. r?rail MozReview-Commit-ID: HjsBIHY0x1F
testing/mozharness/external_tools/clobberer.py
testing/mozharness/external_tools/count_and_reboot.py
testing/mozharness/external_tools/detect_repo.py
testing/mozharness/external_tools/download_file.py
testing/mozharness/external_tools/extract_and_run_command.py
testing/mozharness/external_tools/gittool.py
testing/mozharness/external_tools/mouse_and_screen_resolution.py
testing/mozharness/external_tools/packagesymbols.py
testing/mozharness/external_tools/robustcheckout.py
testing/mozharness/mozfile/__init__.py
testing/mozharness/mozfile/mozfile.py
testing/mozharness/mozinfo/__init__.py
testing/mozharness/mozinfo/mozinfo.py
testing/mozharness/scripts/android_emulator_unittest.py
testing/mozharness/scripts/awsy_script.py
testing/mozharness/scripts/bouncer_submitter.py
testing/mozharness/scripts/configtest.py
testing/mozharness/scripts/desktop_l10n.py
testing/mozharness/scripts/desktop_partner_repacks.py
testing/mozharness/scripts/desktop_unittest.py
testing/mozharness/scripts/fx_desktop_build.py
testing/mozharness/scripts/l10n_bumper.py
testing/mozharness/scripts/marionette.py
testing/mozharness/scripts/merge_day/gecko_migration.py
testing/mozharness/scripts/mobile_l10n.py
testing/mozharness/scripts/mobile_partner_repack.py
testing/mozharness/scripts/release/antivirus.py
testing/mozharness/scripts/release/beet_mover.py
testing/mozharness/scripts/release/generate-checksums.py
testing/mozharness/scripts/release/postrelease_version_bump.py
testing/mozharness/scripts/release/publish_balrog.py
testing/mozharness/scripts/release/push-candidate-to-releases.py
testing/mozharness/scripts/release/submit-to-ct.py
testing/mozharness/scripts/release/updates.py
testing/mozharness/scripts/spidermonkey_build.py
testing/mozharness/scripts/telemetry/telemetry_client.py
testing/mozharness/scripts/web_platform_tests.py
tools/lint/flake8.yml
--- a/testing/mozharness/external_tools/clobberer.py
+++ b/testing/mozharness/external_tools/clobberer.py
@@ -4,18 +4,17 @@ import sys
 import shutil
 import urllib2
 import urllib
 import os
 import traceback
 import time
 if os.name == 'nt':
     from win32file import RemoveDirectory, DeleteFile, \
-        GetFileAttributesW, SetFileAttributesW, \
-        FILE_ATTRIBUTE_NORMAL, FILE_ATTRIBUTE_DIRECTORY
+        SetFileAttributesW, FILE_ATTRIBUTE_NORMAL, FILE_ATTRIBUTE_DIRECTORY
     from win32api import FindFiles
 
 clobber_suffix = '.deleteme'
 
 
 def ts_to_str(ts):
     if ts is None:
         return None
@@ -34,16 +33,17 @@ def read_file(fn):
         return None
 
     data = open(fn).read().strip()
     try:
         return int(data)
     except ValueError:
         return None
 
+
 def rmdirRecursiveWindows(dir):
     """Windows-specific version of rmdirRecursive that handles
     path lengths longer than MAX_PATH.
     """
 
     dir = os.path.realpath(dir)
     # Make sure directory is writable
     SetFileAttributesW('\\\\?\\' + dir, FILE_ATTRIBUTE_NORMAL)
@@ -57,16 +57,17 @@ def rmdirRecursiveWindows(dir):
 
         if file_attr & FILE_ATTRIBUTE_DIRECTORY:
             rmdirRecursiveWindows(full_name)
         else:
             SetFileAttributesW('\\\\?\\' + full_name, FILE_ATTRIBUTE_NORMAL)
             DeleteFile('\\\\?\\' + full_name)
     RemoveDirectory('\\\\?\\' + dir)
 
+
 def rmdirRecursive(dir):
     """This is a replacement for shutil.rmtree that works better under
     windows. Thanks to Bear at the OSAF for the code.
     (Borrowed from buildbot.slave.commands)"""
     if os.name == 'nt':
         rmdirRecursiveWindows(dir)
         return
 
@@ -160,16 +161,17 @@ def getClobberDates(clobberURL, branch, 
             builder_time = int(builder_time)
             retval[builddir] = (builder_time, who)
         return retval
     except ValueError:
         print "Error parsing response from server"
         print data
         raise
 
+
 if __name__ == "__main__":
     from optparse import OptionParser
     parser = OptionParser(
         "%prog [options] clobberURL branch buildername builddir slave master")
     parser.add_option("-n", "--dry-run", dest="dryrun", action="store_true",
                       default=False, help="don't actually delete anything")
     parser.add_option("-t", "--periodic", dest="period", type="float",
                       default=None, help="hours between periodic clobbers")
@@ -260,17 +262,18 @@ if __name__ == "__main__":
                 our_clobber_date = now
                 write_file(our_clobber_date, "last-clobber")
             elif periodicClobberTime and now > our_clobber_date + periodicClobberTime:
                 # periodicClobberTime has passed since our last clobber
                 clobber = True
                 clobberType = "periodic"
                 # Update our clobber date to now
                 our_clobber_date = now
-                print "%s:More than %s seconds have passed since our last clobber" % (builddir, periodicClobberTime)
+                print "%s:More than %s seconds have passed since our last clobber" % \
+                    (builddir, periodicClobberTime)
 
         if clobber:
             # Finally, perform a clobber if we're supposed to
             print "%s:Clobbering..." % builddir
             do_clobber(builder_dir, options.dryrun, options.skip)
             write_file(our_clobber_date, "last-clobber")
 
         # If this is the build dir for the current job, display the clobber type in TBPL.
--- a/testing/mozharness/external_tools/count_and_reboot.py
+++ b/testing/mozharness/external_tools/count_and_reboot.py
@@ -1,62 +1,69 @@
 #!/usr/bin/env python
 # encoding: utf-8
 # Created by Chris AtLee on 2008-11-04
 """count_and_reboot.py [-n maxcount] -f countfile
 
 Increments the value in countfile, and reboots the machine once the count
 reaches or exceeds maxcount."""
 
-import os, sys, time
+import os
+import sys
+import time
 
 if sys.platform in ('darwin', 'linux2'):
     def reboot():
         # -S means to accept password from stdin, which we then redirect from
         # /dev/null
         # This results in sudo not waiting forever for a password.  If sudoers
         # isn't set up properly, this will fail immediately
         os.system("sudo -S reboot < /dev/null")
-	# After starting the shutdown, we go to sleep since the system can
-	# take a few minutes to shut everything down and reboot
-	time.sleep(600)
+    # After starting the shutdown, we go to sleep since the system can
+    # take a few minutes to shut everything down and reboot
+    time.sleep(600)
 
 elif sys.platform == "win32":
     # Windows
     def reboot():
         os.system("shutdown -f -r -t 0")
-	# After starting the shutdown, we go to sleep since the system can
-	# take a few minutes to shut everything down and reboot
-	time.sleep(600)
+    # After starting the shutdown, we go to sleep since the system can
+    # take a few minutes to shut everything down and reboot
+    time.sleep(600)
+
 
 def increment_count(fname):
     try:
         current_count = int(open(fname).read())
     except:
         current_count = 0
     current_count += 1
     open(fname, "w").write("%i\n" % current_count)
     return current_count
 
+
 if __name__ == '__main__':
     from optparse import OptionParser
 
     parser = OptionParser(__doc__)
     parser.add_option("-n", "--max-count", dest="maxcount", default=10,
-            help="reboot after <maxcount> runs", type="int")
+                      help="reboot after <maxcount> runs", type="int")
     parser.add_option("-f", "--count-file", dest="countfile", default=None,
-            help="file to record count in")
-    parser.add_option("-z", "--zero-count", dest="zero", default=False,
-            action="store_true", help="zero out the counter before rebooting")
+                      help="file to record count in")
+    parser.add_option("-z", "--zero-count", dest="zero", default=False, action="store_true",
+                      help="zero out the counter before rebooting")
 
     options, args = parser.parse_args()
 
     if not options.countfile:
         parser.error("countfile is required")
 
     if increment_count(options.countfile) >= options.maxcount:
         if options.zero:
             open(options.countfile, "w").write("0\n")
-        print "************************************************************************************************"
-        print "*********** END OF RUN - NOW DOING SCHEDULED REBOOT; FOLLOWING ERROR MESSAGE EXPECTED **********"
-        print "************************************************************************************************"
+        print "*****************************************************" \
+            "*******************************************"
+        print "*********** END OF RUN - NOW DOING SCHEDULED REBOOT; " \
+            "FOLLOWING ERROR MESSAGE EXPECTED **********"
+        print "*****************************************************" \
+            "*******************************************"
         sys.stdout.flush()
         reboot()
--- a/testing/mozharness/external_tools/detect_repo.py
+++ b/testing/mozharness/external_tools/detect_repo.py
@@ -1,52 +1,58 @@
 #!/usr/bin/env python
 # Stolen from taskcluster-vcs
 # https://github.com/taskcluster/taskcluster-vcs/blob/master/src/vcs/detect_remote.js
 
 from urllib2 import Request, urlopen
 from urlparse import urlsplit, urlunsplit
 from os.path import exists, join
 
+
 def first(seq):
     return next(iter(filter(lambda x: x, seq)), '')
 
+
 def all_first(*sequences):
     return map(lambda x: first(x), sequences)
 
+
 # http://codereview.stackexchange.com/questions/13027/joining-url-path-components-intelligently
 # I wonder why this is not a builtin feature in Python
 def urljoin(*parts):
     schemes, netlocs, paths, queries, fragments = zip(*(urlsplit(part) for part in parts))
     scheme, netloc, query, fragment = all_first(schemes, netlocs, queries, fragments)
     path = '/'.join(p.strip('/') for p in paths if p)
     return urlunsplit((scheme, netloc, path, query, fragment))
 
+
 def _detect_remote(url, content):
     try:
         response = urlopen(url)
     except Exception:
         return False
 
     if response.getcode() != 200:
         return False
 
     content_type = response.headers.get('content-type', '')
     return True if content in content_type else False
 
+
 def detect_git(url):
     location = urljoin(url, '/info/refs?service=git-upload-pack')
-    req = Request(location, headers={'User-Agent':'git/2.0.1'})
+    req = Request(location, headers={'User-Agent': 'git/2.0.1'})
     return _detect_remote(req, 'x-git')
 
+
 def detect_hg(url):
     location = urljoin(url, '?cmd=lookup&key=0')
     return _detect_remote(location, 'mercurial')
 
+
 def detect_local(url):
     if exists(join(url, '.git')):
         return 'git'
 
     if exists(join(url, '.hg')):
         return 'hg'
 
     return ''
-
--- a/testing/mozharness/external_tools/download_file.py
+++ b/testing/mozharness/external_tools/download_file.py
@@ -23,17 +23,18 @@ def download_file(url, file_name):
         if f.info().get('content-length') is not None:
             f_length = int(f.info()['content-length'])
             got_length = 0
         local_file = open(file_name, 'wb')
         while True:
             block = f.read(1024 ** 2)
             if not block:
                 if f_length is not None and got_length != f_length:
-                    raise urllib2.URLError("Download incomplete; content-length was %d, but only received %d" % (f_length, got_length))
+                    raise urllib2.URLError("Download incomplete; content-length was %d, \
+                                           but only received %d" % (f_length, got_length))
                 break
             local_file.write(block)
             if f_length is not None:
                 got_length += len(block)
         local_file.close()
         print "%s downloaded to %s" % (url, file_name)
     except urllib2.HTTPError, e:
         print "Warning: Server returned status %s %s for %s" % (str(e.code), str(e), url)
@@ -43,16 +44,17 @@ def download_file(url, file_name):
         raise
     except socket.timeout, e:
         print "Timed out accessing %s: %s" % (url, str(e))
         raise
     except socket.error, e:
         print "Socket error when accessing %s: %s" % (url, str(e))
         raise
 
+
 if __name__ == '__main__':
     if len(sys.argv) != 3:
         if len(sys.argv) != 2:
             print "Usage: download_file.py URL [FILENAME]"
             sys.exit(-1)
         parts = urlparse.urlparse(sys.argv[1])
         file_name = parts[2].split('/')[-1]
     else:
--- a/testing/mozharness/external_tools/extract_and_run_command.py
+++ b/testing/mozharness/external_tools/extract_and_run_command.py
@@ -4,21 +4,21 @@ Usage: extract_and_run_command.py [-j N]
     -j is the number of workers to start, defaulting to 1.
     [command to run] must be a command that can accept one or many files
     to process as arguments.
 
 WARNING: This script does NOT respond to SIGINT. You must use SIGQUIT or SIGKILL to
          terminate it early.
  """
 
-### The canonical location for this file is
-###   https://hg.mozilla.org/build/tools/file/default/stage/extract_and_run_command.py
-###
-### Please update the copy in puppet to deploy new changes to
-### stage.mozilla.org, see
+# The canonical location for this file is
+#   https://hg.mozilla.org/build/tools/file/default/stage/extract_and_run_command.py
+#
+# Please update the copy in puppet to deploy new changes to
+# stage.mozilla.org, see
 # https://wiki.mozilla.org/ReleaseEngineering/How_To/Modify_scripts_on_stage
 
 import logging
 import os
 from os import path
 import sys
 from Queue import Queue
 import shutil
@@ -55,16 +55,17 @@ def extractExe(filename, tempdir):
         fd = tempfile.TemporaryFile()
         proc = subprocess.Popen([SEVENZIP, 'x', '-o%s' % tempdir, filename],
                                 stdout=fd, stderr=subprocess.STDOUT)
         proc.wait()
     except subprocess.CalledProcessError:
         # Not all EXEs are 7-zip files, so we have to ignore extraction errors
         pass
 
+
 # The keys here are matched against the last 3 characters of filenames.
 # The values are callables that accept two string arguments.
 EXTRACTORS = {
     '.mar': extractMar,
     '.exe': extractExe,
 }
 
 
@@ -135,23 +136,24 @@ def process(item, command):
         # Now that we've got all of our output, print it. It's important that
         # the logging module is used for this, because "print" is not
         # thread-safe.
         log.info("\n".join(logs))
 
 
 def worker(command, errors):
     item = q.get()
-    while item != None:
+    while item is not None:
         try:
             process(item, command)
         except:
             errors.put(item)
         item = q.get()
 
+
 if __name__ == '__main__':
     # getopt is used in favour of optparse to enable "--" as a separator
     # between the command and list of files. optparse doesn't allow that.
     from getopt import getopt
     options, args = getopt(sys.argv[1:], 'j:h', ['help'])
 
     concurrency = 1
     for o, a in options:
--- a/testing/mozharness/external_tools/gittool.py
+++ b/testing/mozharness/external_tools/gittool.py
@@ -1,22 +1,24 @@
 #!/usr/bin/env python
-### Compressed module sources ###
-module_sources = [('util', 'eJxlkMEKgzAQRO/5isWTQhFaSg8Ff6LnQknM2ixoItmov1+T2FLb3DY7mZkXGkbnAxjJpiclKI+K\nrOSWSAihsQM28sjBk32WXF0FrKe4YZi8hWAwrZMDuC5fJC1wkaQ+K7eIOqpXm1rTEzmU1ZahLuc/\ncwYlGS9nQNs6jfoACwUDQVIf/RdDAXmULYK0Gpo1aXAz6l3sG6VWJ/nIdjHdx45jWTR3W3xVSKTT\n8NuEE9a+DMzomZz9QOencdyDJ7LvH6zEC9SEeBQ=\n'), ('util.file', 'eJzNVk2P2zYQvftXTF0sLC9ctTbaSwAfim2BFCjSIsktCLy0SFnMSqRAUuv1v+8MP0RZ3uTQU3Sw\nJXLmcWbem5GWy+Vb0fbCQD2oykmtLDgNDVO8FVBL/NG4y/zOcrlcyK7XxkGrTyepTulR23Rnm8HJ\nNj01zDatPKZHJ7qeMBe10R08aFXL07/MWDw+Wrxn5+nyAs+BfTqtPAn3N94KUxwOinXicFgvFgsu\naqh01zMjCkLfbnzgu/WbBeCFUcddTK0RaKqcUM6CrsGdtbe1G+iZtYKDVCAkmhlg1rvjhRVQoRah\nLuiK21UlrJXHVKaeucaW8IfGbQYW88E8I4Bi8lmAdQaTiKFKq9UGrAauQWkHg8VKK2iZOREZFBOV\nm7xlDdJKZR1T1ZjhkVkRAGOadPk9rBcFnAxXZrWBj2YQ66+A7b4BtpuC7W7A/BGHsaD7sFAawXiR\nLXZzi93Uwgg3GHUDtZ+5Rp65NKJy2lxQJY5hHsW4gtUc6lq+ZNrhfcB2GDAlTuyfkAmVYbwaCMdv\n9kY/S44qOMuWV8xwjxRgN8SpRH6oPx5bC7XWP98fmXmERFQjHWbI1KX4VJdCcXtGJRUxKrRHXklf\n2pattA5jyMGvP4/0kBoQKROB6i+FMdoUywc9tNxb1FJxuL+zBHhnl3AHRYozg15VGDHHZukvVN3C\nmgrNrdv4pU5zsffkjhV8wGVAK8rZ2/XYRcI8k45xLHQSO4BGBrYFONmh9GU9YqHQvFZSecJoKG9O\nHzNPjjn1iQttzFxmFqhpN7EIudqGbe3QFXVOKqkCCf/w9veftn5K+Wkwmw6+rx/rxw0VuREvRGHH\n3Eg3kh0HXEnHJMn3Y9NQwxxXYfncEBrVI6d3bHX1RE3Rh474bbuDe9j+svs1JxgV4U2zp/dGn6dx\npSmHnjMnCm95zXyJwXN5wh4vxrqwWhwG1Ur15JubxmkuUdiAtAHypLRRxLoXok3d5CvEceSplQPx\ngqpOxXHm8maaA4qeJmQpLel+duI4crBFjNbOa9iGMW5jy5xZmyPdoCB7rs9qqtc5km82D3G7n4mK\ncX3RUhXh7Hr9qvlVxfpbG0QyHSVHKHlbtFZcnz+phi+Z/Vo5IuqcJW8jXirRO/jnw59EyAYmZ/wI\nfxFdApbvNA6vqonvcZMnw3JKjaDpojTN3N11AEE/30jFMGnFVFGz5kbFZVGRQXvxXT7OFDTAVx8J\ni/mvA20YDmWJPWg6wSXqOcyWBoe2ofTpo4PwonOSW81REl3vxbofvzPK7snSPc3Zfao53pNZ4YNb\nvzaZ9PFL4RvYJ+FbeENE1Dy0NZ61OuPijXOeQDGWYEHK8NQVcTlWJhau1YzTew6/euZKCKuY0ey7\nqJjMTFoN4+NT8v68hh/2kB8zaXEivNNKTCdEQInx4FdWCif84atP+G9DrEIf/tGODW0iN8eB8/AQ\njYv4v/YMTvYDRjHDXN8EGV0wnBvbaewxlJvgD6ii7yUBCuV/5XDUuv1ekqBYBLt1eS2R/wBE3uXX\n'), ('util.commands', 
'eJzdWW1v2zgS/u5fwXPQs9x1laDFvSBA9pDdJnfBtkkucS9XtIEgS+OYG4n0kVRc76+/GZKSKPkl\n2T3slzPQOhLJ4bw888yQHg6H55XIDJdCs7lUTFVCcPHAMlmWqcj1cDgc8HIplWG6mi2VzEDr+o1s\n/jK8hPrvZZEaFFXWz4V8eECRA/xmJ/VT/ADmA/4JKkoSkZaQJOPBwKj18YDhxy9dcfHu7ZwXsPEy\nXXL77vrz3cXlu7coeKoqGMC3DJaGXdiZZ0pJddybdp4WGgaDQQ5z0iXJyjzCfxP2+vXjKlUPeuxW\nHLBslTOumV5CxtOCccHMIsXtgaXFKl1rtkqFYRwNVlwYQBHwBILxOb4baSak8YLg27LgGTfFmmUL\nqUHY92431Mj9EWdyuY7GztA5G+HuI5JB+7oZTq926Rc75x4lSE3uxCe/Hu2KuZjLaOjDeMxup6c3\n0+HO4Vd6yF4FEY4Lrs1b9EvBBZB/xm4pQeQR1hP2lBYVtLrF3IDCf6WOxq2eWzeym02cFG1UZCWh\neBeSEtQDJCCeIvznRQlY0RtnKP7BlRShu/x4XC3z1IBdaN8rMJUS9bDfAAG+M+YI9ptKMBxiUcrI\nBUzOGU6oShBGj2PGblKuIXTUj2lRQH7tniziMHxWmllAnUYIAW4QMNwsMKbizS+gJAq7mHcmOX0R\ncVVGwuZVUawnoSVHMaWj9+wWKzze7oA5V6B0BHA6x9jUecdmkKUVmoAwzqUYGYdiNIJMJW24WNhQ\n5jV60fNPqdKsrHKCwwMKtxNlZZaVaQCL80b7wErjBNY2wp0Rp3xDAPYBZxOxxPSfj/UOWDldjoft\nJO+yIFLZArLHJENTt7nNM8feyG5B9qhR4ezm5upmNCFBCQ2dECEF+hBwXA5xgZIDO6FIlxryrrXs\nDTP7LD67fM+iV/Hbua7Xj78KzKv6IYD7fyoOZifoc3gSiDTKriWICFPMv5mw0WrUyaQ9ztQmxxic\nNEvxGZRqn1tnt3opKKWBVjEN6gnUhCE8FZWEk0spAF/rxU+wbh9ORvjfaNI4J/j0TEOpyVLBnH9D\n677gqvsarfUWbRDauTF8MyDy6MvoTTFqtblvuNkp9MxSjkvRl8vULPDtEmNGgiK3duyFBSvT5ZJW\nOh80W3HNhTapyMC5aJZqQNLELBx39if78Os+jFbAdLUXvmM95Hc4MVli4sucZ8lS1nHFedQPJFTh\nFFL1ybujowmj8fbVUfz2T1vD4T+1DELLLM0efSh/JfkSt6QBBBlRpoUhI27FxFgWQI2MlVabQpn2\nYtrepGwr67fQdkvZg20uYHPfdaFwzL0ZSrMKub1I+hxdLFdEt40LvIYOOW5z7DPgG2SVFWXSR9DI\nFQK7KpooNqLXYgZBpUxCVNNQBoYV3VHH4v+6zDxbQcgTKCQAzLVlxy2OaD25pVwVqUbmtSA9CWYO\nHCgW2NnavrU1Q9G2tGdsc3A8aEbQeBzktrFklEHHnZQjk3KYVQ/R0KPaQxBBZRsulY07C5y8kxN2\ndLyRu7sqUmBBf8lvKVF9GXXOdAYA+/VNDdXzCR2pbEJ0EvhQyNWOngK9QYNvwoh9vyd/6HOACmsw\n4RIjWfokeY6nhrQs7UHKZ3w3WCEscN+ewbXznUY7nI4a91ll000BKshBpNBOKqLGPHqlx3gS2EPm\nUX/9JFBwvBnTTkcXfvpyop2UtCnUN2tn9otU37oDGQ8WCdZ4a6zFTY61w8vAxRPGH4SkmhrH8XBf\nIfNbb2vv7NBWpJIW3lbUoykuNWljQiNvU2Aa4k7FcK8Swz4sMcvy8TNrJvWeWyDwzNJbCgw5zRBE\nmuDgA+U2HRyjvkbPefH5T4CG/1lWTTgBE1gO0AXAMuo0M3VLhOfpxJUEx/lcZEWVQ+L7WnuLMKHS\nZhIMcP38a1uatn0ISp3rMLobuvKHPQaYurduOgc/M3c3FLUU7
D7xQa2IJrlpJmvcGFmqPaASbSps\nI7xQbC4hLWPnqDsXVXfvsZYV0wtZFTmVc6rttuw3jQxSX5Yu0RbANq1AI/G7lJUgm600pxeLvsfx\nOaxwuaw0eWC2NqDHk0bNHNK8kNljc9rlfXeEfYxVu1Oqb6fvrz5N3amuk5LNZCqfg+c6nN/nUOu9\ncMKGbdbtOuju7UL8iSscvLg+a05e7uv53OnaXO+KjMVNoEmjtR10W8eIlLxbQu2oA3Qmc2B/2Ogu\nXlK3e1J8EQ+2oQ6oTr3NLujZq4HORDe8cW8QdJ0vuRlAUmwVOWAfsRPHBQpc6njvufxl0qVpU7za\ne4C4cXOwfeu13+X6YP/tAZ7QnyChQ2xE/7W8NqXcp64f5yyLNANiNHs9qBdYZIpYlcgk3v6VVI8a\n2cfQCaESCEx/rhK5XOmYTbHk4QRkkB8gVVhnrIOubk/PrUR32MrBHaWiHyR6fIUGz5Us2aziRT6T\nBsk8fYK4vrceB0eYugO6IWuIz2w/bO0Z1JmecJ14fbbfYH7StDJxZtVTGXUMLXZ6o85lPWQ1OxKI\n2wsCrA06dLHDkfUyOicv8GA3U/IRz3TYxD3qMBtqIVzTUF8IfXCGi+R+jfYLeomQA/YvPNTN1zZk\nOVeQGanWhBPiisMVHfgOXR8CbWgrpQg8dD8y8Dtli1LmdqMJO/rL0ZEPFC2huxiiZOkuqXGXvqZ0\nAre/KbgbY2vTz5ILL49GxoGTMR/vXMAmtqmuT6wLxBOzKtNtQsm1tud1qpk07JwRyLGndjzRHbaG\nA6cajJwsmS/yxAaiFz2n6gkbCTPqBq6FSWrvFqLGNHu5dJdc/TTe7DgP2AXVZvHoKrQ9Mq5Q3xxT\nD0/hE8wZg1MCK7EdvpxukVOmGcoBykws0aS6teViVLIHaTsDyQogCdz+UGGZYIucN9Qf+uj2gOki\nHdh19Ocm3Bu4pGA3U3uWh1zVzglYst+cH7D31gNYnm3zQor0sqsbgzA5dmmx0yoL4t4sn089bWmg\nbGCNTHwQspPtGfs0RDc/AudZRizlLwtyt9aOxLdQm15rAyWVc/9bXezetL8/+RkY02joswM5c/iR\nZ0pqOTfDwG5fMu0PcJ3lsW3iNd1p4dHn89/vLi6fWbczG8K53qxtZNvUpzql39if7+Y8Y2FBqimV\n1iCAxYNZ6PD8xT6e/ju5Pp3+I24UuJb2DGQ9nBVyNgMFKl6u486FWaqRxEzX5e5CiXZq6QjpsGir\nquM2QoGfNvqKn799/Tpi39mVe2pGs2zDseEi//vncZhWXVRv4dHA7/Vd8iiHgh2es8N/siFW0RGe\n/brVYDPN+hIsttnh7XYZYe/UKSBExOnM/xLc/C4c34I5x+9TYxRHWgN9F/WdNwmmn198OEtOp9Ob\nix8+Tc+Sy6ubj6cf6p1v8ZABjuDxFOLwgp2UvZJNLbUT+5VAHZbeFhLnxf7+m4hv9XkPBRggCzaX\ntSVvPkdHUC7WP33H5wguWqU3luEXvnodvx6FFRGnJin6CLFlhX05um8vxVyldO//et+BSJ2L8YjV\npdc+xr1ClWE3zkXVcv+LanC4VaviH3fH6/3FzdmP06ubz93d+1TwIvp/MYYCFn8RkDY32BHlnprt\nfNuowvsa/lug8V+mJBic\n'), ('util.retry', 
'eJytVk2P2zYQvetXDFwsLDuC4C2wORhxsUHQFgWKnHqXaYmyiUqkQ1LxGkX/e2dIivpy0h6qw1oa\nDh9nHt/MjmivSluwouVJrVULdSdLq1RjQPilm2ZX49dKJS1/s4049YvB0jLJzlwnwdqo81nIc4K/\ncOi/8jO3v+Mr12lRSNbyotgkSVLxGjS3+p6y0golM2DW8vZqzeElA9NwfqXgDu93GbTsrRgsL7AF\ntCYQH4dT8LeSPJQ0h/Tn/j3bZFA2nMnuevisJMdj9Bkd0Pznzb3+9fdm77BWq9Un1jRw9AGtgdHB\nou1aUDVaQ3hrR5qBTlrRgLBgurLkvDJDRJgb6xqLyYNV8JLDMUa/BmHAXjjIrj1xTciGI5uVIdcb\nEzainLi9cS4jL9kM9/0OmKygUt2pIRNn5cVT0W/J0C3CTbOZULrOAY5zEl2kDGx3bThuiTiRWsqD\nYfoX1TUVRgsl684Xm8NvNQwwoDBbTa4S/yjDI1AjjOUVCPnobKY5aCYMOjgJ9peSEXl3uAm8qNOA\nFVxF2/JKMMubuwvjGK7e5XLV6quo0ItYK/Gm2QkzwwsksBHrbm0KBqy2mASmELMnxD7hz4pU1bVc\nWhOBQohwZYZCwwsTnpu76nSvSV92BKf5l05o1NUSCUPEwzTKBCOSlIEjHnFckbp1ScH1WxtuTETO\nI86R9L526R+9+D3P/SU7NYnSkkBiFBQ4pQBY8YOY0HjsKVxj4bgFSpR6Q7CHwt6M16SyMXWlB9dg\n876inlY8fBj6wX6QjzrnFT9153Q19X6qwBHgJDc2r+AJ0lHbgOkxo66z8YFI7GLP7u12EUiQhA+H\nWI5DJKjd/QSWQhOyVunKCXsP1FeoRJ8MysJeXA/a41ffhPz7agISn1U4EX4IKfQN01id0u6Nf/VQ\n+CFD+LE4uO00qsNtS7fklcF2G/yjqy+/RTNdphZYj7lREQwVv4dVRl8FMXD4Q3d8Gg3ebrjt/SLf\nsJAuduBNPGL+m4T/Kr4S36QyidwSbWM1Ttih1jE/b5DNT7D7D+f9wlAfVVCQu+kq9vUTrxV1M/LE\nJYzl8T3TMyhw4UPW3K2n3/EaAj+M3rfw48JzluWkFJYZz7En7hNvGg2E7AZjLSTKf1YiEt5RbQ1z\ngHB9YOvV10vUfwWheoD1eg0f8T9hqTSz2EKQ2zBHbHLszqylTtYZHEu8/+sA7tmiA2ulRhrL8zyZ\n+8Zh5Hm3G48jz7sB5cR0utlPYEKESfQpImRRowIVxkmNebTt1Q1a3jqeIMZbyeWKA9S8dveP6tyz\nQXhh2PGbwrjjfxBjxPS39Ti7gmR21DLE5PFqyB3v+3U2OsY5EEsjBP3vIlhwFlEKYb/D0v/M0CN2\n7oLjNNTHkvwDPQB6iA==\n'), ('util.git', 
'eJzNW+uT27YR/66/ApF7IymWeEk/Xuam4/iReJrGntiZdMZ2JEoEJcQUIRPgyddM/vfuAyDAh+S7\nNkmrGVsiCSx2F7u/fRA3Ho+f1eXGKl0aketKqNLKKoUb5VYcld2J3XY8Ho/U/qArK7Txv0y9PlR6\nI01zp66KQ1oZGV0Xau2vKjka5ZXei9qqItno/T4tMyP807pcbvbZHIbt9Y1cHlK7m9PdD7WSFp9F\ns3NVSD/TpLlc1mWhyvcjv1aht1vgfwTf4tpfJVtpv4Ofspoul2W6l8vlbDQabYrUGPFE5mld2Fe7\ntJJfp0ZejQR8DvBo1H0EFLu3pkgok7lY7tP3cpmujS5qK6eVPOgZk1K5wKvE2LSyBhU7HaMYV5eX\nYzcEPw/EP4CCcE9QhUZ4cs0gVA5wgfTeFLKMCb1rBuFTGOSfXZixuIDtS3ByAiTxe4r/zWiKLIDD\nMRIRpbZgBUTgqkuuS4AkHPEAW1c8yykD9L3ES1J2rIu1sgZoeXtJUMpDoWxEbaeN5SFgQsmHWoM2\ncVpSSlvozVyMx7NRpIv+QGKzMLZSh+kYVBOmOE69KL9oVU5xvblgdTD3u9QA9zfKgGdMM4mP/aUT\nA9ziByJlxOuqlrzFPELIj8qAkKBGnIoOhDNsdRtpNDbu6ZvJVtnJXEzAWvFrsdAl7Ekp6aL8chKW\nfzcXm2N2jYRn0f6QUMgI7+fHjTzEXpo8TotCZi/56mlV6eqqO/tZWoD7xvLnjeg57uI5yWlAR/DE\nKZyfbdJSrKVIxbpKy81OANrYdCvwWXIfFZmdPi6AKKkmmzTc/TmKUSVYKmtlDf5/Tc+CYp7DY5UW\n6l8SPBcMYX+wt+QVRlld3YrUsmbE85x+eI0BGgplyonlKXOhLOBvUaDGGBQz1ibMW+HCKxhOYs2F\n3ckS1Qp32VH9xE0lUwsTvXZho9C7vekrk6mKZIkgCAwwUWWup2NaFuMAgMdctNUawe40PJGFh078\nYDhBfeF6BQg5sBgNi3CFnJGVm89ao06x1RkGEralyzur8a42QWbamd+WYEhamEDPH4hv/BbloOb3\nQtcWl4ebADqw+1Y7/XNM3ctM4QUwJTdgCjgENORoscxoBLSZ8N8tW0YifmLP2SHhHez5EQccagA8\n0AFodw+hSB0K3nrj6MF9AFe07AIZMRiqMjYOFBu424ElbnRpUxiK4VjTDFnamENH7TtpJ8ZLA0SR\nv7YgqjK278CwFRgRYaSJrYRd8MUrcra5iBQO+pOJrKoSgs21+OsX7a14IL4H602blUFFSCFJEgBL\noXNii4UweEn+xU6Vdgg1JFr3q1ShnztO0J8CAwBBYKgNCCEMMFDjMPr1YcJe8m7AF07NDnNGbSsX\nY3YGmDhzcauFhnjfI5JZAlmKtbF/DaC0Uwio8AYgKhMwjWziPvjQhsTeliOqgqQRvr7UB0hS3oxh\nMfBXcN+bBcV9vFgs4O4CVhlH4D0XgBXgTdcxkecvn85iM8EHyTEFLJ6Jz65Fx1JaTDbWWNtDjWkF\nzeU1ErDpbDpLOFEIK6BCga0Imkpd7QkxBrCKKc9aUQc0DLOnDaFr1j5gYnRrgNY4QUXNehGMSf4+\nMQxTM8fFCYthT4LcCsADf6OlBLdDZOco9gx+NXHHMEAphg02Nmtkkc9pRiW3dZFW7aE07JJkdkYI\nSbesbN+qRwN+BACWK5cwrbUu+BeIxw8rmZB3skeeMk0qPO5mfJHVscOYJUn/SZtSeRiLWTluxjjs\nUTYcA50tDOAJTsAxscY8Ac4oplkr3c3c1hvYeooGlG3POTK4/U8LiFMlYLzpshMbDGXpoF69/gXM\nwTCc5Rq/A4EJL07Ul27kOaLMRkTVRVkqQWmXAm0YdZzMQGqRR8lGcqwUJP/jC/O2xFqntbSHyk0h\n0zKuRR6I10cNNpNDfNvDMyPGNAatZK+zupCYZBx3CvJVir0QN
Y9SHFOIk0aLPK2SBpxbSSpRIXPM\no/+zicM5p/wTpsbMplm2xFTF+r3iC6qnmotIFnCgR1mG6M7PKLPOxCqatvL+DEUU4JPHf0wXVvhj\nxVYOu0MNABi8itZZeRftScuDyAQyzsiHOY2kn0UG6UZAFXdnSV9JyygFkwhdvNR34BGWXMC0+/G5\nbfjs8ziMn54zxs8bWbopcwwC32PKojhlcduVaYm5ioN4FerGDugFQRY3d4W28/Y2BG3IORaglEp2\nwA3vm2mUFOypHwHJnt3sphX6oHk4ffvq4Uy8neYSbr6d/QWdEsZIs0kPqMOgvTkt1Arv+8F4vk+2\nla4P0y/7xnM/wznvIIM2j6lZJtf1FiHmCs2BXISHIkiE7sX+1jEFWjlrNj40RBOuY667QXzUnwCg\nhCkbmtNQDYesmharUDahjPD/9AgQemFmjvfTypuH9aIK8F5+OxDC2kwCbrR5vDCf5Cswc3eo9N7s\n2k1z0WpwXKMeQ6vFXdaHDOLOEkdeU8UdlOBbgNfdniDoTGEeZhwNigdMotMxwI6fAdeF1ICKshUO\noup+B/uz8rysEDVWjs+V2OzkBiorqjqxM0rUGMMTNpMnmsMV1o20BOw6VmO8yi49AEDMwbs3RU2q\nh6TMqHVxC6zq9VpW2EGlVIMaOU3vwYlFDIINzLkEttjagOq1NpIgzY0Sawk4IhvGnMiNHTf6Q2rD\nTdiWmjmFkOWNqnSJHd3p+Jvnr5evvn30w9Pl149ePV0+ef4D2A3qfDa8St9bmiZl466tpmWbi05V\nQImMCZvezB2y+JgAstBmkB5EDJI+qRkbZcLNyMGODVXouJehFURuFGY1k1pFG7GBfa1moGtuobW3\nGyQgeG0V6CYaytr2I1x18pS+wHDbyyCzx7QqgUvgV9dFhuW5ay3EbYoL8xVUHCZdU58Dn8B3LMsc\nV1qi4ANsxhZDqu497O0D1Sv9FjfXHp3q/DF6H/JFkzr9MVdFnyjL3Yhust7vi7U0BYDo0gOBjgtV\nFHgzNVNDJd/UZ19FLtzr3LHFhwZYJN85a+x2YkKf06UwsGVosAAJgJd0j+j0bazPTqhJXAXWN9d+\nX+6BeAGLVEcFewziUqICOmmKIv+hZ4NY774DUrvvNuAzWvueH72eIazWdcWMopbijJnUobY7Kw5F\nupFnfTx24s37Jb3Y+lSVRIqB2lCVmfyY4Lzx7IxlNYQHzGuooRrGt/coaoEODDmzhU5zEDuOEnJX\n0N4BQg24OVsw6dqpLm0i75wDHMpzlI7CLr1xwat5z5IWmI7eUjfd6HnTPIWaH5UsSknrOAKUiYKV\n3todvhBkr9dLvn0ddYviVzmwW+2deoAFYKbRFYmjwLQwB7lRuZKQdENxiD1azJ7ljax4yVC+h1XD\nmwl8Bdd97dJ648Srx5ylG1unBcRsZCIXbM6wNHDoRMc6iAWPSPhMgAz56PbAO3L+aS7RfD/9gmxI\nWdT1CZtsmi1ym6PsydX9zvj7V4OY1QWJZ0QCnRUkM4wRjeu2xvYiIhN4/eLJiyvxLWAb+CYtzHkq\nYYeByuU9Kc1c2nRrLv8Jnx6R6P1Yz5riD1GP+zIc5jrwNOvNHX5pcXeKPUjsvBO5V7sxaO6V3ksy\ne7CB0oojpGzbzwbGPeZgFSEkBpJKLrgd350QgIu6/2FPaG8hUC7a4W8gmvhPHAfPDQuvBfxn0Fju\nt8/Rfrg3XnjblTHXYw0xRJXj++/23ej+IXseZaLNDpzMQO+5Cffd9n6a0V3sxIj2Zve1Pbj1saOx\n1v8jHzuRNP+P5AcXhmyOsRONh1u6oaHBgk7Yoia+A+JxOkqihmqVH33c51bkRh9uvYquKPn3UeLK\ntwJyX827KBMFGYIahXgcOSAe34HYAhE4NVGUjsNGs0Y7Tf10hCOIagdrp4fLCzOhTlcvFg7owLCD\nIIM+fgO/xkJSgy8wPZHxk
NRhS3NXvPYkDENcyhDXO+4Bnp6hnZqeyI6bZkifBZVHfY22oNxpHzyL\nAXQaIxmaHk/1bftTOTw3V9qtFq4iOXHvN29C4+UxUjWhCY5bSim7wZ5J04khu4bbFMgg+8R0jmDB\nv+iifDMR4jWkT0ddUV1I5uyPYdCJjju3ULiYodNu/U4K94NhBC5CY1o9H6TO4nePh6CUUXltGuZq\n8JEwOdIWUXBKJBKQTw+K506ZNM0dt7XnK9wTJSj2NlngIcx4ZC3q0lULkaLcnChaYvua79IZiS7N\nNt3HsUIJbXhC29kGgb9508s2yvM6Vto2wuj3kDN3X/b6j4sQf5e3a51W2XM8U1LVBzvAUi9tult0\nkf7xdAxhfl3IfdvSnDpP6gc/eKJElXVYvh8/g9pfukMs8RaKPIXCMvsKvvnhOoUy0OrQD3aW0n0T\njOp3RyrexW2YwTDk0/ofwYv5BMflYuHkQ2/+WwCjfZZQqzSbThaLUi+oLtW1nQSL9WGrNUl+tDjp\nDb6ZpvNu0UG1TmsyuzqxHD+dBIkbqgEL34XTIc25EEd8UHRnYdzojIKbx9rBYDDYFo967CFdbdCV\n4jtAaQsyXG+b37G4Tja3tV2TOyEYKqVCUPUAiz0lX9kPQxAznTVvN3HlqE2gaSorsa7okJNbHtb7\njvOPXVpuZYDFTJkNuFl0eM61MLpFP8Sbo8Iak9ZOrRv7EyFrM+rnL8SUqxpaFi7XstDHGVW+utpw\n8c0lJfVFHJkMjDGHf+WGMhlEPb3fA5arzPj30nvq7iPAc88EKO35NFrpzj0hHZvC00wYC7pJIFbx\n6Qv5oVaANKgRoD1piOD0xYJnTeYeQJQ/EEY9nAo1vr4VugAuBURFQ6fINb1dGeqj9LteXSf2vuWP\nRvF784bGQzH5+YtJdMg5GH337GcbdxwW9ByVHcLnT5MLc7lPIfuqOINrzPsMmrVnc+437bx96uT7\ndxWaCXuZ7yL0p3y7X6V0Hbzv0Z36cSjh4gHY/+hkWNR8Adv0zkVAfyLfwiMIhA53TpS4O9RLlOgs\nYpwuuQwpfu/UywfukC6cCv+ocVbsYPA/W+/9udG8KRn/D8P5A/FYlzeycraBzeCy+dMHPopGh2sn\nWMpxyRhOVTvjpz9RGPobjKGEgZTR+Bwd+ojThmDTcdbwhDqZbHj4LPQTmSXqAXKnEUq7jWziBebO\n6a1vRTMxKE/1RnHjVUOsoLNOrkFKb8GpGkhxxUNdbSV6CUY2d+TIydTOTpCBySyAbwfvVN7y5k7J\nFoiNH1JL0x1uuPw1nvTb5a+O7m9X7VERfESDxgk41z7F9+29yjLATQsyW4gTX0THIvuW2Od/B3W0\n+aPZnZ0IOL+Doj8/x/HnEad/ih7/O25mztFPhK/4kJWLXPTnOL2TVZzzNClBOJS6wvErn+AVt3R8\nIjom0SRyJ48ohwNW7ogyXnz79NETf2qP/yztPqeoXHw4czr03yOfFDU=\n')]
+# Compressed module sources #
+module_sources = [('util', 'eJxlkMEKgzAQRO/5isWTQhFaSg8Ff6LnQknM2ixoItmov1+T2FLb3DY7mZkXGkbnAxjJpiclKI+K\nrOSWSAihsQM28sjBk32WXF0FrKe4YZi8hWAwrZMDuC5fJC1wkaQ+K7eIOqpXm1rTEzmU1ZahLuc/\ncwYlGS9nQNs6jfoACwUDQVIf/RdDAXmULYK0Gpo1aXAz6l3sG6VWJ/nIdjHdx45jWTR3W3xVSKTT\n8NuEE9a+DMzomZz9QOencdyDJ7LvH6zEC9SEeBQ=\n'), ('util.file', 'eJzNVk2P2zYQvftXTF0sLC9ctTbaSwAfim2BFCjSIsktCLy0SFnMSqRAUuv1v+8MP0RZ3uTQU3Sw\nJXLmcWbem5GWy+Vb0fbCQD2oykmtLDgNDVO8FVBL/NG4y/zOcrlcyK7XxkGrTyepTulR23Rnm8HJ\nNj01zDatPKZHJ7qeMBe10R08aFXL07/MWDw+Wrxn5+nyAs+BfTqtPAn3N94KUxwOinXicFgvFgsu\naqh01zMjCkLfbnzgu/WbBeCFUcddTK0RaKqcUM6CrsGdtbe1G+iZtYKDVCAkmhlg1rvjhRVQoRah\nLuiK21UlrJXHVKaeucaW8IfGbQYW88E8I4Bi8lmAdQaTiKFKq9UGrAauQWkHg8VKK2iZOREZFBOV\nm7xlDdJKZR1T1ZjhkVkRAGOadPk9rBcFnAxXZrWBj2YQ66+A7b4BtpuC7W7A/BGHsaD7sFAawXiR\nLXZzi93Uwgg3GHUDtZ+5Rp65NKJy2lxQJY5hHsW4gtUc6lq+ZNrhfcB2GDAlTuyfkAmVYbwaCMdv\n9kY/S44qOMuWV8xwjxRgN8SpRH6oPx5bC7XWP98fmXmERFQjHWbI1KX4VJdCcXtGJRUxKrRHXklf\n2pattA5jyMGvP4/0kBoQKROB6i+FMdoUywc9tNxb1FJxuL+zBHhnl3AHRYozg15VGDHHZukvVN3C\nmgrNrdv4pU5zsffkjhV8wGVAK8rZ2/XYRcI8k45xLHQSO4BGBrYFONmh9GU9YqHQvFZSecJoKG9O\nHzNPjjn1iQttzFxmFqhpN7EIudqGbe3QFXVOKqkCCf/w9veftn5K+Wkwmw6+rx/rxw0VuREvRGHH\n3Eg3kh0HXEnHJMn3Y9NQwxxXYfncEBrVI6d3bHX1RE3Rh474bbuDe9j+svs1JxgV4U2zp/dGn6dx\npSmHnjMnCm95zXyJwXN5wh4vxrqwWhwG1Ur15JubxmkuUdiAtAHypLRRxLoXok3d5CvEceSplQPx\ngqpOxXHm8maaA4qeJmQpLel+duI4crBFjNbOa9iGMW5jy5xZmyPdoCB7rs9qqtc5km82D3G7n4mK\ncX3RUhXh7Hr9qvlVxfpbG0QyHSVHKHlbtFZcnz+phi+Z/Vo5IuqcJW8jXirRO/jnw59EyAYmZ/wI\nfxFdApbvNA6vqonvcZMnw3JKjaDpojTN3N11AEE/30jFMGnFVFGz5kbFZVGRQXvxXT7OFDTAVx8J\ni/mvA20YDmWJPWg6wSXqOcyWBoe2ofTpo4PwonOSW81REl3vxbofvzPK7snSPc3Zfao53pNZ4YNb\nvzaZ9PFL4RvYJ+FbeENE1Dy0NZ61OuPijXOeQDGWYEHK8NQVcTlWJhau1YzTew6/euZKCKuY0ey7\nqJjMTFoN4+NT8v68hh/2kB8zaXEivNNKTCdEQInx4FdWCif84atP+G9DrEIf/tGODW0iN8eB8/AQ\njYv4v/YMTvYDRjHDXN8EGV0wnBvbaewxlJvgD6ii7yUBCuV/5XDUuv1ekqBYBLt1eS2R/wBE3uXX\n'), ('util.commands', 
'eJzdWW1v2zgS/u5fwXPQs9x1laDFvSBA9pDdJnfBtkkucS9XtIEgS+OYG4n0kVRc76+/GZKSKPkl\n2T3slzPQOhLJ4bw888yQHg6H55XIDJdCs7lUTFVCcPHAMlmWqcj1cDgc8HIplWG6mi2VzEDr+o1s\n/jK8hPrvZZEaFFXWz4V8eECRA/xmJ/VT/ADmA/4JKkoSkZaQJOPBwKj18YDhxy9dcfHu7ZwXsPEy\nXXL77vrz3cXlu7coeKoqGMC3DJaGXdiZZ0pJddybdp4WGgaDQQ5z0iXJyjzCfxP2+vXjKlUPeuxW\nHLBslTOumV5CxtOCccHMIsXtgaXFKl1rtkqFYRwNVlwYQBHwBILxOb4baSak8YLg27LgGTfFmmUL\nqUHY92431Mj9EWdyuY7GztA5G+HuI5JB+7oZTq926Rc75x4lSE3uxCe/Hu2KuZjLaOjDeMxup6c3\n0+HO4Vd6yF4FEY4Lrs1b9EvBBZB/xm4pQeQR1hP2lBYVtLrF3IDCf6WOxq2eWzeym02cFG1UZCWh\neBeSEtQDJCCeIvznRQlY0RtnKP7BlRShu/x4XC3z1IBdaN8rMJUS9bDfAAG+M+YI9ptKMBxiUcrI\nBUzOGU6oShBGj2PGblKuIXTUj2lRQH7tniziMHxWmllAnUYIAW4QMNwsMKbizS+gJAq7mHcmOX0R\ncVVGwuZVUawnoSVHMaWj9+wWKzze7oA5V6B0BHA6x9jUecdmkKUVmoAwzqUYGYdiNIJMJW24WNhQ\n5jV60fNPqdKsrHKCwwMKtxNlZZaVaQCL80b7wErjBNY2wp0Rp3xDAPYBZxOxxPSfj/UOWDldjoft\nJO+yIFLZArLHJENTt7nNM8feyG5B9qhR4ezm5upmNCFBCQ2dECEF+hBwXA5xgZIDO6FIlxryrrXs\nDTP7LD67fM+iV/Hbua7Xj78KzKv6IYD7fyoOZifoc3gSiDTKriWICFPMv5mw0WrUyaQ9ztQmxxic\nNEvxGZRqn1tnt3opKKWBVjEN6gnUhCE8FZWEk0spAF/rxU+wbh9ORvjfaNI4J/j0TEOpyVLBnH9D\n677gqvsarfUWbRDauTF8MyDy6MvoTTFqtblvuNkp9MxSjkvRl8vULPDtEmNGgiK3duyFBSvT5ZJW\nOh80W3HNhTapyMC5aJZqQNLELBx39if78Os+jFbAdLUXvmM95Hc4MVli4sucZ8lS1nHFedQPJFTh\nFFL1ybujowmj8fbVUfz2T1vD4T+1DELLLM0efSh/JfkSt6QBBBlRpoUhI27FxFgWQI2MlVabQpn2\nYtrepGwr67fQdkvZg20uYHPfdaFwzL0ZSrMKub1I+hxdLFdEt40LvIYOOW5z7DPgG2SVFWXSR9DI\nFQK7KpooNqLXYgZBpUxCVNNQBoYV3VHH4v+6zDxbQcgTKCQAzLVlxy2OaD25pVwVqUbmtSA9CWYO\nHCgW2NnavrU1Q9G2tGdsc3A8aEbQeBzktrFklEHHnZQjk3KYVQ/R0KPaQxBBZRsulY07C5y8kxN2\ndLyRu7sqUmBBf8lvKVF9GXXOdAYA+/VNDdXzCR2pbEJ0EvhQyNWOngK9QYNvwoh9vyd/6HOACmsw\n4RIjWfokeY6nhrQs7UHKZ3w3WCEscN+ewbXznUY7nI4a91ll000BKshBpNBOKqLGPHqlx3gS2EPm\nUX/9JFBwvBnTTkcXfvpyop2UtCnUN2tn9otU37oDGQ8WCdZ4a6zFTY61w8vAxRPGH4SkmhrH8XBf\nIfNbb2vv7NBWpJIW3lbUoykuNWljQiNvU2Aa4k7FcK8Swz4sMcvy8TNrJvWeWyDwzNJbCgw5zRBE\nmuDgA+U2HRyjvkbPefH5T4CG/1lWTTgBE1gO0AXAMuo0M3VLhOfpxJUEx/lcZEWVQ+L7WnuLMKHS\nZhIMcP38a1uatn0ISp3rMLobuvKHPQaYurduOgc/M3c3FLUU7
D7xQa2IJrlpJmvcGFmqPaASbSps\nI7xQbC4hLWPnqDsXVXfvsZYV0wtZFTmVc6rttuw3jQxSX5Yu0RbANq1AI/G7lJUgm600pxeLvsfx\nOaxwuaw0eWC2NqDHk0bNHNK8kNljc9rlfXeEfYxVu1Oqb6fvrz5N3amuk5LNZCqfg+c6nN/nUOu9\ncMKGbdbtOuju7UL8iSscvLg+a05e7uv53OnaXO+KjMVNoEmjtR10W8eIlLxbQu2oA3Qmc2B/2Ogu\nXlK3e1J8EQ+2oQ6oTr3NLujZq4HORDe8cW8QdJ0vuRlAUmwVOWAfsRPHBQpc6njvufxl0qVpU7za\ne4C4cXOwfeu13+X6YP/tAZ7QnyChQ2xE/7W8NqXcp64f5yyLNANiNHs9qBdYZIpYlcgk3v6VVI8a\n2cfQCaESCEx/rhK5XOmYTbHk4QRkkB8gVVhnrIOubk/PrUR32MrBHaWiHyR6fIUGz5Us2aziRT6T\nBsk8fYK4vrceB0eYugO6IWuIz2w/bO0Z1JmecJ14fbbfYH7StDJxZtVTGXUMLXZ6o85lPWQ1OxKI\n2wsCrA06dLHDkfUyOicv8GA3U/IRz3TYxD3qMBtqIVzTUF8IfXCGi+R+jfYLeomQA/YvPNTN1zZk\nOVeQGanWhBPiisMVHfgOXR8CbWgrpQg8dD8y8Dtli1LmdqMJO/rL0ZEPFC2huxiiZOkuqXGXvqZ0\nAre/KbgbY2vTz5ILL49GxoGTMR/vXMAmtqmuT6wLxBOzKtNtQsm1tud1qpk07JwRyLGndjzRHbaG\nA6cajJwsmS/yxAaiFz2n6gkbCTPqBq6FSWrvFqLGNHu5dJdc/TTe7DgP2AXVZvHoKrQ9Mq5Q3xxT\nD0/hE8wZg1MCK7EdvpxukVOmGcoBykws0aS6teViVLIHaTsDyQogCdz+UGGZYIucN9Qf+uj2gOki\nHdh19Ocm3Bu4pGA3U3uWh1zVzglYst+cH7D31gNYnm3zQor0sqsbgzA5dmmx0yoL4t4sn089bWmg\nbGCNTHwQspPtGfs0RDc/AudZRizlLwtyt9aOxLdQm15rAyWVc/9bXezetL8/+RkY02joswM5c/iR\nZ0pqOTfDwG5fMu0PcJ3lsW3iNd1p4dHn89/vLi6fWbczG8K53qxtZNvUpzql39if7+Y8Y2FBqimV\n1iCAxYNZ6PD8xT6e/ju5Pp3+I24UuJb2DGQ9nBVyNgMFKl6u486FWaqRxEzX5e5CiXZq6QjpsGir\nquM2QoGfNvqKn799/Tpi39mVe2pGs2zDseEi//vncZhWXVRv4dHA7/Vd8iiHgh2es8N/siFW0RGe\n/brVYDPN+hIsttnh7XYZYe/UKSBExOnM/xLc/C4c34I5x+9TYxRHWgN9F/WdNwmmn198OEtOp9Ob\nix8+Tc+Sy6ubj6cf6p1v8ZABjuDxFOLwgp2UvZJNLbUT+5VAHZbeFhLnxf7+m4hv9XkPBRggCzaX\ntSVvPkdHUC7WP33H5wguWqU3luEXvnodvx6FFRGnJin6CLFlhX05um8vxVyldO//et+BSJ2L8YjV\npdc+xr1ClWE3zkXVcv+LanC4VaviH3fH6/3FzdmP06ubz93d+1TwIvp/MYYCFn8RkDY32BHlnprt\nfNuowvsa/lug8V+mJBic\n'), ('util.retry', 
'eJytVk2P2zYQvetXDFwsLDuC4C2wORhxsUHQFgWKnHqXaYmyiUqkQ1LxGkX/e2dIivpy0h6qw1oa\nDh9nHt/MjmivSluwouVJrVULdSdLq1RjQPilm2ZX49dKJS1/s4049YvB0jLJzlwnwdqo81nIc4K/\ncOi/8jO3v+Mr12lRSNbyotgkSVLxGjS3+p6y0golM2DW8vZqzeElA9NwfqXgDu93GbTsrRgsL7AF\ntCYQH4dT8LeSPJQ0h/Tn/j3bZFA2nMnuevisJMdj9Bkd0Pznzb3+9fdm77BWq9Un1jRw9AGtgdHB\nou1aUDVaQ3hrR5qBTlrRgLBgurLkvDJDRJgb6xqLyYNV8JLDMUa/BmHAXjjIrj1xTciGI5uVIdcb\nEzainLi9cS4jL9kM9/0OmKygUt2pIRNn5cVT0W/J0C3CTbOZULrOAY5zEl2kDGx3bThuiTiRWsqD\nYfoX1TUVRgsl684Xm8NvNQwwoDBbTa4S/yjDI1AjjOUVCPnobKY5aCYMOjgJ9peSEXl3uAm8qNOA\nFVxF2/JKMMubuwvjGK7e5XLV6quo0ItYK/Gm2QkzwwsksBHrbm0KBqy2mASmELMnxD7hz4pU1bVc\nWhOBQohwZYZCwwsTnpu76nSvSV92BKf5l05o1NUSCUPEwzTKBCOSlIEjHnFckbp1ScH1WxtuTETO\nI86R9L526R+9+D3P/SU7NYnSkkBiFBQ4pQBY8YOY0HjsKVxj4bgFSpR6Q7CHwt6M16SyMXWlB9dg\n876inlY8fBj6wX6QjzrnFT9153Q19X6qwBHgJDc2r+AJ0lHbgOkxo66z8YFI7GLP7u12EUiQhA+H\nWI5DJKjd/QSWQhOyVunKCXsP1FeoRJ8MysJeXA/a41ffhPz7agISn1U4EX4IKfQN01id0u6Nf/VQ\n+CFD+LE4uO00qsNtS7fklcF2G/yjqy+/RTNdphZYj7lREQwVv4dVRl8FMXD4Q3d8Gg3ebrjt/SLf\nsJAuduBNPGL+m4T/Kr4S36QyidwSbWM1Ttih1jE/b5DNT7D7D+f9wlAfVVCQu+kq9vUTrxV1M/LE\nJYzl8T3TMyhw4UPW3K2n3/EaAj+M3rfw48JzluWkFJYZz7En7hNvGg2E7AZjLSTKf1YiEt5RbQ1z\ngHB9YOvV10vUfwWheoD1eg0f8T9hqTSz2EKQ2zBHbHLszqylTtYZHEu8/+sA7tmiA2ulRhrL8zyZ\n+8Zh5Hm3G48jz7sB5cR0utlPYEKESfQpImRRowIVxkmNebTt1Q1a3jqeIMZbyeWKA9S8dveP6tyz\nQXhh2PGbwrjjfxBjxPS39Ti7gmR21DLE5PFqyB3v+3U2OsY5EEsjBP3vIlhwFlEKYb/D0v/M0CN2\n7oLjNNTHkvwDPQB6iA==\n'), ('util.git', 
'eJzNW+uT27YR/66/ApF7IymWeEk/Xuam4/iReJrGntiZdMZ2JEoEJcQUIRPgyddM/vfuAyDAh+S7\nNkmrGVsiCSx2F7u/fRA3Ho+f1eXGKl0aketKqNLKKoUb5VYcld2J3XY8Ho/U/qArK7Txv0y9PlR6\nI01zp66KQ1oZGV0Xau2vKjka5ZXei9qqItno/T4tMyP807pcbvbZHIbt9Y1cHlK7m9PdD7WSFp9F\ns3NVSD/TpLlc1mWhyvcjv1aht1vgfwTf4tpfJVtpv4Ofspoul2W6l8vlbDQabYrUGPFE5mld2Fe7\ntJJfp0ZejQR8DvBo1H0EFLu3pkgok7lY7tP3cpmujS5qK6eVPOgZk1K5wKvE2LSyBhU7HaMYV5eX\nYzcEPw/EP4CCcE9QhUZ4cs0gVA5wgfTeFLKMCb1rBuFTGOSfXZixuIDtS3ByAiTxe4r/zWiKLIDD\nMRIRpbZgBUTgqkuuS4AkHPEAW1c8yykD9L3ES1J2rIu1sgZoeXtJUMpDoWxEbaeN5SFgQsmHWoM2\ncVpSSlvozVyMx7NRpIv+QGKzMLZSh+kYVBOmOE69KL9oVU5xvblgdTD3u9QA9zfKgGdMM4mP/aUT\nA9ziByJlxOuqlrzFPELIj8qAkKBGnIoOhDNsdRtpNDbu6ZvJVtnJXEzAWvFrsdAl7Ekp6aL8chKW\nfzcXm2N2jYRn0f6QUMgI7+fHjTzEXpo8TotCZi/56mlV6eqqO/tZWoD7xvLnjeg57uI5yWlAR/DE\nKZyfbdJSrKVIxbpKy81OANrYdCvwWXIfFZmdPi6AKKkmmzTc/TmKUSVYKmtlDf5/Tc+CYp7DY5UW\n6l8SPBcMYX+wt+QVRlld3YrUsmbE85x+eI0BGgplyonlKXOhLOBvUaDGGBQz1ibMW+HCKxhOYs2F\n3ckS1Qp32VH9xE0lUwsTvXZho9C7vekrk6mKZIkgCAwwUWWup2NaFuMAgMdctNUawe40PJGFh078\nYDhBfeF6BQg5sBgNi3CFnJGVm89ao06x1RkGEralyzur8a42QWbamd+WYEhamEDPH4hv/BbloOb3\nQtcWl4ebADqw+1Y7/XNM3ctM4QUwJTdgCjgENORoscxoBLSZ8N8tW0YifmLP2SHhHez5EQccagA8\n0AFodw+hSB0K3nrj6MF9AFe07AIZMRiqMjYOFBu424ElbnRpUxiK4VjTDFnamENH7TtpJ8ZLA0SR\nv7YgqjK278CwFRgRYaSJrYRd8MUrcra5iBQO+pOJrKoSgs21+OsX7a14IL4H602blUFFSCFJEgBL\noXNii4UweEn+xU6Vdgg1JFr3q1ShnztO0J8CAwBBYKgNCCEMMFDjMPr1YcJe8m7AF07NDnNGbSsX\nY3YGmDhzcauFhnjfI5JZAlmKtbF/DaC0Uwio8AYgKhMwjWziPvjQhsTeliOqgqQRvr7UB0hS3oxh\nMfBXcN+bBcV9vFgs4O4CVhlH4D0XgBXgTdcxkecvn85iM8EHyTEFLJ6Jz65Fx1JaTDbWWNtDjWkF\nzeU1ErDpbDpLOFEIK6BCga0Imkpd7QkxBrCKKc9aUQc0DLOnDaFr1j5gYnRrgNY4QUXNehGMSf4+\nMQxTM8fFCYthT4LcCsADf6OlBLdDZOco9gx+NXHHMEAphg02Nmtkkc9pRiW3dZFW7aE07JJkdkYI\nSbesbN+qRwN+BACWK5cwrbUu+BeIxw8rmZB3skeeMk0qPO5mfJHVscOYJUn/SZtSeRiLWTluxjjs\nUTYcA50tDOAJTsAxscY8Ac4oplkr3c3c1hvYeooGlG3POTK4/U8LiFMlYLzpshMbDGXpoF69/gXM\nwTCc5Rq/A4EJL07Ul27kOaLMRkTVRVkqQWmXAm0YdZzMQGqRR8lGcqwUJP/jC/O2xFqntbSHyk0h\n0zKuRR6I10cNNpNDfNvDMyPGNAatZK+zupCYZBx3CvJVir0QN
Y9SHFOIk0aLPK2SBpxbSSpRIXPM\no/+zicM5p/wTpsbMplm2xFTF+r3iC6qnmotIFnCgR1mG6M7PKLPOxCqatvL+DEUU4JPHf0wXVvhj\nxVYOu0MNABi8itZZeRftScuDyAQyzsiHOY2kn0UG6UZAFXdnSV9JyygFkwhdvNR34BGWXMC0+/G5\nbfjs8ziMn54zxs8bWbopcwwC32PKojhlcduVaYm5ioN4FerGDugFQRY3d4W28/Y2BG3IORaglEp2\nwA3vm2mUFOypHwHJnt3sphX6oHk4ffvq4Uy8neYSbr6d/QWdEsZIs0kPqMOgvTkt1Arv+8F4vk+2\nla4P0y/7xnM/wznvIIM2j6lZJtf1FiHmCs2BXISHIkiE7sX+1jEFWjlrNj40RBOuY667QXzUnwCg\nhCkbmtNQDYesmharUDahjPD/9AgQemFmjvfTypuH9aIK8F5+OxDC2kwCbrR5vDCf5Cswc3eo9N7s\n2k1z0WpwXKMeQ6vFXdaHDOLOEkdeU8UdlOBbgNfdniDoTGEeZhwNigdMotMxwI6fAdeF1ICKshUO\noup+B/uz8rysEDVWjs+V2OzkBiorqjqxM0rUGMMTNpMnmsMV1o20BOw6VmO8yi49AEDMwbs3RU2q\nh6TMqHVxC6zq9VpW2EGlVIMaOU3vwYlFDIINzLkEttjagOq1NpIgzY0Sawk4IhvGnMiNHTf6Q2rD\nTdiWmjmFkOWNqnSJHd3p+Jvnr5evvn30w9Pl149ePV0+ef4D2A3qfDa8St9bmiZl466tpmWbi05V\nQImMCZvezB2y+JgAstBmkB5EDJI+qRkbZcLNyMGODVXouJehFURuFGY1k1pFG7GBfa1moGtuobW3\nGyQgeG0V6CYaytr2I1x18pS+wHDbyyCzx7QqgUvgV9dFhuW5ay3EbYoL8xVUHCZdU58Dn8B3LMsc\nV1qi4ANsxhZDqu497O0D1Sv9FjfXHp3q/DF6H/JFkzr9MVdFnyjL3Yhust7vi7U0BYDo0gOBjgtV\nFHgzNVNDJd/UZ19FLtzr3LHFhwZYJN85a+x2YkKf06UwsGVosAAJgJd0j+j0bazPTqhJXAXWN9d+\nX+6BeAGLVEcFewziUqICOmmKIv+hZ4NY774DUrvvNuAzWvueH72eIazWdcWMopbijJnUobY7Kw5F\nupFnfTx24s37Jb3Y+lSVRIqB2lCVmfyY4Lzx7IxlNYQHzGuooRrGt/coaoEODDmzhU5zEDuOEnJX\n0N4BQg24OVsw6dqpLm0i75wDHMpzlI7CLr1xwat5z5IWmI7eUjfd6HnTPIWaH5UsSknrOAKUiYKV\n3todvhBkr9dLvn0ddYviVzmwW+2deoAFYKbRFYmjwLQwB7lRuZKQdENxiD1azJ7ljax4yVC+h1XD\nmwl8Bdd97dJ648Srx5ylG1unBcRsZCIXbM6wNHDoRMc6iAWPSPhMgAz56PbAO3L+aS7RfD/9gmxI\nWdT1CZtsmi1ym6PsydX9zvj7V4OY1QWJZ0QCnRUkM4wRjeu2xvYiIhN4/eLJiyvxLWAb+CYtzHkq\nYYeByuU9Kc1c2nRrLv8Jnx6R6P1Yz5riD1GP+zIc5jrwNOvNHX5pcXeKPUjsvBO5V7sxaO6V3ksy\ne7CB0oojpGzbzwbGPeZgFSEkBpJKLrgd350QgIu6/2FPaG8hUC7a4W8gmvhPHAfPDQuvBfxn0Fju\nt8/Rfrg3XnjblTHXYw0xRJXj++/23ej+IXseZaLNDpzMQO+5Cffd9n6a0V3sxIj2Zve1Pbj1saOx\n1v8jHzuRNP+P5AcXhmyOsRONh1u6oaHBgk7Yoia+A+JxOkqihmqVH33c51bkRh9uvYquKPn3UeLK\ntwJyX827KBMFGYIahXgcOSAe34HYAhE4NVGUjsNGs0Y7Tf10hCOIagdrp4fLCzOhTlcvFg7owLCD\nIIM+fgO/xkJSgy8wPZHxk
NRhS3NXvPYkDENcyhDXO+4Bnp6hnZqeyI6bZkifBZVHfY22oNxpHzyL\nAXQaIxmaHk/1bftTOTw3V9qtFq4iOXHvN29C4+UxUjWhCY5bSim7wZ5J04khu4bbFMgg+8R0jmDB\nv+iifDMR4jWkT0ddUV1I5uyPYdCJjju3ULiYodNu/U4K94NhBC5CY1o9H6TO4nePh6CUUXltGuZq\n8JEwOdIWUXBKJBKQTw+K506ZNM0dt7XnK9wTJSj2NlngIcx4ZC3q0lULkaLcnChaYvua79IZiS7N\nNt3HsUIJbXhC29kGgb9508s2yvM6Vto2wuj3kDN3X/b6j4sQf5e3a51W2XM8U1LVBzvAUi9tult0\nkf7xdAxhfl3IfdvSnDpP6gc/eKJElXVYvh8/g9pfukMs8RaKPIXCMvsKvvnhOoUy0OrQD3aW0n0T\njOp3RyrexW2YwTDk0/ofwYv5BMflYuHkQ2/+WwCjfZZQqzSbThaLUi+oLtW1nQSL9WGrNUl+tDjp\nDb6ZpvNu0UG1TmsyuzqxHD+dBIkbqgEL34XTIc25EEd8UHRnYdzojIKbx9rBYDDYFo967CFdbdCV\n4jtAaQsyXG+b37G4Tja3tV2TOyEYKqVCUPUAiz0lX9kPQxAznTVvN3HlqE2gaSorsa7okJNbHtb7\njvOPXVpuZYDFTJkNuFl0eM61MLpFP8Sbo8Iak9ZOrRv7EyFrM+rnL8SUqxpaFi7XstDHGVW+utpw\n8c0lJfVFHJkMjDGHf+WGMhlEPb3fA5arzPj30nvq7iPAc88EKO35NFrpzj0hHZvC00wYC7pJIFbx\n6Qv5oVaANKgRoD1piOD0xYJnTeYeQJQ/EEY9nAo1vr4VugAuBURFQ6fINb1dGeqj9LteXSf2vuWP\nRvF784bGQzH5+YtJdMg5GH337GcbdxwW9ByVHcLnT5MLc7lPIfuqOINrzPsMmrVnc+437bx96uT7\ndxWaCXuZ7yL0p3y7X6V0Hbzv0Z36cSjh4gHY/+hkWNR8Adv0zkVAfyLfwiMIhA53TpS4O9RLlOgs\nYpwuuQwpfu/UywfukC6cCv+ocVbsYPA/W+/9udG8KRn/D8P5A/FYlzeycraBzeCy+dMHPopGh2sn\nWMpxyRhOVTvjpz9RGPobjKGEgZTR+Bwd+ojThmDTcdbwhDqZbHj4LPQTmSXqAXKnEUq7jWziBebO\n6a1vRTMxKE/1RnHjVUOsoLNOrkFKb8GpGkhxxUNdbSV6CUY2d+TIydTOTpCBySyAbwfvVN7y5k7J\nFoiNH1JL0x1uuPw1nvTb5a+O7m9X7VERfESDxgk41z7F9+29yjLATQsyW4gTX0THIvuW2Od/B3W0\n+aPZnZ0IOL+Doj8/x/HnEad/ih7/O25mztFPhK/4kJWLXPTnOL2TVZzzNClBOJS6wvErn+AVt3R8\nIjom0SRyJ48ohwNW7ogyXnz79NETf2qP/yztPqeoXHw4czr03yOfFDU=\n')]  # noqa
 
-### Load the compressed module sources ###
-import sys, imp
+# Load the compressed module sources #
+import imp
+import sys
+
 for name, source in module_sources:
     source = source.decode("base64").decode("zlib")
     mod = imp.new_module(name)
     exec source in mod.__dict__
     sys.modules[name] = mod
 
-### Original script follows ###
-#!/usr/bin/python
+# Original script follows #
+# !/usr/bin/python
 """%prog [-p|--props-file] [-r|--rev revision] [-b|--branch branch]
          [-s|--shared-dir shared_dir] repo [dest]
 
 Tool to do safe operations with git.
 
 revision/branch on commandline will override those in props-file"""
 
 # Import snippet to find tools lib
--- a/testing/mozharness/external_tools/mouse_and_screen_resolution.py
+++ b/testing/mozharness/external_tools/mouse_and_screen_resolution.py
@@ -10,26 +10,26 @@
 # Target:        Python 2.5 or newer
 #
 from optparse import OptionParser
 from ctypes import windll, Structure, c_ulong, byref
 try:
     import json
 except:
     import simplejson as json
-import os
+
 import sys
 import urllib2
 import socket
-import platform
 import time
 
 default_screen_resolution = {"x": 1024, "y": 768}
 default_mouse_position = {"x": 1010, "y": 10}
 
+
 def wfetch(url, retries=5):
     while True:
         try:
             return urllib2.urlopen(url, timeout=30).read()
         except urllib2.HTTPError, e:
             print("Failed to fetch '%s': %s" % (url, str(e)))
         except urllib2.URLError, e:
             print("Failed to fetch '%s': %s" % (url, str(e)))
@@ -38,30 +38,30 @@ def wfetch(url, retries=5):
         except socket.error, e:
             print("Socket error when accessing %s: %s" % (url, str(e)))
         if retries < 0:
             raise Exception("Could not fetch url '%s'" % url)
         retries -= 1
         print("Retrying")
         time.sleep(60)
 
+
 def main():
 
     # NOTE: this script was written for windows 7, but works well with windows 10
     parser = OptionParser()
     parser.add_option(
         "--configuration-url", dest="configuration_url", type="string",
         help="Specifies the url of the configuration file.")
     parser.add_option(
         "--configuration-file", dest="configuration_file", type="string",
         help="Specifies the path to the configuration file.")
     (options, args) = parser.parse_args()
 
-    if (options.configuration_url == None and
-        options.configuration_file == None):
+    if (options.configuration_url is None and options.configuration_file is None):
         print "You must specify --configuration-url or --configuration-file."
         return 1
 
     if options.configuration_file:
         with open(options.configuration_file) as f:
             conf_dict = json.load(f)
         new_screen_resolution = conf_dict["win7"]["screen_resolution"]
         new_mouse_position = conf_dict["win7"]["mouse_position"]
@@ -99,51 +99,58 @@ def main():
         current_screen_resolution = queryScreenResolution()
         print "Screen resolution (new): (%(x)s, %(y)s)" % current_screen_resolution
 
     print "Mouse position (current): (%(x)s, %(y)s)" % (queryMousePosition())
     setCursorPos(new_mouse_position["x"], new_mouse_position["y"])
     current_mouse_position = queryMousePosition()
     print "Mouse position (new): (%(x)s, %(y)s)" % (current_mouse_position)
 
-    if current_screen_resolution != new_screen_resolution or current_mouse_position != new_mouse_position:
+    if current_screen_resolution != new_screen_resolution or \
+            current_mouse_position != new_mouse_position:
         print "INFRA-ERROR: The new screen resolution or mouse positions are not what we expected"
         return 1
     else:
         return 0
 
+
 class POINT(Structure):
     _fields_ = [("x", c_ulong), ("y", c_ulong)]
 
+
 def queryMousePosition():
     pt = POINT()
     windll.user32.GetCursorPos(byref(pt))
-    return { "x": pt.x, "y": pt.y}
+    return {"x": pt.x, "y": pt.y}
+
 
 def setCursorPos(x, y):
     windll.user32.SetCursorPos(x, y)
 
+
 def queryScreenResolution():
     return {"x": windll.user32.GetSystemMetrics(0),
             "y": windll.user32.GetSystemMetrics(1)}
 
-def changeScreenResolution(xres = None, yres = None, BitsPerPixel = None):
+
+def changeScreenResolution(xres=None, yres=None, BitsPerPixel=None):
     import struct
 
     DM_BITSPERPEL = 0x00040000
     DM_PELSWIDTH = 0x00080000
     DM_PELSHEIGHT = 0x00100000
-    CDS_FULLSCREEN = 0x00000004
     SIZEOF_DEVMODE = 148
 
     DevModeData = struct.calcsize("32BHH") * '\x00'
     DevModeData += struct.pack("H", SIZEOF_DEVMODE)
     DevModeData += struct.calcsize("H") * '\x00'
-    dwFields = (xres and DM_PELSWIDTH or 0) | (yres and DM_PELSHEIGHT or 0) | (BitsPerPixel and DM_BITSPERPEL or 0)
+    dwFields = (xres and DM_PELSWIDTH or 0) | (yres and DM_PELSHEIGHT or 0) | \
+        (BitsPerPixel and DM_BITSPERPEL or 0)
     DevModeData += struct.pack("L", dwFields)
     DevModeData += struct.calcsize("l9h32BHL") * '\x00'
     DevModeData += struct.pack("LLL", BitsPerPixel or 0, xres or 0, yres or 0)
     DevModeData += struct.calcsize("8L") * '\x00'
 
     return windll.user32.ChangeDisplaySettingsA(DevModeData, 0)
 
+
 if __name__ == '__main__':
     sys.exit(main())
--- a/testing/mozharness/external_tools/packagesymbols.py
+++ b/testing/mozharness/external_tools/packagesymbols.py
@@ -65,10 +65,11 @@ def main():
                 sys.exit(1)
             zf.writestr(filename, contents)
             count += 1
             if not args.no_binaries:
                 zf.write(f, debug_filename)
                 count += 1
     print('Added %d files to %s' % (count, args.symbol_zip))
 
+
 if __name__ == '__main__':
     main()
--- a/testing/mozharness/external_tools/robustcheckout.py
+++ b/testing/mozharness/external_tools/robustcheckout.py
@@ -46,16 +46,18 @@ cmdtable = {}
 # cmdutil.command.
 if util.safehasattr(registrar, 'command'):
     command = registrar.command(cmdtable)
 else:
     command = cmdutil.command(cmdtable)
 
 # Mercurial 4.2 introduced the vfs module and deprecated the symbol in
 # scmutil.
+
+
 def getvfs():
     try:
         from mercurial.vfs import vfs
         return vfs
     except ImportError:
         return scmutil.vfs
 
 
--- a/testing/mozharness/mozfile/__init__.py
+++ b/testing/mozharness/mozfile/__init__.py
@@ -1,5 +1,3 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-from mozfile import *
--- a/testing/mozharness/mozfile/mozfile.py
+++ b/testing/mozharness/mozfile/mozfile.py
@@ -24,20 +24,20 @@ import time
            'rmtree',
            'tree',
            'NamedTemporaryFile',
            'TemporaryDirectory']
 
 try:
     WindowsError
 except NameError:
-    WindowsError = None # so we can unconditionally catch it later...
+    WindowsError = None  # so we can unconditionally catch it later...
 
 
-### utilities for extracting archives
+# utilities for extracting archives
 
 def extract_tarball(src, dest):
     """extract a .tar file"""
 
     bundle = tarfile.open(src)
     namelist = bundle.getnames()
 
     for name in namelist:
@@ -49,17 +49,17 @@ def extract_tarball(src, dest):
 def extract_zip(src, dest):
     """extract a zip file"""
 
     if isinstance(src, zipfile.ZipFile):
         bundle = src
     else:
         try:
             bundle = zipfile.ZipFile(src)
-        except Exception, e:
+        except Exception:
             print "src: %s" % src
             raise
 
     namelist = bundle.namelist()
 
     for name in namelist:
         filename = os.path.realpath(os.path.join(dest, name))
         if name.endswith('/'):
@@ -113,17 +113,17 @@ def extract(src, dest=None):
         if index != -1:
             root = os.path.join(dest, name[:index])
             if root not in top_level_files:
                 top_level_files.append(root)
 
     return top_level_files
 
 
-### utilities for removal of files and directories
+# utilities for removal of files and directories
 
 def rmtree(dir):
     """Deprecated wrapper method to remove a directory tree.
 
     Ensure to update your code to use mozfile.remove() directly
 
     :param dir: directory to be removed
     """
@@ -174,42 +174,45 @@ def remove(path):
         os.chmod(path, path_stats.st_mode | stat.S_IRUSR | stat.S_IWUSR)
         _call_with_windows_retry(os.remove, path)
 
     elif os.path.isdir(path):
         # Verify the directory is read/write/execute for the current user
         os.chmod(path, path_stats.st_mode | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
         _call_with_windows_retry(shutil.rmtree, path)
 
+
 def depth(directory):
     """returns the integer depth of a directory or path relative to '/' """
 
     directory = os.path.abspath(directory)
     level = 0
     while True:
         directory, remainder = os.path.split(directory)
         level += 1
         if not remainder:
             break
     return level
 
+
 # ASCII delimeters
 ascii_delimeters = {
-    'vertical_line' : '|',
-    'item_marker'   : '+',
-    'last_child'    : '\\'
+    'vertical_line': '|',
+    'item_marker': '+',
+    'last_child': '\\'
     }
 
 # unicode delimiters
 unicode_delimeters = {
-    'vertical_line' : '│',
-    'item_marker'   : '├',
-    'last_child'    : '└'
+    'vertical_line': '│',
+    'item_marker': '├',
+    'last_child': '└'
     }
 
+
 def tree(directory,
          item_marker=unicode_delimeters['item_marker'],
          vertical_line=unicode_delimeters['vertical_line'],
          last_child=unicode_delimeters['last_child'],
          sort_key=lambda x: x.lower()):
     """
     display tree directory structure for `directory`
     """
@@ -225,18 +228,17 @@ def tree(directory,
         basename = os.path.basename(abspath)
         parent = os.path.dirname(abspath)
         level = depth(abspath) - top
 
         # sort articles of interest
         for resource in (dirnames, filenames):
             resource[:] = sorted(resource, key=sort_key)
 
-        files_end =  item_marker
-        dirpath_marker = item_marker
+        files_end = item_marker
 
         if level > len(indent):
             indent.append(vertical_line)
         indent = indent[:level]
 
         if dirnames:
             files_end = item_marker
             last[abspath] = dirnames[-1]
@@ -249,31 +251,29 @@ def tree(directory,
             indent[-1] = ' '
         elif not indent:
             dirpath_mark = ''
         else:
             dirpath_mark = item_marker
 
         # append the directory and piece of tree structure
         # if the top-level entry directory, print as passed
-        retval.append('%s%s%s'% (''.join(indent[:-1]),
-                                 dirpath_mark,
-                                 basename if retval else directory))
+        retval.append('%s%s%s' % (''.join(indent[:-1]),
+                      dirpath_mark, basename if retval else directory))
         # add the files
         if filenames:
             last_file = filenames[-1]
             retval.extend([('%s%s%s' % (''.join(indent),
-                                        files_end if filename == last_file else item_marker,
-                                        filename))
-                                        for index, filename in enumerate(filenames)])
+                          files_end if filename == last_file else item_marker, filename))
+                          for index, filename in enumerate(filenames)])
 
     return '\n'.join(retval)
 
 
-### utilities for temporary resources
+# utilities for temporary resources
 
 class NamedTemporaryFile(object):
     """
     Like tempfile.NamedTemporaryFile except it works on Windows
     in the case where you open the created file a second time.
 
     This behaves very similarly to tempfile.NamedTemporaryFile but may
     not behave exactly the same. For example, this function does not
@@ -335,38 +335,38 @@ def TemporaryDirectory():
     """
     tempdir = tempfile.mkdtemp()
     try:
         yield tempdir
     finally:
         shutil.rmtree(tempdir)
 
 
-### utilities dealing with URLs
+# utilities dealing with URLs
 
 def is_url(thing):
     """
     Return True if thing looks like a URL.
     """
 
     parsed = urlparse.urlparse(thing)
     if 'scheme' in parsed:
         return len(parsed.scheme) >= 2
     else:
         return len(parsed[0]) >= 2
 
+
 def load(resource):
     """
     open a file or URL for reading.  If the passed resource string is not a URL,
     or begins with 'file://', return a ``file``.  Otherwise, return the
     result of urllib2.urlopen()
     """
 
     # handle file URLs separately due to python stdlib limitations
     if resource.startswith('file://'):
         resource = resource[len('file://'):]
 
     if not is_url(resource):
         # if no scheme is given, it is a file path
         return file(resource)
 
     return urllib2.urlopen(resource)
-
--- a/testing/mozharness/mozinfo/__init__.py
+++ b/testing/mozharness/mozinfo/__init__.py
@@ -47,10 +47,10 @@ Module variables:
    * :attr:`bits`
    * :attr:`os`
    * :attr:`processor`
    * :attr:`version`
 
 """
 
 import mozinfo
-from mozinfo import *
+
 __all__ = mozinfo.__all__
--- a/testing/mozharness/mozinfo/mozinfo.py
+++ b/testing/mozharness/mozinfo/mozinfo.py
@@ -14,29 +14,34 @@ import platform
 import re
 import sys
 
 import mozfile
 
 # keep a copy of the os module since updating globals overrides this
 _os = os
 
+
 class unknown(object):
     """marker class for unknown information"""
+
     def __nonzero__(self):
         return False
+
     def __str__(self):
         return 'UNKNOWN'
-unknown = unknown() # singleton
+
+
+unknown = unknown()  # singleton
 
 # get system information
 info = {'os': unknown,
         'processor': unknown,
         'version': unknown,
-        'bits': unknown }
+        'bits': unknown}
 (system, node, release, version, machine, processor) = platform.uname()
 (bits, linkage) = platform.architecture()
 
 # get os information and related data
 if system in ["Microsoft", "Windows"]:
     info['os'] = 'win'
     # There is a Python bug on Windows to determine platform values
     # http://bugs.python.org/issue7860
@@ -61,52 +66,53 @@ elif system in ['DragonFly', 'FreeBSD', 
     version = sys.platform
 elif system == "Darwin":
     (release, versioninfo, machine) = platform.mac_ver()
     version = "OS X %s" % release
     info['os'] = 'mac'
 elif sys.platform in ('solaris', 'sunos5'):
     info['os'] = 'unix'
     version = sys.platform
-info['version'] = version # os version
+info['version'] = version  # os version
 
 # processor type and bits
 if processor in ["i386", "i686"]:
     if bits == "32bit":
         processor = "x86"
     elif bits == "64bit":
         processor = "x86_64"
 elif processor.upper() == "AMD64":
     bits = "64bit"
     processor = "x86_64"
 elif processor == "Power Macintosh":
     processor = "ppc"
 bits = re.search('(\d+)bit', bits).group(1)
 info.update({'processor': processor,
              'bits': int(bits),
-            })
+             })
 
 # standard value of choices, for easy inspection
 choices = {'os': ['linux', 'bsd', 'win', 'mac', 'unix'],
            'bits': [32, 64],
            'processor': ['x86', 'x86_64', 'ppc']}
 
 
 def sanitize(info):
     """Do some sanitization of input values, primarily
     to handle universal Mac builds."""
     if "processor" in info and info["processor"] == "universal-x86-x86_64":
         # If we're running on OS X 10.6 or newer, assume 64-bit
-        if release[:4] >= "10.6": # Note this is a string comparison
+        if release[:4] >= "10.6":  # Note this is a string comparison
             info["processor"] = "x86_64"
             info["bits"] = 64
         else:
             info["processor"] = "x86"
             info["bits"] = 32
 
+
 # method for updating information
 def update(new_info):
     """
     Update the info.
 
     :param new_info: Either a dict containing the new info or a path/url
                      to a json file containing the new info.
     """
@@ -119,19 +125,20 @@ def update(new_info):
     info.update(new_info)
     sanitize(info)
     globals().update(info)
 
     # convenience data for os access
     for os_name in choices['os']:
         globals()['is' + os_name.title()] = info['os'] == os_name
     # unix is special
-    if isLinux or isBsd:
+    if isLinux or isBsd:  # noqa
         globals()['isUnix'] = True
 
+
 def find_and_update_from_json(*dirs):
     """
     Find a mozinfo.json file, load it, and update the info with the
     contents.
 
     :param dirs: Directories in which to look for the file. They will be
                  searched after first looking in the root of the objdir
                  if the current script is being run from a Mozilla objdir.
@@ -153,30 +160,32 @@ def find_and_update_from_json(*dirs):
         d = _os.path.abspath(d)
         json_path = _os.path.join(d, "mozinfo.json")
         if _os.path.isfile(json_path):
             update(json_path)
             return json_path
 
     return None
 
+
 update({})
 
 # exports
 __all__ = info.keys()
 __all__ += ['is' + os_name.title() for os_name in choices['os']]
 __all__ += [
     'info',
     'unknown',
     'main',
     'choices',
     'update',
     'find_and_update_from_json',
     ]
 
+
 def main(args=None):
 
     # parse the command line
     from optparse import OptionParser
     parser = OptionParser(description=__doc__)
     for key in choices:
         parser.add_option('--%s' % key, dest=key,
                           action='store_true', default=False,
@@ -194,16 +203,18 @@ def main(args=None):
 
     # print out choices if requested
     flag = False
     for key, value in options.__dict__.items():
         if value is True:
             print '%s choices: %s' % (key, ' '.join([str(choice)
                                                      for choice in choices[key]]))
             flag = True
-    if flag: return
+    if flag:
+        return
 
     # otherwise, print out all info
     for key, value in info.items():
         print '%s: %s' % (key, value)
 
+
 if __name__ == '__main__':
     main()
--- a/testing/mozharness/scripts/android_emulator_unittest.py
+++ b/testing/mozharness/scripts/android_emulator_unittest.py
@@ -7,17 +7,16 @@
 
 import copy
 import datetime
 import glob
 import os
 import re
 import sys
 import signal
-import socket
 import subprocess
 import time
 import tempfile
 
 # load modules from parent dir
 sys.path.insert(1, os.path.dirname(sys.path[0]))
 
 from mozprocess import ProcessHandler
@@ -27,43 +26,44 @@ from mozharness.base.script import BaseS
 from mozharness.base.vcs.vcsbase import VCSMixin
 from mozharness.mozilla.blob_upload import BlobUploadMixin, blobupload_config_options
 from mozharness.mozilla.buildbot import TBPL_RETRY, EXIT_STATUS_DICT
 from mozharness.mozilla.mozbase import MozbaseMixin
 from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
 from mozharness.mozilla.testing.unittest import EmulatorMixin
 
 
-class AndroidEmulatorTest(BlobUploadMixin, TestingMixin, EmulatorMixin, VCSMixin, BaseScript, MozbaseMixin):
+class AndroidEmulatorTest(BlobUploadMixin, TestingMixin, EmulatorMixin, VCSMixin, BaseScript,
+                          MozbaseMixin):
     config_options = [[
         ["--test-suite"],
         {"action": "store",
          "dest": "test_suite",
-        }
+         }
     ], [
         ["--adb-path"],
         {"action": "store",
          "dest": "adb_path",
          "default": None,
          "help": "Path to adb",
-        }
+         }
     ], [
         ["--total-chunk"],
         {"action": "store",
          "dest": "total_chunks",
          "default": None,
          "help": "Number of total chunks",
-        }
+         }
     ], [
         ["--this-chunk"],
         {"action": "store",
          "dest": "this_chunk",
          "default": None,
          "help": "Number of this chunk",
-        }
+         }
     ]] + copy.deepcopy(testing_config_options) + \
         copy.deepcopy(blobupload_config_options)
 
     error_list = [
     ]
 
     virtualenv_requirements = [
     ]
@@ -80,25 +80,25 @@ class AndroidEmulatorTest(BlobUploadMixi
                          'read-buildbot-config',
                          'setup-avds',
                          'start-emulator',
                          'download-and-extract',
                          'create-virtualenv',
                          'verify-emulator',
                          'install',
                          'run-tests',
-                        ],
+                         ],
             default_actions=['clobber',
                              'start-emulator',
                              'download-and-extract',
                              'create-virtualenv',
                              'verify-emulator',
                              'install',
                              'run-tests',
-                            ],
+                             ],
             require_config_file=require_config_file,
             config={
                 'virtualenv_modules': self.virtualenv_modules,
                 'virtualenv_requirements': self.virtualenv_requirements,
                 'require_test_zip': True,
                 # IP address of the host as seen from the emulator
                 'remote_webserver': '10.0.2.2',
             }
@@ -168,21 +168,21 @@ class AndroidEmulatorTest(BlobUploadMixi
 
     @PreScriptAction('create-virtualenv')
     def _pre_create_virtualenv(self, action):
         dirs = self.query_abs_dirs()
         requirements = None
         if self.test_suite == 'mochitest-media':
             # mochitest-media is the only thing that needs this
             requirements = os.path.join(dirs['abs_mochitest_dir'],
-                        'websocketprocessbridge',
-                        'websocketprocessbridge_requirements.txt')
+                                        'websocketprocessbridge',
+                                        'websocketprocessbridge_requirements.txt')
         elif self.test_suite == 'marionette':
             requirements = os.path.join(dirs['abs_test_install_dir'],
-                                    'config', 'marionette_requirements.txt')
+                                        'config', 'marionette_requirements.txt')
         if requirements:
             self.register_virtualenv_module(requirements=[requirements],
                                             two_pass=True)
 
     def _launch_emulator(self):
         env = self.query_env()
 
         # Set $LD_LIBRARY_PATH to self.dirs['abs_work_dir'] so that
@@ -216,33 +216,34 @@ class AndroidEmulatorTest(BlobUploadMixi
         self.info("Created temp file %s." % tmp_file.name)
         self.info("Trying to start the emulator with this command: %s" % ' '.join(command))
         proc = subprocess.Popen(command, stdout=tmp_stdout, stderr=tmp_stdout, env=env)
         return {
             "process": proc,
             "tmp_file": tmp_file,
         }
 
-    def _retry(self, max_attempts, interval, func, description, max_time = 0):
+    def _retry(self, max_attempts, interval, func, description, max_time=0):
         '''
         Execute func until it returns True, up to max_attempts times, waiting for
         interval seconds between each attempt. description is logged on each attempt.
         If max_time is specified, no further attempts will be made once max_time
         seconds have elapsed; this provides some protection for the case where
         the run-time for func is long or highly variable.
         '''
         status = False
         attempts = 0
         if max_time > 0:
-            end_time = datetime.datetime.now() + datetime.timedelta(seconds = max_time)
+            end_time = datetime.datetime.now() + datetime.timedelta(seconds=max_time)
         else:
             end_time = None
         while attempts < max_attempts and not status:
             if (end_time is not None) and (datetime.datetime.now() > end_time):
-                self.info("Maximum retry run-time of %d seconds exceeded; remaining attempts abandoned" % max_time)
+                self.info("Maximum retry run-time of %d seconds exceeded; "
+                          "remaining attempts abandoned" % max_time)
                 break
             if attempts != 0:
                 self.info("Sleeping %d seconds" % interval)
                 time.sleep(interval)
             attempts += 1
             self.info(">> %s: Attempt #%d of %d" % (description, attempts, max_attempts))
             status = func()
         return status
@@ -280,21 +281,23 @@ class AndroidEmulatorTest(BlobUploadMixi
             return True
         return False
 
     def _verify_emulator(self):
         adb_ok = self._verify_adb()
         if not adb_ok:
             self.warning('Unable to communicate with adb')
             return False
-        adb_device_ok = self._retry(4, 30, self._verify_adb_device, "Verify emulator visible to adb")
+        adb_device_ok = self._retry(4, 30, self._verify_adb_device,
+                                    "Verify emulator visible to adb")
         if not adb_device_ok:
             self.warning('Unable to communicate with emulator via adb')
             return False
-        boot_ok = self._retry(30, 10, self._is_boot_completed, "Verify Android boot completed", max_time = 330)
+        boot_ok = self._retry(30, 10, self._is_boot_completed, "Verify Android boot completed",
+                              max_time=330)
         if not boot_ok:
             self.warning('Unable to verify Android boot completion')
             return False
         return True
 
     def _verify_emulator_and_restart_on_fail(self):
         emulator_ok = self._verify_emulator()
         if not emulator_ok:
@@ -309,30 +312,34 @@ class AndroidEmulatorTest(BlobUploadMixi
             self._restart_adbd()
             time.sleep(5)
             self.emulator_proc = self._launch_emulator()
         return emulator_ok
 
     def _install_fennec_apk(self):
         install_ok = False
         if int(self.sdk_level) >= 23:
-            cmd = [self.adb_path, '-s', self.emulator['device_id'], 'install', '-r', '-g', self.installer_path]
+            cmd = [self.adb_path, '-s', self.emulator['device_id'], 'install', '-r', '-g',
+                   self.installer_path]
         else:
-            cmd = [self.adb_path, '-s', self.emulator['device_id'], 'install', '-r', self.installer_path]
+            cmd = [self.adb_path, '-s', self.emulator['device_id'], 'install', '-r',
+                   self.installer_path]
         out = self._run_with_timeout(300, cmd)
         if 'Success' in out:
             install_ok = True
         return install_ok
 
     def _install_robocop_apk(self):
         install_ok = False
         if int(self.sdk_level) >= 23:
-            cmd = [self.adb_path, '-s', self.emulator['device_id'], 'install', '-r', '-g', self.robocop_path]
+            cmd = [self.adb_path, '-s', self.emulator['device_id'], 'install', '-r', '-g',
+                   self.robocop_path]
         else:
-            cmd = [self.adb_path, '-s', self.emulator['device_id'], 'install', '-r', self.robocop_path]
+            cmd = [self.adb_path, '-s', self.emulator['device_id'], 'install', '-r',
+                   self.robocop_path]
         out = self._run_with_timeout(300, cmd)
         if 'Success' in out:
             install_ok = True
         return install_ok
 
     def _dump_host_state(self):
         self._run_proc(['ps', '-ef'])
         self._run_proc(['netstat', '-a', '-p', '-n', '-t', '-u'])
@@ -364,17 +371,17 @@ class AndroidEmulatorTest(BlobUploadMixi
         """
         dirs = self.query_abs_dirs()
         utility = os.path.join(self.xre_path, "screentopng")
         if not os.path.exists(utility):
             self.warning("Unable to take screenshot: %s does not exist" % utility)
             return
         try:
             tmpfd, filename = tempfile.mkstemp(prefix=prefix, suffix='.png',
-                dir=dirs['abs_blob_upload_dir'])
+                                               dir=dirs['abs_blob_upload_dir'])
             os.close(tmpfd)
             self.info("Taking screenshot with %s; saving to %s" % (utility, filename))
             subprocess.call([utility, filename], env=self.query_env())
         except OSError, err:
             self.warning("Failed to take screenshot: %s" % err.strerror)
 
     def _query_package_name(self):
         if self.app_name is None:
@@ -445,17 +452,17 @@ class AndroidEmulatorTest(BlobUploadMixi
             opt = option.split('=')[0]
             # override configured chunk options with script args, if specified
             if opt == '--this-chunk' and self.this_chunk is not None:
                 continue
             if opt == '--total-chunks' and self.total_chunks is not None:
                 continue
             if '%(app)' in option:
                 # only query package name if requested
-                cmd.extend([option % {'app' : self._query_package_name()}])
+                cmd.extend([option % {'app': self._query_package_name()}])
             else:
                 cmd.extend([option % str_format_values])
 
         if self.this_chunk is not None:
             cmd.extend(['--this-chunk', self.this_chunk])
         if self.total_chunks is not None:
             cmd.extend(['--total-chunks', self.total_chunks])
 
@@ -483,17 +490,18 @@ class AndroidEmulatorTest(BlobUploadMixi
         elif self.buildbot_config and 'properties' in self.buildbot_config:
             # probably buildbot
             repo = 'https://hg.mozilla.org/%s' % self.buildbot_config['properties']['repo_path']
             revision = self.buildbot_config['properties']['revision']
         else:
             # something unexpected!
             repo = 'https://hg.mozilla.org/mozilla-central'
             revision = 'default'
-            self.warning('Unable to find repo/revision for manifest; using mozilla-central/default')
+            self.warning('Unable to find repo/revision for manifest; '
+                         'using mozilla-central/default')
         url = '%s/raw-file/%s/%s' % (
             repo,
             revision,
             path)
         return url
 
     def _tooltool_fetch(self, url, dir):
         c = self.config
@@ -508,17 +516,17 @@ class AndroidEmulatorTest(BlobUploadMixi
             self.fatal("Could not retrieve manifest needed to retrieve "
                        "artifacts from %s" % manifest_path)
 
         self.tooltool_fetch(manifest_path,
                             output_dir=dir,
                             cache=c.get("tooltool_cache", None))
 
     ##########################################
-    ### Actions for AndroidEmulatorTest ###
+    # Actions for AndroidEmulatorTest        #
     ##########################################
     def setup_avds(self):
         '''
         If tooltool cache mechanism is enabled, the cached version is used by
         the fetch command. If the manifest includes an "unpack" field, tooltool
         will unpack all compressed archives mentioned in the manifest.
         '''
         c = self.config
@@ -550,17 +558,18 @@ class AndroidEmulatorTest(BlobUploadMixi
             proc = ProcessHandler(cmd)
             proc.run()
             proc.wait()
 
     def start_emulator(self):
         '''
         Starts the emulator
         '''
-        if 'emulator_url' in self.config or 'emulator_manifest' in self.config or 'tools_manifest' in self.config:
+        if 'emulator_url' in self.config or 'emulator_manifest' in self.config or \
+           'tools_manifest' in self.config:
             self.install_emulator()
 
         if not os.path.isfile(self.adb_path):
             self.fatal("The adb binary '%s' is not a valid file!" % self.adb_path)
         self._restart_adbd()
 
         if not self.config.get("developer_mode"):
             # We kill compiz because it sometimes prevents us from starting the emulator
@@ -607,52 +616,53 @@ class AndroidEmulatorTest(BlobUploadMixi
             f.write(out)
 
             f.write('\n\nHost process list:\n')
             out = self._run_proc(['ps', '-ef'], quiet=True)
             f.write(out)
 
             f.write('\n\nEmulator /proc/cpuinfo:\n')
             cmd = [self.adb_path, '-s', self.emulator['device_id'],
-                    'shell', 'cat', '/proc/cpuinfo']
+                   'shell', 'cat', '/proc/cpuinfo']
             out = self._run_with_timeout(30, cmd, quiet=True)
             f.write(out)
 
             f.write('\n\nEmulator /proc/meminfo:\n')
             cmd = [self.adb_path, '-s', self.emulator['device_id'],
-                    'shell', 'cat', '/proc/meminfo']
+                   'shell', 'cat', '/proc/meminfo']
             out = self._run_with_timeout(30, cmd, quiet=True)
             f.write(out)
 
             f.write('\n\nEmulator process list:\n')
             cmd = [self.adb_path, '-s', self.emulator['device_id'],
-                    'shell', 'ps']
+                   'shell', 'ps']
             out = self._run_with_timeout(30, cmd, quiet=True)
             f.write(out)
 
     def verify_emulator(self):
         '''
         Check to see if the emulator can be contacted via adb.
         If any communication attempt fails, kill the emulator, re-launch, and re-check.
         '''
         self.mkdir_p(self.query_abs_dirs()['abs_blob_upload_dir'])
         max_restarts = 5
-        emulator_ok = self._retry(max_restarts, 10, self._verify_emulator_and_restart_on_fail, "Check emulator")
+        emulator_ok = self._retry(max_restarts, 10, self._verify_emulator_and_restart_on_fail,
+                                  "Check emulator")
         if not emulator_ok:
             self.fatal('INFRA-ERROR: Unable to start emulator after %d attempts' % max_restarts,
-                EXIT_STATUS_DICT[TBPL_RETRY])
+                       EXIT_STATUS_DICT[TBPL_RETRY])
         self._dump_perf_info()
         # Start logcat for the emulator. The adb process runs until the
         # corresponding emulator is killed. Output is written directly to
         # the blobber upload directory so that it is uploaded automatically
         # at the end of the job.
         logcat_filename = 'logcat-%s.log' % self.emulator["device_id"]
         logcat_path = os.path.join(self.abs_dirs['abs_blob_upload_dir'], logcat_filename)
-        logcat_cmd = '%s -s %s logcat -v threadtime Trace:S StrictMode:S ExchangeService:S > %s &' % \
-            (self.adb_path, self.emulator["device_id"], logcat_path)
+        logcat_cmd = '%s -s %s logcat -v threadtime Trace:S StrictMode:S '\
+            ' ExchangeService:S > %s &' % (self.adb_path, self.emulator["device_id"], logcat_path)
         self.info(logcat_cmd)
         os.system(logcat_cmd)
         # Get a post-boot emulator process list for diagnostics
         ps_cmd = [self.adb_path, '-s', self.emulator["device_id"], 'shell', 'ps']
         self._run_with_timeout(30, ps_cmd)
 
     def download_and_extract(self):
         """
@@ -677,38 +687,41 @@ class AndroidEmulatorTest(BlobUploadMixi
         else:
             self.fatal("configure hostutils_manifest_path!")
 
     def install(self):
         """
         Install APKs on the emulator
         """
         install_needed = self.config["suite_definitions"][self.test_suite].get("install")
-        if install_needed == False:
+        if install_needed is False:
             self.info("Skipping apk installation for %s" % self.test_suite)
             return
 
         assert self.installer_path is not None, \
             "Either add installer_path to the config or use --installer-path."
 
-        self.sdk_level = self._run_with_timeout(30, [self.adb_path, '-s', self.emulator['device_id'],
-            'shell', 'getprop', 'ro.build.version.sdk'])
+        self.sdk_level = self._run_with_timeout(30, [self.adb_path, '-s',
+                                                     self.emulator['device_id'],
+                                                'shell', 'getprop', 'ro.build.version.sdk'])
 
         # Install Fennec
         install_ok = self._retry(3, 30, self._install_fennec_apk, "Install app APK")
         if not install_ok:
             self.fatal('INFRA-ERROR: Failed to install %s on %s' %
-                (self.installer_path, self.emulator["name"]), EXIT_STATUS_DICT[TBPL_RETRY])
+                       (self.installer_path, self.emulator["name"]),
+                       EXIT_STATUS_DICT[TBPL_RETRY])
 
         # Install Robocop if required
         if self.test_suite.startswith('robocop'):
             install_ok = self._retry(3, 30, self._install_robocop_apk, "Install Robocop APK")
             if not install_ok:
                 self.fatal('INFRA-ERROR: Failed to install %s on %s' %
-                    (self.robocop_path, self.emulator["name"]), EXIT_STATUS_DICT[TBPL_RETRY])
+                           (self.robocop_path, self.emulator["name"]),
+                           EXIT_STATUS_DICT[TBPL_RETRY])
 
         self.info("Finished installing apps for %s" % self.emulator["name"])
 
     def run_tests(self):
         """
         Run the tests
         """
         cmd = self._build_command()
@@ -719,17 +732,18 @@ class AndroidEmulatorTest(BlobUploadMixi
             self.fatal("Don't know how to run --test-suite '%s'!" % self.test_suite)
         env = self.query_env()
         if self.query_minidump_stackwalk():
             env['MINIDUMP_STACKWALK'] = self.minidump_stackwalk_path
         env['MOZ_UPLOAD_DIR'] = self.query_abs_dirs()['abs_blob_upload_dir']
         env['MINIDUMP_SAVE_PATH'] = self.query_abs_dirs()['abs_blob_upload_dir']
         env['RUST_BACKTRACE'] = 'full'
 
-        self.info("Running on %s the command %s" % (self.emulator["name"], subprocess.list2cmdline(cmd)))
+        self.info("Running on %s the command %s" %
+                  (self.emulator["name"], subprocess.list2cmdline(cmd)))
         self.info("##### %s log begins" % self.test_suite)
 
         # TinderBoxPrintRe does not know about the '-debug' categories
         aliases = {
             'reftest-debug': 'reftest',
             'jsreftest-debug': 'jsreftest',
             'crashtest-debug': 'crashtest',
         }
@@ -763,11 +777,12 @@ class AndroidEmulatorTest(BlobUploadMixi
         '''
         if self.config.get('blob_upload_branch'):
             # Except on interactive workers, we want the emulator to keep running
             # after the script is finished. So only kill it if blobber would otherwise
             # have run anyway (it doesn't get run on interactive workers).
             self._kill_processes(self.config["emulator_process_name"])
         super(AndroidEmulatorTest, self).upload_blobber_files()
 
+
 if __name__ == '__main__':
     emulatorTest = AndroidEmulatorTest()
     emulatorTest.run_and_exit()
--- a/testing/mozharness/scripts/awsy_script.py
+++ b/testing/mozharness/scripts/awsy_script.py
@@ -24,42 +24,43 @@ from mozharness.base.vcs.vcsbase import 
 from mozharness.mozilla.blob_upload import BlobUploadMixin, blobupload_config_options
 from mozharness.mozilla.tooltool import TooltoolMixin
 from mozharness.mozilla.structuredlog import StructuredOutputParser
 from mozharness.mozilla.testing.codecoverage import (
     CodeCoverageMixin,
     code_coverage_config_options
 )
 
+
 class AWSY(TestingMixin, MercurialScript, BlobUploadMixin, TooltoolMixin, CodeCoverageMixin):
     config_options = [
         [["--e10s"],
-        {"action": "store_true",
-         "dest": "e10s",
-         "default": False,
-         "help": "Run tests with multiple processes. (Desktop builds only)",
-         }],
+         {"action": "store_true",
+          "dest": "e10s",
+          "default": False,
+          "help": "Run tests with multiple processes. (Desktop builds only)",
+          }],
         [["--enable-stylo"],
-        {"action": "store_true",
-         "dest": "enable_stylo",
-         "default": False,
-         "help": "Run tests with Stylo enabled.",
-         }],
+         {"action": "store_true",
+          "dest": "enable_stylo",
+          "default": False,
+          "help": "Run tests with Stylo enabled.",
+          }],
         [["--disable-stylo"],
-        {"action": "store_true",
-         "dest": "disable_stylo",
-         "default": False,
-         "help": "Run tests with Stylo disabled.",
-         }],
+         {"action": "store_true",
+          "dest": "disable_stylo",
+          "default": False,
+          "help": "Run tests with Stylo disabled.",
+          }],
         [["--single-stylo-traversal"],
-        {"action": "store_true",
-         "dest": "single_stylo_traversal",
-         "default": False,
-         "help": "Set STYLO_THREADS=1.",
-         }]
+         {"action": "store_true",
+          "dest": "single_stylo_traversal",
+          "default": False,
+          "help": "Set STYLO_THREADS=1.",
+          }]
     ] + testing_config_options + copy.deepcopy(blobupload_config_options) \
                                + copy.deepcopy(code_coverage_config_options)
 
     error_list = [
         {'regex': re.compile(r'''(TEST-UNEXPECTED|PROCESS-CRASH)'''), 'level': ERROR},
     ]
 
     def __init__(self, **kwargs):
@@ -116,17 +117,16 @@ class AWSY(TestingMixin, MercurialScript
                                            'marionette_requirements.txt')]
 
         for requirements_file in requirements_files:
             self.register_virtualenv_module(requirements=[requirements_file],
                                             two_pass=True)
 
         self.register_virtualenv_module('awsy', self.awsy_path)
 
-
     def populate_webroot(self):
         """Populate the production test slaves' webroots"""
         self.info("Downloading pageset with tooltool...")
         manifest_file = os.path.join(self.awsy_path, 'tp5n-pageset.manifest')
         page_load_test_dir = os.path.join(self.webroot_dir, 'page_load_test')
         if not os.path.isdir(page_load_test_dir):
             self.mkdir_p(page_load_test_dir)
         self.tooltool_fetch(
@@ -135,17 +135,16 @@ class AWSY(TestingMixin, MercurialScript
             cache=self.config.get('tooltool_cache')
         )
         archive = os.path.join(page_load_test_dir, 'tp5n.zip')
         unzip = self.query_exe('unzip')
         unzip_cmd = [unzip, '-q', '-o', archive, '-d', page_load_test_dir]
         self.run_command(unzip_cmd, halt_on_failure=True)
         self.run_command("ls %s" % page_load_test_dir)
 
-
     def run_tests(self, args=None, **kw):
         '''
         AWSY test should be implemented here
         '''
         dirs = self.abs_dirs
         env = {}
         error_summary_file = os.path.join(dirs['abs_blob_upload_dir'],
                                           'marionette_errorsummary.log')
--- a/testing/mozharness/scripts/bouncer_submitter.py
+++ b/testing/mozharness/scripts/bouncer_submitter.py
@@ -58,36 +58,40 @@ class BouncerSubmitter(BaseScript, Purge
                                 'submit',
                             ],
                             default_actions=[
                                 'clobber',
                                 'download-shipped-locales',
                                 'submit',
                             ],
                             config={
-                                 'buildbot_json_path' : 'buildprops.json'
+                                 'buildbot_json_path': 'buildprops.json'
                             }
                             )
         self.locales = None
         self.credentials = None
 
     def _pre_config_lock(self, rw_config):
         super(BouncerSubmitter, self)._pre_config_lock(rw_config)
 
-        #override properties from buildbot properties here as defined by taskcluster properties
+        # override properties from buildbot properties here as defined by taskcluster properties
         self.read_buildbot_config()
 
-        #check if release promotion is true first before overwriting these properties
+        # check if release promotion is true first before overwriting these properties
         if self.buildbot_config["properties"].get("release_promotion"):
-            for prop in ['product', 'version', 'build_number', 'revision', 'bouncer_submitter_config', ]:
+            for prop in \
+                    ['product', 'version', 'build_number', 'revision',
+                     'bouncer_submitter_config', ]:
                 if self.buildbot_config["properties"].get(prop):
-                    self.info("Overriding %s with %s" % (prop,  self.buildbot_config["properties"].get(prop)))
+                    self.info("Overriding %s with %s" %
+                              (prop,  self.buildbot_config["properties"].get(prop)))
                     self.config[prop] = self.buildbot_config["properties"].get(prop)
             if self.buildbot_config["properties"].get("partial_versions"):
-                self.config["prev_versions"] = self.buildbot_config["properties"].get("partial_versions").split(", ")
+                self.config["prev_versions"] = \
+                    self.buildbot_config["properties"].get("partial_versions").split(", ")
 
         for opt in ["version", "credentials_file", "bouncer-api-prefix"]:
             if opt not in self.config:
                 self.fatal("%s must be specified" % opt)
         if self.need_shipped_locales():
             for opt in ["shipped-locales-url", "repo", "revision"]:
                 if opt not in self.config:
                     self.fatal("%s must be specified" % opt)
--- a/testing/mozharness/scripts/configtest.py
+++ b/testing/mozharness/scripts/configtest.py
@@ -19,38 +19,39 @@ try:
     import simplejson as json
 except ImportError:
     import json
 
 sys.path.insert(1, os.path.dirname(sys.path[0]))
 
 from mozharness.base.script import BaseScript
 
+
 # ConfigTest {{{1
 class ConfigTest(BaseScript):
     config_options = [[
-     ["--test-file",],
+     ["--test-file", ],
      {"action": "extend",
       "dest": "test_files",
       "help": "Specify which config files to test"
-     }
+      }
     ]]
 
     def __init__(self, require_config_file=False):
         self.config_files = []
         BaseScript.__init__(self, config_options=self.config_options,
                             all_actions=['list-config-files',
                                          'test-json-configs',
                                          'test-python-configs',
                                          'summary',
                                          ],
                             default_actions=['test-json-configs',
                                              'test-python-configs',
                                              'summary',
-                                            ],
+                                             ],
                             require_config_file=require_config_file)
 
     def query_config_files(self):
         """This query method, much like others, caches its runtime
         settings in self.VAR so we don't have to figure out config_files
         multiple times.
         """
         if self.config_files:
@@ -123,20 +124,22 @@ class ConfigTest(BaseScript):
                     self.add_summary("%s is invalid python." % config_file,
                                      level="error")
                     self.error(pprint.pformat(sys.exc_info()[1]))
                 else:
                     if 'config' in local_dict and isinstance(local_dict['config'], dict):
                         self.info("Good.")
                         filecount[1] += 1
                     else:
-                        self.add_summary("%s is valid python, but doesn't create a config dictionary." %
+                        self.add_summary("%s is valid python, "
+                                         "but doesn't create a config dictionary." %
                                          config_file, level="error")
         if filecount[0]:
             self.add_summary("%d of %d python config files were good." %
                              (filecount[1], filecount[0]))
         else:
             self.add_summary("No python config files to test.")
 
+
 # __main__ {{{1
 if __name__ == '__main__':
     config_test = ConfigTest()
     config_test.run_and_exit()
--- a/testing/mozharness/scripts/desktop_l10n.py
+++ b/testing/mozharness/scripts/desktop_l10n.py
@@ -29,17 +29,16 @@ from mozharness.mozilla.building.buildba
 from mozharness.mozilla.l10n.locales import LocalesMixin
 from mozharness.mozilla.mar import MarMixin
 from mozharness.mozilla.mock import MockMixin
 from mozharness.mozilla.release import ReleaseMixin
 from mozharness.mozilla.signing import SigningMixin
 from mozharness.mozilla.updates.balrog import BalrogMixin
 from mozharness.mozilla.taskcluster_helper import Taskcluster
 from mozharness.base.python import VirtualenvMixin
-from mozharness.mozilla.mock import ERROR_MSGS
 
 try:
     import simplejson as json
     assert json
 except ImportError:
     import json
 
 
@@ -153,19 +152,19 @@ class DesktopSingleLocale(LocalesMixin, 
     ], [
         ['--en-us-installer-url', ],
         {"action": "store",
          "dest": "en_us_installer_url",
          "type": "string",
          "help": "Specify the url of the en-us binary"}
     ], [
         ["--disable-mock"], {
-        "dest": "disable_mock",
-        "action": "store_true",
-        "help": "do not run under mock despite what gecko-config says"}
+         "dest": "disable_mock",
+         "action": "store_true",
+         "help": "do not run under mock despite what gecko-config says"}
     ]]
 
     def __init__(self, require_config_file=True):
         # fxbuild style:
         buildscript_kwargs = {
             'all_actions': [
                 "clobber",
                 "pull",
@@ -479,17 +478,17 @@ class DesktopSingleLocale(LocalesMixin, 
         self.read_buildbot_config()
         config = self.config
         revision = None
         if config.get("revision"):
             revision = config["revision"]
         elif 'revision' in self.buildbot_properties:
             revision = self.buildbot_properties['revision']
         elif (self.buildbot_config and
-                  self.buildbot_config.get('sourcestamp', {}).get('revision')):
+              self.buildbot_config.get('sourcestamp', {}).get('revision')):
             revision = self.buildbot_config['sourcestamp']['revision']
         elif self.buildbot_config and self.buildbot_config.get('revision'):
             revision = self.buildbot_config['revision']
         elif config.get("update_gecko_source_to_enUS", True):
             revision = self._query_enUS_revision()
 
         if not revision:
             self.fatal("Can't determine revision!")
@@ -613,17 +612,18 @@ class DesktopSingleLocale(LocalesMixin, 
             current_repo = {}
             for key, value in repository.iteritems():
                 try:
                     current_repo[key] = value % replace_dict
                 except TypeError:
                     # pass through non-interpolables, like booleans
                     current_repo[key] = value
                 except KeyError:
-                    self.error('not all the values in "{0}" can be replaced. Check your configuration'.format(value))
+                    self.error('not all the values in "{0}" can be replaced. Check your '
+                               'configuration'.format(value))
                     raise
             repos.append(current_repo)
         self.info("repositories: %s" % repos)
         self.vcs_checkout_repos(repos, parent_dir=dirs['abs_work_dir'],
                                 tag_override=config.get('tag_override'))
 
     def clone_locales(self):
         self.pull_locale_source()
@@ -814,17 +814,17 @@ class DesktopSingleLocale(LocalesMixin, 
                        glob.glob(os.path.join(upload_target, 'setup-stub.exe')))
             targets_exts = ["tar.bz2", "dmg", "langpack.xpi",
                             "complete.mar", "checksums", "zip",
                             "installer.exe", "installer-stub.exe"]
             targets = ["target.%s" % ext for ext in targets_exts]
             targets.extend(['setup.exe', 'setup-stub.exe'])
             for f in matches:
                 target_file = next(target_file for target_file in targets
-                                    if f.endswith(target_file[6:]))
+                                   if f.endswith(target_file[6:]))
                 if target_file:
                     # Remove from list of available options for this locale
                     targets.remove(target_file)
                 else:
                     # wasn't valid (or already matched)
                     raise RuntimeError("Unexpected matching file name encountered: %s"
                                        % f)
                 self.move(os.path.join(f),
@@ -966,17 +966,18 @@ class DesktopSingleLocale(LocalesMixin, 
         c_marfile = self._query_complete_mar_filename(locale)
         c_mar_url = self._query_complete_mar_url(locale)
 
         # Set other necessary properties for Balrog submission. None need to
         # be passed back to buildbot, so we won't write them to the properties
         # files
         # Locale is hardcoded to en-US, for silly reasons
         # The Balrog submitter translates this platform into a build target
-        # via https://github.com/mozilla/build-tools/blob/master/lib/python/release/platforms.py#L23
+        # via
+        # https://github.com/mozilla/build-tools/blob/master/lib/python/release/platforms.py#L23
         self.set_buildbot_property("completeMarSize", self.query_filesize(c_marfile))
         self.set_buildbot_property("completeMarHash", self.query_sha512sum(c_marfile))
         self.set_buildbot_property("completeMarUrl", c_mar_url)
         self.set_buildbot_property("locale", locale)
         if "partialInfo" in self.package_urls[locale]:
             self.set_buildbot_property("partialInfo",
                                        self.package_urls[locale]["partialInfo"])
         ret = FAILURE
@@ -1092,17 +1093,18 @@ class DesktopSingleLocale(LocalesMixin, 
         """Set buildbot properties required to trigger funsize tasks
          responsible to generate partial updates for successfully generated locales"""
         locales = self.query_locales()
         funsize_info = {
             'locales': locales,
             'branch': self.config['branch'],
             'appName': self.config['appName'],
             'platform': self.config['platform'],
-            'completeMarUrls':  {locale: self._query_complete_mar_url(locale) for locale in locales},
+            'completeMarUrls': {locale: self._query_complete_mar_url(locale)
+                                for locale in locales},
         }
         self.info('funsize info: %s' % funsize_info)
         self.set_buildbot_property('funsize_info', json.dumps(funsize_info),
                                    write_to_file=True)
 
     def taskcluster_upload(self):
         auth = os.path.join(os.getcwd(), self.config['taskcluster_credentials_file'])
         credentials = {}
@@ -1167,17 +1169,17 @@ class DesktopSingleLocale(LocalesMixin, 
                     'build_type': self.query_build_type(),
                     'locale': locale,
                 }
                 fmt.update(self.buildid_to_dict(self._query_buildid()))
                 routes.append(template.format(**fmt))
 
             self.info('Using routes: %s' % routes)
             tc = Taskcluster(branch,
-                             pushinfo.pushdate, # Use pushdate as the rank
+                             pushinfo.pushdate,  # Use pushdate as the rank
                              client_id,
                              access_token,
                              self.log_obj,
                              )
             task = tc.create_task(routes)
             tc.claim_task(task)
 
             for upload_file in files:
--- a/testing/mozharness/scripts/desktop_partner_repacks.py
+++ b/testing/mozharness/scripts/desktop_partner_repacks.py
@@ -93,27 +93,28 @@ class DesktopPartnerRepacks(ReleaseMixin
         #
 
         BaseScript.__init__(
             self,
             config_options=self.config_options,
             **buildscript_kwargs
         )
 
-
     def _pre_config_lock(self, rw_config):
         self.read_buildbot_config()
         if not self.buildbot_config:
             self.warning("Skipping buildbot properties overrides")
         else:
             if self.config.get('require_buildprops', False) is True:
                 if not self.buildbot_config:
-                    self.fatal("Unable to load properties from file: %s" % self.config.get('buildbot_json_path'))
+                    self.fatal("Unable to load properties from file: %s" %
+                               self.config.get('buildbot_json_path'))
             props = self.buildbot_config["properties"]
-            for prop in ['version', 'build_number', 'revision', 'repo_file', 'repack_manifests_url', 'partner']:
+            for prop in ['version', 'build_number', 'revision', 'repo_file',
+                         'repack_manifests_url', 'partner']:
                 if props.get(prop):
                     self.info("Overriding %s with %s" % (prop, props[prop]))
                     self.config[prop] = props.get(prop)
 
         if 'version' not in self.config:
             self.fatal("Version (-v) not supplied.")
         if 'build_number' not in self.config:
             self.fatal("Build number (-n) not supplied.")
@@ -186,12 +187,13 @@ class DesktopPartnerRepacks(ReleaseMixin
         if self.config.get('hgrepo'):
             repack_cmd.extend(["--repo", self.config['hgrepo']])
         if self.config.get('revision'):
             repack_cmd.extend(["--tag", self.config["revision"]])
 
         return self.run_command(repack_cmd,
                                 cwd=self.query_abs_dirs()['abs_scripts_dir'])
 
+
 # main {{{
 if __name__ == '__main__':
     partner_repacks = DesktopPartnerRepacks()
     partner_repacks.run_and_exit()
--- a/testing/mozharness/scripts/desktop_unittest.py
+++ b/testing/mozharness/scripts/desktop_unittest.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 # ***** BEGIN LICENSE BLOCK *****
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
+
 # You can obtain one at http://mozilla.org/MPL/2.0/.
 # ***** END LICENSE BLOCK *****
 """desktop_unittest.py
 The goal of this is to extract desktop unittesting from buildbot's factory.py
 
 author: Jordan Lund
 """
 
@@ -19,38 +20,40 @@ import glob
 import imp
 
 from datetime import datetime, timedelta
 
 # load modules from parent dir
 sys.path.insert(1, os.path.dirname(sys.path[0]))
 
 from mozharness.base.errors import BaseErrorList
-from mozharness.base.log import INFO, ERROR
+from mozharness.base.log import INFO
 from mozharness.base.script import PreScriptAction
 from mozharness.base.vcs.vcsbase import MercurialScript
 from mozharness.mozilla.blob_upload import BlobUploadMixin, blobupload_config_options
 from mozharness.mozilla.buildbot import TBPL_EXCEPTION
 from mozharness.mozilla.mozbase import MozbaseMixin
 from mozharness.mozilla.structuredlog import StructuredOutputParser
 from mozharness.mozilla.testing.errors import HarnessErrorList
 from mozharness.mozilla.testing.unittest import DesktopUnittestOutputParser
 from mozharness.mozilla.testing.codecoverage import (
     CodeCoverageMixin,
     code_coverage_config_options
 )
 from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
 
-SUITE_CATEGORIES = ['gtest', 'cppunittest', 'jittest', 'mochitest', 'reftest', 'xpcshell', 'mozbase', 'mozmill']
+SUITE_CATEGORIES = ['gtest', 'cppunittest', 'jittest', 'mochitest', 'reftest', 'xpcshell',
+                    'mozbase', 'mozmill']
 SUITE_DEFAULT_E10S = ['mochitest', 'reftest']
 SUITE_NO_E10S = ['xpcshell']
 
 
 # DesktopUnittest {{{1
-class DesktopUnittest(TestingMixin, MercurialScript, BlobUploadMixin, MozbaseMixin, CodeCoverageMixin):
+class DesktopUnittest(TestingMixin, MercurialScript, BlobUploadMixin, MozbaseMixin,
+                      CodeCoverageMixin):
     config_options = [
         [['--mochitest-suite', ], {
             "action": "extend",
             "dest": "specified_mochitest_suites",
             "type": "string",
             "help": "Specify which mochi suite to run. "
                     "Suites are defined in the config file.\n"
                     "Examples: 'all', 'plain1', 'plain5', 'chrome', or 'a11y'"}
@@ -140,17 +143,18 @@ class DesktopUnittest(TestingMixin, Merc
             "action": "store",
             "dest": "this_chunk",
             "help": "Number of this chunk"}
          ],
         [["--allow-software-gl-layers"], {
             "action": "store_true",
             "dest": "allow_software_gl_layers",
             "default": False,
-            "help": "Permits a software GL implementation (such as LLVMPipe) to use the GL compositor."}
+            "help": "Permits a software GL implementation (such as LLVMPipe) to use "
+                    "the GL compositor."}
          ],
         [["--single-stylo-traversal"], {
             "action": "store_true",
             "dest": "single_stylo_traversal",
             "default": False,
             "help": "Forcibly enable single thread traversal in Stylo with STYLO_THREADS=1"}
          ],
         [["--enable-stylo"], {
@@ -270,18 +274,20 @@ class DesktopUnittest(TestingMixin, Merc
                                                         'plugins')
         dirs['abs_test_bin_components_dir'] = os.path.join(dirs['abs_test_bin_dir'],
                                                            'components')
         dirs['abs_mochitest_dir'] = os.path.join(dirs['abs_test_install_dir'], "mochitest")
         dirs['abs_reftest_dir'] = os.path.join(dirs['abs_test_install_dir'], "reftest")
         dirs['abs_xpcshell_dir'] = os.path.join(dirs['abs_test_install_dir'], "xpcshell")
         dirs['abs_cppunittest_dir'] = os.path.join(dirs['abs_test_install_dir'], "cppunittest")
         dirs['abs_gtest_dir'] = os.path.join(dirs['abs_test_install_dir'], "gtest")
-        dirs['abs_blob_upload_dir'] = os.path.join(abs_dirs['abs_work_dir'], 'blobber_upload_dir')
-        dirs['abs_jittest_dir'] = os.path.join(dirs['abs_test_install_dir'], "jit-test", "jit-test")
+        dirs['abs_blob_upload_dir'] = os.path.join(abs_dirs['abs_work_dir'],
+                                                   'blobber_upload_dir')
+        dirs['abs_jittest_dir'] = os.path.join(dirs['abs_test_install_dir'],
+                                               "jit-test", "jit-test")
         dirs['abs_mozbase_dir'] = os.path.join(dirs['abs_test_install_dir'], "mozbase")
         dirs['abs_mozmill_dir'] = os.path.join(dirs['abs_test_install_dir'], "mozmill")
 
         if os.path.isabs(c['virtualenv_path']):
             dirs['abs_virtualenv_dir'] = c['virtualenv_path']
         else:
             dirs['abs_virtualenv_dir'] = os.path.join(abs_dirs['abs_work_dir'],
                                                       c['virtualenv_path'])
@@ -324,20 +330,18 @@ class DesktopUnittest(TestingMixin, Merc
     def _pre_create_virtualenv(self, action):
         dirs = self.query_abs_dirs()
 
         self.register_virtualenv_module(name='pip>=1.5')
         self.register_virtualenv_module('psutil==3.1.1', method='pip')
         self.register_virtualenv_module(name='mock')
         self.register_virtualenv_module(name='simplejson')
 
-        requirements_files = [
-                os.path.join(dirs['abs_test_install_dir'],
-                    'config',
-                    'marionette_requirements.txt')]
+        requirements_files = [os.path.join(dirs['abs_test_install_dir'],
+                              'config', 'marionette_requirements.txt')]
 
         if self._query_specified_suites('mochitest') is not None:
             # mochitest is the only thing that needs this
             requirements_files.append(
                 os.path.join(dirs['abs_mochitest_dir'],
                              'websocketprocessbridge',
                              'websocketprocessbridge_requirements.txt'))
 
@@ -380,17 +384,17 @@ class DesktopUnittest(TestingMixin, Merc
             raw_log_file = os.path.join(dirs['abs_blob_upload_dir'],
                                         '%s_raw.log' % suite)
 
             error_summary_file = os.path.join(dirs['abs_blob_upload_dir'],
                                               '%s_errorsummary.log' % suite)
             str_format_values = {
                 'binary_path': self.binary_path,
                 'symbols_path': self._query_symbols_url(),
-                'abs_work_dir' : dirs['abs_work_dir'],
+                'abs_work_dir': dirs['abs_work_dir'],
                 'abs_app_dir': abs_app_dir,
                 'abs_res_dir': abs_res_dir,
                 'raw_log_file': raw_log_file,
                 'error_summary_file': error_summary_file,
                 'gtest_dir': os.path.join(dirs['abs_test_install_dir'],
                                           'gtest'),
             }
 
@@ -408,17 +412,18 @@ class DesktopUnittest(TestingMixin, Merc
             if c.get('total_chunks') and c.get('this_chunk'):
                 base_cmd.extend(['--total-chunks', c['total_chunks'],
                                  '--this-chunk', c['this_chunk']])
 
             if c['no_random']:
                 if suite_category == "mochitest":
                     base_cmd.append('--bisect-chunk=default')
                 else:
-                    self.warning("--no-random does not currently work with suites other than mochitest.")
+                    self.warning("--no-random does not currently work with suites other than "
+                                 "mochitest.")
 
             # set pluginsPath
             abs_res_plugins_dir = os.path.join(abs_res_dir, 'plugins')
             str_format_values['test_plugin_path'] = abs_res_plugins_dir
 
             if suite_category not in c["suite_definitions"]:
                 self.fatal("'%s' not defined in the config!")
 
@@ -499,17 +504,18 @@ class DesktopUnittest(TestingMixin, Merc
                 return flavor
 
     def structured_output(self, suite_category, flavor=None):
         unstructured_flavors = self.config.get('unstructured_flavors')
         if not unstructured_flavors:
             return False
         if suite_category not in unstructured_flavors:
             return True
-        if not unstructured_flavors.get(suite_category) or flavor in unstructured_flavors.get(suite_category):
+        if not unstructured_flavors.get(suite_category) or \
+                flavor in unstructured_flavors.get(suite_category):
             return False
         return True
 
     def get_test_output_parser(self, suite_category, flavor=None, strict=False,
                                **kwargs):
         if not self.structured_output(suite_category, flavor):
             return DesktopUnittestOutputParser(suite_category=suite_category, **kwargs)
         self.info("Structured output parser in use for %s." % suite_category)
@@ -542,20 +548,19 @@ class DesktopUnittest(TestingMixin, Merc
             suites = self._query_specified_suites(category) or []
             for suite in suites:
                 if any([suite.startswith(c) for c in compiled_code_suites]):
                     rejected.append(suite)
                     break
         if rejected:
             self.buildbot_status(TBPL_EXCEPTION)
             self.fatal("There are specified suites that are incompatible with "
-                      "--artifact try syntax flag: {}".format(', '.join(rejected)),
+                       "--artifact try syntax flag: {}".format(', '.join(rejected)),
                        exit_code=self.return_code)
 
-
     def download_and_extract(self):
         """
         download and extract test zip / download installer
         optimizes which subfolders to extract from tests zip
         """
         c = self.config
 
         extract_dirs = None
@@ -683,17 +688,17 @@ class DesktopUnittest(TestingMixin, Merc
                     # Mac specific, but points to abs_app_dir on other
                     # platforms.
                     'abs_res_dir': abs_res_dir,
                 }
                 options_list = []
                 env = {}
                 if isinstance(suites[suite], dict):
                     options_list = suites[suite].get('options', [])
-                    if self.config.get('verify') == True:
+                    if self.config.get('verify') is True:
                         tests_list = []
                     else:
                         tests_list = suites[suite].get('tests', [])
                     env = copy.deepcopy(suites[suite].get('env', {}))
                 else:
                     options_list = suites[suite]
                     tests_list = []
 
@@ -758,17 +763,18 @@ class DesktopUnittest(TestingMixin, Merc
                 env = self.query_env(partial_env=env, log_level=INFO)
                 cmd_timeout = self.get_timeout_for_category(suite_category)
 
                 for verify_args in self.query_verify_args(suite):
                     if (datetime.now() - self.start_time) > max_verify_time:
                         # Verification has run out of time. That is okay! Stop running
                         # tests so that a task timeout is not triggered, and so that
                         # (partial) results are made available in a timely manner.
-                        self.info("TinderboxPrint: Verification too long: Not all tests were verified.<br/>")
+                        self.info("TinderboxPrint: Verification too long: Not all tests "
+                                  "were verified.<br/>")
                         # Signal verify time exceeded, to break out of suites and
                         # suite categories loops also.
                         return False
 
                     final_cmd = copy.copy(cmd)
                     final_cmd.extend(verify_args)
                     return_code = self.run_command(final_cmd, cwd=dirs['abs_work_dir'],
                                                    output_timeout=cmd_timeout,
--- a/testing/mozharness/scripts/fx_desktop_build.py
+++ b/testing/mozharness/scripts/fx_desktop_build.py
@@ -73,17 +73,18 @@ class FxDesktopBuild(BuildScript, TryToo
                     "%(objdir)s/dist/firefox-*",
                     "%(objdir)s/dist/fennec*",
                     "%(objdir)s/dist/seamonkey*",
                     "%(objdir)s/dist/thunderbird*",
                     "%(objdir)s/dist/install/sea/*.exe"
                 ],
                 'stage_product': 'firefox',
                 'platform_supports_post_upload_to_latest': True,
-                'build_resources_path': '%(abs_src_dir)s/obj-firefox/.mozbuild/build_resources.json',
+                'build_resources_path': \
+                '%(abs_src_dir)s/obj-firefox/.mozbuild/build_resources.json',
                 'nightly_promotion_branches': ['mozilla-central', 'mozilla-aurora'],
 
                 # try will overwrite these
                 'clone_with_purge': False,
                 'clone_by_revision': False,
                 'tinderbox_build_dir': None,
                 'to_tinderbox_dated': True,
                 'release_to_try_builds': False,
@@ -200,17 +201,16 @@ class FxDesktopBuild(BuildScript, TryToo
                 rw_config.volatile_config['actions'])
             )
         # replace rw_config as well to set actions as in BaseScript
         rw_config.set_config(c, overwrite=True)
         rw_config.update_actions()
         self.actions = tuple(rw_config.actions)
         self.all_actions = tuple(rw_config.all_actions)
 
-
     def query_abs_dirs(self):
         if self.abs_dirs:
             return self.abs_dirs
         c = self.config
         abs_dirs = super(FxDesktopBuild, self).query_abs_dirs()
         if not c.get('app_ini_path'):
             self.fatal('"app_ini_path" is needed in your config for this '
                        'script.')
@@ -252,11 +252,12 @@ class FxDesktopBuild(BuildScript, TryToo
 
     @script.PreScriptRun
     def suppress_windows_modal_dialogs(self, *args, **kwargs):
         if self._is_windows():
             # Suppress Windows modal dialogs to avoid hangs
             import ctypes
             ctypes.windll.kernel32.SetErrorMode(0x8001)
 
+
 if __name__ == '__main__':
     fx_desktop_build = FxDesktopBuild()
     fx_desktop_build.run_and_exit()
--- a/testing/mozharness/scripts/l10n_bumper.py
+++ b/testing/mozharness/scripts/l10n_bumper.py
@@ -207,21 +207,23 @@ class L10nBumper(VCSScript):
         dirs = self.query_abs_dirs()
         tree = c.get('treestatus_tree', os.path.basename(c['gecko_pull_url'].rstrip("/")))
         treestatus_url = "%s/trees/%s" % (c['treestatus_base_url'], tree)
         treestatus_json = os.path.join(dirs['abs_work_dir'], 'treestatus.json')
         if not os.path.exists(dirs['abs_work_dir']):
             self.mkdir_p(dirs['abs_work_dir'])
         self.rmtree(treestatus_json)
 
-        self.run_command(["curl", "--retry", "4", "-o", treestatus_json, treestatus_url], throw_exception=True)
+        self.run_command(["curl", "--retry", "4", "-o", treestatus_json, treestatus_url],
+                         throw_exception=True)
 
         treestatus = self._read_json(treestatus_json)
         if treestatus['result']['status'] != 'closed':
-            self.info("treestatus is %s - assuming we can land" % repr(treestatus['result']['status']))
+            self.info("treestatus is %s - assuming we can land" %
+                      repr(treestatus['result']['status']))
             return True
 
         return False
 
     # Actions {{{1
     def check_treestatus(self):
         if not self.config['ignore_closed_tree'] and not self.query_treestatus():
             self.info("breaking early since treestatus is closed")
--- a/testing/mozharness/scripts/marionette.py
+++ b/testing/mozharness/scripts/marionette.py
@@ -2,24 +2,23 @@
 # ***** BEGIN LICENSE BLOCK *****
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
 # You can obtain one at http://mozilla.org/MPL/2.0/.
 # ***** END LICENSE BLOCK *****
 
 import copy
 import os
-import re
 import sys
 
 # load modules from parent dir
 sys.path.insert(1, os.path.dirname(sys.path[0]))
 
 from mozharness.base.errors import BaseErrorList, TarErrorList
-from mozharness.base.log import INFO, ERROR, WARNING
+from mozharness.base.log import INFO
 from mozharness.base.script import PreScriptAction
 from mozharness.base.transfer import TransferMixin
 from mozharness.base.vcs.vcsbase import MercurialScript
 from mozharness.mozilla.blob_upload import BlobUploadMixin, blobupload_config_options
 from mozharness.mozilla.testing.errors import LogcatErrorList
 from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
 from mozharness.mozilla.testing.unittest import TestSummaryOutputParserHelper
 from mozharness.mozilla.testing.codecoverage import (
@@ -29,17 +28,18 @@ from mozharness.mozilla.testing.codecove
 from mozharness.mozilla.testing.errors import HarnessErrorList
 
 from mozharness.mozilla.structuredlog import StructuredOutputParser
 
 # TODO: we could remove emulator specific code after B2G ICS emulator buildbot
 #       builds is turned off, Bug 1209180.
 
 
-class MarionetteTest(TestingMixin, MercurialScript, BlobUploadMixin, TransferMixin, CodeCoverageMixin):
+class MarionetteTest(TestingMixin, MercurialScript, BlobUploadMixin, TransferMixin,
+                     CodeCoverageMixin):
     config_options = [[
         ["--application"],
         {"action": "store",
          "dest": "application",
          "default": None,
          "help": "application name of binary"
          }
     ], [
@@ -49,17 +49,18 @@ class MarionetteTest(TestingMixin, Mercu
          "default": None,
          "help": "Optional command-line argument to pass to the browser"
          }
     ], [
         ["--marionette-address"],
         {"action": "store",
          "dest": "marionette_address",
          "default": None,
-         "help": "The host:port of the Marionette server running inside Gecko.  Unused for emulator testing",
+         "help": "The host:port of the Marionette server running inside Gecko. "
+                 "Unused for emulator testing",
          }
     ], [
         ["--emulator"],
         {"action": "store",
          "type": "choice",
          "choices": ['arm', 'x86'],
          "dest": "emulator",
          "default": None,
@@ -93,17 +94,17 @@ class MarionetteTest(TestingMixin, Mercu
          "help": "Run tests with multiple processes. (Desktop builds only)",
          }
     ], [
         ["--headless"],
         {"action": "store_true",
          "dest": "headless",
          "default": False,
          "help": "Run tests in headless mode.",
-        }
+         }
     ], [
        ["--allow-software-gl-layers"],
        {"action": "store_true",
         "dest": "allow_software_gl_layers",
         "default": False,
         "help": "Permits a software GL implementation (such as LLVMPipe) to use the GL compositor."
         }
     ], [
@@ -149,17 +150,18 @@ class MarionetteTest(TestingMixin, Mercu
         if c.get('structured_output'):
             self.parser_class = StructuredOutputParser
         else:
             self.parser_class = TestSummaryOutputParserHelper
 
     def _pre_config_lock(self, rw_config):
         super(MarionetteTest, self)._pre_config_lock(rw_config)
         if not self.config.get('emulator') and not self.config.get('marionette_address'):
-                self.fatal("You need to specify a --marionette-address for non-emulator tests! (Try --marionette-address localhost:2828 )")
+            self.fatal("You need to specify a --marionette-address for non-emulator tests! "
+                       "(Try --marionette-address localhost:2828 )")
 
     def query_abs_dirs(self):
         if self.abs_dirs:
             return self.abs_dirs
         abs_dirs = super(MarionetteTest, self).query_abs_dirs()
         dirs = {}
         dirs['abs_test_install_dir'] = os.path.join(
             abs_dirs['abs_work_dir'], 'tests')
--- a/testing/mozharness/scripts/merge_day/gecko_migration.py
+++ b/testing/mozharness/scripts/merge_day/gecko_migration.py
@@ -15,24 +15,22 @@ http://hg.mozilla.org/build/tools/file/0
 and
 http://hg.mozilla.org/build/tools/file/084bc4e2fc76/release/merge_helper.py
 """
 
 import os
 import pprint
 import subprocess
 import sys
-from getpass import getpass
 
 sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
 
 from mozharness.base.errors import HgErrorList
 from mozharness.base.python import VirtualenvMixin, virtualenv_config_options
 from mozharness.base.vcs.vcsbase import MercurialScript
-from mozharness.mozilla.selfserve import SelfServeMixin
 from mozharness.mozilla.updates.balrog import BalrogMixin
 from mozharness.mozilla.buildbot import BuildbotMixin
 from mozharness.mozilla.repo_manipulation import MercurialRepoManipulationMixin
 
 VALID_MIGRATION_BEHAVIORS = (
     "beta_to_release", "central_to_beta", "release_to_esr", "bump_second_digit",
 )
 
@@ -99,23 +97,26 @@ class GeckoMigration(MercurialScript, Ba
         self.run_sanity_check()
 
 # Helper methods {{{1
     def run_sanity_check(self):
         """ Verify the configs look sane before proceeding.
             """
         message = ""
         if self.config['migration_behavior'] not in VALID_MIGRATION_BEHAVIORS:
-            message += "%s must be one of %s!\n" % (self.config['migration_behavior'], VALID_MIGRATION_BEHAVIORS)
+            message += "%s must be one of %s!\n" % (self.config['migration_behavior'],
+                                                    VALID_MIGRATION_BEHAVIORS)
         if self.config['migration_behavior'] == 'beta_to_release':
-            if self.config.get("require_remove_locales") and not self.config.get("remove_locales") and 'migrate' in self.actions:
+            if self.config.get("require_remove_locales") \
+                    and not self.config.get("remove_locales") and 'migrate' in self.actions:
                 message += "You must specify --remove-locale!\n"
         else:
             if self.config.get("require_remove_locales") or self.config.get("remove_locales"):
-                self.warning("--remove-locale isn't valid unless you're using beta_to_release migration_behavior!\n")
+                self.warning("--remove-locale isn't valid unless you're using beta_to_release "
+                             "migration_behavior!\n")
         if message:
             self.fatal(message)
 
     def query_abs_dirs(self):
         """ Allow for abs_from_dir and abs_to_dir
             """
         if self.abs_dirs:
             return self.abs_dirs
@@ -216,17 +217,18 @@ class GeckoMigration(MercurialScript, Ba
             # I'm reverting .hgtags to old_head, then appending the new tags
             # from new_head to .hgtags, and hoping nothing goes wrong.
             # I'd rather not write patch files from scratch, so this seems
             # like a slightly more complex but less objectionable method?
             self.info("Trying to preserve tags from before debugsetparents...")
             dirs = self.query_abs_dirs()
             patch_file = os.path.join(dirs['abs_work_dir'], 'patch_file')
             self.run_command(
-                subprocess.list2cmdline(hg + ['diff', '-r', old_head, '.hgtags', '-U9', '>', patch_file]),
+                subprocess.list2cmdline(hg + ['diff', '-r', old_head, '.hgtags',
+                                        '-U9', '>', patch_file]),
                 cwd=cwd,
             )
             self.run_command(
                 ['patch', '-R', '-p1', '-i', patch_file],
                 cwd=cwd,
                 halt_on_failure=True,
             )
             tag_diff = self.read_from_file(patch_file)
@@ -319,31 +321,31 @@ class GeckoMigration(MercurialScript, Ba
 
             We could have all of these individually toggled by flags, but
             by separating into workflow methods we can be more precise about
             what happens in each workflow, while allowing for things like
             staging beta user repo migrations.
             """
         dirs = self.query_abs_dirs()
         next_mb_version = self.get_version(dirs['abs_to_dir'])[0]
-        self.bump_version(dirs['abs_to_dir'], next_mb_version, next_mb_version, "a1", "", use_config_suffix=True)
+        self.bump_version(dirs['abs_to_dir'], next_mb_version, next_mb_version, "a1", "",
+                          use_config_suffix=True)
         self.apply_replacements()
         # bump m-c version
         curr_mc_version = self.get_version(dirs['abs_from_dir'])[0]
         next_mc_version = str(int(curr_mc_version) + 1)
         self.bump_version(
             dirs['abs_from_dir'], curr_mc_version, next_mc_version, "a1", "a1",
             bump_major=True,
             use_config_suffix=False
         )
         # touch clobber files
         self.touch_clobber_file(dirs['abs_from_dir'])
         self.touch_clobber_file(dirs['abs_to_dir'])
 
-
     def beta_to_release(self, *args, **kwargs):
         """ mozilla-beta -> mozilla-release behavior.
 
             We could have all of these individually toggled by flags, but
             by separating into workflow methods we can be more precise about
             what happens in each workflow, while allowing for things like
             staging beta user repo migrations.
             """
@@ -487,16 +489,18 @@ class GeckoMigration(MercurialScript, Ba
         if end_tag:
             end_tag = end_tag % {'major_version': to_fx_major_version}
             self.hg_tag(
                 dirs['abs_to_dir'], end_tag, user=self.config['hg_user'],
                 revision=base_to_rev, force=True,
             )
         # Call beta_to_release etc.
         if not hasattr(self, self.config['migration_behavior']):
-            self.fatal("Don't know how to proceed with migration_behavior %s !" % self.config['migration_behavior'])
+            self.fatal("Don't know how to proceed with migration_behavior %s !" %
+                       self.config['migration_behavior'])
         getattr(self, self.config['migration_behavior'])(end_tag=end_tag)
-        self.info("Verify the diff, and apply any manual changes, such as disabling features, and --commit-changes")
+        self.info("Verify the diff, and apply any manual changes, such as disabling features, "
+                  "and --commit-changes")
 
 
 # __main__ {{{1
 if __name__ == '__main__':
     GeckoMigration().run_and_exit()
--- a/testing/mozharness/scripts/mobile_l10n.py
+++ b/testing/mozharness/scripts/mobile_l10n.py
@@ -106,17 +106,17 @@ class MobileSingleLocale(MockMixin, Loca
          "type": "int",
          "help": "Specify the total number of chunks of locales"
          }
     ], [
         ["--disable-mock"],
         {"dest": "disable_mock",
          "action": "store_true",
          "help": "do not run under mock despite what gecko-config says",
-        }
+         }
     ], [
         ['--revision', ],
         {"action": "store",
          "dest": "revision",
          "type": "string",
          "help": "Override the gecko revision to use (otherwise use buildbot supplied"
                  " value, or en-US revision) "}
     ]]
@@ -178,17 +178,18 @@ class MobileSingleLocale(MockMixin, Loca
                 'buildnum': rc['buildnum']
             }
         repack_env = self.query_env(partial_env=c.get("repack_env"),
                                     replace_dict=replace_dict)
         if c.get('base_en_us_binary_url') and c.get('release_config_file'):
             rc = self.query_release_config()
             repack_env['EN_US_BINARY_URL'] = c['base_en_us_binary_url'] % replace_dict
         if 'MOZ_SIGNING_SERVERS' in os.environ:
-            repack_env['MOZ_SIGN_CMD'] = subprocess.list2cmdline(self.query_moz_sign_cmd(formats=['jar']))
+            repack_env['MOZ_SIGN_CMD'] = \
+                subprocess.list2cmdline(self.query_moz_sign_cmd(formats=['jar']))
         self.repack_env = repack_env
         return self.repack_env
 
     def query_l10n_env(self):
         return self.query_env()
 
     def query_upload_env(self):
         if self.upload_env:
@@ -266,17 +267,17 @@ class MobileSingleLocale(MockMixin, Loca
         self.read_buildbot_config()
         config = self.config
         revision = None
         if config.get("revision"):
             revision = config["revision"]
         elif 'revision' in self.buildbot_properties:
             revision = self.buildbot_properties['revision']
         elif (self.buildbot_config and
-                  self.buildbot_config.get('sourcestamp', {}).get('revision')):
+                self.buildbot_config.get('sourcestamp', {}).get('revision')):
             revision = self.buildbot_config['sourcestamp']['revision']
         elif self.buildbot_config and self.buildbot_config.get('revision'):
             revision = self.buildbot_config['revision']
         elif config.get("update_gecko_source_to_enUS", True):
             revision = self._query_enUS_revision()
 
         if not revision:
             self.fatal("Can't determine revision!")
@@ -342,30 +343,28 @@ class MobileSingleLocale(MockMixin, Loca
 
     def query_upload_url(self, locale):
         if locale in self.upload_urls:
             return self.upload_urls[locale]
         else:
             self.error("Can't determine the upload url for %s!" % locale)
 
     def query_abs_dirs(self):
-         if self.abs_dirs:
-             return self.abs_dirs
-         abs_dirs = super(MobileSingleLocale, self).query_abs_dirs()
+        if self.abs_dirs:
+            return self.abs_dirs
+        abs_dirs = super(MobileSingleLocale, self).query_abs_dirs()
 
-         dirs = {
-             'abs_tools_dir':
-                 os.path.join(abs_dirs['base_work_dir'], 'tools'),
-             'build_dir':
-                 os.path.join(abs_dirs['base_work_dir'], 'build'),
-         }
+        dirs = {
+            'abs_tools_dir': os.path.join(abs_dirs['base_work_dir'], 'tools'),
+            'build_dir': os.path.join(abs_dirs['base_work_dir'], 'build'),
+        }
 
-         abs_dirs.update(dirs)
-         self.abs_dirs = abs_dirs
-         return self.abs_dirs
+        abs_dirs.update(dirs)
+        self.abs_dirs = abs_dirs
+        return self.abs_dirs
 
     def add_failure(self, locale, message, **kwargs):
         self.locales_property[locale] = "Failed"
         prop_key = "%s_failure" % locale
         prop_value = self.query_buildbot_property(prop_key)
         if prop_value:
             prop_value = "%s  %s" % (prop_value, message)
         else:
@@ -374,17 +373,18 @@ class MobileSingleLocale(MockMixin, Loca
         MercurialScript.add_failure(self, locale, message=message, **kwargs)
 
     def summary(self):
         MercurialScript.summary(self)
         # TODO we probably want to make this configurable on/off
         locales = self.query_locales()
         for locale in locales:
             self.locales_property.setdefault(locale, "Success")
-        self.set_buildbot_property("locales", json.dumps(self.locales_property), write_to_file=True)
+        self.set_buildbot_property("locales", json.dumps(self.locales_property),
+                                   write_to_file=True)
 
     # Actions {{{2
     def clobber(self):
         self.read_buildbot_config()
         dirs = self.query_abs_dirs()
         c = self.config
         objdir = os.path.join(dirs['abs_work_dir'], c['mozilla_dir'],
                               c['objdir'])
@@ -405,30 +405,30 @@ class MobileSingleLocale(MockMixin, Loca
             current_repo = {}
             for key, value in repository.iteritems():
                 try:
                     current_repo[key] = value % replace_dict
                 except TypeError:
                     # pass through non-interpolables, like booleans
                     current_repo[key] = value
                 except KeyError:
-                    self.error('not all the values in "{0}" can be replaced. Check your configuration'.format(value))
+                    self.error('not all the values in "{0}" can be replaced. Check '
+                               'your configuration'.format(value))
                     raise
             repos.append(current_repo)
         self.info("repositories: %s" % repos)
         self.vcs_checkout_repos(repos, parent_dir=dirs['abs_work_dir'],
                                 tag_override=c.get('tag_override'))
 
     def clone_locales(self):
         self.pull_locale_source()
 
     # list_locales() is defined in LocalesMixin.
 
     def _setup_configure(self, buildid=None):
-        c = self.config
         dirs = self.query_abs_dirs()
         env = self.query_repack_env()
         make = self.query_exe("make")
         if self.run_command_m([make, "-f", "client.mk", "configure"],
                               cwd=dirs['abs_mozilla_dir'],
                               env=env,
                               error_list=MakefileErrorList):
             self.fatal("Configure failed!")
@@ -505,17 +505,18 @@ class MobileSingleLocale(MockMixin, Loca
         success_count = total_count = 0
         for locale in locales:
             total_count += 1
             if self.run_command_m([make, "installers-%s" % locale],
                                   cwd=dirs['abs_locales_dir'],
                                   env=repack_env,
                                   error_list=MakefileErrorList,
                                   halt_on_failure=False):
-                self.add_failure(locale, message="%s failed in make installers-%s!" % (locale, locale))
+                self.add_failure(locale, message="%s failed in make installers-%s!" %
+                                 (locale, locale))
                 continue
             success_count += 1
         self.summarize_success_count(success_count, total_count,
                                      message="Repacked %d of %d binaries successfully.")
 
     def validate_repacks_signed(self):
         c = self.config
         dirs = self.query_abs_dirs()
@@ -539,17 +540,18 @@ class MobileSingleLocale(MockMixin, Loca
             )
             self.disable_mock()
             if status:
                 self.add_failure(locale, message="Errors verifying %s binary!" % locale)
                 # No need to rm because upload is per-locale
                 continue
             success_count += 1
         self.summarize_success_count(success_count, total_count,
-                                     message="Validated signatures on %d of %d binaries successfully.")
+                                     message="Validated signatures on %d of %d "
+                                             "binaries successfully.")
 
     def taskcluster_upload(self):
         auth = os.path.join(os.getcwd(), self.config['taskcluster_credentials_file'])
         credentials = {}
         execfile(auth, credentials)
         client_id = credentials.get('taskcluster_clientId')
         access_token = credentials.get('taskcluster_accessToken')
         if not client_id or not access_token:
@@ -597,17 +599,17 @@ class MobileSingleLocale(MockMixin, Loca
                 'build_type': self.query_build_type(),
                 'locale': locale,
             }
             for template in templates:
                 routes.append(template.format(**fmt))
 
             self.info('Using routes: %s' % routes)
             tc = Taskcluster(branch,
-                             pushinfo.pushdate, # Use pushdate as the rank
+                             pushinfo.pushdate,  # Use pushdate as the rank
                              client_id,
                              access_token,
                              self.log_obj,
                              )
             task = tc.create_task(routes)
             tc.claim_task(task)
 
             for upload_file in abs_files:
@@ -628,17 +630,19 @@ class MobileSingleLocale(MockMixin, Loca
             rc = self.query_release_config()
             buildnum = rc['buildnum']
         for locale in locales:
             if self.query_failure(locale):
                 self.warning("Skipping previously failed locale %s." % locale)
                 continue
             total_count += 1
             if c.get('base_post_upload_cmd'):
-                upload_env['POST_UPLOAD_CMD'] = c['base_post_upload_cmd'] % {'version': version, 'locale': locale, 'buildnum': str(buildnum), 'post_upload_extra': ' '.join(c.get('post_upload_extra', []))}
+                upload_env['POST_UPLOAD_CMD'] = c['base_post_upload_cmd'] % \
+                    {'version': version, 'locale': locale, 'buildnum': str(buildnum),
+                     'post_upload_extra': ' '.join(c.get('post_upload_extra', []))}
             output = self.get_output_from_command_m(
                 # Ugly hack to avoid |make upload| stderr from showing up
                 # as get_output_from_command errors
                 "%s upload AB_CD=%s 2>&1" % (make, locale),
                 cwd=dirs['abs_locales_dir'],
                 env=upload_env,
                 silent=True
             )
@@ -682,28 +686,29 @@ class MobileSingleLocale(MockMixin, Loca
             'repo': self.config['tools_repo'],
             'vcs': "hg",
             'branch': "default",
             'dest': dirs['abs_tools_dir'],
         }]
         rev = self.vcs_checkout(**repos[0])
         self.set_buildbot_property("tools_revision", rev, write_to_file=True)
 
-    def query_apkfile_path(self,locale):
+    def query_apkfile_path(self, locale):
 
         dirs = self.query_abs_dirs()
         apkdir = os.path.join(dirs['abs_objdir'], 'dist')
-        r  = r"(\.)" + re.escape(locale) + r"(\.*)"
+        r = r"(\.)" + re.escape(locale) + r"(\.*)"
 
         apks = []
         for f in os.listdir(apkdir):
             if f.endswith(".apk") and re.search(r, f):
                 apks.append(f)
         if len(apks) == 0:
-            self.fatal("Found no apks files in %s, don't know what to do:\n%s" % (apkdir, apks), exit_code=1)
+            self.fatal("Found no apks files in %s, don't know what to do:\n%s" %
+                       (apkdir, apks), exit_code=1)
 
         return os.path.join(apkdir, apks[0])
 
     def query_is_release_or_beta(self):
 
         return bool(self.config.get("is_release_or_beta"))
 
     def submit_to_balrog(self):
@@ -716,44 +721,46 @@ class MobileSingleLocale(MockMixin, Loca
 
         dirs = self.query_abs_dirs()
         locales = self.query_locales()
         if not self.config.get('taskcluster_nightly'):
             balrogReady = True
             for locale in locales:
                 apk_url = self.query_upload_url(locale)
                 if not apk_url:
-                    self.add_failure(locale, message="Failed to detect %s url in make upload!" % (locale))
+                    self.add_failure(locale, message="Failed to detect %s url in make upload!" %
+                                     (locale))
                     balrogReady = False
                     continue
             if not balrogReady:
-                return self.fatal(message="Not all repacks successful, abort without submitting to balrog")
+                return self.fatal(message="Not all repacks successful, abort without "
+                                          "submitting to balrog")
 
         env = self.query_upload_env()
         for locale in locales:
             apkfile = self.query_apkfile_path(locale)
             if self.config.get('taskcluster_nightly'):
                 # Taskcluster needs stage_platform
                 self.set_buildbot_property("stage_platform",
                                            self.config.get("stage_platform"))
                 self.set_buildbot_property("branch", self.config.get("branch"))
             else:
                 apk_url = self.query_upload_url(locale)
                 self.set_buildbot_property("completeMarUrl", apk_url)
 
                 # The Balrog submitter translates this platform into a build target
-                # via https://github.com/mozilla/build-tools/blob/master/lib/python/release/platforms.py#L23
+                # via https://github.com/mozilla/build-tools/blob/master/lib/python/release/platforms.py#L23  # noqa
                 self.set_buildbot_property(
                     "platform",
                     self.buildbot_config["properties"]["platform"])
-                #TODO: Is there a better way to get this?
+                # TODO: Is there a better way to get this?
 
             # Set other necessary properties for Balrog submission. None need to
             # be passed back to buildbot, so we won't write them to the properties
-            #files.
+            # files.
             self.set_buildbot_property("locale", locale)
 
             self.set_buildbot_property("appVersion", self.query_version())
 
             self.set_buildbot_property("appName", "Fennec")
             # TODO: don't hardcode
             self.set_buildbot_property("hashType", "sha512")
             self.set_buildbot_property("completeMarSize", self.query_filesize(apkfile))
@@ -773,12 +780,13 @@ class MobileSingleLocale(MockMixin, Loca
                 if self.query_is_nightly():
                     self.submit_balrog_updates(release_type="nightly")
                 else:
                     self.submit_balrog_updates(release_type="release")
 
                 if not self.query_is_nightly():
                     self.submit_balrog_release_pusher(dirs)
 
+
 # main {{{1
 if __name__ == '__main__':
     single_locale = MobileSingleLocale()
     single_locale.run_and_exit()
--- a/testing/mozharness/scripts/mobile_partner_repack.py
+++ b/testing/mozharness/scripts/mobile_partner_repack.py
@@ -168,17 +168,18 @@ class MobilePartnerRepack(LocalesMixin, 
                 installer_name = base_installer_name % replace_dict
                 parent_dir = '%s/original/%s/%s' % (dirs['abs_work_dir'],
                                                     platform, locale)
                 file_path = '%s/%s' % (parent_dir, installer_name)
                 self.mkdir_p(parent_dir)
                 total_count += 1
                 if not self.download_file(url, file_path):
                     self.add_failure(platform, locale,
-                                     message="Unable to download %(platform)s:%(locale)s installer!")
+                                     message="Unable to "
+                                             "download %(platform)s:%(locale)s installer!")
                 else:
                     success_count += 1
         self.summarize_success_count(success_count, total_count,
                                      message="Downloaded %d of %d installers successfully.")
 
     def _repack_apk(self, partner, orig_path, repack_path):
         """ Repack the apk with a partner update channel.
         Returns True for success, None for failure
@@ -232,29 +233,33 @@ class MobilePartnerRepack(LocalesMixin, 
     def repack(self):
         c = self.config
         rc = self.query_release_config()
         dirs = self.query_abs_dirs()
         locales = self.query_locales()
         success_count = total_count = 0
         for platform in c['platforms']:
             for locale in locales:
-                installer_name = c['installer_base_names'][platform] % {'version': rc['version'], 'locale': locale}
+                installer_name = c['installer_base_names'][platform] % \
+                    {'version': rc['version'], 'locale': locale}
                 if self.query_failure(platform, locale):
                     self.warning("%s:%s had previous issues; skipping!" % (platform, locale))
                     continue
-                original_path = '%s/original/%s/%s/%s' % (dirs['abs_work_dir'], platform, locale, installer_name)
+                original_path = '%s/original/%s/%s/%s' % \
+                    (dirs['abs_work_dir'], platform, locale, installer_name)
                 for partner in c['partner_config'].keys():
-                    repack_path = '%s/unsigned/partner-repacks/%s/%s/%s/%s' % (dirs['abs_work_dir'], partner, platform, locale, installer_name)
+                    repack_path = '%s/unsigned/partner-repacks/%s/%s/%s/%s' % \
+                        (dirs['abs_work_dir'], partner, platform, locale, installer_name)
                     total_count += 1
                     if self._repack_apk(partner, original_path, repack_path):
                         success_count += 1
                     else:
                         self.add_failure(platform, locale,
-                                         message="Unable to repack %(platform)s:%(locale)s installer!")
+                                         message="Unable to repack %(platform)s:%(locale)s "
+                                                 "installer!")
         self.summarize_success_count(success_count, total_count,
                                      message="Repacked %d of %d installers successfully.")
 
     def _upload(self, dir_name="unsigned/partner-repacks"):
         c = self.config
         dirs = self.query_abs_dirs()
         local_path = os.path.join(dirs['abs_work_dir'], dir_name)
         rc = self.query_release_config()
@@ -282,33 +287,37 @@ class MobilePartnerRepack(LocalesMixin, 
     def sign(self):
         c = self.config
         rc = self.query_release_config()
         dirs = self.query_abs_dirs()
         locales = self.query_locales()
         success_count = total_count = 0
         for platform in c['platforms']:
             for locale in locales:
-                installer_name = c['installer_base_names'][platform] % {'version': rc['version'], 'locale': locale}
+                installer_name = c['installer_base_names'][platform] % \
+                    {'version': rc['version'], 'locale': locale}
                 if self.query_failure(platform, locale):
                     self.warning("%s:%s had previous issues; skipping!" % (platform, locale))
                     continue
                 for partner in c['partner_config'].keys():
-                    unsigned_path = '%s/unsigned/partner-repacks/%s/%s/%s/%s' % (dirs['abs_work_dir'], partner, platform, locale, installer_name)
-                    signed_dir = '%s/partner-repacks/%s/%s/%s' % (dirs['abs_work_dir'], partner, platform, locale)
+                    unsigned_path = '%s/unsigned/partner-repacks/%s/%s/%s/%s' % \
+                        (dirs['abs_work_dir'], partner, platform, locale, installer_name)
+                    signed_dir = '%s/partner-repacks/%s/%s/%s' % \
+                        (dirs['abs_work_dir'], partner, platform, locale)
                     signed_path = "%s/%s" % (signed_dir, installer_name)
                     total_count += 1
                     self.info("Signing %s %s." % (platform, locale))
                     if not os.path.exists(unsigned_path):
                         self.error("Missing apk %s!" % unsigned_path)
                         continue
                     if self.sign_apk(unsigned_path, c['keystore'],
                                      self.store_passphrase, self.key_passphrase,
                                      c['key_alias']) != 0:
-                        self.add_summary("Unable to sign %s:%s apk!" % (platform, locale), level=FATAL)
+                        self.add_summary("Unable to sign %s:%s apk!" % (platform, locale),
+                                         level=FATAL)
                     else:
                         self.mkdir_p(signed_dir)
                         if self.align_apk(unsigned_path, signed_path):
                             self.add_failure(platform, locale,
                                              message="Unable to align %(platform)s%(locale)s apk!")
                             self.rmtree(signed_dir)
                         else:
                             success_count += 1
--- a/testing/mozharness/scripts/release/antivirus.py
+++ b/testing/mozharness/scripts/release/antivirus.py
@@ -70,44 +70,44 @@ class AntivirusScan(BaseScript, Virtuale
         r"^.*/mar-tools/.*$",
         r"^.*robocop.apk$",
         r"^.*contrib.*"
     ]
     CACHE_DIR = 'cache'
 
     def __init__(self):
         BaseScript.__init__(self,
-            config_options=self.config_options,
-            require_config_file=False,
-            config={
-                "virtualenv_modules": [
-                    "pip==1.5.5",
-                    "boto",
-                    "redo",
-                    "mar",
-                ],
-                "virtualenv_path": "venv",
-            },
-            all_actions=[
-                "create-virtualenv",
-                "activate-virtualenv",
-                "get-extract-script",
-                "get-files",
-                "scan-files",
-                "cleanup-cache",
-            ],
-            default_actions=[
-                "create-virtualenv",
-                "activate-virtualenv",
-                "get-extract-script",
-                "get-files",
-                "scan-files",
-                "cleanup-cache",
-            ],
-        )
+                            config_options=self.config_options,
+                            require_config_file=False,
+                            config={
+                                "virtualenv_modules": [
+                                    "pip==1.5.5",
+                                    "boto",
+                                    "redo",
+                                    "mar",
+                                ],
+                                "virtualenv_path": "venv",
+                            },
+                            all_actions=[
+                                "create-virtualenv",
+                                "activate-virtualenv",
+                                "get-extract-script",
+                                "get-files",
+                                "scan-files",
+                                "cleanup-cache",
+                            ],
+                            default_actions=[
+                                "create-virtualenv",
+                                "activate-virtualenv",
+                                "get-extract-script",
+                                "get-files",
+                                "scan-files",
+                                "cleanup-cache",
+                            ],
+                            )
         self.excludes = self.config.get('excludes', self.DEFAULT_EXCLUDES)
         self.dest_dir = self.CACHE_DIR
 
     def _get_candidates_prefix(self):
         return "pub/{}/candidates/{}-candidates/build{}/".format(
             self.config['product'],
             self.config["version"],
             self.config["build_number"]
@@ -117,18 +117,18 @@ class AntivirusScan(BaseScript, Virtuale
         for exclude in self.excludes:
             if re.search(exclude, keyname):
                 return True
         return False
 
     def get_extract_script(self):
         """Gets a copy of extract_and_run_command.py from tools, and the supporting mar.py,
         so that we can unpack various files for clam to scan them."""
-        remote_file = "{}/raw-file/{}/stage/extract_and_run_command.py".format(self.config["tools_repo"],
-                                                                               self.config["tools_revision"])
+        remote_file = "{}/raw-file/{}/stage/extract_and_run_command.py"\
+                      .format(self.config["tools_repo"], self.config["tools_revision"])
         self.download_file(remote_file, file_name="extract_and_run_command.py")
 
     def get_files(self):
         """Pull the candidate files down from S3 for scanning, using parallel requests"""
         from boto.s3.connection import S3Connection
         from boto.exception import S3CopyError, S3ResponseError
         from redo import retry
         from httplib import HTTPException
@@ -161,17 +161,18 @@ class AntivirusScan(BaseScript, Virtuale
         def find_release_files():
             candidates_prefix = self._get_candidates_prefix()
             self.info("Getting key names from candidates")
             for key in bucket.list(prefix=candidates_prefix):
                 keyname = key.name
                 if self._matches_exclude(keyname):
                     self.debug("Excluding {}".format(keyname))
                 else:
-                    destination = os.path.join(self.dest_dir, keyname.replace(candidates_prefix, ''))
+                    destination = os.path.join(self.dest_dir,
+                                               keyname.replace(candidates_prefix, ''))
                     dest_dir = os.path.dirname(destination)
                     if not os.path.isdir(dest_dir):
                         os.makedirs(dest_dir)
                     yield (keyname, destination)
 
         pool = ThreadPool(self.config["download_parallelization"])
         pool.map(worker, find_release_files())
 
--- a/testing/mozharness/scripts/release/beet_mover.py
+++ b/testing/mozharness/scripts/release/beet_mover.py
@@ -132,47 +132,49 @@ class BeetMover(BaseScript, VirtualenvMi
             'config_options': CONFIG_OPTIONS,
             'all_actions': [
                 # 'clobber',
                 'create-virtualenv',
                 'activate-virtualenv',
                 'generate-candidates-manifest',
                 'refresh-antivirus',
                 'verify-bits',  # beets
-                'download-bits', # beets
+                'download-bits',  # beets
                 'scan-bits',     # beets
                 'upload-bits',  # beets
             ],
             'require_config_file': False,
             # Default configuration
             'config': {
                 # base index url where to find taskcluster artifact based on taskid
-                "artifact_base_url": 'https://queue.taskcluster.net/v1/task/{taskid}/artifacts/public/{subdir}',
+                "artifact_base_url":
+                'https://queue.taskcluster.net/v1/task/{taskid}/artifacts/public/{subdir}',
                 "virtualenv_modules": [
                     "pip==9.0.1",
                     "setuptools==36.6.0",
                     "boto",
                     "PyYAML",
                     "Jinja2",
                     "redo",
                     "mar",
                 ],
                 "virtualenv_path": "venv",
             },
         }
-        #todo do excludes need to be configured via command line for specific builds?
+        # TODO: do excludes need to be configured via command line for specific builds?
         super(BeetMover, self).__init__(**beetmover_kwargs)
 
         c = self.config
         self.manifest = {}
         # assigned in _post_create_virtualenv
         self.virtualenv_imports = None
         self.bucket = c['bucket']
         if not all(aws_creds):
-            self.fatal('credentials must be passed in env: "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"')
+            self.fatal('credentials must be passed in env: '
+                       '"AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"')
         self.aws_key_id, self.aws_secret_key = aws_creds
         # if excludes is set from command line, use it otherwise use defaults
         self.excludes = self.config.get('excludes', DEFAULT_EXCLUDES)
         dirs = self.query_abs_dirs()
         self.dest_dir = os.path.join(dirs['abs_work_dir'], CACHE_DIR)
         self.mime_fix()
 
     def activate_virtualenv(self):
@@ -252,17 +254,18 @@ class BeetMover(BaseScript, VirtualenvMi
         """
         downloads list of artifacts to self.dest_dir dir based on a given manifest
         """
         self.log('downloading and uploading artifacts to self_dest_dir...')
         dirs = self.query_abs_dirs()
 
         for locale in self.manifest['mapping']:
             for deliverable in self.manifest['mapping'][locale]:
-                self.log("downloading '{}' deliverable for '{}' locale".format(deliverable, locale))
+                self.log("downloading '{}' deliverable for '{}' locale".format(deliverable,
+                                                                               locale))
                 source = self.manifest['mapping'][locale][deliverable]['artifact']
                 self.retry(
                     self.download_file,
                     args=[source],
                     kwargs={'parent_dir': dirs['abs_work_dir']},
                     error_level=FATAL)
         self.log('Success!')
 
@@ -284,17 +287,18 @@ class BeetMover(BaseScript, VirtualenvMi
         bucket = conn.get_bucket(self.bucket)
 
         for locale in self.manifest['mapping']:
             for deliverable in self.manifest['mapping'][locale]:
                 self.log("uploading '{}' deliverable for '{}' locale".format(deliverable, locale))
                 # we have already downloaded the files locally so we can use that version
                 source = self.manifest['mapping'][locale][deliverable]['artifact']
                 s3_key = self.manifest['mapping'][locale][deliverable]['s3_key']
-                downloaded_file = os.path.join(dirs['abs_work_dir'], self.get_filename_from_url(source))
+                downloaded_file = os.path.join(dirs['abs_work_dir'],
+                                               self.get_filename_from_url(source))
                 # generate checksums for every uploaded file
                 beet_file_name = '{}.beet'.format(downloaded_file)
                 # upload checksums to a separate subdirectory
                 beet_dest = '{prefix}beetmover-checksums/{f}.beet'.format(
                     prefix=self._get_template_vars()["s3_prefix"],
                     f=self._strip_prefix(s3_key)
                 )
                 beet_contents = '\n'.join([
@@ -306,67 +310,70 @@ class BeetMover(BaseScript, VirtualenvMi
                 ])
                 self.write_to_file(beet_file_name, beet_contents)
                 self.upload_bit(source=downloaded_file, s3_key=s3_key,
                                 bucket=bucket)
                 self.upload_bit(source=beet_file_name, s3_key=beet_dest,
                                 bucket=bucket)
         self.log('Success!')
 
-
     def upload_bit(self, source, s3_key, bucket):
         boto = self.virtualenv_imports['boto']
         self.info('uploading to s3 with key: {}'.format(s3_key))
         key = boto.s3.key.Key(bucket)  # create new key
         key.key = s3_key  # set key name
 
         self.info("Checking if `{}` already exists".format(s3_key))
         key = bucket.get_key(s3_key)
         if not key:
             self.info("Uploading to `{}`".format(s3_key))
             key = bucket.new_key(s3_key)
             # set key value
             mime_type, _ = mimetypes.guess_type(source)
-            self.retry(lambda: key.set_contents_from_filename(source, headers={'Content-Type': mime_type}),
-                       error_level=FATAL),
+            self.retry(lambda: key.set_contents_from_filename(
+                       source, headers={'Content-Type': mime_type}), error_level=FATAL)
         else:
             if not get_hash(key.get_contents_as_string()) == get_hash(open(source).read()):
                 # for now, let's halt. If necessary, we can revisit this and allow for overwrites
                 #  to the same buildnum release with different bits
                 self.fatal("`{}` already exists with different checksum.".format(s3_key))
             self.log("`{}` has the same MD5 checksum, not uploading".format(s3_key))
 
     def scan_bits(self):
 
         dirs = self.query_abs_dirs()
 
-        filenames = [f for f in listdir(dirs['abs_work_dir']) if isfile(join(dirs['abs_work_dir'], f))]
+        filenames = [f for f in listdir(dirs['abs_work_dir'])
+                     if isfile(join(dirs['abs_work_dir'], f))]
         self.mkdir_p(self.dest_dir)
         for file_name in filenames:
             if self._matches_exclude(file_name):
                 self.info("Excluding {} from virus scan".format(file_name))
             else:
-                self.info('Copying {} to {}'.format(file_name,self.dest_dir))
-                self.copyfile(os.path.join(dirs['abs_work_dir'], file_name), os.path.join(self.dest_dir,file_name))
+                self.info('Copying {} to {}'.format(file_name, self.dest_dir))
+                self.copyfile(os.path.join(dirs['abs_work_dir'], file_name),
+                              os.path.join(self.dest_dir, file_name))
         self._scan_files()
         self.info('Emptying {}'.format(self.dest_dir))
         self.rmtree(self.dest_dir)
 
     def _scan_files(self):
         """Scan the files we've collected. We do the download and scan concurrently to make
         it easier to have a coherent log afterwards. Uses the venv python."""
-        external_tools_path = os.path.join(
-                              os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))), 'external_tools')
-        self.run_command([self.query_python_path(), os.path.join(external_tools_path,'extract_and_run_command.py'),
-                         '-j{}'.format(self.config['scan_parallelization']),
-                         'clamscan', '--no-summary', '--', self.dest_dir])
+        external_tools_path = os.path.join(os.path.abspath(os.path.dirname(
+                              os.path.dirname(mozharness.__file__))), 'external_tools')
+        self.run_command([self.query_python_path(), os.path.join(external_tools_path,
+                         'extract_and_run_command.py'),
+                          '-j{}'.format(self.config['scan_parallelization']),
+                          'clamscan', '--no-summary', '--', self.dest_dir])
 
     def _matches_exclude(self, keyname):
-         return any(re.search(exclude, keyname) for exclude in self.excludes)
+        return any(re.search(exclude, keyname) for exclude in self.excludes)
 
     def mime_fix(self):
         """ Add mimetypes for custom extensions """
         mimetypes.init()
         map(lambda (ext, mime_type,): mimetypes.add_type(mime_type, ext), MIME_MAP.items())
 
+
 if __name__ == '__main__':
     beet_mover = BeetMover(pop_aws_auth_from_env())
     beet_mover.run_and_exit()
--- a/testing/mozharness/scripts/release/generate-checksums.py
+++ b/testing/mozharness/scripts/release/generate-checksums.py
@@ -11,37 +11,41 @@ sys.path.insert(1, os.path.dirname(os.pa
 from mozharness.base.python import VirtualenvMixin, virtualenv_config_options
 from mozharness.base.script import BaseScript
 from mozharness.base.vcs.vcsbase import VCSMixin
 from mozharness.mozilla.checksums import parse_checksums_file
 from mozharness.mozilla.signing import SigningMixin
 from mozharness.mozilla.buildbot import BuildbotMixin
 from mozharness.mozilla.merkle import MerkleTree
 
+
 class ChecksumsGenerator(BaseScript, VirtualenvMixin, SigningMixin, VCSMixin, BuildbotMixin):
     config_options = [
         [["--stage-product"], {
             "dest": "stage_product",
-            "help": "Name of product used in file server's directory structure, eg: firefox, mobile",
+            "help": "Name of product used in file server's directory structure, "
+                    "e.g.: firefox, mobile",
         }],
         [["--version"], {
             "dest": "version",
-            "help": "Version of release, eg: 39.0b5",
+            "help": "Version of release, e.g.: 39.0b5",
         }],
         [["--build-number"], {
             "dest": "build_number",
-            "help": "Build number of release, eg: 2",
+            "help": "Build number of release, e.g.: 2",
         }],
         [["--bucket-name-prefix"], {
             "dest": "bucket_name_prefix",
-            "help": "Prefix of bucket name, eg: net-mozaws-prod-delivery. This will be used to generate a full bucket name (such as net-mozaws-prod-delivery-{firefox,archive}.",
+            "help": "Prefix of bucket name, e.g.: net-mozaws-prod-delivery. This will be used to "
+                    "generate a full bucket name (such as "
+                    "net-mozaws-prod-delivery-{firefox,archive}).",
         }],
         [["--bucket-name-full"], {
             "dest": "bucket_name_full",
-            "help": "Full bucket name, eg: net-mozaws-prod-delivery-firefox",
+            "help": "Full bucket name, e.g.: net-mozaws-prod-delivery-firefox",
         }],
         [["-j", "--parallelization"], {
             "dest": "parallelization",
             "default": 20,
             "type": int,
             "help": "Number of checksums file to download concurrently",
         }],
         [["-f", "--format"], {
@@ -49,58 +53,59 @@ class ChecksumsGenerator(BaseScript, Vir
             "default": [],
             "action": "append",
             "help": "Format(s) to generate big checksums file for. Default: sha512",
         }],
         [["--include"], {
             "dest": "includes",
             "default": [],
             "action": "append",
-            "help": "List of patterns to include in big checksums file. See script source for default.",
+            "help": "List of patterns to include in big checksums file. See script "
+                    "source for default.",
         }],
         [["--tools-repo"], {
             "dest": "tools_repo",
             "default": "https://hg.mozilla.org/build/tools",
         }],
         [["--credentials"], {
             "dest": "credentials",
             "help": "File containing access key and secret access key for S3",
         }],
     ] + virtualenv_config_options
 
     def __init__(self):
         BaseScript.__init__(self,
-            config_options=self.config_options,
-            require_config_file=False,
-            config={
-                "virtualenv_modules": [
-                    "pip==1.5.5",
-                    "boto",
-                ],
-                "virtualenv_path": "venv",
-                'buildbot_json_path': 'buildprops.json',
-            },
-            all_actions=[
-                "create-virtualenv",
-                "collect-individual-checksums",
-                "create-big-checksums",
-                "create-summary",
-                "sign",
-                "upload",
-                "copy-info-files",
-            ],
-            default_actions=[
-                "create-virtualenv",
-                "collect-individual-checksums",
-                "create-big-checksums",
-                "create-summary",
-                "sign",
-                "upload",
-            ],
-        )
+                            config_options=self.config_options,
+                            require_config_file=False,
+                            config={
+                                "virtualenv_modules": [
+                                    "pip==1.5.5",
+                                    "boto",
+                                ],
+                                "virtualenv_path": "venv",
+                                'buildbot_json_path': 'buildprops.json',
+                            },
+                            all_actions=[
+                                "create-virtualenv",
+                                "collect-individual-checksums",
+                                "create-big-checksums",
+                                "create-summary",
+                                "sign",
+                                "upload",
+                                "copy-info-files",
+                            ],
+                            default_actions=[
+                                "create-virtualenv",
+                                "collect-individual-checksums",
+                                "create-big-checksums",
+                                "create-summary",
+                                "sign",
+                                "upload",
+                            ],
+                            )
 
         self.checksums = {}
         self.bucket = None
         self.bucket_name = self._get_bucket_name()
         self.file_prefix = self._get_file_prefix()
         # set the env var for boto to read our special config file
         # rather than anything else we have at ~/.boto
         os.environ["BOTO_CONFIG"] = os.path.abspath(self.config["credentials"])
@@ -139,17 +144,18 @@ class ChecksumsGenerator(BaseScript, Vir
                 r"^.*/jsshell.*$",
             ]
 
     def _get_bucket_name(self):
         if self.config.get('bucket_name_full'):
             return self.config['bucket_name_full']
 
         suffix = "archive"
-        # Firefox has a special bucket, per https://github.com/mozilla-services/product-delivery-tools/blob/master/bucketmap.go
+        # Firefox has a special bucket, per
+        # https://github.com/mozilla-services/product-delivery-tools/blob/master/bucketmap.go
         if self.config["stage_product"] == "firefox":
             suffix = "firefox"
 
         return "{}-{}".format(self.config["bucket_name_prefix"], suffix)
 
     def _get_file_prefix(self):
         return "pub/{}/candidates/{}-candidates/build{}/".format(
             self.config["stage_product"], self.config["version"], self.config["build_number"]
@@ -183,16 +189,17 @@ class ChecksumsGenerator(BaseScript, Vir
         """This step grabs all of the small checksums files for the release,
         filters out any unwanted files from within them, and adds the remainder
         to self.checksums for subsequent steps to use."""
         bucket = self._get_bucket()
         self.info("File prefix is: {}".format(self.file_prefix))
 
         # Temporary holding place for checksums
         raw_checksums = []
+
         def worker(item):
             self.debug("Downloading {}".format(item))
             # TODO: It would be nice to download the associated .asc file
             # and verify against it.
             sums = bucket.get_key(item).get_contents_as_string()
             raw_checksums.append(sums)
 
         def find_checksums_files():
@@ -217,17 +224,18 @@ class ChecksumsGenerator(BaseScript, Vir
         pool = ThreadPool(self.config["parallelization"])
         pool.map(worker, find_checksums_files())
 
         for c in raw_checksums:
             for f, info in parse_checksums_file(c).iteritems():
                 for pattern in self.config["includes"]:
                     if re.search(pattern, f):
                         if f in self.checksums:
-                            self.fatal("Found duplicate checksum entry for {}, don't know which one to pick.".format(f))
+                            self.fatal("Found duplicate checksum entry for {}, "
+                                       "don't know which one to pick.".format(f))
                         if not set(self.config["formats"]) <= set(info["hashes"]):
                             self.fatal("Missing necessary format for file {}".format(f))
                         self.debug("Adding checksums for file: {}".format(f))
                         self.checksums[f] = info
                         break
                 else:
                     self.debug("Ignoring checksums for file: {}".format(f))
 
@@ -239,17 +247,18 @@ class ChecksumsGenerator(BaseScript, Vir
         """
         for fmt in self.config["formats"]:
             hash_fn = self._get_hash_function(fmt)
             files = [fn for fn in sorted(self.checksums)]
             data = [self.checksums[fn]["hashes"][fmt] for fn in files]
 
             tree = MerkleTree(hash_fn, data)
             head = tree.head().encode("hex")
-            proofs = [tree.inclusion_proof(i).to_rfc6962_bis().encode("hex") for i in range(len(files))]
+            proofs = [tree.inclusion_proof(i).to_rfc6962_bis().encode("hex")
+                      for i in range(len(files))]
 
             summary = self._get_summary_filename(fmt)
             self.info("Creating summary file: {}".format(summary))
 
             content = "{} TREE_HEAD\n".format(head)
             for i in range(len(files)):
                 content += "{} {}\n".format(proofs[i], files[i])
 
--- a/testing/mozharness/scripts/release/postrelease_version_bump.py
+++ b/testing/mozharness/scripts/release/postrelease_version_bump.py
@@ -180,11 +180,12 @@ class PostReleaseVersionBump(MercurialSc
         message = "No bug - Tagging {revision} with {tags} a=release CLOSED TREE"
         message = message.format(
             revision=self.config["revision"],
             tags=', '.join(tags))
         self.hg_tag(cwd=dirs["abs_gecko_dir"], tags=tags,
                     revision=self.config["revision"], message=message,
                     user=self.config["hg_user"], force=True)
 
+
 # __main__ {{{1
 if __name__ == '__main__':
     PostReleaseVersionBump().run_and_exit()
--- a/testing/mozharness/scripts/release/publish_balrog.py
+++ b/testing/mozharness/scripts/release/publish_balrog.py
@@ -161,12 +161,11 @@ class PublishBalrog(MercurialScript, Bui
             cmd.extend(["--schedule-at", self.config["schedule_at"]])
         if self.config.get("background_rate"):
             cmd.extend(["--background-rate", str(self.config["background_rate"])])
 
         self.retry(lambda: self.run_command(cmd, halt_on_failure=True, env=env),
                    error_level=FATAL)
 
 
-
 # __main__ {{{1
 if __name__ == '__main__':
     PublishBalrog().run_and_exit()
--- a/testing/mozharness/scripts/release/push-candidate-to-releases.py
+++ b/testing/mozharness/scripts/release/push-candidate-to-releases.py
@@ -61,37 +61,37 @@ class ReleasePusher(BaseScript, Virtuale
             "default": 20,
             "type": "int",
             "help": "Number of copy requests to run concurrently",
         }],
     ] + virtualenv_config_options
 
     def __init__(self, aws_creds):
         BaseScript.__init__(self,
-            config_options=self.config_options,
-            require_config_file=False,
-            config={
-                "virtualenv_modules": [
-                    "pip==1.5.5",
-                    "boto",
-                    "redo",
-                ],
-                "virtualenv_path": "venv",
-            },
-            all_actions=[
-                "create-virtualenv",
-                "activate-virtualenv",
-                "push-to-releases",
-            ],
-            default_actions=[
-                "create-virtualenv",
-                "activate-virtualenv",
-                "push-to-releases",
-            ],
-        )
+                            config_options=self.config_options,
+                            require_config_file=False,
+                            config={
+                                    "virtualenv_modules": [
+                                        "pip==1.5.5",
+                                        "boto",
+                                        "redo",
+                                    ],
+                                    "virtualenv_path": "venv",
+                                   },
+                            all_actions=[
+                                "create-virtualenv",
+                                "activate-virtualenv",
+                                "push-to-releases",
+                            ],
+                            default_actions=[
+                                "create-virtualenv",
+                                "activate-virtualenv",
+                                "push-to-releases",
+                            ],
+                            )
 
         # validate aws credentials
         if not (all(aws_creds) or self.config.get('credentials')):
             self.fatal("aws creds not defined. please add them to your config or env.")
         if any(aws_creds) and self.config.get('credentials'):
             self.fatal("aws creds found in env and self.config. please declare in one place only.")
 
         # set aws credentials
@@ -147,38 +147,40 @@ class ReleasePusher(BaseScript, Virtuale
             self.warning("Destination already exists with %s keys" % len(keys))
 
         def worker(item):
             source, destination = item
 
             def copy_key():
                 source_key = bucket.get_key(source)
                 dest_key = bucket.get_key(destination)
-                # According to http://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html
+                # According to
+                # http://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html
                 # S3 key MD5 is represented as ETag, except when objects are
                 # uploaded using multipart method. In this case objects's ETag
                 # is constructed using its MD5, minus symbol, and number of
-                # part. See http://stackoverflow.com/questions/12186993/what-is-the-algorithm-to-compute-the-amazon-s3-etag-for-a-file-larger-than-5gb#answer-19896823
+                # part. See http://stackoverflow.com/questions/12186993/what-is-the-algorithm-to-compute-the-amazon-s3-etag-for-a-file-larger-than-5gb#answer-19896823  # noqa
                 source_md5 = source_key.etag.split("-")[0]
                 if dest_key:
                     dest_md5 = dest_key.etag.split("-")[0]
                 else:
                     dest_md5 = None
 
                 if not dest_key:
                     self.info("Copying {} to {}".format(source, destination))
                     bucket.copy_key(destination, self.config["bucket_name"],
                                     source)
                 elif source_md5 == dest_md5:
                     self.warning(
                         "{} already exists with the same content ({}), skipping copy".format(
                             destination, dest_md5))
                 else:
                     self.fatal(
-                        "{} already exists with the different content (src ETag: {}, dest ETag: {}), aborting".format(
+                        "{} already exists with the different content "
+                        "(src ETag: {}, dest ETag: {}), aborting".format(
                             destination, source_key.etag, dest_key.etag))
 
             return retry(copy_key, sleeptime=5, max_sleeptime=60,
                          retry_exceptions=(S3CopyError, S3ResponseError))
 
         def find_release_files():
             candidates_prefix = self._get_candidates_prefix()
             release_prefix = self._get_releases_prefix()
@@ -190,11 +192,12 @@ class ReleasePusher(BaseScript, Virtuale
                 else:
                     destination = keyname.replace(candidates_prefix,
                                                   release_prefix)
                     yield (keyname, destination)
 
         pool = ThreadPool(self.config["parallelization"])
         pool.map(worker, find_release_files())
 
+
 if __name__ == "__main__":
     myScript = ReleasePusher(pop_aws_auth_from_env())
     myScript.run_and_exit()
--- a/testing/mozharness/scripts/release/submit-to-ct.py
+++ b/testing/mozharness/scripts/release/submit-to-ct.py
@@ -11,43 +11,44 @@ from mozharness.mozilla.signed_certifica
 
 
 class CTSubmitter(BaseScript, VirtualenvMixin):
     config_options = virtualenv_config_options
 
     config_options = [
         [["--chain"], {
             "dest": "chain",
-            "help": "URL from which to download the cert chain to be submitted to CT (in PEM format)"
+            "help": "URL from which to download the cert chain to be "
+                    "submitted to CT (in PEM format)"
         }],
         [["--log"], {
             "dest": "log",
             "help": "URL for the log to which the chain should be submitted"
         }],
         [["--sct"], {
             "dest": "sct",
             "help": "File where the SCT from the log should be written"
         }],
     ]
 
     def __init__(self):
         BaseScript.__init__(self,
-            config_options=self.config_options,
-            config={
-                "virtualenv_modules": [
-                    "pem",
-                    "redo",
-                    "requests",
-                ],
-                "virtualenv_path": "venv",
-            },
-            require_config_file=False,
-            all_actions=["add-chain"],
-            default_actions=["add-chain"],
-        )
+                            config_options=self.config_options,
+                            config={
+                                "virtualenv_modules": [
+                                    "pem",
+                                    "redo",
+                                    "requests",
+                                ],
+                                "virtualenv_path": "venv",
+                            },
+                            require_config_file=False,
+                            all_actions=["add-chain"],
+                            default_actions=["add-chain"],
+                            )
 
         self.chain_url = self.config["chain"]
         self.log_url = self.config["log"]
         self.sct_filename = self.config["sct"]
 
     def add_chain(self):
         from redo import retry
         import requests
@@ -55,27 +56,28 @@ class CTSubmitter(BaseScript, Virtualenv
 
         def get_chain():
             r = requests.get(self.chain_url)
             r.raise_for_status()
             return r.text
 
         chain = retry(get_chain)
 
-        req = { "chain": [] }
+        req = {"chain": []}
         chain = pem.parse(chain)
         for i in range(len(chain)):
             cert = crypto.load_certificate(crypto.FILETYPE_PEM, str(chain[i]))
             der = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert)
             req["chain"].append(base64.b64encode(der))
 
         def post_chain():
             r = requests.post(self.log_url + '/ct/v1/add-chain', json=req)
             r.raise_for_status()
             return r.json()
 
         resp = retry(post_chain)
         sct = SignedCertificateTimestamp(resp)
         self.write_to_file(self.sct_filename, sct.to_rfc6962())
 
+
 if __name__ == "__main__":
     myScript = CTSubmitter()
     myScript.run_and_exit()
old mode 100644
new mode 100755
--- a/testing/mozharness/scripts/release/updates.py
+++ b/testing/mozharness/scripts/release/updates.py
@@ -89,18 +89,17 @@ class UpdatesBumper(MercurialScript, Bui
         # taskcluster properties
         self.read_buildbot_config()
         if not self.buildbot_config:
             self.warning("Skipping buildbot properties overrides")
             return
         # TODO: version and appVersion should come from repo
         props = self.buildbot_config["properties"]
         for prop in ['product', 'version', 'build_number', 'revision',
-                     'appVersion', 'balrog_api_root', "channels",
-                     'generate_bz2_blob']:
+                     'appVersion', 'balrog_api_root', "channels"]:
             if props.get(prop):
                 self.info("Overriding %s with %s" % (prop, props[prop]))
                 self.config[prop] = props.get(prop)
 
         partials = [v.strip() for v in props["partial_versions"].split(",")]
         self.config["partial_versions"] = [v.split("build") for v in partials]
         self.config["platforms"] = [p.strip() for p in
                                     props["platforms"].split(",")]
@@ -265,20 +264,16 @@ class UpdatesBumper(MercurialScript, Bui
                          build_number=self.config["build_number"])
                 for t in tags]
         self.hg_tag(cwd=dirs["abs_tools_dir"], tags=tags,
                     user=self.config["hg_user"], force=True)
 
     def submit_to_balrog(self):
         for _, channel_config in self.query_channel_configs():
             self._submit_to_balrog(channel_config)
-        if 'generate_bz2_blob' in self.config and \
-                self.config['generate_bz2_blob']:
-            for _, channel_config in self.query_channel_configs():
-                self._submit_to_balrog_bz2(channel_config)
 
     def _submit_to_balrog(self, channel_config):
         dirs = self.query_abs_dirs()
         auth = os.path.join(os.getcwd(), self.config['credentials_file'])
         cmd = [
             sys.executable,
             os.path.join(dirs["abs_tools_dir"],
                          "scripts/build-promotion/balrog-release-pusher.py")]
@@ -306,65 +301,12 @@ class UpdatesBumper(MercurialScript, Bui
             cmd.extend(["--partial-update", partial])
         if channel_config["requires_mirrors"]:
             cmd.append("--requires-mirrors")
         if self.config["balrog_use_dummy_suffix"]:
             cmd.append("--dummy")
 
         self.retry(lambda: self.run_command(cmd, halt_on_failure=True))
 
-    def _submit_to_balrog_bz2(self, channel_config):
-        if "bz2_blob_suffix" not in channel_config:
-            self.info("No need to generate BZ2 blob")
-            return
-
-        dirs = self.query_abs_dirs()
-        # Use env varialbe instead of command line to avoid issues with blob
-        # names starting with "-", e.g. "-bz2"
-        env = {"BALROG_BLOB_SUFFIX": channel_config["bz2_blob_suffix"]}
-        auth = os.path.join(os.getcwd(), self.config['credentials_file'])
-        cmd = [
-            sys.executable,
-            os.path.join(dirs["abs_tools_dir"],
-                         "scripts/build-promotion/balrog-release-pusher.py")]
-        cmd.extend([
-            "--api-root", self.config["balrog_api_root"],
-            "--download-domain", self.config["download_domain"],
-            "--archive-domain", self.config["archive_domain"],
-            "--credentials-file", auth,
-            "--product", self.config["product"],
-            "--version", self.config["version"],
-            "--build-number", str(self.config["build_number"]),
-            "--app-version", self.config["appVersion"],
-            "--username", self.config["balrog_username"],
-            "--complete-mar-filename-pattern",
-            channel_config["complete_mar_filename_pattern"],
-            "--complete-mar-bouncer-product-pattern",
-            channel_config["complete_mar_bouncer_product_pattern"],
-            "--verbose",
-        ])
-
-        for v, build_number in self.query_matching_partials(channel_config):
-            if v < "56.0":
-                self.info("Adding %s to partials" % v)
-                partial = "{version}build{build_number}".format(
-                    version=v, build_number=build_number)
-                cmd.extend(["--partial-update", partial])
-            else:
-                self.info("Not adding %s to partials" % v)
-
-        for c in channel_config["channel_names"]:
-            cmd.extend(["--channel", c])
-        for r in channel_config["bz2_rules_to_update"]:
-            cmd.extend(["--rule-to-update", r])
-        for p in self.config["platforms"]:
-            cmd.extend(["--platform", p])
-        if channel_config["requires_mirrors"]:
-            cmd.append("--requires-mirrors")
-        if self.config["balrog_use_dummy_suffix"]:
-            cmd.append("--dummy")
-
-        self.retry(lambda: self.run_command(cmd, halt_on_failure=True, env=env))
-
 
 # __main__ {{{1
 if __name__ == '__main__':
     UpdatesBumper().run_and_exit()
--- a/testing/mozharness/scripts/spidermonkey_build.py
+++ b/testing/mozharness/scripts/spidermonkey_build.py
@@ -146,32 +146,33 @@ class SpidermonkeyBuild(MockMixin,
 
         if self.buildbot_config is None:
             self.info("Reading buildbot build properties...")
             self.read_buildbot_config()
 
         if self.buildbot_config:
             bb_props = [('mock_target', 'mock_target', None),
                         ('hgurl', 'hgurl', None),
-                        ('clobberer_url', 'clobberer_url', 'https://api.pub.build.mozilla.org/clobberer/lastclobber'),
+                        ('clobberer_url', 'clobberer_url',
+                         'https://api.pub.build.mozilla.org/clobberer/lastclobber'),
                         ('force_clobber', 'force_clobber', None),
                         ('branch', 'blob_upload_branch', None),
                         ]
             buildbot_props = self.buildbot_config.get('properties', {})
             for bb_prop, cfg_prop, default in bb_props:
                 if not self.config.get(cfg_prop) and buildbot_props.get(bb_prop, default):
                     self.config[cfg_prop] = buildbot_props.get(bb_prop, default)
             self.config['is_automation'] = True
         else:
             self.config['is_automation'] = False
             self.config.setdefault('blob_upload_branch', 'devel')
 
         dirs = self.query_abs_dirs()
         replacements = self.config['env_replacements'].copy()
-        for k,v in replacements.items():
+        for k, v in replacements.items():
             replacements[k] = v % dirs
 
         self.env = self.query_env(replace_dict=replacements,
                                   partial_env=self.config['partial_env'],
                                   purge_env=nuisance_env_vars)
         self.env['MOZ_UPLOAD_DIR'] = dirs['abs_blob_upload_dir']
         self.env['TOOLTOOL_DIR'] = dirs['abs_work_dir']
 
@@ -231,24 +232,26 @@ class SpidermonkeyBuild(MockMixin,
         elif 'branch' in self.config:
             # Used for locally testing try vs non-try
             return self.config['branch']
         else:
             return os.path.basename(self.query_repo())
 
     def query_compiler_manifest(self):
         dirs = self.query_abs_dirs()
-        manifest = os.path.join(dirs['abs_work_dir'], dirs['analysis_scriptdir'], self.config['compiler_manifest'])
+        manifest = os.path.join(dirs['abs_work_dir'], dirs['analysis_scriptdir'],
+                                self.config['compiler_manifest'])
         if os.path.exists(manifest):
             return manifest
         return os.path.join(dirs['abs_work_dir'], self.config['compiler_manifest'])
 
     def query_sixgill_manifest(self):
         dirs = self.query_abs_dirs()
-        manifest = os.path.join(dirs['abs_work_dir'], dirs['analysis_scriptdir'], self.config['sixgill_manifest'])
+        manifest = os.path.join(dirs['abs_work_dir'], dirs['analysis_scriptdir'],
+                                self.config['sixgill_manifest'])
         if os.path.exists(manifest):
             return manifest
         return os.path.join(dirs['abs_work_dir'], self.config['sixgill_manifest'])
 
     def query_buildid(self):
         if self.buildid:
             return self.buildid
         if self.buildbot_config and 'properties' in self.buildbot_config:
--- a/testing/mozharness/scripts/telemetry/telemetry_client.py
+++ b/testing/mozharness/scripts/telemetry/telemetry_client.py
@@ -13,17 +13,17 @@ import sys
 # load modules from parent dir
 sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
 
 GECKO_SRCDIR = os.path.join(os.path.expanduser('~'), 'checkouts', 'gecko')
 
 TELEMETRY_TEST_HOME = os.path.join(GECKO_SRCDIR, 'toolkit', 'components', 'telemetry',
                                    'tests', 'marionette')
 
-from mozharness.base.python import PostScriptRun, PreScriptAction
+from mozharness.base.python import PreScriptAction
 from mozharness.mozilla.structuredlog import StructuredOutputParser
 from mozharness.mozilla.testing.testbase import (
     TestingMixin,
     testing_config_options,
 )
 from mozharness.mozilla.testing.codecoverage import (
     CodeCoverageMixin,
     code_coverage_config_options
@@ -31,17 +31,18 @@ from mozharness.mozilla.testing.codecove
 from mozharness.mozilla.vcstools import VCSToolsScript
 
 # General command line arguments for Firefox ui tests
 telemetry_tests_config_options = [
     [["--allow-software-gl-layers"], {
         "action": "store_true",
         "dest": "allow_software_gl_layers",
         "default": False,
-        "help": "Permits a software GL implementation (such as LLVMPipe) to use the GL compositor.",
+        "help": "Permits a software GL implementation (such as LLVMPipe) "
+                "to use the GL compositor.",
     }],
     [["--enable-webrender"], {
         "action": "store_true",
         "dest": "enable_webrender",
         "default": False,
         "help": "Tries to enable the WebRender compositor.",
     }],
     [['--dry-run'], {
@@ -96,17 +97,16 @@ class TelemetryTests(TestingMixin, VCSTo
         self.test_url = self.config.get('test_url')
 
         if not self.test_url and not self.test_packages_url:
             self.fatal(
                 'You must use --test-url, or --test-packages-url')
 
     @PreScriptAction('create-virtualenv')
     def _pre_create_virtualenv(self, action):
-        dirs = self.query_abs_dirs()
 
         requirements = os.path.join(GECKO_SRCDIR, 'testing',
                                     'config', 'telemetry_tests_requirements.txt')
         self.register_virtualenv_module(requirements=[requirements], two_pass=True)
 
     def query_abs_dirs(self):
         if self.abs_dirs:
             return self.abs_dirs
@@ -153,17 +153,18 @@ class TelemetryTests(TestingMixin, VCSTo
             '-vv',
         ]
 
         parser = StructuredOutputParser(config=self.config,
                                         log_obj=self.log_obj,
                                         strict=False)
 
         # Add the default tests to run
-        tests = [os.path.join(dirs['abs_telemetry_dir'], 'tests', test) for test in self.default_tests]
+        tests = [os.path.join(dirs['abs_telemetry_dir'], 'tests', test)
+                 for test in self.default_tests]
         cmd.extend(tests)
 
         # Set further environment settings
         env = env or self.query_env()
         env.update({'MINIDUMP_SAVE_PATH': dirs['abs_blob_upload_dir']})
         if self.query_minidump_stackwalk():
             env.update({'MINIDUMP_STACKWALK': self.minidump_stackwalk_path})
         env['RUST_BACKTRACE'] = '1'
--- a/testing/mozharness/scripts/web_platform_tests.py
+++ b/testing/mozharness/scripts/web_platform_tests.py
@@ -1,37 +1,36 @@
 #!/usr/bin/env python
 # ***** BEGIN LICENSE BLOCK *****
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
 # You can obtain one at http://mozilla.org/MPL/2.0/.
 # ***** END LICENSE BLOCK *****
 import copy
-import glob
-import json
 import os
 import sys
 
 # load modules from parent dir
 sys.path.insert(1, os.path.dirname(sys.path[0]))
 
 from mozharness.base.errors import BaseErrorList
 from mozharness.base.script import PreScriptAction
 from mozharness.base.vcs.vcsbase import MercurialScript
 from mozharness.mozilla.blob_upload import BlobUploadMixin, blobupload_config_options
-from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options, TOOLTOOL_PLATFORM_DIR
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
 from mozharness.mozilla.testing.codecoverage import (
     CodeCoverageMixin,
     code_coverage_config_options
 )
 from mozharness.mozilla.testing.errors import HarnessErrorList
 
 from mozharness.mozilla.structuredlog import StructuredOutputParser
 from mozharness.base.log import INFO
 
+
 class WebPlatformTest(TestingMixin, MercurialScript, BlobUploadMixin, CodeCoverageMixin):
     config_options = [
         [['--test-type'], {
             "action": "extend",
             "dest": "test_type",
             "help": "Specify the test types to run."}
          ],
         [['--e10s'], {
@@ -49,17 +48,18 @@ class WebPlatformTest(TestingMixin, Merc
             "action": "store",
             "dest": "this_chunk",
             "help": "Number of this chunk"}
          ],
         [["--allow-software-gl-layers"], {
             "action": "store_true",
             "dest": "allow_software_gl_layers",
             "default": False,
-            "help": "Permits a software GL implementation (such as LLVMPipe) to use the GL compositor."}
+            "help": "Permits a software GL implementation (such as LLVMPipe) "
+                    "to use the GL compositor."}
          ],
         [["--enable-webrender"], {
             "action": "store_true",
             "dest": "enable_webrender",
             "default": False,
             "help": "Tries to enable the WebRender compositor."}
          ],
         [["--headless"], {
@@ -163,17 +163,17 @@ class WebPlatformTest(TestingMixin, Merc
                                     'marionette_requirements.txt')
 
         self.register_virtualenv_module(requirements=[requirements],
                                         two_pass=True)
 
     def _query_cmd(self):
         if not self.binary_path:
             self.fatal("Binary path could not be determined")
-            #And exit
+            # And exit
 
         c = self.config
         dirs = self.query_abs_dirs()
         abs_app_dir = self.query_abs_app_dir()
         run_file_name = "runtests.py"
 
         cmd = [self.query_python_path('python'), '-u']
         cmd.append(os.path.join(dirs["abs_wpttest_dir"], run_file_name))
@@ -261,17 +261,18 @@ class WebPlatformTest(TestingMixin, Merc
 
     def _install_fonts(self):
         # Ensure the Ahem font is available
         dirs = self.query_abs_dirs()
 
         if not sys.platform.startswith("darwin"):
             font_path = os.path.join(os.path.dirname(self.binary_path), "fonts")
         else:
-            font_path = os.path.join(os.path.dirname(self.binary_path), os.pardir, "Resources", "res", "fonts")
+            font_path = os.path.join(os.path.dirname(self.binary_path), os.pardir,
+                                     "Resources", "res", "fonts")
         if not os.path.exists(font_path):
             os.makedirs(font_path)
         ahem_src = os.path.join(dirs["abs_wpttest_dir"], "tests", "fonts", "Ahem.ttf")
         ahem_dest = os.path.join(font_path, "Ahem.ttf")
         with open(ahem_src, "rb") as src, open(ahem_dest, "wb") as dest:
             dest.write(src.read())
 
     def run_tests(self):
--- a/tools/lint/flake8.yml
+++ b/tools/lint/flake8.yml
@@ -1,39 +1,38 @@
 ---
 flake8:
     description: Python linter
     include:
-        - build/moz.configure/*.configure
-        - build/*.py
-        - configure.py
-        - config/check_macroassembler_style.py
         - config/mozunit.py
         - layout/tools/reftest
         - python/mach
         - python/mach_commands.py
         - python/mozlint
         - python/mozversioncontrol
         - security/manager
         - taskcluster
         - testing/firefox-ui
         - testing/mach_commands.py
         - testing/marionette/client
         - testing/marionette/harness
         - testing/marionette/puppeteer
+        - testing/mochitest
         - testing/mozbase
-        - testing/mochitest
+        - testing/mozharness/external_tools
+        - testing/mozharness/mozfile
+        - testing/mozharness/mozinfo
+        - testing/mozharness/scripts
         - testing/remotecppunittests.py
         - testing/runcppunittests.py
         - testing/talos/
         - testing/xpcshell
         - tools/git
         - tools/lint
         - tools/mercurial
         - tools/tryselect
         - toolkit/components/telemetry
     # Excludes should be added to topsrcdir/.flake8 due to a bug in flake8 where
     # specifying --exclude causes custom configuration files to be ignored.
     exclude: []
-    # The configure option is used by the build system
-    extensions: ['configure', 'py']
+    extensions: ['py']
     type: external
     payload: python.flake8:lint