Bug 1280956 - Use in-tree linter job to flake8 test taskcluster directory
author Justin Wood <Callek@gmail.com>
Mon, 20 Jun 2016 21:06:55 -0400
changeset 380188 75f22a47b922dce93f94413e46496b5370782e6f
parent 380187 57f790e0daffeaf4a134043fab58337454908ce7
child 523667 6fe76f3ac0bc6202a0384c2d3ad9ed571e3843c4
push id 21160
push user Callek@gmail.com
push date Tue, 21 Jun 2016 01:13:11 +0000
bugs 1280956
milestone 50.0a1
Bug 1280956 - Use in-tree linter job to flake8 test taskcluster directory MozReview-Commit-ID: FsWmAnnycZ2
taskcluster/ci/legacy/tasks/branches/base_jobs.yml
taskcluster/mach_commands.py
taskcluster/scripts/builder/gaia_props.py
taskcluster/taskgraph/create.py
taskcluster/taskgraph/decision.py
taskcluster/taskgraph/generator.py
taskcluster/taskgraph/graph.py
taskcluster/taskgraph/kind/base.py
taskcluster/taskgraph/kind/docker_image.py
taskcluster/taskgraph/kind/legacy.py
taskcluster/taskgraph/optimize.py
taskcluster/taskgraph/parameters.py
taskcluster/taskgraph/target_tasks.py
taskcluster/taskgraph/test/test_create.py
taskcluster/taskgraph/test/test_decision.py
taskcluster/taskgraph/test/test_generator.py
taskcluster/taskgraph/test/test_graph.py
taskcluster/taskgraph/test/test_kind_docker_image.py
taskcluster/taskgraph/test/test_kind_legacy.py
taskcluster/taskgraph/test/test_optimize.py
taskcluster/taskgraph/test/test_parameters.py
taskcluster/taskgraph/test/test_try_option_syntax.py
taskcluster/taskgraph/test/test_util_docker.py
taskcluster/taskgraph/test/test_util_legacy_commit_parser.py
taskcluster/taskgraph/test/test_util_templates.py
taskcluster/taskgraph/test/test_util_time.py
taskcluster/taskgraph/try_option_syntax.py
taskcluster/taskgraph/types.py
taskcluster/taskgraph/util/docker.py
taskcluster/taskgraph/util/legacy_commit_parser.py
taskcluster/taskgraph/util/templates.py
taskcluster/taskgraph/util/time.py
tools/lint/flake8.lint
--- a/taskcluster/ci/legacy/tasks/branches/base_jobs.yml
+++ b/taskcluster/ci/legacy/tasks/branches/base_jobs.yml
@@ -540,16 +540,17 @@ tasks:
     root: true
     when:
       file_patterns:
         - '**/*.py'
         - '**/.flake8'
         - 'python/mozlint/**'
         - 'tools/lint/**'
         - 'testing/docker/lint/**'
+        - 'taskcluster/**'
   android-api-15-gradle-dependencies:
     task: tasks/builds/android_api_15_gradle_dependencies.yml
     root: true
     when:
       file_patterns:
         - 'mobile/android/config/**'
         - 'testing/docker/android-gradle-build/**'
         - 'testing/mozharness/configs/builds/releng_sub_android_configs/*gradle_dependencies.py'
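
The only functional change in this file is the new 'taskcluster/**' entry: pushes that touch anything under taskcluster/ now schedule the flake8 lint task. The legacy kind evaluates these when.file_patterns entries with mozpack.path.match (see the should_run() hunk in legacy.py below). A minimal sketch of that check, with illustrative paths and assuming the in-tree mozpack package is importable:

from mozpack.path import match as mozpackmatch

# A changed file under taskcluster/ matches the new pattern, so the lint task
# is scheduled; an unrelated file does not.
assert mozpackmatch('taskcluster/taskgraph/create.py', 'taskcluster/**')
assert not mozpackmatch('browser/app/profile/firefox.js', 'taskcluster/**')

The same checks can be run locally with the in-tree linter, e.g. ./mach lint -l flake8 taskcluster (exact flags depend on the mozlint front end and the tools/lint/flake8.lint definition touched by this patch).
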
--- a/taskcluster/mach_commands.py
+++ b/taskcluster/mach_commands.py
@@ -73,109 +73,107 @@ class MachCommands(MachCommandBase):
         import mozunit
         suite = unittest.defaultTestLoader.discover('taskgraph.test')
         runner = mozunit.MozTestRunner(verbosity=2)
         result = runner.run(suite)
         if not result.wasSuccessful:
             sys.exit(1)
 
     @ShowTaskGraphSubCommand('taskgraph', 'tasks',
-                         description="Show all tasks in the taskgraph")
+                             description="Show all tasks in the taskgraph")
     def taskgraph_tasks(self, **options):
         return self.show_taskgraph('full_task_set', options)
 
     @ShowTaskGraphSubCommand('taskgraph', 'full',
-                         description="Show the full taskgraph")
+                             description="Show the full taskgraph")
     def taskgraph_full(self, **options):
         return self.show_taskgraph('full_task_graph', options)
 
     @ShowTaskGraphSubCommand('taskgraph', 'target',
-                         description="Show the target task set")
+                             description="Show the target task set")
     def taskgraph_target(self, **options):
         return self.show_taskgraph('target_task_set', options)
 
     @ShowTaskGraphSubCommand('taskgraph', 'target-graph',
-                         description="Show the target taskgraph")
+                             description="Show the target taskgraph")
     def taskgraph_target_taskgraph(self, **options):
         return self.show_taskgraph('target_task_graph', options)
 
     @ShowTaskGraphSubCommand('taskgraph', 'optimized',
-                         description="Show the optimized taskgraph")
+                             description="Show the optimized taskgraph")
     def taskgraph_optimized(self, **options):
         return self.show_taskgraph('optimized_task_graph', options)
 
     @SubCommand('taskgraph', 'decision',
                 description="Run the decision task")
     @CommandArgument('--root', '-r',
-        default='taskcluster/ci',
-        help="root of the taskgraph definition relative to topsrcdir")
+                     default='taskcluster/ci',
+                     help="root of the taskgraph definition relative to topsrcdir")
     @CommandArgument('--base-repository',
-        required=True,
-        help='URL for "base" repository to clone')
+                     required=True,
+                     help='URL for "base" repository to clone')
     @CommandArgument('--head-repository',
-        required=True,
-        help='URL for "head" repository to fetch revision from')
+                     required=True,
+                     help='URL for "head" repository to fetch revision from')
     @CommandArgument('--head-ref',
-        required=True,
-        help='Reference (this is same as rev usually for hg)')
+                     required=True,
+                     help='Reference (this is same as rev usually for hg)')
     @CommandArgument('--head-rev',
-        required=True,
-        help='Commit revision to use from head repository')
+                     required=True,
+                     help='Commit revision to use from head repository')
     @CommandArgument('--message',
-        required=True,
-        help='Commit message to be parsed. Example: "try: -b do -p all -u all"')
+                     required=True,
+                     help='Commit message to be parsed. Example: "try: -b do -p all -u all"')
     @CommandArgument('--revision-hash',
-        required=True,
-        help='Treeherder revision hash (long revision id) to attach results to')
+                     required=True,
+                     help='Treeherder revision hash (long revision id) to attach results to')
     @CommandArgument('--project',
-        required=True,
-        help='Project to use for creating task graph. Example: --project=try')
+                     required=True,
+                     help='Project to use for creating task graph. Example: --project=try')
     @CommandArgument('--pushlog-id',
-        dest='pushlog_id',
-        required=True,
-        default=0)
+                     dest='pushlog_id',
+                     required=True,
+                     default=0)
     @CommandArgument('--owner',
-        required=True,
-        help='email address of who owns this graph')
+                     required=True,
+                     help='email address of who owns this graph')
     @CommandArgument('--level',
-        required=True,
-        help='SCM level of this repository')
+                     required=True,
+                     help='SCM level of this repository')
     def taskgraph_decision(self, **options):
         """Run the decision task: generate a task graph and submit to
         TaskCluster.  This is only meant to be called within decision tasks,
         and requires a great many arguments.  Commands like `mach taskgraph
         optimized` are better suited to use on the command line, and can take
         the parameters file generated by a decision task.  """
 
         import taskgraph.decision
         try:
             self.setup_logging()
             return taskgraph.decision.taskgraph_decision(options)
-        except Exception as e:
+        except Exception:
             traceback.print_exc()
             sys.exit(1)
 
-
     def setup_logging(self, quiet=False, verbose=True):
         """
         Set up Python logging for all loggers, sending results to stderr (so
         that command output can be redirected easily) and adding the typical
         mach timestamp.
         """
         # remove the old terminal handler
         self.log_manager.replace_terminal_handler(None)
 
         # re-add it, with level and fh set appropriately
         if not quiet:
             level = logging.DEBUG if verbose else logging.INFO
             self.log_manager.add_terminal_logging(fh=sys.stderr, level=level)
 
         # all of the taskgraph logging is unstructured logging
         self.log_manager.enable_unstructured()
-        
 
     def show_taskgraph(self, graph_attr, options):
         import taskgraph.parameters
         import taskgraph.target_tasks
         import taskgraph.generator
 
         try:
             self.setup_logging(quiet=options['quiet'], verbose=options['verbose'])
@@ -187,17 +185,17 @@ class MachCommands(MachCommandBase):
                 root_dir=options['root'],
                 parameters=parameters,
                 target_tasks_method=target_tasks_method)
 
             tg = getattr(tgg, graph_attr)
 
             show_method = getattr(self, 'show_taskgraph_' + (options['format'] or 'labels'))
             show_method(tg)
-        except Exception as e:
+        except Exception:
             traceback.print_exc()
             sys.exit(1)
 
     def show_taskgraph_labels(self, taskgraph):
         for label in taskgraph.graph.visit_postorder():
             print(label)
 
     def show_taskgraph_json(self, taskgraph):
@@ -205,30 +203,31 @@ class MachCommands(MachCommandBase):
         # disassemble the dictionary
         for task in taskgraph.to_json().itervalues():
             print(json.dumps(task))
 
 
 @CommandProvider
 class LoadImage(object):
     @Command('taskcluster-load-image', category="ci",
-        description="Load a pre-built Docker image")
+             description="Load a pre-built Docker image")
     @CommandArgument('--task-id',
-        help="Load the image at public/image.tar in this task, rather than "
-             "searching the index")
+                     help="Load the image at public/image.tar in this task, "
+                          "rather than searching the index")
     @CommandArgument('image_name', nargs='?',
-        help="Load the image of this name based on the current contents of the tree "
-             "(as built for mozilla-central or mozilla-inbound)")
+                     help="Load the image of this name based on the current "
+                          "contents of the tree (as built for mozilla-central "
+                          "or mozilla-inbound)")
     def load_image(self, image_name, task_id):
         from taskgraph.docker import load_image_by_name, load_image_by_task_id
         if not image_name and not task_id:
             print("Specify either IMAGE-NAME or TASK-ID")
             sys.exit(1)
         try:
             if task_id:
                 ok = load_image_by_task_id(task_id)
             else:
                 ok = load_image_by_name(image_name)
             if not ok:
                 sys.exit(1)
-        except Exception as e:
+        except Exception:
             traceback.print_exc()
             sys.exit(1)
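
The mach_commands.py hunks are pure flake8 cleanups: decorator arguments are re-indented to line up with the opening parenthesis, unused 'as e' bindings are dropped, and stray blank lines and trailing whitespace are removed (most likely pycodestyle continuation-line/blank-line/whitespace warnings plus pyflakes F841; the exact codes depend on the tree's flake8 configuration). A small, self-contained illustration of the unused-binding fix, not taken from the patch itself:

import traceback


def run():
    raise RuntimeError("boom")


# A binding like 'except Exception as e' with e never used is what flake8
# reports (pyflakes F841, depending on version); traceback.print_exc() reads
# the active exception itself, so the name can simply be dropped, as above.
try:
    run()
except Exception:
    traceback.print_exc()
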
--- a/taskcluster/scripts/builder/gaia_props.py
+++ b/taskcluster/scripts/builder/gaia_props.py
@@ -30,12 +30,12 @@ props_path = os.path.join(args.gecko, 'b
 if not os.path.isfile(props_path):
         print >> sys.stderr, \
             'Gecko directory does not contain b2g/config/gaia.json'
         sys.exit(1)
 
 props = json.load(open(props_path))
 
 if args.prop == 'revision':
-    print(props['revision']);
+    print(props['revision'])
 
 if args.prop == 'repository':
     print(urlparse.urljoin('https://hg.mozilla.org', props['repo_path']))
--- a/taskcluster/taskgraph/create.py
+++ b/taskcluster/taskgraph/create.py
@@ -3,24 +3,24 @@
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import concurrent.futures as futures
 import requests
 import requests.adapters
 import json
-import collections
 import os
 import logging
 
 from slugid import nice as slugid
 
 logger = logging.getLogger(__name__)
 
+
 def create_tasks(taskgraph, label_to_taskid):
     # TODO: use the taskGroupId of the decision task
     task_group_id = slugid()
     taskid_to_label = {t: l for l, t in label_to_taskid.iteritems()}
 
     session = requests.Session()
 
     decision_task_id = os.environ.get('TASK_ID')
@@ -55,19 +55,21 @@ def create_tasks(taskgraph, label_to_tas
 
             fs[task_id] = e.submit(_create_task, session, task_id,
                                    taskid_to_label[task_id], task_def)
 
         # Wait for all futures to complete.
         for f in futures.as_completed(fs.values()):
             f.result()
 
+
 def _create_task(session, task_id, label, task_def):
     # create the task using 'http://taskcluster/queue', which is proxied to the queue service
     # with credentials appropriate to this job.
     logger.debug("Creating task with taskId {} for {}".format(task_id, label))
-    res = session.put('http://taskcluster/queue/v1/task/{}'.format(task_id), data=json.dumps(task_def))
+    res = session.put('http://taskcluster/queue/v1/task/{}'.format(task_id),
+                      data=json.dumps(task_def))
     if res.status_code != 200:
         try:
             logger.error(res.json()['message'])
         except:
             logger.error(res.text)
         res.raise_for_status()
--- a/taskcluster/taskgraph/decision.py
+++ b/taskcluster/taskgraph/decision.py
@@ -98,18 +98,18 @@ def get_decision_parameters(options):
         'target_tasks_method',
     ] if n in options}
 
     project = parameters['project']
     try:
         parameters.update(PER_PROJECT_PARAMETERS[project])
     except KeyError:
         logger.warning("using default project parameters; add {} to "
-              "PER_PROJECT_PARAMETERS in {} to customize behavior "
-              "for this project".format(project, __file__))
+                       "PER_PROJECT_PARAMETERS in {} to customize behavior "
+                       "for this project".format(project, __file__))
         parameters.update(PER_PROJECT_PARAMETERS['default'])
 
     return Parameters(parameters)
 
 
 def write_artifact(filename, data):
     logger.info('writing artifact file `{}`'.format(filename))
     if not os.path.isdir(ARTIFACTS_DIR):
--- a/taskcluster/taskgraph/generator.py
+++ b/taskcluster/taskgraph/generator.py
@@ -1,24 +1,24 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 import logging
 import os
-import re
 import yaml
 
 from .graph import Graph
 from .types import TaskGraph
 from .optimize import optimize_task_graph
 
 logger = logging.getLogger(__name__)
 
+
 class TaskGraphGenerator(object):
     """
     The central controller for taskgraph.  This handles all phases of graph
     generation.  The task is generated from all of the kinds defined in
     subdirectories of the generator's root directory.
 
     Access to the results of this generation, as well as intermediate values at
     various phases of generation, is available via properties.  This encourages
@@ -56,17 +56,16 @@ class TaskGraphGenerator(object):
     def full_task_set(self):
         """
         The full task set: all tasks defined by any kind (a graph without edges)
 
         @type: TaskGraph
         """
         return self._run_until('full_task_set')
 
-
     @property
     def full_task_graph(self):
         """
         The full task graph: the full task set, with edges representing
         dependencies.
 
         @type: TaskGraph
         """
@@ -176,17 +175,18 @@ class TaskGraphGenerator(object):
             {l: all_tasks[l] for l in target_graph.nodes},
             target_graph)
         yield 'target_task_graph', target_task_graph
 
         logger.info("Generating optimized task graph")
         do_not_optimize = set()
         if not self.parameters.get('optimize_target_tasks', True):
             do_not_optimize = target_task_set.graph.nodes
-        optimized_task_graph, label_to_taskid = optimize_task_graph(target_task_graph, do_not_optimize)
+        optimized_task_graph, label_to_taskid = optimize_task_graph(target_task_graph,
+                                                                    do_not_optimize)
         yield 'label_to_taskid', label_to_taskid
         yield 'optimized_task_graph', optimized_task_graph
 
     def _run_until(self, name):
         while name not in self._run_results:
             try:
                 k, v = self._run.next()
             except StopIteration:
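
The tail of this hunk shows the consumer side of the generation pipeline: the phases yield (name, value) pairs and _run_until() advances the generator only as far as needed. A minimal sketch of that pattern; the StopIteration handling is an assumption here, since the real handler falls outside the hunk:

class PhasedGenerator(object):
    """Sketch of the _run/_run_until pattern used by TaskGraphGenerator:
    phases yield (name, value) pairs, and properties advance the generator
    only as far as needed, caching every intermediate result."""

    def __init__(self):
        self._run = self._phases()
        self._run_results = {}

    def _phases(self):
        # Each generation phase yields its result under a well-known name.
        full_task_set = {'t-0', 't-1', 't-2'}
        yield 'full_task_set', full_task_set
        yield 'target_task_set', {'t-2'}

    def _run_until(self, name):
        while name not in self._run_results:
            try:
                k, v = next(self._run)
            except StopIteration:
                # Assumption: the real handler (outside this hunk) raises here.
                raise AttributeError("No such run result {}".format(name))
            self._run_results[k] = v
        return self._run_results[name]

    @property
    def target_task_set(self):
        return self._run_until('target_task_set')


gen = PhasedGenerator()
print(gen.target_task_set)    # {'t-2'}; 'full_task_set' is now cached as well
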
--- a/taskcluster/taskgraph/graph.py
+++ b/taskcluster/taskgraph/graph.py
@@ -1,16 +1,17 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import collections
 
+
 class Graph(object):
     """
     Generic representation of a directed acyclic graph with labeled edges
     connecting the nodes.  Graph operations are implemented in a functional
     manner, so the data structure is immutable.
 
     It permits at most one edge of a given name between any set of nodes.  The
     graph is not checked for cycles, and methods may hang or otherwise fail if
@@ -49,17 +50,19 @@ class Graph(object):
         assert nodes <= self.nodes
 
         # generate a new graph by expanding along edges until reaching a fixed
         # point
         new_nodes, new_edges = nodes, set()
         nodes, edges = set(), set()
         while (new_nodes, new_edges) != (nodes, edges):
             nodes, edges = new_nodes, new_edges
-            add_edges = set((left, right, name) for (left, right, name) in self.edges if left in nodes)
+            add_edges = set((left, right, name)
+                            for (left, right, name) in self.edges
+                            if left in nodes)
             add_nodes = set(right for (_, right, _) in add_edges)
             new_nodes = nodes | add_nodes
             new_edges = edges | add_edges
         return Graph(new_nodes, new_edges)
 
     def visit_postorder(self):
         """
         Generate a sequence of nodes in postorder, such that every node is
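
The reflowed comprehension above does not change behaviour: transitive_closure() still expands the given nodes along outgoing (left, right, name) edges until it reaches a fixed point. A small usage sketch, assuming taskcluster/ is on sys.path as the taskgraph tests arrange:

from taskgraph.graph import Graph

# Edges are (left, right, name) triples, as in the comprehension above.
g = Graph({'t-0', 't-1', 't-2'}, {
    ('t-2', 't-1', 'prev'),
    ('t-1', 't-0', 'prev'),
})

closure = g.transitive_closure({'t-2'})
assert closure.nodes == {'t-0', 't-1', 't-2'}
assert closure.edges == {('t-2', 't-1', 'prev'), ('t-1', 't-0', 'prev')}
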
--- a/taskcluster/taskgraph/kind/base.py
+++ b/taskcluster/taskgraph/kind/base.py
@@ -2,16 +2,17 @@
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import os
 import abc
 
+
 class Kind(object):
     """
     A kind represents a collection of tasks that share common characteristics.
     For example, all build jobs.  Each instance of a kind is initialized with a
     path from which it draws its task configuration.  The instance is free to
     store as much local state as it needs.
     """
     __metaclass__ = abc.ABCMeta
--- a/taskcluster/taskgraph/kind/docker_image.py
+++ b/taskcluster/taskgraph/kind/docker_image.py
@@ -3,17 +3,16 @@
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import logging
 import json
 import os
 import urllib2
-import hashlib
 import tarfile
 import time
 
 from . import base
 from ..types import Task
 from taskgraph.util.docker import (
     docker_image,
     generate_context_hash
@@ -49,37 +48,39 @@ class DockerImageKind(base.Kind):
             'head_repository': params['head_repository'],
             'head_ref': params['head_ref'] or params['head_rev'],
             'head_rev': params['head_rev'],
             'owner': params['owner'],
             'level': params['level'],
             'from_now': json_time_from_now,
             'now': current_json_time(),
             'source': '{repo}file/{rev}/testing/taskcluster/tasks/image.yml'
-                    .format(repo=params['head_repository'], rev=params['head_rev']),
+                      .format(repo=params['head_repository'], rev=params['head_rev']),
         }
 
         tasks = []
         templates = Templates(self.path)
         for image_name in self.config['images']:
             context_path = os.path.join('testing', 'docker', image_name)
             context_hash = generate_context_hash(context_path)
 
             image_parameters = dict(parameters)
             image_parameters['context_hash'] = context_hash
             image_parameters['context_path'] = context_path
             image_parameters['artifact_path'] = 'public/image.tar'
             image_parameters['image_name'] = image_name
 
-            image_artifact_path = "public/decision_task/image_contexts/{}/context.tar.gz".format(image_name)
+            image_artifact_path = \
+                "public/decision_task/image_contexts/{}/context.tar.gz".format(image_name)
             if os.environ.get('TASK_ID'):
                 destination = os.path.join(
                     os.environ['HOME'],
                     "artifacts/decision_task/image_contexts/{}/context.tar.gz".format(image_name))
-                image_parameters['context_url'] = ARTIFACT_URL.format(os.environ['TASK_ID'], image_artifact_path)
+                image_parameters['context_url'] = ARTIFACT_URL.format(
+                    os.environ['TASK_ID'], image_artifact_path)
                 self.create_context_tar(context_path, destination, image_name)
             else:
                 # skip context generation since this isn't a decision task
                 # TODO: generate context tarballs using subdirectory clones in
                 # the image-building task so we don't have to worry about this.
                 image_parameters['context_url'] = 'file:///tmp/' + image_artifact_path
 
             image_task = templates.load('image.yml', image_parameters)
@@ -89,17 +90,18 @@ class DockerImageKind(base.Kind):
                 'image_name': image_name,
             }
 
             # As an optimization, if the context hash exists for mozilla-central, that image
             # task ID will be used.  The reasoning behind this is that eventually everything ends
             # up on mozilla-central at some point if most tasks use this as a common image
             # for a given context hash, a worker within Taskcluster does not need to contain
             # the same image per branch.
-            index_paths = ['docker.images.v1.{}.{}.hash.{}'.format(project, image_name, context_hash)
+            index_paths = ['docker.images.v1.{}.{}.hash.{}'.format(
+                                project, image_name, context_hash)
                            for project in ['mozilla-central', params['project']]]
 
             tasks.append(Task(self, 'build-docker-image-' + image_name,
                               task=image_task['task'], attributes=attributes,
                               index_paths=index_paths))
 
         return tasks
 
@@ -111,17 +113,18 @@ class DockerImageKind(base.Kind):
             try:
                 url = INDEX_URL.format(index_path)
                 existing_task = json.load(urllib2.urlopen(url))
                 # Only return the task ID if the artifact exists for the indexed
                 # task.  Otherwise, continue on looking at each of the branches.  Method
                 # continues trying other branches in case mozilla-central has an expired
                 # artifact, but 'project' might not. Only return no task ID if all
                 # branches have been tried
-                request = urllib2.Request(ARTIFACT_URL.format(existing_task['taskId'], 'public/image.tar'))
+                request = urllib2.Request(
+                    ARTIFACT_URL.format(existing_task['taskId'], 'public/image.tar'))
                 request.get_method = lambda: 'HEAD'
                 urllib2.urlopen(request)
 
                 # HEAD success on the artifact is enough
                 return True, existing_task['taskId']
             except urllib2.HTTPError:
                 pass
 
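
The comment above the index_paths reflow explains the optimization: the mozilla-central index route for a given context hash is consulted first, so branches can reuse an already-built image. The reformatted comprehension produces routes of this shape (all values illustrative):

project = 'try'
image_name = 'lint'
context_hash = '4be3f2a7c81a'    # illustrative; really a hash of testing/docker/lint

index_paths = ['docker.images.v1.{}.{}.hash.{}'.format(p, image_name, context_hash)
               for p in ['mozilla-central', project]]

assert index_paths == [
    'docker.images.v1.mozilla-central.lint.hash.4be3f2a7c81a',
    'docker.images.v1.try.lint.hash.4be3f2a7c81a',
]
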
--- a/taskcluster/taskgraph/kind/legacy.py
+++ b/taskcluster/taskgraph/kind/legacy.py
@@ -4,23 +4,21 @@
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import copy
 import json
 import logging
 import os
 import re
-import sys
 import time
-from collections import defaultdict, namedtuple
+from collections import namedtuple
 
 from . import base
 from ..types import Task
-from functools import partial
 from mozpack.path import match as mozpackmatch
 from slugid import nice as slugid
 from taskgraph.util.legacy_commit_parser import parse_commit
 from taskgraph.util.time import (
     json_time_from_now,
     current_json_time,
 )
 from taskgraph.util.templates import Templates
@@ -44,52 +42,56 @@ TREEHERDER_ROUTES = {
     'production': 'tc-treeherder'
 }
 
 # time after which a try build's results will expire
 TRY_EXPIRATION = "14 days"
 
 logger = logging.getLogger(__name__)
 
+
 def mklabel():
     return TASKID_PLACEHOLDER.format(slugid())
 
+
 def merge_dicts(*dicts):
     merged_dict = {}
     for dictionary in dicts:
         merged_dict.update(dictionary)
     return merged_dict
 
+
 def gaia_info():
     '''Fetch details from in tree gaia.json (which links this version of
     gecko->gaia) and construct the usual base/head/ref/rev pairing...'''
     gaia = json.load(open(os.path.join(GECKO, 'b2g', 'config', 'gaia.json')))
 
     if gaia['git'] is None or \
        gaia['git']['remote'] == '' or \
        gaia['git']['git_revision'] == '' or \
        gaia['git']['branch'] == '':
 
-       # Just use the hg params...
-       return {
-         'gaia_base_repository': 'https://hg.mozilla.org/{}'.format(gaia['repo_path']),
-         'gaia_head_repository': 'https://hg.mozilla.org/{}'.format(gaia['repo_path']),
-         'gaia_ref': gaia['revision'],
-         'gaia_rev': gaia['revision']
-       }
+        # Just use the hg params...
+        return {
+          'gaia_base_repository': 'https://hg.mozilla.org/{}'.format(gaia['repo_path']),
+          'gaia_head_repository': 'https://hg.mozilla.org/{}'.format(gaia['repo_path']),
+          'gaia_ref': gaia['revision'],
+          'gaia_rev': gaia['revision']
+        }
 
     else:
         # Use git
         return {
             'gaia_base_repository': gaia['git']['remote'],
             'gaia_head_repository': gaia['git']['remote'],
             'gaia_rev': gaia['git']['git_revision'],
             'gaia_ref': gaia['git']['branch'],
         }
 
+
 def configure_dependent_task(task_path, parameters, taskid, templates, build_treeherder_config):
     """Configure a build dependent task. This is shared between post-build and test tasks.
 
     :param task_path: location to the task yaml
     :param parameters: parameters to load the template
     :param taskid: taskid of the dependent task
     :param templates: reference to the template builder
     :param build_treeherder_config: parent treeherder config
@@ -123,30 +125,32 @@ def configure_dependent_task(task_path, 
     if 'routes' not in task['task']:
         task['task']['routes'] = []
 
     if 'scopes' not in task['task']:
         task['task']['scopes'] = []
 
     return task
 
+
 def set_interactive_task(task, interactive):
     r"""Make the task interactive.
 
     :param task: task definition.
     :param interactive: True if the task should be interactive.
     """
     if not interactive:
         return
 
     payload = task["task"]["payload"]
     if "features" not in payload:
         payload["features"] = {}
     payload["features"]["interactive"] = True
 
+
 def remove_caches_from_task(task):
     r"""Remove all caches but tc-vcs from the task.
 
     :param task: task definition.
     """
     whitelist = [
         re.compile("^level-[123]-.*-tc-vcs(-public-sources)?$"),
         re.compile("^tooltool-cache$"),
@@ -154,16 +158,17 @@ def remove_caches_from_task(task):
     try:
         caches = task["task"]["payload"]["cache"]
         for cache in caches.keys():
             if not any(pat.match(cache) for pat in whitelist):
                 caches.pop(cache)
     except KeyError:
         pass
 
+
 def query_vcs_info(repository, revision):
     """Query the pushdate and pushid of a repository/revision.
 
     This is intended to be used on hg.mozilla.org/mozilla-central and
     similar. It may or may not work for other hg repositories.
     """
     if not repository or not revision:
         logger.warning('cannot query vcs info because vcs info not provided')
@@ -184,17 +189,17 @@ def query_vcs_info(repository, revision)
 
         pushid = contents['changesets'][-1]['pushid']
         pushdate = contents['changesets'][-1]['pushdate'][0]
 
         return VCSInfo(pushid, pushdate, changesets)
 
     except Exception:
         logger.exception("Error querying VCS info for '%s' revision '%s'",
-                repository, revision)
+                         repository, revision)
         return None
 
 
 def set_expiration(task, timestamp):
     task_def = task['task']
     task_def['expires'] = timestamp
     if task_def.get('deadline', timestamp) > timestamp:
         task_def['deadline'] = timestamp
@@ -205,22 +210,24 @@ def set_expiration(task, timestamp):
         return
 
     # for docker-worker, artifacts is a dictionary
     # for generic-worker, artifacts is a list
     # for taskcluster-worker, it will depend on what we do in artifacts plugin
     for artifact in artifacts.values() if hasattr(artifacts, "values") else artifacts:
         artifact['expires'] = timestamp
 
+
 def format_treeherder_route(destination, project, revision, pushlog_id):
     return "{}.v2.{}.{}.{}".format(destination,
                                    project,
                                    revision,
                                    pushlog_id)
 
+
 def decorate_task_treeherder_routes(task, project, revision, pushlog_id):
     """Decorate the given task with treeherder routes.
 
     Uses task.extra.treeherderEnv if available otherwise defaults to only
     staging.
 
     :param dict task: task definition.
     :param str project: The project the tasks are running for.
@@ -238,32 +245,35 @@ def decorate_task_treeherder_routes(task
 
     for env in treeheder_env:
         route = format_treeherder_route(TREEHERDER_ROUTES[env],
                                         project,
                                         revision,
                                         pushlog_id)
         task['routes'].append(route)
 
+
 def decorate_task_json_routes(task, json_routes, parameters):
     """Decorate the given task with routes.json routes.
 
     :param dict task: task definition.
     :param json_routes: the list of routes to use from routes.json
     :param parameters: dictionary of parameters to use in route templates
     """
     routes = task.get('routes', [])
     for route in json_routes:
         routes.append(route.format(**parameters))
 
     task['routes'] = routes
 
+
 class BuildTaskValidationException(Exception):
     pass
 
+
 def validate_build_task(task):
     '''The build tasks have some required fields in extra this function ensures
     they are there. '''
     if 'task' not in task:
         raise BuildTaskValidationException('must have task field')
 
     task_def = task['task']
 
@@ -276,16 +286,17 @@ def validate_build_task(task):
 
         if 'build' not in locations:
             raise BuildTaskValidationException('task.extra.locations.build missing')
 
         if 'tests' not in locations and 'test_packages' not in locations:
             raise BuildTaskValidationException('task.extra.locations.tests or '
                                                'task.extra.locations.tests_packages missing')
 
+
 class LegacyKind(base.Kind):
     """
     This kind generates a full task graph from the old YAML files in
     `testing/taskcluster/tasks`.  The tasks already have dependency links.
 
     The existing task-graph generation generates slugids for tasks during task
     generation, so this kind labels tasks using those slugids, with a prefix of
     "TaskLabel==".  These labels are unfortunately not stable from run to run.
@@ -313,17 +324,18 @@ class LegacyKind(base.Kind):
 
         # Default to current time if querying the head rev fails
         pushdate = time.strftime('%Y%m%d%H%M%S', time.gmtime())
         vcs_info = query_vcs_info(params['head_repository'], params['head_rev'])
         changed_files = set()
         if vcs_info:
             pushdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(vcs_info.pushdate))
 
-            logger.debug('{} commits influencing task scheduling:'.format(len(vcs_info.changesets)))
+            logger.debug(
+                '{} commits influencing task scheduling:'.format(len(vcs_info.changesets)))
             for c in vcs_info.changesets:
                 logger.debug("{cset} {desc}".format(
                     cset=c['node'][0:12],
                     desc=c['desc'].splitlines()[0].encode('ascii', 'ignore')))
                 changed_files |= set(c['files'])
 
         # Template parameters used when expanding the graph
         parameters = dict(gaia_info().items() + {
@@ -362,17 +374,18 @@ class LegacyKind(base.Kind):
         for env in TREEHERDER_ROUTES:
             route = format_treeherder_route(TREEHERDER_ROUTES[env],
                                             parameters['project'],
                                             parameters['head_rev'],
                                             parameters['pushlog_id'])
             graph['scopes'].add("queue:route:{}".format(route))
 
         graph['metadata'] = {
-            'source': '{repo}file/{rev}/testing/taskcluster/mach_commands.py'.format(repo=params['head_repository'], rev=params['head_rev']),
+            'source': '{repo}file/{rev}/testing/taskcluster/mach_commands.py'.format(
+                        repo=params['head_repository'], rev=params['head_rev']),
             'owner': params['owner'],
             # TODO: Add full mach commands to this example?
             'description': 'Task graph generated via ./mach taskcluster-graph',
             'name': 'task graph local'
         }
 
         # Filter the job graph according to conditions met by this invocation run.
         def should_run(task):
@@ -392,17 +405,17 @@ class LegacyKind(base.Kind):
                 for pattern in file_patterns:
                     for path in changed_files:
                         if mozpackmatch(path, pattern):
                             logger.debug('scheduling {task} because pattern {pattern} '
                                          'matches {path}'.format(
                                              task=task['task'],
                                              pattern=pattern,
                                              path=path,
-                            ))
+                                         ))
                             return True
 
                 # No file patterns matched. Discard task.
                 logger.debug('discarding {task} because no relevant files changed'.format(
                     task=task['task'],
                     pattern=pattern,
                     path=path))
                 return False
@@ -413,17 +426,18 @@ class LegacyKind(base.Kind):
 
         all_routes = {}
 
         for build in job_graph:
             logging.debug("loading build task {}".format(build['task']))
             interactive = cmdline_interactive or build["interactive"]
             build_parameters = merge_dicts(parameters, build['additional-parameters'])
             build_parameters['build_slugid'] = mklabel()
-            build_parameters['source'] = '{repo}file/{rev}/testing/taskcluster/{file}'.format(repo=params['head_repository'], rev=params['head_rev'], file=build['task'])
+            build_parameters['source'] = '{repo}file/{rev}/testing/taskcluster/{file}'.format(
+                repo=params['head_repository'], rev=params['head_rev'], file=build['task'])
             build_task = templates.load(build['task'], build_parameters)
 
             # Copy build_* attributes to expose them to post-build tasks
             # as well as json routes and tests
             task_extra = build_task['task']['extra']
             build_parameters['build_name'] = task_extra['build_name']
             build_parameters['build_type'] = task_extra['build_type']
             build_parameters['build_product'] = task_extra['build_product']
@@ -440,48 +454,52 @@ class LegacyKind(base.Kind):
                                             build_parameters['head_rev'],
                                             build_parameters['pushlog_id'])
             decorate_task_json_routes(build_task['task'],
                                       json_routes,
                                       build_parameters)
 
             # Ensure each build graph is valid after construction.
             validate_build_task(build_task)
-            attributes = build_task['attributes'] = {'kind':'legacy', 'legacy_kind': 'build'}
+            attributes = build_task['attributes'] = {'kind': 'legacy', 'legacy_kind': 'build'}
             if 'build_name' in build:
                 attributes['build_platform'] = build['build_name']
             if 'build_type' in task_extra:
                 attributes['build_type'] = {'dbg': 'debug'}.get(task_extra['build_type'],
                                                                 task_extra['build_type'])
             if build.get('is_job'):
                 attributes['job'] = build['build_name']
                 attributes['legacy_kind'] = 'job'
             graph['tasks'].append(build_task)
 
             for location in build_task['task']['extra'].get('locations', {}):
-                build_parameters['{}_location'.format(location)] = build_task['task']['extra']['locations'][location]
+                build_parameters['{}_location'.format(location)] = \
+                    build_task['task']['extra']['locations'][location]
 
             for url in build_task['task']['extra'].get('url', {}):
                 build_parameters['{}_url'.format(url)] = \
                     build_task['task']['extra']['url'][url]
 
             define_task = DEFINE_TASK.format(build_task['task']['workerType'])
 
             for route in build_task['task'].get('routes', []):
                 if route.startswith('index.gecko.v2') and route in all_routes:
-                    raise Exception("Error: route '%s' is in use by multiple tasks: '%s' and '%s'" % (
-                        route,
-                        build_task['task']['metadata']['name'],
-                        all_routes[route],
-                    ))
+                    raise Exception(
+                        "Error: route '%s' is in use by multiple tasks: '%s' and '%s'" % (
+                            route,
+                            build_task['task']['metadata']['name'],
+                            all_routes[route],
+                        ))
                 all_routes[route] = build_task['task']['metadata']['name']
 
             graph['scopes'].add(define_task)
             graph['scopes'] |= set(build_task['task'].get('scopes', []))
-            route_scopes = map(lambda route: 'queue:route:' + route, build_task['task'].get('routes', []))
+            route_scopes = map(
+                lambda route: 'queue:route:' + route, build_task['task'].get('routes', [])
+                )
             graph['scopes'] |= set(route_scopes)
 
             # Treeherder symbol configuration for the graph required for each
             # build so tests know which platform they belong to.
             build_treeherder_config = build_task['task']['extra']['treeherder']
 
             if 'machine' not in build_treeherder_config:
                 message = '({}), extra.treeherder.machine required for all builds'
@@ -612,9 +630,8 @@ class LegacyKind(base.Kind):
         if 'docker-image' in taskdict:
             deps.append(('build-docker-image-{docker-image}'.format(**taskdict), 'docker-image'))
 
         return deps
 
     def optimize_task(self, task, taskgraph):
         # no optimization for the moment
         return False, None
-
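
Most of the legacy.py changes are indentation and line-wrapping fixes around helpers that already existed. For reference, two of them behave as follows; both definitions are copied verbatim from the hunks above and the argument values are illustrative:

def merge_dicts(*dicts):
    merged_dict = {}
    for dictionary in dicts:
        merged_dict.update(dictionary)
    return merged_dict


def format_treeherder_route(destination, project, revision, pushlog_id):
    return "{}.v2.{}.{}.{}".format(destination, project, revision, pushlog_id)


# Later dictionaries win, which is how build['additional-parameters'] overrides
# the shared template parameters in the loop above.
assert merge_dicts({'level': '1', 'project': 'try'}, {'level': '3'}) == \
    {'level': '3', 'project': 'try'}

# The treeherder route attached to each build/test task.
assert format_treeherder_route('tc-treeherder', 'try', 'abcdef012345', '42') == \
    'tc-treeherder.v2.try.abcdef012345.42'
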
--- a/taskcluster/taskgraph/optimize.py
+++ b/taskcluster/taskgraph/optimize.py
@@ -70,17 +70,19 @@ def annotate_task_graph(target_task_grap
     for label in target_task_graph.graph.visit_postorder():
         task = target_task_graph.tasks[label]
         named_task_dependencies = named_links_dict.get(label, {})
 
         # check whether any dependencies have been optimized away
         dependencies = [target_task_graph.tasks[l] for l in named_task_dependencies.itervalues()]
         for t in dependencies:
             if t.optimized and not t.task_id:
-                raise Exception("task {} was optimized away, but {} depends on it".format(t.label, label))
+                raise Exception(
+                    "task {} was optimized away, but {} depends on it".format(
+                        t.label, label))
 
         # if this task is blacklisted, don't even consider optimizing
         replacement_task_id = None
         if label in do_not_optimize:
             optimized = False
         # if any dependencies can't be optimized, this task can't, either
         elif any(not t.optimized for t in dependencies):
             optimized = False
@@ -126,18 +128,22 @@ def get_subgraph(annotated_task_graph, n
         named_task_dependencies = {
                 name: label_to_taskid[label]
                 for name, label in named_links_dict.get(label, {}).iteritems()}
         task.task = resolve_task_references(task.label, task.task, named_task_dependencies)
         task.task.setdefault('dependencies', []).extend(named_task_dependencies.itervalues())
         tasks_by_taskid[task.task_id] = task
 
     # resolve edges to taskIds
-    edges_by_taskid = ((label_to_taskid.get(left), label_to_taskid.get(right), name)
-        for (left, right, name) in annotated_task_graph.graph.edges)
+    edges_by_taskid = (
+        (label_to_taskid.get(left), label_to_taskid.get(right), name)
+        for (left, right, name) in annotated_task_graph.graph.edges
+        )
     # ..and drop edges that are no longer in the task graph
-    edges_by_taskid = set((left, right, name)
+    edges_by_taskid = set(
+        (left, right, name)
         for (left, right, name) in edges_by_taskid
-        if left in tasks_by_taskid and right in tasks_by_taskid)
+        if left in tasks_by_taskid and right in tasks_by_taskid
+        )
 
     return TaskGraph(
         tasks_by_taskid,
         Graph(set(tasks_by_taskid), edges_by_taskid))
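
get_subgraph() relies on resolve_task_references() to substitute dependency taskIds into 'task-reference' values before the edges themselves are rewritten to taskIds. A small sketch of that substitution, based on the behaviour exercised in test_optimize.py below (the taskId and URL are illustrative):

from taskgraph.optimize import resolve_task_references

# The first argument is only used for error messages (see test_invalid below);
# the mapping gives the taskId chosen for each named dependency edge.
task_def = {'payload': {'task-reference': 'http://queue/<build>/artifacts/target.zip'}}
resolved = resolve_task_references('some-test-task', task_def,
                                   {'build': 'abc123DEFtaskId'})
assert resolved == {'payload': 'http://queue/abc123DEFtaskId/artifacts/target.zip'}
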
--- a/taskcluster/taskgraph/parameters.py
+++ b/taskcluster/taskgraph/parameters.py
@@ -2,20 +2,20 @@
 
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import json
-import sys
 import yaml
 from mozbuild.util import ReadOnlyDict
 
+
 class Parameters(ReadOnlyDict):
     """An immutable dictionary with nicer KeyError messages on failure"""
     def __getitem__(self, k):
         try:
             return super(Parameters, self).__getitem__(k)
         except KeyError:
             raise KeyError("taskgraph parameter {!r} not found".format(k))
 
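
Parameters is a ReadOnlyDict whose only twist is the friendlier KeyError shown above. A usage sketch, assuming plain dict-style construction:

from taskgraph.parameters import Parameters

params = Parameters({'project': 'try', 'head_rev': 'abcdef012345'})
assert params['project'] == 'try'

try:
    params['pushlog_id']
except KeyError as e:
    print(e)    # roughly: "taskgraph parameter 'pushlog_id' not found"
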
--- a/taskcluster/taskgraph/target_tasks.py
+++ b/taskcluster/taskgraph/target_tasks.py
@@ -3,40 +3,46 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 from taskgraph import try_option_syntax
 
 _target_task_methods = {}
+
+
 def _target_task(name):
     def wrap(func):
         _target_task_methods[name] = func
         return func
     return wrap
 
+
 def get_method(method):
     """Get a target_task_method to pass to a TaskGraphGenerator."""
     return _target_task_methods[method]
 
+
 @_target_task('from_parameters')
 def target_tasks_from_parameters(full_task_graph, parameters):
     """Get the target task set from parameters['target_tasks'].  This is
     useful for re-running a decision task with the same target set as in an
     earlier run, by copying `target_tasks.json` into `parameters.yml`."""
     return parameters['target_tasks']
 
+
 @_target_task('try_option_syntax')
 def target_tasks_try_option_syntax(full_task_graph, parameters):
     """Generate a list of target tasks based on try syntax in
     parameters['message'] and, for context, the full task graph."""
     options = try_option_syntax.TryOptionSyntax(parameters['message'], full_task_graph)
     return [t.label for t in full_task_graph.tasks.itervalues()
             if options.task_matches(t.attributes)]
 
+
 @_target_task('all_builds_and_tests')
 def target_tasks_all_builds_and_tests(full_task_graph, parameters):
     """Trivially target all build and test tasks.  This is used for
     branches where we want to build "everything", but "everything"
     does not include uninteresting things like docker images"""
     return [t.label for t in full_task_graph.tasks.itervalues()
             if t.attributes.get('kind') == 'legacy']
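
The added blank lines above do not change the registry pattern: @_target_task(name) records each callable in _target_task_methods and get_method() looks it back up, which is how a decision task's target_tasks_method parameter selects a policy by name. A purely hypothetical registration for illustration (real additions live in this module, since the decorator name is private):

from taskgraph import target_tasks


@target_tasks._target_task('legacy_builds_only')
def target_tasks_legacy_builds_only(full_task_graph, parameters):
    """Hypothetical filter: target only the legacy build tasks."""
    return [t.label for t in full_task_graph.tasks.itervalues()
            if t.attributes.get('legacy_kind') == 'build']


assert target_tasks.get_method('legacy_builds_only') is target_tasks_legacy_builds_only
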
--- a/taskcluster/taskgraph/test/test_create.py
+++ b/taskcluster/taskgraph/test/test_create.py
@@ -8,16 +8,17 @@ import unittest
 import os
 
 from .. import create
 from ..graph import Graph
 from ..types import Task, TaskGraph
 
 from mozunit import main
 
+
 class FakeKind(object):
 
     def get_task_definition(self, task, deps_by_name):
         # sanity-check the deps_by_name
         for k, v in deps_by_name.iteritems():
             assert k == 'edge'
         return {'payload': 'hello world'}
 
@@ -78,9 +79,8 @@ class TestCreate(unittest.TestCase):
         create.create_tasks(taskgraph, label_to_taskid)
 
         for tid, task in self.created_tasks.iteritems():
             self.assertEqual(task['dependencies'], [os.environ['TASK_ID']])
 
 
 if __name__ == '__main__':
     main()
-
--- a/taskcluster/taskgraph/test/test_decision.py
+++ b/taskcluster/taskgraph/test/test_decision.py
@@ -11,16 +11,17 @@ import shutil
 import unittest
 import tempfile
 
 from .. import decision
 from ..graph import Graph
 from ..types import Task, TaskGraph
 from mozunit import main
 
+
 class TestDecision(unittest.TestCase):
 
     def test_taskgraph_to_json(self):
         tasks = {
             'a': Task(kind=None, label='a', attributes={'attr': 'a-task'}),
             'b': Task(kind=None, label='b', task={'task': 'def'}),
         }
         graph = Graph(nodes=set('ab'), edges={('a', 'b', 'edgelabel')})
@@ -38,41 +39,37 @@ class TestDecision(unittest.TestCase):
             'b': {
                 'label': 'b',
                 'attributes': {},
                 'task': {'task': 'def'},
                 'dependencies': {},
             }
         })
 
-
     def test_write_artifact_json(self):
         data = [{'some': 'data'}]
         tmpdir = tempfile.mkdtemp()
         try:
             decision.ARTIFACTS_DIR = os.path.join(tmpdir, "artifacts")
             decision.write_artifact("artifact.json", data)
             with open(os.path.join(decision.ARTIFACTS_DIR, "artifact.json")) as f:
                 self.assertEqual(json.load(f), data)
         finally:
             if os.path.exists(tmpdir):
                 shutil.rmtree(tmpdir)
             decision.ARTIFACTS_DIR = 'artifacts'
 
-
     def test_write_artifact_yml(self):
         data = [{'some': 'data'}]
         tmpdir = tempfile.mkdtemp()
         try:
             decision.ARTIFACTS_DIR = os.path.join(tmpdir, "artifacts")
             decision.write_artifact("artifact.yml", data)
             with open(os.path.join(decision.ARTIFACTS_DIR, "artifact.yml")) as f:
                 self.assertEqual(yaml.safe_load(f), data)
         finally:
             if os.path.exists(tmpdir):
                 shutil.rmtree(tmpdir)
             decision.ARTIFACTS_DIR = 'artifacts'
 
 
 if __name__ == '__main__':
     main()
-
-
--- a/taskcluster/taskgraph/test/test_generator.py
+++ b/taskcluster/taskgraph/test/test_generator.py
@@ -86,16 +86,18 @@ class TestGenerator(unittest.TestCase):
                                      {('t-1', 't-0', 'prev')}))
         self.assertEqual(sorted(self.tgg.target_task_graph.tasks.keys()),
                          sorted(['t-0', 't-1']))
 
     def test_optimized_task_graph(self):
         "The optimized task graph contains task ids"
         self.target_tasks = ['t-2']
         tid = self.tgg.label_to_taskid
-        self.assertEqual(self.tgg.optimized_task_graph.graph,
-             graph.Graph({tid['t-0'], tid['t-1'], tid['t-2']}, {
-                 (tid['t-1'], tid['t-0'], 'prev'),
-                 (tid['t-2'], tid['t-1'], 'prev'),
-             }))
+        self.assertEqual(
+            self.tgg.optimized_task_graph.graph,
+            graph.Graph({tid['t-0'], tid['t-1'], tid['t-2']}, {
+                (tid['t-1'], tid['t-0'], 'prev'),
+                (tid['t-2'], tid['t-1'], 'prev'),
+            })
+            )
 
 if __name__ == '__main__':
     main()
--- a/taskcluster/taskgraph/test/test_graph.py
+++ b/taskcluster/taskgraph/test/test_graph.py
@@ -81,17 +81,17 @@ class TestGraph(unittest.TestCase):
                          Graph(set(['1', '2', '3']), {
                              ('2', '1', 'red'),
                              ('2', '1', 'blue'),
                              ('3', '1', 'red'),
                              ('3', '2', 'blue'),
                              ('3', '2', 'green'),
                          }))
 
-    def test_transitive_closure_disjoint(self):
+    def test_transitive_closure_disjoint_edges(self):
         "transitive closure of a disjoint graph keeps those edges"
         self.assertEqual(self.disjoint.transitive_closure(set(['3', 'β'])),
                          Graph(set(['1', '2', '3', 'β', 'γ']), {
                              ('2', '1', 'red'),
                              ('3', '1', 'red'),
                              ('3', '2', 'green'),
                              ('β', 'γ', 'κόκκινο'),
                          }))
--- a/taskcluster/taskgraph/test/test_kind_docker_image.py
+++ b/taskcluster/taskgraph/test/test_kind_docker_image.py
@@ -1,22 +1,20 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import unittest
 import tempfile
-import shutil
 import os
 
 from ..kind import docker_image
-from ..types import Task
-from mozunit import main, MockedOpen
+from mozunit import main
 
 
 class TestDockerImageKind(unittest.TestCase):
 
     def setUp(self):
         self.kind = docker_image.DockerImageKind(
                 os.path.join(docker_image.GECKO, 'taskcluster', 'ci', 'docker-image'),
                 {})
--- a/taskcluster/taskgraph/test/test_kind_legacy.py
+++ b/taskcluster/taskgraph/test/test_kind_legacy.py
@@ -3,21 +3,19 @@
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import unittest
 
 from ..kind.legacy import (
     LegacyKind,
-    TASKID_PLACEHOLDER,
     validate_build_task,
     BuildTaskValidationException
 )
-from ..types import Task
 from mozunit import main
 
 
 class TestLegacyKind(unittest.TestCase):
     # NOTE: much of LegacyKind is copy-pasted from the old legacy code, which
     # is emphatically *not* designed for testing, so this test class does not
     # attempt to test the entire class.
 
--- a/taskcluster/taskgraph/test/test_optimize.py
+++ b/taskcluster/taskgraph/test/test_optimize.py
@@ -3,20 +3,18 @@
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import unittest
 
 from ..optimize import optimize_task_graph, resolve_task_references
 from ..optimize import annotate_task_graph, get_subgraph
-from .. import optimize
 from .. import types
 from .. import graph
-from mozunit import main
 
 
 class TestResolveTaskReferences(unittest.TestCase):
 
     def do(self, input, output):
         taskid_for_edge_name = {'edge%d' % n: 'tid%d' % n for n in range(1, 4)}
         self.assertEqual(resolve_task_references('subject', input, taskid_for_edge_name), output)
 
@@ -42,18 +40,21 @@ class TestResolveTaskReferences(unittest
 
     def test_escaping(self):
         "resolve_task_references resolves escapes in task references"
         self.do({'escape': {'task-reference': '<<><edge3>>'}},
                 {'escape': '<tid3>'})
 
     def test_invalid(self):
         "resolve_task_references raises a KeyError on reference to an invalid task"
-        self.assertRaisesRegexp(KeyError, "task 'subject' has no dependency with label 'no-such'", lambda:
-            resolve_task_references('subject', {'task-reference': '<no-such>'}, {}))
+        self.assertRaisesRegexp(
+            KeyError,
+            "task 'subject' has no dependency with label 'no-such'",
+            lambda: resolve_task_references('subject', {'task-reference': '<no-such>'}, {})
+            )
 
 
 class FakeKind(object):
 
     def __init__(self, optimize_task):
         self.optimize_task = optimize_task
 
 
@@ -74,86 +75,99 @@ class TestOptimize(unittest.TestCase):
     def make_graph(self, *tasks_and_edges):
         tasks = {t.label: t for t in tasks_and_edges if isinstance(t, types.Task)}
         edges = {e for e in tasks_and_edges if not isinstance(e, types.Task)}
         return types.TaskGraph(tasks, graph.Graph(set(tasks), edges))
 
     def assert_annotations(self, graph, **annotations):
         def repl(task_id):
             return 'SLUGID' if task_id and len(task_id) == 22 else task_id
-        got_annotations = {t.label: (t.optimized, repl(t.task_id)) for t in graph.tasks.itervalues()}
+        got_annotations = {
+            t.label: (t.optimized, repl(t.task_id)) for t in graph.tasks.itervalues()
+            }
         self.assertEqual(got_annotations, annotations)
 
     def test_annotate_task_graph_no_optimize(self):
         "annotating marks everything as un-optimized if the kind returns that"
         self.make_kind(lambda task, deps: (False, None))
         graph = self.make_graph(
             self.make_task('task1'),
             self.make_task('task2'),
             self.make_task('task3'),
             ('task2', 'task1', 'build'),
             ('task2', 'task3', 'image'),
         )
-        opt = annotate_task_graph(graph, set(),
-                graph.graph.named_links_dict(), {})
-        self.assert_annotations(graph,
+        annotate_task_graph(graph, set(), graph.graph.named_links_dict(), {})
+        self.assert_annotations(
+            graph,
             task1=(False, None),
             task2=(False, None),
-            task3=(False, None))
+            task3=(False, None)
+            )
 
     def test_annotate_task_graph_taskid_without_optimize(self):
         "raises exception if kind returns a taskid without optimizing"
         self.make_kind(lambda task, deps: (False, 'some-taskid'))
         graph = self.make_graph(self.make_task('task1'))
-        self.assertRaises(Exception, lambda:
-            annotate_task_graph(graph, set(), graph.graph.named_links_dict(), {}))
+        self.assertRaises(
+            Exception,
+            lambda: annotate_task_graph(graph, set(), graph.graph.named_links_dict(), {})
+            )
 
     def test_annotate_task_graph_optimize_away_dependency(self):
         "raises exception if kind optimizes away a task on which another depends"
         self.make_kind(lambda task, deps: (True, None) if task.label == 'task1' else (False, None))
         graph = self.make_graph(
             self.make_task('task1'),
             self.make_task('task2'),
             ('task2', 'task1', 'build'),
         )
-        self.assertRaises(Exception, lambda:
-            annotate_task_graph(graph, set(), graph.graph.named_links_dict(), {}))
+        self.assertRaises(
+            Exception,
+            lambda: annotate_task_graph(graph, set(), graph.graph.named_links_dict(), {})
+            )
 
     def test_annotate_task_graph_do_not_optimize(self):
         "annotating marks everything as un-optimized if in do_not_optimize"
         self.make_kind(lambda task, deps: (True, 'taskid'))
         graph = self.make_graph(
             self.make_task('task1'),
             self.make_task('task2'),
             ('task2', 'task1', 'build'),
         )
         label_to_taskid = {}
-        opt = annotate_task_graph(graph, {'task1', 'task2'},
-                graph.graph.named_links_dict(), label_to_taskid)
-        self.assert_annotations(graph,
+        annotate_task_graph(graph, {'task1', 'task2'},
+                            graph.graph.named_links_dict(), label_to_taskid)
+        self.assert_annotations(
+            graph,
             task1=(False, None),
-            task2=(False, None))
+            task2=(False, None)
+            )
         self.assertEqual(label_to_taskid, {})
 
     def test_annotate_task_graph_nos_propagate(self):
         "annotating marks a task with a non-optimized dependency as non-optimized"
-        self.make_kind(lambda task, deps: (False, None) if task.label == 'task1' else (True, 'taskid'))
+        self.make_kind(
+            lambda task, deps: (False, None) if task.label == 'task1' else (True, 'taskid')
+            )
         graph = self.make_graph(
             self.make_task('task1'),
             self.make_task('task2'),
             self.make_task('task3'),
             ('task2', 'task1', 'build'),
             ('task2', 'task3', 'image'),
         )
-        opt = annotate_task_graph(graph, set(),
-                graph.graph.named_links_dict(), {})
-        self.assert_annotations(graph,
+        annotate_task_graph(graph, set(),
+                            graph.graph.named_links_dict(), {})
+        self.assert_annotations(
+            graph,
             task1=(False, None),
             task2=(False, None),  # kind would have returned (True, 'taskid') here
-            task3=(True, 'taskid'))
+            task3=(True, 'taskid')
+            )
 
     def test_get_subgraph_single_dep(self):
         "when a single dependency is optimized, it is omitted from the graph"
         graph = self.make_graph(
             self.make_task('task1', optimized=True, task_id='dep1'),
             self.make_task('task2', optimized=False),
             self.make_task('task3', optimized=False),
             ('task2', 'task1', 'build'),
@@ -202,18 +216,21 @@ class TestOptimize(unittest.TestCase):
         self.assertEqual(sub.graph.edges, set())
         self.assertEqual(sub.tasks[task1].task_id, task1)
         self.assertEqual(sorted(sub.tasks[task1].task['dependencies']), [])
 
     def test_get_subgraph_refs_resolved(self):
         "get_subgraph resolves task references"
         graph = self.make_graph(
             self.make_task('task1', optimized=True, task_id='dep1'),
-            self.make_task('task2', optimized=False,
-                task_def={'payload': {'task-reference': 'http://<build>/<test>'}}),
+            self.make_task(
+                'task2',
+                optimized=False,
+                task_def={'payload': {'task-reference': 'http://<build>/<test>'}}
+                ),
             ('task2', 'task1', 'build'),
             ('task2', 'task3', 'test'),
             self.make_task('task3', optimized=False),
         )
         label_to_taskid = {'task1': 'dep1'}
         sub = get_subgraph(graph, graph.graph.named_links_dict(), label_to_taskid)
         task2 = label_to_taskid['task2']
         task3 = label_to_taskid['task3']
@@ -221,17 +238,19 @@ class TestOptimize(unittest.TestCase):
         self.assertEqual(sub.graph.edges, {(task2, task3, 'test')})
         self.assertEqual(sub.tasks[task2].task_id, task2)
         self.assertEqual(sorted(sub.tasks[task2].task['dependencies']), sorted([task3, 'dep1']))
         self.assertEqual(sub.tasks[task2].task['payload'], 'http://dep1/' + task3)
         self.assertEqual(sub.tasks[task3].task_id, task3)
 
     def test_optimize(self):
         "optimize_task_graph annotates and extracts the subgraph from a simple graph"
-        self.make_kind(lambda task, deps: (True, 'dep1') if task.label == 'task1' else (False, None))
+        self.make_kind(
+            lambda task, deps: (True, 'dep1') if task.label == 'task1' else (False, None)
+            )
         input = self.make_graph(
             self.make_task('task1'),
             self.make_task('task2'),
             self.make_task('task3'),
             ('task2', 'task1', 'build'),
             ('task2', 'task3', 'image'),
         )
         opt, label_to_taskid = optimize_task_graph(input, set())
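
The tests above pin down the contract between annotate_task_graph and a kind's
optimization hook: the hook is handed a task plus what the tests call `deps` and
returns a pair (optimized, task_id). A hedged summary of that contract, based only
on the assertions in TestOptimize; the function body and label below are
illustrative, not taken from the tree.

    def optimize_task(task, deps):
        # (False, None)     -> keep the task; it has to run.
        # (True, 'taskid')  -> replace the task with an existing 22-character task id.
        # (True, None)      -> drop the task; invalid if another task depends on it.
        # (False, 'taskid') -> invalid; annotate_task_graph raises on this combination.
        if task.label == 'docker-image-already-built':  # illustrative label
            return True, 'aaaaaaaaaaaaaaaaaaaaaa'       # placeholder 22-character task id
        return False, None
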
--- a/taskcluster/taskgraph/test/test_parameters.py
+++ b/taskcluster/taskgraph/test/test_parameters.py
@@ -4,20 +4,22 @@
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import unittest
 
 from ..parameters import Parameters, load_parameters_file
 from mozunit import main, MockedOpen
 
+
 class TestParameters(unittest.TestCase):
 
     def test_Parameters_immutable(self):
         p = Parameters(x=10, y=20)
+
         def assign():
             p['x'] = 20
         self.assertRaises(Exception, assign)
 
     def test_Parameters_KeyError(self):
         p = Parameters(x=10, y=20)
         self.assertRaises(KeyError, lambda: p['z'])
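
These Parameters tests describe a read-only mapping: reads of known keys work,
writes raise, and unknown keys raise KeyError. A minimal usage sketch, assuming the
taskcluster directory is on the Python path so taskgraph.parameters is importable:

    from taskgraph.parameters import Parameters

    p = Parameters(x=10, y=20)
    print(p['x'])        # 10
    try:
        p['x'] = 20      # Parameters is immutable, so assignment raises
    except Exception:
        print('writes are rejected')
    try:
        p['z']           # unknown keys raise KeyError
    except KeyError:
        print('unknown keys are rejected')
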
 
--- a/taskcluster/taskgraph/test/test_try_option_syntax.py
+++ b/taskcluster/taskgraph/test/test_try_option_syntax.py
@@ -9,40 +9,42 @@ import unittest
 from ..try_option_syntax import TryOptionSyntax
 from ..graph import Graph
 from ..types import TaskGraph, Task
 from mozunit import main
 
 # an empty graph, for things that don't look at it
 empty_graph = TaskGraph({}, Graph(set(), set()))
 
+
 def unittest_task(n, tp):
     return (n, Task('test', n, {
         'unittest_try_name': n,
         'test_platform': tp,
     }))
 
+
 def talos_task(n, tp):
     return (n, Task('test', n, {
         'talos_try_name': n,
         'test_platform': tp,
     }))
 
-tasks = {k: v for k,v in [
+tasks = {k: v for k, v in [
     unittest_task('mochitest-browser-chrome', 'linux'),
     unittest_task('mochitest-browser-chrome-e10s', 'linux64'),
     unittest_task('mochitest-chrome', 'linux'),
     unittest_task('mochitest-webgl', 'linux'),
     unittest_task('crashtest-e10s', 'linux'),
     unittest_task('gtest', 'linux64'),
     talos_task('dromaeojs', 'linux64'),
 ]}
-unittest_tasks = {k: v for k,v in tasks.iteritems()
+unittest_tasks = {k: v for k, v in tasks.iteritems()
                   if 'unittest_try_name' in v.attributes}
-talos_tasks = {k: v for k,v in tasks.iteritems()
+talos_tasks = {k: v for k, v in tasks.iteritems()
                if 'talos_try_name' in v.attributes}
 graph_with_jobs = TaskGraph(tasks, Graph(set(tasks), set()))
 
 
 class TestTryOptionSyntax(unittest.TestCase):
 
     def test_empty_message(self):
         "Given an empty message, it should return an empty value"
--- a/taskcluster/taskgraph/test/test_util_docker.py
+++ b/taskcluster/taskgraph/test/test_util_docker.py
@@ -5,33 +5,35 @@
 from __future__ import absolute_import, print_function, unicode_literals
 
 import os
 import shutil
 import tempfile
 import unittest
 
 from ..util import docker
-from mozunit import main, MockedOpen
+from mozunit import MockedOpen
 
 
 class TestDocker(unittest.TestCase):
 
     def test_generate_context_hash(self):
         tmpdir = tempfile.mkdtemp()
         old_GECKO = docker.GECKO
         docker.GECKO = tmpdir
         try:
             os.makedirs(os.path.join(tmpdir, 'docker', 'my-image'))
             with open(os.path.join(tmpdir, 'docker', 'my-image', 'Dockerfile'), "w") as f:
                 f.write("FROM node\nADD a-file\n")
             with open(os.path.join(tmpdir, 'docker', 'my-image', 'a-file'), "w") as f:
                 f.write("data\n")
-            self.assertEqual(docker.generate_context_hash('docker/my-image'),
-                    '781143fcc6cc72c9024b058665265cb6bae3fb8031cad7227dd169ffbfced434')
+            self.assertEqual(
+                docker.generate_context_hash('docker/my-image'),
+                '781143fcc6cc72c9024b058665265cb6bae3fb8031cad7227dd169ffbfced434'
+                )
         finally:
             docker.GECKO = old_GECKO
             shutil.rmtree(tmpdir)
 
     def test_docker_image_explicit_registry(self):
         files = {}
         files["{}/myimage/REGISTRY".format(docker.DOCKER_ROOT)] = "cool-images"
         files["{}/myimage/VERSION".format(docker.DOCKER_ROOT)] = "1.2.3"
--- a/taskcluster/taskgraph/test/test_util_legacy_commit_parser.py
+++ b/taskcluster/taskgraph/test/test_util_legacy_commit_parser.py
@@ -7,84 +7,85 @@
 import unittest
 import mozunit
 from taskgraph.util.legacy_commit_parser import (
     parse_commit,
     normalize_test_list,
     parse_test_opts
 )
 
+
 class TestCommitParser(unittest.TestCase):
 
     def test_normalize_test_list_none(self):
         self.assertEqual(
             normalize_test_list({}, ['woot'], 'none'), []
         )
 
     def test_normalize_test_list_all(self):
         self.assertEqual(
             normalize_test_list({}, ['woot'], 'all'),
-            [{ 'test': 'woot' }]
+            [{'test': 'woot'}]
         )
 
     def test_normalize_test_list_specific_tests(self):
         self.assertEqual(sorted(
             normalize_test_list({}, ['woot'], 'a,b,c')),
-            sorted([{ 'test': 'a' }, { 'test': 'b' }, { 'test': 'c' }])
+            sorted([{'test': 'a'}, {'test': 'b'}, {'test': 'c'}])
         )
 
     def test_normalize_test_list_specific_tests_with_whitespace(self):
         self.assertEqual(sorted(
             normalize_test_list({}, ['woot'], 'a, b, c')),
-            sorted([{ 'test': 'a' }, { 'test': 'b' }, { 'test': 'c' }])
+            sorted([{'test': 'a'}, {'test': 'b'}, {'test': 'c'}])
         )
 
     def test_normalize_test_list_with_alias(self):
         self.assertEqual(sorted(
-            normalize_test_list({ "a": "alpha" }, ['woot'], 'a, b, c')),
-            sorted([{ 'test': 'alpha' }, { 'test': 'b' }, { 'test': 'c' }])
+            normalize_test_list({"a": "alpha"}, ['woot'], 'a, b, c')),
+            sorted([{'test': 'alpha'}, {'test': 'b'}, {'test': 'c'}])
         )
 
     def test_normalize_test_list_with_alias_and_chunk(self):
         self.assertEqual(
-            normalize_test_list({ "a": "alpha" }, ['woot'], 'a-1, a-3'),
-            [{ 'test': 'alpha', "only_chunks": set([1, 3])  }]
+            normalize_test_list({"a": "alpha"}, ['woot'], 'a-1, a-3'),
+            [{'test': 'alpha', "only_chunks": set([1, 3])}]
         )
 
     def test_normalize_test_list_with_alias_pattern(self):
-        self.assertEqual(sorted(
-            normalize_test_list({ "a": '/.*oo.*/' },
-                                ['woot', 'foo', 'bar'],
-                                'a, b, c')),
-            sorted([{ 'test': t } for t in ['woot', 'foo', 'b', 'c']])
+        self.assertEqual(
+            sorted(normalize_test_list({"a": '/.*oo.*/'},
+                                       ['woot', 'foo', 'bar'],
+                                       'a, b, c')),
+            sorted([{'test': t} for t in ['woot', 'foo', 'b', 'c']])
         )
 
     def test_normalize_test_list_with_alias_pattern_anchored(self):
-        self.assertEqual(sorted(
-            normalize_test_list({ "a": '/.*oo/' },
-                                ['woot', 'foo', 'bar'],
-                                'a, b, c')),
-            sorted([{ 'test': t } for t in ['foo', 'b', 'c']])
+        self.assertEqual(
+            sorted(normalize_test_list({"a": '/.*oo/'},
+                                       ['woot', 'foo', 'bar'],
+                                       'a, b, c')),
+            sorted([{'test': t} for t in ['foo', 'b', 'c']])
         )
 
     def test_normalize_test_list_with_alias_pattern_list(self):
-        self.assertEqual(sorted(
-            normalize_test_list({ "a": ['/.*oo/', 'bar', '/bi.*/'] },
-                                ['woot', 'foo', 'bar', 'bing', 'baz'],
-                                'a, b')),
-            sorted([{ 'test': t } for t in ['foo', 'bar', 'bing', 'b']])
+        self.assertEqual(
+            sorted(normalize_test_list({"a": ['/.*oo/', 'bar', '/bi.*/']},
+                                       ['woot', 'foo', 'bar', 'bing', 'baz'],
+                                       'a, b')),
+            sorted([{'test': t} for t in ['foo', 'bar', 'bing', 'b']])
         )
 
     def test_normalize_test_list_with_alias_pattern_list_chunks(self):
-        self.assertEqual(sorted(
-            normalize_test_list({ "a": ['/.*oo/', 'bar', '/bi.*/'] },
-                                ['woot', 'foo', 'bar', 'bing', 'baz'],
-                                'a-1, a-4, b')),
+        self.assertEqual(
+            sorted(normalize_test_list({"a": ['/.*oo/', 'bar', '/bi.*/']},
+                                       ['woot', 'foo', 'bar', 'bing', 'baz'],
+                                       'a-1, a-4, b')),
             sorted([{'test': 'b'}] + [
-                { 'test': t, 'only_chunks': set([1, 4])} for t in ['foo', 'bar', 'bing']])
+                {'test': t, 'only_chunks': set([1, 4])} for t in ['foo', 'bar', 'bing']])
         )
 
     def test_commit_no_tests(self):
         '''
         This test covers the case of builds but no tests passed -u none
         '''
         commit = 'try: -b o -p linux -u none -t none'
         jobs = {
@@ -322,34 +323,33 @@ class TestCommitParser(unittest.TestCase
                 'interactive': False,
                 'when': {},
             },
             {
                 'task': 'task/linux',
                 'dependents': [{
                     'allowed_build_tasks': {
                         'task/linux': {
-                            'task':'task/web-platform-tests',
-                            'unittest_try_name':'web-platform-tests'
+                            'task': 'task/web-platform-tests',
+                            'unittest_try_name': 'web-platform-tests'
                         }
                     }
                 }],
                 'additional-parameters': {},
                 'post-build': [],
                 'build_name': 'linux',
                 'build_type': 'opt',
                 'interactive': False,
                 'when': {},
             }
         ]
 
         result, triggers = parse_commit(commit, jobs)
         self.assertEqual(expected, result)
 
-
     def test_specific_test_platforms(self):
         '''
         This test cases covers the platform specific test exclusion options.
         Intentionally includes platforms with spaces.
         '''
         commit = 'try: -b od -p all -u all[Windows XP,b2g] -t none'
         jobs = {
             'flags': {
@@ -654,16 +654,78 @@ class TestCommitParser(unittest.TestCase
                         }
                     }
                 }
             }
         }
 
         expected = [
             {
+                'task': 'task/linux64',
+                'dependents': [
+                    {
+                        'allowed_build_tasks': {
+                            'task/linux': {
+                                'task': 'task/web-platform-tests',
+                                'unittest_try_name': 'web-platform-tests',
+                            },
+                            'task/linux-debug': {
+                                'task': 'task/web-platform-tests',
+                                'unittest_try_name': 'web-platform-tests',
+                            },
+                            'task/linux64': {
+                                'task': 'task/web-platform-tests',
+                                'unittest_try_name': 'web-platform-tests',
+                            },
+                            'task/linux64-debug': {
+                                'task': 'task/web-platform-tests',
+                                'unittest_try_name': 'web-platform-tests',
+                            }
+                        }
+                    }
+                ],
+                'build_name': 'linux64',
+                'build_type': 'opt',
+                'interactive': False,
+                'post-build': [],
+                'when': {},
+                'additional-parameters': {}
+            },
+            {
+                'task': 'task/linux64-debug',
+                'dependents': [
+                    {
+                        'allowed_build_tasks': {
+                            'task/linux': {
+                                'task': 'task/web-platform-tests',
+                                'unittest_try_name': 'web-platform-tests',
+                            },
+                            'task/linux-debug': {
+                                'task': 'task/web-platform-tests',
+                                'unittest_try_name': 'web-platform-tests',
+                            },
+                            'task/linux64': {
+                                'task': 'task/web-platform-tests',
+                                'unittest_try_name': 'web-platform-tests',
+                            },
+                            'task/linux64-debug': {
+                                'task': 'task/web-platform-tests',
+                                'unittest_try_name': 'web-platform-tests',
+                            }
+                        }
+                    }
+                ],
+                'build_name': 'linux64',
+                'build_type': 'debug',
+                'interactive': False,
+                'post-build': [],
+                'when': {},
+                'additional-parameters': {}
+            },
+            {
                 'task': 'task/linux',
                 'dependents': [
                     {
                         'allowed_build_tasks': {
                             'task/linux': {
                                 'task': 'task/web-platform-tests',
                                 'unittest_try_name': 'web-platform-tests',
                             },
@@ -677,16 +739,21 @@ class TestCommitParser(unittest.TestCase
                             },
                             'task/linux64-debug': {
                                 'task': 'task/web-platform-tests',
                                 'unittest_try_name': 'web-platform-tests',
                             }
                         }
                     }
                 ],
+                'build_name': 'linux',
+                'build_type': 'opt',
+                'interactive': False,
+                'post-build': [],
+                'when': {},
                 'additional-parameters': {}
             },
             {
                 'task': 'task/linux-debug',
                 'dependents': [
                     {
                         'allowed_build_tasks': {
                             'task/linux': {
@@ -703,80 +770,34 @@ class TestCommitParser(unittest.TestCase
                             },
                             'task/linux64-debug': {
                                 'task': 'task/web-platform-tests',
                                 'unittest_try_name': 'web-platform-tests',
                             }
                         }
                     }
                 ],
-                'additional-parameters': {}
-            },
-            {
-                'task': 'task/linux64',
-                'dependents': [
-                    {
-                        'allowed_build_tasks': {
-                            'task/linux': {
-                                'task': 'task/web-platform-tests',
-                                'unittest_try_name': 'web-platform-tests',
-                            },
-                            'task/linux-debug': {
-                                'task': 'task/web-platform-tests',
-                                'unittest_try_name': 'web-platform-tests',
-                            },
-                            'task/linux64': {
-                                'task': 'task/web-platform-tests',
-                                'unittest_try_name': 'web-platform-tests',
-                            },
-                            'task/linux64-debug': {
-                                'task': 'task/web-platform-tests',
-                                'unittest_try_name': 'web-platform-tests',
-                            }
-                        }
-                    }
-                ],
-                'additional-parameters': {}
-            },
-            {
-                'task': 'task/linux64-debug',
-                'dependents': [
-                    {
-                        'allowed_build_tasks': {
-                            'task/linux': {
-                                'task': 'task/web-platform-tests',
-                                'unittest_try_name': 'web-platform-tests',
-                            },
-                            'task/linux-debug': {
-                                'task': 'task/web-platform-tests',
-                                'unittest_try_name': 'web-platform-tests',
-                            },
-                            'task/linux64': {
-                                'task': 'task/web-platform-tests',
-                                'unittest_try_name': 'web-platform-tests',
-                            },
-                            'task/linux64-debug': {
-                                'task': 'task/web-platform-tests',
-                                'unittest_try_name': 'web-platform-tests',
-                            }
-                        }
-                    }
-                ],
+                'build_name': 'linux',
+                'build_type': 'debug',
+                'interactive': False,
+                'post-build': [],
+                'when': {},
                 'additional-parameters': {}
             }
         ]
 
         result, triggers = parse_commit(commit, jobs)
         self.assertEqual(expected, result)
 
-    def test_commit_with_builds_and_tests(self):
+    def test_commit_long_form(self):
         '''
         This tests the long form of the try flags.
         '''
-        commit = 'try: --build od --platform linux,linux64 --unittests web-platform-tests --talos none'
+        commit = \
+            'try: --build od --platform linux,linux64 --unittests web-platform-tests --talos none'
         jobs = {
             'flags': {
                 'builds': ['linux', 'linux64'],
                 'tests': ['web-platform-tests'],
             },
             'builds': {
                 'linux': {
                     'types': {
@@ -949,40 +970,40 @@ class TestCommitParser(unittest.TestCase
         self.assertEqual(sorted(expected), sorted(result))
 
 
 class TryTestParserTest(unittest.TestCase):
 
     def test_parse_opts_valid(self):
         self.assertEquals(
             parse_test_opts('all[Mulet Linux]'),
-            [{ 'test': 'all', 'platforms': ['Mulet Linux'] }]
+            [{'test': 'all', 'platforms': ['Mulet Linux']}]
         )
 
         self.assertEquals(
             parse_test_opts('all[Amazing, Foobar woot,yeah]'),
-            [{ 'test': 'all', 'platforms': ['Amazing', 'Foobar woot', 'yeah'] }]
+            [{'test': 'all', 'platforms': ['Amazing', 'Foobar woot', 'yeah']}]
         )
 
         self.assertEquals(
             parse_test_opts('a,b, c'),
             [
-                { 'test': 'a' },
-                { 'test': 'b' },
-                { 'test': 'c' },
+                {'test': 'a'},
+                {'test': 'b'},
+                {'test': 'c'},
             ]
         )
         self.assertEquals(
             parse_test_opts('woot, bar[b], baz, qux[ z ],a'),
             [
-                { 'test': 'woot' },
-                { 'test': 'bar', 'platforms': ['b'] },
-                { 'test': 'baz' },
-                { 'test': 'qux', 'platforms': ['z'] },
-                { 'test': 'a' }
+                {'test': 'woot'},
+                {'test': 'bar', 'platforms': ['b']},
+                {'test': 'baz'},
+                {'test': 'qux', 'platforms': ['z']},
+                {'test': 'a'}
             ]
         )
 
         self.assertEquals(
             parse_test_opts('mochitest-3[Ubuntu,10.6,10.8,Windows XP,Windows 7,Windows 8]'),
             [
                 {
                     'test': 'mochitest-3',
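
The expected values in these tests double as documentation for the two smaller
helpers. A short usage sketch, again assuming taskgraph is importable, with the
commented outputs taken directly from the assertions above:

    from taskgraph.util.legacy_commit_parser import normalize_test_list, parse_test_opts

    # '-u'-style strings become a list of {'test': ..., 'platforms': [...]} dicts.
    print(parse_test_opts('all[Mulet Linux]'))
    # -> [{'test': 'all', 'platforms': ['Mulet Linux']}]

    # Aliases and chunk suffixes are resolved against the known test list.
    print(normalize_test_list({'a': 'alpha'}, ['woot'], 'a-1, a-3'))
    # -> [{'test': 'alpha', 'only_chunks': set([1, 3])}]
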
--- a/taskcluster/taskgraph/test/test_util_templates.py
+++ b/taskcluster/taskgraph/test/test_util_templates.py
@@ -1,16 +1,14 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
-import os
-
 import unittest
 import mozunit
 import textwrap
 from taskgraph.util.templates import (
     Templates,
     TemplatesException
 )
 
@@ -148,42 +146,41 @@ class TemplatesTest(unittest.TestCase):
     def test_inheritance_implicat_pass(self):
         '''
         Implicitly pass parameters from the child to the ancestor.
         '''
         content = self.subject.load('inherit_pass.yml', {
             'a': 'overriden'
         })
 
-        self.assertEqual(content, { 'values': ['overriden', 'b', 'c'] });
-
+        self.assertEqual(content, {'values': ['overriden', 'b', 'c']})
 
     def test_inheritance_circular(self):
         '''
         Circular reference handling.
         '''
         with self.assertRaisesRegexp(TemplatesException, 'circular'):
             self.subject.load('circular.yml', {})
 
     def test_deep_inheritance(self):
         content = self.subject.load('deep/4.yml', {
             'value': 'myvalue'
         })
-        self.assertEqual(content, { 'variable': 'myvalue' })
+        self.assertEqual(content, {'variable': 'myvalue'})
 
     def test_inheritance_with_simple_extensions(self):
         content = self.subject.load('extend_parent.yml', {})
         self.assertEquals(content, {
             'list': ['1', '2', '3', '4'],
             'obj': {
                 'from_parent': True,
                 'deeper': {
                     'woot': 'bar',
                     'list': ['baz', 'bar']
                 },
                 'level': 2,
             },
-            'was_list': { 'replaced': True }
+            'was_list': {'replaced': True}
         })
 
 
 if __name__ == '__main__':
     mozunit.main()
--- a/taskcluster/taskgraph/test/test_util_time.py
+++ b/taskcluster/taskgraph/test/test_util_time.py
@@ -9,16 +9,17 @@ import mozunit
 from datetime import datetime
 from taskgraph.util.time import (
     InvalidString,
     UnknownTimeMeasurement,
     value_of,
     json_time_from_now
 )
 
+
 class FromNowTest(unittest.TestCase):
 
     def test_invalid_str(self):
         with self.assertRaises(InvalidString):
             value_of('wtfs')
 
     def test_missing_unit(self):
         with self.assertRaises(InvalidString):
@@ -38,19 +39,19 @@ class FromNowTest(unittest.TestCase):
         self.assertEqual(value_of('1 month').total_seconds(), 2592000)
         self.assertEqual(value_of('1y').total_seconds(), 31536000)
 
         with self.assertRaises(UnknownTimeMeasurement):
             value_of('1m').total_seconds()  # ambiguous between minute and month
 
     def test_json_from_now_utc_now(self):
         # Just here to ensure we don't raise.
-        time = json_time_from_now('1 years')
+        json_time_from_now('1 years')
 
     def test_json_from_now(self):
         now = datetime(2014, 1, 1)
         self.assertEqual(json_time_from_now('1 years', now),
-                '2015-01-01T00:00:00Z')
+                         '2015-01-01T00:00:00Z')
         self.assertEqual(json_time_from_now('6 days', now),
-                '2014-01-07T00:00:00Z')
+                         '2014-01-07T00:00:00Z')
 
 if __name__ == '__main__':
     mozunit.main()
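
The time helpers exercised above have a small surface: value_of turns a duration
string into a datetime.timedelta, and json_time_from_now renders now plus that
duration as an ISO string with a trailing 'Z'. A short sketch, with the second
commented output taken from the assertion above:

    from datetime import datetime

    from taskgraph.util.time import json_time_from_now, value_of

    print(value_of('2 hours').total_seconds())                 # 7200.0
    print(json_time_from_now('1 years', datetime(2014, 1, 1)))
    # -> 2015-01-01T00:00:00Z
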
--- a/taskcluster/taskgraph/try_option_syntax.py
+++ b/taskcluster/taskgraph/try_option_syntax.py
@@ -13,24 +13,27 @@ TRY_DELIMITER = 'try:'
 
 # The build type aliases are very cryptic and only used in try flags; these are
 # mappings from the single-char alias to a longer, more recognizable form.
 BUILD_TYPE_ALIASES = {
     'o': 'opt',
     'd': 'debug'
 }
 
+
 # mapping from shortcut name (usable with -u) to a boolean function identifying
 # matching test names
 def alias_prefix(prefix):
     return lambda name: name.startswith(prefix)
 
+
 def alias_contains(infix):
     return lambda name: infix in name
 
+
 def alias_matches(pattern):
     pattern = re.compile(pattern)
     return lambda name: pattern.match(name)
 
 UNITTEST_ALIASES = {
     # Aliases specify shorthands that can be used in try syntax.  The shorthand
     # is the dictionary key, with the value representing a pattern for matching
     # unittest_try_names.
@@ -97,28 +100,28 @@ UNITTEST_ALIASES = {
 # [test_platforms]} translations, This includes only the most commonly-used
 # substrings.  This is intended only for backward-compatibility.  New test
 # platforms should have their `test_platform` spelled out fully in try syntax.
 UNITTEST_PLATFORM_PRETTY_NAMES = {
     'Ubuntu': ['linux', 'linux64'],
     'x64': ['linux64'],
     # other commonly-used substrings for platforms not yet supported with
     # in-tree taskgraphs:
-    #'10.10': [..TODO..],
-    #'10.10.5': [..TODO..],
-    #'10.6': [..TODO..],
-    #'10.8': [..TODO..],
-    #'Android 2.3 API9': [..TODO..],
-    #'Android 4.3 API15+': [..TODO..],
-    #'Windows 7':  [..TODO..],
-    #'Windows 7 VM': [..TODO..],
-    #'Windows 8':  [..TODO..],
-    #'Windows XP': [..TODO..],
-    #'win32': [..TODO..],
-    #'win64': [..TODO..],
+    # '10.10': [..TODO..],
+    # '10.10.5': [..TODO..],
+    # '10.6': [..TODO..],
+    # '10.8': [..TODO..],
+    # 'Android 2.3 API9': [..TODO..],
+    # 'Android 4.3 API15+': [..TODO..],
+    # 'Windows 7':  [..TODO..],
+    # 'Windows 7 VM': [..TODO..],
+    # 'Windows 8':  [..TODO..],
+    # 'Windows XP': [..TODO..],
+    # 'win32': [..TODO..],
+    # 'win64': [..TODO..],
 }
 
 # We have a few platforms for which we want to do some "extra" builds, or at
 # least build-ish things.  Sort of.  Anyway, these other things are implemented
 # as different "platforms".
 RIDEALONG_BUILDS = {
     'linux': [
         'linux-l10n',
@@ -132,16 +135,17 @@ RIDEALONG_BUILDS = {
         'sm-compacting',
         'sm-rootanalysis',
         'sm-package',
     ],
 }
 
 TEST_CHUNK_SUFFIX = re.compile('(.*)-([0-9]+)$')
 
+
 class TryOptionSyntax(object):
 
     def __init__(self, message, full_task_graph):
         """
         Parse a "try syntax" formatted commit message.  This is the old "-b do -p
         win32 -u all" format.  Aliases are applied to map short names to full
         names.
 
@@ -181,46 +185,50 @@ class TryOptionSyntax(object):
                 break
 
         if try_idx is None:
             return
 
         # Argument parser based on try flag flags
         parser = argparse.ArgumentParser()
         parser.add_argument('-b', '--build', dest='build_types')
-        parser.add_argument('-p', '--platform', nargs='?', dest='platforms', const='all', default='all')
-        parser.add_argument('-u', '--unittests', nargs='?', dest='unittests', const='all', default='all')
+        parser.add_argument('-p', '--platform', nargs='?',
+                            dest='platforms', const='all', default='all')
+        parser.add_argument('-u', '--unittests', nargs='?',
+                            dest='unittests', const='all', default='all')
         parser.add_argument('-t', '--talos', nargs='?', dest='talos', const='all', default='all')
-        parser.add_argument('-i', '--interactive', dest='interactive', action='store_true', default=False)
+        parser.add_argument('-i', '--interactive',
+                            dest='interactive', action='store_true', default=False)
         parser.add_argument('-j', '--job', dest='jobs', action='append')
         # In order to run test jobs multiple times
         parser.add_argument('--trigger-tests', dest='trigger_tests', type=int, default=1)
         args, _ = parser.parse_known_args(parts[try_idx:])
 
         self.jobs = self.parse_jobs(args.jobs)
         self.build_types = self.parse_build_types(args.build_types)
         self.platforms = self.parse_platforms(args.platforms)
-        self.unittests = self.parse_test_option("unittest_try_name", args.unittests, full_task_graph)
+        self.unittests = self.parse_test_option(
+            "unittest_try_name", args.unittests, full_task_graph)
         self.talos = self.parse_test_option("talos_try_name", args.talos, full_task_graph)
         self.trigger_tests = args.trigger_tests
         self.interactive = args.interactive
 
     def parse_jobs(self, jobs_arg):
         if not jobs_arg or jobs_arg == ['all']:
             return None
         expanded = []
         for job in jobs_arg:
             expanded.extend(j.strip() for j in job.split(','))
         return expanded
 
     def parse_build_types(self, build_types_arg):
         if build_types_arg is None:
             build_types_arg = []
-        build_types = filter(None, [ BUILD_TYPE_ALIASES.get(build_type) for
-                build_type in build_types_arg ])
+        build_types = filter(None, [BUILD_TYPE_ALIASES.get(build_type) for
+                             build_type in build_types_arg])
         return build_types
 
     def parse_platforms(self, platform_arg):
         if platform_arg == 'all':
             return None
 
         results = []
         for build in platform_arg.split(','):
@@ -368,27 +376,27 @@ class TryOptionSyntax(object):
         '''
         Expand a test if its name refers to an alias, returning a list of test
         dictionaries cloned from the first (to maintain any metadata).
         '''
         if test['test'] not in UNITTEST_ALIASES:
             return [test]
 
         alias = UNITTEST_ALIASES[test['test']]
+
         def mktest(name):
             newtest = copy.deepcopy(test)
             newtest['test'] = name
             return newtest
 
         def exprmatch(alias):
             return [t for t in all_tests if alias(t)]
 
         return [mktest(t) for t in exprmatch(alias)]
 
-
     def parse_test_chunks(self, all_tests, tests):
         '''
         Test flags may include parameters to narrow down the number of chunks in a
         given push. We don't model 1 chunk = 1 job in taskcluster so we must check
         each test flag to see if it is actually specifying a chunk.
         '''
         results = []
         seen_chunks = {}
@@ -496,19 +504,18 @@ class TryOptionSyntax(object):
         else:
             # TODO: match other kinds
             return False
 
     def __str__(self):
         def none_for_all(list):
             if list is None:
                 return '<all>'
-            return ', '.join(str (e) for e in list)
+            return ', '.join(str(e) for e in list)
 
         return "\n".join([
             "build_types: " + ", ".join(self.build_types),
             "platforms: " + none_for_all(self.platforms),
             "unittests: " + none_for_all(self.unittests),
             "jobs: " + none_for_all(self.jobs),
             "trigger_tests: " + str(self.trigger_tests),
             "interactive: " + str(self.interactive),
         ])
-
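
The alias_prefix, alias_contains and alias_matches helpers near the top of this file
are the building blocks for UNITTEST_ALIASES: each returns a predicate over
unittest_try_names. A small sketch using test names from the fixtures earlier in
this series:

    from taskgraph.try_option_syntax import alias_contains, alias_matches, alias_prefix

    names = ['mochitest-browser-chrome', 'mochitest-chrome', 'crashtest-e10s']

    is_mochitest = alias_prefix('mochitest')  # name.startswith('mochitest')
    has_chrome = alias_contains('chrome')     # 'chrome' in name
    is_e10s = alias_matches('.*-e10s')        # re.match, anchored at the start of the name

    print([n for n in names if is_mochitest(n)])  # the two mochitest flavours
    print([n for n in names if has_chrome(n)])    # the two *chrome* names
    print([n for n in names if is_e10s(n)])       # ['crashtest-e10s']
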
--- a/taskcluster/taskgraph/types.py
+++ b/taskcluster/taskgraph/types.py
@@ -1,14 +1,15 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
+
 class Task(object):
     """
     Representation of a task in a TaskGraph.
 
     Each has, at creation:
 
     - kind: Kind instance that created this task
     - label; the label for this task
--- a/taskcluster/taskgraph/util/docker.py
+++ b/taskcluster/taskgraph/util/docker.py
@@ -6,16 +6,17 @@ from __future__ import absolute_import, 
 
 import hashlib
 import os
 
 GECKO = os.path.realpath(os.path.join(__file__, '..', '..', '..', '..'))
 DOCKER_ROOT = os.path.join(GECKO, 'testing', 'docker')
 ARTIFACT_URL = 'https://queue.taskcluster.net/v1/task/{}/artifacts/{}'
 
+
 def docker_image(name):
     '''Determine the docker image name, including repository and tag, from an
     in-tree docker file.'''
     try:
         with open(os.path.join(DOCKER_ROOT, name, 'REGISTRY')) as f:
             registry = f.read().strip()
     except IOError:
         with open(os.path.join(DOCKER_ROOT, 'REGISTRY')) as f:
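
ARTIFACT_URL above is a plain format string with two placeholders, a task id followed
by an artifact path, so building a URL is just str.format. Both values below are made
up for illustration:

    from taskgraph.util.docker import ARTIFACT_URL

    url = ARTIFACT_URL.format('ExampleTaskId123', 'public/image.tar')
    print(url)
    # https://queue.taskcluster.net/v1/task/ExampleTaskId123/artifacts/public/image.tar
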
--- a/taskcluster/taskgraph/util/legacy_commit_parser.py
+++ b/taskcluster/taskgraph/util/legacy_commit_parser.py
@@ -15,16 +15,17 @@ TEST_CHUNK_SUFFIX = re.compile('(.*)-([0
 
 # The build type aliases are very cryptic and only used in try flags; these are
 # mappings from the single-char alias to a longer, more recognizable form.
 BUILD_TYPE_ALIASES = {
     'o': 'opt',
     'd': 'debug'
 }
 
+
 def parse_test_opts(input_str):
     '''Test argument parsing is surprisingly complicated with the "restrictions"
     logic. This function is responsible for parsing it out into an easier-to-work-with
     structure like { test: '..', platforms: ['..'] }'''
 
     # Final results which we will return.
     tests = []
 
@@ -104,16 +105,17 @@ def escape_whitespace_in_brackets(input_
     return result
 
 
 def normalize_platform_list(alias, all_builds, build_list):
     if build_list == 'all':
         return all_builds
     return [alias.get(build, build) for build in build_list.split(',')]
 
+
 def normalize_test_list(aliases, all_tests, job_list):
     '''
     Normalize a set of jobs (builds or tests); there are three common cases:
 
         - job_list is == 'none' (meaning an empty list)
         - job_list is == 'all' (meaning use the list of jobs for that job type)
         - job_list is comma delimited string which needs to be split
 
@@ -132,17 +134,17 @@ def normalize_test_list(aliases, all_tes
     if not tests:
         return []
 
     # Special case where tests is 'all' and must be expanded
     if tests[0]['test'] == 'all':
         results = []
         all_entry = tests[0]
         for test in all_tests:
-            entry = { 'test': test }
+            entry = {'test': test}
             # If there are platform restrictions copy them across the list.
             if 'platforms' in all_entry:
                 entry['platforms'] = list(all_entry['platforms'])
             results.append(entry)
         return parse_test_chunks(aliases, all_tests, results)
     else:
         return parse_test_chunks(aliases, all_tests, tests)
 
@@ -155,16 +157,17 @@ def handle_alias(test, aliases, all_test
     :param dict test: the test to expand
     :param dict aliases: Dict of alias name -> real name.
     :param list all_tests: test flags from job_flags.yml structure.
     '''
     if test['test'] not in aliases:
         return [test]
 
     alias = aliases[test['test']]
+
     def mktest(name):
         newtest = copy.deepcopy(test)
         newtest['test'] = name
         return newtest
 
     def exprmatch(alias):
         if not alias.startswith('/') or not alias.endswith('/'):
             return [alias]
@@ -213,16 +216,17 @@ def parse_test_chunks(aliases, all_tests
                 test['test'] = name
                 test['only_chunks'] = seen_chunks[name]
                 results.append(test)
 
     # uniquify the results over the test names
     results = {test['test']: test for test in results}.values()
     return results
 
+
 def extract_tests_from_platform(test_jobs, build_platform, build_task, tests):
     '''
     Build the list of tests from the current build.
 
     :param dict test_jobs: Entire list of tests (from job_flags.yml).
     :param dict build_platform: Current build platform.
     :param str build_task: Build task path.
     :param list tests: Test flags.
@@ -278,16 +282,17 @@ def extract_tests_from_platform(test_job
     return results
 
 '''
 This module exists to deal with parsing the options flags that try uses. We do
 not try to build a graph or anything here but match up build flags to tasks via
 the "jobs" datastructure (see job_flags.yml)
 '''
 
+
 def parse_commit(message, jobs):
     '''
     :param message: Commit message that is typical to a try push.
     :param jobs: Dict (see job_flags.yml)
     '''
 
     # shlex used to ensure we split correctly when giving values to argparse.
     parts = shlex.split(escape_whitespace_in_brackets(message))
@@ -298,19 +303,21 @@ def parse_commit(message, jobs):
             break
 
     if try_idx is None:
         return [], 0
 
     # Argument parser based on try flag flags
     parser = argparse.ArgumentParser()
     parser.add_argument('-b', '--build', dest='build_types')
-    parser.add_argument('-p', '--platform', nargs='?', dest='platforms', const='all', default='all')
+    parser.add_argument('-p', '--platform', nargs='?',
+                        dest='platforms', const='all', default='all')
     parser.add_argument('-u', '--unittests', nargs='?', dest='tests', const='all', default='all')
-    parser.add_argument('-i', '--interactive', dest='interactive', action='store_true', default=False)
+    parser.add_argument('-i', '--interactive',
+                        dest='interactive', action='store_true', default=False)
     parser.add_argument('-j', '--job', dest='jobs', action='append')
     # In order to run test jobs multiple times
     parser.add_argument('--trigger-tests', dest='trigger_tests', type=int, default=1)
     # Once bug 1250993 is fixed we can only use --trigger-tests
     parser.add_argument('--rebuild', dest='trigger_tests', type=int, default=1)
     args, unknown = parser.parse_known_args(parts[try_idx:])
 
     # Normalize default value to something easier to detect.
@@ -323,18 +330,18 @@ def parse_commit(message, jobs):
         for job in args.jobs:
             expanded.extend(j.strip() for j in job.split(','))
         args.jobs = expanded
 
     # Then builds...
     if args.build_types is None:
         args.build_types = []
 
-    build_types = [ BUILD_TYPE_ALIASES.get(build_type, build_type) for
-            build_type in args.build_types ]
+    build_types = [BUILD_TYPE_ALIASES.get(build_type, build_type) for
+                   build_type in args.build_types]
 
     aliases = jobs['flags'].get('aliases', {})
 
     platforms = set()
     for base in normalize_platform_list(aliases, jobs['flags']['builds'], args.platforms):
         # Silently skip unknown platforms.
         if base not in jobs['builds']:
             continue
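
normalize_platform_list above is the build-side counterpart of normalize_test_list:
'all' short-circuits to every known build, otherwise the comma-separated list is
split and aliases are applied per entry. The alias and build names below are
illustrative:

    from taskgraph.util.legacy_commit_parser import normalize_platform_list

    aliases = {'lnx': 'linux'}
    all_builds = ['linux', 'linux64', 'win32']

    print(normalize_platform_list(aliases, all_builds, 'all'))
    # -> ['linux', 'linux64', 'win32']
    print(normalize_platform_list(aliases, all_builds, 'lnx,linux64'))
    # -> ['linux', 'linux64']
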
--- a/taskcluster/taskgraph/util/templates.py
+++ b/taskcluster/taskgraph/util/templates.py
@@ -1,46 +1,49 @@
 import os
 
 import pystache
 import yaml
 
 # Key used in template inheritance...
 INHERITS_KEY = '$inherits'
 
+
 def merge_to(source, dest):
     '''
     Merge dict and arrays (override scalar values)
 
     :param dict source: to copy from
     :param dict dest: to copy to.
     '''
 
     for key, value in source.items():
         # Override mismatching or empty types
-        if type(value) != type(dest.get(key)):
+        if type(value) != type(dest.get(key)):  # noqa
             dest[key] = source[key]
             continue
 
         # Merge dict
         if isinstance(value, dict):
             merge_to(value, dest[key])
             continue
 
         if isinstance(value, list):
             dest[key] = dest[key] + source[key]
             continue
 
         dest[key] = source[key]
 
     return dest
 
+
 class TemplatesException(Exception):
     pass
 
+
 class Templates():
     '''
     The taskcluster integration makes heavy use of yaml to describe tasks; this
     class handles the loading/rendering.
     '''
 
     def __init__(self, root):
         '''
@@ -49,17 +52,17 @@ class Templates():
         :param str root: Root path where to load yaml files.
         '''
         if not root:
             raise TemplatesException('Root is required')
 
         if not os.path.isdir(root):
             raise TemplatesException('Root must be a directory')
 
-        self.root = root;
+        self.root = root
 
     def _inherits(self, path, obj, properties, seen):
         blueprint = obj.pop(INHERITS_KEY)
         seen.add(path)
 
         # Resolve the path here so we can detect circular references.
         template = self.resolve_path(blueprint.get('from'))
         variables = blueprint.get('variables', {})
@@ -81,18 +84,16 @@ class Templates():
             out = self.load(template, variables, seen)
         except TemplatesException as e:
             msg = 'Error expanding parent ("{}") of "{}" original error {}'
             raise TemplatesException(msg.format(template, path, str(e)))
 
         # Anything left in obj is merged into final results (and overrides)
         return merge_to(obj, out)
 
-
-
     def render(self, path, content, parameters, seen):
         '''
         Renders a given yaml string.
 
         :param str path:  used to prevent infinite recursion in inheritance.
         :param str content: Of yaml file.
         :param dict parameters: For mustache templates.
         :param set seen: Seen files (used for inheritance)
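
merge_to above is what makes $inherits work: lists are concatenated, nested dicts are
merged recursively, and scalars (or type mismatches) are overridden by the source. A
minimal sketch with made-up keys:

    from taskgraph.util.templates import merge_to

    parent = {'list': ['1', '2'], 'obj': {'level': 1, 'from_parent': True}, 'scalar': 'old'}
    child = {'list': ['3'], 'obj': {'level': 2}, 'scalar': 'new'}

    merge_to(child, parent)
    # parent is mutated in place:
    #   'list'   -> ['1', '2', '3']                     (concatenated)
    #   'obj'    -> {'level': 2, 'from_parent': True}   (recursively merged)
    #   'scalar' -> 'new'                               (overridden)
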
--- a/taskcluster/taskgraph/util/time.py
+++ b/taskcluster/taskgraph/util/time.py
@@ -3,93 +3,105 @@
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 # Python port of the ms.js node module. This is not a direct port; some things are
 # more complicated or less precise, and we lean on timedelta here.
 
 import re
 import datetime
 
-PATTERN=re.compile(
+PATTERN = re.compile(
     '((?:\d+)?\.?\d+) *([a-z]+)'
 )
 
+
 def seconds(value):
     return datetime.timedelta(seconds=int(value))
 
+
 def minutes(value):
     return datetime.timedelta(minutes=int(value))
 
+
 def hours(value):
     return datetime.timedelta(hours=int(value))
 
+
 def days(value):
     return datetime.timedelta(days=int(value))
 
+
 def months(value):
     # See warning in years(), below
     return datetime.timedelta(days=int(value) * 30)
 
+
 def years(value):
     # Warning here "years" are vague don't use this for really sensitive date
     # computation the idea is to give you a absolute amount of time in the
     # future which is not the same thing as "precisely on this date next year"
     return datetime.timedelta(days=int(value) * 365)
 
 ALIASES = {}
 ALIASES['seconds'] = ALIASES['second'] = ALIASES['s'] = seconds
 ALIASES['minutes'] = ALIASES['minute'] = ALIASES['min'] = minutes
 ALIASES['hours'] = ALIASES['hour'] = ALIASES['h'] = hours
 ALIASES['days'] = ALIASES['day'] = ALIASES['d'] = days
 ALIASES['months'] = ALIASES['month'] = ALIASES['mo'] = months
 ALIASES['years'] = ALIASES['year'] = ALIASES['y'] = years
 
+
 class InvalidString(Exception):
     pass
 
+
 class UnknownTimeMeasurement(Exception):
     pass
 
+
 def value_of(input_str):
     '''
     Convert a duration string into a datetime.timedelta
     :param str input_str: (ex: 1d, 2d, 6years, 2 seconds)
     :returns: datetime.timedelta for the given amount of time
     '''
 
     matches = PATTERN.search(input_str)
 
     if matches is None or len(matches.groups()) < 2:
         raise InvalidString("'{}' is invalid string".format(input_str))
 
     value, unit = matches.groups()
 
     if unit not in ALIASES:
         raise UnknownTimeMeasurement(
-            '{} is not a valid time measure use one of {}'.format(unit,
-                sorted(ALIASES.keys()))
+            '{} is not a valid time measure use one of {}'.format(
+                unit,
+                sorted(ALIASES.keys())
+            )
         )
 
     return ALIASES[unit](value)
 
+
 def json_time_from_now(input_str, now=None):
     '''
     :param str input_str: Input string (see value of)
     :param datetime now: Optionally set the definition of `now`
     :returns: JSON string representation of time in future.
     '''
 
     if now is None:
         now = datetime.datetime.utcnow()
 
     time = now + value_of(input_str)
 
     # Sorta a big hack but the json schema validator for date does not like the
     # ISO dates until 'Z' (for timezone) is added...
     return time.isoformat() + 'Z'
 
+
 def current_json_time():
     '''
     :returns: JSON string representation of the current time.
     '''
 
     return datetime.datetime.utcnow().isoformat() + 'Z'
-
--- a/tools/lint/flake8.lint
+++ b/tools/lint/flake8.lint
@@ -119,14 +119,15 @@ def lint(files, **lintargs):
 
 
 LINTER = {
     'name': "flake8",
     'description': "Python linter",
     'include': [
         'python/mozlint',
         'tools/lint',
-        'testing/marionette/client'
+        'taskcluster',
+        'testing/marionette/client',
     ],
     'exclude': [],
     'type': 'external',
     'payload': lint,
 }
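
Adding another directory to this linter later is just one more entry in 'include'.
A brand-new external linter follows the same shape as the dict above; the module
name and my_lint function below are hypothetical and shown only to illustrate that
structure:

    # hypothetical tools/lint/mylinter.lint
    def my_lint(files, **lintargs):
        # Run some external tool over `files` and return a list of issue dicts.
        return []

    LINTER = {
        'name': "mylinter",
        'description': "Example external linter",
        'include': [
            'taskcluster',
        ],
        'exclude': [],
        'type': 'external',
        'payload': my_lint,
    }
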