Bug 1383880: allow only one optimization per task; r?ahal draft
authorDustin J. Mitchell <dustin@mozilla.com>
Tue, 01 Aug 2017 17:37:58 +0000
changeset 644473 4b15fa9efb59b1dfaeb75d46f7acfcfb99dc5e4b
parent 644472 1a763b4e44fd206657c671e99fef1c3a08e5fb8f
child 644474 983d8894ab30c0d1a890aae4c2be9e5b237a0b8c
push id73455
push userdmitchell@mozilla.com
push dateFri, 11 Aug 2017 02:28:51 +0000
reviewersahal
bugs1383880
milestone57.0a1
Bug 1383880: allow only one optimization per task; r?ahal It is not at *all* clear how multiple optimizations for a single task should interact. No simple logical operation is right in all cases, and in fact in most imaginable cases the desired behavior turns out to be independent of all but one of the optimizations. For example, given both `seta` and `skip-unless-files-changed` optimizations, if SETA says to skip a test, it is low value and should be skipped regardless of what files have changed. But if SETA says to run a test, then it has likely been skipped in previous pushes, so it should be run regardless of what has changed in this push. MozReview-Commit-ID: 3OsvRnWjai4
taskcluster/ci/android-stuff/kind.yml
taskcluster/taskgraph/generator.py
taskcluster/taskgraph/optimize.py
taskcluster/taskgraph/task.py
taskcluster/taskgraph/test/test_optimize.py
taskcluster/taskgraph/test/test_taskgraph.py
taskcluster/taskgraph/transforms/docker_image.py
taskcluster/taskgraph/transforms/job/__init__.py
taskcluster/taskgraph/transforms/job/toolchain.py
taskcluster/taskgraph/transforms/task.py
taskcluster/taskgraph/transforms/tests.py
--- a/taskcluster/ci/android-stuff/kind.yml
+++ b/taskcluster/ci/android-stuff/kind.yml
@@ -63,22 +63,22 @@ jobs:
             command:
               - "/bin/bash"
               - "-c"
               - "/home/worker/bin/before.sh && /home/worker/bin/build.sh && /home/worker/bin/after.sh && true\n"
             max-run-time: 36000
         scopes:
           - docker-worker:relengapi-proxy:tooltool.download.internal
           - docker-worker:relengapi-proxy:tooltool.download.public
-        optimizations:
-          - - skip-unless-changed
-            - - "mobile/android/config/**"
-              - "testing/mozharness/configs/builds/releng_sub_android_configs/*gradle_dependencies.py"
-              - "**/*.gradle"
-              - "taskcluster/docker/android-gradle-build/**"
+        optimization:
+          skip-unless-changed:
+            - "mobile/android/config/**"
+            - "testing/mozharness/configs/builds/releng_sub_android_configs/*gradle_dependencies.py"
+            - "**/*.gradle"
+            - "taskcluster/docker/android-gradle-build/**"
 
     android-test:
         description: "Android armv7 unit tests"
         treeherder:
             platform: android-4-0-armv7-api15/opt
             kind: test
             tier: 2
             symbol: tc(test)
@@ -113,20 +113,20 @@ jobs:
             command:
               # NOTE: this could probably be a job description with run.using = 'mozharness'
               - "/bin/bash"
               - "bin/build.sh"
             max-run-time: 36000
         scopes:
           - docker-worker:relengapi-proxy:tooltool.download.internal
           - docker-worker:relengapi-proxy:tooltool.download.public
-        optimizations:
-          - - skip-unless-changed
-            - - "mobile/android/base/**"
-              - "mobile/android/tests/background/junit4/**"
+        optimization:
+          skip-unless-changed:
+            - "mobile/android/base/**"
+            - "mobile/android/tests/background/junit4/**"
 
     android-lint:
         description: "Android lint"
         treeherder:
             platform: android-4-0-armv7-api15/opt
             kind: test
             tier: 2
             symbol: tc(lint)
@@ -176,27 +176,27 @@ jobs:
             command:
               # NOTE: this could probably be a job description with run.using = 'mozharness'
               - "/bin/bash"
               - "bin/build.sh"
             max-run-time: 36000
         scopes:
           - docker-worker:relengapi-proxy:tooltool.download.internal
           - docker-worker:relengapi-proxy:tooltool.download.public
-        optimizations:
-          - - skip-unless-changed
-            - - "mobile/android/**/*.java"
-              - "mobile/android/**/*.jpeg"
-              - "mobile/android/**/*.jpg"
-              - "mobile/android/**/*.png"
-              - "mobile/android/**/*.svg"
-              - "mobile/android/**/*.xml" # Manifest & android resources
-              - "mobile/android/**/*.gradle"
-              - "mobile/android/**/Makefile.in"
-              - "mobile/android/**/moz.build"
+        optimization:
+          skip-unless-changed:
+            - "mobile/android/**/*.java"
+            - "mobile/android/**/*.jpeg"
+            - "mobile/android/**/*.jpg"
+            - "mobile/android/**/*.png"
+            - "mobile/android/**/*.svg"
+            - "mobile/android/**/*.xml" # Manifest & android resources
+            - "mobile/android/**/*.gradle"
+            - "mobile/android/**/Makefile.in"
+            - "mobile/android/**/moz.build"
 
     android-checkstyle:
         description: "Android checkstyle"
         treeherder:
             platform: android-4-0-armv7-api15/opt
             kind: test
             tier: 2
             symbol: tc(checkstyle)
@@ -234,23 +234,23 @@ jobs:
             command:
               # NOTE: this could probably be a job description with run.using = 'mozharness'
               - "/bin/bash"
               - "bin/build.sh"
             max-run-time: 36000
         scopes:
           - docker-worker:relengapi-proxy:tooltool.download.internal
           - docker-worker:relengapi-proxy:tooltool.download.public
-        optimizations:
-          - - skip-unless-changed
-            - - "mobile/android/**/checkstyle.xml"
-              - "mobile/android/**/*.java"
-              - "mobile/android/**/*.gradle"
-              - "mobile/android/**/Makefile.in"
-              - "mobile/android/**/moz.build"
+        optimization:
+          skip-unless-changed:
+            - "mobile/android/**/checkstyle.xml"
+            - "mobile/android/**/*.java"
+            - "mobile/android/**/*.gradle"
+            - "mobile/android/**/Makefile.in"
+            - "mobile/android/**/moz.build"
 
     android-findbugs:
         description: "Android findbugs"
         treeherder:
             platform: android-4-0-armv7-api15/opt
             kind: test
             tier: 2
             symbol: tc(findbugs)
@@ -294,14 +294,14 @@ jobs:
             command:
               # NOTE: this could probably be a job description with run.using = 'mozharness'
               - "/bin/bash"
               - "bin/build.sh"
             max-run-time: 36000
         scopes:
           - docker-worker:relengapi-proxy:tooltool.download.internal
           - docker-worker:relengapi-proxy:tooltool.download.public
-        optimizations:
-          - - skip-unless-changed
-            - - "mobile/android/**/*.java"
-              - "mobile/android/**/*.gradle"
-              - "mobile/android/**/Makefile.in"
-              - "mobile/android/**/moz.build"
+        optimization:
+          skip-unless-changed:
+            - "mobile/android/**/*.java"
+            - "mobile/android/**/*.gradle"
+            - "mobile/android/**/Makefile.in"
+            - "mobile/android/**/moz.build"
--- a/taskcluster/taskgraph/generator.py
+++ b/taskcluster/taskgraph/generator.py
@@ -62,17 +62,17 @@ class Kind(object):
 
         # perform the transformations on the loaded inputs
         trans_config = TransformConfig(self.name, self.path, config, parameters,
                                        kind_dependencies_tasks)
         tasks = [Task(self.name,
                       label=task_dict['label'],
                       attributes=task_dict['attributes'],
                       task=task_dict['task'],
-                      optimizations=task_dict.get('optimizations'),
+                      optimization=task_dict.get('optimization'),
                       dependencies=task_dict.get('dependencies'))
                  for task_dict in transforms(trans_config, inputs)]
         return tasks
 
 
 class TaskGraphGenerator(object):
     """
     The central controller for taskgraph.  This handles all phases of graph
--- a/taskcluster/taskgraph/optimize.py
+++ b/taskcluster/taskgraph/optimize.py
@@ -40,27 +40,23 @@ def optimize_task_graph(target_task_grap
                         named_links_dict=named_links_dict,
                         label_to_taskid=label_to_taskid,
                         existing_tasks=existing_tasks)
     return get_subgraph(target_task_graph, named_links_dict, label_to_taskid), label_to_taskid
 
 
 def optimize_task(task, params):
     """
-    Optimize a single task by running its optimizations in order until one
-    succeeds.
+    Run the optimization for a given task
     """
-    for opt in task.optimizations:
-        opt_type, args = opt[0], opt[1:]
-        opt_fn = _optimizations[opt_type]
-        opt_result = opt_fn(task, params, *args)
-        if opt_result:
-            return opt_result
-
-    return False
+    if not task.optimization:
+        return False
+    # canonically a single-entry dict {name: arg} (see the task-description
+    # schema); the unit tests pass a bare (name, arg) pair -- accept both
+    opt_type, arg = task.optimization.items()[0] if isinstance(task.optimization, dict) else task.optimization
+    return _optimizations[opt_type](task, params, arg)
 
 
 def annotate_task_graph(target_task_graph, params, do_not_optimize,
                         named_links_dict, label_to_taskid, existing_tasks):
     """
     Annotate each task in the graph with .optimized (boolean) and .task_id
     (possibly None), following the rules for optimization and calling the task
     kinds' `optimize_task` method.
@@ -170,31 +166,32 @@ def optimization(name):
         if name in _optimizations:
             raise Exception("multiple optimizations with name {}".format(name))
         _optimizations[name] = func
         return func
     return wrap
 
 
 @optimization('index-search')
-def opt_index_search(task, params, index_path):
-    try:
-        task_id = find_task_id(
-            index_path,
-            use_proxy=bool(os.environ.get('TASK_ID')))
-
-        return task_id or True
-    except requests.exceptions.HTTPError:
-        pass
+def opt_index_search(task, params, index_paths):
+    for index_path in index_paths:
+        try:
+            task_id = find_task_id(
+                index_path,
+                use_proxy=bool(os.environ.get('TASK_ID')))
+            return task_id
+        except requests.exceptions.HTTPError:
+            # 404 will end up here and go on to the next index path
+            pass
 
     return False
 
 
 @optimization('seta')
-def opt_seta(task, params):
+def opt_seta(task, params, _):
     bbb_task = False
 
     # for bbb tasks we need to send in the buildbot buildername
     if task.task.get('provisionerId', '') == 'buildbot-bridge':
         label = task.task.get('payload').get('buildername')
         bbb_task = True
     else:
         label = task.label
--- a/taskcluster/taskgraph/task.py
+++ b/taskcluster/taskgraph/task.py
@@ -8,64 +8,64 @@ from __future__ import absolute_import, 
 class Task(object):
     """
     Representation of a task in a TaskGraph.  Each Task has, at creation:
 
     - kind: the name of the task kind
     - label; the label for this task
     - attributes: a dictionary of attributes for this task (used for filtering)
     - task: the task definition (JSON-able dictionary)
-    - optimizations: optimizations to apply to the task (see taskgraph.optimize)
+    - optimization: optimization to apply to the task (see taskgraph.optimize)
     - dependencies: tasks this one depends on, in the form {name: label}, for example
       {'build': 'build-linux64/opt', 'docker-image': 'build-docker-image-desktop-test'}
 
     And later, as the task-graph processing proceeds:
 
     - task_id -- TaskCluster taskId under which this task will be created
     - optimized -- true if this task need not be performed
 
     This class is just a convenience wraper for the data type and managing
     display, comparison, serialization, etc. It has no functionality of its own.
     """
     def __init__(self, kind, label, attributes, task,
-                 optimizations=None, dependencies=None):
+                 optimization=None, dependencies=None):
         self.kind = kind
         self.label = label
         self.attributes = attributes
         self.task = task
 
         self.task_id = None
         self.optimized = False
 
         self.attributes['kind'] = kind
 
-        self.optimizations = optimizations or []
+        self.optimization = optimization
         self.dependencies = dependencies or {}
 
     def __eq__(self, other):
         return self.kind == other.kind and \
             self.label == other.label and \
             self.attributes == other.attributes and \
             self.task == other.task and \
             self.task_id == other.task_id and \
-            self.optimizations == other.optimizations and \
+            self.optimization == other.optimization and \
             self.dependencies == other.dependencies
 
     def __repr__(self):
         return ('Task({kind!r}, {label!r}, {attributes!r}, {task!r}, '
-                'optimizations={optimizations!r}, '
+                'optimization={optimization!r}, '
                 'dependencies={dependencies!r})'.format(**self.__dict__))
 
     def to_json(self):
         rv = {
             'kind': self.kind,
             'label': self.label,
             'attributes': self.attributes,
             'dependencies': self.dependencies,
-            'optimizations': self.optimizations,
+            'optimization': self.optimization,
             'task': self.task,
         }
         if self.task_id:
             rv['task_id'] = self.task_id
         return rv
 
     @classmethod
     def from_json(cls, task_dict):
@@ -74,13 +74,13 @@ class Task(object):
         the original Task object.  This is used to "resume" the task-graph
         generation process, for example in Action tasks.
         """
         rv = cls(
             kind=task_dict['kind'],
             label=task_dict['label'],
             attributes=task_dict['attributes'],
             task=task_dict['task'],
-            optimizations=task_dict['optimizations'],
+            optimization=task_dict['optimization'],
             dependencies=task_dict.get('dependencies'))
         if 'task_id' in task_dict:
             rv.task_id = task_dict['task_id']
         return rv
--- a/taskcluster/taskgraph/test/test_optimize.py
+++ b/taskcluster/taskgraph/test/test_optimize.py
@@ -56,28 +56,28 @@ class TestResolveTaskReferences(unittest
 
 class TestOptimize(unittest.TestCase):
 
     kind = None
 
     @classmethod
     def setUpClass(cls):
         # set up some simple optimization functions
-        optimization('no-optimize')(lambda self, params: False)
-        optimization('optimize-away')(lambda self, params: True)
+        optimization('no-optimize')(lambda self, params, arg: False)
+        optimization('optimize-away')(lambda self, params, arg: True)
         optimization('optimize-to-task')(lambda self, params, task: task)
 
     def make_task(self, label, optimization=None, task_def=None, optimized=None, task_id=None):
         task_def = task_def or {'sample': 'task-def'}
         task = Task(kind='test', label=label, attributes={}, task=task_def)
         task.optimized = optimized
         if optimization:
-            task.optimizations = [optimization]
+            task.optimization = optimization
         else:
-            task.optimizations = []
+            task.optimization = None
         task.task_id = task_id
         return task
 
     def make_graph(self, *tasks_and_edges):
         tasks = {t.label: t for t in tasks_and_edges if isinstance(t, Task)}
         edges = {e for e in tasks_and_edges if not isinstance(e, Task)}
         return TaskGraph(tasks, graph.Graph(set(tasks), edges))
 
@@ -87,35 +87,35 @@ class TestOptimize(unittest.TestCase):
         got_annotations = {
             t.label: repl(t.task_id) or t.optimized for t in graph.tasks.itervalues()
         }
         self.assertEqual(got_annotations, annotations)
 
     def test_annotate_task_graph_no_optimize(self):
         "annotating marks everything as un-optimized if the kind returns that"
         graph = self.make_graph(
-            self.make_task('task1', ['no-optimize']),
-            self.make_task('task2', ['no-optimize']),
-            self.make_task('task3', ['no-optimize']),
+            self.make_task('task1', ['no-optimize', []]),
+            self.make_task('task2', ['no-optimize', []]),
+            self.make_task('task3', ['no-optimize', []]),
             ('task2', 'task1', 'build'),
             ('task2', 'task3', 'image'),
         )
         annotate_task_graph(graph, {}, set(), graph.graph.named_links_dict(), {}, None)
         self.assert_annotations(
             graph,
             task1=False,
             task2=False,
             task3=False
         )
 
     def test_annotate_task_graph_optimize_away_dependency(self):
         "raises exception if kind optimizes away a task on which another depends"
         graph = self.make_graph(
-            self.make_task('task1', ['optimize-away']),
-            self.make_task('task2', ['no-optimize']),
+            self.make_task('task1', ['optimize-away', []]),
+            self.make_task('task2', ['no-optimize', []]),
             ('task2', 'task1', 'build'),
         )
         self.assertRaises(
             Exception,
             lambda: annotate_task_graph(graph, {}, set(), graph.graph.named_links_dict(), {}, None)
         )
 
     def test_annotate_task_graph_do_not_optimize(self):
@@ -133,17 +133,17 @@ class TestOptimize(unittest.TestCase):
             task1=False,
             task2=False
         )
         self.assertEqual
 
     def test_annotate_task_graph_nos_do_not_propagate(self):
         "a task with a non-optimized dependency can be optimized"
         graph = self.make_graph(
-            self.make_task('task1', ['no-optimize']),
+            self.make_task('task1', ['no-optimize', []]),
             self.make_task('task2', ['optimize-to-task', 'taskid']),
             self.make_task('task3', ['optimize-to-task', 'taskid']),
             ('task2', 'task1', 'build'),
             ('task2', 'task3', 'image'),
         )
         annotate_task_graph(graph, {}, set(),
                             graph.graph.named_links_dict(), {}, None)
         self.assert_annotations(
@@ -229,18 +229,18 @@ class TestOptimize(unittest.TestCase):
         self.assertEqual(sorted(sub.tasks[task2].task['dependencies']), sorted([task3, 'dep1']))
         self.assertEqual(sub.tasks[task2].task['payload'], 'http://dep1/' + task3)
         self.assertEqual(sub.tasks[task3].task_id, task3)
 
     def test_optimize(self):
         "optimize_task_graph annotates and extracts the subgraph from a simple graph"
         input = self.make_graph(
             self.make_task('task1', ['optimize-to-task', 'dep1']),
-            self.make_task('task2', ['no-optimize']),
-            self.make_task('task3', ['no-optimize']),
+            self.make_task('task2', ['no-optimize', []]),
+            self.make_task('task3', ['no-optimize', []]),
             ('task2', 'task1', 'build'),
             ('task2', 'task3', 'image'),
         )
         opt, label_to_taskid = optimize_task_graph(input, {}, set())
         self.assertEqual(opt.graph, graph.Graph(
             {label_to_taskid['task2'], label_to_taskid['task3']},
             {(label_to_taskid['task2'], label_to_taskid['task3'], 'image')}))
 
--- a/taskcluster/taskgraph/test/test_taskgraph.py
+++ b/taskcluster/taskgraph/test/test_taskgraph.py
@@ -19,60 +19,60 @@ class TestTaskGraph(unittest.TestCase):
     def test_taskgraph_to_json(self):
         tasks = {
             'a': Task(kind='test', label='a',
                       attributes={'attr': 'a-task'},
                       task={'taskdef': True}),
             'b': Task(kind='test', label='b',
                       attributes={},
                       task={'task': 'def'},
-                      optimizations=[['seta']],
+                      optimization={'seta': None},
                       # note that this dep is ignored, superseded by that
                       # from the taskgraph's edges
                       dependencies={'first': 'a'}),
         }
         graph = Graph(nodes=set('ab'), edges={('a', 'b', 'edgelabel')})
         taskgraph = TaskGraph(tasks, graph)
 
         res = taskgraph.to_json()
 
         self.assertEqual(res, {
             'a': {
                 'kind': 'test',
                 'label': 'a',
                 'attributes': {'attr': 'a-task', 'kind': 'test'},
                 'task': {'taskdef': True},
                 'dependencies': {'edgelabel': 'b'},
-                'optimizations': [],
+                'optimization': None,
             },
             'b': {
                 'kind': 'test',
                 'label': 'b',
                 'attributes': {'kind': 'test'},
                 'task': {'task': 'def'},
                 'dependencies': {},
-                'optimizations': [['seta']],
+                'optimization': {'seta': None},
             }
         })
 
     def test_round_trip(self):
         graph = TaskGraph(tasks={
             'a': Task(
                 kind='fancy',
                 label='a',
                 attributes={},
                 dependencies={'prereq': 'b'},  # must match edges, below
-                optimizations=[['seta']],
+                optimization={'seta': None},
                 task={'task': 'def'}),
             'b': Task(
                 kind='pre',
                 label='b',
                 attributes={},
                 dependencies={},
-                optimizations=[['seta']],
+                optimization={'seta': None},
                 task={'task': 'def2'}),
         }, graph=Graph(nodes={'a', 'b'}, edges={('a', 'b', 'prereq')}))
 
         tasks, new_graph = TaskGraph.from_json(graph.to_json())
         self.assertEqual(graph, new_graph)
 
 
 if __name__ == '__main__':
--- a/taskcluster/taskgraph/transforms/docker_image.py
+++ b/taskcluster/taskgraph/transforms/docker_image.py
@@ -56,36 +56,36 @@ def fill_template(config, tasks):
                 context_hash=context_hash,
             ))
 
         # As an optimization, if the context hash exists for a high level, that image
         # task ID will be used.  The reasoning behind this is that eventually everything ends
         # up on level 3 at some point if most tasks use this as a common image
         # for a given context hash, a worker within Taskcluster does not need to contain
         # the same image per branch.
-        optimizations = [['index-search', '{}.level-{}.{}.hash.{}'.format(
-            INDEX_PREFIX, level, image_name, context_hash)]
-            for level in reversed(range(int(config.params['level']), 4))]
+        optimization = {'index-search': ['{}.level-{}.{}.hash.{}'.format(
+            INDEX_PREFIX, level, image_name, context_hash)
+            for level in reversed(range(int(config.params['level']), 4))]}
 
         # Adjust the zstandard compression level based on the execution level.
         # We use faster compression for level 1 because we care more about
         # end-to-end times. We use slower/better compression for other levels
         # because images are read more often and it is worth the trade-off to
         # burn more CPU once to reduce image size.
         zstd_level = '3' if int(config.params['level']) == 1 else '10'
 
         # include some information that is useful in reconstructing this task
         # from JSON
         taskdesc = {
             'label': 'build-docker-image-' + image_name,
             'description': description,
             'attributes': {'image_name': image_name},
             'expires-after': '1 year',
             'routes': routes,
-            'optimizations': optimizations,
+            'optimization': optimization,
             'scopes': ['secrets:get:project/taskcluster/gecko/hgfingerprint'],
             'treeherder': {
                 'symbol': job_symbol,
                 'platform': 'taskcluster-images/opt',
                 'kind': 'other',
                 'tier': 1,
             },
             'run-on-projects': [],
--- a/taskcluster/taskgraph/transforms/job/__init__.py
+++ b/taskcluster/taskgraph/transforms/job/__init__.py
@@ -53,22 +53,22 @@ job_description_schema = Schema({
     Optional('routes'): task_description_schema['routes'],
     Optional('scopes'): task_description_schema['scopes'],
     Optional('tags'): task_description_schema['tags'],
     Optional('extra'): task_description_schema['extra'],
     Optional('treeherder'): task_description_schema['treeherder'],
     Optional('index'): task_description_schema['index'],
     Optional('run-on-projects'): task_description_schema['run-on-projects'],
     Optional('coalesce-name'): task_description_schema['coalesce-name'],
-    Optional('optimizations'): task_description_schema['optimizations'],
+    Optional('optimization'): task_description_schema['optimization'],
     Optional('needs-sccache'): task_description_schema['needs-sccache'],
 
     # The "when" section contains descriptions of the circumstances
     # under which this task should be included in the task graph.  This
-    # will be converted into an element in the `optimizations` list.
+    # will be converted into an optimization.
     Optional('when'): Any({
         # This task only needs to be run if a file matching one of the given
         # patterns has changed in the push.  The patterns use the mozpack
         # match function (python/mozbuild/mozpack/path.py).
         Optional('files-changed'): [basestring],
     }),
 
     # A description of how to run this job.
@@ -97,32 +97,37 @@ def validate(config, jobs):
         yield validate_schema(job_description_schema, job,
                               "In job {!r}:".format(job.get('name', job.get('label'))))
 
 
 @transforms.add
 def rewrite_when_to_optimization(config, jobs):
     for job in jobs:
         when = job.pop('when', {})
-        files_changed = when.get('files-changed')
-        if not files_changed:
+        if not when:
             yield job
             continue
 
+        if job.get('optimization'):
+            raise Exception("{}: cannot use both job.optimization and job.when"
+                            .format(job.get('name', job.get('label'))))
+
+        files_changed = when.get('files-changed')
+
         # add some common files
         files_changed.extend([
             '{}/**'.format(config.path),
             'taskcluster/taskgraph/**',
         ])
         if 'in-tree' in job.get('worker', {}).get('docker-image', {}):
             files_changed.append('taskcluster/docker/{}/**'.format(
                 job['worker']['docker-image']['in-tree']))
 
         # "only when files changed" implies "skip if files have not changed"
-        job.setdefault('optimizations', []).append(['skip-unless-changed', files_changed])
+        job['optimization'] = {'skip-unless-changed': files_changed}
 
         assert 'when' not in job
         yield job
 
 
 @transforms.add
 def make_task_description(config, jobs):
     """Given a build description, create a task description"""
--- a/taskcluster/taskgraph/transforms/job/toolchain.py
+++ b/taskcluster/taskgraph/transforms/job/toolchain.py
@@ -47,17 +47,17 @@ toolchain_run_schema = Schema({
     Required('toolchain-artifact'): basestring,
 
     # An alias that can be used instead of the real toolchain job name in
     # the toolchains list for build jobs.
     Optional('toolchain-alias'): basestring,
 })
 
 
-def add_optimizations(config, run, taskdesc):
+def add_optimization(config, run, taskdesc):
     files = list(run.get('resources', []))
     # This file
     files.append('taskcluster/taskgraph/transforms/job/toolchain.py')
     # The script
     files.append('taskcluster/scripts/misc/{}'.format(run['script']))
 
     digest = hash_paths(GECKO, files)
 
@@ -71,23 +71,21 @@ def add_optimizations(config, run, taskd
         digest = hashlib.sha256('\n'.join(data)).hexdigest()
 
     label = taskdesc['label']
     subs = {
         'name': label.replace('%s-' % config.kind, ''),
         'digest': digest,
     }
 
-    optimizations = taskdesc.setdefault('optimizations', [])
-
     # We'll try to find a cached version of the toolchain at levels above
     # and including the current level, starting at the highest level.
     for level in reversed(range(int(config.params['level']), 4)):
         subs['level'] = level
-        optimizations.append(['index-search', TOOLCHAIN_INDEX.format(**subs)])
+        taskdesc.setdefault('optimization', {'index-search': []})['index-search'].append(TOOLCHAIN_INDEX.format(**subs))
 
     # ... and cache at the lowest level.
     taskdesc.setdefault('routes', []).append(
         'index.{}'.format(TOOLCHAIN_INDEX.format(**subs)))
 
 
 @run_job_using("docker-worker", "toolchain-script", schema=toolchain_run_schema)
 def docker_worker_toolchain(config, job, taskdesc):
@@ -147,17 +145,17 @@ def docker_worker_toolchain(config, job,
             run['script'])
     ]
 
     attributes = taskdesc.setdefault('attributes', {})
     attributes['toolchain-artifact'] = run['toolchain-artifact']
     if 'toolchain-alias' in run:
         attributes['toolchain-alias'] = run['toolchain-alias']
 
-    add_optimizations(config, run, taskdesc)
+    add_optimization(config, run, taskdesc)
 
 
 @run_job_using("generic-worker", "toolchain-script", schema=toolchain_run_schema)
 def windows_toolchain(config, job, taskdesc):
     run = job['run']
     taskdesc['run-on-projects'] = ['trunk', 'try']
 
     worker = taskdesc['worker']
@@ -194,9 +192,9 @@ def windows_toolchain(config, job, taskd
         r'{} -c ./build/src/taskcluster/scripts/misc/{}'.format(bash, run['script'])
     ]
 
     attributes = taskdesc.setdefault('attributes', {})
     attributes['toolchain-artifact'] = run['toolchain-artifact']
     if 'toolchain-alias' in run:
         attributes['toolchain-alias'] = run['toolchain-alias']
 
-    add_optimizations(config, run, taskdesc)
+    add_optimization(config, run, taskdesc)
--- a/taskcluster/taskgraph/transforms/task.py
+++ b/taskcluster/taskgraph/transforms/task.py
@@ -129,27 +129,29 @@ task_description_schema = Schema({
     # See the attributes documentation for details.
     Optional('run-on-projects'): [basestring],
 
     # If the task can be coalesced, this is the name used in the coalesce key
     # the project, etc. will be added automatically.  Note that try (level 1)
     # tasks are never coalesced
     Optional('coalesce-name'): basestring,
 
-    # Optimizations to perform on this task during the optimization phase,
-    # specified in order.  These optimizations are defined in
-    # taskcluster/taskgraph/optimize.py.
-    Optional('optimizations'): [Any(
-        # search the index for the given index namespace, and replace this task if found
-        ['index-search', basestring],
+    # Optimization to perform on this task during the optimization phase.
+    # Optimizations are defined in taskcluster/taskgraph/optimize.py.
+    Required('optimization', default=None): Any(
+        # always run this task (default)
+        None,
+        # search the index for the given index namespaces, and replace this task if found
+        # the search occurs in order, with the first match winning
+        {'index-search': [basestring]},
         # consult SETA and skip this task if it is low-value
-        ['seta'],
+        {'seta': None},
         # skip this task if none of the given file patterns match
-        ['skip-unless-changed', [basestring]],
-    )],
+        {'skip-unless-changed': [basestring]},
+    ),
 
     # the provisioner-id/worker-type for the task.  The following parameters will
     # be substituted in this string:
     #  {level} -- the scm level of this push
     'worker-type': basestring,
 
     # Whether the job should use sccache compiler caching.
     Required('needs-sccache', default=False): bool,
@@ -1056,17 +1058,17 @@ def build_task(config, tasks):
                 env = payload.setdefault('env', {})
                 env['MOZ_AUTOMATION'] = '1'
 
         yield {
             'label': task['label'],
             'task': task_def,
             'dependencies': task.get('dependencies', {}),
             'attributes': attributes,
-            'optimizations': task.get('optimizations', []),
+            'optimization': task.get('optimization', None),
         }
 
 
 # Check that the v2 route templates match those used by Mozharness.  This can
 # go away once Mozharness builds are no longer performed in Buildbot, and the
 # Mozharness code referencing routes.json is deleted.
 def check_v2_routes():
     with open(os.path.join(GECKO, "testing/mozharness/configs/routes.json"), "rb") as f:
--- a/taskcluster/taskgraph/transforms/tests.py
+++ b/taskcluster/taskgraph/transforms/tests.py
@@ -881,17 +881,16 @@ def make_job_description(config, tests):
             attr_try_name: try_name,
         })
 
         jobdesc = {}
         name = '{}-{}'.format(test['test-platform'], test['test-name'])
         jobdesc['name'] = name
         jobdesc['label'] = label
         jobdesc['description'] = test['description']
-        jobdesc['when'] = test.get('when', {})
         jobdesc['attributes'] = attributes
         jobdesc['dependencies'] = {'build': build_label}
 
         if test['mozharness']['requires-signed-builds'] is True:
             jobdesc['dependencies']['build-signing'] = test['build-signing-label']
 
         jobdesc['expires-after'] = test['expires-after']
         jobdesc['routes'] = []
@@ -911,19 +910,21 @@ def make_job_description(config, tests):
         jobdesc['treeherder'] = {
             'symbol': test['treeherder-symbol'],
             'kind': 'test',
             'tier': test['tier'],
             'platform': test.get('treeherder-machine-platform', test['build-platform']),
         }
 
         # run SETA unless this is a try push
-        jobdesc['optimizations'] = optimizations = []
-        if config.params['project'] != 'try':
-            optimizations.append(['seta'])
+        if config.params['project'] == 'try':
+            jobdesc['when'] = test.get('when', {})
+        else:
+            # when SETA is enabled, the 'when' does not apply (optimizations don't mix)
+            jobdesc['optimization'] = {'seta': None}
 
         run = jobdesc['run'] = {}
         run['using'] = 'mozharness-test'
         run['test'] = test
 
         jobdesc['worker-type'] = test.pop('worker-type')
 
         yield jobdesc