Bug 1472792: give both retrigger actions the same name; r?bstack (draft)
author      Dustin J. Mitchell <dustin@mozilla.com>
date        Fri, 06 Jul 2018 20:28:23 +0000
changeset   815622 0c6a04b45e71559ad5c8aecf6edab34be6398178
parent      814904 fa376bf17cc95539f5e37186977d760296fb5093
push id     115582
push user   dmitchell@mozilla.com
push date   Mon, 09 Jul 2018 15:33:50 +0000
reviewers   bstack
bugs        1472792
milestone   63.0a1
Bug 1472792: give both retrigger actions the same name; r?bstack

This additionally reconsiders the order of all of the actions, spacing them
50 "units" apart and putting the more common actions first.

MozReview-Commit-ID: 98IOYKVMcGU
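For quick reference, the summary below is transcribed from the hunks that follow (it is not part of the patch itself): the new ``order`` values are spaced roughly 50 apart so future actions can be slotted in between, smaller values sort earlier, and the two retrigger variants now share the public name ``retrigger`` at the top of the list.

# Reference summary of the order values assigned in this changeset
# (transcribed from the diff below; smaller values sort earlier).
NEW_ORDERS = {
    'retrigger (mochitest/reftest variant)': 10,
    'retrigger (generic fallback)': 11,
    'create-interactive': 50,
    'add-new-jobs': 100,
    'run-all-talos': 150,
    'backfill': 200,
    'run-missing-tests': 250,
    'rerun': 300,
    'cancel': 350,
    'cancel-all': 400,
    'purge-caches': 450,
    'release-promotion': 500,
}

for action, order in sorted(NEW_ORDERS.items(), key=lambda item: item[1]):
    print('{:>4}  {}'.format(order, action))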
taskcluster/docs/actions.rst
taskcluster/taskgraph/actions/add_new_jobs.py
taskcluster/taskgraph/actions/add_talos.py
taskcluster/taskgraph/actions/backfill.py
taskcluster/taskgraph/actions/cancel.py
taskcluster/taskgraph/actions/cancel_all.py
taskcluster/taskgraph/actions/create_interactive.py
taskcluster/taskgraph/actions/mochitest_retrigger.py
taskcluster/taskgraph/actions/purge_caches.py
taskcluster/taskgraph/actions/release_promotion.py
taskcluster/taskgraph/actions/rerun.py
taskcluster/taskgraph/actions/retrigger.py
taskcluster/taskgraph/actions/run_missing_tests.py
--- a/taskcluster/docs/actions.rst
+++ b/taskcluster/docs/actions.rst
@@ -74,16 +74,19 @@ The arguments are:
 ``task``
   the definition of the target task (or None, as for ``task_id``)
 
 The example above defines an action that is available in the context-menu for
 the entire task-group (result-set or push in Treeherder terminology). To create
 an action that shows up in the context menu for a task we would specify the
 ``context`` parameter.
 
+The ``order`` value is the sort key defining the order of actions in the
+resulting ``actions.json`` file.  If multiple actions have the same name and
+match the same task, the action with the smallest ``order`` will be used.
 
 Setting the Action Context
 ..........................
 The context parameter should be a list of tag-sets, such as
 ``context=[{"platform": "linux"}]``, which will make the task show up in the
 context-menu for any task with ``task.tags.platform = 'linux'``. Below is
 some examples of context parameters and the resulting conditions on
 ``task.tags`` (tags used below are just illustrative).
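The selection rule added above can be illustrated with a small sketch; this is a hypothetical helper, not the actual ``actions.json`` consumer (e.g. Treeherder), and the data layout is assumed for the example only: given the action entries and a task's tags, it picks, per action name, the matching entry with the smallest ``order``.

def select_actions(actions, task_tags):
    """Pick, per action name, the matching entry with the smallest `order`.

    `actions` is a list of dicts with 'name', 'order' and 'context' (a list of
    tag-sets). An entry matches a task when any of its tag-sets is a subset of
    the task's tags. Hypothetical helper, shown only to illustrate the rule
    documented above.
    """
    def matches(entry):
        return any(all(task_tags.get(tag) == value
                       for tag, value in tag_set.items())
                   for tag_set in entry['context'])

    chosen = {}
    for entry in sorted(actions, key=lambda entry: entry['order']):
        if entry['name'] not in chosen and matches(entry):
            chosen[entry['name']] = entry
    return sorted(chosen.values(), key=lambda entry: entry['order'])


# Example: two same-named actions, one scoped to linux tasks, one generic.
# The linux task gets the more specific entry; other tasks fall back.
example = [
    {'name': 'example-action', 'order': 10, 'context': [{'platform': 'linux'}]},
    {'name': 'example-action', 'order': 20, 'context': [{}]},
]
assert select_actions(example, {'platform': 'linux'})[0]['order'] == 10
assert select_actions(example, {'platform': 'win64'})[0]['order'] == 20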
--- a/taskcluster/taskgraph/actions/add_new_jobs.py
+++ b/taskcluster/taskgraph/actions/add_new_jobs.py
@@ -13,17 +13,17 @@ from .util import (create_tasks, fetch_g
 
 @register_callback_action(
     name='add-new-jobs',
     title='Add new jobs',
     kind='hook',
     generic=True,
     symbol='add-new',
     description="Add new jobs using task labels.",
-    order=10000,
+    order=100,
     context=[],
     schema={
         'type': 'object',
         'properties': {
             'tasks': {
                 'type': 'array',
                 'description': 'An array of task labels',
                 'items': {
--- a/taskcluster/taskgraph/actions/add_talos.py
+++ b/taskcluster/taskgraph/actions/add_talos.py
@@ -16,17 +16,17 @@ logger = logging.getLogger(__name__)
 
 @register_callback_action(
     name='run-all-talos',
     title='Run All Talos Tests',
     kind='hook',
     generic=True,
     symbol='raT',
     description="Add all Talos tasks to a push.",
-    order=100,  # Useful for sheriffs, but not top of the list
+    order=150,
     context=[],
     schema={
         'type': 'object',
         'properties': {
             'times': {
                 'type': 'integer',
                 'default': 1,
                 'minimum': 1,
--- a/taskcluster/taskgraph/actions/backfill.py
+++ b/taskcluster/taskgraph/actions/backfill.py
@@ -26,17 +26,17 @@ logger = logging.getLogger(__name__)
     title='Backfill',
     name='backfill',
     kind='hook',
     generic=True,
     symbol='Bk',
     description=('Take the label of the current task, '
                  'and trigger the task with that label '
                  'on previous pushes in the same project.'),
-    order=0,
+    order=200,
     context=[{}],  # This will be available for all tasks
     schema={
         'type': 'object',
         'properties': {
             'depth': {
                 'type': 'integer',
                 'default': 5,
                 'minimum': 1,
--- a/taskcluster/taskgraph/actions/cancel.py
+++ b/taskcluster/taskgraph/actions/cancel.py
@@ -14,15 +14,15 @@ from .registry import register_callback_
     title='Cancel Task',
     name='cancel',
     symbol='cx',
     kind='hook',
     generic=True,
     description=(
         'Cancel the given task'
     ),
-    order=100,
+    order=350,
     context=[{}]
 )
 def cancel_action(parameters, graph_config, input, task_group_id, task_id, task):
     # Note that this is limited by the scopes afforded to generic actions to
     # only cancel tasks with the level-specific schedulerId.
     cancel_task(task_id, use_proxy=True)
--- a/taskcluster/taskgraph/actions/cancel_all.py
+++ b/taskcluster/taskgraph/actions/cancel_all.py
@@ -42,17 +42,17 @@ def list_group(task_group_id, session):
     name='cancel-all',
     kind='hook',
     generic=True,
     symbol='cAll',
     description=(
         'Cancel all running and pending tasks created by the decision task '
         'this action task is associated with.'
     ),
-    order=100,
+    order=400,
     context=[]
 )
 def cancel_all_action(parameters, graph_config, input, task_group_id, task_id, task):
     session = get_session()
     own_task_id = os.environ.get('TASK_ID', '')
     with futures.ThreadPoolExecutor(CONCURRENCY) as e:
         cancels_jobs = [
             e.submit(cancel_task, t, use_proxy=True)
--- a/taskcluster/taskgraph/actions/create_interactive.py
+++ b/taskcluster/taskgraph/actions/create_interactive.py
@@ -29,17 +29,17 @@ task. You may need to wait for it to beg
     title='Create Interactive Task',
     name='create-interactive',
     symbol='create-inter',
     kind='hook',
     generic=True,
     description=(
         'Create a copy of the task that you can interact with'
     ),
-    order=1,
+    order=50,
     context=[{'worker-implementation': 'docker-worker'}],
     schema={
         'type': 'object',
         'properties': {
             'notify': {
                 'type': 'string',
                 'format': 'email',
                 'title': 'Who to notify of the pending interactive task',
deleted file mode 100644
--- a/taskcluster/taskgraph/actions/mochitest_retrigger.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-from __future__ import absolute_import, print_function, unicode_literals
-
-import json
-import logging
-
-from slugid import nice as slugid
-
-from .util import (create_task_from_def, fetch_graph_and_labels)
-from .registry import register_callback_action
-from taskgraph.util.parameterization import resolve_task_references
-
-TASKCLUSTER_QUEUE_URL = "https://queue.taskcluster.net/v1/task"
-
-logger = logging.getLogger(__name__)
-
-
-@register_callback_action(
-    name='retrigger-mochitest-reftest-with-options',
-    title='Mochitest/Reftest Retrigger',
-    kind='hook',
-    generic=True,
-    symbol='tr',
-    description="Retriggers the specified mochitest/reftest job with additional options",
-    context=[{'test-type': 'mochitest'},
-             {'test-type': 'reftest'}],
-    order=0,
-    schema={
-        'type': 'object',
-        'properties': {
-            'path': {
-                'type': 'string',
-                'maxLength': 255,
-                'default': '',
-                'title': 'Path name',
-                'description': 'Path of test to retrigger'
-            },
-            'logLevel': {
-                'type': 'string',
-                'enum': ['debug', 'info', 'warning', 'error', 'critical'],
-                'default': 'debug',
-                'title': 'Log level',
-                'description': 'Log level for output (default is DEBUG, which is highest)'
-            },
-            'runUntilFail': {
-                'type': 'boolean',
-                'default': True,
-                'title': 'Run until failure',
-                'description': ('Runs the specified set of tests repeatedly '
-                                'until failure (or 30 times)')
-            },
-            'repeat': {
-                'type': 'integer',
-                'default': 30,
-                'minimum': 1,
-                'title': 'Run tests N times',
-                'description': ('Run tests repeatedly (usually used in '
-                                'conjunction with runUntilFail)')
-            },
-            'environment': {
-                'type': 'object',
-                'default': {'MOZ_LOG': ''},
-                'title': 'Extra environment variables',
-                'description': 'Extra environment variables to use for this run',
-                'additionalProperties': {'type': 'string'}
-            },
-            'preferences': {
-                'type': 'object',
-                'default': {'mygeckopreferences.pref': 'myvalue2'},
-                'title': 'Extra gecko (about:config) preferences',
-                'description': 'Extra gecko (about:config) preferences to use for this run',
-                'additionalProperties': {'type': 'string'}
-            }
-        },
-        'additionalProperties': False,
-        'required': ['path']
-    }
-)
-def mochitest_retrigger_action(parameters, graph_config, input, task_group_id, task_id, task):
-    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
-        parameters, graph_config)
-
-    pre_task = full_task_graph.tasks[task['metadata']['name']]
-
-    # fix up the task's dependencies, similar to how optimization would
-    # have done in the decision
-    dependencies = {name: label_to_taskid[label]
-                    for name, label in pre_task.dependencies.iteritems()}
-    new_task_definition = resolve_task_references(pre_task.label, pre_task.task, dependencies)
-    new_task_definition.setdefault('dependencies', []).extend(dependencies.itervalues())
-
-    # don't want to run mozharness tests, want a custom mach command instead
-    new_task_definition['payload']['command'] += ['--no-run-tests']
-
-    custom_mach_command = [task['tags']['test-type']]
-
-    # mochitests may specify a flavor
-    if new_task_definition['payload']['env'].get('MOCHITEST_FLAVOR'):
-        custom_mach_command += [
-            '--keep-open=false',
-            '-f',
-            new_task_definition['payload']['env']['MOCHITEST_FLAVOR']
-        ]
-
-    enable_e10s = json.loads(new_task_definition['payload']['env'].get(
-        'ENABLE_E10S', 'true'))
-    if not enable_e10s:
-        custom_mach_command += ['--disable-e10s']
-
-    custom_mach_command += ['--log-tbpl=-',
-                            '--log-tbpl-level={}'.format(input.get('logLevel', 'debug'))]
-    if input.get('runUntilFail'):
-        custom_mach_command += ['--run-until-failure']
-    if input.get('repeat'):
-        custom_mach_command += ['--repeat', str(input.get('repeat', 30))]
-
-    # add any custom gecko preferences
-    for (key, val) in input.get('preferences', {}).iteritems():
-        custom_mach_command += ['--setpref', '{}={}'.format(key, val)]
-
-    custom_mach_command += [input['path']]
-    new_task_definition['payload']['env']['CUSTOM_MACH_COMMAND'] = ' '.join(
-        custom_mach_command)
-
-    # update environment
-    new_task_definition['payload']['env'].update(input.get('environment', {}))
-
-    # tweak the treeherder symbol
-    new_task_definition['extra']['treeherder']['symbol'] += '-custom'
-
-    logging.info("New task definition: %s", new_task_definition)
-
-    # actually create the new task
-    new_task_id = slugid()
-    create_task_from_def(new_task_id, new_task_definition, parameters['level'])
--- a/taskcluster/taskgraph/actions/purge_caches.py
+++ b/taskcluster/taskgraph/actions/purge_caches.py
@@ -19,17 +19,17 @@ logger = logging.getLogger(__name__)
     name='purge-caches',
     kind='hook',
     generic=True,
     symbol='purge-caches',
     description=(
         'Purge any caches associated with this task '
         'across all workers of the same workertype as the task.'
     ),
-    order=100,
+    order=450,
     context=[{'worker-implementation': 'docker-worker'}]
 )
 def purge_caches_action(parameters, graph_config, input, task_group_id, task_id, task):
     if task['payload'].get('cache'):
         for cache in task['payload']['cache']:
             purge_cache(task['provisionerId'], task['workerType'], cache, use_proxy=True)
     else:
         logger.info('Task has no caches. Will not clear anything!')
--- a/taskcluster/taskgraph/actions/release_promotion.py
+++ b/taskcluster/taskgraph/actions/release_promotion.py
@@ -51,17 +51,17 @@ def get_flavors(graph_config, param):
     ])
 
 
 @register_callback_action(
     name='release-promotion',
     title='Release Promotion',
     symbol='${input.release_promotion_flavor}',
     description="Promote a release.",
-    order=10000,
+    order=500,
     context=[],
     available=is_release_promotion_available,
     schema=lambda graph_config: {
         'type': 'object',
         'properties': {
             'build_number': {
                 'type': 'integer',
                 'default': 1,
--- a/taskcluster/taskgraph/actions/rerun.py
+++ b/taskcluster/taskgraph/actions/rerun.py
@@ -27,17 +27,17 @@ RERUN_STATES = ('exception', 'failed')
     kind='hook',
     generic=True,
     symbol='rr',
     description=(
         'Rerun a task.\n\n'
         'This only works on failed or exception tasks in the original taskgraph,'
         ' and is CoT friendly.'
     ),
-    order=1,
+    order=300,
     context=[{}],
     schema={
         'type': 'object',
         'properties': {}
     }
 )
 def rerun_action(parameters, graph_config, input, task_group_id, task_id, task):
     parameters = dict(parameters)
--- a/taskcluster/taskgraph/actions/retrigger.py
+++ b/taskcluster/taskgraph/actions/retrigger.py
@@ -1,37 +1,162 @@
 # -*- coding: utf-8 -*-
 
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
+import json
 import logging
 
+from slugid import nice as slugid
 from .util import (
     create_tasks,
+    create_task_from_def,
     fetch_graph_and_labels
 )
+from ..util.parameterization import resolve_task_references
 from .registry import register_callback_action
 
 logger = logging.getLogger(__name__)
 
 
 @register_callback_action(
+    name='retrigger',
+    cb_name='retrigger-mochitest',
+    title='Retrigger Mochitest/Reftest',
+    symbol='rt',
+    kind='hook',
+    generic=True,
+    description="Retriggers the specified mochitest/reftest job with additional options",
+    context=[{'test-type': 'mochitest'},
+             {'test-type': 'reftest'}],
+    order=10,
+    schema={
+        'type': 'object',
+        'properties': {
+            'path': {
+                'type': 'string',
+                'maxLength': 255,
+                'default': '',
+                'title': 'Path name',
+                'description': 'Path of test to retrigger'
+            },
+            'logLevel': {
+                'type': 'string',
+                'enum': ['debug', 'info', 'warning', 'error', 'critical'],
+                'default': 'debug',
+                'title': 'Log level',
+                'description': 'Log level for output (default is DEBUG, which is highest)'
+            },
+            'runUntilFail': {
+                'type': 'boolean',
+                'default': True,
+                'title': 'Run until failure',
+                'description': ('Runs the specified set of tests repeatedly '
+                                'until failure (or 30 times)')
+            },
+            'repeat': {
+                'type': 'integer',
+                'default': 30,
+                'minimum': 1,
+                'title': 'Run tests N times',
+                'description': ('Run tests repeatedly (usually used in '
+                                'conjunction with runUntilFail)')
+            },
+            'environment': {
+                'type': 'object',
+                'default': {'MOZ_LOG': ''},
+                'title': 'Extra environment variables',
+                'description': 'Extra environment variables to use for this run',
+                'additionalProperties': {'type': 'string'}
+            },
+            'preferences': {
+                'type': 'object',
+                'default': {'mygeckopreferences.pref': 'myvalue2'},
+                'title': 'Extra gecko (about:config) preferences',
+                'description': 'Extra gecko (about:config) preferences to use for this run',
+                'additionalProperties': {'type': 'string'}
+            }
+        },
+        'additionalProperties': False,
+        'required': ['path']
+    }
+)
+def mochitest_retrigger_action(parameters, graph_config, input, task_group_id, task_id, task):
+    decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
+        parameters, graph_config)
+
+    pre_task = full_task_graph.tasks[task['metadata']['name']]
+
+    # fix up the task's dependencies, similar to how optimization would
+    # have done in the decision
+    dependencies = {name: label_to_taskid[label]
+                    for name, label in pre_task.dependencies.iteritems()}
+    new_task_definition = resolve_task_references(pre_task.label, pre_task.task, dependencies)
+    new_task_definition.setdefault('dependencies', []).extend(dependencies.itervalues())
+
+    # don't want to run mozharness tests, want a custom mach command instead
+    new_task_definition['payload']['command'] += ['--no-run-tests']
+
+    custom_mach_command = [task['tags']['test-type']]
+
+    # mochitests may specify a flavor
+    if new_task_definition['payload']['env'].get('MOCHITEST_FLAVOR'):
+        custom_mach_command += [
+            '--keep-open=false',
+            '-f',
+            new_task_definition['payload']['env']['MOCHITEST_FLAVOR']
+        ]
+
+    enable_e10s = json.loads(new_task_definition['payload']['env'].get(
+        'ENABLE_E10S', 'true'))
+    if not enable_e10s:
+        custom_mach_command += ['--disable-e10s']
+
+    custom_mach_command += ['--log-tbpl=-',
+                            '--log-tbpl-level={}'.format(input.get('logLevel', 'debug'))]
+    if input.get('runUntilFail'):
+        custom_mach_command += ['--run-until-failure']
+    if input.get('repeat'):
+        custom_mach_command += ['--repeat', str(input.get('repeat', 30))]
+
+    # add any custom gecko preferences
+    for (key, val) in input.get('preferences', {}).iteritems():
+        custom_mach_command += ['--setpref', '{}={}'.format(key, val)]
+
+    custom_mach_command += [input['path']]
+    new_task_definition['payload']['env']['CUSTOM_MACH_COMMAND'] = ' '.join(
+        custom_mach_command)
+
+    # update environment
+    new_task_definition['payload']['env'].update(input.get('environment', {}))
+
+    # tweak the treeherder symbol
+    new_task_definition['extra']['treeherder']['symbol'] += '-custom'
+
+    logging.info("New task definition: %s", new_task_definition)
+
+    # actually create the new task
+    new_task_id = slugid()
+    create_task_from_def(new_task_id, new_task_definition, parameters['level'])
+
+
+@register_callback_action(
     title='Retrigger',
     name='retrigger',
     symbol='rt',
     kind='hook',
     generic=True,
     description=(
         'Create a clone of the task.\n\n'
     ),
-    order=1,
+    order=11,  # must be greater than other orders in this file, as this is the fallback version
     context=[{}],
     schema={
         'type': 'object',
         'properties': {
             'downstream': {
                 'type': 'boolean',
                 'description': (
                     'If true, downstream tasks from this one will be cloned as well. '
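The important new piece above is the ``cb_name`` argument: both decorators now advertise the public name ``retrigger``, while the mochitest/reftest variant keeps a distinct callback identity. The snippet below is a hedged sketch of that split; the real ``register_callback_action`` implementation is not part of this patch, and the assumption that ``cb_name`` defaults to ``name`` is mine.

# Hypothetical registry sketch, not the actual taskgraph code: `name` is what
# actions.json consumers group and sort on, while `cb_name` (assumed here to
# default to `name`) keeps each registered callback unique.
callbacks = {}

def register_callback_action(name, cb_name=None, order=None, context=(), **kwargs):
    def register(callback):
        key = cb_name or name
        assert key not in callbacks, 'callback identities must be unique'
        callbacks[key] = {'name': name, 'order': order,
                          'context': list(context), 'callback': callback}
        return callback
    return register

@register_callback_action(name='retrigger', cb_name='retrigger-mochitest',
                          order=10, context=[{'test-type': 'mochitest'},
                                             {'test-type': 'reftest'}])
def retrigger_mochitest(*args, **kwargs):
    pass

@register_callback_action(name='retrigger', order=11, context=[{}])
def retrigger(*args, **kwargs):
    pass

# Both entries advertise name='retrigger'; a consumer chooses between them by
# context match and smallest order, per the actions.rst change above.
assert set(callbacks) == {'retrigger-mochitest', 'retrigger'}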
--- a/taskcluster/taskgraph/actions/run_missing_tests.py
+++ b/taskcluster/taskgraph/actions/run_missing_tests.py
@@ -22,17 +22,17 @@ logger = logging.getLogger(__name__)
     generic=True,
     symbol='rmt',
     description=(
         "Run tests in the selected push that were optimized away, usually by SETA."
         "\n"
         "This action is for use on pushes that will be merged into another branch,"
         "to check that optimization hasn't hidden any failures."
     ),
-    order=100,  # Useful for sheriffs, but not top of the list
+    order=250,
     context=[],  # Applies to decision task
 )
 def run_missing_tests(parameters, graph_config, input, task_group_id, task_id, task):
     decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
         parameters, graph_config)
     target_tasks = get_artifact(decision_task_id, "public/target-tasks.json")
 
     # The idea here is to schedule all tasks of the `test` kind that were