deleted file mode 100644
--- a/taskcluster/ci/docker-image/image.yml
+++ /dev/null
@@ -1,68 +0,0 @@
----
-task:
- created:
- relative-datestamp: "0 seconds"
- deadline:
- relative-datestamp: "24 hours"
- metadata:
- name: 'Docker Image Build: {{image_name}}'
- description: 'Build the docker image {{image_name}} for use by dependent tasks'
- source: '{{source}}'
- owner: mozilla-taskcluster-maintenance@mozilla.com
- tags:
- createdForUser: '{{owner}}'
-
- workerType: gecko-images
- provisionerId: aws-provisioner-v1
- schedulerId: task-graph-scheduler
-
- routes:
- # Indexing routes to avoid building the same image twice
- - index.{{index_image_prefix}}.level-{{level}}.{{image_name}}.latest
- - index.{{index_image_prefix}}.level-{{level}}.{{image_name}}.pushdate.{{year}}.{{month}}-{{day}}-{{pushtime}}
- - index.{{index_image_prefix}}.level-{{level}}.{{image_name}}.hash.{{context_hash}}
- # Treeherder routes
- - tc-treeherder.v2.{{project}}.{{head_rev}}.{{pushlog_id}}
- - tc-treeherder-stage.v2.{{project}}.{{head_rev}}.{{pushlog_id}}
-
- scopes:
- - secrets:get:project/taskcluster/gecko/hgfingerprint
- - docker-worker:cache:level-{{level}}-imagebuilder-v1
-
- payload:
- env:
- HASH: '{{context_hash}}'
- PROJECT: '{{project}}'
- CONTEXT_URL: '{{context_url}}'
- IMAGE_NAME: '{{image_name}}'
- GECKO_BASE_REPOSITORY: '{{base_repository}}'
- GECKO_HEAD_REPOSITORY: '{{head_repository}}'
- GECKO_HEAD_REV: '{{head_rev}}'
- HG_STORE_PATH: '/home/worker/checkouts/hg-store'
- cache:
- 'level-{{level}}-imagebuilder-v1': '/home/worker/checkouts'
- features:
- dind: true
- chainOfTrust: true
- taskclusterProxy: true
- image: '{{#docker_image}}image_builder{{/docker_image}}'
- maxRunTime: 3600
- artifacts:
- '{{artifact_path}}':
- type: 'file'
- path: '/home/worker/workspace/artifacts/image.tar.zst'
- expires:
- relative-datestamp: "1 year"
- extra:
- imageMeta: # Useful when converting back from JSON in action tasks
- level: '{{level}}'
- contextHash: '{{context_hash}}'
- imageName: '{{image_name}}'
- treeherderEnv:
- - staging
- - production
- treeherder:
- jobKind: other
- build:
- platform: 'taskcluster-images'
- groupSymbol: 'I'
--- a/taskcluster/ci/docker-image/kind.yml
+++ b/taskcluster/ci/docker-image/kind.yml
@@ -1,19 +1,28 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
loader: taskgraph.task.docker_image:load_tasks
-images_path: '../../../taskcluster/docker'
+
+transforms:
+ - taskgraph.transforms.docker_image:transforms
+ - taskgraph.transforms.task:transforms
# make a task for each docker-image we might want. For the moment, since we
# write artifacts for each, these are whitelisted, but ideally that will change
# (to use subdirectory clones of the proper directory), at which point we can
# generate tasks for every docker image in the directory, secure in the
# knowledge that unnecessary images will be omitted from the target task graph
-images:
- desktop-test: dt
- desktop1604-test: dt16t
- desktop-build: db
- tester: tst
- lint: lnt
- android-gradle-build: agb
+jobs:
+ desktop-test:
+ symbol: I(dt)
+ desktop1604-test:
+ symbol: I(dt16t)
+ desktop-build:
+ symbol: I(db)
+ tester:
+ symbol: I(tst)
+ lint:
+ symbol: I(lnt)
+ android-gradle-build:
+ symbol: I(agb)
--- a/taskcluster/taskgraph/task/docker_image.py
+++ b/taskcluster/taskgraph/task/docker_image.py
@@ -3,85 +3,48 @@
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import urllib2
-from . import base
-from .. import GECKO
-from taskgraph.util.docker import (
- docker_image,
- generate_context_hash,
- INDEX_PREFIX,
-)
+from . import transform
+from taskgraph.util.docker import INDEX_PREFIX
+from taskgraph.transforms.base import TransformSequence, TransformConfig
from taskgraph.util.taskcluster import get_artifact_url
-from taskgraph.util.templates import Templates
+from taskgraph.util.python_path import find_object
logger = logging.getLogger(__name__)
-def load_tasks(kind, path, config, params, loaded_tasks):
- parameters = {
- 'pushlog_id': params.get('pushlog_id', 0),
- 'pushdate': params['moz_build_date'],
- 'pushtime': params['moz_build_date'][8:],
- 'year': params['moz_build_date'][0:4],
- 'month': params['moz_build_date'][4:6],
- 'day': params['moz_build_date'][6:8],
- 'project': params['project'],
- 'docker_image': docker_image,
- 'base_repository': params['base_repository'] or params['head_repository'],
- 'head_repository': params['head_repository'],
- 'head_ref': params['head_ref'] or params['head_rev'],
- 'head_rev': params['head_rev'],
- 'owner': params['owner'],
- 'level': params['level'],
- 'source': '{repo}file/{rev}/taskcluster/ci/docker-image/image.yml'
- .format(repo=params['head_repository'], rev=params['head_rev']),
- 'index_image_prefix': INDEX_PREFIX,
- 'artifact_path': 'public/image.tar.zst',
- }
+def transform_inputs(inputs, kind, path, config, params, loaded_tasks):
+ """
+ Transform a sequence of inputs according to the transform configuration.
+ """
+ transforms = TransformSequence()
+ for xform_path in config['transforms']:
+ xform = find_object(xform_path)
+ transforms.add(xform)
- tasks = []
- templates = Templates(path)
- for image_name, image_symbol in config['images'].iteritems():
- context_path = os.path.join('taskcluster', 'docker', image_name)
- context_hash = generate_context_hash(GECKO, context_path, image_name)
-
- image_parameters = dict(parameters)
- image_parameters['image_name'] = image_name
- image_parameters['context_hash'] = context_hash
-
- image_task = templates.load('image.yml', image_parameters)
- attributes = {'image_name': image_name}
-
- # unique symbol for different docker image
- if 'extra' in image_task['task']:
- image_task['task']['extra']['treeherder']['symbol'] = image_symbol
-
- # As an optimization, if the context hash exists for a high level, that image
- # task ID will be used. The reasoning behind this is that eventually everything ends
- # up on level 3 at some point if most tasks use this as a common image
- # for a given context hash, a worker within Taskcluster does not need to contain
- # the same image per branch.
- index_paths = ['{}.level-{}.{}.hash.{}'.format(
- INDEX_PREFIX, level, image_name, context_hash)
- for level in reversed(range(int(params['level']), 4))]
-
- tasks.append(DockerImageTask(kind, 'build-docker-image-' + image_name,
- task=image_task['task'], attributes=attributes,
- index_paths=index_paths))
-
+ # perform the transformations
+ trans_config = TransformConfig(kind, path, config, params)
+ tasks = [DockerImageTask(kind, t)
+ for t in transforms(trans_config, inputs)]
return tasks
-class DockerImageTask(base.Task):
+def load_tasks(kind, path, config, params, loaded_tasks):
+ return transform_inputs(
+ transform.get_inputs(kind, path, config, params, loaded_tasks),
+ kind, path, config, params, loaded_tasks)
+
+
+class DockerImageTask(transform.TransformTask):
def get_dependencies(self, taskgraph):
return []
def optimize(self, params):
optimized, taskId = super(DockerImageTask, self).optimize(params)
if optimized and taskId:
try:
# Only return the task ID if the artifact exists for the indexed
@@ -103,14 +66,11 @@ class DockerImageTask(base.Task):
def from_json(cls, task_dict):
# Generating index_paths for optimization
imgMeta = task_dict['task']['extra']['imageMeta']
image_name = imgMeta['imageName']
context_hash = imgMeta['contextHash']
index_paths = ['{}.level-{}.{}.hash.{}'.format(
INDEX_PREFIX, level, image_name, context_hash)
for level in reversed(range(int(imgMeta['level']), 4))]
- docker_image_task = cls(kind='docker-image',
- label=task_dict['label'],
- attributes=task_dict['attributes'],
- task=task_dict['task'],
- index_paths=index_paths)
+ task_dict['index-paths'] = index_paths
+ docker_image_task = cls(kind='docker-image', task=task_dict)
return docker_image_task
--- a/taskcluster/taskgraph/task/transform.py
+++ b/taskcluster/taskgraph/task/transform.py
@@ -79,18 +79,18 @@ class TransformTask(base.Task):
"""
Tasks of this class are generated by applying transformations to a sequence
of input entities. By default, it gets those inputs from YAML data in the
kind directory, but subclasses may override `get_inputs` to produce them in
some other way.
"""
def __init__(self, kind, task):
- self.dependencies = task['dependencies']
- self.when = task['when']
+ self.dependencies = task.get('dependencies', {})
+ self.when = task.get('when', {})
super(TransformTask, self).__init__(kind, task['label'],
task['attributes'], task['task'],
index_paths=task.get('index-paths'))
def get_dependencies(self, taskgraph):
return [(label, name) for name, label in self.dependencies.items()]
def optimize(self, params):
deleted file mode 100644
--- a/taskcluster/taskgraph/test/test_task_docker_image.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-from __future__ import absolute_import, print_function, unicode_literals
-
-import unittest
-import os
-
-from ..task import docker_image
-from mozunit import main
-
-
-KIND_PATH = os.path.join(docker_image.GECKO, 'taskcluster', 'ci', 'docker-image')
-
-
-class TestDockerImageKind(unittest.TestCase):
-
- def setUp(self):
- self.task = docker_image.DockerImageTask(
- 'docker-image',
- KIND_PATH,
- {},
- {},
- index_paths=[])
-
- def test_get_task_dependencies(self):
- # this one's easy!
- self.assertEqual(self.task.get_dependencies(None), [])
-
- # TODO: optimize_task
-
-
-if __name__ == '__main__':
- main()
--- a/taskcluster/taskgraph/test/test_taskgraph.py
+++ b/taskcluster/taskgraph/test/test_taskgraph.py
@@ -9,17 +9,17 @@ import unittest
from ..graph import Graph
from ..task.docker_image import DockerImageTask
from ..task.transform import TransformTask
from ..taskgraph import TaskGraph
from mozunit import main
from taskgraph.util.docker import INDEX_PREFIX
-class TestTargetTasks(unittest.TestCase):
+class TestTaskGraph(unittest.TestCase):
def test_from_json(self):
task = {
"routes": [],
"extra": {
"imageMeta": {
"contextHash": "<hash>",
"imageName": "<image>",
@@ -35,20 +35,22 @@ class TestTargetTasks(unittest.TestCase)
task={
'label': 'a',
'attributes': {},
'dependencies': {},
'when': {},
'task': {'task': 'def'},
}),
'b': DockerImageTask(kind='docker-image',
- label='b',
- attributes={},
- task=task,
- index_paths=index_paths),
+ task={
+ 'label': 'b',
+ 'attributes': {},
+ 'task': task,
+ 'index_paths': index_paths,
+ }),
}, graph=Graph(nodes={'a', 'b'}, edges=set()))
tasks, new_graph = TaskGraph.from_json(graph.to_json())
self.assertEqual(graph.tasks['a'], new_graph.tasks['a'])
self.assertEqual(graph, new_graph)
if __name__ == '__main__':
main()
new file mode 100644
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/docker_image.py
@@ -0,0 +1,126 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Transform the docker-image kind's job descriptions, defined in
+ taskcluster/ci/docker-image/kind.yml,
+into actual task descriptions.
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import os
+
+from taskgraph.transforms.base import TransformSequence
+from .. import GECKO
+from taskgraph.util.docker import (
+ docker_image,
+ generate_context_hash,
+ INDEX_PREFIX,
+)
+
+transforms = TransformSequence()
+
+ROUTE_TEMPLATES = [
+ 'index.{index_prefix}.level-{level}.{image_name}.latest',
+ 'index.{index_prefix}.level-{level}.{image_name}.pushdate.{year}.{month}-{day}-{pushtime}',
+ 'index.{index_prefix}.level-{level}.{image_name}.hash.{context_hash}',
+]
+
+
+@transforms.add
+def fill_template(config, tasks):
+ for task in tasks:
+ image_name = task.pop('name')
+ job_symbol = task.pop('symbol')
+
+ context_path = os.path.join('taskcluster', 'docker', image_name)
+ context_hash = generate_context_hash(GECKO, context_path, image_name)
+
+ description = 'Build the docker image {} for use by dependent tasks'.format(
+ image_name)
+
+ routes = []
+ for tpl in ROUTE_TEMPLATES:
+ routes.append(tpl.format(
+ index_prefix=INDEX_PREFIX,
+ level=config.params['level'],
+ image_name=image_name,
+ project=config.params['project'],
+ head_rev=config.params['head_rev'],
+ pushlog_id=config.params.get('pushlog_id', 0),
+ pushtime=config.params['moz_build_date'][8:],
+ year=config.params['moz_build_date'][0:4],
+ month=config.params['moz_build_date'][4:6],
+ day=config.params['moz_build_date'][6:8],
+ context_hash=context_hash,
+ ))
+
+ # As an optimization, if the context hash exists for a high level, that image
+ # task ID will be used. The reasoning behind this is that eventually everything ends
+ # up on level 3 at some point if most tasks use this as a common image
+ # for a given context hash, a worker within Taskcluster does not need to contain
+ # the same image per branch.
+ index_paths = ['{}.level-{}.{}.hash.{}'.format(
+ INDEX_PREFIX, level, image_name, context_hash)
+ for level in reversed(range(int(config.params['level']), 4))]
+
+ # include some information that is useful in reconstructing this task
+ # from JSON
+ extra = {
+ 'imageMeta': {
+ 'level': config.params['level'],
+ 'contextHash': context_hash,
+ 'imageName': image_name,
+ },
+ }
+
+ taskdesc = {
+ 'label': 'build-docker-image-' + image_name,
+ 'description': description,
+ 'attributes': {'image_name': image_name},
+ 'expires-after': '1 year',
+ 'routes': routes,
+ 'index-paths': index_paths,
+ 'scopes': ['secrets:get:project/taskcluster/gecko/hgfingerprint'],
+ 'extra': extra,
+ 'treeherder': {
+ 'symbol': job_symbol,
+ 'platform': 'taskcluster-images/opt',
+ 'kind': 'other',
+ 'tier': 1,
+ },
+ 'run-on-projects': [],
+ 'worker-type': 'aws-provisioner-v1/gecko-images',
+ # can't use {in-tree: ..} here, otherwise we might try to build
+ # this image..
+ 'worker': {
+ 'implementation': 'docker-worker',
+ 'docker-image': docker_image('image_builder'),
+ 'caches': [{
+ 'type': 'persistent',
+ 'name': 'level-{}-imagebuilder-v1'.format(config.params['level']),
+ 'mount-point': '/home/worker/checkouts',
+ }],
+ 'artifacts': [{
+ 'type': 'file',
+ 'path': '/home/worker/workspace/artifacts/image.tar.zst',
+ 'name': 'public/image.tar.zst',
+ }],
+ 'env': {
+ 'HG_STORE_PATH': '/home/worker/checkouts/hg-store',
+ 'HASH': context_hash,
+ 'PROJECT': config.params['project'],
+ 'IMAGE_NAME': image_name,
+ 'GECKO_BASE_REPOSITORY': config.params['base_repository'],
+ 'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
+ 'GECKO_HEAD_REV': config.params['head_rev'],
+ },
+ 'chain-of-trust': True,
+ 'docker-in-docker': True,
+ 'taskcluster-proxy': True,
+ 'max-run-time': 3600,
+ },
+ }
+
+ yield taskdesc
--- a/taskcluster/taskgraph/transforms/task.py
+++ b/taskcluster/taskgraph/transforms/task.py
@@ -157,16 +157,17 @@ task_description_schema = Schema({
# worker features that should be enabled
Required('relengapi-proxy', default=False): bool,
Required('chain-of-trust', default=False): bool,
Required('taskcluster-proxy', default=False): bool,
Required('allow-ptrace', default=False): bool,
Required('loopback-video', default=False): bool,
Required('loopback-audio', default=False): bool,
+ Required('docker-in-docker', default=False): bool, # (aka 'dind')
# caches to set up for the task
Optional('caches'): [{
# only one type is supported by any of the workers right now
'type': 'persistent',
# name of the cache, allowing re-use by subsequent tasks naming the
# same cache
@@ -370,16 +371,17 @@ GROUP_NAMES = {
'tc-X-e10s': 'Xpcshell tests executed by TaskCluster with e10s',
'tc-L10n': 'Localised Repacks executed by Taskcluster',
'tc-BM-L10n': 'Beetmover for locales executed by Taskcluster',
'tc-Up': 'Balrog submission of updates, executed by Taskcluster',
'tc-cs': 'Checksum signing executed by Taskcluster',
'tc-BMcs': 'Beetmover checksums, executed by Taskcluster',
'Aries': 'Aries Device Image',
'Nexus 5-L': 'Nexus 5-L Device Image',
+ 'I': 'Docker Image Builds',
'TL': 'Toolchain builds for Linux 64-bits',
'TM': 'Toolchain builds for OSX',
'TW32': 'Toolchain builds for Windows 32-bits',
'TW64': 'Toolchain builds for Windows 64-bits',
'SM-tc': 'Spidermonkey builds',
}
UNKNOWN_GROUP_NAME = "Treeherder group {} has no name; add it to " + __file__
@@ -455,16 +457,19 @@ def build_docker_worker_payload(config,
if worker.get('allow-ptrace'):
features['allowPtrace'] = True
task_def['scopes'].append('docker-worker:feature:allowPtrace')
if worker.get('chain-of-trust'):
features['chainOfTrust'] = True
+ if worker.get('docker-in-docker'):
+ features['dind'] = True
+
if task.get('needs-sccache'):
features['taskclusterProxy'] = True
task_def['scopes'].append(
'assume:project:taskcluster:level-{level}-sccache-buckets'.format(
level=config.params['level'])
)
worker['env']['USE_SCCACHE'] = '1'
else:
@@ -475,20 +480,21 @@ def build_docker_worker_payload(config,
for lo in 'audio', 'video':
if worker.get('loopback-' + lo):
capitalized = 'loopback' + lo.capitalize()
devices = capabilities.setdefault('devices', {})
devices[capitalized] = True
task_def['scopes'].append('docker-worker:capability:device:' + capitalized)
task_def['payload'] = payload = {
- 'command': worker['command'],
'image': image,
'env': worker['env'],
}
+ if 'command' in worker:
+ payload['command'] = worker['command']
if 'max-run-time' in worker:
payload['maxRunTime'] = worker['max-run-time']
if 'retry-exit-status' in worker:
payload['onExitStatus'] = {'retry': [worker['retry-exit-status']]}
if 'artifacts' in worker: