Bug 1465181 - [taskgraph] Stop hardcoding the workdir to /builds/worker in 'job' tasks, r?gps
author: Andrew Halberstadt <ahalberstadt@mozilla.com>
date: Tue, 29 May 2018 16:05:35 -0400
changeset: 802312 87d0ac8c56192de6333133ab92f17fc36f35e919
parent: 802311 9b02b8027d1951813fec75ce496fdd4e8d19f79e
child: 802313 023be0e39ffbba3f2041f563fbf1ad163d3dd092
push id: 111858
push user: ahalberstadt@mozilla.com
push date: Thu, 31 May 2018 17:24:02 +0000
reviewers: gps
bugs: 1465181
milestone: 62.0a1
Bug 1465181 - [taskgraph] Stop hardcoding the workdir to /builds/worker in 'job' tasks, r?gps

This adds an optional 'workdir' key to all job schemas. It still defaults to
/builds/worker, but can be overridden by individual tasks or schema
implementations.

MozReview-Commit-ID: LY20xfBhbCP
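A minimal sketch of the intended behavior, assuming only the setdefault call
added below in transforms/job/__init__.py (the job dicts here are hypothetical,
not taken from the patch): tasks that set run.workdir keep their value, and
everything else falls back to /builds/worker.

    # Sketch: the transform only fills in 'workdir' when the task didn't set one.
    job = {'run': {'using': 'run-task', 'command': './mach build'}}
    job['run'].setdefault('workdir', '/builds/worker')
    assert job['run']['workdir'] == '/builds/worker'    # default applied

    job = {'run': {'using': 'run-task', 'command': './mach build',
                   'workdir': '/home/worker'}}          # hypothetical override
    job['run'].setdefault('workdir', '/builds/worker')
    assert job['run']['workdir'] == '/home/worker'      # task value wins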
taskcluster/scripts/run-task
taskcluster/taskgraph/transforms/job/__init__.py
taskcluster/taskgraph/transforms/job/common.py
taskcluster/taskgraph/transforms/job/debian_package.py
taskcluster/taskgraph/transforms/job/hazard.py
taskcluster/taskgraph/transforms/job/mach.py
taskcluster/taskgraph/transforms/job/mozharness.py
taskcluster/taskgraph/transforms/job/mozharness_test.py
taskcluster/taskgraph/transforms/job/run_task.py
taskcluster/taskgraph/transforms/job/spidermonkey.py
taskcluster/taskgraph/transforms/job/toolchain.py
--- a/taskcluster/scripts/run-task
+++ b/taskcluster/scripts/run-task
@@ -463,17 +463,17 @@ def vcs_checkout(source_repo, dest, stor
                                      repo_name=source_repo.split('/')[-1]))
 
     print_line(b'vcs', msg.encode('utf-8'))
 
     return revision
 
 
 def main(args):
-    print_line(b'setup', b'run-task started\n')
+    print_line(b'setup', b'run-task started in %s\n' % os.getcwd().encode('utf-8'))
     running_as_root = IS_POSIX and os.getuid() == 0
 
     # Arguments up to '--' are ours. After are for the main task
     # to be executed.
     try:
         i = args.index('--')
         our_args = args[0:i]
         task_args = args[i + 1:]
--- a/taskcluster/taskgraph/transforms/job/__init__.py
+++ b/taskcluster/taskgraph/transforms/job/__init__.py
@@ -77,16 +77,19 @@ job_description_schema = Schema({
         Optional('files-changed'): [basestring],
     }),
 
     # A description of how to run this job.
     'run': {
         # The key to a job implementation in a peer module to this one
         'using': basestring,
 
+        # Base work directory used to set up the task.
+        Optional('workdir'): basestring,
+
         # Any remaining content is verified against that job implementation's
         # own schema.
         Extra: object,
     },
 
     Required('worker-type'): task_description_schema['worker-type'],
 
     # This object will be passed through to the task description, with additions
@@ -144,16 +147,18 @@ def make_task_description(config, jobs):
         if impl:
             job.setdefault('tags', {})['worker-implementation'] = impl
         worker = job.setdefault('worker', {})
         assert 'implementation' not in worker
         worker['implementation'] = impl
         if os:
             worker['os'] = os
 
+        job['run'].setdefault('workdir', '/builds/worker')
+
         taskdesc = copy.deepcopy(job)
 
         # fill in some empty defaults to make run implementations easier
         taskdesc.setdefault('attributes', {})
         taskdesc.setdefault('dependencies', {})
         taskdesc.setdefault('routes', [])
         taskdesc.setdefault('scopes', [])
         taskdesc.setdefault('extra', {})
--- a/taskcluster/taskgraph/transforms/job/common.py
+++ b/taskcluster/taskgraph/transforms/job/common.py
@@ -22,17 +22,17 @@ def docker_worker_add_workspace_cache(co
     key name to avoid undesired conflicts with other caches."""
     taskdesc['worker'].setdefault('caches', []).append({
         'type': 'persistent',
         'name': 'level-{}-{}-build-{}-{}-workspace'.format(
             config.params['level'], config.params['project'],
             taskdesc['attributes']['build_platform'],
             taskdesc['attributes']['build_type'],
         ),
-        'mount-point': "/builds/worker/workspace",
+        'mount-point': "{workdir}/workspace".format(**job['run']),
         # Don't enable the workspace cache when we can't guarantee its
         # behavior, like on Try.
         'skip-untrusted': True,
     })
     if extra:
         taskdesc['worker']['caches'][-1]['name'] += '-{}'.format(
             extra
         )
@@ -43,17 +43,17 @@ def add_artifacts(config, job, taskdesc,
         'name': get_artifact_prefix(taskdesc),
         'path': path,
         'type': 'directory',
     })
 
 
 def docker_worker_add_artifacts(config, job, taskdesc):
     """ Adds an artifact directory to the task """
-    add_artifacts(config, job, taskdesc, path='/builds/worker/artifacts/')
+    add_artifacts(config, job, taskdesc, path='{workdir}/artifacts/'.format(**job['run']))
 
 
 def generic_worker_add_artifacts(config, job, taskdesc):
     """ Adds an artifact directory to the task """
     # The path is the location on disk; it doesn't necessarily
     # mean the artifacts will be public or private; that is set via the name
     # attribute in add_artifacts.
     add_artifacts(config, job, taskdesc, path=get_artifact_prefix(taskdesc))
@@ -80,24 +80,24 @@ def support_vcs_checkout(config, job, ta
         # Sparse checkouts need their own cache because they can interfere
         # with clients that aren't sparse aware.
         if sparse:
             name += '-sparse'
 
         taskdesc['worker'].setdefault('caches', []).append({
             'type': 'persistent',
             'name': name,
-            'mount-point': '/builds/worker/checkouts',
+            'mount-point': '{workdir}/checkouts'.format(**job['run']),
         })
 
     taskdesc['worker'].setdefault('env', {}).update({
         'GECKO_BASE_REPOSITORY': config.params['base_repository'],
         'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
         'GECKO_HEAD_REV': config.params['head_rev'],
-        'HG_STORE_PATH': '/builds/worker/checkouts/hg-store',
+        'HG_STORE_PATH': '{workdir}/checkouts/hg-store'.format(**job['run']),
     })
 
     if 'comm_base_repository' in config.params:
         taskdesc['worker']['env'].update({
             'COMM_BASE_REPOSITORY': config.params['comm_base_repository'],
             'COMM_HEAD_REPOSITORY': config.params['comm_head_repository'],
             'COMM_HEAD_REV': config.params['comm_head_rev'],
         })
@@ -171,21 +171,21 @@ def docker_worker_add_tooltool(config, j
 
     assert job['worker']['implementation'] in ('docker-worker', 'docker-engine')
 
     level = config.params['level']
 
     taskdesc['worker'].setdefault('caches', []).append({
         'type': 'persistent',
         'name': 'level-%s-tooltool-cache' % level,
-        'mount-point': '/builds/worker/tooltool-cache',
+        'mount-point': '{workdir}/tooltool-cache'.format(**job['run']),
     })
 
     taskdesc['worker'].setdefault('env', {}).update({
-        'TOOLTOOL_CACHE': '/builds/worker/tooltool-cache',
+        'TOOLTOOL_CACHE': '{workdir}/tooltool-cache'.format(**job['run']),
     })
 
     taskdesc['worker']['relengapi-proxy'] = True
     taskdesc['scopes'].extend([
         'docker-worker:relengapi-proxy:tooltool.download.public',
     ])
 
     if internal:
@@ -209,9 +209,9 @@ def support_use_artifacts(config, job, t
         urls[kind] = []
 
         for artifact in artifacts:
             path = '/'.join([prefix, artifact])
             urls[kind].append(get_artifact_url(task_id, path))
 
     env = taskdesc['worker'].setdefault('env', {})
     env['USE_ARTIFACT_URLS'] = {'task-reference': json.dumps(urls)}
-    env['USE_ARTIFACT_PATH'] = '/builds/worker/use-artifacts'
+    env['USE_ARTIFACT_PATH'] = '{workdir}/use-artifacts'.format(**job['run'])
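A note on the substitution pattern used throughout this file (illustrative
only, not part of the patch): str.format(**run) draws named fields from the
run dict and silently ignores keys the template does not reference, which is
why '{workdir}/...'.format(**job['run']) works even though run also carries
'using', 'command', and the rest.

    run = {'using': 'run-task', 'command': 'true', 'workdir': '/builds/worker'}
    # Only {workdir} is referenced; str.format ignores the extra keys.
    print('{workdir}/checkouts/hg-store'.format(**run))
    # -> /builds/worker/checkouts/hg-store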
--- a/taskcluster/taskgraph/transforms/job/debian_package.py
+++ b/taskcluster/taskgraph/transforms/job/debian_package.py
@@ -51,16 +51,19 @@ run_schema = Schema({
     # List of package tasks to get build dependencies from.
     Optional('packages'): [basestring],
 
     # What resolver to use to install build dependencies. The default
     # (apt-get) is good in most cases, but in subtle cases involving
     # a *-backports archive, its solver might not be able to find a
     # solution that satisfies the build dependencies.
     Optional('resolver'): Any('apt-get', 'aptitude'),
+
+    # Base work directory used to set up the task.
+    Required('workdir'): basestring,
 })
 
 
 @run_job_using("docker-worker", "debian-package", schema=run_schema)
 def docker_worker_debian_package(config, job, taskdesc):
     run = job['run']
 
     name = taskdesc['label'].replace('{}-'.format(config.kind), '', 1)
--- a/taskcluster/taskgraph/transforms/job/hazard.py
+++ b/taskcluster/taskgraph/transforms/job/hazard.py
@@ -29,16 +29,19 @@ haz_run_schema = Schema({
     Optional('mozconfig'): basestring,
 
     # The set of secret names to which the task has access; these are prefixed
     # with `project/releng/gecko/{treeherder.kind}/level-{level}/`.   Setting
     # this will enable any worker features required and set the task's scopes
     # appropriately.  `true` here means ['*'], all secrets.  Not supported on
     # Windows
     Required('secrets', default=False): Any(bool, [basestring]),
+
+    # Base work directory used to set up the task.
+    Required('workdir'): basestring,
 })
 
 
 @run_job_using("docker-worker", "hazard", schema=haz_run_schema)
 def docker_worker_hazard(config, job, taskdesc):
     run = job['run']
 
     worker = taskdesc['worker']
@@ -57,16 +60,16 @@ def docker_worker_hazard(config, job, ta
     })
 
     # script parameters
     if run.get('mozconfig'):
         env['MOZCONFIG'] = run['mozconfig']
 
     # build-haz-linux.sh needs this otherwise it assumes the checkout is in
     # the workspace.
-    env['GECKO_DIR'] = '/builds/worker/checkouts/gecko'
+    env['GECKO_DIR'] = '{workdir}/checkouts/gecko'.format(**run)
 
     worker['command'] = [
-        '/builds/worker/bin/run-task',
-        '--vcs-checkout', '/builds/worker/checkouts/gecko',
+        '{workdir}/bin/run-task'.format(**run),
+        '--vcs-checkout', '{workdir}/checkouts/gecko'.format(**run),
         '--',
         '/bin/bash', '-c', run['command']
     ]
--- a/taskcluster/taskgraph/transforms/job/mach.py
+++ b/taskcluster/taskgraph/transforms/job/mach.py
@@ -15,21 +15,24 @@ mach_schema = Schema({
     Required('using'): 'mach',
 
     # The mach command (omitting `./mach`) to run
     Required('mach'): basestring,
 
     # if true, perform a checkout of a comm-central based branch inside the
     # gecko checkout
     Required('comm-checkout'): bool,
+
+    # Base work directory used to set up the task.
+    Required('workdir'): basestring,
 })
 
 
 @run_job_using("docker-worker", "mach", schema=mach_schema, defaults={'comm-checkout': False})
 @run_job_using("native-engine", "mach", schema=mach_schema, defaults={'comm-checkout': False})
 def docker_worker_mach(config, job, taskdesc):
     run = job['run']
 
     # defer to the run_task implementation
-    run['command'] = 'cd /builds/worker/checkouts/gecko && ./mach ' + run['mach']
+    run['command'] = 'cd {workdir}/checkouts/gecko && ./mach {mach}'.format(**run)
     run['using'] = 'run-task'
     del run['mach']
     configure_taskdesc_for_run(config, job, taskdesc, job['worker']['implementation'])
--- a/taskcluster/taskgraph/transforms/job/mozharness.py
+++ b/taskcluster/taskgraph/transforms/job/mozharness.py
@@ -96,16 +96,19 @@ mozharness_run_schema = Schema({
 
     # If false don't pass --branch mozharness script
     # Only disableable on windows
     Required('use-magic-mh-args'): bool,
 
     # if true, perform a checkout of a comm-central based branch inside the
     # gecko checkout
     Required('comm-checkout'): bool,
+
+    # Base work directory used to set up the task.
+    Required('workdir'): basestring,
 })
 
 
 mozharness_defaults = {
     'tooltool-downloads': False,
     'secrets': False,
     'taskcluster-proxy': False,
     'need-xvfb': False,
@@ -194,27 +197,28 @@ def mozharness_on_docker_worker_setup(co
         docker_worker_add_tooltool(config, job, taskdesc, internal=internal)
 
     # Retry if mozharness returns TBPL_RETRY
     worker['retry-exit-status'] = [4]
 
     docker_worker_setup_secrets(config, job, taskdesc)
 
     command = [
-        '/builds/worker/bin/run-task',
-        '--vcs-checkout', '/builds/worker/workspace/build/src',
-        '--tools-checkout', '/builds/worker/workspace/build/tools',
+        '{workdir}/bin/run-task'.format(**run),
+        '--vcs-checkout', '{workdir}/workspace/build/src'.format(**run),
+        '--tools-checkout', '{workdir}/workspace/build/tools'.format(**run),
     ]
     if run['comm-checkout']:
-        command.append('--comm-checkout=/builds/worker/workspace/build/src/comm')
+        command.append('--comm-checkout={workdir}/workspace/build/src/comm'.format(**run))
 
     command += [
         '--',
-        '/builds/worker/workspace/build/src/{}'.format(
-            run.get('job-script', 'taskcluster/scripts/builder/build-linux.sh')
+        '{workdir}/workspace/build/src/{script}'.format(
+            workdir=run['workdir'],
+            script=run.get('job-script', 'taskcluster/scripts/builder/build-linux.sh'),
         ),
     ]
 
     worker['command'] = command
 
 
 @run_job_using("generic-worker", "mozharness", schema=mozharness_run_schema,
                defaults=mozharness_defaults)
--- a/taskcluster/taskgraph/transforms/job/mozharness_test.py
+++ b/taskcluster/taskgraph/transforms/job/mozharness_test.py
@@ -77,61 +77,64 @@ def get_variant(test_platform):
     return ''
 
 
 test_description_schema = {str(k): v for k, v in test_description_schema.schema.iteritems()}
 
 mozharness_test_run_schema = Schema({
     Required('using'): 'mozharness-test',
     Required('test'): test_description_schema,
+    # Base work directory used to set up the task.
+    Required('workdir'): basestring,
 })
 
 
 def test_packages_url(taskdesc):
     """Account for different platforms that name their test packages differently"""
     return get_artifact_url('<build>', get_artifact_path(taskdesc, 'target.test_packages.json'))
 
 
 @run_job_using('docker-engine', 'mozharness-test', schema=mozharness_test_run_schema)
 @run_job_using('docker-worker', 'mozharness-test', schema=mozharness_test_run_schema)
 def mozharness_test_on_docker(config, job, taskdesc):
+    run = job['run']
     test = taskdesc['run']['test']
     mozharness = test['mozharness']
     worker = taskdesc['worker']
 
     # apply some defaults
     worker['docker-image'] = test['docker-image']
     worker['allow-ptrace'] = True  # required for all tests, for crashreporter
     worker['loopback-video'] = test['loopback-video']
     worker['loopback-audio'] = test['loopback-audio']
     worker['max-run-time'] = test['max-run-time']
     worker['retry-exit-status'] = test['retry-exit-status']
 
     artifacts = [
         # (artifact name prefix, in-image path)
-        ("public/logs/", "/builds/worker/workspace/build/upload/logs/"),
-        ("public/test", "/builds/worker/artifacts/"),
-        ("public/test_info/", "/builds/worker/workspace/build/blobber_upload_dir/"),
+        ("public/logs/", "{workdir}/workspace/build/upload/logs/".format(**run)),
+        ("public/test", "{workdir}/artifacts/".format(**run)),
+        ("public/test_info/", "{workdir}/workspace/build/blobber_upload_dir/".format(**run)),
     ]
 
     installer_url = get_artifact_url('<build>', mozharness['build-artifact-name'])
     mozharness_url = get_artifact_url('<build>',
                                       get_artifact_path(taskdesc, 'mozharness.zip'))
 
     worker['artifacts'] = [{
         'name': prefix,
-        'path': os.path.join('/builds/worker/workspace', path),
+        'path': os.path.join('{workdir}/workspace'.format(**run), path),
         'type': 'directory',
     } for (prefix, path) in artifacts]
 
     worker['caches'] = [{
         'type': 'persistent',
         'name': 'level-{}-{}-test-workspace'.format(
             config.params['level'], config.params['project']),
-        'mount-point': "/builds/worker/workspace",
+        'mount-point': "{workdir}/workspace".format(**run),
     }]
 
     env = worker['env'] = {
         'MOZHARNESS_CONFIG': ' '.join(mozharness['config']),
         'MOZHARNESS_SCRIPT': mozharness['script'],
         'MOZILLA_BUILD_URL': {'task-reference': installer_url},
         'NEED_PULSEAUDIO': 'true',
         'NEED_WINDOW_MANAGER': 'true',
@@ -156,34 +159,34 @@ def mozharness_test_on_docker(config, jo
     if mozharness['tooltool-downloads']:
         docker_worker_add_tooltool(config, job, taskdesc, internal=True)
 
     if test['reboot']:
         raise Exception('reboot: {} not supported on generic-worker'.format(test['reboot']))
 
     # assemble the command line
     command = [
-        '/builds/worker/bin/run-task',
+        '{workdir}/bin/run-task'.format(**run),
     ]
 
     # Support vcs checkouts regardless of whether the task runs from
     # source or not in case it is needed on an interactive loaner.
     support_vcs_checkout(config, job, taskdesc)
 
     # If we have a source checkout, run mozharness from it instead of
     # downloading a zip file with the same content.
     if test['checkout']:
-        command.extend(['--vcs-checkout', '/builds/worker/checkouts/gecko'])
-        env['MOZHARNESS_PATH'] = '/builds/worker/checkouts/gecko/testing/mozharness'
+        command.extend(['--vcs-checkout', '{workdir}/checkouts/gecko'.format(**run)])
+        env['MOZHARNESS_PATH'] = '{workdir}/checkouts/gecko/testing/mozharness'.format(**run)
     else:
         env['MOZHARNESS_URL'] = {'task-reference': mozharness_url}
 
     command.extend([
         '--',
-        '/builds/worker/bin/test-linux.sh',
+        '{workdir}/bin/test-linux.sh'.format(**run),
     ])
 
     command.extend([
         {"task-reference": "--installer-url=" + installer_url},
         {"task-reference": "--test-packages-url=" + test_packages_url(taskdesc)},
     ])
     command.extend(mozharness.get('extra-options', []))
 
--- a/taskcluster/taskgraph/transforms/job/run_task.py
+++ b/taskcluster/taskgraph/transforms/job/run_task.py
@@ -14,17 +14,17 @@ from voluptuous import Required, Any
 
 run_task_schema = Schema({
     Required('using'): 'run-task',
 
     # if true, add a cache at ~worker/.cache, which is where things like pip
     # tend to hide their caches.  This cache is never added for level-1 jobs.
     Required('cache-dotcache'): bool,
 
-    # if true (the default), perform a checkout in /builds/worker/checkouts/gecko
+    # if true (the default), perform a checkout in {workdir}/checkouts/gecko
     Required('checkout'): bool,
 
     # The sparse checkout profile to use. Value is the filename relative to the
     # directory where sparse profiles are defined (build/sparse-profiles/).
     Required('sparse-profile'): Any(basestring, None),
 
     # if true, perform a checkout of a comm-central based branch inside the
     # gecko checkout
@@ -37,16 +37,19 @@ run_task_schema = Schema({
     Required('use-artifacts'): Any(None, {
         basestring: [basestring],
     }),
 
     # The command arguments to pass to the `run-task` script, after the
     # checkout arguments.  If a list, it will be passed directly; otherwise
     # it will be included in a single argument to `bash -cx`.
     Required('command'): Any([basestring], basestring),
+
+    # Base work directory used to set up the task.
+    Required('workdir'): basestring,
 })
 
 
 def common_setup(config, job, taskdesc):
     run = job['run']
     if run['checkout']:
         support_vcs_checkout(config, job, taskdesc,
                              sparse=bool(run['sparse-profile']))
@@ -56,17 +59,17 @@ def common_setup(config, job, taskdesc):
 
     taskdesc['worker'].setdefault('env', {})['MOZ_SCM_LEVEL'] = config.params['level']
 
 
 def add_checkout_to_command(run, command):
     if not run['checkout']:
         return
 
-    command.append('--vcs-checkout=/builds/worker/checkouts/gecko')
+    command.append('--vcs-checkout={workdir}/checkouts/gecko'.format(**run))
 
     if run['sparse-profile']:
         command.append('--sparse-profile=build/sparse-profiles/%s' %
                        run['sparse-profile'])
 
 
 defaults = {
     'cache-dotcache': False,
@@ -82,31 +85,31 @@ def docker_worker_run_task(config, job, 
     run = job['run']
     worker = taskdesc['worker'] = job['worker']
     common_setup(config, job, taskdesc)
 
     if run.get('cache-dotcache'):
         worker['caches'].append({
             'type': 'persistent',
             'name': 'level-{level}-{project}-dotcache'.format(**config.params),
-            'mount-point': '/builds/worker/.cache',
+            'mount-point': '{workdir}/.cache'.format(**run),
             'skip-untrusted': True,
         })
 
     # This must match EXIT_PURGE_CACHES in taskcluster/scripts/run-task
     worker.setdefault('retry-exit-status', []).append(72)
     worker.setdefault('purge-caches-exit-status', []).append(72)
 
     run_command = run['command']
     if isinstance(run_command, basestring):
         run_command = ['bash', '-cx', run_command]
-    command = ['/builds/worker/bin/run-task']
+    command = ['{workdir}/bin/run-task'.format(**run)]
     add_checkout_to_command(run, command)
     if run['comm-checkout']:
-        command.append('--comm-checkout=/builds/worker/checkouts/gecko/comm')
+        command.append('--comm-checkout={workdir}/checkouts/gecko/comm'.format(**run))
     command.append('--fetch-hgfingerprint')
     command.append('--')
     command.extend(run_command)
     worker['command'] = command
 
 
 @run_job_using("native-engine", "run-task", schema=run_task_schema, defaults=defaults)
 def native_engine_run_task(config, job, taskdesc):
--- a/taskcluster/taskgraph/transforms/job/spidermonkey.py
+++ b/taskcluster/taskgraph/transforms/job/spidermonkey.py
@@ -20,16 +20,19 @@ from taskgraph.transforms.job.common imp
 )
 
 sm_run_schema = Schema({
     Required('using'): Any('spidermonkey', 'spidermonkey-package', 'spidermonkey-mozjs-crate',
                            'spidermonkey-rust-bindings'),
 
     # The SPIDERMONKEY_VARIANT
     Required('spidermonkey-variant'): basestring,
+
+    # Base work directory used to set up the task.
+    Required('workdir'): basestring,
 })
 
 
 @run_job_using("docker-worker", "spidermonkey", schema=sm_run_schema)
 @run_job_using("docker-worker", "spidermonkey-package", schema=sm_run_schema)
 @run_job_using("docker-worker", "spidermonkey-mozjs-crate",
                schema=sm_run_schema)
 @run_job_using("docker-worker", "spidermonkey-rust-bindings",
@@ -38,17 +41,17 @@ def docker_worker_spidermonkey(config, j
     run = job['run']
 
     worker = taskdesc['worker']
     worker['artifacts'] = []
     worker.setdefault('caches', []).append({
         'type': 'persistent',
         'name': 'level-{}-{}-build-spidermonkey-workspace'.format(
             config.params['level'], config.params['project']),
-        'mount-point': "/builds/worker/workspace",
+        'mount-point': "{workdir}/workspace".format(**run),
         'skip-untrusted': True,
     })
 
     docker_worker_add_artifacts(config, job, taskdesc)
     docker_worker_add_tooltool(config, job, taskdesc)
 
     env = worker.setdefault('env', {})
     env.update({
@@ -64,22 +67,23 @@ def docker_worker_spidermonkey(config, j
     if run['using'] == 'spidermonkey-package':
         script = "build-sm-package.sh"
     elif run['using'] == 'spidermonkey-mozjs-crate':
         script = "build-sm-mozjs-crate.sh"
     elif run['using'] == 'spidermonkey-rust-bindings':
         script = "build-sm-rust-bindings.sh"
 
     worker['command'] = [
-        '/builds/worker/bin/run-task',
-        '--vcs-checkout', '/builds/worker/workspace/build/src',
+        '{workdir}/bin/run-task'.format(**run),
+        '--vcs-checkout', '{workdir}/workspace/build/src'.format(**run),
         '--',
         '/bin/bash',
         '-c',
-        'cd /builds/worker && workspace/build/src/taskcluster/scripts/builder/%s' % script
+        'cd {workdir} && workspace/build/src/taskcluster/scripts/builder/{script}'.format(
+            workdir=run['workdir'], script=script)
     ]
 
 
 @run_job_using("generic-worker", "spidermonkey", schema=sm_run_schema)
 def generic_worker_spidermonkey(config, job, taskdesc):
     assert job['worker']['os'] == 'windows', 'only supports windows right now'
 
     run = job['run']
--- a/taskcluster/taskgraph/transforms/job/toolchain.py
+++ b/taskcluster/taskgraph/transforms/job/toolchain.py
@@ -58,16 +58,19 @@ toolchain_run_schema = Schema({
     Optional('resources'): [basestring],
 
     # Path to the artifact produced by the toolchain job
     Required('toolchain-artifact'): basestring,
 
     # An alias that can be used instead of the real toolchain job name in
     # the toolchains list for build jobs.
     Optional('toolchain-alias'): basestring,
+
+    # Base work directory used to set up the task.
+    Required('workdir'): basestring,
 })
 
 
 def get_digest_data(config, run, taskdesc):
     files = list(run.get('resources', []))
     # This file
     files.append('taskcluster/taskgraph/transforms/job/toolchain.py')
     # The script
@@ -151,25 +154,25 @@ def docker_worker_toolchain(config, job,
         args = ' ' + shell_quote(*args)
 
     sparse_profile = []
     if run.get('sparse-profile'):
         sparse_profile = ['--sparse-profile',
                           'build/sparse-profiles/{}'.format(run['sparse-profile'])]
 
     worker['command'] = [
-        '/builds/worker/bin/run-task',
-        '--vcs-checkout=/builds/worker/workspace/build/src',
+        '{workdir}/bin/run-task'.format(**run),
+        '--vcs-checkout={workdir}/workspace/build/src'.format(**run),
     ] + sparse_profile + [
         '--',
         'bash',
         '-c',
-        'cd /builds/worker && '
+        'cd {} && '
         '{}workspace/build/src/taskcluster/scripts/misc/{}{}'.format(
-            wrapper, run['script'], args)
+            run['workdir'], wrapper, run['script'], args)
     ]
 
     attributes = taskdesc.setdefault('attributes', {})
     attributes['toolchain-artifact'] = run['toolchain-artifact']
     if 'toolchain-alias' in run:
         attributes['toolchain-alias'] = run['toolchain-alias']
 
     if not taskgraph.fast: