Bug 1250585 - beetmover - add push to mirrors, r?rail
* this also fixes a bug where we take build/0 from tc artifacts regardless of
whether there was a retry
MozReview-Commit-ID: KKJCGF6Hc7k
new file mode 100644
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/aws.py
@@ -0,0 +1,15 @@
+import os
+
+
def pop_aws_auth_from_env():
    """
    Retrieve AWS credentials from the environment and remove them.

    Returns a (key_id, secret_key) tuple; an entry is None when the
    corresponding variable is unset. Both variables are deleted from
    os.environ only when both are present, so callers can decide how
    to handle partially- or un-configured environments.
    """
    key_id = os.environ.get("AWS_ACCESS_KEY_ID")
    secret = os.environ.get("AWS_SECRET_ACCESS_KEY")

    if key_id and secret:  # we passed creds via environment
        os.environ.pop("AWS_ACCESS_KEY_ID")
        os.environ.pop("AWS_SECRET_ACCESS_KEY")

    return key_id, secret
--- a/testing/mozharness/scripts/release/beet_mover.py
+++ b/testing/mozharness/scripts/release/beet_mover.py
@@ -15,41 +15,25 @@ import pprint
import re
from os import listdir
from os.path import isfile, join
sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
from mozharness.base.log import FATAL
from mozharness.base.python import VirtualenvMixin
from mozharness.base.script import BaseScript
-import mozharness
+from mozharness.mozilla.aws import pop_aws_auth_from_env
def get_hash(content, hash_type="md5"):
    """Return the hex digest of *content* computed with *hash_type*.

    *content* must be bytes; *hash_type* is any algorithm name accepted
    by hashlib.new (defaults to md5, which is used for checksum
    manifests rather than security).
    """
    digest = hashlib.new(hash_type)
    digest.update(content)
    return digest.hexdigest()
-def get_aws_auth():
- """
- retrieves aws creds and deletes them from os.environ if present.
- """
- aws_key_id = os.environ.get("AWS_ACCESS_KEY_ID")
- aws_secret_key = os.environ.get("AWS_SECRET_ACCESS_KEY")
-
- if aws_key_id and aws_secret_key:
- del os.environ['AWS_ACCESS_KEY_ID']
- del os.environ['AWS_SECRET_ACCESS_KEY']
- else:
- exit("could not determine aws credentials from os environment")
-
- return aws_key_id, aws_secret_key
-
-
CONFIG_OPTIONS = [
[["--template"], {
"dest": "template",
"help": "Specify jinja2 template file",
}],
[['--locale', ], {
"action": "extend",
"dest": "locales",
@@ -136,19 +120,17 @@ class BeetMover(BaseScript, VirtualenvMi
'download-bits', # beets
'scan-bits', # beets
'upload-bits', # beets
],
'require_config_file': False,
# Default configuration
'config': {
# base index url where to find taskcluster artifact based on taskid
- # TODO - find out if we need to support taskcluster run number other than 0.
- # e.g. maybe we could end up with artifacts in > 'run 0' in a re-trigger situation?
- "artifact_base_url": 'https://queue.taskcluster.net/v1/task/{taskid}/runs/0/artifacts/public/{subdir}',
+ "artifact_base_url": 'https://queue.taskcluster.net/v1/task/{taskid}/artifacts/public/{subdir}',
"virtualenv_modules": [
"boto",
"PyYAML",
"Jinja2",
"redo",
"mar",
],
"virtualenv_path": "venv",
@@ -162,16 +144,18 @@ class BeetMover(BaseScript, VirtualenvMi
#todo do excludes need to be configured via command line for specific builds?
super(BeetMover, self).__init__(**beetmover_kwargs)
c = self.config
self.manifest = {}
# assigned in _post_create_virtualenv
self.virtualenv_imports = None
self.bucket = c['buckets']['production'] if c['production'] else c['buckets']['development']
+ if not all(aws_creds):
+ self.fatal('credentials must be passed in env: "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"')
self.aws_key_id, self.aws_secret_key = aws_creds
# if excludes is set from command line, use it otherwise use defaults
self.excludes = self.config.get('excludes', DEFAULT_EXCLUDES)
dirs = self.query_abs_dirs()
self.dest_dir = os.path.join(dirs['abs_work_dir'], CACHE_DIR)
def activate_virtualenv(self):
"""
@@ -334,10 +318,10 @@ class BeetMover(BaseScript, VirtualenvMi
self.run_command([self.query_python_path(), os.path.join(external_tools_path,'extract_and_run_command.py'),
'-j{}'.format(self.config['scan_parallelization']),
'clamscan', '--no-summary', '--', self.dest_dir])
def _matches_exclude(self, keyname):
return any(re.search(exclude, keyname) for exclude in self.excludes)
if __name__ == '__main__':
- beet_mover = BeetMover(get_aws_auth())
+ beet_mover = BeetMover(pop_aws_auth_from_env())
beet_mover.run_and_exit()
--- a/testing/mozharness/scripts/release/push-candidate-to-releases.py
+++ b/testing/mozharness/scripts/release/push-candidate-to-releases.py
@@ -1,17 +1,19 @@
from multiprocessing.pool import ThreadPool
import os
import re
import sys
+
sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
from mozharness.base.python import VirtualenvMixin, virtualenv_config_options
from mozharness.base.script import BaseScript
+from mozharness.mozilla.aws import pop_aws_auth_from_env
class ReleasePusher(BaseScript, VirtualenvMixin):
config_options = [
[["--product"], {
"dest": "product",
"help": "Product being released, eg: firefox, thunderbird",
}],
@@ -40,17 +42,17 @@ class ReleasePusher(BaseScript, Virtuale
[["-j", "--parallelization"], {
"dest": "parallelization",
"default": 20,
"type": "int",
"help": "Number of copy requests to run concurrently",
}],
] + virtualenv_config_options
- def __init__(self):
+ def __init__(self, aws_creds):
BaseScript.__init__(self,
config_options=self.config_options,
require_config_file=False,
config={
"virtualenv_modules": [
"boto",
"redo",
],
@@ -63,19 +65,30 @@ class ReleasePusher(BaseScript, Virtuale
],
default_actions=[
"create-virtualenv",
"activate-virtualenv",
"push-to-releases",
],
)
- # set the env var for boto to read our special config file
- # rather than anything else we have at ~/.boto
- os.environ["BOTO_CONFIG"] = os.path.abspath(self.config["credentials"])
+ # validate aws credentials
+ if not (all(aws_creds) or self.config.get('credentials')):
+ self.fatal("aws creds not defined. please add them to your config or env.")
+ if any(aws_creds) and self.config.get('credentials'):
+ self.fatal("aws creds found in env and self.config. please declare in one place only.")
+
+ # set aws credentials
+ if aws_creds:
+ self.aws_key_id, self.aws_secret_key = aws_creds
+ else: # use
+ self.aws_key_id, self.aws_secret_key = None, None
+ # set the env var for boto to read our special config file
+ # rather than anything else we have at ~/.boto
+ os.environ["BOTO_CONFIG"] = os.path.abspath(self.config["credentials"])
def _pre_config_lock(self, rw_config):
super(ReleasePusher, self)._pre_config_lock(rw_config)
# This default is set here rather in the config because default
# lists cannot be completely overidden, only appended to.
if not self.config.get("excludes"):
self.config["excludes"] = [
@@ -123,17 +136,18 @@ class ReleasePusher(BaseScript, Virtuale
from boto.exception import S3CopyError, S3ResponseError
from redo import retry
# suppress boto debug logging, it's too verbose with --loglevel=debug
import logging
logging.getLogger('boto').setLevel(logging.INFO)
self.info("Connecting to S3")
- conn = S3Connection()
+ conn = S3Connection(aws_access_key_id=self.aws_key_id,
+ aws_secret_access_key=self.aws_secret_key)
self.info("Getting bucket {}".format(self.config["bucket_name"]))
bucket = conn.get_bucket(self.config["bucket_name"])
# ensure the destination is empty
self.info("Checking destination {} is empty".format(self._get_releases_prefix()))
keys = [k for k in bucket.list(prefix=self._get_releases_prefix())]
if keys:
self.fatal("Destination already exists with %s keys, aborting" %
@@ -162,10 +176,10 @@ class ReleasePusher(BaseScript, Virtuale
destination = keyname.replace(candidates_prefix,
release_prefix)
yield (keyname, destination)
pool = ThreadPool(self.config["parallelization"])
pool.map(worker, find_release_files())
if __name__ == "__main__":
- myScript = ReleasePusher()
+ myScript = ReleasePusher(pop_aws_auth_from_env())
myScript.run_and_exit()