bug 1353770, update docker-py to docker 2.2.1, r?gps (draft)
author: Axel Hecht <axel@pike.org>
Wed, 05 Apr 2017 17:49:55 +0200
changeset 10673 0c56378c20fcd15200ed66809acb124b2c0ed8d7
parent 10668 414d5bf9a3e33c5dfd0a92b53881e3b1811daa67
push id: 1607
push user: axel@mozilla.com
push date: Wed, 05 Apr 2017 15:50:16 +0000
reviewers: gps
bugs: 1353770
bug 1353770, update docker-py to docker 2.2.1, r?gps

The changes are mostly mechanical: docker.Client -> docker.DockerClient, and self.client -> self.client.api. I used DockerClient because there's an opportunity to streamline a few hand-woven things; at least execute() is one. Real changes: hardcoding the gateway to be localhost, because the gateway IP of the bridge network isn't reachable from the host on docker-for-mac; and using docker.utils.json_stream.json_stream, which gets around the json.loads errors when reading more than one line at a time.

MozReview-Commit-ID: IXrhGo8aYdS
test-requirements.txt
testing/vcttesting/docker.py
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -56,18 +56,17 @@ django-pipeline==1.3.27 \
 
 dnspython==1.15.0 \
     --hash=sha256:861e6e58faa730f9845aaaa9c6c832851fbf89382ac52915a51f89c71accdd31 \
     --hash=sha256:40f563e1f7a7b80dc5a4e76ad75c23da53d62f1e15e6e517293b04e1f84ead7c
 
 django-storages==1.4.1 \
     --hash=sha256:0ad7049caa7148b846906a7e114e5d245dba714a7a1ef895150234ae25788c46
 
-docker-py==1.10.6 \
-    --hash=sha256:35b506e95861914fa5ad57a6707e3217b4082843b883be246190f57013948aba
+docker==2.2.1
 
 docker-pycreds==0.2.1 \
     --hash=sha256:58d2688f92de5d6f1a6ac4fe25da461232f0e0a4c1212b93b256b046b2d714a9
 
 dulwich==0.16.1 \
     --hash=sha256:470d0feec9d4e7aba091c02f62db7f9cc6549ffe3f623a8039f96f584159da05
 
 enum34==1.1.1 \
--- a/testing/vcttesting/docker.py
+++ b/testing/vcttesting/docker.py
@@ -28,16 +28,17 @@ import uuid
 import warnings
 
 import backports.lzma as lzma
 
 from docker.errors import (
     APIError as DockerAPIError,
     DockerException,
 )
+from docker.utils.json_stream import json_stream
 from contextlib import contextmanager
 from io import BytesIO
 
 import concurrent.futures as futures
 from coverage.data import CoverageData
 
 from .util import (
     limited_threadpoolexecutor,
@@ -198,41 +199,41 @@ class Docker(object):
             'last-vct-id',
             'last-treestatus-id',
             'vct-cid',
         )
         for k in keys:
             self.state.setdefault(k, None)
 
         try:
-            self.client = docker.Client(base_url=url, tls=tls, version='auto')
+            self.client = docker.DockerClient(base_url=url, tls=tls, version='auto')
         except DockerException:
             self.client = None
             return
 
         # We need API 1.22+ for some networking APIs.
-        if docker.utils.compare_version('1.22', self.client.api_version) < 0:
+        if docker.utils.compare_version('1.22', self.client.api.api_version) < 0:
             warnings.warn('Warning: unable to speak to Docker servers older '
                           'than Docker 1.10.x')
             self.client = None
             return
 
         # Try to obtain a network hostname for the Docker server. We use this
         # for determining where to look for opened ports.
         # This is a bit complicated because Docker can be running from a local
         # socket or or another host via something like boot2docker.
-        docker_url = urlparse.urlparse(self.client.base_url)
+        docker_url = urlparse.urlparse(self.client.api.base_url)
         self.docker_hostname = docker_url.hostname
         if docker_url.hostname in ('localunixsocket', 'localhost', '127.0.0.1'):
-            networks = self.client.networks()
+            networks = self.client.api.networks()
             for network in networks:
                 if network['Name'] == 'bridge':
                     ipam = network['IPAM']
                     try:
-                        addr = ipam['Config'][0]['Gateway']
+                        addr = '127.0.0.1'
                     except KeyError:
                         warnings.warn('Warning: Unable to determine ip '
                                       'address of the docker gateway. Please '
                                       'ensure docker is listening on a tcp '
                                       'socket by setting -H '
                                       'tcp://127.0.0.1:4243 in your docker '
                                       'configuration file.')
                         self.client = None
@@ -243,17 +244,17 @@ class Docker(object):
 
     def is_alive(self):
         """Whether the connection to Docker is alive."""
         if not self.client:
             return False
 
         # This is a layering violation with docker.client, but meh.
         try:
-            self.client._get(self.client._url('/version'), timeout=5)
+            self.client.api._get(self.client.api._url('/version'), timeout=5)
             return True
         except requests.exceptions.RequestException:
             return False
 
     def _get_vct_files(self):
         """Obtain all the files in the version-control-tools repo.
 
         Returns a dict of relpath to full path.
@@ -371,17 +372,17 @@ class Docker(object):
             fh.flush()
             fh.seek(0)
 
             # Docker 1.10 no longer appears to allow import of .xz files
             # directly. Do the decompress locally.
             if url.endswith('.xz'):
                 fh = lzma.decompress(fh.read())
 
-            res = self.client.import_image_from_data(
+            res = self.client.api.import_image_from_data(
                 fh, repository=repository, tag=tag)
             # docker-py doesn't parse the JSON response in what is almost
             # certainly a bug. Do it ourselves.
             return json.loads(res.strip())['status']
 
     def ensure_built(self, name, verbose=False, use_last=False):
         """Ensure a Docker image from a builder directory is built and up to
         date.
@@ -509,44 +510,43 @@ class Docker(object):
         # Need to seek to beginning so .read() inside docker.client will return
         # data.
         buf.seek(0)
 
         # The API here is wonky, possibly due to buggy behavior in
         # docker.client always setting stream=True if version > 1.8.
         # We assume this is a bug that will change behavior later and work
         # around it by ensuring consistent behavior.
-        for stream in self.client.build(fileobj=buf, custom_context=True,
-                                        rm=True, stream=True):
-            s = json.loads(stream)
+        for s in json_stream(self.client.api.build(fileobj=buf, custom_context=True,
+                                        rm=True, stream=True)):
             if 'stream' not in s:
                 continue
 
             s = s['stream']
             if verbose:
                 for l in s.strip().splitlines():
                     sys.stdout.write('%s> %s\n' % (name, l))
             match = re.match('^Successfully built ([a-f0-9]{12})$', s.rstrip())
             if match:
                 image = match.group(1)
                 # There is likely a trailing newline.
                 full_image = self.get_full_image(image.rstrip())
 
                 # We only tag the image once to avoid redundancy.
                 have_tag = False
-                for i in self.client.images():
+                for i in self.client.api.images():
                     if i['Id'] == full_image:
                         for repotag in i['RepoTags']:
                             repo, tag = repotag.split(':')
                             if repo == name:
                                 have_tag = True
 
                         break
                 if not have_tag:
-                    self.client.tag(full_image, name, str(uuid.uuid1()))
+                    self.client.api.tag(full_image, name, str(uuid.uuid1()))
 
                 return full_image
 
         raise Exception('Unable to confirm image was built: %s' % name)
 
     def ensure_images_built(self, names, ansibles=None, existing=None,
                             verbose=False, use_last=False, max_workers=None):
         """Ensure that multiple images are built.
@@ -711,50 +711,50 @@ class Docker(object):
         # error will be raised creating a new container. Since Ansible
         # containers are incremental images, it's only a matter of time before
         # this limit gets hit.
         #
         # When we approach this limit, walk the stack of images and reset the
         # base image to the first image built with Ansible. This ensures
         # some cache hits and continuation and prevents us from brushing into
         # the limit.
-        history = self.client.history(start_image)
+        history = self.client.api.history(start_image)
         if len(history) > 120:
             # Newest to oldest.
             for base in history:
                 if base['CreatedBy'].startswith('/sync-and-build'):
                     start_image = base['Id']
 
         with self.vct_container(image=vct_image, cid=vct_cid, verbose=verbose) \
                 as vct_state:
             cmd = ['/sync-and-build', '%s.yml' % playbook]
-            host_config = self.client.create_host_config(
+            host_config = self.client.api.create_host_config(
                 volumes_from=[vct_state['Name']])
             with self.create_container(start_image, command=cmd, host_config=host_config) as cid:
                 output = deque(maxlen=20)
-                self.client.start(cid)
+                self.client.api.start(cid)
 
-                for s in self.client.attach(cid, stream=True, logs=True):
+                for s in self.client.api.attach(cid, stream=True, logs=True):
                     for line in s.splitlines():
                         if line != '':
                             output.append(line)
                             if verbose:
                                 print('%s> %s' % (repository, line))
 
-                state = self.client.inspect_container(cid)
+                state = self.client.api.inspect_container(cid)
                 if state['State']['ExitCode']:
                     # This should arguably be part of the exception.
                     for line in output:
                         print('ERROR %s> %s' % (repository, line))
                     raise Exception('Ansible did not run on %s successfully' %
                                     repository)
 
                 tag = str(uuid.uuid1())
 
-                iid = self.client.commit(cid['Id'], repository=repository,
+                iid = self.client.api.commit(cid['Id'], repository=repository,
                                          tag=tag)['Id']
                 iid = self.get_full_image(iid)
                 return iid, repository, tag
 
     def build_hgmo(self, images=None, verbose=False, use_last=False):
         """Ensure the images for a hg.mozilla.org service are built.
 
         hg-master runs the ssh service while hg-slave runs hgweb. The mirroring
@@ -891,20 +891,20 @@ class Docker(object):
         for this every time the containers start. So, we do the first run code
         once and commit the result to a new image.
         """
         web_environ = {}
 
         if 'FETCH_BMO' in os.environ or self.clobber_needed('bmofetch'):
             web_environ['FETCH_BMO'] = '1'
 
-        host_config = self.client.create_host_config(
+        host_config = self.client.api.create_host_config(
             port_bindings={80: None})
 
-        web_id = self.client.create_container(
+        web_id = self.client.api.create_container(
             web_image,
             environment=web_environ,
             host_config=host_config,
             labels=['bmoweb-bootstrapping'])['Id']
 
         with self.start_container(web_id) as web_state:
             web_hostname, web_port = self._get_host_hostname_port(
                 web_state, '80/tcp')
@@ -917,20 +917,20 @@ class Docker(object):
         web_unique_id = str(uuid.uuid1())
 
         # Save an image of the stopped containers.
         # We tag with a unique ID so we can identify all bootrapped images
         # easily from Docker's own metadata. We have to give a tag becaue
         # Docker will forget the repository name if a name image has only a
         # repository name as well.
 
-        web_bootstrap = self.client.commit(
+        web_bootstrap = self.client.api.commit(
             web_id, repository='bmoweb-bootstrapped', tag=web_unique_id)['Id']
 
-        self.client.remove_container(web_id, v=True)
+        self.client.api.remove_container(web_id, v=True)
 
         return web_bootstrap
 
     def start_bmo(self, cluster, http_port=80,
                   web_image=None, verbose=False):
         """Start a bugzilla.mozilla.org cluster.
 
         Code in this function is pretty much inlined in self.start_mozreview
@@ -940,30 +940,30 @@ class Docker(object):
         """
         if not web_image:
             images = self.build_bmo(verbose=verbose)
             web_image = images['bmoweb']
 
         containers = self.state['containers'].setdefault(cluster, [])
 
         network_name = 'bmo-%s' % uuid.uuid4()
-        self.client.create_network(network_name, driver='bridge')
+        self.client.api.create_network(network_name, driver='bridge')
 
         bmo_url = 'http://%s:%s/' % (self.docker_hostname, http_port)
-        bmo_host_config = self.client.create_host_config(
+        bmo_host_config = self.client.api.create_host_config(
             port_bindings={80: http_port})
-        web_id = self.client.create_container(
+        web_id = self.client.api.create_container(
             web_image,
             environment={'BMO_URL': bmo_url},
             host_config=bmo_host_config,
             networking_config=self.network_config(network_name, 'web'),
             labels=['bmoweb'])['Id']
         containers.append(web_id)
-        self.client.start(web_id)
-        web_state = self.client.inspect_container(web_id)
+        self.client.api.start(web_id)
+        web_state = self.client.api.inspect_container(web_id)
 
         self.save_state()
 
         hostname, hostport = self._get_host_hostname_port(web_state, '80/tcp')
         bmo_url = 'http://%s:%d/' % (hostname, hostport)
 
         print('waiting for Bugzilla to start')
         wait_for_http(
@@ -1369,41 +1369,41 @@ class Docker(object):
         count = 0
 
         ids = self.state['containers'].get(cluster, [])
         networks = set()
 
         with futures.ThreadPoolExecutor(max(1, len(ids))) as e:
             for container in reversed(ids):
                 if count == 0:
-                    state = self.client.inspect_container(container)
+                    state = self.client.api.inspect_container(container)
                     for network in state['NetworkSettings']['Networks'].values():
                         networks.add(network['NetworkID'])
 
                 count += 1
-                e.submit(self.client.remove_container, container, force=True,
+                e.submit(self.client.api.remove_container, container, force=True,
                          v=True)
 
         # There should only be 1, so don't use a ThreadPoolExecutor.
         for network in networks:
-            self.client.remove_network(network)
+            self.client.api.remove_network(network)
 
         print('stopped %d containers' % count)
 
         try:
             del self.state['containers'][cluster]
             self.save_state()
         except KeyError:
             pass
 
     def network_config(self, network_name, alias):
         """Obtain a networking config object."""
-        return self.client.create_networking_config(
+        return self.client.api.create_networking_config(
             endpoints_config={
-                network_name: self.client.create_endpoint_config(
+                network_name: self.client.api.create_endpoint_config(
                     aliases=[alias],
                 )
             }
         )
 
     def build_all_images(self, verbose=False, use_last=False, mozreview=True,
                          hgmo=True, bmo=True, max_workers=None):
         docker_images = set()
@@ -1462,30 +1462,30 @@ class Docker(object):
         hgmo_result = f_hgmo.result() if hgmo else None
         bmo_result = f_bmo.result() if bmo else None
 
         self.prune_images()
 
         return mr_result, hgmo_result, bmo_result
 
     def get_full_image(self, image):
-        for i in self.client.images():
+        for i in self.client.api.images():
             iid = i['Id']
             if iid.startswith('sha256:'):
                 iid = iid[7:]
 
             if iid[0:12] == image:
                 return i['Id']
 
         return image
 
     def prune_images(self):
         """Prune images that are old and likely unused."""
         running = set(self.get_full_image(c['Image'])
-                      for c in self.client.containers())
+                      for c in self.client.api.containers())
 
         ignore_images = set([
             self.state['last-autoland-id'],
             self.state['last-autolanddb-id'],
             self.state['last-bmoweb-id'],
             self.state['last-hgrb-id'],
             self.state['last-pulse-id'],
             self.state['last-rbweb-id'],
@@ -1511,17 +1511,17 @@ class Docker(object):
             'hgweb',
             'ldap',
             'vct',
             'treestatus',
         ])
 
         to_delete = {}
 
-        for i in self.client.images():
+        for i in self.client.api.images():
             iid = i['Id']
 
             # Don't do anything with images attached to running containers -
             # Docker won't allow it.
             if iid in running:
                 continue
 
             # Don't do anything with our last used images.
@@ -1537,67 +1537,67 @@ class Docker(object):
         retained = {}
         for key, image in sorted(self.state['images'].items()):
             if image not in to_delete:
                 retained[key] = image
 
         with futures.ThreadPoolExecutor(8) as e:
             for image, repo in to_delete.items():
                 print('Pruning old %s image %s' % (repo, image))
-                e.submit(self.client.remove_image, image)
+                e.submit(self.client.api.remove_image, image)
 
         self.state['images'] = retained
         self.save_state()
 
     def save_state(self):
         with open(self._state_path, 'wb') as fh:
             json.dump(self.state, fh, indent=4, sort_keys=True)
 
     def all_docker_images(self):
         """Obtain the set of all known Docker image IDs."""
-        return {i['Id'] for i in self.client.images(all=True)}
+        return {i['Id'] for i in self.client.api.images(all=True)}
 
     @contextmanager
     def start_container(self, cid, **kwargs):
         """Context manager for starting and stopping a Docker container.
 
         The container with id ``cid`` will be started when the context manager
         is entered and stopped when the context manager is execited.
 
         The context manager receives the inspected state of the container,
         immediately after it is started.
         """
-        self.client.start(cid, **kwargs)
+        self.client.api.start(cid, **kwargs)
         try:
-            state = self.client.inspect_container(cid)
+            state = self.client.api.inspect_container(cid)
             yield state
         finally:
             try:
-                self.client.stop(cid, timeout=20)
+                self.client.api.stop(cid, timeout=20)
             except DockerAPIError as e:
                 # Silently ignore failures if the container doesn't exist, as
                 # the container is obviously stopped.
                 if e.response.status_code != 404:
                     raise
 
     @contextmanager
     def create_container(self, image, remove_volumes=False, **kwargs):
         """Context manager for creating a temporary container.
 
         A container will be created from an image. When the context manager
         exists, the container will be removed.
 
         This context manager is useful for temporary containers that shouldn't
         outlive the life of the process.
         """
-        s = self.client.create_container(image, **kwargs)
+        s = self.client.api.create_container(image, **kwargs)
         try:
             yield s
         finally:
-            self.client.remove_container(s['Id'], force=True, v=remove_volumes)
+            self.client.api.remove_container(s['Id'], force=True, v=remove_volumes)
 
     @contextmanager
     def vct_container(self, image=None, cid=None, verbose=False):
         """Obtain a container with content of v-c-t available inside.
 
         We employ some hacks to make this as fast as possible. Three run modes
         are possible:
 
@@ -1608,51 +1608,51 @@ class Docker(object):
         The multiple code paths make the logic a bit difficult. But it makes
         code in consumers slightly easier to follow.
         """
         existing_cid = self.state['vct-cid']
 
         # If we're going to use an existing container, verify it exists.
         if not cid and existing_cid:
             try:
-                state = self.client.inspect_container(existing_cid)
+                state = self.client.api.inspect_container(existing_cid)
             except DockerAPIError:
                 existing_cid = None
                 self.state['vct-cid'] = None
 
         # Build the image if we're in temporary container mode.
         if not image and not cid and not existing_cid:
             image = self.ensure_built('vct', verbose=verbose)
 
         start = False
 
         if cid:
-            state = self.client.inspect_container(cid)
+            state = self.client.api.inspect_container(cid)
             if not state['State']['Running']:
                 raise RuntimeError(
                     "Container '%s' should have been started by the calling "
                     "function, but is not running" % cid)
         elif existing_cid:
             cid = existing_cid
             start = True
         else:
-            host_config = self.client.create_host_config(
+            host_config = self.client.api.create_host_config(
                 port_bindings={873: None})
 
-            cid = self.client.create_container(image,
+            cid = self.client.api.create_container(image,
                                                volumes=['/vct-mount'],
                                                ports=[873],
                                                host_config=host_config,
                                                labels=['vct'])['Id']
             start = True
 
         try:
             if start:
-                self.client.start(cid)
-                state = self.client.inspect_container(cid)
+                self.client.api.start(cid)
+                state = self.client.api.inspect_container(cid)
                 ports = state['NetworkSettings']['Ports']
                 port = ports['873/tcp'][0]['HostPort']
                 url = 'rsync://%s:%s/vct-mount/' % (self.docker_hostname, port)
 
                 get_and_write_vct_node()
                 vct_paths = self._get_vct_files()
                 with tempfile.NamedTemporaryFile() as fh:
                     for f in sorted(vct_paths.keys()):
@@ -1665,61 +1665,61 @@ class Docker(object):
 
                 self.state['last-vct-id'] = image
                 self.state['vct-cid'] = cid
                 self.save_state()
 
             yield state
         finally:
             if start:
-                self.client.stop(cid)
+                self.client.api.stop(cid)
 
     @contextmanager
     def auto_clean_orphans(self):
         if not self.is_alive():
             yield
             return
 
-        containers = {c['Id'] for c in self.client.containers(all=True)}
-        images = {i['Id'] for i in self.client.images(all=True)}
-        networks = {n['Id'] for n in self.client.networks()}
+        containers = {c['Id'] for c in self.client.api.containers(all=True)}
+        images = {i['Id'] for i in self.client.api.images(all=True)}
+        networks = {n['Id'] for n in self.client.api.networks()}
         try:
             yield
         finally:
             with futures.ThreadPoolExecutor(8) as e:
-                for c in self.client.containers(all=True):
+                for c in self.client.api.containers(all=True):
                     if c['Id'] not in containers:
-                        e.submit(self.client.remove_container, c['Id'],
+                        e.submit(self.client.api.remove_container, c['Id'],
                                  force=True, v=True)
 
             with futures.ThreadPoolExecutor(8) as e:
-                for i in self.client.images(all=True):
+                for i in self.client.api.images(all=True):
                     if i['Id'] not in images:
-                        e.submit(self.client.remove_image, c['Id'])
+                        e.submit(self.client.api.remove_image, i['Id'])
 
             with futures.ThreadPoolExecutor(8) as e:
-                for n in self.client.networks():
+                for n in self.client.api.networks():
                     if n['Id'] not in networks:
-                        e.submit(self.client.remove_network, n['Id'])
+                        e.submit(self.client.api.remove_network, n['Id'])
 
     def execute(self, cid, cmd, stdout=False, stderr=False, stream=False,
                 detach=False):
         """Execute a command on a container.
 
         Returns the output of the command.
 
         This mimics the old docker.execute() API, which was removed in
         docker-py 1.3.0.
         """
-        r = self.client.exec_create(cid, cmd, stdout=stdout, stderr=stderr)
-        return self.client.exec_start(r['Id'], stream=stream, detach=detach)
+        r = self.client.api.exec_create(cid, cmd, stdout=stdout, stderr=stderr)
+        return self.client.api.exec_start(r['Id'], stream=stream, detach=detach)
 
     def get_file_content(self, cid, path):
         """Get the contents of a file from a container."""
-        r, stat = self.client.get_archive(cid, path)
+        r, stat = self.client.api.get_archive(cid, path)
         buf = BytesIO(r.read())
         buf.seek(0)
         t = tarfile.open(mode='r', fileobj=buf)
         fp = t.extractfile(os.path.basename(path))
         return fp.read()
 
     def get_directory_contents(self, cid, path, tar='/bin/tar'):
         """Obtain the contents of all files in a directory in a container.
@@ -1815,24 +1815,24 @@ class Docker(object):
         gateway = state['NetworkSettings']['Gateway']
         return gateway, host_port
 
     def _get_assert_container_running_fn(self, cid):
         """Obtain a function that raises during invocation if a container
         stops."""
         def assert_running():
             try:
-                info = self.client.inspect_container(cid)
+                info = self.client.api.inspect_container(cid)
             except DockerAPIError as e:
                 if e.response.status_code == 404:
                     raise Exception('Container does not exist '
                                     '(stopped running?): %s' % cid)
 
                 raise
 
             if not info['State']['Running']:
                 raise Exception('Container stopped running: %s' % cid)
 
         return assert_running
 
     def _get_sorted_images(self):
-        return sorted(self.client.images(), key=lambda x: x['Created'],
+        return sorted(self.client.api.images(), key=lambda x: x['Created'],
                       reverse=True)