63 files changed, 2344 insertions, 244 deletions
@@ -1,15 +1,15 @@ Metadata-Version: 2.1 Name: docker -Version: 4.1.0 +Version: 4.4.4 Summary: A Python library for the Docker Engine API. Home-page: https://github.com/docker/docker-py Maintainer: Joffrey F Maintainer-email: joffrey@docker.com License: Apache License 2.0 -Project-URL: Source, https://github.com/docker/docker-py Project-URL: Documentation, https://docker-py.readthedocs.io -Project-URL: Tracker, https://github.com/docker/docker-py/issues Project-URL: Changelog, https://docker-py.readthedocs.io/en/stable/change-log.html +Project-URL: Source, https://github.com/docker/docker-py +Project-URL: Tracker, https://github.com/docker/docker-py/issues Description: # Docker SDK for Python [![Build Status](https://travis-ci.org/docker/docker-py.svg?branch=master)](https://travis-ci.org/docker/docker-py) @@ -70,7 +70,7 @@ Description: # Docker SDK for Python ```python >>> for line in container.logs(stream=True): - ... print line.strip() + ... print(line.strip()) Reticulating spline 2... Reticulating spline 3... ... @@ -100,6 +100,8 @@ Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 Classifier: Topic :: Software Development Classifier: Topic :: Utilities Classifier: License :: OSI Approved :: Apache Software License @@ -58,7 +58,7 @@ You can stream logs: ```python >>> for line in container.logs(stream=True): -... print line.strip() +... print(line.strip()) Reticulating spline 2... Reticulating spline 3... ... diff --git a/docker.egg-info/PKG-INFO b/docker.egg-info/PKG-INFO index 8754725..3f43ae6 100644 --- a/docker.egg-info/PKG-INFO +++ b/docker.egg-info/PKG-INFO @@ -1,15 +1,15 @@ Metadata-Version: 2.1 Name: docker -Version: 4.1.0 +Version: 4.4.4 Summary: A Python library for the Docker Engine API. Home-page: https://github.com/docker/docker-py Maintainer: Joffrey F Maintainer-email: joffrey@docker.com License: Apache License 2.0 -Project-URL: Source, https://github.com/docker/docker-py Project-URL: Documentation, https://docker-py.readthedocs.io -Project-URL: Tracker, https://github.com/docker/docker-py/issues Project-URL: Changelog, https://docker-py.readthedocs.io/en/stable/change-log.html +Project-URL: Source, https://github.com/docker/docker-py +Project-URL: Tracker, https://github.com/docker/docker-py/issues Description: # Docker SDK for Python [![Build Status](https://travis-ci.org/docker/docker-py.svg?branch=master)](https://travis-ci.org/docker/docker-py) @@ -70,7 +70,7 @@ Description: # Docker SDK for Python ```python >>> for line in container.logs(stream=True): - ... print line.strip() + ... print(line.strip()) Reticulating spline 2... Reticulating spline 3... ... 
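The README snippet above only shows the loop body that changed for Python 3; a minimal, self-contained version of that logs example (assuming a local daemon and the public `alpine` image) looks like this:

```python
import docker

# Connect using the environment (DOCKER_HOST, etc.); assumes a local daemon.
client = docker.from_env()

# Run a throwaway container and stream its output line by line.
container = client.containers.run(
    "alpine",
    ["sh", "-c", "for i in 1 2 3; do echo line $i; done"],
    detach=True,
)
for line in container.logs(stream=True):
    print(line.strip())  # each line is bytes, e.g. b'line 1'

container.remove(force=True)
```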
@@ -100,6 +100,8 @@ Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 Classifier: Topic :: Software Development Classifier: Topic :: Utilities Classifier: License :: OSI Approved :: Apache Software License diff --git a/docker.egg-info/SOURCES.txt b/docker.egg-info/SOURCES.txt index 6daee4c..e0a62dd 100644 --- a/docker.egg-info/SOURCES.txt +++ b/docker.egg-info/SOURCES.txt @@ -32,6 +32,10 @@ docker/api/secret.py docker/api/service.py docker/api/swarm.py docker/api/volume.py +docker/context/__init__.py +docker/context/api.py +docker/context/config.py +docker/context/context.py docker/credentials/__init__.py docker/credentials/constants.py docker/credentials/errors.py @@ -95,6 +99,7 @@ tests/integration/api_volume_test.py tests/integration/base.py tests/integration/client_test.py tests/integration/conftest.py +tests/integration/context_api_test.py tests/integration/errors_test.py tests/integration/models_containers_test.py tests/integration/models_images_test.py @@ -110,6 +115,9 @@ tests/integration/credentials/store_test.py tests/integration/credentials/utils_test.py tests/integration/testdata/dummy-plugin/config.json tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt +tests/ssh/__init__.py +tests/ssh/api_build_test.py +tests/ssh/base.py tests/unit/__init__.py tests/unit/api_build_test.py tests/unit/api_container_test.py @@ -120,6 +128,7 @@ tests/unit/api_test.py tests/unit/api_volume_test.py tests/unit/auth_test.py tests/unit/client_test.py +tests/unit/context_test.py tests/unit/dockertypes_test.py tests/unit/errors_test.py tests/unit/fake_api.py @@ -130,6 +139,7 @@ tests/unit/models_images_test.py tests/unit/models_networks_test.py tests/unit/models_resources_test.py tests/unit/models_services_test.py +tests/unit/sshadapter_test.py tests/unit/ssladapter_test.py tests/unit/swarm_test.py tests/unit/types_containers_test.py diff --git a/docker.egg-info/requires.txt b/docker.egg-info/requires.txt index 415d2e6..d4c77af 100644 --- a/docker.egg-info/requires.txt +++ b/docker.egg-info/requires.txt @@ -8,11 +8,8 @@ ipaddress>=1.0.16 [:python_version < "3.5"] backports.ssl_match_hostname>=3.5 -[:sys_platform == "win32" and python_version < "3.6"] -pypiwin32==219 - -[:sys_platform == "win32" and python_version >= "3.6"] -pypiwin32==223 +[:sys_platform == "win32"] +pywin32==227 [ssh] paramiko>=2.4.2 diff --git a/docker/__init__.py b/docker/__init__.py index cf732e1..e5c1a8f 100644 --- a/docker/__init__.py +++ b/docker/__init__.py @@ -1,6 +1,9 @@ # flake8: noqa from .api import APIClient from .client import DockerClient, from_env +from .context import Context +from .context import ContextAPI +from .tls import TLSConfig from .version import version, version_info __version__ = version diff --git a/docker/api/client.py b/docker/api/client.py index 35dc84e..2b67291 100644 --- a/docker/api/client.py +++ b/docker/api/client.py @@ -7,6 +7,19 @@ import requests.exceptions import six import websocket +from .. 
import auth +from ..constants import (DEFAULT_NUM_POOLS, DEFAULT_NUM_POOLS_SSH, + DEFAULT_MAX_POOL_SIZE, DEFAULT_TIMEOUT_SECONDS, + DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM, + MINIMUM_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES) +from ..errors import (DockerException, InvalidVersion, TLSParameterError, + create_api_error_from_http_exception) +from ..tls import TLSConfig +from ..transport import SSLHTTPAdapter, UnixHTTPAdapter +from ..utils import check_resource, config, update_headers, utils +from ..utils.json_stream import json_stream +from ..utils.proxy import ProxyConfig +from ..utils.socket import consume_socket_output, demux_adaptor, frames_iter from .build import BuildApiMixin from .config import ConfigApiMixin from .container import ContainerApiMixin @@ -19,22 +32,7 @@ from .secret import SecretApiMixin from .service import ServiceApiMixin from .swarm import SwarmApiMixin from .volume import VolumeApiMixin -from .. import auth -from ..constants import ( - DEFAULT_TIMEOUT_SECONDS, DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM, - DEFAULT_DOCKER_API_VERSION, MINIMUM_DOCKER_API_VERSION, - STREAM_HEADER_SIZE_BYTES, DEFAULT_NUM_POOLS_SSH, DEFAULT_NUM_POOLS -) -from ..errors import ( - DockerException, InvalidVersion, TLSParameterError, - create_api_error_from_http_exception -) -from ..tls import TLSConfig -from ..transport import SSLHTTPAdapter, UnixHTTPAdapter -from ..utils import utils, check_resource, update_headers, config -from ..utils.socket import frames_iter, consume_socket_output, demux_adaptor -from ..utils.json_stream import json_stream -from ..utils.proxy import ProxyConfig + try: from ..transport import NpipeHTTPAdapter except ImportError: @@ -91,6 +89,11 @@ class APIClient( user_agent (str): Set a custom user agent for requests to the server. credstore_env (dict): Override environment variables when calling the credential store process. + use_ssh_client (bool): If set to `True`, an ssh connection is made + via shelling out to the ssh client. Ensure the ssh client is + installed and configured on the host. + max_pool_size (int): The maximum number of connections + to save in the pool. 
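For illustration, a minimal sketch of the two new constructor arguments documented above (``use_ssh_client`` and ``max_pool_size``); the socket path and SSH host below are placeholders:

```python
import docker

# Larger connection pool for a busy local daemon.
local = docker.APIClient(
    base_url="unix://var/run/docker.sock",
    max_pool_size=32,
)

# Talk to a remote daemon over SSH by shelling out to the ssh binary
# instead of using paramiko; requires a configured ssh client on PATH.
remote = docker.APIClient(
    base_url="ssh://user@docker-host.example.com",  # placeholder host
    use_ssh_client=True,
)
print(local.version()["ApiVersion"])
```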
""" __attrs__ = requests.Session.__attrs__ + ['_auth_configs', @@ -102,7 +105,8 @@ class APIClient( def __init__(self, base_url=None, version=None, timeout=DEFAULT_TIMEOUT_SECONDS, tls=False, user_agent=DEFAULT_USER_AGENT, num_pools=None, - credstore_env=None): + credstore_env=None, use_ssh_client=False, + max_pool_size=DEFAULT_MAX_POOL_SIZE): super(APIClient, self).__init__() if tls and not base_url: @@ -138,7 +142,8 @@ class APIClient( if base_url.startswith('http+unix://'): self._custom_adapter = UnixHTTPAdapter( - base_url, timeout, pool_connections=num_pools + base_url, timeout, pool_connections=num_pools, + max_pool_size=max_pool_size ) self.mount('http+docker://', self._custom_adapter) self._unmount('http://', 'https://') @@ -152,7 +157,8 @@ class APIClient( ) try: self._custom_adapter = NpipeHTTPAdapter( - base_url, timeout, pool_connections=num_pools + base_url, timeout, pool_connections=num_pools, + max_pool_size=max_pool_size ) except NameError: raise DockerException( @@ -163,7 +169,8 @@ class APIClient( elif base_url.startswith('ssh://'): try: self._custom_adapter = SSHHTTPAdapter( - base_url, timeout, pool_connections=num_pools + base_url, timeout, pool_connections=num_pools, + max_pool_size=max_pool_size, shell_out=use_ssh_client ) except NameError: raise DockerException( @@ -183,14 +190,14 @@ class APIClient( self.base_url = base_url # version detection needs to be after unix adapter mounting - if version is None: - self._version = DEFAULT_DOCKER_API_VERSION - elif isinstance(version, six.string_types): - if version.lower() == 'auto': - self._version = self._retrieve_server_version() - else: - self._version = version + if version is None or (isinstance( + version, + six.string_types + ) and version.lower() == 'auto'): + self._version = self._retrieve_server_version() else: + self._version = version + if not isinstance(self._version, six.string_types): raise DockerException( 'Version parameter must be a string or None. Found {0}'.format( type(version).__name__ @@ -490,7 +497,7 @@ class APIClient( Args: dockercfg_path (str): Use a custom path for the Docker config file (default ``$HOME/.docker/config.json`` if present, - otherwise``$HOME/.dockercfg``) + otherwise ``$HOME/.dockercfg``) Returns: None diff --git a/docker/api/container.py b/docker/api/container.py index 45bd352..754b5dc 100644 --- a/docker/api/container.py +++ b/docker/api/container.py @@ -480,6 +480,9 @@ class ContainerApiMixin(object): For example, ``/dev/sda:/dev/xvda:rwm`` allows the container to have read-write access to the host's ``/dev/sda`` via a node named ``/dev/xvda`` inside the container. + device_requests (:py:class:`list`): Expose host resources such as + GPUs to the container, as a list of + :py:class:`docker.types.DeviceRequest` instances. dns (:py:class:`list`): Set custom DNS servers. dns_opt (:py:class:`list`): Additional options to be added to the container's ``resolv.conf`` file @@ -503,7 +506,7 @@ class ContainerApiMixin(object): bytes) or a string with a units identification char (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is specified without a units character, bytes are assumed as an - mem_reservation (int or str): Memory soft limit. + mem_reservation (float or str): Memory soft limit. mem_swappiness (int): Tune a container's memory swappiness behavior. Accepts number between 0 and 100. memswap_limit (str or int): Maximum amount of memory + swap a @@ -520,6 +523,8 @@ class ContainerApiMixin(object): - ``container:<name|id>`` Reuse another container's network stack. 
- ``host`` Use the host network stack. + This mode is incompatible with ``port_bindings``. + oom_kill_disable (bool): Whether to disable OOM killer. oom_score_adj (int): An integer value containing the score given to the container in order to tune OOM killer preferences. @@ -528,7 +533,8 @@ class ContainerApiMixin(object): pids_limit (int): Tune a container's pids limit. Set ``-1`` for unlimited. port_bindings (dict): See :py:meth:`create_container` - for more information. + for more information. + Imcompatible with ``host`` in ``network_mode``. privileged (bool): Give extended privileges to this container. publish_all_ports (bool): Publish all ports to the host. read_only (bool): Mount the container's root filesystem as read @@ -636,6 +642,8 @@ class ContainerApiMixin(object): network, using the IPv6 protocol. Defaults to ``None``. link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6) addresses. + driver_opt (dict): A dictionary of options to provide to the + network driver. Defaults to ``None``. Returns: (dict) An endpoint config. @@ -694,7 +702,8 @@ class ContainerApiMixin(object): return self._stream_raw_result(res, chunk_size, False) @utils.check_resource('container') - def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE): + def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE, + encode_stream=False): """ Retrieve a file or folder from a container in the form of a tar archive. @@ -705,6 +714,8 @@ class ContainerApiMixin(object): chunk_size (int): The number of bytes returned by each iteration of the generator. If ``None``, data will be streamed as it is received. Default: 2 MB + encode_stream (bool): Determines if data should be encoded + (gzip-compressed) during transmission. Default: False Returns: (tuple): First element is a raw tar data stream. Second element is @@ -729,8 +740,13 @@ class ContainerApiMixin(object): params = { 'path': path } + headers = { + "Accept-Encoding": "gzip, deflate" + } if encode_stream else { + "Accept-Encoding": "identity" + } url = self._url('/containers/{0}/archive', container) - res = self._get(url, params=params, stream=True) + res = self._get(url, params=params, stream=True, headers=headers) self._raise_for_status(res) encoded_stat = res.headers.get('x-docker-container-path-stat') return ( @@ -1120,7 +1136,7 @@ class ContainerApiMixin(object): else: if decode: raise errors.InvalidArgument( - "decode is only available in conjuction with stream=True" + "decode is only available in conjunction with stream=True" ) return self._result(self._get(url, params={'stream': False}), json=True) @@ -1206,8 +1222,8 @@ class ContainerApiMixin(object): cpu_shares (int): CPU shares (relative weight) cpuset_cpus (str): CPUs in which to allow execution cpuset_mems (str): MEMs in which to allow execution - mem_limit (int or str): Memory limit - mem_reservation (int or str): Memory soft limit + mem_limit (float or str): Memory limit + mem_reservation (float or str): Memory soft limit memswap_limit (int or str): Total memory (memory + swap), -1 to disable swap kernel_memory (int or str): Kernel memory limit diff --git a/docker/api/daemon.py b/docker/api/daemon.py index f715a13..6b71926 100644 --- a/docker/api/daemon.py +++ b/docker/api/daemon.py @@ -109,7 +109,7 @@ class DaemonApiMixin(object): the Docker server. 
dockercfg_path (str): Use a custom path for the Docker config file (default ``$HOME/.docker/config.json`` if present, - otherwise``$HOME/.dockercfg``) + otherwise ``$HOME/.dockercfg``) Returns: (dict): The response from the login request diff --git a/docker/api/image.py b/docker/api/image.py index 11c8cf7..56c5448 100644 --- a/docker/api/image.py +++ b/docker/api/image.py @@ -81,10 +81,18 @@ class ImageApiMixin(object): If the server returns an error. """ params = { - 'filter': name, 'only_ids': 1 if quiet else 0, 'all': 1 if all else 0, } + if name: + if utils.version_lt(self._version, '1.25'): + # only use "filter" on API 1.24 and under, as it is deprecated + params['filter'] = name + else: + if filters: + filters['reference'] = name + else: + filters = {'reference': name} if filters: params['filters'] = utils.convert_filters(filters) res = self._result(self._get(self._url("/images/json"), params=params), @@ -343,13 +351,14 @@ class ImageApiMixin(object): return self._result(self._post(url, params=params), True) def pull(self, repository, tag=None, stream=False, auth_config=None, - decode=False, platform=None): + decode=False, platform=None, all_tags=False): """ Pulls an image. Similar to the ``docker pull`` command. Args: repository (str): The repository to pull - tag (str): The tag to pull + tag (str): The tag to pull. If ``tag`` is ``None`` or empty, it + is set to ``latest``. stream (bool): Stream the output as a generator. Make sure to consume the generator, otherwise pull might get cancelled. auth_config (dict): Override the credentials that are found in the @@ -358,6 +367,8 @@ class ImageApiMixin(object): decode (bool): Decode the JSON data from the server into dicts. Only applies with ``stream=True`` platform (str): Platform in the format ``os[/arch[/variant]]`` + all_tags (bool): Pull all image tags, the ``tag`` parameter is + ignored. Returns: (generator or str): The output @@ -382,8 +393,12 @@ class ImageApiMixin(object): } """ - if not tag: - repository, tag = utils.parse_repository_tag(repository) + repository, image_tag = utils.parse_repository_tag(repository) + tag = tag or image_tag or 'latest' + + if all_tags: + tag = None + registry, repo_name = auth.resolve_repository_name(repository) params = { diff --git a/docker/api/network.py b/docker/api/network.py index 750b91b..139c2d1 100644 --- a/docker/api/network.py +++ b/docker/api/network.py @@ -216,7 +216,7 @@ class NetworkApiMixin(object): def connect_container_to_network(self, container, net_id, ipv4_address=None, ipv6_address=None, aliases=None, links=None, - link_local_ips=None): + link_local_ips=None, driver_opt=None): """ Connect a container to a network. 
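A short sketch of passing the new ``driver_opt`` argument through the low-level network API; the container name, network name, and driver option key below are placeholders:

```python
import docker

api = docker.APIClient(base_url="unix://var/run/docker.sock")

# Attach an existing container to a user-defined network, forwarding
# endpoint options to the network driver via the new driver_opt argument
# (requires Engine API 1.32 or newer).
api.connect_container_to_network(
    container="my-container",                    # placeholder container name/ID
    net_id="my-network",                         # placeholder network name/ID
    driver_opt={"some.driver.option": "value"},  # hypothetical driver option
)
```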
@@ -240,7 +240,8 @@ class NetworkApiMixin(object): "Container": container, "EndpointConfig": self.create_endpoint_config( aliases=aliases, links=links, ipv4_address=ipv4_address, - ipv6_address=ipv6_address, link_local_ips=link_local_ips + ipv6_address=ipv6_address, link_local_ips=link_local_ips, + driver_opt=driver_opt ), } diff --git a/docker/client.py b/docker/client.py index 99ae196..5add5d7 100644 --- a/docker/client.py +++ b/docker/client.py @@ -1,5 +1,5 @@ from .api.client import APIClient -from .constants import DEFAULT_TIMEOUT_SECONDS +from .constants import (DEFAULT_TIMEOUT_SECONDS, DEFAULT_MAX_POOL_SIZE) from .models.configs import ConfigCollection from .models.containers import ContainerCollection from .models.images import ImageCollection @@ -35,6 +35,11 @@ class DockerClient(object): user_agent (str): Set a custom user agent for requests to the server. credstore_env (dict): Override environment variables when calling the credential store process. + use_ssh_client (bool): If set to `True`, an ssh connection is made + via shelling out to the ssh client. Ensure the ssh client is + installed and configured on the host. + max_pool_size (int): The maximum number of connections + to save in the pool. """ def __init__(self, *args, **kwargs): self.api = APIClient(*args, **kwargs) @@ -62,14 +67,19 @@ class DockerClient(object): Args: version (str): The version of the API to use. Set to ``auto`` to - automatically detect the server's version. Default: ``1.35`` + automatically detect the server's version. Default: ``auto`` timeout (int): Default timeout for API calls, in seconds. + max_pool_size (int): The maximum number of connections + to save in the pool. ssl_version (int): A valid `SSL version`_. assert_hostname (bool): Verify the hostname of the server. environment (dict): The environment to read environment variables from. Default: the value of ``os.environ`` credstore_env (dict): Override environment variables when calling the credential store process. + use_ssh_client (bool): If set to `True`, an ssh connection is + made via shelling out to the ssh client. Ensure the ssh + client is installed and configured on the host. Example: @@ -80,9 +90,15 @@ class DockerClient(object): https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1 """ timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT_SECONDS) + max_pool_size = kwargs.pop('max_pool_size', DEFAULT_MAX_POOL_SIZE) version = kwargs.pop('version', None) + use_ssh_client = kwargs.pop('use_ssh_client', False) return cls( - timeout=timeout, version=version, **kwargs_from_env(**kwargs) + timeout=timeout, + max_pool_size=max_pool_size, + version=version, + use_ssh_client=use_ssh_client, + **kwargs_from_env(**kwargs) ) # Resources diff --git a/docker/constants.py b/docker/constants.py index 4b96e1c..43fce61 100644 --- a/docker/constants.py +++ b/docker/constants.py @@ -1,7 +1,7 @@ import sys from .version import version -DEFAULT_DOCKER_API_VERSION = '1.35' +DEFAULT_DOCKER_API_VERSION = '1.39' MINIMUM_DOCKER_API_VERSION = '1.21' DEFAULT_TIMEOUT_SECONDS = 60 STREAM_HEADER_SIZE_BYTES = 8 @@ -9,6 +9,18 @@ CONTAINER_LIMITS_KEYS = [ 'memory', 'memswap', 'cpushares', 'cpusetcpus' ] +DEFAULT_HTTP_HOST = "127.0.0.1" +DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock" +DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine' + +BYTE_UNITS = { + 'b': 1, + 'k': 1024, + 'm': 1024 * 1024, + 'g': 1024 * 1024 * 1024 +} + + INSECURE_REGISTRY_DEPRECATION_WARNING = \ 'The `insecure_registry` argument to {} ' \ 'is deprecated and non-functional. 
Please remove it.' @@ -24,6 +36,8 @@ DEFAULT_NUM_POOLS = 25 # For more details see: https://github.com/docker/docker-py/issues/2246 DEFAULT_NUM_POOLS_SSH = 9 +DEFAULT_MAX_POOL_SIZE = 10 + DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048 DEFAULT_SWARM_ADDR_POOL = ['10.0.0.0/8'] diff --git a/docker/context/__init__.py b/docker/context/__init__.py new file mode 100644 index 0000000..0a6707f --- /dev/null +++ b/docker/context/__init__.py @@ -0,0 +1,3 @@ +# flake8: noqa +from .context import Context +from .api import ContextAPI diff --git a/docker/context/api.py b/docker/context/api.py new file mode 100644 index 0000000..c45115b --- /dev/null +++ b/docker/context/api.py @@ -0,0 +1,203 @@ +import json +import os + +from docker import errors +from docker.context.config import get_meta_dir +from docker.context.config import METAFILE +from docker.context.config import get_current_context_name +from docker.context.config import write_context_name_to_docker_config +from docker.context import Context + + +class ContextAPI(object): + """Context API. + Contains methods for context management: + create, list, remove, get, inspect. + """ + DEFAULT_CONTEXT = Context("default", "swarm") + + @classmethod + def create_context( + cls, name, orchestrator=None, host=None, tls_cfg=None, + default_namespace=None, skip_tls_verify=False): + """Creates a new context. + Returns: + (Context): a Context object. + Raises: + :py:class:`docker.errors.MissingContextParameter` + If a context name is not provided. + :py:class:`docker.errors.ContextAlreadyExists` + If a context with the name already exists. + :py:class:`docker.errors.ContextException` + If name is default. + + Example: + + >>> from docker.context import ContextAPI + >>> ctx = ContextAPI.create_context(name='test') + >>> print(ctx.Metadata) + { + "Name": "test", + "Metadata": {}, + "Endpoints": { + "docker": { + "Host": "unix:///var/run/docker.sock", + "SkipTLSVerify": false + } + } + } + """ + if not name: + raise errors.MissingContextParameter("name") + if name == "default": + raise errors.ContextException( + '"default" is a reserved context name') + ctx = Context.load_context(name) + if ctx: + raise errors.ContextAlreadyExists(name) + endpoint = "docker" + if orchestrator and orchestrator != "swarm": + endpoint = orchestrator + ctx = Context(name, orchestrator) + ctx.set_endpoint( + endpoint, host, tls_cfg, + skip_tls_verify=skip_tls_verify, + def_namespace=default_namespace) + ctx.save() + return ctx + + @classmethod + def get_context(cls, name=None): + """Retrieves a context object. + Args: + name (str): The name of the context + + Example: + + >>> from docker.context import ContextAPI + >>> ctx = ContextAPI.get_context(name='test') + >>> print(ctx.Metadata) + { + "Name": "test", + "Metadata": {}, + "Endpoints": { + "docker": { + "Host": "unix:///var/run/docker.sock", + "SkipTLSVerify": false + } + } + } + """ + if not name: + name = get_current_context_name() + if name == "default": + return cls.DEFAULT_CONTEXT + return Context.load_context(name) + + @classmethod + def contexts(cls): + """Context list. + Returns: + (Context): List of context objects. + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. 
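A brief sketch of the new context API introduced in this release, assuming write access to the Docker config directory; the context name and host address are placeholders:

```python
from docker.context import ContextAPI

# Create a context pointing at a remote daemon (host value is a placeholder).
ctx = ContextAPI.create_context("staging", host="tcp://10.0.0.5:2375")
print(ctx.Name, ctx.Host)

# List all known contexts, including the built-in "default" one.
for c in ContextAPI.contexts():
    print(c.Name)

# Switch the current context, then restore the default and clean up.
ContextAPI.set_current_context("staging")
ContextAPI.set_current_context("default")
ContextAPI.remove_context("staging")
```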
+ """ + names = [] + for dirname, dirnames, fnames in os.walk(get_meta_dir()): + for filename in fnames + dirnames: + if filename == METAFILE: + try: + data = json.load( + open(os.path.join(dirname, filename), "r")) + names.append(data["Name"]) + except Exception as e: + raise errors.ContextException( + "Failed to load metafile {}: {}".format( + filename, e)) + + contexts = [cls.DEFAULT_CONTEXT] + for name in names: + contexts.append(Context.load_context(name)) + return contexts + + @classmethod + def get_current_context(cls): + """Get current context. + Returns: + (Context): current context object. + """ + return cls.get_context() + + @classmethod + def set_current_context(cls, name="default"): + ctx = cls.get_context(name) + if not ctx: + raise errors.ContextNotFound(name) + + err = write_context_name_to_docker_config(name) + if err: + raise errors.ContextException( + 'Failed to set current context: {}'.format(err)) + + @classmethod + def remove_context(cls, name): + """Remove a context. Similar to the ``docker context rm`` command. + + Args: + name (str): The name of the context + + Raises: + :py:class:`docker.errors.MissingContextParameter` + If a context name is not provided. + :py:class:`docker.errors.ContextNotFound` + If a context with the name does not exist. + :py:class:`docker.errors.ContextException` + If name is default. + + Example: + + >>> from docker.context import ContextAPI + >>> ContextAPI.remove_context(name='test') + >>> + """ + if not name: + raise errors.MissingContextParameter("name") + if name == "default": + raise errors.ContextException( + 'context "default" cannot be removed') + ctx = Context.load_context(name) + if not ctx: + raise errors.ContextNotFound(name) + if name == get_current_context_name(): + write_context_name_to_docker_config(None) + ctx.remove() + + @classmethod + def inspect_context(cls, name="default"): + """Remove a context. Similar to the ``docker context inspect`` command. + + Args: + name (str): The name of the context + + Raises: + :py:class:`docker.errors.MissingContextParameter` + If a context name is not provided. + :py:class:`docker.errors.ContextNotFound` + If a context with the name does not exist. 
+ + Example: + + >>> from docker.context import ContextAPI + >>> ContextAPI.remove_context(name='test') + >>> + """ + if not name: + raise errors.MissingContextParameter("name") + if name == "default": + return cls.DEFAULT_CONTEXT() + ctx = Context.load_context(name) + if not ctx: + raise errors.ContextNotFound(name) + + return ctx() diff --git a/docker/context/config.py b/docker/context/config.py new file mode 100644 index 0000000..baf54f7 --- /dev/null +++ b/docker/context/config.py @@ -0,0 +1,81 @@ +import os +import json +import hashlib + +from docker import utils +from docker.constants import IS_WINDOWS_PLATFORM +from docker.constants import DEFAULT_UNIX_SOCKET +from docker.utils.config import find_config_file + +METAFILE = "meta.json" + + +def get_current_context_name(): + name = "default" + docker_cfg_path = find_config_file() + if docker_cfg_path: + try: + with open(docker_cfg_path, "r") as f: + name = json.load(f).get("currentContext", "default") + except Exception: + return "default" + return name + + +def write_context_name_to_docker_config(name=None): + if name == 'default': + name = None + docker_cfg_path = find_config_file() + config = {} + if docker_cfg_path: + try: + with open(docker_cfg_path, "r") as f: + config = json.load(f) + except Exception as e: + return e + current_context = config.get("currentContext", None) + if current_context and not name: + del config["currentContext"] + elif name: + config["currentContext"] = name + else: + return + try: + with open(docker_cfg_path, "w") as f: + json.dump(config, f, indent=4) + except Exception as e: + return e + + +def get_context_id(name): + return hashlib.sha256(name.encode('utf-8')).hexdigest() + + +def get_context_dir(): + return os.path.join(os.path.dirname(find_config_file() or ""), "contexts") + + +def get_meta_dir(name=None): + meta_dir = os.path.join(get_context_dir(), "meta") + if name: + return os.path.join(meta_dir, get_context_id(name)) + return meta_dir + + +def get_meta_file(name): + return os.path.join(get_meta_dir(name), METAFILE) + + +def get_tls_dir(name=None, endpoint=""): + context_dir = get_context_dir() + if name: + return os.path.join(context_dir, "tls", get_context_id(name), endpoint) + return os.path.join(context_dir, "tls") + + +def get_context_host(path=None, tls=False): + host = utils.parse_host(path, IS_WINDOWS_PLATFORM, tls) + if host == DEFAULT_UNIX_SOCKET: + # remove http+ from default docker socket url + return host.strip("http+") + return host diff --git a/docker/context/context.py b/docker/context/context.py new file mode 100644 index 0000000..2413b2e --- /dev/null +++ b/docker/context/context.py @@ -0,0 +1,238 @@ +import os +import json +from shutil import copyfile, rmtree +from docker.tls import TLSConfig +from docker.errors import ContextException +from docker.context.config import get_meta_dir +from docker.context.config import get_meta_file +from docker.context.config import get_tls_dir +from docker.context.config import get_context_host + + +class Context: + """A context.""" + def __init__(self, name, orchestrator=None, host=None, endpoints=None, + tls=False): + if not name: + raise Exception("Name not provided") + self.name = name + self.context_type = None + self.orchestrator = orchestrator + self.endpoints = {} + self.tls_cfg = {} + self.meta_path = "IN MEMORY" + self.tls_path = "IN MEMORY" + + if not endpoints: + # set default docker endpoint if no endpoint is set + default_endpoint = "docker" if ( + not orchestrator or orchestrator == "swarm" + ) else orchestrator + + 
self.endpoints = { + default_endpoint: { + "Host": get_context_host(host, tls), + "SkipTLSVerify": not tls + } + } + return + + # check docker endpoints + for k, v in endpoints.items(): + if not isinstance(v, dict): + # unknown format + raise ContextException("""Unknown endpoint format for + context {}: {}""".format(name, v)) + + self.endpoints[k] = v + if k != "docker": + continue + + self.endpoints[k]["Host"] = v.get("Host", get_context_host( + host, tls)) + self.endpoints[k]["SkipTLSVerify"] = bool(v.get( + "SkipTLSVerify", not tls)) + + def set_endpoint( + self, name="docker", host=None, tls_cfg=None, + skip_tls_verify=False, def_namespace=None): + self.endpoints[name] = { + "Host": get_context_host(host, not skip_tls_verify), + "SkipTLSVerify": skip_tls_verify + } + if def_namespace: + self.endpoints[name]["DefaultNamespace"] = def_namespace + + if tls_cfg: + self.tls_cfg[name] = tls_cfg + + def inspect(self): + return self.__call__() + + @classmethod + def load_context(cls, name): + meta = Context._load_meta(name) + if meta: + instance = cls( + meta["Name"], + orchestrator=meta["Metadata"].get("StackOrchestrator", None), + endpoints=meta.get("Endpoints", None)) + instance.context_type = meta["Metadata"].get("Type", None) + instance._load_certs() + instance.meta_path = get_meta_dir(name) + return instance + return None + + @classmethod + def _load_meta(cls, name): + meta_file = get_meta_file(name) + if not os.path.isfile(meta_file): + return None + + metadata = {} + try: + with open(meta_file) as f: + metadata = json.load(f) + except (IOError, KeyError, ValueError) as e: + # unknown format + raise Exception("""Detected corrupted meta file for + context {} : {}""".format(name, e)) + + # for docker endpoints, set defaults for + # Host and SkipTLSVerify fields + for k, v in metadata["Endpoints"].items(): + if k != "docker": + continue + metadata["Endpoints"][k]["Host"] = v.get( + "Host", get_context_host(None, False)) + metadata["Endpoints"][k]["SkipTLSVerify"] = bool( + v.get("SkipTLSVerify", True)) + + return metadata + + def _load_certs(self): + certs = {} + tls_dir = get_tls_dir(self.name) + for endpoint in self.endpoints.keys(): + if not os.path.isdir(os.path.join(tls_dir, endpoint)): + continue + ca_cert = None + cert = None + key = None + for filename in os.listdir(os.path.join(tls_dir, endpoint)): + if filename.startswith("ca"): + ca_cert = os.path.join(tls_dir, endpoint, filename) + elif filename.startswith("cert"): + cert = os.path.join(tls_dir, endpoint, filename) + elif filename.startswith("key"): + key = os.path.join(tls_dir, endpoint, filename) + if all([ca_cert, cert, key]): + certs[endpoint] = TLSConfig( + client_cert=(cert, key), ca_cert=ca_cert) + self.tls_cfg = certs + self.tls_path = tls_dir + + def save(self): + meta_dir = get_meta_dir(self.name) + if not os.path.isdir(meta_dir): + os.makedirs(meta_dir) + with open(get_meta_file(self.name), "w") as f: + f.write(json.dumps(self.Metadata)) + + tls_dir = get_tls_dir(self.name) + for endpoint, tls in self.tls_cfg.items(): + if not os.path.isdir(os.path.join(tls_dir, endpoint)): + os.makedirs(os.path.join(tls_dir, endpoint)) + + ca_file = tls.ca_cert + if ca_file: + copyfile(ca_file, os.path.join( + tls_dir, endpoint, os.path.basename(ca_file))) + + if tls.cert: + cert_file, key_file = tls.cert + copyfile(cert_file, os.path.join( + tls_dir, endpoint, os.path.basename(cert_file))) + copyfile(key_file, os.path.join( + tls_dir, endpoint, os.path.basename(key_file))) + + self.meta_path = get_meta_dir(self.name) + 
self.tls_path = get_tls_dir(self.name) + + def remove(self): + if os.path.isdir(self.meta_path): + rmtree(self.meta_path) + if os.path.isdir(self.tls_path): + rmtree(self.tls_path) + + def __repr__(self): + return "<%s: '%s'>" % (self.__class__.__name__, self.name) + + def __str__(self): + return json.dumps(self.__call__(), indent=2) + + def __call__(self): + result = self.Metadata + result.update(self.TLSMaterial) + result.update(self.Storage) + return result + + def is_docker_host(self): + return self.context_type is None + + @property + def Name(self): + return self.name + + @property + def Host(self): + if not self.orchestrator or self.orchestrator == "swarm": + endpoint = self.endpoints.get("docker", None) + if endpoint: + return endpoint.get("Host", None) + return None + + return self.endpoints[self.orchestrator].get("Host", None) + + @property + def Orchestrator(self): + return self.orchestrator + + @property + def Metadata(self): + meta = {} + if self.orchestrator: + meta = {"StackOrchestrator": self.orchestrator} + return { + "Name": self.name, + "Metadata": meta, + "Endpoints": self.endpoints + } + + @property + def TLSConfig(self): + key = self.orchestrator + if not key or key == "swarm": + key = "docker" + if key in self.tls_cfg.keys(): + return self.tls_cfg[key] + return None + + @property + def TLSMaterial(self): + certs = {} + for endpoint, tls in self.tls_cfg.items(): + cert, key = tls.cert + certs[endpoint] = list( + map(os.path.basename, [tls.ca_cert, cert, key])) + return { + "TLSMaterial": certs + } + + @property + def Storage(self): + return { + "Storage": { + "MetadataPath": self.meta_path, + "TLSPath": self.tls_path + }} diff --git a/docker/errors.py b/docker/errors.py index c340dcb..ab30a29 100644 --- a/docker/errors.py +++ b/docker/errors.py @@ -46,12 +46,14 @@ class APIError(requests.exceptions.HTTPError, DockerException): message = super(APIError, self).__str__() if self.is_client_error(): - message = '{0} Client Error: {1}'.format( - self.response.status_code, self.response.reason) + message = '{0} Client Error for {1}: {2}'.format( + self.response.status_code, self.response.url, + self.response.reason) elif self.is_server_error(): - message = '{0} Server Error: {1}'.format( - self.response.status_code, self.response.reason) + message = '{0} Server Error for {1}: {2}'.format( + self.response.status_code, self.response.url, + self.response.reason) if self.explanation: message = '{0} ("{1}")'.format(message, self.explanation) @@ -163,3 +165,35 @@ def create_unexpected_kwargs_error(name, kwargs): text.append("got unexpected keyword arguments ") text.append(', '.join(quoted_kwargs)) return TypeError(''.join(text)) + + +class MissingContextParameter(DockerException): + def __init__(self, param): + self.param = param + + def __str__(self): + return ("missing parameter: {}".format(self.param)) + + +class ContextAlreadyExists(DockerException): + def __init__(self, name): + self.name = name + + def __str__(self): + return ("context {} already exists".format(self.name)) + + +class ContextException(DockerException): + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return (self.msg) + + +class ContextNotFound(DockerException): + def __init__(self, name): + self.name = name + + def __str__(self): + return ("context '{}' not found".format(self.name)) diff --git a/docker/models/containers.py b/docker/models/containers.py index d1f275f..36cbbc4 100644 --- a/docker/models/containers.py +++ b/docker/models/containers.py @@ -225,7 +225,8 @@ class 
Container(Model): """ return self.client.api.export(self.id, chunk_size) - def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE): + def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE, + encode_stream=False): """ Retrieve a file or folder from the container in the form of a tar archive. @@ -235,6 +236,8 @@ class Container(Model): chunk_size (int): The number of bytes returned by each iteration of the generator. If ``None``, data will be streamed as it is received. Default: 2 MB + encode_stream (bool): Determines if data should be encoded + (gzip-compressed) during transmission. Default: False Returns: (tuple): First element is a raw tar data stream. Second element is @@ -255,7 +258,8 @@ class Container(Model): ... f.write(chunk) >>> f.close() """ - return self.client.api.get_archive(self.id, path, chunk_size) + return self.client.api.get_archive(self.id, path, + chunk_size, encode_stream) def kill(self, signal=None): """ @@ -579,6 +583,9 @@ class ContainerCollection(Collection): For example, ``/dev/sda:/dev/xvda:rwm`` allows the container to have read-write access to the host's ``/dev/sda`` via a node named ``/dev/xvda`` inside the container. + device_requests (:py:class:`list`): Expose host resources such as + GPUs to the container, as a list of + :py:class:`docker.types.DeviceRequest` instances. dns (:py:class:`list`): Set custom DNS servers. dns_opt (:py:class:`list`): Additional options to be added to the container's ``resolv.conf`` file. @@ -642,6 +649,7 @@ class ContainerCollection(Collection): - ``container:<name|id>`` Reuse another container's network stack. - ``host`` Use the host network stack. + This mode is incompatible with ``ports``. Incompatible with ``network``. oom_kill_disable (bool): Whether to disable OOM killer. @@ -675,6 +683,7 @@ class ContainerCollection(Collection): to a single container port. For example, ``{'1111/tcp': [1234, 4567]}``. + Incompatible with ``host`` network mode. privileged (bool): Give extended privileges to this container. publish_all_ports (bool): Publish all ports to the host. read_only (bool): Mount the container's root filesystem as read @@ -998,6 +1007,7 @@ RUN_HOST_CONFIG_KWARGS = [ 'device_write_bps', 'device_write_iops', 'devices', + 'device_requests', 'dns_opt', 'dns_search', 'dns', diff --git a/docker/models/images.py b/docker/models/images.py index 757a5a4..e635588 100644 --- a/docker/models/images.py +++ b/docker/models/images.py @@ -395,12 +395,13 @@ class ImageCollection(Collection): return [self.get(i) for i in images] - def pull(self, repository, tag=None, **kwargs): + def pull(self, repository, tag=None, all_tags=False, **kwargs): """ Pull an image of the given name and return it. Similar to the ``docker pull`` command. - If no tag is specified, all tags from that repository will be - pulled. + If ``tag`` is ``None`` or empty, it is set to ``latest``. + If ``all_tags`` is set, the ``tag`` parameter is ignored and all image + tags will be pulled. If you want to get the raw pull output, use the :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the @@ -413,10 +414,11 @@ class ImageCollection(Collection): config for this request. ``auth_config`` should contain the ``username`` and ``password`` keys to be valid. platform (str): Platform in the format ``os[/arch[/variant]]`` + all_tags (bool): Pull all image tags Returns: (:py:class:`Image` or list): The image that has been pulled. 
- If no ``tag`` was specified, the method will return a list + If ``all_tags`` is True, the method will return a list of :py:class:`Image` objects belonging to this repository. Raises: @@ -426,13 +428,13 @@ class ImageCollection(Collection): Example: >>> # Pull the image tagged `latest` in the busybox repo - >>> image = client.images.pull('busybox:latest') + >>> image = client.images.pull('busybox') >>> # Pull all tags in the busybox repo - >>> images = client.images.pull('busybox') + >>> images = client.images.pull('busybox', all_tags=True) """ - if not tag: - repository, tag = parse_repository_tag(repository) + repository, image_tag = parse_repository_tag(repository) + tag = tag or image_tag or 'latest' if 'stream' in kwargs: warnings.warn( @@ -442,14 +444,14 @@ class ImageCollection(Collection): del kwargs['stream'] pull_log = self.client.api.pull( - repository, tag=tag, stream=True, **kwargs + repository, tag=tag, stream=True, all_tags=all_tags, **kwargs ) for _ in pull_log: # We don't do anything with the logs, but we need # to keep the connection alive and wait for the image # to be pulled. pass - if tag: + if not all_tags: return self.get('{0}{2}{1}'.format( repository, tag, '@' if tag.startswith('sha256:') else ':' )) diff --git a/docker/models/networks.py b/docker/models/networks.py index f944c8e..093deb7 100644 --- a/docker/models/networks.py +++ b/docker/models/networks.py @@ -46,6 +46,8 @@ class Network(Model): network, using the IPv6 protocol. Defaults to ``None``. link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6) addresses. + driver_opt (dict): A dictionary of options to provide to the + network driver. Defaults to ``None``. Raises: :py:class:`docker.errors.APIError` diff --git a/docker/models/plugins.py b/docker/models/plugins.py index 0688018..ae5851c 100644 --- a/docker/models/plugins.py +++ b/docker/models/plugins.py @@ -119,7 +119,7 @@ class Plugin(Model): privileges = self.client.api.plugin_privileges(remote) for d in self.client.api.upgrade_plugin(self.name, remote, privileges): yield d - self._reload() + self.reload() class PluginCollection(Collection): diff --git a/docker/models/services.py b/docker/models/services.py index a35687b..a29ff13 100644 --- a/docker/models/services.py +++ b/docker/models/services.py @@ -157,6 +157,8 @@ class ServiceCollection(Collection): constraints. preferences (list of tuple): :py:class:`~docker.types.Placement` preferences. + maxreplicas (int): :py:class:`~docker.types.Placement` maxreplicas + or (int) representing maximum number of replicas per node. platforms (list of tuple): A list of platform constraints expressed as ``(arch, os)`` tuples. container_labels (dict): Labels to apply to the container. 
@@ -319,6 +321,7 @@ PLACEMENT_KWARGS = [ 'constraints', 'preferences', 'platforms', + 'maxreplicas', ] diff --git a/docker/tls.py b/docker/tls.py index d4671d1..1b297ab 100644 --- a/docker/tls.py +++ b/docker/tls.py @@ -32,7 +32,7 @@ class TLSConfig(object): # https://docs.docker.com/engine/articles/https/ # This diverges from the Docker CLI in that users can specify 'tls' # here, but also disable any public/default CA pool verification by - # leaving tls_verify=False + # leaving verify=False self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint @@ -62,7 +62,7 @@ class TLSConfig(object): # https://github.com/docker/docker-py/issues/963 self.ssl_version = ssl.PROTOCOL_TLSv1 - # "tls" and "tls_verify" must have both or neither cert/key files In + # "client_cert" must have both or neither cert/key files. In # either case, Alert the user when both are expected, but any are # missing. @@ -71,7 +71,7 @@ class TLSConfig(object): tls_cert, tls_key = client_cert except ValueError: raise errors.TLSParameterError( - 'client_config must be a tuple of' + 'client_cert must be a tuple of' ' (client certificate, key file)' ) @@ -79,7 +79,7 @@ class TLSConfig(object): not os.path.isfile(tls_key)): raise errors.TLSParameterError( 'Path to a certificate and key files must be provided' - ' through the client_config param' + ' through the client_cert param' ) self.cert = (tls_cert, tls_key) @@ -88,7 +88,7 @@ class TLSConfig(object): self.ca_cert = ca_cert if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert): raise errors.TLSParameterError( - 'Invalid CA certificate provided for `tls_ca_cert`.' + 'Invalid CA certificate provided for `ca_cert`.' ) def configure_client(self, client): diff --git a/docker/transport/npipeconn.py b/docker/transport/npipeconn.py index aa05538..70d8519 100644 --- a/docker/transport/npipeconn.py +++ b/docker/transport/npipeconn.py @@ -73,12 +73,15 @@ class NpipeHTTPAdapter(BaseHTTPAdapter): __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path', 'pools', - 'timeout'] + 'timeout', + 'max_pool_size'] def __init__(self, base_url, timeout=60, - pool_connections=constants.DEFAULT_NUM_POOLS): + pool_connections=constants.DEFAULT_NUM_POOLS, + max_pool_size=constants.DEFAULT_MAX_POOL_SIZE): self.npipe_path = base_url.replace('npipe://', '') self.timeout = timeout + self.max_pool_size = max_pool_size self.pools = RecentlyUsedContainer( pool_connections, dispose_func=lambda p: p.close() ) @@ -91,7 +94,8 @@ class NpipeHTTPAdapter(BaseHTTPAdapter): return pool pool = NpipeHTTPConnectionPool( - self.npipe_path, self.timeout + self.npipe_path, self.timeout, + maxsize=self.max_pool_size ) self.pools[url] = pool diff --git a/docker/transport/npipesocket.py b/docker/transport/npipesocket.py index ef02031..176b5c8 100644 --- a/docker/transport/npipesocket.py +++ b/docker/transport/npipesocket.py @@ -1,4 +1,5 @@ import functools +import time import io import six @@ -9,7 +10,7 @@ cERROR_PIPE_BUSY = 0xe7 cSECURITY_SQOS_PRESENT = 0x100000 cSECURITY_ANONYMOUS = 0 -RETRY_WAIT_TIMEOUT = 10000 +MAXIMUM_RETRY_COUNT = 10 def check_closed(f): @@ -46,8 +47,7 @@ class NpipeSocket(object): self._closed = True @check_closed - def connect(self, address): - win32pipe.WaitNamedPipe(address, self._timeout) + def connect(self, address, retry_count=0): try: handle = win32file.CreateFile( address, @@ -65,8 +65,10 @@ class NpipeSocket(object): # Another program or thread has grabbed our pipe instance # before we got to it. 
Wait for availability and attempt to # connect again. - win32pipe.WaitNamedPipe(address, RETRY_WAIT_TIMEOUT) - return self.connect(address) + retry_count = retry_count + 1 + if (retry_count < MAXIMUM_RETRY_COUNT): + time.sleep(1) + return self.connect(address, retry_count) raise e self.flags = win32pipe.GetNamedPipeInfo(handle)[0] diff --git a/docker/transport/sshconn.py b/docker/transport/sshconn.py index 5a8ceb0..a761ef5 100644 --- a/docker/transport/sshconn.py +++ b/docker/transport/sshconn.py @@ -1,6 +1,11 @@ import paramiko import requests.adapters import six +import logging +import os +import signal +import socket +import subprocess from docker.transport.basehttpadapter import BaseHTTPAdapter from .. import constants @@ -18,33 +23,123 @@ except ImportError: RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer +class SSHSocket(socket.socket): + def __init__(self, host): + super(SSHSocket, self).__init__( + socket.AF_INET, socket.SOCK_STREAM) + self.host = host + self.port = None + self.user = None + if ':' in self.host: + self.host, self.port = self.host.split(':') + if '@' in self.host: + self.user, self.host = self.host.split('@') + + self.proc = None + + def connect(self, **kwargs): + args = ['ssh'] + if self.user: + args = args + ['-l', self.user] + + if self.port: + args = args + ['-p', self.port] + + args = args + ['--', self.host, 'docker system dial-stdio'] + + preexec_func = None + if not constants.IS_WINDOWS_PLATFORM: + def f(): + signal.signal(signal.SIGINT, signal.SIG_IGN) + preexec_func = f + + env = dict(os.environ) + + # drop LD_LIBRARY_PATH and SSL_CERT_FILE + env.pop('LD_LIBRARY_PATH', None) + env.pop('SSL_CERT_FILE', None) + + self.proc = subprocess.Popen( + ' '.join(args), + env=env, + shell=True, + stdout=subprocess.PIPE, + stdin=subprocess.PIPE, + preexec_fn=preexec_func) + + def _write(self, data): + if not self.proc or self.proc.stdin.closed: + raise Exception('SSH subprocess not initiated.' + 'connect() must be called first.') + written = self.proc.stdin.write(data) + self.proc.stdin.flush() + return written + + def sendall(self, data): + self._write(data) + + def send(self, data): + return self._write(data) + + def recv(self, n): + if not self.proc: + raise Exception('SSH subprocess not initiated.' 
+ 'connect() must be called first.') + return self.proc.stdout.read(n) + + def makefile(self, mode): + if not self.proc: + self.connect() + if six.PY3: + self.proc.stdout.channel = self + + return self.proc.stdout + + def close(self): + if not self.proc or self.proc.stdin.closed: + return + self.proc.stdin.write(b'\n\n') + self.proc.stdin.flush() + self.proc.terminate() + + class SSHConnection(httplib.HTTPConnection, object): - def __init__(self, ssh_transport, timeout=60): + def __init__(self, ssh_transport=None, timeout=60, host=None): super(SSHConnection, self).__init__( 'localhost', timeout=timeout ) self.ssh_transport = ssh_transport self.timeout = timeout + self.ssh_host = host def connect(self): - sock = self.ssh_transport.open_session() - sock.settimeout(self.timeout) - sock.exec_command('docker system dial-stdio') + if self.ssh_transport: + sock = self.ssh_transport.open_session() + sock.settimeout(self.timeout) + sock.exec_command('docker system dial-stdio') + else: + sock = SSHSocket(self.ssh_host) + sock.settimeout(self.timeout) + sock.connect() + self.sock = sock class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool): scheme = 'ssh' - def __init__(self, ssh_client, timeout=60, maxsize=10): + def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None): super(SSHConnectionPool, self).__init__( 'localhost', timeout=timeout, maxsize=maxsize ) - self.ssh_transport = ssh_client.get_transport() + self.ssh_transport = None self.timeout = timeout + if ssh_client: + self.ssh_transport = ssh_client.get_transport() + self.ssh_host = host def _new_conn(self): - return SSHConnection(self.ssh_transport, self.timeout) + return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host) # When re-using connections, urllib3 calls fileno() on our # SSH channel instance, quickly overloading our fd limit. 
To avoid this, @@ -72,40 +167,85 @@ class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool): class SSHHTTPAdapter(BaseHTTPAdapter): __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + [ - 'pools', 'timeout', 'ssh_client', + 'pools', 'timeout', 'ssh_client', 'ssh_params', 'max_pool_size' ] def __init__(self, base_url, timeout=60, - pool_connections=constants.DEFAULT_NUM_POOLS): - self.ssh_client = paramiko.SSHClient() - self.ssh_client.load_system_host_keys() + pool_connections=constants.DEFAULT_NUM_POOLS, + max_pool_size=constants.DEFAULT_MAX_POOL_SIZE, + shell_out=False): + self.ssh_client = None + if not shell_out: + self._create_paramiko_client(base_url) + self._connect() + + self.ssh_host = base_url + if base_url.startswith('ssh://'): + self.ssh_host = base_url[len('ssh://'):] - self.base_url = base_url - self._connect() self.timeout = timeout + self.max_pool_size = max_pool_size self.pools = RecentlyUsedContainer( pool_connections, dispose_func=lambda p: p.close() ) super(SSHHTTPAdapter, self).__init__() + def _create_paramiko_client(self, base_url): + logging.getLogger("paramiko").setLevel(logging.WARNING) + self.ssh_client = paramiko.SSHClient() + base_url = six.moves.urllib_parse.urlparse(base_url) + self.ssh_params = { + "hostname": base_url.hostname, + "port": base_url.port, + "username": base_url.username + } + ssh_config_file = os.path.expanduser("~/.ssh/config") + if os.path.exists(ssh_config_file): + conf = paramiko.SSHConfig() + with open(ssh_config_file) as f: + conf.parse(f) + host_config = conf.lookup(base_url.hostname) + self.ssh_conf = host_config + if 'proxycommand' in host_config: + self.ssh_params["sock"] = paramiko.ProxyCommand( + self.ssh_conf['proxycommand'] + ) + if 'hostname' in host_config: + self.ssh_params['hostname'] = host_config['hostname'] + if base_url.port is None and 'port' in host_config: + self.ssh_params['port'] = self.ssh_conf['port'] + if base_url.username is None and 'user' in host_config: + self.ssh_params['username'] = self.ssh_conf['user'] + + self.ssh_client.load_system_host_keys() + self.ssh_client.set_missing_host_key_policy(paramiko.WarningPolicy()) + def _connect(self): - parsed = six.moves.urllib_parse.urlparse(self.base_url) - self.ssh_client.connect( - parsed.hostname, parsed.port, parsed.username, - ) + if self.ssh_client: + self.ssh_client.connect(**self.ssh_params) def get_connection(self, url, proxies=None): + if not self.ssh_client: + return SSHConnectionPool( + ssh_client=self.ssh_client, + timeout=self.timeout, + maxsize=self.max_pool_size, + host=self.ssh_host + ) with self.pools.lock: pool = self.pools.get(url) if pool: return pool # Connection is closed try a reconnect - if not self.ssh_client.get_transport(): + if self.ssh_client and not self.ssh_client.get_transport(): self._connect() pool = SSHConnectionPool( - self.ssh_client, self.timeout + ssh_client=self.ssh_client, + timeout=self.timeout, + maxsize=self.max_pool_size, + host=self.ssh_host ) self.pools[url] = pool @@ -113,4 +253,5 @@ class SSHHTTPAdapter(BaseHTTPAdapter): def close(self): super(SSHHTTPAdapter, self).close() - self.ssh_client.close() + if self.ssh_client: + self.ssh_client.close() diff --git a/docker/transport/unixconn.py b/docker/transport/unixconn.py index b619103..3e040c5 100644 --- a/docker/transport/unixconn.py +++ b/docker/transport/unixconn.py @@ -74,15 +74,18 @@ class UnixHTTPAdapter(BaseHTTPAdapter): __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools', 'socket_path', - 'timeout'] + 'timeout', + 'max_pool_size'] def 
__init__(self, socket_url, timeout=60, - pool_connections=constants.DEFAULT_NUM_POOLS): + pool_connections=constants.DEFAULT_NUM_POOLS, + max_pool_size=constants.DEFAULT_MAX_POOL_SIZE): socket_path = socket_url.replace('http+unix://', '') if not socket_path.startswith('/'): socket_path = '/' + socket_path self.socket_path = socket_path self.timeout = timeout + self.max_pool_size = max_pool_size self.pools = RecentlyUsedContainer( pool_connections, dispose_func=lambda p: p.close() ) @@ -95,7 +98,8 @@ class UnixHTTPAdapter(BaseHTTPAdapter): return pool pool = UnixHTTPConnectionPool( - url, self.socket_path, self.timeout + url, self.socket_path, self.timeout, + maxsize=self.max_pool_size ) self.pools[url] = pool diff --git a/docker/types/__init__.py b/docker/types/__init__.py index 5db330e..b425746 100644 --- a/docker/types/__init__.py +++ b/docker/types/__init__.py @@ -1,5 +1,7 @@ # flake8: noqa -from .containers import ContainerConfig, HostConfig, LogConfig, Ulimit +from .containers import ( + ContainerConfig, HostConfig, LogConfig, Ulimit, DeviceRequest +) from .daemon import CancellableStream from .healthcheck import Healthcheck from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig diff --git a/docker/types/containers.py b/docker/types/containers.py index fd8cab4..9fa4656 100644 --- a/docker/types/containers.py +++ b/docker/types/containers.py @@ -97,8 +97,8 @@ class Ulimit(DictType): Args: - name (str): Which ulimit will this apply to. A list of valid names can - be found `here <http://tinyurl.me/ZWRkM2Ztwlykf>`_. + name (str): Which ulimit will this apply to. The valid names can be + found in '/etc/security/limits.conf' on a gnu/linux system. soft (int): The soft limit for this ulimit. Optional. hard (int): The hard limit for this ulimit. Optional. @@ -154,6 +154,104 @@ class Ulimit(DictType): self['Hard'] = value +class DeviceRequest(DictType): + """ + Create a device request to be used with + :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`. + + Args: + + driver (str): Which driver to use for this device. Optional. + count (int): Number or devices to request. Optional. + Set to -1 to request all available devices. + device_ids (list): List of strings for device IDs. Optional. + Set either ``count`` or ``device_ids``. + capabilities (list): List of lists of strings to request + capabilities. Optional. The global list acts like an OR, + and the sub-lists are AND. The driver will try to satisfy + one of the sub-lists. + Available capabilities for the ``nvidia`` driver can be found + `here <https://github.com/NVIDIA/nvidia-container-runtime>`_. + options (dict): Driver-specific options. Optional. 
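A sketch of requesting GPUs via the new ``DeviceRequest`` type, assuming Engine API 1.40 or newer and an installed NVIDIA container runtime; the image name is only an example:

```python
import docker
from docker.types import DeviceRequest

client = docker.from_env()

# Ask the nvidia driver for every available GPU (count=-1), roughly the
# equivalent of `docker run --gpus all`.
output = client.containers.run(
    "nvidia/cuda:11.0-base",  # example image with nvidia-smi available
    "nvidia-smi",
    device_requests=[DeviceRequest(count=-1, capabilities=[["gpu"]])],
    remove=True,
)
print(output.decode())
```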
+ """ + + def __init__(self, **kwargs): + driver = kwargs.get('driver', kwargs.get('Driver')) + count = kwargs.get('count', kwargs.get('Count')) + device_ids = kwargs.get('device_ids', kwargs.get('DeviceIDs')) + capabilities = kwargs.get('capabilities', kwargs.get('Capabilities')) + options = kwargs.get('options', kwargs.get('Options')) + + if driver is None: + driver = '' + elif not isinstance(driver, six.string_types): + raise ValueError('DeviceRequest.driver must be a string') + if count is None: + count = 0 + elif not isinstance(count, int): + raise ValueError('DeviceRequest.count must be an integer') + if device_ids is None: + device_ids = [] + elif not isinstance(device_ids, list): + raise ValueError('DeviceRequest.device_ids must be a list') + if capabilities is None: + capabilities = [] + elif not isinstance(capabilities, list): + raise ValueError('DeviceRequest.capabilities must be a list') + if options is None: + options = {} + elif not isinstance(options, dict): + raise ValueError('DeviceRequest.options must be a dict') + + super(DeviceRequest, self).__init__({ + 'Driver': driver, + 'Count': count, + 'DeviceIDs': device_ids, + 'Capabilities': capabilities, + 'Options': options + }) + + @property + def driver(self): + return self['Driver'] + + @driver.setter + def driver(self, value): + self['Driver'] = value + + @property + def count(self): + return self['Count'] + + @count.setter + def count(self, value): + self['Count'] = value + + @property + def device_ids(self): + return self['DeviceIDs'] + + @device_ids.setter + def device_ids(self, value): + self['DeviceIDs'] = value + + @property + def capabilities(self): + return self['Capabilities'] + + @capabilities.setter + def capabilities(self, value): + self['Capabilities'] = value + + @property + def options(self): + return self['Options'] + + @options.setter + def options(self, value): + self['Options'] = value + + class HostConfig(dict): def __init__(self, version, binds=None, port_bindings=None, lxc_conf=None, publish_all_ports=False, links=None, @@ -176,7 +274,7 @@ class HostConfig(dict): volume_driver=None, cpu_count=None, cpu_percent=None, nano_cpus=None, cpuset_mems=None, runtime=None, mounts=None, cpu_rt_period=None, cpu_rt_runtime=None, - device_cgroup_rules=None): + device_cgroup_rules=None, device_requests=None): if mem_limit is not None: self['Memory'] = parse_bytes(mem_limit) @@ -236,10 +334,11 @@ class HostConfig(dict): if dns_search: self['DnsSearch'] = dns_search - if network_mode: - self['NetworkMode'] = network_mode - elif network_mode is None: - self['NetworkMode'] = 'default' + if network_mode == 'host' and port_bindings: + raise host_config_incompatible_error( + 'network_mode', 'host', 'port_bindings' + ) + self['NetworkMode'] = network_mode or 'default' if restart_policy: if not isinstance(restart_policy, dict): @@ -536,6 +635,19 @@ class HostConfig(dict): ) self['DeviceCgroupRules'] = device_cgroup_rules + if device_requests is not None: + if version_lt(version, '1.40'): + raise host_config_version_error('device_requests', '1.40') + if not isinstance(device_requests, list): + raise host_config_type_error( + 'device_requests', device_requests, 'list' + ) + self['DeviceRequests'] = [] + for req in device_requests: + if not isinstance(req, DeviceRequest): + req = DeviceRequest(**req) + self['DeviceRequests'].append(req) + def host_config_type_error(param, param_value, expected): error_msg = 'Invalid type for {0} param: expected {1} but found {2}' @@ -553,6 +665,13 @@ def host_config_value_error(param, 
param_value): return ValueError(error_msg.format(param, param_value)) +def host_config_incompatible_error(param, param_value, incompatible_param): + error_msg = '\"{1}\" {0} is incompatible with {2}' + return errors.InvalidArgument( + error_msg.format(param, param_value, incompatible_param) + ) + + class ContainerConfig(dict): def __init__( self, version, image, command, hostname=None, user=None, detach=False, diff --git a/docker/types/networks.py b/docker/types/networks.py index 1c7b2c9..1370dc1 100644 --- a/docker/types/networks.py +++ b/docker/types/networks.py @@ -4,7 +4,7 @@ from ..utils import normalize_links, version_lt class EndpointConfig(dict): def __init__(self, version, aliases=None, links=None, ipv4_address=None, - ipv6_address=None, link_local_ips=None): + ipv6_address=None, link_local_ips=None, driver_opt=None): if version_lt(version, '1.22'): raise errors.InvalidVersion( 'Endpoint config is not supported for API version < 1.22' @@ -33,6 +33,15 @@ class EndpointConfig(dict): if ipam_config: self['IPAMConfig'] = ipam_config + if driver_opt: + if version_lt(version, '1.32'): + raise errors.InvalidVersion( + 'DriverOpts is not supported for API version < 1.32' + ) + if not isinstance(driver_opt, dict): + raise TypeError('driver_opt must be a dictionary') + self['DriverOpts'] = driver_opt + class NetworkingConfig(dict): def __init__(self, endpoints_config=None): diff --git a/docker/types/services.py b/docker/types/services.py index 05dda15..29498e9 100644 --- a/docker/types/services.py +++ b/docker/types/services.py @@ -659,10 +659,12 @@ class Placement(dict): are provided in order from highest to lowest precedence and are expressed as ``(strategy, descriptor)`` tuples. See :py:class:`PlacementPreference` for details. + maxreplicas (int): Maximum number of replicas per node platforms (:py:class:`list` of tuple): A list of platforms expressed as ``(arch, os)`` tuples """ - def __init__(self, constraints=None, preferences=None, platforms=None): + def __init__(self, constraints=None, preferences=None, platforms=None, + maxreplicas=None): if constraints is not None: self['Constraints'] = constraints if preferences is not None: @@ -671,6 +673,8 @@ class Placement(dict): if isinstance(pref, tuple): pref = PlacementPreference(*pref) self['Preferences'].append(pref) + if maxreplicas is not None: + self['MaxReplicas'] = maxreplicas if platforms: self['Platforms'] = [] for plat in platforms: diff --git a/docker/utils/build.py b/docker/utils/build.py index 4fa5751..5787cab 100644 --- a/docker/utils/build.py +++ b/docker/utils/build.py @@ -105,8 +105,9 @@ def create_archive(root, files=None, fileobj=None, gzip=False, for name, contents in extra_files: info = tarfile.TarInfo(name) - info.size = len(contents) - t.addfile(info, io.BytesIO(contents.encode('utf-8'))) + contents_encoded = contents.encode('utf-8') + info.size = len(contents_encoded) + t.addfile(info, io.BytesIO(contents_encoded)) t.close() fileobj.seek(0) diff --git a/docker/utils/utils.py b/docker/utils/utils.py index 7819ace..1b195e2 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -11,6 +11,10 @@ import six from .. import errors from .. 
import tls +from ..constants import DEFAULT_HTTP_HOST +from ..constants import DEFAULT_UNIX_SOCKET +from ..constants import DEFAULT_NPIPE +from ..constants import BYTE_UNITS if six.PY2: from urllib import splitnport @@ -18,17 +22,6 @@ if six.PY2: else: from urllib.parse import splitnport, urlparse -DEFAULT_HTTP_HOST = "127.0.0.1" -DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock" -DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine' - -BYTE_UNITS = { - 'b': 1, - 'k': 1024, - 'm': 1024 * 1024, - 'g': 1024 * 1024 * 1024 -} - def create_ipam_pool(*args, **kwargs): raise errors.DeprecatedMethod( @@ -419,7 +412,7 @@ def parse_bytes(s): if suffix in units.keys() or suffix.isdigit(): try: - digits = int(digits_part) + digits = float(digits_part) except ValueError: raise errors.DockerException( 'Failed converting the string value for memory ({0}) to' diff --git a/docker/version.py b/docker/version.py index 99a8b42..f576846 100644 --- a/docker/version.py +++ b/docker/version.py @@ -1,2 +1,2 @@ -version = "4.1.0" +version = "4.4.4" version_info = tuple([int(d) for d in version.split("-")[0].split(".")]) diff --git a/requirements.txt b/requirements.txt index 804a78a..43a688f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,8 +1,8 @@ appdirs==1.4.3 asn1crypto==0.22.0 backports.ssl-match-hostname==3.5.0.1 -cffi==1.10.0 -cryptography==2.3 +cffi==1.14.4 +cryptography==3.2 enum34==1.1.6 idna==2.5 ipaddress==1.0.18 @@ -11,8 +11,7 @@ paramiko==2.4.2 pycparser==2.17 pyOpenSSL==18.0.0 pyparsing==2.2.0 -pypiwin32==219; sys_platform == 'win32' and python_version < '3.6' -pypiwin32==223; sys_platform == 'win32' and python_version >= '3.6' +pywin32==227; sys_platform == 'win32' requests==2.20.0 six==1.10.0 urllib3==1.24.3 @@ -24,10 +24,7 @@ extras_require = { ':python_version < "3.3"': 'ipaddress >= 1.0.16', # win32 APIs if on Windows (required for npipe support) - # Python 3.6 is only compatible with v220 ; Python < 3.5 is not supported - # on v220 ; ALL versions are broken for v222 (as of 2018-01-26) - ':sys_platform == "win32" and python_version < "3.6"': 'pypiwin32==219', - ':sys_platform == "win32" and python_version >= "3.6"': 'pypiwin32==223', + ':sys_platform == "win32"': 'pywin32==227', # If using docker-py over TLS, highly recommend this option is # pip-installed or pinned. 
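(Editor's aside.) A quick illustration of what the `int` → `float` change in `parse_bytes` above buys: fractional size strings now convert instead of raising a `DockerException`. This assumes the helper is still re-exported from `docker.utils`, as in previous releases.

```python
from docker.utils import parse_bytes

parse_bytes('1.5g')   # 1.5 GiB -> 1610612736 bytes; previously raised
                      # "Failed converting the string value for memory ..."
parse_bytes('128m')   # 128 MiB -> 134217728 bytes, unchanged behaviour
```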
@@ -87,6 +84,8 @@ setup( 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', 'Topic :: Software Development', 'Topic :: Utilities', 'License :: OSI Approved :: Apache Software License', diff --git a/test-requirements.txt b/test-requirements.txt index 0b01e56..24078e2 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,3 +1,4 @@ +setuptools==44.0.0 # last version with python 2.7 support coverage==4.5.2 flake8==3.6.0 mock==1.0.1 diff --git a/tests/integration/api_build_test.py b/tests/integration/api_build_test.py index 5712812..b830a10 100644 --- a/tests/integration/api_build_test.py +++ b/tests/integration/api_build_test.py @@ -339,7 +339,6 @@ class BuildTest(BaseAPIIntegrationTest): assert self.client.inspect_image(img_name) ctnr = self.run_container(img_name, 'cat /hosts-file') - self.tmp_containers.append(ctnr) logs = self.client.logs(ctnr) if six.PY3: logs = logs.decode('utf-8') diff --git a/tests/integration/api_container_test.py b/tests/integration/api_container_test.py index 1ba3eaa..65e611b 100644 --- a/tests/integration/api_container_test.py +++ b/tests/integration/api_container_test.py @@ -273,11 +273,14 @@ class CreateContainerTest(BaseAPIIntegrationTest): def test_invalid_log_driver_raises_exception(self): log_config = docker.types.LogConfig( - type='asdf-nope', + type='asdf', config={} ) - expected_msg = "logger: no log driver named 'asdf-nope' is registered" + expected_msgs = [ + "logger: no log driver named 'asdf' is registered", + "error looking up logging plugin asdf: plugin \"asdf\" not found", + ] with pytest.raises(docker.errors.APIError) as excinfo: # raises an internal server error 500 container = self.client.create_container( @@ -287,7 +290,7 @@ class CreateContainerTest(BaseAPIIntegrationTest): ) self.client.start(container) - assert excinfo.value.explanation == expected_msg + assert excinfo.value.explanation in expected_msgs def test_valid_no_log_driver_specified(self): log_config = docker.types.LogConfig( @@ -1102,6 +1105,8 @@ class PortTest(BaseAPIIntegrationTest): class ContainerTopTest(BaseAPIIntegrationTest): + @pytest.mark.xfail(reason='Output of docker top depends on host distro, ' + 'and is not formalized.') def test_top(self): container = self.client.create_container( TEST_IMG, ['sleep', '60'] @@ -1112,9 +1117,7 @@ class ContainerTopTest(BaseAPIIntegrationTest): self.client.start(container) res = self.client.top(container) if not IS_WINDOWS_PLATFORM: - assert res['Titles'] == [ - 'UID', 'PID', 'PPID', 'C', 'STIME', 'TTY', 'TIME', 'CMD' - ] + assert res['Titles'] == [u'PID', u'USER', u'TIME', u'COMMAND'] assert len(res['Processes']) == 1 assert res['Processes'][0][-1] == 'sleep 60' self.client.kill(container) @@ -1122,6 +1125,8 @@ class ContainerTopTest(BaseAPIIntegrationTest): @pytest.mark.skipif( IS_WINDOWS_PLATFORM, reason='No psargs support on windows' ) + @pytest.mark.xfail(reason='Output of docker top depends on host distro, ' + 'and is not formalized.') def test_top_with_psargs(self): container = self.client.create_container( TEST_IMG, ['sleep', '60']) @@ -1129,11 +1134,8 @@ class ContainerTopTest(BaseAPIIntegrationTest): self.tmp_containers.append(container) self.client.start(container) - res = self.client.top(container, 'waux') - assert res['Titles'] == [ - 'USER', 'PID', '%CPU', '%MEM', 'VSZ', 'RSS', - 'TTY', 'STAT', 'START', 'TIME', 'COMMAND' - ] + res = 
self.client.top(container, '-eopid,user') + assert res['Titles'] == [u'PID', u'USER'] assert len(res['Processes']) == 1 assert res['Processes'][0][10] == 'sleep 60' diff --git a/tests/integration/api_image_test.py b/tests/integration/api_image_test.py index 2bc96ab..37e26a3 100644 --- a/tests/integration/api_image_test.py +++ b/tests/integration/api_image_test.py @@ -42,7 +42,7 @@ class PullImageTest(BaseAPIIntegrationTest): self.client.remove_image('hello-world') except docker.errors.APIError: pass - res = self.client.pull('hello-world', tag='latest') + res = self.client.pull('hello-world') self.tmp_imgs.append('hello-world') assert type(res) == six.text_type assert len(self.client.images('hello-world')) >= 1 @@ -55,7 +55,7 @@ class PullImageTest(BaseAPIIntegrationTest): except docker.errors.APIError: pass stream = self.client.pull( - 'hello-world', tag='latest', stream=True, decode=True) + 'hello-world', stream=True, decode=True) self.tmp_imgs.append('hello-world') for chunk in stream: assert isinstance(chunk, dict) diff --git a/tests/integration/api_network_test.py b/tests/integration/api_network_test.py index 0f26827..af22da8 100644 --- a/tests/integration/api_network_test.py +++ b/tests/integration/api_network_test.py @@ -275,6 +275,27 @@ class TestNetworks(BaseAPIIntegrationTest): assert 'LinkLocalIPs' in net_cfg['IPAMConfig'] assert net_cfg['IPAMConfig']['LinkLocalIPs'] == ['169.254.8.8'] + @requires_api_version('1.32') + def test_create_with_driveropt(self): + container = self.client.create_container( + TEST_IMG, 'top', + networking_config=self.client.create_networking_config( + { + 'bridge': self.client.create_endpoint_config( + driver_opt={'com.docker-py.setting': 'on'} + ) + } + ), + host_config=self.client.create_host_config(network_mode='bridge') + ) + self.tmp_containers.append(container) + self.client.start(container) + container_data = self.client.inspect_container(container) + net_cfg = container_data['NetworkSettings']['Networks']['bridge'] + assert 'DriverOpts' in net_cfg + assert 'com.docker-py.setting' in net_cfg['DriverOpts'] + assert net_cfg['DriverOpts']['com.docker-py.setting'] == 'on' + @requires_api_version('1.22') def test_create_with_links(self): net_name, net_id = self.create_network() diff --git a/tests/integration/api_service_test.py b/tests/integration/api_service_test.py index b6b7ec5..7e5336e 100644 --- a/tests/integration/api_service_test.py +++ b/tests/integration/api_service_test.py @@ -471,6 +471,19 @@ class ServiceTest(BaseAPIIntegrationTest): assert 'Placement' in svc_info['Spec']['TaskTemplate'] assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt + @requires_api_version('1.40') + def test_create_service_with_placement_maxreplicas(self): + container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) + placemt = docker.types.Placement(maxreplicas=1) + task_tmpl = docker.types.TaskTemplate( + container_spec, placement=placemt + ) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'Placement' in svc_info['Spec']['TaskTemplate'] + assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt + def test_create_service_with_endpoint_spec(self): container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) task_tmpl = docker.types.TaskTemplate(container_spec) diff --git a/tests/integration/context_api_test.py b/tests/integration/context_api_test.py new file mode 100644 index 0000000..a2a12a5 --- /dev/null +++ 
b/tests/integration/context_api_test.py @@ -0,0 +1,59 @@ +import os +import tempfile +import pytest +from docker import errors +from docker.context import ContextAPI +from docker.tls import TLSConfig +from .base import BaseAPIIntegrationTest + + +class ContextLifecycleTest(BaseAPIIntegrationTest): + def test_lifecycle(self): + assert ContextAPI.get_context().Name == "default" + assert not ContextAPI.get_context("test") + assert ContextAPI.get_current_context().Name == "default" + + dirpath = tempfile.mkdtemp() + ca = tempfile.NamedTemporaryFile( + prefix=os.path.join(dirpath, "ca.pem"), mode="r") + cert = tempfile.NamedTemporaryFile( + prefix=os.path.join(dirpath, "cert.pem"), mode="r") + key = tempfile.NamedTemporaryFile( + prefix=os.path.join(dirpath, "key.pem"), mode="r") + + # create context 'test + docker_tls = TLSConfig( + client_cert=(cert.name, key.name), + ca_cert=ca.name) + ContextAPI.create_context( + "test", tls_cfg=docker_tls) + + # check for a context 'test' in the context store + assert any([ctx.Name == "test" for ctx in ContextAPI.contexts()]) + # retrieve a context object for 'test' + assert ContextAPI.get_context("test") + # remove context + ContextAPI.remove_context("test") + with pytest.raises(errors.ContextNotFound): + ContextAPI.inspect_context("test") + # check there is no 'test' context in store + assert not ContextAPI.get_context("test") + + ca.close() + key.close() + cert.close() + + def test_context_remove(self): + ContextAPI.create_context("test") + assert ContextAPI.inspect_context("test")["Name"] == "test" + + ContextAPI.remove_context("test") + with pytest.raises(errors.ContextNotFound): + ContextAPI.inspect_context("test") + + def test_load_context_without_orchestrator(self): + ContextAPI.create_context("test") + ctx = ContextAPI.get_context("test") + assert ctx + assert ctx.Name == "test" + assert ctx.Orchestrator is None diff --git a/tests/integration/models_images_test.py b/tests/integration/models_images_test.py index 375d972..0d60f37 100644 --- a/tests/integration/models_images_test.py +++ b/tests/integration/models_images_test.py @@ -86,9 +86,11 @@ class ImageCollectionTest(BaseIntegrationTest): def test_pull_multiple(self): client = docker.from_env(version=TEST_API_VERSION) - images = client.images.pull('hello-world') - assert len(images) == 1 - assert 'hello-world:latest' in images[0].attrs['RepoTags'] + images = client.images.pull('hello-world', all_tags=True) + assert len(images) >= 1 + assert any([ + 'hello-world:latest' in img.attrs['RepoTags'] for img in images + ]) def test_load_error(self): client = docker.from_env(version=TEST_API_VERSION) diff --git a/tests/ssh/__init__.py b/tests/ssh/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/tests/ssh/__init__.py diff --git a/tests/ssh/api_build_test.py b/tests/ssh/api_build_test.py new file mode 100644 index 0000000..b830a10 --- /dev/null +++ b/tests/ssh/api_build_test.py @@ -0,0 +1,595 @@ +import io +import os +import shutil +import tempfile + +from docker import errors +from docker.utils.proxy import ProxyConfig + +import pytest +import six + +from .base import BaseAPIIntegrationTest, TEST_IMG +from ..helpers import random_name, requires_api_version, requires_experimental + + +class BuildTest(BaseAPIIntegrationTest): + def test_build_with_proxy(self): + self.client._proxy_configs = ProxyConfig( + ftp='a', http='b', https='c', no_proxy='d' + ) + + script = io.BytesIO('\n'.join([ + 'FROM busybox', + 'RUN env | grep "FTP_PROXY=a"', + 'RUN env | grep "ftp_proxy=a"', + 'RUN 
env | grep "HTTP_PROXY=b"', + 'RUN env | grep "http_proxy=b"', + 'RUN env | grep "HTTPS_PROXY=c"', + 'RUN env | grep "https_proxy=c"', + 'RUN env | grep "NO_PROXY=d"', + 'RUN env | grep "no_proxy=d"', + ]).encode('ascii')) + + self.client.build(fileobj=script, decode=True) + + def test_build_with_proxy_and_buildargs(self): + self.client._proxy_configs = ProxyConfig( + ftp='a', http='b', https='c', no_proxy='d' + ) + + script = io.BytesIO('\n'.join([ + 'FROM busybox', + 'RUN env | grep "FTP_PROXY=XXX"', + 'RUN env | grep "ftp_proxy=xxx"', + 'RUN env | grep "HTTP_PROXY=b"', + 'RUN env | grep "http_proxy=b"', + 'RUN env | grep "HTTPS_PROXY=c"', + 'RUN env | grep "https_proxy=c"', + 'RUN env | grep "NO_PROXY=d"', + 'RUN env | grep "no_proxy=d"', + ]).encode('ascii')) + + self.client.build( + fileobj=script, + decode=True, + buildargs={'FTP_PROXY': 'XXX', 'ftp_proxy': 'xxx'} + ) + + def test_build_streaming(self): + script = io.BytesIO('\n'.join([ + 'FROM busybox', + 'RUN mkdir -p /tmp/test', + 'EXPOSE 8080', + 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz' + ' /tmp/silence.tar.gz' + ]).encode('ascii')) + stream = self.client.build(fileobj=script, decode=True) + logs = [] + for chunk in stream: + logs.append(chunk) + assert len(logs) > 0 + + def test_build_from_stringio(self): + if six.PY3: + return + script = io.StringIO(six.text_type('\n').join([ + 'FROM busybox', + 'RUN mkdir -p /tmp/test', + 'EXPOSE 8080', + 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz' + ' /tmp/silence.tar.gz' + ])) + stream = self.client.build(fileobj=script) + logs = '' + for chunk in stream: + if six.PY3: + chunk = chunk.decode('utf-8') + logs += chunk + assert logs != '' + + def test_build_with_dockerignore(self): + base_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, base_dir) + + with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: + f.write("\n".join([ + 'FROM busybox', + 'ADD . 
/test', + ])) + + with open(os.path.join(base_dir, '.dockerignore'), 'w') as f: + f.write("\n".join([ + 'ignored', + 'Dockerfile', + '.dockerignore', + '!ignored/subdir/excepted-file', + '', # empty line, + '#*', # comment line + ])) + + with open(os.path.join(base_dir, 'not-ignored'), 'w') as f: + f.write("this file should not be ignored") + + with open(os.path.join(base_dir, '#file.txt'), 'w') as f: + f.write('this file should not be ignored') + + subdir = os.path.join(base_dir, 'ignored', 'subdir') + os.makedirs(subdir) + with open(os.path.join(subdir, 'file'), 'w') as f: + f.write("this file should be ignored") + + with open(os.path.join(subdir, 'excepted-file'), 'w') as f: + f.write("this file should not be ignored") + + tag = 'docker-py-test-build-with-dockerignore' + stream = self.client.build( + path=base_dir, + tag=tag, + ) + for chunk in stream: + pass + + c = self.client.create_container(tag, ['find', '/test', '-type', 'f']) + self.client.start(c) + self.client.wait(c) + logs = self.client.logs(c) + + if six.PY3: + logs = logs.decode('utf-8') + + assert sorted(list(filter(None, logs.split('\n')))) == sorted([ + '/test/#file.txt', + '/test/ignored/subdir/excepted-file', + '/test/not-ignored' + ]) + + def test_build_with_buildargs(self): + script = io.BytesIO('\n'.join([ + 'FROM scratch', + 'ARG test', + 'USER $test' + ]).encode('ascii')) + + stream = self.client.build( + fileobj=script, tag='buildargs', buildargs={'test': 'OK'} + ) + self.tmp_imgs.append('buildargs') + for chunk in stream: + pass + + info = self.client.inspect_image('buildargs') + assert info['Config']['User'] == 'OK' + + @requires_api_version('1.22') + def test_build_shmsize(self): + script = io.BytesIO('\n'.join([ + 'FROM scratch', + 'CMD sh -c "echo \'Hello, World!\'"', + ]).encode('ascii')) + + tag = 'shmsize' + shmsize = 134217728 + + stream = self.client.build( + fileobj=script, tag=tag, shmsize=shmsize + ) + self.tmp_imgs.append(tag) + for chunk in stream: + pass + + # There is currently no way to get the shmsize + # that was used to build the image + + @requires_api_version('1.24') + def test_build_isolation(self): + script = io.BytesIO('\n'.join([ + 'FROM scratch', + 'CMD sh -c "echo \'Deaf To All But The Song\'' + ]).encode('ascii')) + + stream = self.client.build( + fileobj=script, tag='isolation', + isolation='default' + ) + + for chunk in stream: + pass + + @requires_api_version('1.23') + def test_build_labels(self): + script = io.BytesIO('\n'.join([ + 'FROM scratch', + ]).encode('ascii')) + + labels = {'test': 'OK'} + + stream = self.client.build( + fileobj=script, tag='labels', labels=labels + ) + self.tmp_imgs.append('labels') + for chunk in stream: + pass + + info = self.client.inspect_image('labels') + assert info['Config']['Labels'] == labels + + @requires_api_version('1.25') + def test_build_with_cache_from(self): + script = io.BytesIO('\n'.join([ + 'FROM busybox', + 'ENV FOO=bar', + 'RUN touch baz', + 'RUN touch bax', + ]).encode('ascii')) + + stream = self.client.build(fileobj=script, tag='build1') + self.tmp_imgs.append('build1') + for chunk in stream: + pass + + stream = self.client.build( + fileobj=script, tag='build2', cache_from=['build1'], + decode=True + ) + self.tmp_imgs.append('build2') + counter = 0 + for chunk in stream: + if 'Using cache' in chunk.get('stream', ''): + counter += 1 + assert counter == 3 + self.client.remove_image('build2') + + counter = 0 + stream = self.client.build( + fileobj=script, tag='build2', cache_from=['nosuchtag'], + decode=True + ) + for chunk in 
stream: + if 'Using cache' in chunk.get('stream', ''): + counter += 1 + assert counter == 0 + + @requires_api_version('1.29') + def test_build_container_with_target(self): + script = io.BytesIO('\n'.join([ + 'FROM busybox as first', + 'RUN mkdir -p /tmp/test', + 'RUN touch /tmp/silence.tar.gz', + 'FROM alpine:latest', + 'WORKDIR /root/' + 'COPY --from=first /tmp/silence.tar.gz .', + 'ONBUILD RUN echo "This should not be in the final image"' + ]).encode('ascii')) + + stream = self.client.build( + fileobj=script, target='first', tag='build1' + ) + self.tmp_imgs.append('build1') + for chunk in stream: + pass + + info = self.client.inspect_image('build1') + assert not info['Config']['OnBuild'] + + @requires_api_version('1.25') + def test_build_with_network_mode(self): + # Set up pingable endpoint on custom network + network = self.client.create_network(random_name())['Id'] + self.tmp_networks.append(network) + container = self.client.create_container(TEST_IMG, 'top') + self.tmp_containers.append(container) + self.client.start(container) + self.client.connect_container_to_network( + container, network, aliases=['pingtarget.docker'] + ) + + script = io.BytesIO('\n'.join([ + 'FROM busybox', + 'RUN ping -c1 pingtarget.docker' + ]).encode('ascii')) + + stream = self.client.build( + fileobj=script, network_mode=network, + tag='dockerpytest_customnetbuild' + ) + + self.tmp_imgs.append('dockerpytest_customnetbuild') + for chunk in stream: + pass + + assert self.client.inspect_image('dockerpytest_customnetbuild') + + script.seek(0) + stream = self.client.build( + fileobj=script, network_mode='none', + tag='dockerpytest_nonebuild', nocache=True, decode=True + ) + + self.tmp_imgs.append('dockerpytest_nonebuild') + logs = [chunk for chunk in stream] + assert 'errorDetail' in logs[-1] + assert logs[-1]['errorDetail']['code'] == 1 + + with pytest.raises(errors.NotFound): + self.client.inspect_image('dockerpytest_nonebuild') + + @requires_api_version('1.27') + def test_build_with_extra_hosts(self): + img_name = 'dockerpytest_extrahost_build' + self.tmp_imgs.append(img_name) + + script = io.BytesIO('\n'.join([ + 'FROM busybox', + 'RUN ping -c1 hello.world.test', + 'RUN ping -c1 extrahost.local.test', + 'RUN cp /etc/hosts /hosts-file' + ]).encode('ascii')) + + stream = self.client.build( + fileobj=script, tag=img_name, + extra_hosts={ + 'extrahost.local.test': '127.0.0.1', + 'hello.world.test': '127.0.0.1', + }, decode=True + ) + for chunk in stream: + if 'errorDetail' in chunk: + pytest.fail(chunk) + + assert self.client.inspect_image(img_name) + ctnr = self.run_container(img_name, 'cat /hosts-file') + logs = self.client.logs(ctnr) + if six.PY3: + logs = logs.decode('utf-8') + assert '127.0.0.1\textrahost.local.test' in logs + assert '127.0.0.1\thello.world.test' in logs + + @requires_experimental(until=None) + @requires_api_version('1.25') + def test_build_squash(self): + script = io.BytesIO('\n'.join([ + 'FROM busybox', + 'RUN echo blah > /file_1', + 'RUN echo blahblah > /file_2', + 'RUN echo blahblahblah > /file_3' + ]).encode('ascii')) + + def build_squashed(squash): + tag = 'squash' if squash else 'nosquash' + stream = self.client.build( + fileobj=script, tag=tag, squash=squash + ) + self.tmp_imgs.append(tag) + for chunk in stream: + pass + + return self.client.inspect_image(tag) + + non_squashed = build_squashed(False) + squashed = build_squashed(True) + assert len(non_squashed['RootFS']['Layers']) == 4 + assert len(squashed['RootFS']['Layers']) == 2 + + def test_build_stderr_data(self): + control_chars 
= ['\x1b[91m', '\x1b[0m'] + snippet = 'Ancient Temple (Mystic Oriental Dream ~ Ancient Temple)' + script = io.BytesIO(b'\n'.join([ + b'FROM busybox', + 'RUN sh -c ">&2 echo \'{0}\'"'.format(snippet).encode('utf-8') + ])) + + stream = self.client.build( + fileobj=script, decode=True, nocache=True + ) + lines = [] + for chunk in stream: + lines.append(chunk.get('stream')) + expected = '{0}{2}\n{1}'.format( + control_chars[0], control_chars[1], snippet + ) + assert any([line == expected for line in lines]) + + def test_build_gzip_encoding(self): + base_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, base_dir) + + with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: + f.write("\n".join([ + 'FROM busybox', + 'ADD . /test', + ])) + + stream = self.client.build( + path=base_dir, decode=True, nocache=True, + gzip=True + ) + + lines = [] + for chunk in stream: + lines.append(chunk) + + assert 'Successfully built' in lines[-1]['stream'] + + def test_build_with_dockerfile_empty_lines(self): + base_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, base_dir) + with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: + f.write('FROM busybox\n') + with open(os.path.join(base_dir, '.dockerignore'), 'w') as f: + f.write('\n'.join([ + ' ', + '', + '\t\t', + '\t ', + ])) + + stream = self.client.build( + path=base_dir, decode=True, nocache=True + ) + + lines = [] + for chunk in stream: + lines.append(chunk) + assert 'Successfully built' in lines[-1]['stream'] + + def test_build_gzip_custom_encoding(self): + with pytest.raises(errors.DockerException): + self.client.build(path='.', gzip=True, encoding='text/html') + + @requires_api_version('1.32') + @requires_experimental(until=None) + def test_build_invalid_platform(self): + script = io.BytesIO('FROM busybox\n'.encode('ascii')) + + with pytest.raises(errors.APIError) as excinfo: + stream = self.client.build(fileobj=script, platform='foobar') + for _ in stream: + pass + + # Some API versions incorrectly returns 500 status; assert 4xx or 5xx + assert excinfo.value.is_error() + assert 'unknown operating system' in excinfo.exconly() \ + or 'invalid platform' in excinfo.exconly() + + def test_build_out_of_context_dockerfile(self): + base_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, base_dir) + with open(os.path.join(base_dir, 'file.txt'), 'w') as f: + f.write('hello world') + with open(os.path.join(base_dir, '.dockerignore'), 'w') as f: + f.write('.dockerignore\n') + df_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, df_dir) + df_name = os.path.join(df_dir, 'Dockerfile') + with open(df_name, 'wb') as df: + df.write(('\n'.join([ + 'FROM busybox', + 'COPY . 
/src', + 'WORKDIR /src', + ])).encode('utf-8')) + df.flush() + img_name = random_name() + self.tmp_imgs.append(img_name) + stream = self.client.build( + path=base_dir, dockerfile=df_name, tag=img_name, + decode=True + ) + lines = [] + for chunk in stream: + lines.append(chunk) + assert 'Successfully tagged' in lines[-1]['stream'] + + ctnr = self.client.create_container(img_name, 'ls -a') + self.tmp_containers.append(ctnr) + self.client.start(ctnr) + lsdata = self.client.logs(ctnr).strip().split(b'\n') + assert len(lsdata) == 3 + assert sorted([b'.', b'..', b'file.txt']) == sorted(lsdata) + + def test_build_in_context_dockerfile(self): + base_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, base_dir) + with open(os.path.join(base_dir, 'file.txt'), 'w') as f: + f.write('hello world') + with open(os.path.join(base_dir, 'custom.dockerfile'), 'w') as df: + df.write('\n'.join([ + 'FROM busybox', + 'COPY . /src', + 'WORKDIR /src', + ])) + img_name = random_name() + self.tmp_imgs.append(img_name) + stream = self.client.build( + path=base_dir, dockerfile='custom.dockerfile', tag=img_name, + decode=True + ) + lines = [] + for chunk in stream: + lines.append(chunk) + assert 'Successfully tagged' in lines[-1]['stream'] + + ctnr = self.client.create_container(img_name, 'ls -a') + self.tmp_containers.append(ctnr) + self.client.start(ctnr) + lsdata = self.client.logs(ctnr).strip().split(b'\n') + assert len(lsdata) == 4 + assert sorted( + [b'.', b'..', b'file.txt', b'custom.dockerfile'] + ) == sorted(lsdata) + + def test_build_in_context_nested_dockerfile(self): + base_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, base_dir) + with open(os.path.join(base_dir, 'file.txt'), 'w') as f: + f.write('hello world') + subdir = os.path.join(base_dir, 'hello', 'world') + os.makedirs(subdir) + with open(os.path.join(subdir, 'custom.dockerfile'), 'w') as df: + df.write('\n'.join([ + 'FROM busybox', + 'COPY . /src', + 'WORKDIR /src', + ])) + img_name = random_name() + self.tmp_imgs.append(img_name) + stream = self.client.build( + path=base_dir, dockerfile='hello/world/custom.dockerfile', + tag=img_name, decode=True + ) + lines = [] + for chunk in stream: + lines.append(chunk) + assert 'Successfully tagged' in lines[-1]['stream'] + + ctnr = self.client.create_container(img_name, 'ls -a') + self.tmp_containers.append(ctnr) + self.client.start(ctnr) + lsdata = self.client.logs(ctnr).strip().split(b'\n') + assert len(lsdata) == 4 + assert sorted( + [b'.', b'..', b'file.txt', b'hello'] + ) == sorted(lsdata) + + def test_build_in_context_abs_dockerfile(self): + base_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, base_dir) + abs_dockerfile_path = os.path.join(base_dir, 'custom.dockerfile') + with open(os.path.join(base_dir, 'file.txt'), 'w') as f: + f.write('hello world') + with open(abs_dockerfile_path, 'w') as df: + df.write('\n'.join([ + 'FROM busybox', + 'COPY . 
/src', + 'WORKDIR /src', + ])) + img_name = random_name() + self.tmp_imgs.append(img_name) + stream = self.client.build( + path=base_dir, dockerfile=abs_dockerfile_path, tag=img_name, + decode=True + ) + lines = [] + for chunk in stream: + lines.append(chunk) + assert 'Successfully tagged' in lines[-1]['stream'] + + ctnr = self.client.create_container(img_name, 'ls -a') + self.tmp_containers.append(ctnr) + self.client.start(ctnr) + lsdata = self.client.logs(ctnr).strip().split(b'\n') + assert len(lsdata) == 4 + assert sorted( + [b'.', b'..', b'file.txt', b'custom.dockerfile'] + ) == sorted(lsdata) + + @requires_api_version('1.31') + @pytest.mark.xfail( + True, + reason='Currently fails on 18.09: ' + 'https://github.com/moby/moby/issues/37920' + ) + def test_prune_builds(self): + prune_result = self.client.prune_builds() + assert 'SpaceReclaimed' in prune_result + assert isinstance(prune_result['SpaceReclaimed'], int) diff --git a/tests/ssh/base.py b/tests/ssh/base.py new file mode 100644 index 0000000..c723d82 --- /dev/null +++ b/tests/ssh/base.py @@ -0,0 +1,130 @@ +import os +import shutil +import unittest + +import docker +from .. import helpers +from docker.utils import kwargs_from_env + +TEST_IMG = 'alpine:3.10' +TEST_API_VERSION = os.environ.get('DOCKER_TEST_API_VERSION') + + +class BaseIntegrationTest(unittest.TestCase): + """ + A base class for integration test cases. It cleans up the Docker server + after itself. + """ + + def setUp(self): + self.tmp_imgs = [] + self.tmp_containers = [] + self.tmp_folders = [] + self.tmp_volumes = [] + self.tmp_networks = [] + self.tmp_plugins = [] + self.tmp_secrets = [] + self.tmp_configs = [] + + def tearDown(self): + client = docker.from_env(version=TEST_API_VERSION, use_ssh_client=True) + try: + for img in self.tmp_imgs: + try: + client.api.remove_image(img) + except docker.errors.APIError: + pass + for container in self.tmp_containers: + try: + client.api.remove_container(container, force=True, v=True) + except docker.errors.APIError: + pass + for network in self.tmp_networks: + try: + client.api.remove_network(network) + except docker.errors.APIError: + pass + for volume in self.tmp_volumes: + try: + client.api.remove_volume(volume) + except docker.errors.APIError: + pass + + for secret in self.tmp_secrets: + try: + client.api.remove_secret(secret) + except docker.errors.APIError: + pass + + for config in self.tmp_configs: + try: + client.api.remove_config(config) + except docker.errors.APIError: + pass + + for folder in self.tmp_folders: + shutil.rmtree(folder) + finally: + client.close() + + +class BaseAPIIntegrationTest(BaseIntegrationTest): + """ + A test case for `APIClient` integration tests. It sets up an `APIClient` + as `self.client`. 
+ """ + @classmethod + def setUpClass(cls): + cls.client = cls.get_client_instance() + cls.client.pull(TEST_IMG) + + def tearDown(self): + super(BaseAPIIntegrationTest, self).tearDown() + self.client.close() + + @staticmethod + def get_client_instance(): + return docker.APIClient( + version=TEST_API_VERSION, + timeout=60, + use_ssh_client=True, + **kwargs_from_env() + ) + + @staticmethod + def _init_swarm(client, **kwargs): + return client.init_swarm( + '127.0.0.1', listen_addr=helpers.swarm_listen_addr(), **kwargs + ) + + def run_container(self, *args, **kwargs): + container = self.client.create_container(*args, **kwargs) + self.tmp_containers.append(container) + self.client.start(container) + exitcode = self.client.wait(container)['StatusCode'] + + if exitcode != 0: + output = self.client.logs(container) + raise Exception( + "Container exited with code {}:\n{}" + .format(exitcode, output)) + + return container + + def create_and_start(self, image=TEST_IMG, command='top', **kwargs): + container = self.client.create_container( + image=image, command=command, **kwargs) + self.tmp_containers.append(container) + self.client.start(container) + return container + + def execute(self, container, cmd, exit_code=0, **kwargs): + exc = self.client.exec_create(container, cmd, **kwargs) + output = self.client.exec_start(exc) + actual_exit_code = self.client.exec_inspect(exc)['ExitCode'] + msg = "Expected `{}` to exit with code {} but returned {}:\n{}".format( + " ".join(cmd), exit_code, actual_exit_code, output) + assert actual_exit_code == exit_code, msg + + def init_swarm(self, **kwargs): + return self._init_swarm(self.client, **kwargs) diff --git a/tests/unit/api_container_test.py b/tests/unit/api_container_test.py index a7e183c..8a0577e 100644 --- a/tests/unit/api_container_test.py +++ b/tests/unit/api_container_test.py @@ -5,6 +5,7 @@ import json import signal import docker +from docker.api import APIClient import pytest import six @@ -12,7 +13,7 @@ from . 
import fake_api from ..helpers import requires_api_version from .api_test import ( BaseAPIClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS, - fake_inspect_container + fake_inspect_container, url_base ) try: @@ -767,6 +768,67 @@ class CreateContainerTest(BaseAPIClientTest): assert args[1]['headers'] == {'Content-Type': 'application/json'} assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + def test_create_container_with_device_requests(self): + client = APIClient(version='1.40') + fake_api.fake_responses.setdefault( + '{0}/v1.40/containers/create'.format(fake_api.prefix), + fake_api.post_fake_create_container, + ) + client.create_container( + 'busybox', 'true', host_config=client.create_host_config( + device_requests=[ + { + 'device_ids': [ + '0', + 'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a' + ] + }, + { + 'driver': 'nvidia', + 'Count': -1, + 'capabilities': [ + ['gpu', 'utility'] + ], + 'options': { + 'key': 'value' + } + } + ] + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_base + 'v1.40/' + 'containers/create' + expected_payload = self.base_create_payload() + expected_payload['HostConfig'] = client.create_host_config() + expected_payload['HostConfig']['DeviceRequests'] = [ + { + 'Driver': '', + 'Count': 0, + 'DeviceIDs': [ + '0', + 'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a' + ], + 'Capabilities': [], + 'Options': {} + }, + { + 'Driver': 'nvidia', + 'Count': -1, + 'DeviceIDs': [], + 'Capabilities': [ + ['gpu', 'utility'] + ], + 'Options': { + 'key': 'value' + } + } + ] + assert json.loads(args[1]['data']) == expected_payload + assert args[1]['headers']['Content-Type'] == 'application/json' + assert set(args[1]['headers']) <= {'Content-Type', 'User-Agent'} + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + def test_create_container_with_labels_dict(self): labels_dict = { six.text_type('foo'): six.text_type('1'), diff --git a/tests/unit/api_image_test.py b/tests/unit/api_image_test.py index 1e2315d..0b60df4 100644 --- a/tests/unit/api_image_test.py +++ b/tests/unit/api_image_test.py @@ -26,7 +26,18 @@ class ImageTest(BaseAPIClientTest): fake_request.assert_called_with( 'GET', url_prefix + 'images/json', - params={'filter': None, 'only_ids': 0, 'all': 1}, + params={'only_ids': 0, 'all': 1}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_images_name(self): + self.client.images('foo:bar') + + fake_request.assert_called_with( + 'GET', + url_prefix + 'images/json', + params={'only_ids': 0, 'all': 0, + 'filters': '{"reference": ["foo:bar"]}'}, timeout=DEFAULT_TIMEOUT_SECONDS ) @@ -36,7 +47,7 @@ class ImageTest(BaseAPIClientTest): fake_request.assert_called_with( 'GET', url_prefix + 'images/json', - params={'filter': None, 'only_ids': 1, 'all': 1}, + params={'only_ids': 1, 'all': 1}, timeout=DEFAULT_TIMEOUT_SECONDS ) @@ -46,7 +57,7 @@ class ImageTest(BaseAPIClientTest): fake_request.assert_called_with( 'GET', url_prefix + 'images/json', - params={'filter': None, 'only_ids': 1, 'all': 0}, + params={'only_ids': 1, 'all': 0}, timeout=DEFAULT_TIMEOUT_SECONDS ) @@ -56,7 +67,7 @@ class ImageTest(BaseAPIClientTest): fake_request.assert_called_with( 'GET', url_prefix + 'images/json', - params={'filter': None, 'only_ids': 0, 'all': 0, + params={'only_ids': 0, 'all': 0, 'filters': '{"dangling": ["true"]}'}, timeout=DEFAULT_TIMEOUT_SECONDS ) @@ -67,7 +78,7 @@ class ImageTest(BaseAPIClientTest): args = fake_request.call_args assert args[0][1] == url_prefix + 'images/create' assert args[1]['params'] == { - 'tag': None, 'fromImage': 'joffrey/test001' + 'tag': 
'latest', 'fromImage': 'joffrey/test001' } assert not args[1]['stream'] @@ -77,7 +88,7 @@ class ImageTest(BaseAPIClientTest): args = fake_request.call_args assert args[0][1] == url_prefix + 'images/create' assert args[1]['params'] == { - 'tag': None, 'fromImage': 'joffrey/test001' + 'tag': 'latest', 'fromImage': 'joffrey/test001' } assert args[1]['stream'] diff --git a/tests/unit/api_network_test.py b/tests/unit/api_network_test.py index c78554d..758f013 100644 --- a/tests/unit/api_network_test.py +++ b/tests/unit/api_network_test.py @@ -136,7 +136,8 @@ class NetworkTest(BaseAPIClientTest): container={'Id': container_id}, net_id=network_id, aliases=['foo', 'bar'], - links=[('baz', 'quux')] + links=[('baz', 'quux')], + driver_opt={'com.docker-py.setting': 'yes'}, ) assert post.call_args[0][0] == ( @@ -148,6 +149,7 @@ class NetworkTest(BaseAPIClientTest): 'EndpointConfig': { 'Aliases': ['foo', 'bar'], 'Links': ['baz:quux'], + 'DriverOpts': {'com.docker-py.setting': 'yes'}, }, } diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py index f4d220a..cb14b74 100644 --- a/tests/unit/api_test.py +++ b/tests/unit/api_test.py @@ -1,26 +1,26 @@ import datetime -import json import io +import json import os import re import shutil import socket +import struct import tempfile import threading import time import unittest import docker -from docker.api import APIClient +import pytest import requests -from requests.packages import urllib3 import six -import struct +from docker.api import APIClient +from docker.constants import DEFAULT_DOCKER_API_VERSION +from requests.packages import urllib3 from . import fake_api -import pytest - try: from unittest import mock except ImportError: @@ -105,7 +105,7 @@ class BaseAPIClientTest(unittest.TestCase): _read_from_socket=fake_read_from_socket ) self.patcher.start() - self.client = APIClient() + self.client = APIClient(version=DEFAULT_DOCKER_API_VERSION) def tearDown(self): self.client.close() @@ -282,27 +282,37 @@ class DockerApiTest(BaseAPIClientTest): return socket_adapter.socket_path def test_url_compatibility_unix(self): - c = APIClient(base_url="unix://socket") + c = APIClient( + base_url="unix://socket", + version=DEFAULT_DOCKER_API_VERSION) assert self._socket_path_for_client_session(c) == '/socket' def test_url_compatibility_unix_triple_slash(self): - c = APIClient(base_url="unix:///socket") + c = APIClient( + base_url="unix:///socket", + version=DEFAULT_DOCKER_API_VERSION) assert self._socket_path_for_client_session(c) == '/socket' def test_url_compatibility_http_unix_triple_slash(self): - c = APIClient(base_url="http+unix:///socket") + c = APIClient( + base_url="http+unix:///socket", + version=DEFAULT_DOCKER_API_VERSION) assert self._socket_path_for_client_session(c) == '/socket' def test_url_compatibility_http(self): - c = APIClient(base_url="http://hostname:1234") + c = APIClient( + base_url="http://hostname:1234", + version=DEFAULT_DOCKER_API_VERSION) assert c.base_url == "http://hostname:1234" def test_url_compatibility_tcp(self): - c = APIClient(base_url="tcp://hostname:1234") + c = APIClient( + base_url="tcp://hostname:1234", + version=DEFAULT_DOCKER_API_VERSION) assert c.base_url == "http://hostname:1234" @@ -447,7 +457,9 @@ class UnixSocketStreamTest(unittest.TestCase): b'\r\n' ) + b'\r\n'.join(lines) - with APIClient(base_url="http+unix://" + self.socket_file) as client: + with APIClient( + base_url="http+unix://" + self.socket_file, + version=DEFAULT_DOCKER_API_VERSION) as client: for i in range(5): try: stream = client.build( @@ -532,7 
+544,10 @@ class TCPSocketStreamTest(unittest.TestCase): def request(self, stream=None, tty=None, demux=None): assert stream is not None and tty is not None and demux is not None - with APIClient(base_url=self.address) as client: + with APIClient( + base_url=self.address, + version=DEFAULT_DOCKER_API_VERSION + ) as client: if tty: url = client._url('/tty') else: @@ -597,7 +612,7 @@ class UserAgentTest(unittest.TestCase): self.patcher.stop() def test_default_user_agent(self): - client = APIClient() + client = APIClient(version=DEFAULT_DOCKER_API_VERSION) client.version() assert self.mock_send.call_count == 1 @@ -606,7 +621,9 @@ class UserAgentTest(unittest.TestCase): assert headers['User-Agent'] == expected def test_custom_user_agent(self): - client = APIClient(user_agent='foo/bar') + client = APIClient( + user_agent='foo/bar', + version=DEFAULT_DOCKER_API_VERSION) client.version() assert self.mock_send.call_count == 1 @@ -626,7 +643,7 @@ class DisableSocketTest(unittest.TestCase): return self.timeout def setUp(self): - self.client = APIClient() + self.client = APIClient(version=DEFAULT_DOCKER_API_VERSION) def test_disable_socket_timeout(self): """Test that the timeout is disabled on a generic socket object.""" diff --git a/tests/unit/client_test.py b/tests/unit/client_test.py index cce99c5..ad88e84 100644 --- a/tests/unit/client_test.py +++ b/tests/unit/client_test.py @@ -1,22 +1,24 @@ import datetime +import os +import unittest + import docker -from docker.utils import kwargs_from_env +import pytest from docker.constants import ( - DEFAULT_DOCKER_API_VERSION, DEFAULT_TIMEOUT_SECONDS + DEFAULT_DOCKER_API_VERSION, DEFAULT_TIMEOUT_SECONDS, + DEFAULT_MAX_POOL_SIZE, IS_WINDOWS_PLATFORM ) -import os -import unittest +from docker.utils import kwargs_from_env from . 
import fake_api -import pytest try: from unittest import mock except ImportError: import mock - TEST_CERT_DIR = os.path.join(os.path.dirname(__file__), 'testdata/certs') +POOL_SIZE = 20 class ClientTest(unittest.TestCase): @@ -25,33 +27,33 @@ class ClientTest(unittest.TestCase): def test_events(self, mock_func): since = datetime.datetime(2016, 1, 1, 0, 0) mock_func.return_value = fake_api.get_fake_events()[1] - client = docker.from_env() + client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION) assert client.events(since=since) == mock_func.return_value mock_func.assert_called_with(since=since) @mock.patch('docker.api.APIClient.info') def test_info(self, mock_func): mock_func.return_value = fake_api.get_fake_info()[1] - client = docker.from_env() + client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION) assert client.info() == mock_func.return_value mock_func.assert_called_with() @mock.patch('docker.api.APIClient.ping') def test_ping(self, mock_func): mock_func.return_value = True - client = docker.from_env() + client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION) assert client.ping() is True mock_func.assert_called_with() @mock.patch('docker.api.APIClient.version') def test_version(self, mock_func): mock_func.return_value = fake_api.get_fake_version()[1] - client = docker.from_env() + client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION) assert client.version() == mock_func.return_value mock_func.assert_called_with() def test_call_api_client_method(self): - client = docker.from_env() + client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION) with pytest.raises(AttributeError) as cm: client.create_container() s = cm.exconly() @@ -65,7 +67,9 @@ class ClientTest(unittest.TestCase): assert "this method is now on the object APIClient" not in s def test_call_containers(self): - client = docker.DockerClient(**kwargs_from_env()) + client = docker.DockerClient( + version=DEFAULT_DOCKER_API_VERSION, + **kwargs_from_env()) with pytest.raises(TypeError) as cm: client.containers() @@ -74,6 +78,84 @@ class ClientTest(unittest.TestCase): assert "'ContainerCollection' object is not callable" in s assert "docker.APIClient" in s + @pytest.mark.skipif( + IS_WINDOWS_PLATFORM, reason='Unix Connection Pool only on Linux' + ) + @mock.patch("docker.transport.unixconn.UnixHTTPConnectionPool") + def test_default_pool_size_unix(self, mock_obj): + client = docker.DockerClient( + version=DEFAULT_DOCKER_API_VERSION + ) + mock_obj.return_value.urlopen.return_value.status = 200 + client.ping() + + base_url = "{base_url}/v{version}/_ping".format( + base_url=client.api.base_url, + version=client.api._version + ) + + mock_obj.assert_called_once_with(base_url, + "/var/run/docker.sock", + 60, + maxsize=DEFAULT_MAX_POOL_SIZE + ) + + @pytest.mark.skipif( + not IS_WINDOWS_PLATFORM, reason='Npipe Connection Pool only on Windows' + ) + @mock.patch("docker.transport.npipeconn.NpipeHTTPConnectionPool") + def test_default_pool_size_win(self, mock_obj): + client = docker.DockerClient( + version=DEFAULT_DOCKER_API_VERSION + ) + mock_obj.return_value.urlopen.return_value.status = 200 + client.ping() + + mock_obj.assert_called_once_with("//./pipe/docker_engine", + 60, + maxsize=DEFAULT_MAX_POOL_SIZE + ) + + @pytest.mark.skipif( + IS_WINDOWS_PLATFORM, reason='Unix Connection Pool only on Linux' + ) + @mock.patch("docker.transport.unixconn.UnixHTTPConnectionPool") + def test_pool_size_unix(self, mock_obj): + client = docker.DockerClient( + version=DEFAULT_DOCKER_API_VERSION, + max_pool_size=POOL_SIZE + ) + 
mock_obj.return_value.urlopen.return_value.status = 200 + client.ping() + + base_url = "{base_url}/v{version}/_ping".format( + base_url=client.api.base_url, + version=client.api._version + ) + + mock_obj.assert_called_once_with(base_url, + "/var/run/docker.sock", + 60, + maxsize=POOL_SIZE + ) + + @pytest.mark.skipif( + not IS_WINDOWS_PLATFORM, reason='Npipe Connection Pool only on Windows' + ) + @mock.patch("docker.transport.npipeconn.NpipeHTTPConnectionPool") + def test_pool_size_win(self, mock_obj): + client = docker.DockerClient( + version=DEFAULT_DOCKER_API_VERSION, + max_pool_size=POOL_SIZE + ) + mock_obj.return_value.urlopen.return_value.status = 200 + client.ping() + + mock_obj.assert_called_once_with("//./pipe/docker_engine", + 60, + maxsize=POOL_SIZE + ) + class FromEnvTest(unittest.TestCase): @@ -90,7 +172,7 @@ class FromEnvTest(unittest.TestCase): os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376', DOCKER_CERT_PATH=TEST_CERT_DIR, DOCKER_TLS_VERIFY='1') - client = docker.from_env() + client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION) assert client.api.base_url == "https://192.168.59.103:2376" def test_from_env_with_version(self): @@ -102,11 +184,85 @@ class FromEnvTest(unittest.TestCase): assert client.api._version == '2.32' def test_from_env_without_version_uses_default(self): - client = docker.from_env() + client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION) assert client.api._version == DEFAULT_DOCKER_API_VERSION def test_from_env_without_timeout_uses_default(self): - client = docker.from_env() + client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION) assert client.api.timeout == DEFAULT_TIMEOUT_SECONDS + + @pytest.mark.skipif( + IS_WINDOWS_PLATFORM, reason='Unix Connection Pool only on Linux' + ) + @mock.patch("docker.transport.unixconn.UnixHTTPConnectionPool") + def test_default_pool_size_from_env_unix(self, mock_obj): + client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION) + mock_obj.return_value.urlopen.return_value.status = 200 + client.ping() + + base_url = "{base_url}/v{version}/_ping".format( + base_url=client.api.base_url, + version=client.api._version + ) + + mock_obj.assert_called_once_with(base_url, + "/var/run/docker.sock", + 60, + maxsize=DEFAULT_MAX_POOL_SIZE + ) + + @pytest.mark.skipif( + not IS_WINDOWS_PLATFORM, reason='Npipe Connection Pool only on Windows' + ) + @mock.patch("docker.transport.npipeconn.NpipeHTTPConnectionPool") + def test_default_pool_size_from_env_win(self, mock_obj): + client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION) + mock_obj.return_value.urlopen.return_value.status = 200 + client.ping() + + mock_obj.assert_called_once_with("//./pipe/docker_engine", + 60, + maxsize=DEFAULT_MAX_POOL_SIZE + ) + + @pytest.mark.skipif( + IS_WINDOWS_PLATFORM, reason='Unix Connection Pool only on Linux' + ) + @mock.patch("docker.transport.unixconn.UnixHTTPConnectionPool") + def test_pool_size_from_env_unix(self, mock_obj): + client = docker.from_env( + version=DEFAULT_DOCKER_API_VERSION, + max_pool_size=POOL_SIZE + ) + mock_obj.return_value.urlopen.return_value.status = 200 + client.ping() + + base_url = "{base_url}/v{version}/_ping".format( + base_url=client.api.base_url, + version=client.api._version + ) + + mock_obj.assert_called_once_with(base_url, + "/var/run/docker.sock", + 60, + maxsize=POOL_SIZE + ) + + @pytest.mark.skipif( + not IS_WINDOWS_PLATFORM, reason='Npipe Connection Pool only on Windows' + ) + @mock.patch("docker.transport.npipeconn.NpipeHTTPConnectionPool") + def 
test_pool_size_from_env_win(self, mock_obj): + client = docker.from_env( + version=DEFAULT_DOCKER_API_VERSION, + max_pool_size=POOL_SIZE + ) + mock_obj.return_value.urlopen.return_value.status = 200 + client.ping() + + mock_obj.assert_called_once_with("//./pipe/docker_engine", + 60, + maxsize=POOL_SIZE + ) diff --git a/tests/unit/context_test.py b/tests/unit/context_test.py new file mode 100644 index 0000000..6d6d672 --- /dev/null +++ b/tests/unit/context_test.py @@ -0,0 +1,49 @@ +import unittest +import docker +import pytest +from docker.constants import DEFAULT_UNIX_SOCKET +from docker.constants import DEFAULT_NPIPE +from docker.constants import IS_WINDOWS_PLATFORM +from docker.context import ContextAPI, Context + + +class BaseContextTest(unittest.TestCase): + @pytest.mark.skipif( + IS_WINDOWS_PLATFORM, reason='Linux specific path check' + ) + def test_url_compatibility_on_linux(self): + c = Context("test") + assert c.Host == DEFAULT_UNIX_SOCKET.strip("http+") + + @pytest.mark.skipif( + not IS_WINDOWS_PLATFORM, reason='Windows specific path check' + ) + def test_url_compatibility_on_windows(self): + c = Context("test") + assert c.Host == DEFAULT_NPIPE + + def test_fail_on_default_context_create(self): + with pytest.raises(docker.errors.ContextException): + ContextAPI.create_context("default") + + def test_default_in_context_list(self): + found = False + ctx = ContextAPI.contexts() + for c in ctx: + if c.Name == "default": + found = True + assert found is True + + def test_get_current_context(self): + assert ContextAPI.get_current_context().Name == "default" + + def test_https_host(self): + c = Context("test", host="tcp://testdomain:8080", tls=True) + assert c.Host == "https://testdomain:8080" + + def test_context_inspect_without_params(self): + ctx = ContextAPI.inspect_context() + assert ctx["Name"] == "default" + assert ctx["Metadata"]["StackOrchestrator"] == "swarm" + assert ctx["Endpoints"]["docker"]["Host"] in [ + DEFAULT_NPIPE, DEFAULT_UNIX_SOCKET.strip("http+")] diff --git a/tests/unit/errors_test.py b/tests/unit/errors_test.py index 2134f86..54c2ba8 100644 --- a/tests/unit/errors_test.py +++ b/tests/unit/errors_test.py @@ -101,17 +101,17 @@ class APIErrorTest(unittest.TestCase): assert err.is_error() is True def test_create_error_from_exception(self): - resp = requests.Response() - resp.status_code = 500 - err = APIError('') + resp = requests.Response() + resp.status_code = 500 + err = APIError('') + try: + resp.raise_for_status() + except requests.exceptions.HTTPError as e: try: - resp.raise_for_status() - except requests.exceptions.HTTPError as e: - try: - create_api_error_from_http_exception(e) - except APIError as e: - err = e - assert err.is_server_error() is True + create_api_error_from_http_exception(e) + except APIError as e: + err = e + assert err.is_server_error() is True class ContainerErrorTest(unittest.TestCase): diff --git a/tests/unit/fake_api.py b/tests/unit/fake_api.py index e609b64..27e463d 100644 --- a/tests/unit/fake_api.py +++ b/tests/unit/fake_api.py @@ -1,6 +1,7 @@ -from . import fake_stat from docker import constants +from . import fake_stat + CURRENT_VERSION = 'v{0}'.format(constants.DEFAULT_DOCKER_API_VERSION) FAKE_CONTAINER_ID = '3cc2351ab11b' diff --git a/tests/unit/fake_api_client.py b/tests/unit/fake_api_client.py index 2147bfd..e85001d 100644 --- a/tests/unit/fake_api_client.py +++ b/tests/unit/fake_api_client.py @@ -1,6 +1,7 @@ import copy -import docker +import docker +from docker.constants import DEFAULT_DOCKER_API_VERSION from . 
import fake_api try: @@ -30,7 +31,7 @@ def make_fake_api_client(overrides=None): if overrides is None: overrides = {} - api_client = docker.APIClient() + api_client = docker.APIClient(version=DEFAULT_DOCKER_API_VERSION) mock_attrs = { 'build.return_value': fake_api.FAKE_IMAGE_ID, 'commit.return_value': fake_api.post_fake_commit()[1], @@ -50,6 +51,7 @@ def make_fake_api_client(overrides=None): 'networks.return_value': fake_api.get_fake_network_list()[1], 'start.return_value': None, 'wait.return_value': {'StatusCode': 0}, + 'version.return_value': fake_api.get_fake_version() } mock_attrs.update(overrides) mock_client = CopyReturnMagicMock(**mock_attrs) @@ -62,6 +64,6 @@ def make_fake_client(overrides=None): """ Returns a Client with a fake APIClient. """ - client = docker.DockerClient() + client = docker.DockerClient(version=DEFAULT_DOCKER_API_VERSION) client.api = make_fake_api_client(overrides) return client diff --git a/tests/unit/models_containers_test.py b/tests/unit/models_containers_test.py index da5f0ab..c7aa46b 100644 --- a/tests/unit/models_containers_test.py +++ b/tests/unit/models_containers_test.py @@ -233,7 +233,7 @@ class ContainerCollectionTest(unittest.TestCase): assert container.id == FAKE_CONTAINER_ID client.api.pull.assert_called_with( - 'alpine', platform=None, tag=None, stream=True + 'alpine', platform=None, tag='latest', all_tags=False, stream=True ) def test_run_with_error(self): @@ -450,7 +450,7 @@ class ContainerTest(unittest.TestCase): container = client.containers.get(FAKE_CONTAINER_ID) container.get_archive('foo') client.api.get_archive.assert_called_with( - FAKE_CONTAINER_ID, 'foo', DEFAULT_DATA_CHUNK_SIZE + FAKE_CONTAINER_ID, 'foo', DEFAULT_DATA_CHUNK_SIZE, False ) def test_image(self): diff --git a/tests/unit/models_images_test.py b/tests/unit/models_images_test.py index fd894ab..e3d070c 100644 --- a/tests/unit/models_images_test.py +++ b/tests/unit/models_images_test.py @@ -44,9 +44,25 @@ class ImageCollectionTest(unittest.TestCase): def test_pull(self): client = make_fake_client() - image = client.images.pull('test_image:latest') + image = client.images.pull('test_image:test') client.api.pull.assert_called_with( - 'test_image', tag='latest', stream=True + 'test_image', tag='test', all_tags=False, stream=True + ) + client.api.inspect_image.assert_called_with('test_image:test') + assert isinstance(image, Image) + assert image.id == FAKE_IMAGE_ID + + def test_pull_tag_precedence(self): + client = make_fake_client() + image = client.images.pull('test_image:latest', tag='test') + client.api.pull.assert_called_with( + 'test_image', tag='test', all_tags=False, stream=True + ) + client.api.inspect_image.assert_called_with('test_image:test') + + image = client.images.pull('test_image') + client.api.pull.assert_called_with( + 'test_image', tag='latest', all_tags=False, stream=True ) client.api.inspect_image.assert_called_with('test_image:latest') assert isinstance(image, Image) @@ -54,9 +70,9 @@ class ImageCollectionTest(unittest.TestCase): def test_pull_multiple(self): client = make_fake_client() - images = client.images.pull('test_image') + images = client.images.pull('test_image', all_tags=True) client.api.pull.assert_called_with( - 'test_image', tag=None, stream=True + 'test_image', tag='latest', all_tags=True, stream=True ) client.api.images.assert_called_with( all=False, name='test_image', filters=None diff --git a/tests/unit/models_services_test.py b/tests/unit/models_services_test.py index a4ac50c..07bb589 100644 --- a/tests/unit/models_services_test.py +++ 
b/tests/unit/models_services_test.py @@ -28,6 +28,7 @@ class CreateServiceKwargsTest(unittest.TestCase): 'constraints': ['foo=bar'], 'preferences': ['bar=baz'], 'platforms': [('x86_64', 'linux')], + 'maxreplicas': 1 }) task_template = kwargs.pop('task_template') @@ -47,6 +48,7 @@ class CreateServiceKwargsTest(unittest.TestCase): 'Constraints': ['foo=bar'], 'Preferences': ['bar=baz'], 'Platforms': [{'Architecture': 'x86_64', 'OS': 'linux'}], + 'MaxReplicas': 1, } assert task_template['LogDriver'] == { 'Name': 'logdriver', diff --git a/tests/unit/sshadapter_test.py b/tests/unit/sshadapter_test.py new file mode 100644 index 0000000..ddee592 --- /dev/null +++ b/tests/unit/sshadapter_test.py @@ -0,0 +1,32 @@ +import unittest +import docker +from docker.transport.sshconn import SSHSocket + +class SSHAdapterTest(unittest.TestCase): + def test_ssh_hostname_prefix_trim(self): + conn = docker.transport.SSHHTTPAdapter(base_url="ssh://user@hostname:1234", shell_out=True) + assert conn.ssh_host == "user@hostname:1234" + + def test_ssh_parse_url(self): + c = SSHSocket(host="user@hostname:1234") + assert c.host == "hostname" + assert c.port == "1234" + assert c.user == "user" + + def test_ssh_parse_hostname_only(self): + c = SSHSocket(host="hostname") + assert c.host == "hostname" + assert c.port == None + assert c.user == None + + def test_ssh_parse_user_and_hostname(self): + c = SSHSocket(host="user@hostname") + assert c.host == "hostname" + assert c.port == None + assert c.user == "user" + + def test_ssh_parse_hostname_and_port(self): + c = SSHSocket(host="hostname:22") + assert c.host == "hostname" + assert c.port == "22" + assert c.user == None
\ No newline at end of file diff --git a/tests/unit/utils_build_test.py b/tests/unit/utils_build_test.py index 012f15b..bc6fb5f 100644 --- a/tests/unit/utils_build_test.py +++ b/tests/unit/utils_build_test.py @@ -335,7 +335,7 @@ class ExcludePathsTest(unittest.TestCase): # Dockerignore reference stipulates that absolute paths are # equivalent to relative paths, hence /../foo should be # equivalent to ../foo. It also stipulates that paths are run - # through Go's filepath.Clean, which explicitely "replace + # through Go's filepath.Clean, which explicitly "replace # "/.." by "/" at the beginning of a path". assert exclude_paths( base, diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py index d9cb002..a53151c 100644 --- a/tests/unit/utils_test.py +++ b/tests/unit/utils_test.py @@ -5,27 +5,21 @@ import json import os import os.path import shutil -import sys import tempfile import unittest - +import pytest +import six from docker.api.client import APIClient -from docker.constants import IS_WINDOWS_PLATFORM +from docker.constants import IS_WINDOWS_PLATFORM, DEFAULT_DOCKER_API_VERSION from docker.errors import DockerException -from docker.utils import ( - convert_filters, convert_volume_binds, decode_json_header, kwargs_from_env, - parse_bytes, parse_devices, parse_env_file, parse_host, - parse_repository_tag, split_command, update_headers, -) - +from docker.utils import (convert_filters, convert_volume_binds, + decode_json_header, kwargs_from_env, parse_bytes, + parse_devices, parse_env_file, parse_host, + parse_repository_tag, split_command, update_headers) from docker.utils.ports import build_port_bindings, split_port from docker.utils.utils import format_environment -import pytest - -import six - TEST_CERT_DIR = os.path.join( os.path.dirname(__file__), 'testdata/certs', @@ -41,7 +35,7 @@ class DecoratorsTest(unittest.TestCase): def f(self, headers=None): return headers - client = APIClient() + client = APIClient(version=DEFAULT_DOCKER_API_VERSION) client._general_configs = {} g = update_headers(f) @@ -92,6 +86,7 @@ class KwargsFromEnvTest(unittest.TestCase): assert kwargs['tls'].verify parsed_host = parse_host(kwargs['base_url'], IS_WINDOWS_PLATFORM, True) + kwargs['version'] = DEFAULT_DOCKER_API_VERSION try: client = APIClient(**kwargs) assert parsed_host == client.base_url @@ -112,6 +107,7 @@ class KwargsFromEnvTest(unittest.TestCase): assert kwargs['tls'].assert_hostname is True assert kwargs['tls'].verify is False parsed_host = parse_host(kwargs['base_url'], IS_WINDOWS_PLATFORM, True) + kwargs['version'] = DEFAULT_DOCKER_API_VERSION try: client = APIClient(**kwargs) assert parsed_host == client.base_url @@ -447,11 +443,7 @@ class ParseBytesTest(unittest.TestCase): parse_bytes("127.0.0.1K") def test_parse_bytes_float(self): - with pytest.raises(DockerException): - parse_bytes("1.5k") - - def test_parse_bytes_maxint(self): - assert parse_bytes("{0}k".format(sys.maxsize)) == sys.maxsize * 1024 + assert parse_bytes("1.5k") == 1536 class UtilsTest(unittest.TestCase): |
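
The connection-pool tests above exercise the `max_pool_size` keyword that `DockerClient` and `docker.from_env` forward to the underlying urllib3 pool as `maxsize`. A minimal usage sketch — the socket path and pool size are illustrative values, and a reachable local daemon is assumed:

```python
import docker

# max_pool_size is the keyword asserted in the pool-size tests above;
# 8 is an arbitrary example value.
client = docker.DockerClient(
    base_url="unix:///var/run/docker.sock",  # assumes a local Unix socket
    version="auto",
    max_pool_size=8,  # passed through as maxsize= to the connection pool
)
client.ping()
```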
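
The new `tests/unit/context_test.py` relies on the implicit "default" context always being present. A short sketch of the same calls, assuming an environment with no user-defined contexts configured:

```python
from docker.context import ContextAPI

# In a clean environment the current context is the built-in "default",
# whose docker endpoint is the local socket or named pipe.
ctx = ContextAPI.get_current_context()
assert ctx.Name == "default"
print(ContextAPI.inspect_context()["Endpoints"]["docker"]["Host"])
```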
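
The `models_images_test.py` changes pin down the new pull semantics: a bare repository name defaults to `:latest`, an explicit `tag=` argument takes precedence over a tag embedded in the name, and `all_tags=True` restores the old pull-everything behaviour. A sketch with placeholder image names (a reachable daemon is assumed):

```python
import docker

client = docker.from_env()

image = client.images.pull("alpine")                    # pulls alpine:latest
image = client.images.pull("alpine:3.12", tag="3.13")   # explicit tag= wins -> alpine:3.13
images = client.images.pull("alpine", all_tags=True)    # returns a list of Image objects
```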
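
Finally, the new `sshadapter_test.py` covers host-string parsing in `SSHSocket`. The sketch below mirrors those assertions; importing `docker.transport.sshconn` may require the `ssh` extra (paramiko) to be installed:

```python
from docker.transport.sshconn import SSHSocket

# user@ and :port are optional; the parsed port is kept as a string.
sock = SSHSocket(host="user@hostname:1234")
print(sock.user, sock.host, sock.port)  # -> user hostname 1234
```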